generateChatResponse method
Implementation
Future<ModelResponse> generateChatResponse() async {
  debugPrint('InferenceChat: Getting response from native model...');
  final response = await session.getResponse();
  final cleanedResponse = ModelThinkingFilter.cleanResponse(
    response,
    isThinking: isThinking,
    modelType: modelType,
    fileType: fileType,
  );

  if (cleanedResponse.isEmpty) {
    debugPrint('InferenceChat: Response from native model is empty after cleaning.');
    // Nothing usable survived cleaning: return an empty TextResponse.
    return const TextResponse('');
  }

  debugPrint('InferenceChat: Cleaned response from native model:\n--- START ---\n$cleanedResponse\n--- END ---');

  // Try to parse the output as a function call, but only if tools are
  // registered and the model supports function calling.
  if (tools.isNotEmpty && supportsFunctionCalls) {
    final functionCall = FunctionCallParser.parse(cleanedResponse);
    if (functionCall != null) {
      debugPrint('InferenceChat: Detected function call in sync response');
      final toolCallMessage = Message.toolCall(text: cleanedResponse);
      _fullHistory.add(toolCallMessage);
      _modelHistory.add(toolCallMessage);
      debugPrint('InferenceChat: Added tool call to history: ${toolCallMessage.text}');
      return functionCall;
    }
  }

  // Regular text response: record it in both histories and return it.
  final chatMessage = Message(text: cleanedResponse, isUser: false);
  _fullHistory.add(chatMessage);
  _modelHistory.add(chatMessage);
  return TextResponse(cleanedResponse);
}
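Example

A minimal usage sketch. It assumes chat is an already-constructed InferenceChat whose session has received the user's message (the construction and message-submission APIs are not shown in this section), and it inspects only the runtime type of the result rather than any payload fields:

// Sketch only: `chat` is assumed to be a ready InferenceChat instance.
final ModelResponse response = await chat.generateChatResponse();
if (response is TextResponse) {
  // Plain text reply: the model produced ordinary chat output,
  // already appended to the chat history by generateChatResponse.
  debugPrint('Received a text response');
} else {
  // Non-text ModelResponse: FunctionCallParser detected a tool call,
  // and the raw call text was already recorded in the history.
  debugPrint('Received a function call');
}

Because generateChatResponse itself appends the model's turn to both internal histories, callers should not re-add the returned response to the conversation; they only need to branch on its type.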