feat: Enhance system prompt JSON schema and clarify tool result formatting in conversational agent
This commit is contained in:
@@ -270,7 +270,12 @@ absl::StatusOr<ChatMessage> ConversationalAgentService::SendMessage(
   }

-  // Add tool result with a clear marker for the LLM
-  std::string marked_output = "[TOOL RESULT] " + tool_output;
+  // Format as plain text to avoid confusing the LLM with nested JSON
+  std::string marked_output = absl::StrCat(
+      "[TOOL RESULT for ", tool_call.tool_name, "]\n",
+      "The tool returned the following data:\n",
+      tool_output, "\n\n",
+      "Please provide a text_response field in your JSON to summarize this information for the user.");
   history_.push_back(
       CreateMessage(ChatMessage::Sender::kUser, marked_output));
 }

@@ -222,6 +222,13 @@ absl::StatusOr<AgentResponse> OllamaAIService::GenerateResponse(

   std::string llm_output = ollama_wrapper["response"].get<std::string>();

+  // Debug: Print raw LLM output when verbose mode is enabled
+  const char* verbose_env = std::getenv("Z3ED_VERBOSE");
+  if (verbose_env && std::string(verbose_env) == "1") {
+    std::cout << "\n" << "\033[35m" << "🔍 Raw LLM Response:" << "\033[0m" << "\n"
+              << "\033[2m" << llm_output << "\033[0m" << "\n\n";
+  }
+
   // Parse the LLM's JSON response (the agent structure)
   nlohmann::json response_json;
   try {
Reference in New Issue
Block a user