feat: Enhance AI agent capabilities with new tool calling instructions, improved response handling, and terminal color utilities

Author: scawful
Date: 2025-10-04 03:04:22 -04:00
parent 06dcffb6ac
commit 2931634837
10 changed files with 562 additions and 15 deletions


@@ -11,6 +11,7 @@
#include "absl/strings/str_join.h"
#include "absl/time/clock.h"
#include "cli/service/ai/service_factory.h"
#include "cli/util/terminal_colors.h"
#include "nlohmann/json.hpp"
namespace yaze {
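
The new cli/util/terminal_colors.h header itself is not shown in this excerpt. As a rough sketch of the interface the call sites below imply (util::PrintError, util::PrintWarning, util::PrintSuccess, util::PrintToolCall), assuming plain ANSI escape codes; the committed signatures and colors may differ:

```cpp
// cli/util/terminal_colors.h -- hypothetical sketch, not the committed file.
#pragma once

#include <iostream>
#include <string>

namespace yaze::util {

// ANSI escape sequences; assumes the terminal supports them.
inline constexpr char kRed[] = "\033[31m";
inline constexpr char kYellow[] = "\033[33m";
inline constexpr char kGreen[] = "\033[32m";
inline constexpr char kCyan[] = "\033[36m";
inline constexpr char kReset[] = "\033[0m";

inline void PrintError(const std::string& msg) {
  std::cerr << kRed << "error: " << msg << kReset << "\n";
}

inline void PrintWarning(const std::string& msg) {
  std::cerr << kYellow << "warning: " << msg << kReset << "\n";
}

inline void PrintSuccess(const std::string& msg) {
  std::cout << kGreen << msg << kReset << "\n";
}

inline void PrintToolCall(const std::string& tool, const std::string& args) {
  std::cout << kCyan << "-> " << tool << "(" << args << ")" << kReset << "\n";
}

}  // namespace yaze::util
```

Inline functions keep the header self-contained; a real implementation would likely also check whether stdout is a TTY before emitting escape codes.
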
@@ -174,9 +175,23 @@ absl::StatusOr<ChatMessage> ConversationalAgentService::SendMessage(
}
constexpr int kMaxToolIterations = 4;
bool waiting_for_text_response = false;
for (int iteration = 0; iteration < kMaxToolIterations; ++iteration) {
// Show loading indicator while waiting for AI response
util::LoadingIndicator loader(
waiting_for_text_response
? "Generating final response..."
: "Thinking...",
true);
loader.Start();
auto response_or = ai_service_->GenerateResponse(history_);
loader.Stop();
if (!response_or.ok()) {
util::PrintError(absl::StrCat(
"Failed to get AI response: ", response_or.status().message()));
return absl::InternalError(absl::StrCat(
"Failed to get AI response: ", response_or.status().message()));
}
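
The hunk above constructs a util::LoadingIndicator with a message and a boolean before each model call. A minimal sketch of such a class, assuming the boolean toggles a spinner animation (its actual meaning in the commit is not visible here):

```cpp
// Hypothetical LoadingIndicator sketch: a background thread animates a
// spinner next to the message until Stop() is called. The bool parameter's
// role in the real class is unknown; here it enables the animation.
#include <atomic>
#include <chrono>
#include <string>
#include <iostream>
#include <thread>

namespace yaze::util {

class LoadingIndicator {
 public:
  LoadingIndicator(std::string message, bool animate)
      : message_(std::move(message)), animate_(animate) {}

  void Start() {
    running_ = true;
    worker_ = std::thread([this] {
      const char frames[] = {'|', '/', '-', '\\'};
      size_t i = 0;
      while (running_) {
        if (animate_) {
          std::cout << "\r" << message_ << " " << frames[i++ % 4]
                    << std::flush;
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
      }
      // Erase the indicator line before normal output resumes.
      std::cout << "\r" << std::string(message_.size() + 2, ' ') << "\r"
                << std::flush;
    });
  }

  void Stop() {
    running_ = false;
    if (worker_.joinable()) worker_.join();
  }

  ~LoadingIndicator() { Stop(); }

 private:
  std::string message_;
  bool animate_;
  std::atomic<bool> running_{false};
  std::thread worker_;
};

}  // namespace yaze::util
```

Stopping again from the destructor guards against an early return, such as the error path above, leaving the spinner thread running.
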
@@ -184,28 +199,61 @@ absl::StatusOr<ChatMessage> ConversationalAgentService::SendMessage(
const auto& agent_response = response_or.value();
if (!agent_response.tool_calls.empty()) {
// Check if we were waiting for a text response but got more tool calls instead
if (waiting_for_text_response) {
util::PrintWarning(
absl::StrCat("LLM called tools again instead of providing final response (Iteration: ",
iteration, "/", kMaxToolIterations, ")"));
}
bool executed_tool = false;
for (const auto& tool_call : agent_response.tool_calls) {
// Format tool arguments for display
std::vector<std::string> arg_parts;
for (const auto& [key, value] : tool_call.args) {
arg_parts.push_back(absl::StrCat(key, "=", value));
}
std::string args_str = absl::StrJoin(arg_parts, ", ");
util::PrintToolCall(tool_call.tool_name, args_str);
auto tool_result_or = tool_dispatcher_.Dispatch(tool_call);
if (!tool_result_or.ok()) {
util::PrintError(absl::StrCat(
"Tool execution failed: ", tool_result_or.status().message()));
return absl::InternalError(absl::StrCat(
"Tool execution failed: ", tool_result_or.status().message()));
}
const std::string& tool_output = tool_result_or.value();
if (!tool_output.empty()) {
util::PrintSuccess("Tool executed successfully");
// Add tool result with a clear marker for the LLM
std::string marked_output = "[TOOL RESULT] " + tool_output;
history_.push_back(
- CreateMessage(ChatMessage::Sender::kAgent, tool_output));
+ CreateMessage(ChatMessage::Sender::kUser, marked_output));
}
executed_tool = true;
}
if (executed_tool) {
// Now we're waiting for the LLM to provide a text response
waiting_for_text_response = true;
// Re-query the AI with updated context.
continue;
}
}
// Check if we received a text response after tool execution
if (waiting_for_text_response && agent_response.text_response.empty() &&
agent_response.commands.empty()) {
util::PrintWarning(
absl::StrCat("LLM did not provide text_response after receiving tool results (Iteration: ",
iteration, "/", kMaxToolIterations, ")"));
// Continue to give it another chance
continue;
}
std::string response_text = agent_response.text_response;
if (!agent_response.reasoning.empty()) {
if (!response_text.empty()) {
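
The excerpt cuts off here, but the shape of the loop is already clear: execute tool calls, feed marked results back into the history, and re-query until the model produces plain text or the iteration budget runs out. A condensed, self-contained sketch of that pattern with simplified stand-in types (the real code uses absl::StatusOr, ChatMessage, and the dispatcher shown above):

```cpp
// Condensed sketch of the tool-call loop in this commit, with simplified
// stand-in types for illustration only.
#include <functional>
#include <optional>
#include <string>
#include <vector>

struct Response {
  std::vector<std::string> tool_calls;  // Empty once the model answers in text.
  std::string text_response;
};

using Model = std::function<Response(const std::vector<std::string>&)>;
using Tool = std::function<std::string(const std::string&)>;

std::optional<std::string> Chat(std::vector<std::string>& history,
                                const Model& query_model,
                                const Tool& run_tool) {
  constexpr int kMaxToolIterations = 4;
  bool waiting_for_text = false;
  for (int i = 0; i < kMaxToolIterations; ++i) {
    Response r = query_model(history);
    if (!r.tool_calls.empty()) {
      for (const auto& call : r.tool_calls) {
        // Tag tool output so the model can tell it apart from user text,
        // mirroring the "[TOOL RESULT]" prefix in the diff.
        history.push_back("[TOOL RESULT] " + run_tool(call));
      }
      waiting_for_text = true;  // Expect a text answer on the next pass.
      continue;                 // Re-query with tool results in context.
    }
    if (waiting_for_text && r.text_response.empty()) {
      continue;  // Model stayed silent after tool results; retry.
    }
    return r.text_response;
  }
  return std::nullopt;  // Gave up after kMaxToolIterations passes.
}
```

Appending tool results as user-role messages with an explicit marker, rather than as agent messages, is what lets the model treat them as fresh context to answer from; that is the behavior change in the CreateMessage line above.
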