feat: Update AI Model References and Enhance Agent Chat Widget Functionality
- Updated AI model references from "gemini-2.0-flash-exp" to "gemini-2.5-flash" across various components, ensuring compatibility with the latest AI capabilities.
- Enhanced the AgentChatWidget to improve user experience with new features such as message copying, improved input handling, and session management.
- Introduced a chat session management system, allowing users to create and switch between multiple chat sessions seamlessly.
- Improved UI elements for better visibility and interaction, including styled buttons and tooltips for enhanced usability.
This commit is contained in:
@@ -499,16 +499,21 @@ absl::StatusOr<AgentResponse> GeminiAIService::ParseGeminiResponse(
|
||||
// Try to parse as JSON object
|
||||
auto parsed_text = nlohmann::json::parse(text_content, nullptr, false);
|
||||
if (!parsed_text.is_discarded()) {
|
||||
// Extract text_response
|
||||
if (parsed_text.contains("text_response") &&
|
||||
parsed_text["text_response"].is_string()) {
|
||||
agent_response.text_response =
|
||||
parsed_text["text_response"].get<std::string>();
|
||||
}
|
||||
|
||||
// Extract reasoning
|
||||
if (parsed_text.contains("reasoning") &&
|
||||
parsed_text["reasoning"].is_string()) {
|
||||
agent_response.reasoning =
|
||||
parsed_text["reasoning"].get<std::string>();
|
||||
}
|
||||
|
||||
// Extract commands
|
||||
if (parsed_text.contains("commands") &&
|
||||
parsed_text["commands"].is_array()) {
|
||||
for (const auto& cmd : parsed_text["commands"]) {
|
||||
@@ -521,6 +526,30 @@ absl::StatusOr<AgentResponse> GeminiAIService::ParseGeminiResponse(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extract tool_calls from the parsed JSON
|
||||
if (parsed_text.contains("tool_calls") &&
|
||||
parsed_text["tool_calls"].is_array()) {
|
||||
for (const auto& call : parsed_text["tool_calls"]) {
|
||||
if (call.contains("tool_name") && call["tool_name"].is_string()) {
|
||||
ToolCall tool_call;
|
||||
tool_call.tool_name = call["tool_name"].get<std::string>();
|
||||
|
||||
if (call.contains("args") && call["args"].is_object()) {
|
||||
for (auto& [key, value] : call["args"].items()) {
|
||||
if (value.is_string()) {
|
||||
tool_call.args[key] = value.get<std::string>();
|
||||
} else if (value.is_number()) {
|
||||
tool_call.args[key] = std::to_string(value.get<double>());
|
||||
} else if (value.is_boolean()) {
|
||||
tool_call.args[key] = value.get<bool>() ? "true" : "false";
|
||||
}
|
||||
}
|
||||
}
|
||||
agent_response.tool_calls.push_back(tool_call);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If parsing the full object fails, fallback to extracting commands from text
|
||||
std::vector<std::string> lines = absl::StrSplit(text_content, '\n');
|
||||
|
||||
@@ -76,10 +76,21 @@ std::vector<fs::path> BuildCatalogueSearchPaths(const std::string& explicit_path
|
||||
}
|
||||
}
|
||||
|
||||
// Try to get executable directory for better path resolution
|
||||
fs::path exe_dir;
|
||||
try {
|
||||
exe_dir = fs::current_path();
|
||||
} catch (...) {
|
||||
exe_dir = ".";
|
||||
}
|
||||
|
||||
const std::vector<std::string> defaults = {
|
||||
"assets/agent/prompt_catalogue.yaml",
|
||||
"../assets/agent/prompt_catalogue.yaml",
|
||||
"../../assets/agent/prompt_catalogue.yaml",
|
||||
"../../../assets/agent/prompt_catalogue.yaml", // From build/bin/
|
||||
"../../../../assets/agent/prompt_catalogue.yaml", // From build/bin/yaze.app/Contents/MacOS/
|
||||
"../Resources/assets/agent/prompt_catalogue.yaml", // macOS app bundle
|
||||
"assets/z3ed/prompt_catalogue.yaml",
|
||||
"../assets/z3ed/prompt_catalogue.yaml",
|
||||
};
|
||||
|
||||
Reference in New Issue
Block a user