Fix agent2 compilation errors and warnings

- Add cloud_llm_client dependency for CompletionIntent and CompletionMode
- Fix LanguageModelRequest initialization with missing thinking_allowed field
- Update StartMessage handling to use Assistant role
- Fix MessageContent conversions to use enum variants directly
- Fix input_schema implementation to use schemars directly
- Suppress unused variable and dead code warnings
This commit is contained in:
Nathan Sobo 2025-08-01 22:39:08 -06:00
parent afb5c4147a
commit 84d6a0fae9
4 changed files with 31 additions and 18 deletions

1
Cargo.lock generated
View file

@@ -157,6 +157,7 @@ dependencies = [
"assistant_tools", "assistant_tools",
"chrono", "chrono",
"client", "client",
"cloud_llm_client",
"collections", "collections",
"ctor", "ctor",
"env_logger 0.11.8", "env_logger 0.11.8",

View file

@@ -16,6 +16,7 @@ anyhow.workspace = true
assistant_tool.workspace = true assistant_tool.workspace = true
assistant_tools.workspace = true assistant_tools.workspace = true
chrono.workspace = true chrono.workspace = true
cloud_llm_client.workspace = true
collections.workspace = true collections.workspace = true
fs.workspace = true fs.workspace = true
futures.workspace = true futures.workspace = true

View file

@@ -6,11 +6,12 @@ use anyhow::Result;
use gpui::{App, Entity}; use gpui::{App, Entity};
use project::Project; use project::Project;
struct BasePrompt { #[allow(dead_code)]
struct _BasePrompt {
project: Entity<Project>, project: Entity<Project>,
} }
impl Prompt for BasePrompt { impl Prompt for _BasePrompt {
fn render(&self, templates: &Templates, cx: &App) -> Result<String> { fn render(&self, templates: &Templates, cx: &App) -> Result<String> {
BaseTemplate { BaseTemplate {
os: std::env::consts::OS.to_string(), os: std::env::consts::OS.to_string(),

View file

@@ -1,12 +1,13 @@
use crate::templates::Templates; use crate::templates::Templates;
use anyhow::{anyhow, Result}; use anyhow::{anyhow, Result};
use cloud_llm_client::{CompletionIntent, CompletionMode};
use futures::{channel::mpsc, future}; use futures::{channel::mpsc, future};
use gpui::{App, Context, SharedString, Task}; use gpui::{App, Context, SharedString, Task};
use language_model::{ use language_model::{
CompletionIntent, CompletionMode, LanguageModel, LanguageModelCompletionError, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
LanguageModelCompletionEvent, LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
LanguageModelRequestTool, LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
LanguageModelToolSchemaFormat, LanguageModelToolUse, MessageContent, Role, StopReason, LanguageModelToolUse, MessageContent, Role, StopReason,
}; };
use schemars::{JsonSchema, Schema}; use schemars::{JsonSchema, Schema};
use serde::Deserialize; use serde::Deserialize;
@@ -138,7 +139,10 @@ impl Thread {
.update(cx, |thread, _cx| { .update(cx, |thread, _cx| {
thread.messages.push(AgentMessage { thread.messages.push(AgentMessage {
role: Role::User, role: Role::User,
content: tool_results.into_iter().map(Into::into).collect(), content: tool_results
.into_iter()
.map(MessageContent::ToolResult)
.collect(),
}); });
}) })
.ok(); .ok();
@@ -187,27 +191,30 @@ impl Thread {
match event { match event {
Text(new_text) => self.handle_text_event(new_text, cx), Text(new_text) => self.handle_text_event(new_text, cx),
Thinking { text, signature } => { Thinking {
text: _text,
signature: _signature,
} => {
todo!() todo!()
} }
ToolUse(tool_use) => { ToolUse(tool_use) => {
return self.handle_tool_use_event(tool_use, cx); return self.handle_tool_use_event(tool_use, cx);
} }
StartMessage { role, .. } => { StartMessage { .. } => {
self.messages.push(AgentMessage { self.messages.push(AgentMessage {
role, role: Role::Assistant,
content: Vec::new(), content: Vec::new(),
}); });
} }
UsageUpdate(_) => {} UsageUpdate(_) => {}
Stop(stop_reason) => self.handle_stop_event(stop_reason), Stop(stop_reason) => self.handle_stop_event(stop_reason),
StatusUpdate(_completion_request_status) => {} StatusUpdate(_completion_request_status) => {}
RedactedThinking { data } => todo!(), RedactedThinking { data: _data } => todo!(),
ToolUseJsonParseError { ToolUseJsonParseError {
id, id: _id,
tool_name, tool_name: _tool_name,
raw_input, raw_input: _raw_input,
json_parse_error, json_parse_error: _json_parse_error,
} => todo!(), } => todo!(),
} }
@@ -256,7 +263,9 @@ impl Thread {
} }
}); });
if push_new_tool_use { if push_new_tool_use {
last_message.content.push(tool_use.clone().into()); last_message
.content
.push(MessageContent::ToolUse(tool_use.clone()));
} }
if !tool_use.is_input_complete { if !tool_use.is_input_complete {
@@ -340,6 +349,7 @@ impl Thread {
tool_choice: None, tool_choice: None,
stop: Vec::new(), stop: Vec::new(),
temperature: None, temperature: None,
thinking_allowed: false,
} }
} }
@@ -373,8 +383,8 @@ where
} }
/// Returns the JSON schema that describes the tool's input. /// Returns the JSON schema that describes the tool's input.
fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Schema { fn input_schema(&self, _format: LanguageModelToolSchemaFormat) -> Schema {
assistant_tools::root_schema_for::<Self::Input>(format) schemars::schema_for!(Self::Input)
} }
/// Runs the tool with the provided input. /// Runs the tool with the provided input.