Reuse conversation cache when streaming edits (#30245)

Release Notes:

- Improved latency when the agent applies edits.
This commit is contained in:
Antonio Scandurra 2025-05-08 14:36:34 +02:00 committed by GitHub
parent 032022e37b
commit 9f6809a28d
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
50 changed files with 847 additions and 21557 deletions

View file

@ -12,8 +12,8 @@ use gpui::{
use http_client::HttpClient;
use language_model::{
AuthenticateError, LanguageModelCompletionError, LanguageModelCompletionEvent,
LanguageModelToolSchemaFormat, LanguageModelToolUse, LanguageModelToolUseId, MessageContent,
StopReason,
LanguageModelToolChoice, LanguageModelToolSchemaFormat, LanguageModelToolUse,
LanguageModelToolUseId, MessageContent, StopReason,
};
use language_model::{
LanguageModel, LanguageModelId, LanguageModelName, LanguageModelProvider,
@ -313,6 +313,14 @@ impl LanguageModel for GoogleLanguageModel {
true
}
/// Reports whether this model honors the given tool-choice mode.
///
/// Every mode maps onto a Google `FunctionCallingMode` (see `into_google`),
/// so each variant answers `true`. The match is kept exhaustive on purpose:
/// adding a new `LanguageModelToolChoice` variant must fail to compile here
/// until its support status is decided explicitly.
fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
    match choice {
        LanguageModelToolChoice::Auto => true,
        LanguageModelToolChoice::Any => true,
        LanguageModelToolChoice::None => true,
    }
}
/// Declares the schema dialect this provider accepts for tool inputs.
// NOTE(review): `JsonSchemaSubset` presumably signals that tool parameter
// schemas must be restricted to the subset of JSON Schema the Gemini API
// understands — confirm against the `google_ai` crate / Gemini docs.
fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
    LanguageModelToolSchemaFormat::JsonSchemaSubset
}
@ -484,7 +492,16 @@ pub fn into_google(
.collect(),
}]
}),
tool_config: None,
tool_config: request.tool_choice.map(|choice| google_ai::ToolConfig {
function_calling_config: google_ai::FunctionCallingConfig {
mode: match choice {
LanguageModelToolChoice::Auto => google_ai::FunctionCallingMode::Auto,
LanguageModelToolChoice::Any => google_ai::FunctionCallingMode::Any,
LanguageModelToolChoice::None => google_ai::FunctionCallingMode::None,
},
allowed_function_names: None,
},
}),
}
}