language_model: Remove use_any_tool method from LanguageModel (#27930)

This PR removes the `use_any_tool` method from the `LanguageModel`
trait.

It was not being used anywhere, and it doesn't really fit into our new tool
use story.
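
For reference, the removed trait method had roughly this shape, reconstructed from the provider implementations in the diff below (parameter names varied slightly between providers; the trait definition itself is not shown in these hunks):

```rust
fn use_any_tool(
    &self,
    request: LanguageModelRequest,
    tool_name: String,
    tool_description: String,
    input_schema: serde_json::Value,
    cx: &AsyncApp,
) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>>;
```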

Release Notes:

- N/A
Commit 889bc13b7d (parent da3383b10e)
Marshall Bowers, 2025-04-02 11:49:21 -04:00, committed via GitHub
12 changed files with 8 additions and 541 deletions

View file

@@ -6,7 +6,7 @@ use collections::{BTreeMap, HashMap};
 use credentials_provider::CredentialsProvider;
 use editor::{Editor, EditorElement, EditorStyle};
 use futures::Stream;
-use futures::{FutureExt, StreamExt, TryStreamExt as _, future::BoxFuture, stream::BoxStream};
+use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
 use gpui::{
     AnyView, App, AsyncApp, Context, Entity, FontStyle, Subscription, Task, TextStyle, WhiteSpace,
 };
@@ -457,44 +457,6 @@ impl LanguageModel for AnthropicModel {
             min_total_token: config.min_total_token,
         })
     }
-
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        tool_name: String,
-        tool_description: String,
-        input_schema: serde_json::Value,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
-        let mut request = into_anthropic(
-            request,
-            self.model.tool_model_id().into(),
-            self.model.default_temperature(),
-            self.model.max_output_tokens(),
-            self.model.mode(),
-        );
-        request.tool_choice = Some(anthropic::ToolChoice::Tool {
-            name: tool_name.clone(),
-        });
-        request.tools = vec![anthropic::Tool {
-            name: tool_name.clone(),
-            description: tool_description,
-            input_schema,
-        }];
-        let response = self.stream_completion(request, cx);
-        self.request_limiter
-            .run(async move {
-                let response = response.await?;
-                Ok(anthropic::extract_tool_args_from_events(
-                    tool_name,
-                    Box::pin(response.map_err(|e| anyhow!(e))),
-                )
-                .await?
-                .boxed())
-            })
-            .boxed()
-    }
 }
 
 pub fn into_anthropic(

View file

@@ -12,11 +12,7 @@ use bedrock::bedrock_client::types::{
     ContentBlockDelta, ContentBlockStart, ContentBlockStartEvent, ConverseStreamOutput,
 };
 use bedrock::bedrock_client::{self, Config};
-use bedrock::{
-    BedrockError, BedrockInnerContent, BedrockMessage, BedrockSpecificTool,
-    BedrockStreamingResponse, BedrockTool, BedrockToolChoice, BedrockToolInputSchema, Model,
-    value_to_aws_document,
-};
+use bedrock::{BedrockError, BedrockInnerContent, BedrockMessage, BedrockStreamingResponse, Model};
 use collections::{BTreeMap, HashMap};
 use credentials_provider::CredentialsProvider;
 use editor::{Editor, EditorElement, EditorStyle};
@@ -414,50 +410,6 @@ impl LanguageModel for BedrockModel {
         async move { Ok(future.await?.boxed()) }.boxed()
     }
-
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        name: String,
-        description: String,
-        schema: Value,
-        _cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
-        let mut request = into_bedrock(
-            request,
-            self.model.id().into(),
-            self.model.default_temperature(),
-            self.model.max_output_tokens(),
-        );
-
-        request.tool_choice = BedrockSpecificTool::builder()
-            .name(name.clone())
-            .build()
-            .log_err()
-            .map(BedrockToolChoice::Tool);
-
-        if let Some(tool) = BedrockTool::builder()
-            .name(name.clone())
-            .description(description.clone())
-            .input_schema(BedrockToolInputSchema::Json(value_to_aws_document(&schema)))
-            .build()
-            .log_err()
-        {
-            request.tools.push(tool);
-        }
-
-        let handle = self.handler.clone();
-        let request = self.stream_completion(request, _cx);
-        self.request_limiter
-            .run(async move {
-                let response = request.map_err(|err| anyhow!(err))?.await;
-                Ok(extract_tool_args_from_events(name, response, handle)
-                    .await?
-                    .boxed())
-            })
-            .boxed()
-    }
 
     fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
         None
     }

View file

@@ -29,7 +29,6 @@ use settings::{Settings, SettingsStore};
 use smol::Timer;
 use smol::io::{AsyncReadExt, BufReader};
 use std::{
-    future,
     sync::{Arc, LazyLock},
     time::Duration,
 };
@@ -743,109 +742,6 @@ impl LanguageModel for CloudLanguageModel {
             }
         }
     }
-
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        tool_name: String,
-        tool_description: String,
-        input_schema: serde_json::Value,
-        _cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
-        let client = self.client.clone();
-        let llm_api_token = self.llm_api_token.clone();
-        match &self.model {
-            CloudModel::Anthropic(model) => {
-                let mut request = into_anthropic(
-                    request,
-                    model.tool_model_id().into(),
-                    model.default_temperature(),
-                    model.max_output_tokens(),
-                    model.mode(),
-                );
-                request.tool_choice = Some(anthropic::ToolChoice::Tool {
-                    name: tool_name.clone(),
-                });
-                request.tools = vec![anthropic::Tool {
-                    name: tool_name.clone(),
-                    description: tool_description,
-                    input_schema,
-                }];
-                self.request_limiter
-                    .run(async move {
-                        let response = Self::perform_llm_completion(
-                            client.clone(),
-                            llm_api_token,
-                            PerformCompletionParams {
-                                provider: client::LanguageModelProvider::Anthropic,
-                                model: request.model.clone(),
-                                provider_request: RawValue::from_string(serde_json::to_string(
-                                    &request,
-                                )?)?,
-                            },
-                        )
-                        .await?;
-                        Ok(anthropic::extract_tool_args_from_events(
-                            tool_name,
-                            Box::pin(response_lines(response)),
-                        )
-                        .await?
-                        .boxed())
-                    })
-                    .boxed()
-            }
-            CloudModel::OpenAi(model) => {
-                let mut request =
-                    into_open_ai(request, model.id().into(), model.max_output_tokens());
-                request.tool_choice = Some(open_ai::ToolChoice::Other(
-                    open_ai::ToolDefinition::Function {
-                        function: open_ai::FunctionDefinition {
-                            name: tool_name.clone(),
-                            description: None,
-                            parameters: None,
-                        },
-                    },
-                ));
-                request.tools = vec![open_ai::ToolDefinition::Function {
-                    function: open_ai::FunctionDefinition {
-                        name: tool_name.clone(),
-                        description: Some(tool_description),
-                        parameters: Some(input_schema),
-                    },
-                }];
-                self.request_limiter
-                    .run(async move {
-                        let response = Self::perform_llm_completion(
-                            client.clone(),
-                            llm_api_token,
-                            PerformCompletionParams {
-                                provider: client::LanguageModelProvider::OpenAi,
-                                model: request.model.clone(),
-                                provider_request: RawValue::from_string(serde_json::to_string(
-                                    &request,
-                                )?)?,
-                            },
-                        )
-                        .await?;
-                        Ok(open_ai::extract_tool_args_from_events(
-                            tool_name,
-                            Box::pin(response_lines(response)),
-                        )
-                        .await?
-                        .boxed())
-                    })
-                    .boxed()
-            }
-            CloudModel::Google(_) => {
-                future::ready(Err(anyhow!("tool use not implemented for Google AI"))).boxed()
-            }
-        }
-    }
 }
 
 fn response_lines<T: DeserializeOwned>(

View file

@@ -1,4 +1,3 @@
-use std::future;
 use std::sync::Arc;
 
 use anyhow::{Result, anyhow};
@@ -293,17 +292,6 @@ impl LanguageModel for CopilotChatLanguageModel {
         }
         .boxed()
     }
-
-    fn use_any_tool(
-        &self,
-        _request: LanguageModelRequest,
-        _name: String,
-        _description: String,
-        _schema: serde_json::Value,
-        _cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
-        future::ready(Err(anyhow!("not implemented"))).boxed()
-    }
 }
 
 impl CopilotChatLanguageModel {

View file

@@ -356,61 +356,6 @@ impl LanguageModel for DeepSeekLanguageModel {
         }
         .boxed()
     }
-
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        name: String,
-        description: String,
-        schema: serde_json::Value,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<String>>>> {
-        let mut deepseek_request = into_deepseek(
-            request,
-            self.model.id().to_string(),
-            self.max_output_tokens(),
-        );
-        deepseek_request.tools = vec![deepseek::ToolDefinition::Function {
-            function: deepseek::FunctionDefinition {
-                name: name.clone(),
-                description: Some(description),
-                parameters: Some(schema),
-            },
-        }];
-        let response_stream = self.stream_completion(deepseek_request, cx);
-        self.request_limiter
-            .run(async move {
-                let stream = response_stream.await?;
-                let tool_args_stream = stream
-                    .filter_map(move |response| async move {
-                        match response {
-                            Ok(response) => {
-                                for choice in response.choices {
-                                    if let Some(tool_calls) = choice.delta.tool_calls {
-                                        for tool_call in tool_calls {
-                                            if let Some(function) = tool_call.function {
-                                                if let Some(args) = function.arguments {
-                                                    return Some(Ok(args));
-                                                }
-                                            }
-                                        }
-                                    }
-                                }
-                                None
-                            }
-                            Err(e) => Some(Err(e)),
-                        }
-                    })
-                    .boxed();
-                Ok(tool_args_stream)
-            })
-            .boxed()
-    }
 }
 
 pub fn into_deepseek(

View file

@@ -356,61 +356,6 @@ impl LanguageModel for GoogleLanguageModel {
         });
         async move { Ok(future.await?.boxed()) }.boxed()
     }
-
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        name: String,
-        description: String,
-        schema: serde_json::Value,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<String>>>> {
-        let mut request = into_google(request, self.model.id().to_string());
-        request.tools = Some(vec![google_ai::Tool {
-            function_declarations: vec![google_ai::FunctionDeclaration {
-                name: name.clone(),
-                description,
-                parameters: schema,
-            }],
-        }]);
-        request.tool_config = Some(google_ai::ToolConfig {
-            function_calling_config: google_ai::FunctionCallingConfig {
-                mode: google_ai::FunctionCallingMode::Any,
-                allowed_function_names: Some(vec![name]),
-            },
-        });
-        let response = self.stream_completion(request, cx);
-        self.request_limiter
-            .run(async move {
-                let response = response.await?;
-                Ok(response
-                    .filter_map(|event| async move {
-                        match event {
-                            Ok(response) => {
-                                if let Some(candidates) = &response.candidates {
-                                    for candidate in candidates {
-                                        for part in &candidate.content.parts {
-                                            if let google_ai::Part::FunctionCallPart(
-                                                function_call_part,
-                                            ) = part
-                                            {
-                                                return Some(Ok(serde_json::to_string(
-                                                    &function_call_part.function_call.args,
-                                                )
-                                                .unwrap_or_default()));
-                                            }
-                                        }
-                                    }
-                                }
-                                None
-                            }
-                            Err(e) => Some(Err(e)),
-                        }
-                    })
-                    .boxed())
-            })
-            .boxed()
-    }
 }
 
 pub fn into_google(

View file

@@ -364,17 +364,6 @@ impl LanguageModel for LmStudioLanguageModel {
         }
         .boxed()
     }
-
-    fn use_any_tool(
-        &self,
-        _request: LanguageModelRequest,
-        _tool_name: String,
-        _tool_description: String,
-        _schema: serde_json::Value,
-        _cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
-        async move { Ok(futures::stream::empty().boxed()) }.boxed()
-    }
 }
 
 struct ConfigurationView {

View file

@@ -368,55 +368,6 @@ impl LanguageModel for MistralLanguageModel {
         }
         .boxed()
     }
-
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        tool_name: String,
-        tool_description: String,
-        schema: serde_json::Value,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<String>>>> {
-        let mut request = into_mistral(request, self.model.id().into(), self.max_output_tokens());
-        request.tools = vec![mistral::ToolDefinition::Function {
-            function: mistral::FunctionDefinition {
-                name: tool_name.clone(),
-                description: Some(tool_description),
-                parameters: Some(schema),
-            },
-        }];
-        let response = self.stream_completion(request, cx);
-        self.request_limiter
-            .run(async move {
-                let stream = response.await?;
-                let tool_args_stream = stream
-                    .filter_map(move |response| async move {
-                        match response {
-                            Ok(response) => {
-                                for choice in response.choices {
-                                    if let Some(tool_calls) = choice.delta.tool_calls {
-                                        for tool_call in tool_calls {
-                                            if let Some(function) = tool_call.function {
-                                                if let Some(args) = function.arguments {
-                                                    return Some(Ok(args));
-                                                }
-                                            }
-                                        }
-                                    }
-                                }
-                                None
-                            }
-                            Err(e) => Some(Err(e)),
-                        }
-                    })
-                    .boxed();
-                Ok(tool_args_stream)
-            })
-            .boxed()
-    }
 }
 
 pub fn into_mistral(

View file

@@ -1,4 +1,4 @@
-use anyhow::{Result, anyhow, bail};
+use anyhow::{Result, anyhow};
 use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
 use gpui::{AnyView, App, AsyncApp, Context, Subscription, Task};
 use http_client::HttpClient;
@@ -9,8 +9,8 @@ use language_model::{
     LanguageModelRequest, RateLimiter, Role,
 };
 use ollama::{
-    ChatMessage, ChatOptions, ChatRequest, ChatResponseDelta, KeepAlive, OllamaToolCall,
-    get_models, preload_model, stream_chat_completion,
+    ChatMessage, ChatOptions, ChatRequest, KeepAlive, get_models, preload_model,
+    stream_chat_completion,
 };
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
@@ -265,22 +265,6 @@ impl OllamaLanguageModel {
             tools: vec![],
         }
     }
-
-    fn request_completion(
-        &self,
-        request: ChatRequest,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<ChatResponseDelta>> {
-        let http_client = self.http_client.clone();
-
-        let Ok(api_url) = cx.update(|cx| {
-            let settings = &AllLanguageModelSettings::get_global(cx).ollama;
-            settings.api_url.clone()
-        }) else {
-            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
-        };
-        async move { ollama::complete(http_client.as_ref(), &api_url, request).await }.boxed()
-    }
 }
 
 impl LanguageModel for OllamaLanguageModel {
@@ -372,48 +356,6 @@ impl LanguageModel for OllamaLanguageModel {
         }
         .boxed()
     }
-
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        tool_name: String,
-        tool_description: String,
-        schema: serde_json::Value,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
-        use ollama::{OllamaFunctionTool, OllamaTool};
-
-        let function = OllamaFunctionTool {
-            name: tool_name.clone(),
-            description: Some(tool_description),
-            parameters: Some(schema),
-        };
-        let tools = vec![OllamaTool::Function { function }];
-        let request = self.to_ollama_request(request).with_tools(tools);
-        let response = self.request_completion(request, cx);
-        self.request_limiter
-            .run(async move {
-                let response = response.await?;
-                let ChatMessage::Assistant { tool_calls, .. } = response.message else {
-                    bail!("message does not have an assistant role");
-                };
-                if let Some(tool_calls) = tool_calls.filter(|calls| !calls.is_empty()) {
-                    for call in tool_calls {
-                        let OllamaToolCall::Function(function) = call;
-                        if function.name == tool_name {
-                            return Ok(futures::stream::once(async move {
-                                Ok(function.arguments.to_string())
-                            })
-                            .boxed());
-                        }
-                    }
-                } else {
-                    bail!("assistant message does not have any tool calls");
-                };
-                bail!("tool not used")
-            })
-            .boxed()
-    }
 }
 
 struct ConfigurationView {

View file

@@ -12,9 +12,7 @@ use language_model::{
     LanguageModelName, LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
     LanguageModelProviderState, LanguageModelRequest, RateLimiter, Role,
 };
-use open_ai::{
-    FunctionDefinition, ResponseStreamEvent, ToolChoice, ToolDefinition, stream_completion,
-};
+use open_ai::{ResponseStreamEvent, stream_completion};
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 use settings::{Settings, SettingsStore};
@@ -331,43 +329,6 @@ impl LanguageModel for OpenAiLanguageModel {
         }
         .boxed()
     }
-
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        tool_name: String,
-        tool_description: String,
-        schema: serde_json::Value,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<String>>>> {
-        let mut request = into_open_ai(request, self.model.id().into(), self.max_output_tokens());
-        request.tool_choice = Some(ToolChoice::Other(ToolDefinition::Function {
-            function: FunctionDefinition {
-                name: tool_name.clone(),
-                description: None,
-                parameters: None,
-            },
-        }));
-        request.tools = vec![ToolDefinition::Function {
-            function: FunctionDefinition {
-                name: tool_name.clone(),
-                description: Some(tool_description),
-                parameters: Some(schema),
-            },
-        }];
-        let response = self.stream_completion(request, cx);
-        self.request_limiter
-            .run(async move {
-                let response = response.await?;
-                Ok(
-                    open_ai::extract_tool_args_from_events(tool_name, Box::pin(response))
-                        .await?
-                        .boxed(),
-                )
-            })
-            .boxed()
-    }
 }
 
 pub fn into_open_ai(
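
Every removed implementation above follows the same pattern: force the provider to call one named tool (via `tool_choice`), register that single tool, then filter the streaming response down to the tool's JSON argument fragments. A minimal, self-contained sketch of that filtering step, assuming only the `futures` and `anyhow` crates; the `Event` enum and `extract_tool_args` helper are illustrative stand-ins, not Zed or provider APIs:

```rust
use anyhow::Result;
use futures::{StreamExt, stream::BoxStream};

// Illustrative stand-in for a provider's streaming completion events; each
// event may carry a fragment of a tool call's JSON arguments.
enum Event {
    Text(String),
    ToolArgsDelta { tool_name: String, json_fragment: String },
}

// Keep only the argument fragments of one named tool, passing stream errors
// through -- the shape shared by every removed `use_any_tool` body above.
fn extract_tool_args(
    tool_name: String,
    events: BoxStream<'static, Result<Event>>,
) -> BoxStream<'static, Result<String>> {
    events
        .filter_map(move |event| {
            let tool_name = tool_name.clone();
            async move {
                match event {
                    Ok(Event::ToolArgsDelta { tool_name: name, json_fragment })
                        if name == tool_name =>
                    {
                        Some(Ok(json_fragment))
                    }
                    Ok(_) => None, // other tools and plain text are dropped
                    Err(e) => Some(Err(e)),
                }
            }
        })
        .boxed()
}

fn main() {
    let events = futures::stream::iter(vec![
        Ok(Event::Text("thinking...".into())),
        Ok(Event::ToolArgsDelta {
            tool_name: "search".into(),
            json_fragment: r#"{"query":"zed"}"#.into(),
        }),
    ])
    .boxed();

    let args: Vec<Result<String>> =
        futures::executor::block_on(extract_tool_args("search".into(), events).collect());
    println!("{args:?}"); // [Ok("{\"query\":\"zed\"}")]
}
```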