language_model: Remove dependencies on individual model provider crates (#25503)

This PR removes the dependencies on the individual model provider crates
from the `language_model` crate.

The conversion methods that turn a `LanguageModelRequest` into a
provider-specific request type have been inlined into the corresponding
provider modules in the `language_models` crate.
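
For example, the Anthropic call site changes roughly like this (a minimal sketch distilled from the diff below; the argument bindings `model_id`, `default_temperature`, and `max_output_tokens` are illustrative):

```rust
// Before: a method on LanguageModelRequest, defined in `language_model`,
// which forced that crate to depend on the `anthropic` crate.
let request = request.into_anthropic(model_id, default_temperature, max_output_tokens);

// After: a free function living in the provider module in `language_models`.
let request = into_anthropic(request, model_id, default_temperature, max_output_tokens);
```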

The model providers we provide via Zed's cloud offering get to stay, for
now.

Release Notes:

- N/A
Marshall Bowers, 2025-02-24 16:41:35 -05:00, committed by GitHub
parent 2f7a62780a
commit 0acd556106
11 changed files with 347 additions and 366 deletions

crates/language_models/src/provider/anthropic.rs

@@ -13,7 +13,7 @@ use http_client::HttpClient;
 use language_model::{
     AuthenticateError, LanguageModel, LanguageModelCacheConfiguration, LanguageModelId,
     LanguageModelName, LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
-    LanguageModelProviderState, LanguageModelRequest, RateLimiter, Role,
+    LanguageModelProviderState, LanguageModelRequest, MessageContent, RateLimiter, Role,
 };
 use language_model::{LanguageModelCompletionEvent, LanguageModelToolUse, StopReason};
 use schemars::JsonSchema;
@@ -396,7 +396,8 @@ impl LanguageModel for AnthropicModel {
         request: LanguageModelRequest,
         cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>> {
-        let request = request.into_anthropic(
+        let request = into_anthropic(
+            request,
             self.model.id().into(),
             self.model.default_temperature(),
             self.model.max_output_tokens(),
@@ -427,7 +428,8 @@ impl LanguageModel for AnthropicModel {
         input_schema: serde_json::Value,
         cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
-        let mut request = request.into_anthropic(
+        let mut request = into_anthropic(
+            request,
             self.model.tool_model_id().into(),
             self.model.default_temperature(),
             self.model.max_output_tokens(),
@@ -456,6 +458,117 @@ impl LanguageModel for AnthropicModel {
     }
 }
 
+pub fn into_anthropic(
+    request: LanguageModelRequest,
+    model: String,
+    default_temperature: f32,
+    max_output_tokens: u32,
+) -> anthropic::Request {
+    let mut new_messages: Vec<anthropic::Message> = Vec::new();
+    let mut system_message = String::new();
+
+    for message in request.messages {
+        if message.contents_empty() {
+            continue;
+        }
+
+        match message.role {
+            Role::User | Role::Assistant => {
+                let cache_control = if message.cache {
+                    Some(anthropic::CacheControl {
+                        cache_type: anthropic::CacheControlType::Ephemeral,
+                    })
+                } else {
+                    None
+                };
+                let anthropic_message_content: Vec<anthropic::RequestContent> = message
+                    .content
+                    .into_iter()
+                    .filter_map(|content| match content {
+                        MessageContent::Text(text) => {
+                            if !text.is_empty() {
+                                Some(anthropic::RequestContent::Text {
+                                    text,
+                                    cache_control,
+                                })
+                            } else {
+                                None
+                            }
+                        }
+                        MessageContent::Image(image) => Some(anthropic::RequestContent::Image {
+                            source: anthropic::ImageSource {
+                                source_type: "base64".to_string(),
+                                media_type: "image/png".to_string(),
+                                data: image.source.to_string(),
+                            },
+                            cache_control,
+                        }),
+                        MessageContent::ToolUse(tool_use) => {
+                            Some(anthropic::RequestContent::ToolUse {
+                                id: tool_use.id.to_string(),
+                                name: tool_use.name,
+                                input: tool_use.input,
+                                cache_control,
+                            })
+                        }
+                        MessageContent::ToolResult(tool_result) => {
+                            Some(anthropic::RequestContent::ToolResult {
+                                tool_use_id: tool_result.tool_use_id,
+                                is_error: tool_result.is_error,
+                                content: tool_result.content,
+                                cache_control,
+                            })
+                        }
+                    })
+                    .collect();
+                let anthropic_role = match message.role {
+                    Role::User => anthropic::Role::User,
+                    Role::Assistant => anthropic::Role::Assistant,
+                    Role::System => unreachable!("System role should never occur here"),
+                };
+                if let Some(last_message) = new_messages.last_mut() {
+                    if last_message.role == anthropic_role {
+                        last_message.content.extend(anthropic_message_content);
+                        continue;
+                    }
+                }
+                new_messages.push(anthropic::Message {
+                    role: anthropic_role,
+                    content: anthropic_message_content,
+                });
+            }
+            Role::System => {
+                if !system_message.is_empty() {
+                    system_message.push_str("\n\n");
+                }
+                system_message.push_str(&message.string_contents());
+            }
+        }
+    }
+
+    anthropic::Request {
+        model,
+        messages: new_messages,
+        max_tokens: max_output_tokens,
+        system: Some(system_message),
+        tools: request
+            .tools
+            .into_iter()
+            .map(|tool| anthropic::Tool {
+                name: tool.name,
+                description: tool.description,
+                input_schema: tool.input_schema,
+            })
+            .collect(),
+        tool_choice: None,
+        metadata: None,
+        stop_sequences: Vec::new(),
+        temperature: request.temperature.or(Some(default_temperature)),
+        top_k: None,
+        top_p: None,
+    }
+}
+
 pub fn map_to_language_model_completion_events(
     events: Pin<Box<dyn Send + Stream<Item = Result<Event, AnthropicError>>>>,
 ) -> impl Stream<Item = Result<LanguageModelCompletionEvent>> {