language_model: Remove dependencies on individual model provider crates (#25503)
This PR removes the dependencies on the individual model provider crates from the `language_model` crate. The conversion methods for turning a `LanguageModelRequest` into each provider-specific request type have been inlined into the corresponding provider modules in the `language_models` crate. The model providers served through Zed's cloud offering get to stay, for now.

Release Notes:

- N/A
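Concretely, call sites change from a method on the request to a free function defined next to each provider, as the hunks below show. A minimal before/after sketch (the variable names here are illustrative, not taken from the diff):

```rust
// Before: the conversion was a method on LanguageModelRequest, which
// forced the `language_model` crate to depend on the `open_ai` crate.
let body = request.into_open_ai(model_id, max_output_tokens);

// After: the conversion is a free function in the OpenAI provider module
// of the `language_models` crate, so only that crate needs `open_ai`.
let body = into_open_ai(request, model_id, max_output_tokens);
```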
parent 2f7a62780a
commit 0acd556106

11 changed files with 347 additions and 366 deletions
```diff
@@ -318,7 +318,7 @@ impl LanguageModel for OpenAiLanguageModel {
         'static,
         Result<futures::stream::BoxStream<'static, Result<LanguageModelCompletionEvent>>>,
     > {
-        let request = request.into_open_ai(self.model.id().into(), self.max_output_tokens());
+        let request = into_open_ai(request, self.model.id().into(), self.max_output_tokens());
         let completions = self.stream_completion(request, cx);
         async move {
             Ok(open_ai::extract_text_from_events(completions.await?)
@@ -336,7 +336,7 @@ impl LanguageModel for OpenAiLanguageModel {
         schema: serde_json::Value,
         cx: &AsyncApp,
     ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<String>>>> {
-        let mut request = request.into_open_ai(self.model.id().into(), self.max_output_tokens());
+        let mut request = into_open_ai(request, self.model.id().into(), self.max_output_tokens());
         request.tool_choice = Some(ToolChoice::Other(ToolDefinition::Function {
             function: FunctionDefinition {
                 name: tool_name.clone(),
@@ -366,6 +366,39 @@ impl LanguageModel for OpenAiLanguageModel {
     }
 }
 
+pub fn into_open_ai(
+    request: LanguageModelRequest,
+    model: String,
+    max_output_tokens: Option<u32>,
+) -> open_ai::Request {
+    let stream = !model.starts_with("o1-");
+    open_ai::Request {
+        model,
+        messages: request
+            .messages
+            .into_iter()
+            .map(|msg| match msg.role {
+                Role::User => open_ai::RequestMessage::User {
+                    content: msg.string_contents(),
+                },
+                Role::Assistant => open_ai::RequestMessage::Assistant {
+                    content: Some(msg.string_contents()),
+                    tool_calls: Vec::new(),
+                },
+                Role::System => open_ai::RequestMessage::System {
+                    content: msg.string_contents(),
+                },
+            })
+            .collect(),
+        stream,
+        stop: request.stop,
+        temperature: request.temperature.unwrap_or(1.0),
+        max_tokens: max_output_tokens,
+        tools: Vec::new(),
+        tool_choice: None,
+    }
+}
+
+pub fn count_open_ai_tokens(
+    request: LanguageModelRequest,
+    model: open_ai::Model,
```
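As a quick illustration of the new free function, a hedged usage sketch (the model id and token limit are made-up example values; the types and the `stream` behavior come straight from the hunk above):

```rust
// Hypothetical call site. "gpt-4" and 4096 are example values only;
// `request` is a LanguageModelRequest built elsewhere.
let openai_request: open_ai::Request =
    into_open_ai(request, "gpt-4".to_string(), Some(4096));

// Streaming stays enabled because this model id does not start with
// "o1-"; for "o1-..." model ids, into_open_ai sets stream to false.
assert!(openai_request.stream);
```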