use anyhow::{Context as _, Result, anyhow};
use collections::{BTreeMap, HashMap};
use credentials_provider::CredentialsProvider;
use futures::Stream;
use futures::{FutureExt, StreamExt, future::BoxFuture};
use gpui::{AnyView, App, AsyncApp, Context, Entity, Subscription, Task, Window};
use http_client::HttpClient;
use language_model::{
    AuthenticateError, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolUse, MessageContent,
    RateLimiter, Role, StopReason,
};
use menu;
use open_ai::{ImageUrl, ResponseStreamEvent, stream_completion};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use settings::{Settings, SettingsStore};
use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;
use strum::IntoEnumIterator;
use vercel::Model;
use ui::{ElevationIndex, List, Tooltip, prelude::*};
use ui_input::SingleLineInput;
use util::ResultExt;

use crate::{AllLanguageModelSettings, ui::InstructionListItem};

const PROVIDER_ID: &str = "vercel";
const PROVIDER_NAME: &str = "Vercel";

#[derive(Default, Clone, Debug, PartialEq)]
pub struct VercelSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
    pub needs_setting_migration: bool,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub max_output_tokens: Option<u64>,
    pub max_completion_tokens: Option<u64>,
}

pub struct VercelLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: gpui::Entity<State>,
}

pub struct State {
    api_key: Option<String>,
    api_key_from_env: bool,
    _subscription: Subscription,
}

const VERCEL_API_KEY_VAR: &str = "VERCEL_API_KEY";

impl State {
    fn is_authenticated(&self) -> bool {
        self.api_key.is_some()
    }

    fn reset_api_key(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let credentials_provider = <dyn CredentialsProvider>::global(cx);
        let settings = &AllLanguageModelSettings::get_global(cx).vercel;
        let api_url = if settings.api_url.is_empty() {
            vercel::VERCEL_API_URL.to_string()
        } else {
            settings.api_url.clone()
        };
        cx.spawn(async move |this, cx| {
            credentials_provider
                .delete_credentials(&api_url, &cx)
                .await
                .log_err();
            this.update(cx, |this, cx| {
                this.api_key = None;
                this.api_key_from_env = false;
                cx.notify();
            })
        })
    }

    fn set_api_key(&mut self, api_key: String, cx: &mut Context<Self>) -> Task<Result<()>> {
        let credentials_provider = <dyn CredentialsProvider>::global(cx);
        let settings = &AllLanguageModelSettings::get_global(cx).vercel;
        let api_url = if settings.api_url.is_empty() {
            vercel::VERCEL_API_URL.to_string()
        } else {
            settings.api_url.clone()
        };
        cx.spawn(async move |this, cx| {
            credentials_provider
                .write_credentials(&api_url, "Bearer", api_key.as_bytes(), &cx)
                .await
                .log_err();
            this.update(cx, |this, cx| {
                this.api_key = Some(api_key);
                cx.notify();
            })
        })
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated() {
            return Task::ready(Ok(()));
        }

        let credentials_provider = <dyn CredentialsProvider>::global(cx);
        let settings = &AllLanguageModelSettings::get_global(cx).vercel;
        let api_url = if settings.api_url.is_empty() {
            vercel::VERCEL_API_URL.to_string()
        } else {
            settings.api_url.clone()
        };
        cx.spawn(async move |this, cx| {
            let (api_key, from_env) = if let Ok(api_key) = std::env::var(VERCEL_API_KEY_VAR) {
                (api_key, true)
            } else {
                let (_, api_key) = credentials_provider
                    .read_credentials(&api_url, &cx)
                    .await?
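// Illustrative sketch only: the JSON shape implied by `AvailableModel` and
// `VercelSettings` above, as it might appear in a user's settings file. The
// exact settings path, key casing, and the model name below are assumptions
// for illustration, not something this module defines:
//
// "language_models": {
//   "vercel": {
//     "api_url": "",                      // empty falls back to vercel::VERCEL_API_URL
//     "available_models": [
//       {
//         "name": "my-custom-v0-model",   // hypothetical model name
//         "display_name": "My v0 Model",
//         "max_tokens": 128000,
//         "max_output_tokens": 32000,
//         "max_completion_tokens": 32000
//       }
//     ]
//   }
// }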
                    .ok_or(AuthenticateError::CredentialsNotFound)?;
                (
                    String::from_utf8(api_key)
                        .context(format!("invalid {PROVIDER_NAME} API key"))?,
                    false,
                )
            };
            this.update(cx, |this, cx| {
                this.api_key = Some(api_key);
                this.api_key_from_env = from_env;
                cx.notify();
            })?;
            Ok(())
        })
    }
}

impl VercelLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let state = cx.new(|cx| State {
            api_key: None,
            api_key_from_env: false,
            _subscription: cx.observe_global::<SettingsStore>(|_this: &mut State, cx| {
                cx.notify();
            }),
        });

        Self { http_client, state }
    }

    fn create_language_model(&self, model: vercel::Model) -> Arc<dyn LanguageModel> {
        Arc::new(VercelLanguageModel {
            id: LanguageModelId::from(model.id().to_string()),
            model,
            state: self.state.clone(),
            http_client: self.http_client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for VercelLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for VercelLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(PROVIDER_ID.into())
    }

    fn name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn icon(&self) -> IconName {
        IconName::AiVZero
    }

    fn default_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(vercel::Model::default()))
    }

    fn default_fast_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(vercel::Model::default_fast()))
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        // Add base models from vercel::Model::iter()
        for model in vercel::Model::iter() {
            if !matches!(model, vercel::Model::Custom { .. }) {
                models.insert(model.id().to_string(), model);
            }
        }

        // Override with available models from settings
        for model in &AllLanguageModelSettings::get_global(cx)
            .vercel
            .available_models
        {
            models.insert(
                model.name.clone(),
                vercel::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                },
            );
        }

        models
            .into_values()
            .map(|model| self.create_language_model(model))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView {
        cx.new(|cx| ConfigurationView::new(self.state.clone(), window, cx))
            .into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state.update(cx, |state, cx| state.reset_api_key(cx))
    }
}

pub struct VercelLanguageModel {
    id: LanguageModelId,
    model: vercel::Model,
    state: gpui::Entity<State>,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
}

impl VercelLanguageModel {
    fn stream_completion(
        &self,
        request: open_ai::Request,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponseStreamEvent>>>>
    {
        let http_client = self.http_client.clone();
        let Ok((api_key, api_url)) = cx.read_entity(&self.state, |state, cx| {
            let settings = &AllLanguageModelSettings::get_global(cx).vercel;
            let api_url = if settings.api_url.is_empty() {
                vercel::VERCEL_API_URL.to_string()
            } else {
                settings.api_url.clone()
            };
            (state.api_key.clone(), api_url)
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let api_key = api_key.context("Missing Vercel API Key")?;
            let request =
                stream_completion(http_client.as_ref(), &api_url, &api_key, request);
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }
}
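// Request flow, for orientation: the trait-level `stream_completion` below
// converts a `LanguageModelRequest` into an OpenAI-style `open_ai::Request`
// (Vercel v0 exposes an OpenAI-compatible API), funnels it through the
// provider's `RateLimiter` (at most 4 concurrent requests, per
// `RateLimiter::new(4)` above), and then maps the raw `ResponseStreamEvent`s
// into `LanguageModelCompletionEvent`s via `VercelEventMapper`.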
impl LanguageModel for VercelLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(PROVIDER_ID.into())
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn supports_tools(&self) -> bool {
        true
    }

    fn supports_images(&self) -> bool {
        false
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto => true,
            LanguageModelToolChoice::Any => true,
            LanguageModelToolChoice::None => true,
        }
    }

    fn telemetry_id(&self) -> String {
        format!("vercel/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn max_output_tokens(&self) -> Option<u64> {
        self.model.max_output_tokens()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        count_vercel_tokens(request, self.model.clone(), cx)
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            futures::stream::BoxStream<
                'static,
                Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
            >,
            LanguageModelCompletionError,
        >,
    > {
        let request = into_vercel(request, &self.model, self.max_output_tokens());
        let completions = self.stream_completion(request, cx);
        async move {
            let mapper = VercelEventMapper::new();
            Ok(mapper.map_stream(completions.await?).boxed())
        }
        .boxed()
    }
}

pub fn into_vercel(
    request: LanguageModelRequest,
    model: &vercel::Model,
    max_output_tokens: Option<u64>,
) -> open_ai::Request {
    let stream = !model.id().starts_with("o1-");

    let mut messages = Vec::new();
    for message in request.messages {
        for content in message.content {
            match content {
                MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                    add_message_content_part(
                        open_ai::MessagePart::Text { text },
                        message.role,
                        &mut messages,
                    )
                }
                MessageContent::RedactedThinking(_) => {}
                MessageContent::Image(image) => {
                    add_message_content_part(
                        open_ai::MessagePart::Image {
                            image_url: ImageUrl {
                                url: image.to_base64_url(),
                                detail: None,
                            },
                        },
                        message.role,
                        &mut messages,
                    );
                }
                MessageContent::ToolUse(tool_use) => {
                    let tool_call = open_ai::ToolCall {
                        id: tool_use.id.to_string(),
                        content: open_ai::ToolCallContent::Function {
                            function: open_ai::FunctionContent {
                                name: tool_use.name.to_string(),
                                arguments: serde_json::to_string(&tool_use.input)
                                    .unwrap_or_default(),
                            },
                        },
                    };

                    if let Some(open_ai::RequestMessage::Assistant { tool_calls, .. }) =
                        messages.last_mut()
                    {
                        tool_calls.push(tool_call);
                    } else {
                        messages.push(open_ai::RequestMessage::Assistant {
                            content: None,
                            tool_calls: vec![tool_call],
                        });
                    }
                }
                MessageContent::ToolResult(tool_result) => {
                    let content = match &tool_result.content {
                        LanguageModelToolResultContent::Text(text) => {
                            vec![open_ai::MessagePart::Text {
                                text: text.to_string(),
                            }]
                        }
                        LanguageModelToolResultContent::Image(image) => {
                            vec![open_ai::MessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                    detail: None,
                                },
                            }]
                        }
                    };

                    messages.push(open_ai::RequestMessage::Tool {
                        content: content.into(),
                        tool_call_id: tool_result.tool_use_id.to_string(),
                    });
                }
            }
        }
    }

    open_ai::Request {
        model: model.id().into(),
        messages,
        stream,
        stop: request.stop,
        temperature: request.temperature.unwrap_or(1.0),
        max_completion_tokens: max_output_tokens,
        parallel_tool_calls: if model.supports_parallel_tool_calls() && !request.tools.is_empty() {
            // Disable parallel tool calls, as the Agent currently expects a maximum of one per turn.
            Some(false)
        } else {
            None
        },
        tools: request
            .tools
            .into_iter()
            .map(|tool| open_ai::ToolDefinition::Function {
                function: open_ai::FunctionDefinition {
                    name: tool.name,
                    description: Some(tool.description),
                    parameters: Some(tool.input_schema),
                },
            })
            .collect(),
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
        }),
    }
}
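// Worked example of the conversion above (as comments; types elided). A thread
//
//   [ User(Text "What's 2+2?"),
//     Assistant(ToolUse "calculator"),
//     User(ToolResult "4") ]
//
// becomes, in OpenAI wire shape:
//
//   [ RequestMessage::User      { content: "What's 2+2?" },
//     RequestMessage::Assistant { content: None, tool_calls: [calculator] },
//     RequestMessage::Tool      { tool_call_id: ..., content: "4" } ]
//
// with `parallel_tool_calls: Some(false)` whenever tools are present and the
// model supports the flag, since the Agent expects at most one tool call per turn.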
fn add_message_content_part(
    new_part: open_ai::MessagePart,
    role: Role,
    messages: &mut Vec<open_ai::RequestMessage>,
) {
    match (role, messages.last_mut()) {
        (Role::User, Some(open_ai::RequestMessage::User { content }))
        | (
            Role::Assistant,
            Some(open_ai::RequestMessage::Assistant {
                content: Some(content),
                ..
            }),
        )
        | (Role::System, Some(open_ai::RequestMessage::System { content, .. })) => {
            content.push_part(new_part);
        }
        _ => {
            messages.push(match role {
                Role::User => open_ai::RequestMessage::User {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
                Role::Assistant => open_ai::RequestMessage::Assistant {
                    content: Some(open_ai::MessageContent::from(vec![new_part])),
                    tool_calls: Vec::new(),
                },
                Role::System => open_ai::RequestMessage::System {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
            });
        }
    }
}
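// Note on coalescing: `add_message_content_part` appends to the previous
// message when the role matches, so two consecutive `MessageContent::Text`
// parts from the user become a single `RequestMessage::User` with two parts
// rather than two separate messages. (See the sketch test at the bottom of
// this file.)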
pub struct VercelEventMapper {
    tool_calls_by_index: HashMap<usize, RawToolCall>,
}

impl VercelEventMapper {
    pub fn new() -> Self {
        Self {
            tool_calls_by_index: HashMap::default(),
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::Other(anyhow!(error)))],
            })
        })
    }

    pub fn map_event(
        &mut self,
        event: ResponseStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let Some(choice) = event.choices.first() else {
            return Vec::new();
        };

        let mut events = Vec::new();
        if let Some(content) = choice.delta.content.clone() {
            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
        }

        if let Some(tool_calls) = choice.delta.tool_calls.as_ref() {
            for tool_call in tool_calls {
                let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();

                if let Some(tool_id) = tool_call.id.clone() {
                    entry.id = tool_id;
                }

                if let Some(function) = tool_call.function.as_ref() {
                    if let Some(name) = function.name.clone() {
                        entry.name = name;
                    }

                    if let Some(arguments) = function.arguments.clone() {
                        entry.arguments.push_str(&arguments);
                    }
                }
            }
        }

        match choice.finish_reason.as_deref() {
            Some("stop") => {
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            Some("tool_calls") => {
                events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
                    match serde_json::Value::from_str(&tool_call.arguments) {
                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: tool_call.id.clone().into(),
                                name: tool_call.name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: tool_call.arguments.clone(),
                            },
                        )),
                        Err(error) => Err(LanguageModelCompletionError::BadInputJson {
                            id: tool_call.id.into(),
                            tool_name: tool_call.name.as_str().into(),
                            raw_input: tool_call.arguments.into(),
                            json_parse_error: error.to_string(),
                        }),
                    }
                }));

                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
            }
            Some(stop_reason) => {
                log::error!("Unexpected Vercel stop_reason: {stop_reason:?}");
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            None => {}
        }

        events
    }
}

#[derive(Default)]
struct RawToolCall {
    id: String,
    name: String,
    arguments: String,
}

pub fn count_vercel_tokens(
    request: LanguageModelRequest,
    model: Model,
    cx: &App,
) -> BoxFuture<'static, Result<u64>> {
    cx.background_spawn(async move {
        let messages = request
            .messages
            .into_iter()
            .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
                role: match message.role {
                    Role::User => "user".into(),
                    Role::Assistant => "assistant".into(),
                    Role::System => "system".into(),
                },
                content: Some(message.string_contents()),
                name: None,
                function_call: None,
            })
            .collect::<Vec<_>>();

        match model {
            Model::Custom { max_tokens, .. } => {
                let model = if max_tokens >= 100_000 {
                    // If the max tokens is 100k or more, it is likely the o200k_base tokenizer from gpt-4o
                    "gpt-4o"
                } else {
                    // Otherwise fall back to gpt-4, since only cl100k_base and o200k_base are
                    // supported with this tiktoken method
                    "gpt-4"
                };

                tiktoken_rs::num_tokens_from_messages(model, &messages)
            }
            // Map Vercel models to appropriate OpenAI models for token counting,
            // since Vercel uses an OpenAI-compatible API.
            Model::VZero => {
                // Vercel v0 is similar to GPT-4o, so use gpt-4o for token counting
                tiktoken_rs::num_tokens_from_messages("gpt-4o", &messages)
            }
        }
        .map(|tokens| tokens as u64)
    })
    .boxed()
}
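// Tokenizer mapping used above, summarized:
//
//   Model::VZero                         -> "gpt-4o" (o200k_base)
//   Model::Custom { max_tokens >= 100k } -> "gpt-4o" (o200k_base)
//   Model::Custom { max_tokens <  100k } -> "gpt-4"  (cl100k_base)
//
// These are approximations: tiktoken-rs has no Vercel-specific vocabulary, so
// the counts are estimates rather than exact, billing-accurate token counts.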
struct ConfigurationView {
    api_key_editor: Entity<SingleLineInput>,
    state: gpui::Entity<State>,
    load_credentials_task: Option<Task<()>>,
}

impl ConfigurationView {
    fn new(state: gpui::Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
        let api_key_editor = cx.new(|cx| {
            SingleLineInput::new(
                window,
                cx,
                "v1:0000000000000000000000000000000000000000000000000",
            )
            .label("API key")
        });

        cx.observe(&state, |_, _, cx| {
            cx.notify();
        })
        .detach();

        let load_credentials_task = Some(cx.spawn_in(window, {
            let state = state.clone();
            async move |this, cx| {
                if let Some(task) = state
                    .update(cx, |state, cx| state.authenticate(cx))
                    .log_err()
                {
                    // We don't log an error, because "not signed in" is also an error.
                    let _ = task.await;
                }

                this.update(cx, |this, cx| {
                    this.load_credentials_task = None;
                    cx.notify();
                })
                .log_err();
            }
        }));

        Self {
            api_key_editor,
            state,
            load_credentials_task,
        }
    }

    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
        let api_key = self
            .api_key_editor
            .read(cx)
            .editor()
            .read(cx)
            .text(cx)
            .trim()
            .to_string();

        // Don't proceed if no API key is provided and we're not authenticated
        if api_key.is_empty() && !self.state.read(cx).is_authenticated() {
            return;
        }

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(api_key, cx))?
                .await
        })
        .detach_and_log_err(cx);

        cx.notify();
    }

    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
        self.api_key_editor.update(cx, |input, cx| {
            input.editor.update(cx, |editor, cx| {
                editor.set_text("", window, cx);
            });
        });

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state.update(cx, |state, cx| state.reset_api_key(cx))?.await
        })
        .detach_and_log_err(cx);

        cx.notify();
    }

    fn should_render_editor(&self, cx: &mut Context<Self>) -> bool {
        !self.state.read(cx).is_authenticated()
    }
}
impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let env_var_set = self.state.read(cx).api_key_from_env;

        let api_key_section = if self.should_render_editor(cx) {
            v_flex()
                .on_action(cx.listener(Self::save_api_key))
                .child(Label::new("To use Zed's agent with Vercel v0, you need to add an API key. Follow these steps:"))
                .child(
                    List::new()
                        .child(InstructionListItem::new(
                            "Create one by visiting",
                            Some("Vercel v0's console"),
                            Some("https://v0.dev/chat/settings/keys"),
                        ))
                        .child(InstructionListItem::text_only(
                            "Paste your API key below and hit enter to start using the agent",
                        )),
                )
                .child(self.api_key_editor.clone())
                .child(
                    Label::new(format!(
                        "You can also assign the {VERCEL_API_KEY_VAR} environment variable and restart Zed."
                    ))
                    .size(LabelSize::Small)
                    .color(Color::Muted),
                )
                .child(
                    Label::new("Note that Vercel v0 is a custom OpenAI-compatible provider.")
                        .size(LabelSize::Small)
                        .color(Color::Muted),
                )
                .into_any()
        } else {
            h_flex()
                .mt_1()
                .p_1()
                .justify_between()
                .rounded_md()
                .border_1()
                .border_color(cx.theme().colors().border)
                .bg(cx.theme().colors().background)
                .child(
                    h_flex()
                        .gap_1()
                        .child(Icon::new(IconName::Check).color(Color::Success))
                        .child(Label::new(if env_var_set {
                            format!("API key set in {VERCEL_API_KEY_VAR} environment variable.")
                        } else {
                            "API key configured.".to_string()
                        })),
                )
                .child(
                    Button::new("reset-api-key", "Reset API Key")
                        .label_size(LabelSize::Small)
                        .icon(IconName::Undo)
                        .icon_size(IconSize::Small)
                        .icon_position(IconPosition::Start)
                        .layer(ElevationIndex::ModalSurface)
                        .when(env_var_set, |this| {
                            this.tooltip(Tooltip::text(format!("To reset your API key, unset the {VERCEL_API_KEY_VAR} environment variable.")))
                        })
                        .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx))),
                )
                .into_any()
        };

        if self.load_credentials_task.is_some() {
            div().child(Label::new("Loading credentials…")).into_any()
        } else {
            v_flex().size_full().child(api_key_section).into_any()
        }
    }
}

#[cfg(test)]
mod tests {
    use gpui::TestAppContext;
    use language_model::LanguageModelRequestMessage;

    use super::*;

    #[gpui::test]
    fn tiktoken_rs_support(cx: &TestAppContext) {
        let request = LanguageModelRequest {
            thread_id: None,
            prompt_id: None,
            intent: None,
            mode: None,
            messages: vec![LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::Text("message".into())],
                cache: false,
            }],
            tools: vec![],
            tool_choice: None,
            stop: vec![],
            temperature: None,
        };

        // Validate that all models are supported by tiktoken-rs
        for model in Model::iter() {
            let count = cx
                .executor()
                .block(count_vercel_tokens(
                    request.clone(),
                    model,
                    &cx.app.borrow(),
                ))
                .unwrap();
            assert!(count > 0);
        }
    }
}
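// A minimal additional test sketch (not part of the original suite): it
// exercises the message-coalescing behavior of `into_vercel` described above.
// It assumes the `messages` field of `open_ai::Request` is publicly readable,
// as its construction in `into_vercel` suggests.
#[cfg(test)]
mod message_coalescing_tests {
    use language_model::LanguageModelRequestMessage;

    use super::*;

    #[test]
    fn consecutive_user_text_parts_coalesce() {
        let request = LanguageModelRequest {
            thread_id: None,
            prompt_id: None,
            intent: None,
            mode: None,
            messages: vec![LanguageModelRequestMessage {
                role: Role::User,
                // Two text parts within a single user message...
                content: vec![
                    MessageContent::Text("first".into()),
                    MessageContent::Text("second".into()),
                ],
                cache: false,
            }],
            tools: vec![],
            tool_choice: None,
            stop: vec![],
            temperature: None,
        };

        let request = into_vercel(request, &Model::VZero, None);
        // ...should be folded into one OpenAI-style user message, not two.
        assert_eq!(request.messages.len(), 1);
    }
}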