vercel: Use proper model identifiers and add image support (#33377)

Follow up to previous PRs:
- Return `true` from `supports_images`, since v0 already supports images
- Rename the model id to `v0-1.5-md` to match the exact model version
(For now we do not expose the `sm`/`lg` variants, since they do not appear
to be available via the API)
- Provide settings autocompletion for `vercel` as a `provider` value

Release Notes:

- N/A
This commit is contained in:
Bennet Bo Fenner 2025-06-25 15:26:41 +02:00 committed by GitHub
parent 18f1221a44
commit 59aeede50d
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 15 additions and 18 deletions

View file

@ -734,6 +734,7 @@ impl JsonSchema for LanguageModelProviderSetting {
"deepseek".into(), "deepseek".into(),
"openrouter".into(), "openrouter".into(),
"mistral".into(), "mistral".into(),
"vercel".into(),
]), ]),
..Default::default() ..Default::default()
} }

View file

@ -303,14 +303,14 @@ impl LanguageModel for VercelLanguageModel {
} }
fn supports_images(&self) -> bool { fn supports_images(&self) -> bool {
false true
} }
fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool { fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
match choice { match choice {
LanguageModelToolChoice::Auto => true, LanguageModelToolChoice::Auto
LanguageModelToolChoice::Any => true, | LanguageModelToolChoice::Any
LanguageModelToolChoice::None => true, | LanguageModelToolChoice::None => true,
} }
} }
@ -398,7 +398,7 @@ pub fn count_vercel_tokens(
} }
// Map Vercel models to appropriate OpenAI models for token counting // Map Vercel models to appropriate OpenAI models for token counting
// since Vercel uses OpenAI-compatible API // since Vercel uses OpenAI-compatible API
Model::VZero => { Model::VZeroOnePointFiveMedium => {
// Vercel v0 is similar to GPT-4o, so use gpt-4o for token counting // Vercel v0 is similar to GPT-4o, so use gpt-4o for token counting
tiktoken_rs::num_tokens_from_messages("gpt-4o", &messages) tiktoken_rs::num_tokens_from_messages("gpt-4o", &messages)
} }

View file

@ -7,10 +7,9 @@ pub const VERCEL_API_URL: &str = "https://api.v0.dev/v1";
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, EnumIter)] #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, EnumIter)]
pub enum Model { pub enum Model {
#[serde(rename = "v-0")]
#[default] #[default]
VZero, #[serde(rename = "v0-1.5-md")]
VZeroOnePointFiveMedium,
#[serde(rename = "custom")] #[serde(rename = "custom")]
Custom { Custom {
name: String, name: String,
@ -24,26 +23,26 @@ pub enum Model {
impl Model { impl Model {
pub fn default_fast() -> Self { pub fn default_fast() -> Self {
Self::VZero Self::VZeroOnePointFiveMedium
} }
pub fn from_id(id: &str) -> Result<Self> { pub fn from_id(id: &str) -> Result<Self> {
match id { match id {
"v-0" => Ok(Self::VZero), "v0-1.5-md" => Ok(Self::VZeroOnePointFiveMedium),
invalid_id => anyhow::bail!("invalid model id '{invalid_id}'"), invalid_id => anyhow::bail!("invalid model id '{invalid_id}'"),
} }
} }
pub fn id(&self) -> &str { pub fn id(&self) -> &str {
match self { match self {
Self::VZero => "v-0", Self::VZeroOnePointFiveMedium => "v0-1.5-md",
Self::Custom { name, .. } => name, Self::Custom { name, .. } => name,
} }
} }
pub fn display_name(&self) -> &str { pub fn display_name(&self) -> &str {
match self { match self {
Self::VZero => "Vercel v0", Self::VZeroOnePointFiveMedium => "v0-1.5-md",
Self::Custom { Self::Custom {
name, display_name, .. name, display_name, ..
} => display_name.as_ref().unwrap_or(name), } => display_name.as_ref().unwrap_or(name),
@ -52,26 +51,23 @@ impl Model {
pub fn max_token_count(&self) -> u64 { pub fn max_token_count(&self) -> u64 {
match self { match self {
Self::VZero => 128_000, Self::VZeroOnePointFiveMedium => 128_000,
Self::Custom { max_tokens, .. } => *max_tokens, Self::Custom { max_tokens, .. } => *max_tokens,
} }
} }
pub fn max_output_tokens(&self) -> Option<u64> { pub fn max_output_tokens(&self) -> Option<u64> {
match self { match self {
Self::VZeroOnePointFiveMedium => Some(32_000),
Self::Custom { Self::Custom {
max_output_tokens, .. max_output_tokens, ..
} => *max_output_tokens, } => *max_output_tokens,
Self::VZero => Some(32_768),
} }
} }
/// Returns whether the given model supports the `parallel_tool_calls` parameter.
///
/// If the model does not support the parameter, do not pass it up, or the API will return an error.
pub fn supports_parallel_tool_calls(&self) -> bool { pub fn supports_parallel_tool_calls(&self) -> bool {
match self { match self {
Self::VZero => true, Self::VZeroOnePointFiveMedium => true,
Model::Custom { .. } => false, Model::Custom { .. } => false,
} }
} }