Standardize on u64 for token counts (#32869)

Previously we were using a mix of `u32` and `usize`, e.g. `max_tokens:
usize, max_output_tokens: Option<u32>` in the same `struct`.

Although [tiktoken](https://github.com/openai/tiktoken) uses `usize`,
token counts should be consistent across targets (e.g. the same model
doesn't suddenly get a smaller context window if you're compiling for
wasm32), and these token counts could end up getting serialized using a
binary protocol, so `usize` is not the right choice for token counts.

I chose to standardize on `u64` over `u32` because we don't store many
token counts at once (so the extra size should be insignificant) and
future models may exceed `u32::MAX` tokens.

Release Notes:

- N/A
This commit is contained in:
Richard Feldman 2025-06-17 10:43:07 -04:00 committed by GitHub
parent a391d67366
commit 5405c2c2d3
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
32 changed files with 191 additions and 192 deletions

View file

@ -750,7 +750,7 @@ struct EditingMessageState {
editor: Entity<Editor>, editor: Entity<Editor>,
context_strip: Entity<ContextStrip>, context_strip: Entity<ContextStrip>,
context_picker_menu_handle: PopoverMenuHandle<ContextPicker>, context_picker_menu_handle: PopoverMenuHandle<ContextPicker>,
last_estimated_token_count: Option<usize>, last_estimated_token_count: Option<u64>,
_subscriptions: [Subscription; 2], _subscriptions: [Subscription; 2],
_update_token_count_task: Option<Task<()>>, _update_token_count_task: Option<Task<()>>,
} }
@ -857,7 +857,7 @@ impl ActiveThread {
} }
/// Returns the editing message id and the estimated token count in the content /// Returns the editing message id and the estimated token count in the content
pub fn editing_message_id(&self) -> Option<(MessageId, usize)> { pub fn editing_message_id(&self) -> Option<(MessageId, u64)> {
self.editing_message self.editing_message
.as_ref() .as_ref()
.map(|(id, state)| (*id, state.last_estimated_token_count.unwrap_or(0))) .map(|(id, state)| (*id, state.last_estimated_token_count.unwrap_or(0)))

View file

@ -76,7 +76,7 @@ pub struct MessageEditor {
profile_selector: Entity<ProfileSelector>, profile_selector: Entity<ProfileSelector>,
edits_expanded: bool, edits_expanded: bool,
editor_is_expanded: bool, editor_is_expanded: bool,
last_estimated_token_count: Option<usize>, last_estimated_token_count: Option<u64>,
update_token_count_task: Option<Task<()>>, update_token_count_task: Option<Task<()>>,
_subscriptions: Vec<Subscription>, _subscriptions: Vec<Subscription>,
} }
@ -1335,7 +1335,7 @@ impl MessageEditor {
) )
} }
pub fn last_estimated_token_count(&self) -> Option<usize> { pub fn last_estimated_token_count(&self) -> Option<u64> {
self.last_estimated_token_count self.last_estimated_token_count
} }

View file

@ -272,8 +272,8 @@ impl DetailedSummaryState {
#[derive(Default, Debug)] #[derive(Default, Debug)]
pub struct TotalTokenUsage { pub struct TotalTokenUsage {
pub total: usize, pub total: u64,
pub max: usize, pub max: u64,
} }
impl TotalTokenUsage { impl TotalTokenUsage {
@ -299,7 +299,7 @@ impl TotalTokenUsage {
} }
} }
pub fn add(&self, tokens: usize) -> TotalTokenUsage { pub fn add(&self, tokens: u64) -> TotalTokenUsage {
TotalTokenUsage { TotalTokenUsage {
total: self.total + tokens, total: self.total + tokens,
max: self.max, max: self.max,
@ -396,7 +396,7 @@ pub struct ExceededWindowError {
/// Model used when last message exceeded context window /// Model used when last message exceeded context window
model_id: LanguageModelId, model_id: LanguageModelId,
/// Token count including last message /// Token count including last message
token_count: usize, token_count: u64,
} }
impl Thread { impl Thread {
@ -2769,7 +2769,7 @@ impl Thread {
.unwrap_or_default(); .unwrap_or_default();
TotalTokenUsage { TotalTokenUsage {
total: token_usage.total_tokens() as usize, total: token_usage.total_tokens(),
max, max,
} }
} }
@ -2791,7 +2791,7 @@ impl Thread {
let total = self let total = self
.token_usage_at_last_message() .token_usage_at_last_message()
.unwrap_or_default() .unwrap_or_default()
.total_tokens() as usize; .total_tokens();
Some(TotalTokenUsage { total, max }) Some(TotalTokenUsage { total, max })
} }

View file

@ -427,7 +427,7 @@ impl ToolUseState {
// Protect from overly large output // Protect from overly large output
let tool_output_limit = configured_model let tool_output_limit = configured_model
.map(|model| model.model.max_token_count() * BYTES_PER_TOKEN_ESTIMATE) .map(|model| model.model.max_token_count() as usize * BYTES_PER_TOKEN_ESTIMATE)
.unwrap_or(usize::MAX); .unwrap_or(usize::MAX);
let content = match tool_result { let content = match tool_result {

View file

@ -15,7 +15,7 @@ pub const ANTHROPIC_API_URL: &str = "https://api.anthropic.com";
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct AnthropicModelCacheConfiguration { pub struct AnthropicModelCacheConfiguration {
pub min_total_token: usize, pub min_total_token: u64,
pub should_speculate: bool, pub should_speculate: bool,
pub max_cache_anchors: usize, pub max_cache_anchors: usize,
} }
@ -68,14 +68,14 @@ pub enum Model {
#[serde(rename = "custom")] #[serde(rename = "custom")]
Custom { Custom {
name: String, name: String,
max_tokens: usize, max_tokens: u64,
/// The name displayed in the UI, such as in the assistant panel model dropdown menu. /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
display_name: Option<String>, display_name: Option<String>,
/// Override this model with a different Anthropic model for tool calls. /// Override this model with a different Anthropic model for tool calls.
tool_override: Option<String>, tool_override: Option<String>,
/// Indicates whether this custom model supports caching. /// Indicates whether this custom model supports caching.
cache_configuration: Option<AnthropicModelCacheConfiguration>, cache_configuration: Option<AnthropicModelCacheConfiguration>,
max_output_tokens: Option<u32>, max_output_tokens: Option<u64>,
default_temperature: Option<f32>, default_temperature: Option<f32>,
#[serde(default)] #[serde(default)]
extra_beta_headers: Vec<String>, extra_beta_headers: Vec<String>,
@ -211,7 +211,7 @@ impl Model {
} }
} }
pub fn max_token_count(&self) -> usize { pub fn max_token_count(&self) -> u64 {
match self { match self {
Self::ClaudeOpus4 Self::ClaudeOpus4
| Self::ClaudeOpus4Thinking | Self::ClaudeOpus4Thinking
@ -228,7 +228,7 @@ impl Model {
} }
} }
pub fn max_output_tokens(&self) -> u32 { pub fn max_output_tokens(&self) -> u64 {
match self { match self {
Self::ClaudeOpus4 Self::ClaudeOpus4
| Self::ClaudeOpus4Thinking | Self::ClaudeOpus4Thinking
@ -693,7 +693,7 @@ pub enum StringOrContents {
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub struct Request { pub struct Request {
pub model: String, pub model: String,
pub max_tokens: u32, pub max_tokens: u64,
pub messages: Vec<Message>, pub messages: Vec<Message>,
#[serde(default, skip_serializing_if = "Vec::is_empty")] #[serde(default, skip_serializing_if = "Vec::is_empty")]
pub tools: Vec<Tool>, pub tools: Vec<Tool>,
@ -730,13 +730,13 @@ pub struct Metadata {
#[derive(Debug, Serialize, Deserialize, Default)] #[derive(Debug, Serialize, Deserialize, Default)]
pub struct Usage { pub struct Usage {
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub input_tokens: Option<u32>, pub input_tokens: Option<u64>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub output_tokens: Option<u32>, pub output_tokens: Option<u64>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub cache_creation_input_tokens: Option<u32>, pub cache_creation_input_tokens: Option<u64>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub cache_read_input_tokens: Option<u32>, pub cache_read_input_tokens: Option<u64>,
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
@ -846,7 +846,7 @@ impl ApiError {
matches!(self.error_type.as_str(), "rate_limit_error") matches!(self.error_type.as_str(), "rate_limit_error")
} }
pub fn match_window_exceeded(&self) -> Option<usize> { pub fn match_window_exceeded(&self) -> Option<u64> {
let Some(ApiErrorCode::InvalidRequestError) = self.code() else { let Some(ApiErrorCode::InvalidRequestError) = self.code() else {
return None; return None;
}; };
@ -855,12 +855,12 @@ impl ApiError {
} }
} }
pub fn parse_prompt_too_long(message: &str) -> Option<usize> { pub fn parse_prompt_too_long(message: &str) -> Option<u64> {
message message
.strip_prefix("prompt is too long: ")? .strip_prefix("prompt is too long: ")?
.split_once(" tokens")? .split_once(" tokens")?
.0 .0
.parse::<usize>() .parse()
.ok() .ok()
} }

View file

@ -678,7 +678,7 @@ pub struct AssistantContext {
summary_task: Task<Option<()>>, summary_task: Task<Option<()>>,
completion_count: usize, completion_count: usize,
pending_completions: Vec<PendingCompletion>, pending_completions: Vec<PendingCompletion>,
token_count: Option<usize>, token_count: Option<u64>,
pending_token_count: Task<Option<()>>, pending_token_count: Task<Option<()>>,
pending_save: Task<Result<()>>, pending_save: Task<Result<()>>,
pending_cache_warming_task: Task<Option<()>>, pending_cache_warming_task: Task<Option<()>>,
@ -1250,7 +1250,7 @@ impl AssistantContext {
} }
} }
pub fn token_count(&self) -> Option<usize> { pub fn token_count(&self) -> Option<u64> {
self.token_count self.token_count
} }

View file

@ -3121,12 +3121,12 @@ fn invoked_slash_command_fold_placeholder(
enum TokenState { enum TokenState {
NoTokensLeft { NoTokensLeft {
max_token_count: usize, max_token_count: u64,
token_count: usize, token_count: u64,
}, },
HasMoreTokens { HasMoreTokens {
max_token_count: usize, max_token_count: u64,
token_count: usize, token_count: u64,
over_warn_threshold: bool, over_warn_threshold: bool,
}, },
} }
@ -3139,9 +3139,7 @@ fn token_state(context: &Entity<AssistantContext>, cx: &App) -> Option<TokenStat
.model; .model;
let token_count = context.read(cx).token_count()?; let token_count = context.read(cx).token_count()?;
let max_token_count = model.max_token_count(); let max_token_count = model.max_token_count();
let token_state = if max_token_count.saturating_sub(token_count) == 0 {
let remaining_tokens = max_token_count as isize - token_count as isize;
let token_state = if remaining_tokens <= 0 {
TokenState::NoTokensLeft { TokenState::NoTokensLeft {
max_token_count, max_token_count,
token_count, token_count,
@ -3182,7 +3180,7 @@ fn size_for_image(data: &RenderImage, max_size: Size<Pixels>) -> Size<Pixels> {
} }
} }
pub fn humanize_token_count(count: usize) -> String { pub fn humanize_token_count(count: u64) -> String {
match count { match count {
0..=999 => count.to_string(), 0..=999 => count.to_string(),
1000..=9999 => { 1000..=9999 => {

View file

@ -664,7 +664,7 @@ mod tests {
format!("{}/{}", self.provider_id.0, self.name.0) format!("{}/{}", self.provider_id.0, self.name.0)
} }
fn max_token_count(&self) -> usize { fn max_token_count(&self) -> u64 {
1000 1000
} }
@ -672,7 +672,7 @@ mod tests {
&self, &self,
_: LanguageModelRequest, _: LanguageModelRequest,
_: &App, _: &App,
) -> BoxFuture<'static, http_client::Result<usize>> { ) -> BoxFuture<'static, http_client::Result<u64>> {
unimplemented!() unimplemented!()
} }

View file

@ -152,7 +152,7 @@ pub enum Thinking {
#[derive(Debug)] #[derive(Debug)]
pub struct Request { pub struct Request {
pub model: String, pub model: String,
pub max_tokens: u32, pub max_tokens: u64,
pub messages: Vec<BedrockMessage>, pub messages: Vec<BedrockMessage>,
pub tools: Option<BedrockToolConfig>, pub tools: Option<BedrockToolConfig>,
pub thinking: Option<Thinking>, pub thinking: Option<Thinking>,

View file

@ -99,10 +99,10 @@ pub enum Model {
#[serde(rename = "custom")] #[serde(rename = "custom")]
Custom { Custom {
name: String, name: String,
max_tokens: usize, max_tokens: u64,
/// The name displayed in the UI, such as in the assistant panel model dropdown menu. /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
display_name: Option<String>, display_name: Option<String>,
max_output_tokens: Option<u32>, max_output_tokens: Option<u64>,
default_temperature: Option<f32>, default_temperature: Option<f32>,
}, },
} }
@ -309,7 +309,7 @@ impl Model {
} }
} }
pub fn max_token_count(&self) -> usize { pub fn max_token_count(&self) -> u64 {
match self { match self {
Self::Claude3_5SonnetV2 Self::Claude3_5SonnetV2
| Self::Claude3Opus | Self::Claude3Opus
@ -328,7 +328,7 @@ impl Model {
} }
} }
pub fn max_output_tokens(&self) -> u32 { pub fn max_output_tokens(&self) -> u64 {
match self { match self {
Self::Claude3Opus | Self::Claude3Sonnet | Self::Claude3_5Haiku => 4_096, Self::Claude3Opus | Self::Claude3Sonnet | Self::Claude3_5Haiku => 4_096,
Self::Claude3_7Sonnet Self::Claude3_7Sonnet

View file

@ -126,7 +126,7 @@ struct ModelLimits {
#[serde(default)] #[serde(default)]
max_output_tokens: usize, max_output_tokens: usize,
#[serde(default)] #[serde(default)]
max_prompt_tokens: usize, max_prompt_tokens: u64,
} }
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)] #[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
@ -182,7 +182,7 @@ impl Model {
self.name.as_str() self.name.as_str()
} }
pub fn max_token_count(&self) -> usize { pub fn max_token_count(&self) -> u64 {
self.capabilities.limits.max_prompt_tokens self.capabilities.limits.max_prompt_tokens
} }
@ -316,15 +316,15 @@ pub struct ResponseEvent {
#[derive(Deserialize, Debug)] #[derive(Deserialize, Debug)]
pub struct Usage { pub struct Usage {
pub completion_tokens: u32, pub completion_tokens: u64,
pub prompt_tokens: u32, pub prompt_tokens: u64,
pub prompt_tokens_details: PromptTokensDetails, pub prompt_tokens_details: PromptTokensDetails,
pub total_tokens: u32, pub total_tokens: u64,
} }
#[derive(Deserialize, Debug)] #[derive(Deserialize, Debug)]
pub struct PromptTokensDetails { pub struct PromptTokensDetails {
pub cached_tokens: u32, pub cached_tokens: u64,
} }
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]

View file

@ -58,8 +58,8 @@ pub enum Model {
name: String, name: String,
/// The name displayed in the UI, such as in the assistant panel model dropdown menu. /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
display_name: Option<String>, display_name: Option<String>,
max_tokens: usize, max_tokens: u64,
max_output_tokens: Option<u32>, max_output_tokens: Option<u64>,
}, },
} }
@ -94,14 +94,14 @@ impl Model {
} }
} }
pub fn max_token_count(&self) -> usize { pub fn max_token_count(&self) -> u64 {
match self { match self {
Self::Chat | Self::Reasoner => 64_000, Self::Chat | Self::Reasoner => 64_000,
Self::Custom { max_tokens, .. } => *max_tokens, Self::Custom { max_tokens, .. } => *max_tokens,
} }
} }
pub fn max_output_tokens(&self) -> Option<u32> { pub fn max_output_tokens(&self) -> Option<u64> {
match self { match self {
Self::Chat => Some(8_192), Self::Chat => Some(8_192),
Self::Reasoner => Some(8_192), Self::Reasoner => Some(8_192),
@ -118,7 +118,7 @@ pub struct Request {
pub messages: Vec<RequestMessage>, pub messages: Vec<RequestMessage>,
pub stream: bool, pub stream: bool,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub max_tokens: Option<u32>, pub max_tokens: Option<u64>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub temperature: Option<f32>, pub temperature: Option<f32>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]

View file

@ -276,17 +276,17 @@ pub struct PromptFeedback {
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct UsageMetadata { pub struct UsageMetadata {
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub prompt_token_count: Option<usize>, pub prompt_token_count: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub cached_content_token_count: Option<usize>, pub cached_content_token_count: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub candidates_token_count: Option<usize>, pub candidates_token_count: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub tool_use_prompt_token_count: Option<usize>, pub tool_use_prompt_token_count: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub thoughts_token_count: Option<usize>, pub thoughts_token_count: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub total_token_count: Option<usize>, pub total_token_count: Option<u64>,
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
@ -395,7 +395,7 @@ pub struct CountTokensRequest {
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct CountTokensResponse { pub struct CountTokensResponse {
pub total_tokens: usize, pub total_tokens: u64,
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
@ -523,7 +523,7 @@ pub enum Model {
name: String, name: String,
/// The name displayed in the UI, such as in the assistant panel model dropdown menu. /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
display_name: Option<String>, display_name: Option<String>,
max_tokens: usize, max_tokens: u64,
#[serde(default)] #[serde(default)]
mode: GoogleModelMode, mode: GoogleModelMode,
}, },
@ -586,9 +586,9 @@ impl Model {
} }
} }
pub fn max_token_count(&self) -> usize { pub fn max_token_count(&self) -> u64 {
const ONE_MILLION: usize = 1_048_576; const ONE_MILLION: u64 = 1_048_576;
const TWO_MILLION: usize = 2_097_152; const TWO_MILLION: u64 = 2_097_152;
match self { match self {
Model::Gemini15Pro => TWO_MILLION, Model::Gemini15Pro => TWO_MILLION,
Model::Gemini15Flash => ONE_MILLION, Model::Gemini15Flash => ONE_MILLION,

View file

@ -169,11 +169,11 @@ impl LanguageModel for FakeLanguageModel {
"fake".to_string() "fake".to_string()
} }
fn max_token_count(&self) -> usize { fn max_token_count(&self) -> u64 {
1000000 1000000
} }
fn count_tokens(&self, _: LanguageModelRequest, _: &App) -> BoxFuture<'static, Result<usize>> { fn count_tokens(&self, _: LanguageModelRequest, _: &App) -> BoxFuture<'static, Result<u64>> {
futures::future::ready(Ok(0)).boxed() futures::future::ready(Ok(0)).boxed()
} }

View file

@ -53,7 +53,7 @@ pub fn init_settings(cx: &mut App) {
pub struct LanguageModelCacheConfiguration { pub struct LanguageModelCacheConfiguration {
pub max_cache_anchors: usize, pub max_cache_anchors: usize,
pub should_speculate: bool, pub should_speculate: bool,
pub min_total_token: usize, pub min_total_token: u64,
} }
/// A completion event from a language model. /// A completion event from a language model.
@ -135,17 +135,17 @@ impl RequestUsage {
#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)] #[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
pub struct TokenUsage { pub struct TokenUsage {
#[serde(default, skip_serializing_if = "is_default")] #[serde(default, skip_serializing_if = "is_default")]
pub input_tokens: u32, pub input_tokens: u64,
#[serde(default, skip_serializing_if = "is_default")] #[serde(default, skip_serializing_if = "is_default")]
pub output_tokens: u32, pub output_tokens: u64,
#[serde(default, skip_serializing_if = "is_default")] #[serde(default, skip_serializing_if = "is_default")]
pub cache_creation_input_tokens: u32, pub cache_creation_input_tokens: u64,
#[serde(default, skip_serializing_if = "is_default")] #[serde(default, skip_serializing_if = "is_default")]
pub cache_read_input_tokens: u32, pub cache_read_input_tokens: u64,
} }
impl TokenUsage { impl TokenUsage {
pub fn total_tokens(&self) -> u32 { pub fn total_tokens(&self) -> u64 {
self.input_tokens self.input_tokens
+ self.output_tokens + self.output_tokens
+ self.cache_read_input_tokens + self.cache_read_input_tokens
@ -254,8 +254,8 @@ pub trait LanguageModel: Send + Sync {
LanguageModelToolSchemaFormat::JsonSchema LanguageModelToolSchemaFormat::JsonSchema
} }
fn max_token_count(&self) -> usize; fn max_token_count(&self) -> u64;
fn max_output_tokens(&self) -> Option<u32> { fn max_output_tokens(&self) -> Option<u64> {
None None
} }
@ -263,7 +263,7 @@ pub trait LanguageModel: Send + Sync {
&self, &self,
request: LanguageModelRequest, request: LanguageModelRequest,
cx: &App, cx: &App,
) -> BoxFuture<'static, Result<usize>>; ) -> BoxFuture<'static, Result<u64>>;
fn stream_completion( fn stream_completion(
&self, &self,
@ -349,7 +349,7 @@ pub trait LanguageModel: Send + Sync {
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum LanguageModelKnownError { pub enum LanguageModelKnownError {
#[error("Context window limit exceeded ({tokens})")] #[error("Context window limit exceeded ({tokens})")]
ContextWindowLimitExceeded { tokens: usize }, ContextWindowLimitExceeded { tokens: u64 },
} }
pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema { pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {

View file

@ -51,12 +51,12 @@ pub struct AvailableModel {
/// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel. /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
pub display_name: Option<String>, pub display_name: Option<String>,
/// The model's context window size. /// The model's context window size.
pub max_tokens: usize, pub max_tokens: u64,
/// A model `name` to substitute when calling tools, in case the primary model doesn't support tool calling. /// A model `name` to substitute when calling tools, in case the primary model doesn't support tool calling.
pub tool_override: Option<String>, pub tool_override: Option<String>,
/// Configuration of Anthropic's caching API. /// Configuration of Anthropic's caching API.
pub cache_configuration: Option<LanguageModelCacheConfiguration>, pub cache_configuration: Option<LanguageModelCacheConfiguration>,
pub max_output_tokens: Option<u32>, pub max_output_tokens: Option<u64>,
pub default_temperature: Option<f32>, pub default_temperature: Option<f32>,
#[serde(default)] #[serde(default)]
pub extra_beta_headers: Vec<String>, pub extra_beta_headers: Vec<String>,
@ -321,7 +321,7 @@ pub struct AnthropicModel {
pub fn count_anthropic_tokens( pub fn count_anthropic_tokens(
request: LanguageModelRequest, request: LanguageModelRequest,
cx: &App, cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
cx.background_spawn(async move { cx.background_spawn(async move {
let messages = request.messages; let messages = request.messages;
let mut tokens_from_images = 0; let mut tokens_from_images = 0;
@ -377,7 +377,7 @@ pub fn count_anthropic_tokens(
// Tiktoken doesn't yet support these models, so we manually use the // Tiktoken doesn't yet support these models, so we manually use the
// same tokenizer as GPT-4. // same tokenizer as GPT-4.
tiktoken_rs::num_tokens_from_messages("gpt-4", &string_messages) tiktoken_rs::num_tokens_from_messages("gpt-4", &string_messages)
.map(|tokens| tokens + tokens_from_images) .map(|tokens| (tokens + tokens_from_images) as u64)
}) })
.boxed() .boxed()
} }
@ -461,11 +461,11 @@ impl LanguageModel for AnthropicModel {
self.state.read(cx).api_key.clone() self.state.read(cx).api_key.clone()
} }
fn max_token_count(&self) -> usize { fn max_token_count(&self) -> u64 {
self.model.max_token_count() self.model.max_token_count()
} }
fn max_output_tokens(&self) -> Option<u32> { fn max_output_tokens(&self) -> Option<u64> {
Some(self.model.max_output_tokens()) Some(self.model.max_output_tokens())
} }
@ -473,7 +473,7 @@ impl LanguageModel for AnthropicModel {
&self, &self,
request: LanguageModelRequest, request: LanguageModelRequest,
cx: &App, cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
count_anthropic_tokens(request, cx) count_anthropic_tokens(request, cx)
} }
@ -518,7 +518,7 @@ pub fn into_anthropic(
request: LanguageModelRequest, request: LanguageModelRequest,
model: String, model: String,
default_temperature: f32, default_temperature: f32,
max_output_tokens: u32, max_output_tokens: u64,
mode: AnthropicModelMode, mode: AnthropicModelMode,
) -> anthropic::Request { ) -> anthropic::Request {
let mut new_messages: Vec<anthropic::Message> = Vec::new(); let mut new_messages: Vec<anthropic::Message> = Vec::new();

View file

@ -88,9 +88,9 @@ pub enum BedrockAuthMethod {
pub struct AvailableModel { pub struct AvailableModel {
pub name: String, pub name: String,
pub display_name: Option<String>, pub display_name: Option<String>,
pub max_tokens: usize, pub max_tokens: u64,
pub cache_configuration: Option<LanguageModelCacheConfiguration>, pub cache_configuration: Option<LanguageModelCacheConfiguration>,
pub max_output_tokens: Option<u32>, pub max_output_tokens: Option<u64>,
pub default_temperature: Option<f32>, pub default_temperature: Option<f32>,
pub mode: Option<ModelMode>, pub mode: Option<ModelMode>,
} }
@ -503,11 +503,11 @@ impl LanguageModel for BedrockModel {
format!("bedrock/{}", self.model.id()) format!("bedrock/{}", self.model.id())
} }
fn max_token_count(&self) -> usize { fn max_token_count(&self) -> u64 {
self.model.max_token_count() self.model.max_token_count()
} }
fn max_output_tokens(&self) -> Option<u32> { fn max_output_tokens(&self) -> Option<u64> {
Some(self.model.max_output_tokens()) Some(self.model.max_output_tokens())
} }
@ -515,7 +515,7 @@ impl LanguageModel for BedrockModel {
&self, &self,
request: LanguageModelRequest, request: LanguageModelRequest,
cx: &App, cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
get_bedrock_tokens(request, cx) get_bedrock_tokens(request, cx)
} }
@ -583,7 +583,7 @@ pub fn into_bedrock(
request: LanguageModelRequest, request: LanguageModelRequest,
model: String, model: String,
default_temperature: f32, default_temperature: f32,
max_output_tokens: u32, max_output_tokens: u64,
mode: BedrockModelMode, mode: BedrockModelMode,
) -> Result<bedrock::Request> { ) -> Result<bedrock::Request> {
let mut new_messages: Vec<BedrockMessage> = Vec::new(); let mut new_messages: Vec<BedrockMessage> = Vec::new();
@ -747,7 +747,7 @@ pub fn into_bedrock(
pub fn get_bedrock_tokens( pub fn get_bedrock_tokens(
request: LanguageModelRequest, request: LanguageModelRequest,
cx: &App, cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
cx.background_executor() cx.background_executor()
.spawn(async move { .spawn(async move {
let messages = request.messages; let messages = request.messages;
@ -799,7 +799,7 @@ pub fn get_bedrock_tokens(
// Tiktoken doesn't yet support these models, so we manually use the // Tiktoken doesn't yet support these models, so we manually use the
// same tokenizer as GPT-4. // same tokenizer as GPT-4.
tiktoken_rs::num_tokens_from_messages("gpt-4", &string_messages) tiktoken_rs::num_tokens_from_messages("gpt-4", &string_messages)
.map(|tokens| tokens + tokens_from_images) .map(|tokens| (tokens + tokens_from_images) as u64)
}) })
.boxed() .boxed()
} }
@ -947,9 +947,9 @@ pub fn map_to_language_model_completion_events(
let completion_event = let completion_event =
LanguageModelCompletionEvent::UsageUpdate( LanguageModelCompletionEvent::UsageUpdate(
TokenUsage { TokenUsage {
input_tokens: metadata.input_tokens as u32, input_tokens: metadata.input_tokens as u64,
output_tokens: metadata.output_tokens output_tokens: metadata.output_tokens
as u32, as u64,
cache_creation_input_tokens: default(), cache_creation_input_tokens: default(),
cache_read_input_tokens: default(), cache_read_input_tokens: default(),
}, },

View file

@ -73,9 +73,9 @@ pub struct AvailableModel {
/// The size of the context window, indicating the maximum number of tokens the model can process. /// The size of the context window, indicating the maximum number of tokens the model can process.
pub max_tokens: usize, pub max_tokens: usize,
/// The maximum number of output tokens allowed by the model. /// The maximum number of output tokens allowed by the model.
pub max_output_tokens: Option<u32>, pub max_output_tokens: Option<u64>,
/// The maximum number of completion tokens allowed by the model (o1-* only) /// The maximum number of completion tokens allowed by the model (o1-* only)
pub max_completion_tokens: Option<u32>, pub max_completion_tokens: Option<u64>,
/// Override this model with a different Anthropic model for tool calls. /// Override this model with a different Anthropic model for tool calls.
pub tool_override: Option<String>, pub tool_override: Option<String>,
/// Indicates whether this custom model supports caching. /// Indicates whether this custom model supports caching.
@ -715,8 +715,8 @@ impl LanguageModel for CloudLanguageModel {
} }
} }
fn max_token_count(&self) -> usize { fn max_token_count(&self) -> u64 {
self.model.max_token_count self.model.max_token_count as u64
} }
fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> { fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
@ -737,7 +737,7 @@ impl LanguageModel for CloudLanguageModel {
&self, &self,
request: LanguageModelRequest, request: LanguageModelRequest,
cx: &App, cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
match self.model.provider { match self.model.provider {
zed_llm_client::LanguageModelProvider::Anthropic => count_anthropic_tokens(request, cx), zed_llm_client::LanguageModelProvider::Anthropic => count_anthropic_tokens(request, cx),
zed_llm_client::LanguageModelProvider::OpenAi => { zed_llm_client::LanguageModelProvider::OpenAi => {
@ -786,7 +786,7 @@ impl LanguageModel for CloudLanguageModel {
let response_body: CountTokensResponse = let response_body: CountTokensResponse =
serde_json::from_str(&response_body)?; serde_json::from_str(&response_body)?;
Ok(response_body.tokens) Ok(response_body.tokens as u64)
} else { } else {
Err(anyhow!(ApiError { Err(anyhow!(ApiError {
status, status,
@ -821,7 +821,7 @@ impl LanguageModel for CloudLanguageModel {
request, request,
self.model.id.to_string(), self.model.id.to_string(),
1.0, 1.0,
self.model.max_output_tokens as u32, self.model.max_output_tokens as u64,
if self.model.id.0.ends_with("-thinking") { if self.model.id.0.ends_with("-thinking") {
AnthropicModelMode::Thinking { AnthropicModelMode::Thinking {
budget_tokens: Some(4_096), budget_tokens: Some(4_096),

View file

@ -237,7 +237,7 @@ impl LanguageModel for CopilotChatLanguageModel {
format!("copilot_chat/{}", self.model.id()) format!("copilot_chat/{}", self.model.id())
} }
fn max_token_count(&self) -> usize { fn max_token_count(&self) -> u64 {
self.model.max_token_count() self.model.max_token_count()
} }
@ -245,7 +245,7 @@ impl LanguageModel for CopilotChatLanguageModel {
&self, &self,
request: LanguageModelRequest, request: LanguageModelRequest,
cx: &App, cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
match self.model.vendor() { match self.model.vendor() {
ModelVendor::Anthropic => count_anthropic_tokens(request, cx), ModelVendor::Anthropic => count_anthropic_tokens(request, cx),
ModelVendor::Google => count_google_tokens(request, cx), ModelVendor::Google => count_google_tokens(request, cx),

View file

@ -49,8 +49,8 @@ pub struct DeepSeekSettings {
pub struct AvailableModel { pub struct AvailableModel {
pub name: String, pub name: String,
pub display_name: Option<String>, pub display_name: Option<String>,
pub max_tokens: usize, pub max_tokens: u64,
pub max_output_tokens: Option<u32>, pub max_output_tokens: Option<u64>,
} }
pub struct DeepSeekLanguageModelProvider { pub struct DeepSeekLanguageModelProvider {
@ -306,11 +306,11 @@ impl LanguageModel for DeepSeekLanguageModel {
format!("deepseek/{}", self.model.id()) format!("deepseek/{}", self.model.id())
} }
fn max_token_count(&self) -> usize { fn max_token_count(&self) -> u64 {
self.model.max_token_count() self.model.max_token_count()
} }
fn max_output_tokens(&self) -> Option<u32> { fn max_output_tokens(&self) -> Option<u64> {
self.model.max_output_tokens() self.model.max_output_tokens()
} }
@ -318,7 +318,7 @@ impl LanguageModel for DeepSeekLanguageModel {
&self, &self,
request: LanguageModelRequest, request: LanguageModelRequest,
cx: &App, cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
cx.background_spawn(async move { cx.background_spawn(async move {
let messages = request let messages = request
.messages .messages
@ -335,7 +335,7 @@ impl LanguageModel for DeepSeekLanguageModel {
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
tiktoken_rs::num_tokens_from_messages("gpt-4", &messages) tiktoken_rs::num_tokens_from_messages("gpt-4", &messages).map(|tokens| tokens as u64)
}) })
.boxed() .boxed()
} }
@ -365,7 +365,7 @@ impl LanguageModel for DeepSeekLanguageModel {
pub fn into_deepseek( pub fn into_deepseek(
request: LanguageModelRequest, request: LanguageModelRequest,
model: &deepseek::Model, model: &deepseek::Model,
max_output_tokens: Option<u32>, max_output_tokens: Option<u64>,
) -> deepseek::Request { ) -> deepseek::Request {
let is_reasoner = *model == deepseek::Model::Reasoner; let is_reasoner = *model == deepseek::Model::Reasoner;

View file

@ -79,7 +79,7 @@ impl From<GoogleModelMode> for ModelMode {
pub struct AvailableModel { pub struct AvailableModel {
name: String, name: String,
display_name: Option<String>, display_name: Option<String>,
max_tokens: usize, max_tokens: u64,
mode: Option<ModelMode>, mode: Option<ModelMode>,
} }
@ -365,7 +365,7 @@ impl LanguageModel for GoogleLanguageModel {
format!("google/{}", self.model.request_id()) format!("google/{}", self.model.request_id())
} }
fn max_token_count(&self) -> usize { fn max_token_count(&self) -> u64 {
self.model.max_token_count() self.model.max_token_count()
} }
@ -373,7 +373,7 @@ impl LanguageModel for GoogleLanguageModel {
&self, &self,
request: LanguageModelRequest, request: LanguageModelRequest,
cx: &App, cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
let model_id = self.model.request_id().to_string(); let model_id = self.model.request_id().to_string();
let request = into_google(request, model_id.clone(), self.model.mode()); let request = into_google(request, model_id.clone(), self.model.mode());
let http_client = self.http_client.clone(); let http_client = self.http_client.clone();
@ -702,7 +702,7 @@ impl GoogleEventMapper {
pub fn count_google_tokens( pub fn count_google_tokens(
request: LanguageModelRequest, request: LanguageModelRequest,
cx: &App, cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
// We couldn't use the GoogleLanguageModelProvider to count tokens because the github copilot doesn't have the access to google_ai directly. // We couldn't use the GoogleLanguageModelProvider to count tokens because the github copilot doesn't have the access to google_ai directly.
// So we have to use tokenizer from tiktoken_rs to count tokens. // So we have to use tokenizer from tiktoken_rs to count tokens.
cx.background_spawn(async move { cx.background_spawn(async move {
@ -723,7 +723,7 @@ pub fn count_google_tokens(
// Tiktoken doesn't yet support these models, so we manually use the // Tiktoken doesn't yet support these models, so we manually use the
// same tokenizer as GPT-4. // same tokenizer as GPT-4.
tiktoken_rs::num_tokens_from_messages("gpt-4", &messages) tiktoken_rs::num_tokens_from_messages("gpt-4", &messages).map(|tokens| tokens as u64)
}) })
.boxed() .boxed()
} }
@ -750,10 +750,10 @@ fn update_usage(usage: &mut UsageMetadata, new: &UsageMetadata) {
} }
fn convert_usage(usage: &UsageMetadata) -> language_model::TokenUsage { fn convert_usage(usage: &UsageMetadata) -> language_model::TokenUsage {
let prompt_tokens = usage.prompt_token_count.unwrap_or(0) as u32; let prompt_tokens = usage.prompt_token_count.unwrap_or(0);
let cached_tokens = usage.cached_content_token_count.unwrap_or(0) as u32; let cached_tokens = usage.cached_content_token_count.unwrap_or(0);
let input_tokens = prompt_tokens - cached_tokens; let input_tokens = prompt_tokens - cached_tokens;
let output_tokens = usage.candidates_token_count.unwrap_or(0) as u32; let output_tokens = usage.candidates_token_count.unwrap_or(0);
language_model::TokenUsage { language_model::TokenUsage {
input_tokens, input_tokens,

View file

@ -44,7 +44,7 @@ pub struct LmStudioSettings {
pub struct AvailableModel { pub struct AvailableModel {
pub name: String, pub name: String,
pub display_name: Option<String>, pub display_name: Option<String>,
pub max_tokens: usize, pub max_tokens: u64,
pub supports_tool_calls: bool, pub supports_tool_calls: bool,
pub supports_images: bool, pub supports_images: bool,
} }
@ -414,7 +414,7 @@ impl LanguageModel for LmStudioLanguageModel {
format!("lmstudio/{}", self.model.id()) format!("lmstudio/{}", self.model.id())
} }
fn max_token_count(&self) -> usize { fn max_token_count(&self) -> u64 {
self.model.max_token_count() self.model.max_token_count()
} }
@ -422,7 +422,7 @@ impl LanguageModel for LmStudioLanguageModel {
&self, &self,
request: LanguageModelRequest, request: LanguageModelRequest,
_cx: &App, _cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
// Endpoint for this is coming soon. In the meantime, hacky estimation // Endpoint for this is coming soon. In the meantime, hacky estimation
let token_count = request let token_count = request
.messages .messages
@ -430,7 +430,7 @@ impl LanguageModel for LmStudioLanguageModel {
.map(|msg| msg.string_contents().split_whitespace().count()) .map(|msg| msg.string_contents().split_whitespace().count())
.sum::<usize>(); .sum::<usize>();
let estimated_tokens = (token_count as f64 * 0.75) as usize; let estimated_tokens = (token_count as f64 * 0.75) as u64;
async move { Ok(estimated_tokens) }.boxed() async move { Ok(estimated_tokens) }.boxed()
} }

View file

@ -43,9 +43,9 @@ pub struct MistralSettings {
pub struct AvailableModel { pub struct AvailableModel {
pub name: String, pub name: String,
pub display_name: Option<String>, pub display_name: Option<String>,
pub max_tokens: usize, pub max_tokens: u64,
pub max_output_tokens: Option<u32>, pub max_output_tokens: Option<u64>,
pub max_completion_tokens: Option<u32>, pub max_completion_tokens: Option<u64>,
pub supports_tools: Option<bool>, pub supports_tools: Option<bool>,
pub supports_images: Option<bool>, pub supports_images: Option<bool>,
} }
@ -322,11 +322,11 @@ impl LanguageModel for MistralLanguageModel {
format!("mistral/{}", self.model.id()) format!("mistral/{}", self.model.id())
} }
fn max_token_count(&self) -> usize { fn max_token_count(&self) -> u64 {
self.model.max_token_count() self.model.max_token_count()
} }
fn max_output_tokens(&self) -> Option<u32> { fn max_output_tokens(&self) -> Option<u64> {
self.model.max_output_tokens() self.model.max_output_tokens()
} }
@ -334,7 +334,7 @@ impl LanguageModel for MistralLanguageModel {
&self, &self,
request: LanguageModelRequest, request: LanguageModelRequest,
cx: &App, cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
cx.background_spawn(async move { cx.background_spawn(async move {
let messages = request let messages = request
.messages .messages
@ -351,7 +351,7 @@ impl LanguageModel for MistralLanguageModel {
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
tiktoken_rs::num_tokens_from_messages("gpt-4", &messages) tiktoken_rs::num_tokens_from_messages("gpt-4", &messages).map(|tokens| tokens as u64)
}) })
.boxed() .boxed()
} }
@ -386,7 +386,7 @@ impl LanguageModel for MistralLanguageModel {
pub fn into_mistral( pub fn into_mistral(
request: LanguageModelRequest, request: LanguageModelRequest,
model: String, model: String,
max_output_tokens: Option<u32>, max_output_tokens: Option<u64>,
) -> mistral::Request { ) -> mistral::Request {
let stream = true; let stream = true;

View file

@ -46,7 +46,7 @@ pub struct AvailableModel {
/// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel. /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
pub display_name: Option<String>, pub display_name: Option<String>,
/// The Context Length parameter to the model (aka num_ctx or n_ctx) /// The Context Length parameter to the model (aka num_ctx or n_ctx)
pub max_tokens: usize, pub max_tokens: u64,
/// The number of seconds to keep the connection open after the last request /// The number of seconds to keep the connection open after the last request
pub keep_alive: Option<KeepAlive>, pub keep_alive: Option<KeepAlive>,
/// Whether the model supports tools /// Whether the model supports tools
@ -377,7 +377,7 @@ impl LanguageModel for OllamaLanguageModel {
format!("ollama/{}", self.model.id()) format!("ollama/{}", self.model.id())
} }
fn max_token_count(&self) -> usize { fn max_token_count(&self) -> u64 {
self.model.max_token_count() self.model.max_token_count()
} }
@ -385,7 +385,7 @@ impl LanguageModel for OllamaLanguageModel {
&self, &self,
request: LanguageModelRequest, request: LanguageModelRequest,
_cx: &App, _cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
// There is no endpoint for this _yet_ in Ollama // There is no endpoint for this _yet_ in Ollama
// see: https://github.com/ollama/ollama/issues/1716 and https://github.com/ollama/ollama/issues/3582 // see: https://github.com/ollama/ollama/issues/1716 and https://github.com/ollama/ollama/issues/3582
let token_count = request let token_count = request
@ -395,7 +395,7 @@ impl LanguageModel for OllamaLanguageModel {
.sum::<usize>() .sum::<usize>()
/ 4; / 4;
async move { Ok(token_count) }.boxed() async move { Ok(token_count as u64) }.boxed()
} }
fn stream_completion( fn stream_completion(

View file

@ -43,9 +43,9 @@ pub struct OpenAiSettings {
pub struct AvailableModel { pub struct AvailableModel {
pub name: String, pub name: String,
pub display_name: Option<String>, pub display_name: Option<String>,
pub max_tokens: usize, pub max_tokens: u64,
pub max_output_tokens: Option<u32>, pub max_output_tokens: Option<u64>,
pub max_completion_tokens: Option<u32>, pub max_completion_tokens: Option<u64>,
} }
pub struct OpenAiLanguageModelProvider { pub struct OpenAiLanguageModelProvider {
@ -312,11 +312,11 @@ impl LanguageModel for OpenAiLanguageModel {
format!("openai/{}", self.model.id()) format!("openai/{}", self.model.id())
} }
fn max_token_count(&self) -> usize { fn max_token_count(&self) -> u64 {
self.model.max_token_count() self.model.max_token_count()
} }
fn max_output_tokens(&self) -> Option<u32> { fn max_output_tokens(&self) -> Option<u64> {
self.model.max_output_tokens() self.model.max_output_tokens()
} }
@ -324,7 +324,7 @@ impl LanguageModel for OpenAiLanguageModel {
&self, &self,
request: LanguageModelRequest, request: LanguageModelRequest,
cx: &App, cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
count_open_ai_tokens(request, self.model.clone(), cx) count_open_ai_tokens(request, self.model.clone(), cx)
} }
@ -355,7 +355,7 @@ impl LanguageModel for OpenAiLanguageModel {
pub fn into_open_ai( pub fn into_open_ai(
request: LanguageModelRequest, request: LanguageModelRequest,
model: &Model, model: &Model,
max_output_tokens: Option<u32>, max_output_tokens: Option<u64>,
) -> open_ai::Request { ) -> open_ai::Request {
let stream = !model.id().starts_with("o1-"); let stream = !model.id().starts_with("o1-");
@ -606,7 +606,7 @@ pub fn count_open_ai_tokens(
request: LanguageModelRequest, request: LanguageModelRequest,
model: Model, model: Model,
cx: &App, cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
cx.background_spawn(async move { cx.background_spawn(async move {
let messages = request let messages = request
.messages .messages
@ -652,6 +652,7 @@ pub fn count_open_ai_tokens(
| Model::O3Mini | Model::O3Mini
| Model::O4Mini => tiktoken_rs::num_tokens_from_messages(model.id(), &messages), | Model::O4Mini => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
} }
.map(|tokens| tokens as u64)
}) })
.boxed() .boxed()
} }

View file

@ -40,9 +40,9 @@ pub struct OpenRouterSettings {
pub struct AvailableModel { pub struct AvailableModel {
pub name: String, pub name: String,
pub display_name: Option<String>, pub display_name: Option<String>,
pub max_tokens: usize, pub max_tokens: u64,
pub max_output_tokens: Option<u32>, pub max_output_tokens: Option<u64>,
pub max_completion_tokens: Option<u32>, pub max_completion_tokens: Option<u64>,
pub supports_tools: Option<bool>, pub supports_tools: Option<bool>,
pub supports_images: Option<bool>, pub supports_images: Option<bool>,
} }
@ -331,11 +331,11 @@ impl LanguageModel for OpenRouterLanguageModel {
format!("openrouter/{}", self.model.id()) format!("openrouter/{}", self.model.id())
} }
fn max_token_count(&self) -> usize { fn max_token_count(&self) -> u64 {
self.model.max_token_count() self.model.max_token_count()
} }
fn max_output_tokens(&self) -> Option<u32> { fn max_output_tokens(&self) -> Option<u64> {
self.model.max_output_tokens() self.model.max_output_tokens()
} }
@ -355,7 +355,7 @@ impl LanguageModel for OpenRouterLanguageModel {
&self, &self,
request: LanguageModelRequest, request: LanguageModelRequest,
cx: &App, cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
count_open_router_tokens(request, self.model.clone(), cx) count_open_router_tokens(request, self.model.clone(), cx)
} }
@ -386,7 +386,7 @@ impl LanguageModel for OpenRouterLanguageModel {
pub fn into_open_router( pub fn into_open_router(
request: LanguageModelRequest, request: LanguageModelRequest,
model: &Model, model: &Model,
max_output_tokens: Option<u32>, max_output_tokens: Option<u64>,
) -> open_router::Request { ) -> open_router::Request {
let mut messages = Vec::new(); let mut messages = Vec::new();
for message in request.messages { for message in request.messages {
@ -640,7 +640,7 @@ pub fn count_open_router_tokens(
request: LanguageModelRequest, request: LanguageModelRequest,
_model: open_router::Model, _model: open_router::Model,
cx: &App, cx: &App,
) -> BoxFuture<'static, Result<usize>> { ) -> BoxFuture<'static, Result<u64>> {
cx.background_spawn(async move { cx.background_spawn(async move {
let messages = request let messages = request
.messages .messages
@ -657,7 +657,7 @@ pub fn count_open_router_tokens(
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
tiktoken_rs::num_tokens_from_messages("gpt-4o", &messages) tiktoken_rs::num_tokens_from_messages("gpt-4o", &messages).map(|tokens| tokens as u64)
}) })
.boxed() .boxed()
} }

View file

@ -46,7 +46,7 @@ impl From<Role> for String {
pub struct Model { pub struct Model {
pub name: String, pub name: String,
pub display_name: Option<String>, pub display_name: Option<String>,
pub max_tokens: usize, pub max_tokens: u64,
pub supports_tool_calls: bool, pub supports_tool_calls: bool,
pub supports_images: bool, pub supports_images: bool,
} }
@ -55,7 +55,7 @@ impl Model {
pub fn new( pub fn new(
name: &str, name: &str,
display_name: Option<&str>, display_name: Option<&str>,
max_tokens: Option<usize>, max_tokens: Option<u64>,
supports_tool_calls: bool, supports_tool_calls: bool,
supports_images: bool, supports_images: bool,
) -> Self { ) -> Self {
@ -76,7 +76,7 @@ impl Model {
self.display_name.as_ref().unwrap_or(&self.name) self.display_name.as_ref().unwrap_or(&self.name)
} }
pub fn max_token_count(&self) -> usize { pub fn max_token_count(&self) -> u64 {
self.max_tokens self.max_tokens
} }
@ -256,9 +256,9 @@ pub struct FunctionChunk {
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
pub struct Usage { pub struct Usage {
pub prompt_tokens: u32, pub prompt_tokens: u64,
pub completion_tokens: u32, pub completion_tokens: u64,
pub total_tokens: u32, pub total_tokens: u64,
} }
#[derive(Debug, Default, Clone, Deserialize, PartialEq)] #[derive(Debug, Default, Clone, Deserialize, PartialEq)]
@ -306,8 +306,8 @@ pub struct ModelEntry {
pub compatibility_type: CompatibilityType, pub compatibility_type: CompatibilityType,
pub quantization: Option<String>, pub quantization: Option<String>,
pub state: ModelState, pub state: ModelState,
pub max_context_length: Option<usize>, pub max_context_length: Option<u64>,
pub loaded_context_length: Option<usize>, pub loaded_context_length: Option<u64>,
#[serde(default)] #[serde(default)]
pub capabilities: Capabilities, pub capabilities: Capabilities,
} }

View file

@ -70,9 +70,9 @@ pub enum Model {
name: String, name: String,
/// The name displayed in the UI, such as in the assistant panel model dropdown menu. /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
display_name: Option<String>, display_name: Option<String>,
max_tokens: usize, max_tokens: u64,
max_output_tokens: Option<u32>, max_output_tokens: Option<u64>,
max_completion_tokens: Option<u32>, max_completion_tokens: Option<u64>,
supports_tools: Option<bool>, supports_tools: Option<bool>,
supports_images: Option<bool>, supports_images: Option<bool>,
}, },
@ -130,7 +130,7 @@ impl Model {
} }
} }
pub fn max_token_count(&self) -> usize { pub fn max_token_count(&self) -> u64 {
match self { match self {
Self::CodestralLatest => 256000, Self::CodestralLatest => 256000,
Self::MistralLargeLatest => 131000, Self::MistralLargeLatest => 131000,
@ -145,7 +145,7 @@ impl Model {
} }
} }
pub fn max_output_tokens(&self) -> Option<u32> { pub fn max_output_tokens(&self) -> Option<u64> {
match self { match self {
Self::Custom { Self::Custom {
max_output_tokens, .. max_output_tokens, ..
@ -193,7 +193,7 @@ pub struct Request {
pub messages: Vec<RequestMessage>, pub messages: Vec<RequestMessage>,
pub stream: bool, pub stream: bool,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub max_tokens: Option<u32>, pub max_tokens: Option<u64>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub temperature: Option<f32>, pub temperature: Option<f32>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
@ -360,9 +360,9 @@ pub struct Response {
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
pub struct Usage { pub struct Usage {
pub prompt_tokens: u32, pub prompt_tokens: u64,
pub completion_tokens: u32, pub completion_tokens: u64,
pub total_tokens: u32, pub total_tokens: u64,
} }
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]

View file

@ -35,18 +35,18 @@ impl Default for KeepAlive {
pub struct Model { pub struct Model {
pub name: String, pub name: String,
pub display_name: Option<String>, pub display_name: Option<String>,
pub max_tokens: usize, pub max_tokens: u64,
pub keep_alive: Option<KeepAlive>, pub keep_alive: Option<KeepAlive>,
pub supports_tools: Option<bool>, pub supports_tools: Option<bool>,
pub supports_vision: Option<bool>, pub supports_vision: Option<bool>,
pub supports_thinking: Option<bool>, pub supports_thinking: Option<bool>,
} }
fn get_max_tokens(name: &str) -> usize { fn get_max_tokens(name: &str) -> u64 {
/// Default context length for unknown models. /// Default context length for unknown models.
const DEFAULT_TOKENS: usize = 4096; const DEFAULT_TOKENS: u64 = 4096;
/// Magic number. Lets many Ollama models work with ~16GB of ram. /// Magic number. Lets many Ollama models work with ~16GB of ram.
const MAXIMUM_TOKENS: usize = 16384; const MAXIMUM_TOKENS: u64 = 16384;
match name.split(':').next().unwrap() { match name.split(':').next().unwrap() {
"phi" | "tinyllama" | "granite-code" => 2048, "phi" | "tinyllama" | "granite-code" => 2048,
@ -67,7 +67,7 @@ impl Model {
pub fn new( pub fn new(
name: &str, name: &str,
display_name: Option<&str>, display_name: Option<&str>,
max_tokens: Option<usize>, max_tokens: Option<u64>,
supports_tools: Option<bool>, supports_tools: Option<bool>,
supports_vision: Option<bool>, supports_vision: Option<bool>,
supports_thinking: Option<bool>, supports_thinking: Option<bool>,
@ -93,7 +93,7 @@ impl Model {
self.display_name.as_ref().unwrap_or(&self.name) self.display_name.as_ref().unwrap_or(&self.name)
} }
pub fn max_token_count(&self) -> usize { pub fn max_token_count(&self) -> u64 {
self.max_tokens self.max_tokens
} }
} }
@ -165,7 +165,7 @@ impl ChatRequest {
// https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values // https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
#[derive(Serialize, Default, Debug)] #[derive(Serialize, Default, Debug)]
pub struct ChatOptions { pub struct ChatOptions {
pub num_ctx: Option<usize>, pub num_ctx: Option<u64>,
pub num_predict: Option<isize>, pub num_predict: Option<isize>,
pub stop: Option<Vec<String>>, pub stop: Option<Vec<String>>,
pub temperature: Option<f32>, pub temperature: Option<f32>,
@ -183,8 +183,8 @@ pub struct ChatResponseDelta {
pub done_reason: Option<String>, pub done_reason: Option<String>,
#[allow(unused)] #[allow(unused)]
pub done: bool, pub done: bool,
pub prompt_eval_count: Option<u32>, pub prompt_eval_count: Option<u64>,
pub eval_count: Option<u32>, pub eval_count: Option<u64>,
} }
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]

View file

@ -80,9 +80,9 @@ pub enum Model {
name: String, name: String,
/// The name displayed in the UI, such as in the assistant panel model dropdown menu. /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
display_name: Option<String>, display_name: Option<String>,
max_tokens: usize, max_tokens: u64,
max_output_tokens: Option<u32>, max_output_tokens: Option<u64>,
max_completion_tokens: Option<u32>, max_completion_tokens: Option<u64>,
}, },
} }
@ -147,7 +147,7 @@ impl Model {
} }
} }
pub fn max_token_count(&self) -> usize { pub fn max_token_count(&self) -> u64 {
match self { match self {
Self::ThreePointFiveTurbo => 16_385, Self::ThreePointFiveTurbo => 16_385,
Self::Four => 8_192, Self::Four => 8_192,
@ -165,7 +165,7 @@ impl Model {
} }
} }
pub fn max_output_tokens(&self) -> Option<u32> { pub fn max_output_tokens(&self) -> Option<u64> {
match self { match self {
Self::Custom { Self::Custom {
max_output_tokens, .. max_output_tokens, ..
@ -209,7 +209,7 @@ pub struct Request {
pub messages: Vec<RequestMessage>, pub messages: Vec<RequestMessage>,
pub stream: bool, pub stream: bool,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub max_completion_tokens: Option<u32>, pub max_completion_tokens: Option<u64>,
#[serde(default, skip_serializing_if = "Vec::is_empty")] #[serde(default, skip_serializing_if = "Vec::is_empty")]
pub stop: Vec<String>, pub stop: Vec<String>,
pub temperature: f32, pub temperature: f32,

View file

@ -50,7 +50,7 @@ impl From<Role> for String {
pub struct Model { pub struct Model {
pub name: String, pub name: String,
pub display_name: Option<String>, pub display_name: Option<String>,
pub max_tokens: usize, pub max_tokens: u64,
pub supports_tools: Option<bool>, pub supports_tools: Option<bool>,
pub supports_images: Option<bool>, pub supports_images: Option<bool>,
} }
@ -73,7 +73,7 @@ impl Model {
pub fn new( pub fn new(
name: &str, name: &str,
display_name: Option<&str>, display_name: Option<&str>,
max_tokens: Option<usize>, max_tokens: Option<u64>,
supports_tools: Option<bool>, supports_tools: Option<bool>,
supports_images: Option<bool>, supports_images: Option<bool>,
) -> Self { ) -> Self {
@ -94,11 +94,11 @@ impl Model {
self.display_name.as_ref().unwrap_or(&self.name) self.display_name.as_ref().unwrap_or(&self.name)
} }
pub fn max_token_count(&self) -> usize { pub fn max_token_count(&self) -> u64 {
self.max_tokens self.max_tokens
} }
pub fn max_output_tokens(&self) -> Option<u32> { pub fn max_output_tokens(&self) -> Option<u64> {
None None
} }
@ -117,7 +117,7 @@ pub struct Request {
pub messages: Vec<RequestMessage>, pub messages: Vec<RequestMessage>,
pub stream: bool, pub stream: bool,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub max_tokens: Option<u32>, pub max_tokens: Option<u64>,
#[serde(default, skip_serializing_if = "Vec::is_empty")] #[serde(default, skip_serializing_if = "Vec::is_empty")]
pub stop: Vec<String>, pub stop: Vec<String>,
pub temperature: f32, pub temperature: f32,
@ -318,9 +318,9 @@ pub struct FunctionChunk {
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
pub struct Usage { pub struct Usage {
pub prompt_tokens: u32, pub prompt_tokens: u64,
pub completion_tokens: u32, pub completion_tokens: u64,
pub total_tokens: u32, pub total_tokens: u64,
} }
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
@ -369,7 +369,7 @@ pub struct ModelEntry {
pub created: usize, pub created: usize,
pub description: String, pub description: String,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub context_length: Option<usize>, pub context_length: Option<u64>,
#[serde(default, skip_serializing_if = "Vec::is_empty")] #[serde(default, skip_serializing_if = "Vec::is_empty")]
pub supported_parameters: Vec<String>, pub supported_parameters: Vec<String>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]

View file

@ -154,7 +154,7 @@ pub struct RulesLibrary {
struct RuleEditor { struct RuleEditor {
title_editor: Entity<Editor>, title_editor: Entity<Editor>,
body_editor: Entity<Editor>, body_editor: Entity<Editor>,
token_count: Option<usize>, token_count: Option<u64>,
pending_token_count: Task<Option<()>>, pending_token_count: Task<Option<()>>,
next_title_and_body_to_save: Option<(String, Rope)>, next_title_and_body_to_save: Option<(String, Rope)>,
pending_save: Option<Task<Option<()>>>, pending_save: Option<Task<Option<()>>>,