Standardize on u64 for token counts (#32869)

Previously we were using a mix of `u32` and `usize`, e.g. `max_tokens: usize, max_output_tokens: Option<u32>` in the same `struct`.
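
For illustration, the inconsistency looked like this (a hypothetical struct, not the exact one from the codebase):

```rust
// Two token counts in one struct, two different integer types:
struct ModelLimits {
    max_tokens: usize,              // width depends on the compile target
    max_output_tokens: Option<u32>, // always exactly 32 bits
}
```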

Although [tiktoken](https://github.com/openai/tiktoken) uses `usize`, token counts should be consistent across targets (the same model shouldn't suddenly get a smaller context window when you compile for wasm32, where `usize` is only 32 bits wide), and these token counts could end up serialized over a binary protocol, which calls for a fixed-width type. So `usize` is not the right choice for token counts.
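
A minimal sketch of the serialization concern, assuming a hand-rolled little-endian wire format (hypothetical, not an actual protocol from the codebase):

```rust
// `usize` is 4 bytes on wasm32 but 8 bytes on x86_64, so writing it
// verbatim produces target-dependent output. A fixed-width u64 always
// occupies the same 8 bytes on the wire.
fn encode_token_count(count: u64) -> [u8; 8] {
    count.to_le_bytes()
}

fn decode_token_count(bytes: [u8; 8]) -> u64 {
    u64::from_le_bytes(bytes)
}
```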

I chose to standardize on `u64` over `u32` because we don't store many
of them (so the extra size should be insignificant) and future models
may exceed `u32::MAX` tokens.
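
For scale, `u32::MAX` is about 4.3 billion, so a `u32` count would overflow somewhere past a four-billion-token context window:

```rust
fn main() {
    // The ceiling for a u32 token count vs. a u64 one.
    println!("{}", u32::MAX); // 4294967295 (~4.3e9)
    println!("{}", u64::MAX); // 18446744073709551615 (~1.8e19)
}
```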

Release Notes:

- N/A
Richard Feldman, 2025-06-17 10:43:07 -04:00 (committed by GitHub)
commit 5405c2c2d3 (parent a391d67366)
32 changed files with 191 additions and 192 deletions


```diff
@@ -169,11 +169,11 @@ impl LanguageModel for FakeLanguageModel {
         "fake".to_string()
     }

-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         1000000
     }

-    fn count_tokens(&self, _: LanguageModelRequest, _: &App) -> BoxFuture<'static, Result<usize>> {
+    fn count_tokens(&self, _: LanguageModelRequest, _: &App) -> BoxFuture<'static, Result<u64>> {
         futures::future::ready(Ok(0)).boxed()
     }
```


```diff
@@ -53,7 +53,7 @@ pub fn init_settings(cx: &mut App) {
 pub struct LanguageModelCacheConfiguration {
     pub max_cache_anchors: usize,
     pub should_speculate: bool,
-    pub min_total_token: usize,
+    pub min_total_token: u64,
 }

 /// A completion event from a language model.
@@ -135,17 +135,17 @@ impl RequestUsage {
 #[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
 pub struct TokenUsage {
     #[serde(default, skip_serializing_if = "is_default")]
-    pub input_tokens: u32,
+    pub input_tokens: u64,
     #[serde(default, skip_serializing_if = "is_default")]
-    pub output_tokens: u32,
+    pub output_tokens: u64,
     #[serde(default, skip_serializing_if = "is_default")]
-    pub cache_creation_input_tokens: u32,
+    pub cache_creation_input_tokens: u64,
     #[serde(default, skip_serializing_if = "is_default")]
-    pub cache_read_input_tokens: u32,
+    pub cache_read_input_tokens: u64,
 }

 impl TokenUsage {
-    pub fn total_tokens(&self) -> u32 {
+    pub fn total_tokens(&self) -> u64 {
         self.input_tokens
             + self.output_tokens
             + self.cache_read_input_tokens
```
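
One subtlety in the `total_tokens` change above: even when every field individually fits in a `u32`, their sum may not. A small sketch with hypothetical values:

```rust
fn main() {
    // Each count fits comfortably in u32...
    let input_tokens: u32 = 3_000_000_000;
    let cache_read_input_tokens: u32 = 2_000_000_000;

    // ...but summing them as u32 would panic in debug builds and wrap
    // in release builds. Widening to u64 (or storing u64 in the first
    // place, as this commit does) keeps the sum exact.
    let total = input_tokens as u64 + cache_read_input_tokens as u64;
    assert_eq!(total, 5_000_000_000);
}
```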
```diff
@@ -254,8 +254,8 @@ pub trait LanguageModel: Send + Sync {
         LanguageModelToolSchemaFormat::JsonSchema
     }

-    fn max_token_count(&self) -> usize;
-    fn max_output_tokens(&self) -> Option<u32> {
+    fn max_token_count(&self) -> u64;
+    fn max_output_tokens(&self) -> Option<u64> {
         None
     }

@@ -263,7 +263,7 @@ pub trait LanguageModel: Send + Sync {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>>;
+    ) -> BoxFuture<'static, Result<u64>>;

     fn stream_completion(
         &self,
@@ -349,7 +349,7 @@ pub trait LanguageModel: Send + Sync {
 #[derive(Debug, Error)]
 pub enum LanguageModelKnownError {
     #[error("Context window limit exceeded ({tokens})")]
-    ContextWindowLimitExceeded { tokens: usize },
+    ContextWindowLimitExceeded { tokens: u64 },
 }

 pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
```