Standardize on u64 for token counts (#32869)

Previously we were using a mix of `u32` and `usize`, e.g. `max_tokens:
usize, max_output_tokens: Option<u32>` in the same `struct`.
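
To illustrate the friction (the `ModelLimits` struct and `remaining` helper below are hypothetical, invented for this sketch, not taken from the codebase), mixing widths forces a cast at every point where the two fields meet:

```rust
// Hypothetical sketch of the pre-change mix; names are invented.
struct ModelLimits {
    max_tokens: usize,
    max_output_tokens: Option<u32>,
}

fn remaining(limits: &ModelLimits, used: usize) -> usize {
    // Just comparing the two fields requires a width cast.
    let cap = limits
        .max_output_tokens
        .map(|n| n as usize)
        .unwrap_or(limits.max_tokens);
    cap.saturating_sub(used)
}

fn main() {
    let limits = ModelLimits {
        max_tokens: 8192,
        max_output_tokens: Some(1024),
    };
    assert_eq!(remaining(&limits, 200), 824);
}
```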

Although [tiktoken](https://github.com/openai/tiktoken) uses `usize`,
token counts should be consistent across targets: the same model
shouldn't suddenly get a smaller context window just because you're
compiling for wasm32. These counts could also end up serialized over a
binary protocol, where a target-dependent width is a liability, so
`usize` is not the right choice for token counts.
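
A quick sketch of the target dependence (illustrative, not from the PR itself):

```rust
fn main() {
    // Prints 4 on wasm32 and 8 on x86_64: the same field changes
    // width depending on the compilation target.
    println!("usize is {} bytes here", std::mem::size_of::<usize>());

    // A u64 is 8 bytes on every target, so a binary protocol can
    // round-trip token counts with a fixed layout.
    let token_count: u64 = 200_000;
    let wire = token_count.to_le_bytes(); // always [u8; 8]
    assert_eq!(u64::from_le_bytes(wire), token_count);
}
```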

I chose to standardize on `u64` over `u32` because we don't store many
of them (so the extra size should be insignificant) and future models
may exceed `u32::MAX` tokens.
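
For scale (my arithmetic, not part of the commit):

```rust
fn main() {
    // u32 tops out at roughly 4.29 billion tokens.
    assert_eq!(u32::MAX, 4_294_967_295);

    // Widening costs 4 bytes per stored count; with only a handful of
    // counts per thread, that is tens of bytes, not kilobytes.
    let extra = std::mem::size_of::<u64>() - std::mem::size_of::<u32>();
    assert_eq!(extra, 4);
}
```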

Release Notes:

- N/A
Author: Richard Feldman
Date:   2025-06-17 10:43:07 -04:00
Parent: a391d67366
Commit: 5405c2c2d3
32 changed files with 191 additions and 192 deletions

@@ -272,8 +272,8 @@ impl DetailedSummaryState {
 #[derive(Default, Debug)]
 pub struct TotalTokenUsage {
-    pub total: usize,
-    pub max: usize,
+    pub total: u64,
+    pub max: u64,
 }

 impl TotalTokenUsage {
@@ -299,7 +299,7 @@ impl TotalTokenUsage {
         }
     }

-    pub fn add(&self, tokens: usize) -> TotalTokenUsage {
+    pub fn add(&self, tokens: u64) -> TotalTokenUsage {
         TotalTokenUsage {
             total: self.total + tokens,
             max: self.max,
@@ -396,7 +396,7 @@ pub struct ExceededWindowError {
     /// Model used when last message exceeded context window
     model_id: LanguageModelId,
     /// Token count including last message
-    token_count: usize,
+    token_count: u64,
 }

 impl Thread {
@@ -2769,7 +2769,7 @@ impl Thread {
             .unwrap_or_default();

         TotalTokenUsage {
-            total: token_usage.total_tokens() as usize,
+            total: token_usage.total_tokens(),
             max,
         }
     }
@@ -2791,7 +2791,7 @@ impl Thread {
         let total = self
             .token_usage_at_last_message()
             .unwrap_or_default()
-            .total_tokens() as usize;
+            .total_tokens();

         Some(TotalTokenUsage { total, max })
     }