Standardize on u64 for token counts (#32869)

Previously, token counts used a mix of `u32` and `usize`, sometimes within the same `struct`, e.g. `max_tokens: usize, max_output_tokens: Option<u32>`.

Although [tiktoken](https://github.com/openai/tiktoken) uses `usize`, token counts should be consistent across targets: the same model shouldn't get a smaller context window just because you're compiling for wasm32. These counts could also end up serialized over a binary protocol, where a pointer-sized integer's width varies by platform. For both reasons, `usize` is the wrong choice for token counts.

I chose to standardize on `u64` over `u32` because we don't store many
of them (so the extra size should be insignificant) and future models
may exceed `u32::MAX` tokens.
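As a quick illustration of both points, here is a minimal sketch (the limit value is hypothetical, not from any announced model): a count above `u32::MAX` always fits in `u64`, but whether it fits in `usize` depends on the target.

```rust
// Hypothetical future context window; exceeds u32::MAX (4_294_967_295).
const MAX_TOKENS: u64 = 10_000_000_000;

fn fits_in_usize(n: u64) -> bool {
    // On wasm32, usize is 32 bits, so this returns false for MAX_TOKENS;
    // on x86_64 it returns true. A u64 field avoids the inconsistency.
    usize::try_from(n).is_ok()
}
```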

Release Notes:

- N/A
Richard Feldman, 2025-06-17 10:43:07 -04:00, committed by GitHub
commit 5405c2c2d3 (parent a391d67366)
32 changed files with 191 additions and 192 deletions

@@ -152,7 +152,7 @@ pub enum Thinking {
 #[derive(Debug)]
 pub struct Request {
     pub model: String,
-    pub max_tokens: u32,
+    pub max_tokens: u64,
     pub messages: Vec<BedrockMessage>,
     pub tools: Option<BedrockToolConfig>,
     pub thinking: Option<Thinking>,
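The fixed width also matters for the binary-protocol concern from the description. A minimal sketch, assuming a serde-based binary format such as bincode (which this PR does not add):

```rust
use serde::{Deserialize, Serialize};

// With u64 fields, a value written on x86_64 can always be read back on
// wasm32; a usize field could hold a value that the 32-bit side cannot
// even represent, making round-trips target-dependent.
#[derive(Serialize, Deserialize)]
struct TokenLimits {
    max_tokens: u64,
}

fn encode(limits: &TokenLimits) -> Vec<u8> {
    bincode::serialize(limits).expect("plain integers always serialize")
}
```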

@@ -99,10 +99,10 @@ pub enum Model {
     #[serde(rename = "custom")]
     Custom {
         name: String,
-        max_tokens: usize,
+        max_tokens: u64,
         /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
         display_name: Option<String>,
-        max_output_tokens: Option<u32>,
+        max_output_tokens: Option<u64>,
         default_temperature: Option<f32>,
     },
 }
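One consequence at call sites: tiktoken-style tokenizers still report counts as `usize`, so comparing against the new `u64` limits takes a single widening cast at the boundary. A hypothetical helper, not part of this diff:

```rust
// usize -> u64 is lossless on every target Rust currently supports,
// so the cast can never change the count.
fn is_within_limit(token_count: usize, max_token_count: u64) -> bool {
    (token_count as u64) <= max_token_count
}
```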
@@ -309,7 +309,7 @@ impl Model {
         }
     }

-    pub fn max_token_count(&self) -> usize {
+    pub fn max_token_count(&self) -> u64 {
         match self {
             Self::Claude3_5SonnetV2
             | Self::Claude3Opus
@@ -328,7 +328,7 @@ impl Model {
         }
     }

-    pub fn max_output_tokens(&self) -> u32 {
+    pub fn max_output_tokens(&self) -> u64 {
         match self {
             Self::Claude3Opus | Self::Claude3Sonnet | Self::Claude3_5Haiku => 4_096,
             Self::Claude3_7Sonnet