Standardize on u64 for token counts (#32869)

Previously we were using a mix of `u32` and `usize`, e.g. `max_tokens:
usize, max_output_tokens: Option<u32>` in the same `struct`.
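A hypothetical illustration of that mix (the struct names and overall shape here are assumed; only the two field types come from the text above):

```rust
// Hypothetical illustration; struct names and shape are assumed,
// only the two field types come from the commit message above.
#[allow(dead_code)]
struct ModelLimitsBefore {
    max_tokens: usize,              // platform-dependent width
    max_output_tokens: Option<u32>, // a second, different width
}

#[allow(dead_code)]
struct ModelLimitsAfter {
    max_tokens: u64,                // one fixed-width type everywhere
    max_output_tokens: Option<u64>,
}

fn main() {}
```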

Although [tiktoken](https://github.com/openai/tiktoken) uses `usize`,
token counts should be consistent across targets (the same model
shouldn't suddenly get a smaller context window just because you're
compiling for wasm32), and these token counts could end up getting
serialized over a binary protocol, so `usize` is not the right choice
for token counts.
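To make the portability point concrete, here's a minimal sketch (the `encode_token_count` helper is hypothetical, not part of this change): `usize` is 4 bytes on wasm32 and 8 bytes on x86_64, while a fixed-width `u64` encodes identically everywhere.

```rust
// Hypothetical sketch, not code from this commit.
fn encode_token_count(count: u64) -> [u8; 8] {
    // A fixed-width u64 yields the same 8-byte encoding on every target.
    count.to_le_bytes()
}

fn main() {
    // Prints 4 on wasm32 and 8 on x86_64 -- the inconsistency at issue.
    println!("usize is {} bytes here", std::mem::size_of::<usize>());
    assert_eq!(encode_token_count(200_000), 200_000u64.to_le_bytes());
}
```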

I chose to standardize on `u64` over `u32` because we don't store many
of them (so the extra size should be insignificant) and future models
may exceed `u32::MAX` (about 4.3 billion) tokens.
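For scale, a quick check of the headroom each type offers:

```rust
fn main() {
    // The headroom behind choosing u64 over u32.
    println!("u32::MAX = {}", u32::MAX); // 4_294_967_295 (~4.3 billion)
    println!("u64::MAX = {}", u64::MAX); // 18_446_744_073_709_551_615 (~1.8e19)
}
```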

Release Notes:

- N/A
Richard Feldman 2025-06-17 10:43:07 -04:00 committed by GitHub
parent a391d67366
commit 5405c2c2d3
32 changed files with 191 additions and 192 deletions

@@ -3121,12 +3121,12 @@ fn invoked_slash_command_fold_placeholder(
 enum TokenState {
     NoTokensLeft {
-        max_token_count: usize,
-        token_count: usize,
+        max_token_count: u64,
+        token_count: u64,
     },
     HasMoreTokens {
-        max_token_count: usize,
-        token_count: usize,
+        max_token_count: u64,
+        token_count: u64,
         over_warn_threshold: bool,
     },
 }
@@ -3139,9 +3139,7 @@ fn token_state(context: &Entity<AssistantContext>, cx: &App) -> Option<TokenState> {
         .model;
     let token_count = context.read(cx).token_count()?;
     let max_token_count = model.max_token_count();
-    let remaining_tokens = max_token_count as isize - token_count as isize;
-    let token_state = if remaining_tokens <= 0 {
+    let token_state = if max_token_count.saturating_sub(token_count) == 0 {
         TokenState::NoTokensLeft {
             max_token_count,
             token_count,
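A side effect of the type change: the old `as isize` comparison above had to go. A standalone sketch of why `saturating_sub` is the safer replacement (the numbers are made up):

```rust
fn main() {
    // Made-up numbers, just to exercise the comparison.
    let max_token_count: u64 = 8_192;
    let token_count: u64 = 9_000;
    // Casting to isize (as the old code did) can wrap for large values;
    // saturating_sub stays unsigned and simply clamps at zero when the
    // budget is exhausted.
    let no_tokens_left = max_token_count.saturating_sub(token_count) == 0;
    assert!(no_tokens_left);
}
```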
@@ -3182,7 +3180,7 @@ fn size_for_image(data: &RenderImage, max_size: Size<Pixels>) -> Size<Pixels> {
     }
 }
-pub fn humanize_token_count(count: usize) -> String {
+pub fn humanize_token_count(count: u64) -> String {
     match count {
         0..=999 => count.to_string(),
         1000..=9999 => {
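The hunk is truncated mid-`match`. A standalone sketch in the same range-pattern style, with made-up formatting (not Zed's actual implementation, which continues past the truncation and may differ):

```rust
// Hedged sketch of a humanizer over u64 ranges; the real
// humanize_token_count may format differently.
fn humanize(count: u64) -> String {
    match count {
        0..=999 => count.to_string(),
        1_000..=999_999 => format!("{:.1}k", count as f64 / 1_000.0),
        _ => format!("{:.1}M", count as f64 / 1_000_000.0),
    }
}

fn main() {
    assert_eq!(humanize(999), "999");
    assert_eq!(humanize(1_500), "1.5k");
    assert_eq!(humanize(2_000_000), "2.0M");
}
```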