Standardize on u64 for token counts (#32869)
Previously we were using a mix of `u32` and `usize`, e.g. `max_tokens: usize, max_output_tokens: Option<u32>` in the same `struct`. Although [tiktoken](https://github.com/openai/tiktoken) uses `usize`, token counts should be consistent across targets (e.g. the same model doesn't suddenly get a smaller context window if you're compiling for wasm32), and these token counts could end up getting serialized using a binary protocol, so `usize` is not the right choice for token counts. I chose to standardize on `u64` over `u32` because we don't store many of them (so the extra size should be insignificant) and future models may exceed `u32::MAX` tokens.

Release Notes:

- N/A
This commit is contained in:
parent
a391d67366
commit
5405c2c2d3
32 changed files with 191 additions and 192 deletions
|
@ -678,7 +678,7 @@ pub struct AssistantContext {
|
|||
summary_task: Task<Option<()>>,
|
||||
completion_count: usize,
|
||||
pending_completions: Vec<PendingCompletion>,
|
||||
token_count: Option<usize>,
|
||||
token_count: Option<u64>,
|
||||
pending_token_count: Task<Option<()>>,
|
||||
pending_save: Task<Result<()>>,
|
||||
pending_cache_warming_task: Task<Option<()>>,
|
||||
|
@ -1250,7 +1250,7 @@ impl AssistantContext {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn token_count(&self) -> Option<usize> {
|
||||
pub fn token_count(&self) -> Option<u64> {
|
||||
self.token_count
|
||||
}
|
||||
|
||||
|
|
|
@ -3121,12 +3121,12 @@ fn invoked_slash_command_fold_placeholder(
|
|||
|
||||
enum TokenState {
|
||||
NoTokensLeft {
|
||||
max_token_count: usize,
|
||||
token_count: usize,
|
||||
max_token_count: u64,
|
||||
token_count: u64,
|
||||
},
|
||||
HasMoreTokens {
|
||||
max_token_count: usize,
|
||||
token_count: usize,
|
||||
max_token_count: u64,
|
||||
token_count: u64,
|
||||
over_warn_threshold: bool,
|
||||
},
|
||||
}
|
||||
|
@ -3139,9 +3139,7 @@ fn token_state(context: &Entity<AssistantContext>, cx: &App) -> Option<TokenStat
|
|||
.model;
|
||||
let token_count = context.read(cx).token_count()?;
|
||||
let max_token_count = model.max_token_count();
|
||||
|
||||
let remaining_tokens = max_token_count as isize - token_count as isize;
|
||||
let token_state = if remaining_tokens <= 0 {
|
||||
let token_state = if max_token_count.saturating_sub(token_count) == 0 {
|
||||
TokenState::NoTokensLeft {
|
||||
max_token_count,
|
||||
token_count,
|
||||
|
@ -3182,7 +3180,7 @@ fn size_for_image(data: &RenderImage, max_size: Size<Pixels>) -> Size<Pixels> {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn humanize_token_count(count: usize) -> String {
|
||||
pub fn humanize_token_count(count: u64) -> String {
|
||||
match count {
|
||||
0..=999 => count.to_string(),
|
||||
1000..=9999 => {
|
||||
|
|
|
@ -664,7 +664,7 @@ mod tests {
|
|||
format!("{}/{}", self.provider_id.0, self.name.0)
|
||||
}
|
||||
|
||||
fn max_token_count(&self) -> usize {
|
||||
fn max_token_count(&self) -> u64 {
|
||||
1000
|
||||
}
|
||||
|
||||
|
@ -672,7 +672,7 @@ mod tests {
|
|||
&self,
|
||||
_: LanguageModelRequest,
|
||||
_: &App,
|
||||
) -> BoxFuture<'static, http_client::Result<usize>> {
|
||||
) -> BoxFuture<'static, http_client::Result<u64>> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue