Standardize on u64 for token counts (#32869)
Previously we were using a mix of `u32` and `usize`, e.g. `max_tokens: usize, max_output_tokens: Option<u32>` in the same `struct`. Although [tiktoken](https://github.com/openai/tiktoken) uses `usize`, token counts should be consistent across targets (e.g. the same model shouldn't suddenly get a smaller context window when you're compiling for wasm32), and these token counts could end up getting serialized using a binary protocol, so `usize` is not the right choice for token counts. I chose to standardize on `u64` over `u32` because we don't store many of them (so the extra size should be insignificant) and future models may exceed `u32::MAX` tokens.

Release Notes:

- N/A
This commit is contained in:
parent
a391d67366
commit
5405c2c2d3
32 changed files with 191 additions and 192 deletions
|
@@ -750,7 +750,7 @@ struct EditingMessageState {
|
|||
editor: Entity<Editor>,
|
||||
context_strip: Entity<ContextStrip>,
|
||||
context_picker_menu_handle: PopoverMenuHandle<ContextPicker>,
|
||||
last_estimated_token_count: Option<usize>,
|
||||
last_estimated_token_count: Option<u64>,
|
||||
_subscriptions: [Subscription; 2],
|
||||
_update_token_count_task: Option<Task<()>>,
|
||||
}
|
||||
|
@@ -857,7 +857,7 @@ impl ActiveThread {
|
|||
}
|
||||
|
||||
/// Returns the editing message id and the estimated token count in the content
|
||||
pub fn editing_message_id(&self) -> Option<(MessageId, usize)> {
|
||||
pub fn editing_message_id(&self) -> Option<(MessageId, u64)> {
|
||||
self.editing_message
|
||||
.as_ref()
|
||||
.map(|(id, state)| (*id, state.last_estimated_token_count.unwrap_or(0)))
|
||||
|
|
|
@@ -76,7 +76,7 @@ pub struct MessageEditor {
|
|||
profile_selector: Entity<ProfileSelector>,
|
||||
edits_expanded: bool,
|
||||
editor_is_expanded: bool,
|
||||
last_estimated_token_count: Option<usize>,
|
||||
last_estimated_token_count: Option<u64>,
|
||||
update_token_count_task: Option<Task<()>>,
|
||||
_subscriptions: Vec<Subscription>,
|
||||
}
|
||||
|
@@ -1335,7 +1335,7 @@ impl MessageEditor {
|
|||
)
|
||||
}
|
||||
|
||||
pub fn last_estimated_token_count(&self) -> Option<usize> {
|
||||
pub fn last_estimated_token_count(&self) -> Option<u64> {
|
||||
self.last_estimated_token_count
|
||||
}
|
||||
|
||||
|
|
|
@@ -272,8 +272,8 @@ impl DetailedSummaryState {
|
|||
|
||||
#[derive(Default, Debug)]
|
||||
pub struct TotalTokenUsage {
|
||||
pub total: usize,
|
||||
pub max: usize,
|
||||
pub total: u64,
|
||||
pub max: u64,
|
||||
}
|
||||
|
||||
impl TotalTokenUsage {
|
||||
|
@@ -299,7 +299,7 @@ impl TotalTokenUsage {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn add(&self, tokens: usize) -> TotalTokenUsage {
|
||||
pub fn add(&self, tokens: u64) -> TotalTokenUsage {
|
||||
TotalTokenUsage {
|
||||
total: self.total + tokens,
|
||||
max: self.max,
|
||||
|
@@ -396,7 +396,7 @@ pub struct ExceededWindowError {
|
|||
/// Model used when last message exceeded context window
|
||||
model_id: LanguageModelId,
|
||||
/// Token count including last message
|
||||
token_count: usize,
|
||||
token_count: u64,
|
||||
}
|
||||
|
||||
impl Thread {
|
||||
|
@@ -2769,7 +2769,7 @@ impl Thread {
|
|||
.unwrap_or_default();
|
||||
|
||||
TotalTokenUsage {
|
||||
total: token_usage.total_tokens() as usize,
|
||||
total: token_usage.total_tokens(),
|
||||
max,
|
||||
}
|
||||
}
|
||||
|
@@ -2791,7 +2791,7 @@ impl Thread {
|
|||
let total = self
|
||||
.token_usage_at_last_message()
|
||||
.unwrap_or_default()
|
||||
.total_tokens() as usize;
|
||||
.total_tokens();
|
||||
|
||||
Some(TotalTokenUsage { total, max })
|
||||
}
|
||||
|
|
|
@@ -427,7 +427,7 @@ impl ToolUseState {
|
|||
|
||||
// Protect from overly large output
|
||||
let tool_output_limit = configured_model
|
||||
.map(|model| model.model.max_token_count() * BYTES_PER_TOKEN_ESTIMATE)
|
||||
.map(|model| model.model.max_token_count() as usize * BYTES_PER_TOKEN_ESTIMATE)
|
||||
.unwrap_or(usize::MAX);
|
||||
|
||||
let content = match tool_result {
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue