Standardize on u64 for token counts (#32869)
Previously we were using a mix of `u32` and `usize`, e.g. `max_tokens: usize, max_output_tokens: Option<u32>` in the same `struct`. Although [tiktoken](https://github.com/openai/tiktoken) uses `usize`, token counts should be consistent across targets (e.g. the same model doesn't suddenly get a smaller context window if you're compiling for wasm32), and these token counts could end up getting serialized using a binary protocol, so `usize` is not the right choice.

I chose to standardize on `u64` over `u32` because we don't store many of them (so the extra size should be insignificant) and future models may exceed `u32::MAX` tokens.

Release Notes:

- N/A
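As a minimal sketch of the before/after shape described above (the struct and field names here are illustrative, assembled from the fields quoted in the message, not Zed's exact definitions):

```rust
// Before: mixed integer widths for the same logical quantity.
#[allow(dead_code)]
struct ModelLimitsBefore {
    max_tokens: usize,              // width varies by target (32 vs. 64 bits)
    max_output_tokens: Option<u32>, // capped at u32::MAX tokens
}

// After: one width everywhere, independent of the compilation target.
struct ModelLimitsAfter {
    max_tokens: u64,
    max_output_tokens: Option<u64>,
}

fn main() {
    // On wasm32, usize is 32 bits, so the "before" shape silently
    // shrinks the representable context window on that target.
    let limits = ModelLimitsAfter {
        max_tokens: 200_000,
        max_output_tokens: Some(8_192),
    };
    println!("{} / {:?}", limits.max_tokens, limits.max_output_tokens);
}
```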
Parent: a391d67366
Commit: 5405c2c2d3

32 changed files with 191 additions and 192 deletions
```diff
@@ -427,7 +427,7 @@ impl ToolUseState
         // Protect from overly large output
         let tool_output_limit = configured_model
-            .map(|model| model.model.max_token_count() * BYTES_PER_TOKEN_ESTIMATE)
+            .map(|model| model.model.max_token_count() as usize * BYTES_PER_TOKEN_ESTIMATE)
             .unwrap_or(usize::MAX);

         let content = match tool_result {
```
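The `as usize` cast appears because `max_token_count()` now returns `u64` while the byte-limit arithmetic stays in `usize`. A standalone sketch of the same pattern follows; the constant's value and the token count are assumptions for illustration, not Zed's real numbers:

```rust
// Hypothetical values, not Zed's actual constants.
const BYTES_PER_TOKEN_ESTIMATE: usize = 3;

fn max_token_count() -> u64 {
    200_000 // token counts are u64 after this change
}

fn main() {
    // The cast bridges the u64 token count into usize byte arithmetic.
    // Lossless on 64-bit targets; on a 32-bit target `as usize` keeps
    // only the low 32 bits, which is fine as long as real token counts
    // stay below u32::MAX (true for current models).
    let tool_output_limit = max_token_count() as usize * BYTES_PER_TOKEN_ESTIMATE;
    assert_eq!(tool_output_limit, 600_000);
    println!("tool_output_limit = {tool_output_limit}");
}
```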