Standardize on u64 for token counts (#32869)
Previously we used a mix of `u32` and `usize` for token counts, e.g. `max_tokens: usize, max_output_tokens: Option<u32>` in the same `struct`. Although [tiktoken](https://github.com/openai/tiktoken) uses `usize`, token counts should be consistent across targets: the same model shouldn't suddenly get a smaller context window when you compile for wasm32, and these counts could end up serialized over a binary protocol, so a pointer-sized integer is the wrong choice. I standardized on `u64` rather than `u32` because we don't store many of these values (so the extra size should be insignificant) and future models may exceed `u32::MAX` tokens.

Release Notes:

- N/A
parent: a391d67366
commit: 5405c2c2d3
32 changed files with 191 additions and 192 deletions
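As a quick illustration of the portability argument in the message (a minimal sketch, not part of the commit), `usize` changes width with the compilation target while `u64` does not:

```rust
// Minimal sketch: `usize` follows the target's pointer width, so the same
// source code sees different integer ranges on different targets.
fn main() {
    // Prints 8 on x86_64, but 4 on wasm32 -- a token count that fits on the
    // host could silently truncate when the same code runs in the browser.
    println!("size_of::<usize>() = {}", std::mem::size_of::<usize>());

    // `u64` is the same width everywhere, and converting from `usize` is
    // lossless on all targets Rust supports today (usize <= 64 bits).
    let tokens: usize = 200_000;
    let portable: u64 = tokens as u64;
    println!("portable token count: {portable}");
}
```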
The hunks below are from the OpenAI provider (one of the 32 changed files):

```diff
@@ -43,9 +43,9 @@ pub struct OpenAiSettings {
 pub struct AvailableModel {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
-    pub max_output_tokens: Option<u32>,
-    pub max_completion_tokens: Option<u32>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
+    pub max_completion_tokens: Option<u64>,
 }

 pub struct OpenAiLanguageModelProvider {
@@ -312,11 +312,11 @@ impl LanguageModel for OpenAiLanguageModel {
         format!("openai/{}", self.model.id())
     }

-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }

-    fn max_output_tokens(&self) -> Option<u32> {
+    fn max_output_tokens(&self) -> Option<u64> {
         self.model.max_output_tokens()
     }

@@ -324,7 +324,7 @@ impl LanguageModel for OpenAiLanguageModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         count_open_ai_tokens(request, self.model.clone(), cx)
     }

@@ -355,7 +355,7 @@ impl LanguageModel for OpenAiLanguageModel {
 pub fn into_open_ai(
     request: LanguageModelRequest,
     model: &Model,
-    max_output_tokens: Option<u32>,
+    max_output_tokens: Option<u64>,
 ) -> open_ai::Request {
     let stream = !model.id().starts_with("o1-");

@@ -606,7 +606,7 @@ pub fn count_open_ai_tokens(
     request: LanguageModelRequest,
     model: Model,
     cx: &App,
-) -> BoxFuture<'static, Result<usize>> {
+) -> BoxFuture<'static, Result<u64>> {
     cx.background_spawn(async move {
         let messages = request
             .messages
@@ -652,6 +652,7 @@ pub fn count_open_ai_tokens(
             | Model::O3Mini
             | Model::O4Mini => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
         }
+        .map(|tokens| tokens as u64)
     })
     .boxed()
 }
```
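One detail worth noting in the last hunk: `tiktoken_rs` still reports counts as `usize`, so the commit bridges the two with `tokens as u64`. A standalone sketch (hypothetical helper, not from the codebase) of why that cast is safe:

```rust
/// Hypothetical helper mirroring the `.map(|tokens| tokens as u64)` above:
/// tiktoken-style counters hand back `usize`, while the rest of the code
/// now speaks `u64`.
fn to_token_count(raw: usize) -> u64 {
    // Lossless on every target Rust supports: usize is at most 64 bits wide,
    // so every usize value fits in a u64. The reverse direction (u64 -> usize)
    // would need a checked conversion like usize::try_from on 32-bit targets.
    raw as u64
}

fn main() {
    assert_eq!(to_token_count(128_000), 128_000u64);
}
```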