Standardize on u64 for token counts (#32869)
Previously we were using a mix of `u32` and `usize` for token counts, e.g. `max_tokens: usize, max_output_tokens: Option<u32>` in the same `struct`. Although [tiktoken](https://github.com/openai/tiktoken) uses `usize`, token counts should be consistent across compile targets (the same model shouldn't suddenly get a smaller context window when compiling for wasm32), and these counts may end up serialized over a binary protocol, so `usize` is not the right choice. I chose to standardize on `u64` rather than `u32` because we don't store many of them (the extra size is insignificant) and future models may exceed `u32::MAX` tokens.

Release Notes:

- N/A
Parent: a391d67366
Commit: 5405c2c2d3
32 changed files with 191 additions and 192 deletions
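To make the convention concrete, here is a minimal Rust sketch of the pattern the diff applies across providers; it is illustrative only and not code from the Zed repository. The `TokenLimits` struct and `canonical_token_count` helper are hypothetical names; the point is that stored token counts are `u64`, while tokenizers that report `usize` (such as tiktoken) are widened at the boundary with `as u64`, mirroring the `.map(|tokens| tokens as u64)` calls in the hunks below.

```rust
/// Hypothetical container for token limits, mirroring the `max_tokens` /
/// `max_output_tokens` fields that the diff converts to `u64`.
#[derive(Debug, Clone, Copy)]
struct TokenLimits {
    max_tokens: u64,                // context window size
    max_output_tokens: Option<u64>, // provider-specific output cap, if any
}

/// Widen a tokenizer result (`usize`) into the canonical `u64` representation.
/// On 32-bit targets such as wasm32 this is a real widening; on 64-bit targets
/// it is a no-op, so the stored value is the same everywhere.
fn canonical_token_count(tokens_from_tokenizer: usize) -> u64 {
    tokens_from_tokenizer as u64
}

fn main() {
    let limits = TokenLimits {
        max_tokens: 200_000,
        max_output_tokens: Some(8_192),
    };
    let counted = canonical_token_count(1_234);
    println!(
        "counted {counted} of {} tokens (output cap: {:?})",
        limits.max_tokens, limits.max_output_tokens
    );
    // usize is 4 bytes on wasm32 and 8 bytes on x86_64; u64 is always 8 bytes,
    // which matters once counts cross a serialization boundary.
    println!(
        "usize = {} bytes, u64 = {} bytes",
        std::mem::size_of::<usize>(),
        std::mem::size_of::<u64>()
    );
}
```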
Anthropic provider:

```diff
@@ -51,12 +51,12 @@ pub struct AvailableModel {
     /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
     pub display_name: Option<String>,
     /// The model's context window size.
-    pub max_tokens: usize,
+    pub max_tokens: u64,
     /// A model `name` to substitute when calling tools, in case the primary model doesn't support tool calling.
     pub tool_override: Option<String>,
     /// Configuration of Anthropic's caching API.
     pub cache_configuration: Option<LanguageModelCacheConfiguration>,
-    pub max_output_tokens: Option<u32>,
+    pub max_output_tokens: Option<u64>,
     pub default_temperature: Option<f32>,
     #[serde(default)]
     pub extra_beta_headers: Vec<String>,
@@ -321,7 +321,7 @@ pub struct AnthropicModel {
 pub fn count_anthropic_tokens(
     request: LanguageModelRequest,
     cx: &App,
-) -> BoxFuture<'static, Result<usize>> {
+) -> BoxFuture<'static, Result<u64>> {
     cx.background_spawn(async move {
         let messages = request.messages;
         let mut tokens_from_images = 0;
@@ -377,7 +377,7 @@ pub fn count_anthropic_tokens(
         // Tiktoken doesn't yet support these models, so we manually use the
         // same tokenizer as GPT-4.
         tiktoken_rs::num_tokens_from_messages("gpt-4", &string_messages)
-            .map(|tokens| tokens + tokens_from_images)
+            .map(|tokens| (tokens + tokens_from_images) as u64)
     })
     .boxed()
 }
@@ -461,11 +461,11 @@ impl LanguageModel for AnthropicModel {
         self.state.read(cx).api_key.clone()
     }
 
-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }
 
-    fn max_output_tokens(&self) -> Option<u32> {
+    fn max_output_tokens(&self) -> Option<u64> {
         Some(self.model.max_output_tokens())
     }
 
@@ -473,7 +473,7 @@ impl LanguageModel for AnthropicModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         count_anthropic_tokens(request, cx)
     }
 
@@ -518,7 +518,7 @@ pub fn into_anthropic(
     request: LanguageModelRequest,
     model: String,
     default_temperature: f32,
-    max_output_tokens: u32,
+    max_output_tokens: u64,
     mode: AnthropicModelMode,
 ) -> anthropic::Request {
     let mut new_messages: Vec<anthropic::Message> = Vec::new();
```
Bedrock provider:

```diff
@@ -88,9 +88,9 @@ pub enum BedrockAuthMethod {
 pub struct AvailableModel {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
+    pub max_tokens: u64,
     pub cache_configuration: Option<LanguageModelCacheConfiguration>,
-    pub max_output_tokens: Option<u32>,
+    pub max_output_tokens: Option<u64>,
     pub default_temperature: Option<f32>,
     pub mode: Option<ModelMode>,
 }
@@ -503,11 +503,11 @@ impl LanguageModel for BedrockModel {
         format!("bedrock/{}", self.model.id())
     }
 
-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }
 
-    fn max_output_tokens(&self) -> Option<u32> {
+    fn max_output_tokens(&self) -> Option<u64> {
         Some(self.model.max_output_tokens())
     }
 
@@ -515,7 +515,7 @@ impl LanguageModel for BedrockModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         get_bedrock_tokens(request, cx)
     }
 
@@ -583,7 +583,7 @@ pub fn into_bedrock(
     request: LanguageModelRequest,
     model: String,
     default_temperature: f32,
-    max_output_tokens: u32,
+    max_output_tokens: u64,
     mode: BedrockModelMode,
 ) -> Result<bedrock::Request> {
     let mut new_messages: Vec<BedrockMessage> = Vec::new();
@@ -747,7 +747,7 @@ pub fn into_bedrock(
 pub fn get_bedrock_tokens(
     request: LanguageModelRequest,
     cx: &App,
-) -> BoxFuture<'static, Result<usize>> {
+) -> BoxFuture<'static, Result<u64>> {
     cx.background_executor()
         .spawn(async move {
             let messages = request.messages;
@@ -799,7 +799,7 @@ pub fn get_bedrock_tokens(
             // Tiktoken doesn't yet support these models, so we manually use the
             // same tokenizer as GPT-4.
             tiktoken_rs::num_tokens_from_messages("gpt-4", &string_messages)
-                .map(|tokens| tokens + tokens_from_images)
+                .map(|tokens| (tokens + tokens_from_images) as u64)
         })
         .boxed()
 }
@@ -947,9 +947,9 @@ pub fn map_to_language_model_completion_events(
             let completion_event =
                 LanguageModelCompletionEvent::UsageUpdate(
                     TokenUsage {
-                        input_tokens: metadata.input_tokens as u32,
+                        input_tokens: metadata.input_tokens as u64,
                         output_tokens: metadata.output_tokens
-                            as u32,
+                            as u64,
                         cache_creation_input_tokens: default(),
                         cache_read_input_tokens: default(),
                     },
```
Cloud provider:

```diff
@@ -73,9 +73,9 @@ pub struct AvailableModel {
     /// The size of the context window, indicating the maximum number of tokens the model can process.
     pub max_tokens: usize,
     /// The maximum number of output tokens allowed by the model.
-    pub max_output_tokens: Option<u32>,
+    pub max_output_tokens: Option<u64>,
     /// The maximum number of completion tokens allowed by the model (o1-* only)
-    pub max_completion_tokens: Option<u32>,
+    pub max_completion_tokens: Option<u64>,
     /// Override this model with a different Anthropic model for tool calls.
     pub tool_override: Option<String>,
     /// Indicates whether this custom model supports caching.
@@ -715,8 +715,8 @@ impl LanguageModel for CloudLanguageModel {
         }
     }
 
-    fn max_token_count(&self) -> usize {
-        self.model.max_token_count
+    fn max_token_count(&self) -> u64 {
+        self.model.max_token_count as u64
     }
 
     fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
@@ -737,7 +737,7 @@ impl LanguageModel for CloudLanguageModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         match self.model.provider {
             zed_llm_client::LanguageModelProvider::Anthropic => count_anthropic_tokens(request, cx),
             zed_llm_client::LanguageModelProvider::OpenAi => {
@@ -786,7 +786,7 @@ impl LanguageModel for CloudLanguageModel {
                     let response_body: CountTokensResponse =
                         serde_json::from_str(&response_body)?;
 
-                    Ok(response_body.tokens)
+                    Ok(response_body.tokens as u64)
                 } else {
                     Err(anyhow!(ApiError {
                         status,
@@ -821,7 +821,7 @@ impl LanguageModel for CloudLanguageModel {
                     request,
                     self.model.id.to_string(),
                     1.0,
-                    self.model.max_output_tokens as u32,
+                    self.model.max_output_tokens as u64,
                     if self.model.id.0.ends_with("-thinking") {
                         AnthropicModelMode::Thinking {
                             budget_tokens: Some(4_096),
```
Copilot Chat provider:

```diff
@@ -237,7 +237,7 @@ impl LanguageModel for CopilotChatLanguageModel {
         format!("copilot_chat/{}", self.model.id())
     }
 
-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }
 
@@ -245,7 +245,7 @@ impl LanguageModel for CopilotChatLanguageModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         match self.model.vendor() {
             ModelVendor::Anthropic => count_anthropic_tokens(request, cx),
             ModelVendor::Google => count_google_tokens(request, cx),
```
DeepSeek provider:

```diff
@@ -49,8 +49,8 @@ pub struct DeepSeekSettings {
 pub struct AvailableModel {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
-    pub max_output_tokens: Option<u32>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
 }
 
 pub struct DeepSeekLanguageModelProvider {
@@ -306,11 +306,11 @@ impl LanguageModel for DeepSeekLanguageModel {
         format!("deepseek/{}", self.model.id())
     }
 
-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }
 
-    fn max_output_tokens(&self) -> Option<u32> {
+    fn max_output_tokens(&self) -> Option<u64> {
         self.model.max_output_tokens()
     }
 
@@ -318,7 +318,7 @@ impl LanguageModel for DeepSeekLanguageModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         cx.background_spawn(async move {
             let messages = request
                 .messages
@@ -335,7 +335,7 @@ impl LanguageModel for DeepSeekLanguageModel {
                 })
                 .collect::<Vec<_>>();
 
-            tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
+            tiktoken_rs::num_tokens_from_messages("gpt-4", &messages).map(|tokens| tokens as u64)
         })
         .boxed()
     }
@@ -365,7 +365,7 @@ impl LanguageModel for DeepSeekLanguageModel {
 pub fn into_deepseek(
     request: LanguageModelRequest,
     model: &deepseek::Model,
-    max_output_tokens: Option<u32>,
+    max_output_tokens: Option<u64>,
 ) -> deepseek::Request {
     let is_reasoner = *model == deepseek::Model::Reasoner;
 
```
Google provider:

```diff
@@ -79,7 +79,7 @@ impl From<GoogleModelMode> for ModelMode {
 pub struct AvailableModel {
     name: String,
     display_name: Option<String>,
-    max_tokens: usize,
+    max_tokens: u64,
     mode: Option<ModelMode>,
 }
 
@@ -365,7 +365,7 @@ impl LanguageModel for GoogleLanguageModel {
         format!("google/{}", self.model.request_id())
     }
 
-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }
 
@@ -373,7 +373,7 @@ impl LanguageModel for GoogleLanguageModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         let model_id = self.model.request_id().to_string();
         let request = into_google(request, model_id.clone(), self.model.mode());
         let http_client = self.http_client.clone();
@@ -702,7 +702,7 @@ impl GoogleEventMapper {
 pub fn count_google_tokens(
     request: LanguageModelRequest,
     cx: &App,
-) -> BoxFuture<'static, Result<usize>> {
+) -> BoxFuture<'static, Result<u64>> {
     // We couldn't use the GoogleLanguageModelProvider to count tokens because the github copilot doesn't have the access to google_ai directly.
     // So we have to use tokenizer from tiktoken_rs to count tokens.
     cx.background_spawn(async move {
@@ -723,7 +723,7 @@ pub fn count_google_tokens(
 
         // Tiktoken doesn't yet support these models, so we manually use the
         // same tokenizer as GPT-4.
-        tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
+        tiktoken_rs::num_tokens_from_messages("gpt-4", &messages).map(|tokens| tokens as u64)
     })
     .boxed()
 }
@@ -750,10 +750,10 @@ fn update_usage(usage: &mut UsageMetadata, new: &UsageMetadata) {
 }
 
 fn convert_usage(usage: &UsageMetadata) -> language_model::TokenUsage {
-    let prompt_tokens = usage.prompt_token_count.unwrap_or(0) as u32;
-    let cached_tokens = usage.cached_content_token_count.unwrap_or(0) as u32;
+    let prompt_tokens = usage.prompt_token_count.unwrap_or(0);
+    let cached_tokens = usage.cached_content_token_count.unwrap_or(0);
     let input_tokens = prompt_tokens - cached_tokens;
-    let output_tokens = usage.candidates_token_count.unwrap_or(0) as u32;
+    let output_tokens = usage.candidates_token_count.unwrap_or(0);
 
     language_model::TokenUsage {
         input_tokens,
```
LM Studio provider:

```diff
@@ -44,7 +44,7 @@ pub struct LmStudioSettings {
 pub struct AvailableModel {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
+    pub max_tokens: u64,
     pub supports_tool_calls: bool,
     pub supports_images: bool,
 }
@@ -414,7 +414,7 @@ impl LanguageModel for LmStudioLanguageModel {
         format!("lmstudio/{}", self.model.id())
     }
 
-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }
 
@@ -422,7 +422,7 @@ impl LanguageModel for LmStudioLanguageModel {
         &self,
         request: LanguageModelRequest,
         _cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         // Endpoint for this is coming soon. In the meantime, hacky estimation
         let token_count = request
             .messages
@@ -430,7 +430,7 @@ impl LanguageModel for LmStudioLanguageModel {
             .map(|msg| msg.string_contents().split_whitespace().count())
             .sum::<usize>();
 
-        let estimated_tokens = (token_count as f64 * 0.75) as usize;
+        let estimated_tokens = (token_count as f64 * 0.75) as u64;
         async move { Ok(estimated_tokens) }.boxed()
     }
 
```
Mistral provider:

```diff
@@ -43,9 +43,9 @@ pub struct MistralSettings {
 pub struct AvailableModel {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
-    pub max_output_tokens: Option<u32>,
-    pub max_completion_tokens: Option<u32>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
+    pub max_completion_tokens: Option<u64>,
     pub supports_tools: Option<bool>,
     pub supports_images: Option<bool>,
 }
@@ -322,11 +322,11 @@ impl LanguageModel for MistralLanguageModel {
         format!("mistral/{}", self.model.id())
     }
 
-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }
 
-    fn max_output_tokens(&self) -> Option<u32> {
+    fn max_output_tokens(&self) -> Option<u64> {
         self.model.max_output_tokens()
     }
 
@@ -334,7 +334,7 @@ impl LanguageModel for MistralLanguageModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         cx.background_spawn(async move {
             let messages = request
                 .messages
@@ -351,7 +351,7 @@ impl LanguageModel for MistralLanguageModel {
                 })
                 .collect::<Vec<_>>();
 
-            tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
+            tiktoken_rs::num_tokens_from_messages("gpt-4", &messages).map(|tokens| tokens as u64)
         })
         .boxed()
     }
@@ -386,7 +386,7 @@ impl LanguageModel for MistralLanguageModel {
 pub fn into_mistral(
     request: LanguageModelRequest,
     model: String,
-    max_output_tokens: Option<u32>,
+    max_output_tokens: Option<u64>,
 ) -> mistral::Request {
     let stream = true;
 
```
Ollama provider:

```diff
@@ -46,7 +46,7 @@ pub struct AvailableModel {
     /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
     pub display_name: Option<String>,
     /// The Context Length parameter to the model (aka num_ctx or n_ctx)
-    pub max_tokens: usize,
+    pub max_tokens: u64,
     /// The number of seconds to keep the connection open after the last request
     pub keep_alive: Option<KeepAlive>,
     /// Whether the model supports tools
@@ -377,7 +377,7 @@ impl LanguageModel for OllamaLanguageModel {
         format!("ollama/{}", self.model.id())
     }
 
-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }
 
@@ -385,7 +385,7 @@ impl LanguageModel for OllamaLanguageModel {
         &self,
         request: LanguageModelRequest,
         _cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         // There is no endpoint for this _yet_ in Ollama
         // see: https://github.com/ollama/ollama/issues/1716 and https://github.com/ollama/ollama/issues/3582
         let token_count = request
@@ -395,7 +395,7 @@ impl LanguageModel for OllamaLanguageModel {
             .sum::<usize>()
            / 4;
 
-        async move { Ok(token_count) }.boxed()
+        async move { Ok(token_count as u64) }.boxed()
     }
 
     fn stream_completion(
```
OpenAI provider:

```diff
@@ -43,9 +43,9 @@ pub struct OpenAiSettings {
 pub struct AvailableModel {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
-    pub max_output_tokens: Option<u32>,
-    pub max_completion_tokens: Option<u32>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
+    pub max_completion_tokens: Option<u64>,
 }
 
 pub struct OpenAiLanguageModelProvider {
@@ -312,11 +312,11 @@ impl LanguageModel for OpenAiLanguageModel {
         format!("openai/{}", self.model.id())
     }
 
-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }
 
-    fn max_output_tokens(&self) -> Option<u32> {
+    fn max_output_tokens(&self) -> Option<u64> {
         self.model.max_output_tokens()
     }
 
@@ -324,7 +324,7 @@ impl LanguageModel for OpenAiLanguageModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         count_open_ai_tokens(request, self.model.clone(), cx)
     }
 
@@ -355,7 +355,7 @@ impl LanguageModel for OpenAiLanguageModel {
 pub fn into_open_ai(
     request: LanguageModelRequest,
     model: &Model,
-    max_output_tokens: Option<u32>,
+    max_output_tokens: Option<u64>,
 ) -> open_ai::Request {
     let stream = !model.id().starts_with("o1-");
 
@@ -606,7 +606,7 @@ pub fn count_open_ai_tokens(
     request: LanguageModelRequest,
     model: Model,
     cx: &App,
-) -> BoxFuture<'static, Result<usize>> {
+) -> BoxFuture<'static, Result<u64>> {
     cx.background_spawn(async move {
         let messages = request
             .messages
@@ -652,6 +652,7 @@ pub fn count_open_ai_tokens(
             | Model::O3Mini
             | Model::O4Mini => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
         }
+        .map(|tokens| tokens as u64)
     })
     .boxed()
 }
```
OpenRouter provider:

```diff
@@ -40,9 +40,9 @@ pub struct OpenRouterSettings {
 pub struct AvailableModel {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
-    pub max_output_tokens: Option<u32>,
-    pub max_completion_tokens: Option<u32>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
+    pub max_completion_tokens: Option<u64>,
     pub supports_tools: Option<bool>,
     pub supports_images: Option<bool>,
 }
@@ -331,11 +331,11 @@ impl LanguageModel for OpenRouterLanguageModel {
         format!("openrouter/{}", self.model.id())
     }
 
-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }
 
-    fn max_output_tokens(&self) -> Option<u32> {
+    fn max_output_tokens(&self) -> Option<u64> {
         self.model.max_output_tokens()
     }
 
@@ -355,7 +355,7 @@ impl LanguageModel for OpenRouterLanguageModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         count_open_router_tokens(request, self.model.clone(), cx)
     }
 
@@ -386,7 +386,7 @@ impl LanguageModel for OpenRouterLanguageModel {
 pub fn into_open_router(
     request: LanguageModelRequest,
     model: &Model,
-    max_output_tokens: Option<u32>,
+    max_output_tokens: Option<u64>,
 ) -> open_router::Request {
     let mut messages = Vec::new();
     for message in request.messages {
@@ -640,7 +640,7 @@ pub fn count_open_router_tokens(
     request: LanguageModelRequest,
     _model: open_router::Model,
     cx: &App,
-) -> BoxFuture<'static, Result<usize>> {
+) -> BoxFuture<'static, Result<u64>> {
     cx.background_spawn(async move {
         let messages = request
             .messages
@@ -657,7 +657,7 @@ pub fn count_open_router_tokens(
             })
             .collect::<Vec<_>>();
 
-        tiktoken_rs::num_tokens_from_messages("gpt-4o", &messages)
+        tiktoken_rs::num_tokens_from_messages("gpt-4o", &messages).map(|tokens| tokens as u64)
     })
     .boxed()
 }
```