Standardize on u64 for token counts (#32869)
Previously we were using a mix of `u32` and `usize`, e.g. `max_tokens: usize, max_output_tokens: Option<u32>` in the same `struct`. Although [tiktoken](https://github.com/openai/tiktoken) uses `usize`, token counts should be consistent across targets (e.g. the same model doesn't suddenly get a smaller context window if you're compiling for wasm32), and these token counts could end up getting serialized using a binary protocol, so `usize` is not the right choice for token counts.

I chose to standardize on `u64` over `u32` because we don't store many of them (so the extra size should be insignificant) and future models may exceed `u32::MAX` tokens.

Release Notes:

- N/A
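For a concrete sense of the portability point: `usize` is 8 bytes on x86_64 but only 4 on wasm32, so a binary encoding of a `usize` token count changes width (and ceiling) with the compile target, while `u64` is the same everywhere. A minimal sketch using only the standard library (no particular wire protocol assumed):

fn main() {
    // usize is target-dependent: 8 bytes on x86_64, 4 bytes on wasm32.
    println!("usize is {} bytes on this target", std::mem::size_of::<usize>());

    // A u64 token count always occupies the same 8 bytes on the wire,
    // so it round-trips identically on every target.
    let max_tokens: u64 = 200_000;
    let wire: [u8; 8] = max_tokens.to_le_bytes();
    assert_eq!(u64::from_le_bytes(wire), max_tokens);

    // And u64 keeps headroom for models that exceed u32::MAX tokens.
    assert!(u64::MAX > u32::MAX as u64);
}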
parent a391d67366
commit 5405c2c2d3
32 changed files with 191 additions and 192 deletions
@@ -750,7 +750,7 @@ struct EditingMessageState {
     editor: Entity<Editor>,
     context_strip: Entity<ContextStrip>,
     context_picker_menu_handle: PopoverMenuHandle<ContextPicker>,
-    last_estimated_token_count: Option<usize>,
+    last_estimated_token_count: Option<u64>,
     _subscriptions: [Subscription; 2],
     _update_token_count_task: Option<Task<()>>,
 }

@@ -857,7 +857,7 @@ impl ActiveThread {
     }

     /// Returns the editing message id and the estimated token count in the content
-    pub fn editing_message_id(&self) -> Option<(MessageId, usize)> {
+    pub fn editing_message_id(&self) -> Option<(MessageId, u64)> {
         self.editing_message
             .as_ref()
             .map(|(id, state)| (*id, state.last_estimated_token_count.unwrap_or(0)))

@@ -76,7 +76,7 @@ pub struct MessageEditor {
     profile_selector: Entity<ProfileSelector>,
     edits_expanded: bool,
     editor_is_expanded: bool,
-    last_estimated_token_count: Option<usize>,
+    last_estimated_token_count: Option<u64>,
     update_token_count_task: Option<Task<()>>,
     _subscriptions: Vec<Subscription>,
 }

@@ -1335,7 +1335,7 @@ impl MessageEditor {
         )
     }

-    pub fn last_estimated_token_count(&self) -> Option<usize> {
+    pub fn last_estimated_token_count(&self) -> Option<u64> {
         self.last_estimated_token_count
     }

@@ -272,8 +272,8 @@ impl DetailedSummaryState {

 #[derive(Default, Debug)]
 pub struct TotalTokenUsage {
-    pub total: usize,
-    pub max: usize,
+    pub total: u64,
+    pub max: u64,
 }

 impl TotalTokenUsage {

@@ -299,7 +299,7 @@ impl TotalTokenUsage {
         }
     }

-    pub fn add(&self, tokens: usize) -> TotalTokenUsage {
+    pub fn add(&self, tokens: u64) -> TotalTokenUsage {
         TotalTokenUsage {
             total: self.total + tokens,
             max: self.max,

@@ -396,7 +396,7 @@ pub struct ExceededWindowError {
     /// Model used when last message exceeded context window
     model_id: LanguageModelId,
     /// Token count including last message
-    token_count: usize,
+    token_count: u64,
 }

 impl Thread {

@@ -2769,7 +2769,7 @@ impl Thread {
             .unwrap_or_default();

         TotalTokenUsage {
-            total: token_usage.total_tokens() as usize,
+            total: token_usage.total_tokens(),
             max,
         }
     }

@@ -2791,7 +2791,7 @@ impl Thread {
         let total = self
             .token_usage_at_last_message()
             .unwrap_or_default()
-            .total_tokens() as usize;
+            .total_tokens();

         Some(TotalTokenUsage { total, max })
     }

@@ -427,7 +427,7 @@ impl ToolUseState {

         // Protect from overly large output
         let tool_output_limit = configured_model
-            .map(|model| model.model.max_token_count() * BYTES_PER_TOKEN_ESTIMATE)
+            .map(|model| model.model.max_token_count() as usize * BYTES_PER_TOKEN_ESTIMATE)
             .unwrap_or(usize::MAX);

         let content = match tool_result {
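Note the one place the cast runs the other way: in the `tool_output_limit` hunk just above, the limit is a byte budget compared against in-memory string lengths, which are inherently `usize`, so the `u64` token count is narrowed with `as usize` at the point of use. A hedged sketch of that boundary (the value of `BYTES_PER_TOKEN_ESTIMATE` here is a placeholder, not the real constant):

// Placeholder value; the real constant lives alongside ToolUseState.
const BYTES_PER_TOKEN_ESTIMATE: usize = 3;

// Token counts are u64 everywhere else, but this budget is compared
// against String::len(), which is usize, so narrow at this one boundary.
fn tool_output_limit(max_token_count: Option<u64>) -> usize {
    max_token_count
        .map(|tokens| tokens as usize * BYTES_PER_TOKEN_ESTIMATE)
        .unwrap_or(usize::MAX)
}

fn main() {
    assert_eq!(tool_output_limit(Some(1_000)), 3_000);
    assert_eq!(tool_output_limit(None), usize::MAX);
}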
@@ -15,7 +15,7 @@ pub const ANTHROPIC_API_URL: &str = "https://api.anthropic.com";
 #[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
 #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
 pub struct AnthropicModelCacheConfiguration {
-    pub min_total_token: usize,
+    pub min_total_token: u64,
     pub should_speculate: bool,
     pub max_cache_anchors: usize,
 }

@@ -68,14 +68,14 @@ pub enum Model {
     #[serde(rename = "custom")]
     Custom {
         name: String,
-        max_tokens: usize,
+        max_tokens: u64,
         /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
         display_name: Option<String>,
         /// Override this model with a different Anthropic model for tool calls.
         tool_override: Option<String>,
         /// Indicates whether this custom model supports caching.
         cache_configuration: Option<AnthropicModelCacheConfiguration>,
-        max_output_tokens: Option<u32>,
+        max_output_tokens: Option<u64>,
         default_temperature: Option<f32>,
         #[serde(default)]
         extra_beta_headers: Vec<String>,

@@ -211,7 +211,7 @@ impl Model {
         }
     }

-    pub fn max_token_count(&self) -> usize {
+    pub fn max_token_count(&self) -> u64 {
         match self {
             Self::ClaudeOpus4
             | Self::ClaudeOpus4Thinking

@@ -228,7 +228,7 @@ impl Model {
         }
     }

-    pub fn max_output_tokens(&self) -> u32 {
+    pub fn max_output_tokens(&self) -> u64 {
         match self {
             Self::ClaudeOpus4
             | Self::ClaudeOpus4Thinking

@@ -693,7 +693,7 @@ pub enum StringOrContents {
 #[derive(Debug, Serialize, Deserialize)]
 pub struct Request {
     pub model: String,
-    pub max_tokens: u32,
+    pub max_tokens: u64,
     pub messages: Vec<Message>,
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub tools: Vec<Tool>,

@@ -730,13 +730,13 @@ pub struct Metadata {
 #[derive(Debug, Serialize, Deserialize, Default)]
 pub struct Usage {
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub input_tokens: Option<u32>,
+    pub input_tokens: Option<u64>,
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub output_tokens: Option<u32>,
+    pub output_tokens: Option<u64>,
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub cache_creation_input_tokens: Option<u32>,
+    pub cache_creation_input_tokens: Option<u64>,
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub cache_read_input_tokens: Option<u32>,
+    pub cache_read_input_tokens: Option<u64>,
 }

 #[derive(Debug, Serialize, Deserialize)]

@@ -846,7 +846,7 @@ impl ApiError {
         matches!(self.error_type.as_str(), "rate_limit_error")
     }

-    pub fn match_window_exceeded(&self) -> Option<usize> {
+    pub fn match_window_exceeded(&self) -> Option<u64> {
         let Some(ApiErrorCode::InvalidRequestError) = self.code() else {
             return None;
         };

@@ -855,12 +855,12 @@ impl ApiError {
     }
 }

-pub fn parse_prompt_too_long(message: &str) -> Option<usize> {
+pub fn parse_prompt_too_long(message: &str) -> Option<u64> {
     message
         .strip_prefix("prompt is too long: ")?
         .split_once(" tokens")?
         .0
-        .parse::<usize>()
+        .parse()
         .ok()
 }
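With the return type now `Option<u64>`, the turbofish on `parse` becomes redundant; inference picks `u64` from the signature. For illustration, the parser from the hunk above behaves like this (the error-string prefix is taken from the diff; the sample messages are made up):

fn parse_prompt_too_long(message: &str) -> Option<u64> {
    message
        .strip_prefix("prompt is too long: ")?
        .split_once(" tokens")?
        .0
        .parse() // inferred as u64 from the return type
        .ok()
}

fn main() {
    assert_eq!(
        parse_prompt_too_long("prompt is too long: 210000 tokens > 200000 maximum"),
        Some(210_000)
    );
    assert_eq!(parse_prompt_too_long("rate_limit_error"), None);
}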
@@ -678,7 +678,7 @@ pub struct AssistantContext {
     summary_task: Task<Option<()>>,
     completion_count: usize,
     pending_completions: Vec<PendingCompletion>,
-    token_count: Option<usize>,
+    token_count: Option<u64>,
     pending_token_count: Task<Option<()>>,
     pending_save: Task<Result<()>>,
     pending_cache_warming_task: Task<Option<()>>,

@@ -1250,7 +1250,7 @@ impl AssistantContext {
         }
     }

-    pub fn token_count(&self) -> Option<usize> {
+    pub fn token_count(&self) -> Option<u64> {
         self.token_count
     }

@@ -3121,12 +3121,12 @@ fn invoked_slash_command_fold_placeholder(

 enum TokenState {
     NoTokensLeft {
-        max_token_count: usize,
-        token_count: usize,
+        max_token_count: u64,
+        token_count: u64,
     },
     HasMoreTokens {
-        max_token_count: usize,
-        token_count: usize,
+        max_token_count: u64,
+        token_count: u64,
         over_warn_threshold: bool,
     },
 }

@@ -3139,9 +3139,7 @@ fn token_state(context: &Entity<AssistantContext>, cx: &App) -> Option<TokenStat
         .model;
     let token_count = context.read(cx).token_count()?;
     let max_token_count = model.max_token_count();
-
-    let remaining_tokens = max_token_count as isize - token_count as isize;
-    let token_state = if remaining_tokens <= 0 {
+    let token_state = if max_token_count.saturating_sub(token_count) == 0 {
         TokenState::NoTokensLeft {
             max_token_count,
             token_count,

@@ -3182,7 +3180,7 @@ fn size_for_image(data: &RenderImage, max_size: Size<Pixels>) -> Size<Pixels> {
     }
 }

-pub fn humanize_token_count(count: usize) -> String {
+pub fn humanize_token_count(count: u64) -> String {
     match count {
         0..=999 => count.to_string(),
         1000..=9999 => {
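The `token_state` hunk above is one of the few places the switch forces a logic change rather than a plain type substitution: casting a `u64` to `isize` truncates for values above `isize::MAX` (and `isize` is only 32 bits on wasm32), so the signed subtraction is replaced with `u64::saturating_sub`, which clamps at zero instead of going negative. A minimal illustration of the difference:

fn main() {
    let max_token_count: u64 = 200_000;
    let token_count: u64 = 250_000;

    // Saturating subtraction clamps at zero, so "no tokens left" is
    // simply a zero result; no signed casts are involved.
    assert_eq!(max_token_count.saturating_sub(token_count), 0);
    assert_eq!(token_count.saturating_sub(max_token_count), 50_000);

    // The old `as isize` trick silently wraps for large u64 values.
    assert!((u64::MAX as isize) < 0);
}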
@@ -664,7 +664,7 @@ mod tests {
             format!("{}/{}", self.provider_id.0, self.name.0)
         }

-        fn max_token_count(&self) -> usize {
+        fn max_token_count(&self) -> u64 {
             1000
         }

@@ -672,7 +672,7 @@ mod tests {
             &self,
             _: LanguageModelRequest,
             _: &App,
-        ) -> BoxFuture<'static, http_client::Result<usize>> {
+        ) -> BoxFuture<'static, http_client::Result<u64>> {
             unimplemented!()
         }

@@ -152,7 +152,7 @@ pub enum Thinking {
 #[derive(Debug)]
 pub struct Request {
     pub model: String,
-    pub max_tokens: u32,
+    pub max_tokens: u64,
     pub messages: Vec<BedrockMessage>,
     pub tools: Option<BedrockToolConfig>,
     pub thinking: Option<Thinking>,

@@ -99,10 +99,10 @@ pub enum Model {
     #[serde(rename = "custom")]
     Custom {
         name: String,
-        max_tokens: usize,
+        max_tokens: u64,
         /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
         display_name: Option<String>,
-        max_output_tokens: Option<u32>,
+        max_output_tokens: Option<u64>,
         default_temperature: Option<f32>,
     },
 }

@@ -309,7 +309,7 @@ impl Model {
         }
     }

-    pub fn max_token_count(&self) -> usize {
+    pub fn max_token_count(&self) -> u64 {
         match self {
             Self::Claude3_5SonnetV2
             | Self::Claude3Opus

@@ -328,7 +328,7 @@ impl Model {
         }
     }

-    pub fn max_output_tokens(&self) -> u32 {
+    pub fn max_output_tokens(&self) -> u64 {
         match self {
             Self::Claude3Opus | Self::Claude3Sonnet | Self::Claude3_5Haiku => 4_096,
             Self::Claude3_7Sonnet

@@ -126,7 +126,7 @@ struct ModelLimits {
     #[serde(default)]
     max_output_tokens: usize,
     #[serde(default)]
-    max_prompt_tokens: usize,
+    max_prompt_tokens: u64,
 }

 #[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]

@@ -182,7 +182,7 @@ impl Model {
         self.name.as_str()
     }

-    pub fn max_token_count(&self) -> usize {
+    pub fn max_token_count(&self) -> u64 {
         self.capabilities.limits.max_prompt_tokens
     }

@@ -316,15 +316,15 @@ pub struct ResponseEvent {

 #[derive(Deserialize, Debug)]
 pub struct Usage {
-    pub completion_tokens: u32,
-    pub prompt_tokens: u32,
+    pub completion_tokens: u64,
+    pub prompt_tokens: u64,
     pub prompt_tokens_details: PromptTokensDetails,
-    pub total_tokens: u32,
+    pub total_tokens: u64,
 }

 #[derive(Deserialize, Debug)]
 pub struct PromptTokensDetails {
-    pub cached_tokens: u32,
+    pub cached_tokens: u64,
 }

 #[derive(Debug, Deserialize)]

@@ -58,8 +58,8 @@ pub enum Model {
         name: String,
         /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
         display_name: Option<String>,
-        max_tokens: usize,
-        max_output_tokens: Option<u32>,
+        max_tokens: u64,
+        max_output_tokens: Option<u64>,
     },
 }

@@ -94,14 +94,14 @@ impl Model {
         }
     }

-    pub fn max_token_count(&self) -> usize {
+    pub fn max_token_count(&self) -> u64 {
         match self {
             Self::Chat | Self::Reasoner => 64_000,
             Self::Custom { max_tokens, .. } => *max_tokens,
         }
     }

-    pub fn max_output_tokens(&self) -> Option<u32> {
+    pub fn max_output_tokens(&self) -> Option<u64> {
         match self {
             Self::Chat => Some(8_192),
             Self::Reasoner => Some(8_192),

@@ -118,7 +118,7 @@ pub struct Request {
     pub messages: Vec<RequestMessage>,
     pub stream: bool,
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub max_tokens: Option<u32>,
+    pub max_tokens: Option<u64>,
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub temperature: Option<f32>,
     #[serde(default, skip_serializing_if = "Option::is_none")]

@@ -276,17 +276,17 @@ pub struct PromptFeedback {
 #[serde(rename_all = "camelCase")]
 pub struct UsageMetadata {
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub prompt_token_count: Option<usize>,
+    pub prompt_token_count: Option<u64>,
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub cached_content_token_count: Option<usize>,
+    pub cached_content_token_count: Option<u64>,
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub candidates_token_count: Option<usize>,
+    pub candidates_token_count: Option<u64>,
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub tool_use_prompt_token_count: Option<usize>,
+    pub tool_use_prompt_token_count: Option<u64>,
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub thoughts_token_count: Option<usize>,
+    pub thoughts_token_count: Option<u64>,
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub total_token_count: Option<usize>,
+    pub total_token_count: Option<u64>,
 }

 #[derive(Debug, Serialize, Deserialize)]

@@ -395,7 +395,7 @@ pub struct CountTokensRequest {
 #[derive(Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct CountTokensResponse {
-    pub total_tokens: usize,
+    pub total_tokens: u64,
 }

 #[derive(Debug, Serialize, Deserialize)]

@@ -523,7 +523,7 @@ pub enum Model {
         name: String,
         /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
         display_name: Option<String>,
-        max_tokens: usize,
+        max_tokens: u64,
         #[serde(default)]
         mode: GoogleModelMode,
     },

@@ -586,9 +586,9 @@ impl Model {
         }
     }

-    pub fn max_token_count(&self) -> usize {
-        const ONE_MILLION: usize = 1_048_576;
-        const TWO_MILLION: usize = 2_097_152;
+    pub fn max_token_count(&self) -> u64 {
+        const ONE_MILLION: u64 = 1_048_576;
+        const TWO_MILLION: u64 = 2_097_152;
         match self {
             Model::Gemini15Pro => TWO_MILLION,
             Model::Gemini15Flash => ONE_MILLION,

@@ -169,11 +169,11 @@ impl LanguageModel for FakeLanguageModel {
         "fake".to_string()
     }

-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         1000000
     }

-    fn count_tokens(&self, _: LanguageModelRequest, _: &App) -> BoxFuture<'static, Result<usize>> {
+    fn count_tokens(&self, _: LanguageModelRequest, _: &App) -> BoxFuture<'static, Result<u64>> {
         futures::future::ready(Ok(0)).boxed()
     }

@@ -53,7 +53,7 @@ pub fn init_settings(cx: &mut App) {
 pub struct LanguageModelCacheConfiguration {
     pub max_cache_anchors: usize,
     pub should_speculate: bool,
-    pub min_total_token: usize,
+    pub min_total_token: u64,
 }

 /// A completion event from a language model.

@@ -135,17 +135,17 @@ impl RequestUsage {
 #[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
 pub struct TokenUsage {
     #[serde(default, skip_serializing_if = "is_default")]
-    pub input_tokens: u32,
+    pub input_tokens: u64,
     #[serde(default, skip_serializing_if = "is_default")]
-    pub output_tokens: u32,
+    pub output_tokens: u64,
     #[serde(default, skip_serializing_if = "is_default")]
-    pub cache_creation_input_tokens: u32,
+    pub cache_creation_input_tokens: u64,
     #[serde(default, skip_serializing_if = "is_default")]
-    pub cache_read_input_tokens: u32,
+    pub cache_read_input_tokens: u64,
 }

 impl TokenUsage {
-    pub fn total_tokens(&self) -> u32 {
+    pub fn total_tokens(&self) -> u64 {
         self.input_tokens
             + self.output_tokens
             + self.cache_read_input_tokens
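The `TokenUsage` change above is where a `u32` ceiling would bite first: `total_tokens()` adds four counters, so even individually-valid `u32` values could overflow their sum. A small standalone illustration (the diff truncates after the third summand; the fourth is assumed here):

#[derive(Debug, Default, Clone, Copy)]
struct TokenUsage {
    input_tokens: u64,
    output_tokens: u64,
    cache_creation_input_tokens: u64,
    cache_read_input_tokens: u64,
}

impl TokenUsage {
    // Mirrors the summing in the hunk above.
    fn total_tokens(&self) -> u64 {
        self.input_tokens
            + self.output_tokens
            + self.cache_read_input_tokens
            + self.cache_creation_input_tokens
    }
}

fn main() {
    // Each counter fits in u32, but the sum exceeds u32::MAX.
    let usage = TokenUsage {
        input_tokens: 3_000_000_000,
        output_tokens: 3_000_000_000,
        ..Default::default()
    };
    assert!(usage.total_tokens() > u32::MAX as u64);
}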
@@ -254,8 +254,8 @@ pub trait LanguageModel: Send + Sync {
         LanguageModelToolSchemaFormat::JsonSchema
     }

-    fn max_token_count(&self) -> usize;
-    fn max_output_tokens(&self) -> Option<u32> {
+    fn max_token_count(&self) -> u64;
+    fn max_output_tokens(&self) -> Option<u64> {
         None
     }

@@ -263,7 +263,7 @@ pub trait LanguageModel: Send + Sync {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>>;
+    ) -> BoxFuture<'static, Result<u64>>;

     fn stream_completion(
         &self,

@@ -349,7 +349,7 @@ pub trait LanguageModel: Send + Sync {
 #[derive(Debug, Error)]
 pub enum LanguageModelKnownError {
     #[error("Context window limit exceeded ({tokens})")]
-    ContextWindowLimitExceeded { tokens: usize },
+    ContextWindowLimitExceeded { tokens: u64 },
 }

 pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {

@@ -51,12 +51,12 @@ pub struct AvailableModel {
     /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
     pub display_name: Option<String>,
     /// The model's context window size.
-    pub max_tokens: usize,
+    pub max_tokens: u64,
     /// A model `name` to substitute when calling tools, in case the primary model doesn't support tool calling.
     pub tool_override: Option<String>,
     /// Configuration of Anthropic's caching API.
     pub cache_configuration: Option<LanguageModelCacheConfiguration>,
-    pub max_output_tokens: Option<u32>,
+    pub max_output_tokens: Option<u64>,
     pub default_temperature: Option<f32>,
     #[serde(default)]
     pub extra_beta_headers: Vec<String>,

@@ -321,7 +321,7 @@ pub struct AnthropicModel {
 pub fn count_anthropic_tokens(
     request: LanguageModelRequest,
     cx: &App,
-) -> BoxFuture<'static, Result<usize>> {
+) -> BoxFuture<'static, Result<u64>> {
     cx.background_spawn(async move {
         let messages = request.messages;
         let mut tokens_from_images = 0;

@@ -377,7 +377,7 @@ pub fn count_anthropic_tokens(
         // Tiktoken doesn't yet support these models, so we manually use the
         // same tokenizer as GPT-4.
         tiktoken_rs::num_tokens_from_messages("gpt-4", &string_messages)
-            .map(|tokens| tokens + tokens_from_images)
+            .map(|tokens| (tokens + tokens_from_images) as u64)
     })
     .boxed()
 }

@@ -461,11 +461,11 @@ impl LanguageModel for AnthropicModel {
         self.state.read(cx).api_key.clone()
     }

-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }

-    fn max_output_tokens(&self) -> Option<u32> {
+    fn max_output_tokens(&self) -> Option<u64> {
         Some(self.model.max_output_tokens())
     }

@@ -473,7 +473,7 @@ impl LanguageModel for AnthropicModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         count_anthropic_tokens(request, cx)
     }

@@ -518,7 +518,7 @@ pub fn into_anthropic(
     request: LanguageModelRequest,
     model: String,
     default_temperature: f32,
-    max_output_tokens: u32,
+    max_output_tokens: u64,
     mode: AnthropicModelMode,
 ) -> anthropic::Request {
     let mut new_messages: Vec<anthropic::Message> = Vec::new();
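Since tiktoken-rs still returns `usize`, every tokenizer call becomes a boundary where the platform-sized count is widened to `u64`, as in the `count_anthropic_tokens` hunk above; `usize as u64` is lossless on every supported target, so the widening happens once and nothing downstream sees `usize`. A sketch of the pattern with a stand-in counter (a hypothetical helper, not tiktoken-rs API):

// Stand-in for any tokenizer call that returns the platform-sized usize.
fn count_with_tokenizer(text: &str) -> usize {
    text.split_whitespace().count()
}

// Widen exactly once, at the boundary; callers only ever see u64.
fn count_tokens(text: &str) -> u64 {
    count_with_tokenizer(text) as u64
}

fn main() {
    assert_eq!(count_tokens("one two three"), 3);
}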
@@ -88,9 +88,9 @@ pub enum BedrockAuthMethod {
 pub struct AvailableModel {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
+    pub max_tokens: u64,
     pub cache_configuration: Option<LanguageModelCacheConfiguration>,
-    pub max_output_tokens: Option<u32>,
+    pub max_output_tokens: Option<u64>,
     pub default_temperature: Option<f32>,
     pub mode: Option<ModelMode>,
 }

@@ -503,11 +503,11 @@ impl LanguageModel for BedrockModel {
         format!("bedrock/{}", self.model.id())
     }

-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }

-    fn max_output_tokens(&self) -> Option<u32> {
+    fn max_output_tokens(&self) -> Option<u64> {
         Some(self.model.max_output_tokens())
     }

@@ -515,7 +515,7 @@ impl LanguageModel for BedrockModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         get_bedrock_tokens(request, cx)
     }

@@ -583,7 +583,7 @@ pub fn into_bedrock(
     request: LanguageModelRequest,
     model: String,
     default_temperature: f32,
-    max_output_tokens: u32,
+    max_output_tokens: u64,
     mode: BedrockModelMode,
 ) -> Result<bedrock::Request> {
     let mut new_messages: Vec<BedrockMessage> = Vec::new();

@@ -747,7 +747,7 @@ pub fn into_bedrock(
 pub fn get_bedrock_tokens(
     request: LanguageModelRequest,
     cx: &App,
-) -> BoxFuture<'static, Result<usize>> {
+) -> BoxFuture<'static, Result<u64>> {
     cx.background_executor()
         .spawn(async move {
             let messages = request.messages;

@@ -799,7 +799,7 @@ pub fn get_bedrock_tokens(
             // Tiktoken doesn't yet support these models, so we manually use the
             // same tokenizer as GPT-4.
             tiktoken_rs::num_tokens_from_messages("gpt-4", &string_messages)
-                .map(|tokens| tokens + tokens_from_images)
+                .map(|tokens| (tokens + tokens_from_images) as u64)
         })
         .boxed()
 }

@@ -947,9 +947,9 @@ pub fn map_to_language_model_completion_events(
                 let completion_event =
                     LanguageModelCompletionEvent::UsageUpdate(
                         TokenUsage {
-                            input_tokens: metadata.input_tokens as u32,
+                            input_tokens: metadata.input_tokens as u64,
                             output_tokens: metadata.output_tokens
-                                as u32,
+                                as u64,
                             cache_creation_input_tokens: default(),
                             cache_read_input_tokens: default(),
                         },

@@ -73,9 +73,9 @@ pub struct AvailableModel {
     /// The size of the context window, indicating the maximum number of tokens the model can process.
     pub max_tokens: usize,
     /// The maximum number of output tokens allowed by the model.
-    pub max_output_tokens: Option<u32>,
+    pub max_output_tokens: Option<u64>,
     /// The maximum number of completion tokens allowed by the model (o1-* only)
-    pub max_completion_tokens: Option<u32>,
+    pub max_completion_tokens: Option<u64>,
     /// Override this model with a different Anthropic model for tool calls.
     pub tool_override: Option<String>,
     /// Indicates whether this custom model supports caching.

@@ -715,8 +715,8 @@ impl LanguageModel for CloudLanguageModel {
         }
     }

-    fn max_token_count(&self) -> usize {
-        self.model.max_token_count
+    fn max_token_count(&self) -> u64 {
+        self.model.max_token_count as u64
     }

     fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {

@@ -737,7 +737,7 @@ impl LanguageModel for CloudLanguageModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         match self.model.provider {
             zed_llm_client::LanguageModelProvider::Anthropic => count_anthropic_tokens(request, cx),
             zed_llm_client::LanguageModelProvider::OpenAi => {

@@ -786,7 +786,7 @@ impl LanguageModel for CloudLanguageModel {
                     let response_body: CountTokensResponse =
                         serde_json::from_str(&response_body)?;

-                    Ok(response_body.tokens)
+                    Ok(response_body.tokens as u64)
                 } else {
                     Err(anyhow!(ApiError {
                         status,

@@ -821,7 +821,7 @@ impl LanguageModel for CloudLanguageModel {
                     request,
                     self.model.id.to_string(),
                     1.0,
-                    self.model.max_output_tokens as u32,
+                    self.model.max_output_tokens as u64,
                     if self.model.id.0.ends_with("-thinking") {
                         AnthropicModelMode::Thinking {
                             budget_tokens: Some(4_096),

@@ -237,7 +237,7 @@ impl LanguageModel for CopilotChatLanguageModel {
         format!("copilot_chat/{}", self.model.id())
     }

-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }

@@ -245,7 +245,7 @@ impl LanguageModel for CopilotChatLanguageModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         match self.model.vendor() {
             ModelVendor::Anthropic => count_anthropic_tokens(request, cx),
             ModelVendor::Google => count_google_tokens(request, cx),

@@ -49,8 +49,8 @@ pub struct DeepSeekSettings {
 pub struct AvailableModel {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
-    pub max_output_tokens: Option<u32>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
 }

 pub struct DeepSeekLanguageModelProvider {

@@ -306,11 +306,11 @@ impl LanguageModel for DeepSeekLanguageModel {
         format!("deepseek/{}", self.model.id())
     }

-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }

-    fn max_output_tokens(&self) -> Option<u32> {
+    fn max_output_tokens(&self) -> Option<u64> {
         self.model.max_output_tokens()
     }

@@ -318,7 +318,7 @@ impl LanguageModel for DeepSeekLanguageModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         cx.background_spawn(async move {
             let messages = request
                 .messages

@@ -335,7 +335,7 @@ impl LanguageModel for DeepSeekLanguageModel {
                 })
                 .collect::<Vec<_>>();

-            tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
+            tiktoken_rs::num_tokens_from_messages("gpt-4", &messages).map(|tokens| tokens as u64)
         })
         .boxed()
     }

@@ -365,7 +365,7 @@ impl LanguageModel for DeepSeekLanguageModel {
 pub fn into_deepseek(
     request: LanguageModelRequest,
     model: &deepseek::Model,
-    max_output_tokens: Option<u32>,
+    max_output_tokens: Option<u64>,
 ) -> deepseek::Request {
     let is_reasoner = *model == deepseek::Model::Reasoner;

@@ -79,7 +79,7 @@ impl From<GoogleModelMode> for ModelMode {
 pub struct AvailableModel {
     name: String,
     display_name: Option<String>,
-    max_tokens: usize,
+    max_tokens: u64,
     mode: Option<ModelMode>,
 }

@@ -365,7 +365,7 @@ impl LanguageModel for GoogleLanguageModel {
         format!("google/{}", self.model.request_id())
     }

-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }

@@ -373,7 +373,7 @@ impl LanguageModel for GoogleLanguageModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         let model_id = self.model.request_id().to_string();
         let request = into_google(request, model_id.clone(), self.model.mode());
         let http_client = self.http_client.clone();

@@ -702,7 +702,7 @@ impl GoogleEventMapper {
 pub fn count_google_tokens(
     request: LanguageModelRequest,
     cx: &App,
-) -> BoxFuture<'static, Result<usize>> {
+) -> BoxFuture<'static, Result<u64>> {
     // We couldn't use the GoogleLanguageModelProvider to count tokens because the github copilot doesn't have the access to google_ai directly.
     // So we have to use tokenizer from tiktoken_rs to count tokens.
     cx.background_spawn(async move {

@@ -723,7 +723,7 @@ pub fn count_google_tokens(

         // Tiktoken doesn't yet support these models, so we manually use the
         // same tokenizer as GPT-4.
-        tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
+        tiktoken_rs::num_tokens_from_messages("gpt-4", &messages).map(|tokens| tokens as u64)
     })
     .boxed()
 }

@@ -750,10 +750,10 @@ fn update_usage(usage: &mut UsageMetadata, new: &UsageMetadata) {
 }

 fn convert_usage(usage: &UsageMetadata) -> language_model::TokenUsage {
-    let prompt_tokens = usage.prompt_token_count.unwrap_or(0) as u32;
-    let cached_tokens = usage.cached_content_token_count.unwrap_or(0) as u32;
+    let prompt_tokens = usage.prompt_token_count.unwrap_or(0);
+    let cached_tokens = usage.cached_content_token_count.unwrap_or(0);
     let input_tokens = prompt_tokens - cached_tokens;
-    let output_tokens = usage.candidates_token_count.unwrap_or(0) as u32;
+    let output_tokens = usage.candidates_token_count.unwrap_or(0);

     language_model::TokenUsage {
         input_tokens,

@@ -44,7 +44,7 @@ pub struct LmStudioSettings {
 pub struct AvailableModel {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
+    pub max_tokens: u64,
     pub supports_tool_calls: bool,
     pub supports_images: bool,
 }

@@ -414,7 +414,7 @@ impl LanguageModel for LmStudioLanguageModel {
         format!("lmstudio/{}", self.model.id())
     }

-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }

@@ -422,7 +422,7 @@ impl LanguageModel for LmStudioLanguageModel {
         &self,
         request: LanguageModelRequest,
         _cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         // Endpoint for this is coming soon. In the meantime, hacky estimation
         let token_count = request
             .messages

@@ -430,7 +430,7 @@ impl LanguageModel for LmStudioLanguageModel {
             .map(|msg| msg.string_contents().split_whitespace().count())
             .sum::<usize>();

-        let estimated_tokens = (token_count as f64 * 0.75) as usize;
+        let estimated_tokens = (token_count as f64 * 0.75) as u64;
         async move { Ok(estimated_tokens) }.boxed()
     }
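LM Studio has no token-counting endpoint yet, so the hunk above keeps the word-count heuristic and only changes the final cast. A self-contained sketch of the same estimate (the 0.75 factor comes from the diff; plain string slices stand in for Zed's message type):

// Estimate tokens as 0.75 x whitespace-separated words, widened to u64.
fn estimate_tokens(messages: &[&str]) -> u64 {
    let word_count: usize = messages
        .iter()
        .map(|msg| msg.split_whitespace().count())
        .sum();
    (word_count as f64 * 0.75) as u64
}

fn main() {
    let messages = ["you are a helpful assistant", "count my tokens"];
    assert_eq!(estimate_tokens(&messages), 6); // 8 words * 0.75
}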
@@ -43,9 +43,9 @@ pub struct MistralSettings {
 pub struct AvailableModel {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
-    pub max_output_tokens: Option<u32>,
-    pub max_completion_tokens: Option<u32>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
+    pub max_completion_tokens: Option<u64>,
     pub supports_tools: Option<bool>,
     pub supports_images: Option<bool>,
 }

@@ -322,11 +322,11 @@ impl LanguageModel for MistralLanguageModel {
         format!("mistral/{}", self.model.id())
     }

-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }

-    fn max_output_tokens(&self) -> Option<u32> {
+    fn max_output_tokens(&self) -> Option<u64> {
         self.model.max_output_tokens()
     }

@@ -334,7 +334,7 @@ impl LanguageModel for MistralLanguageModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         cx.background_spawn(async move {
             let messages = request
                 .messages

@@ -351,7 +351,7 @@ impl LanguageModel for MistralLanguageModel {
                 })
                 .collect::<Vec<_>>();

-            tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
+            tiktoken_rs::num_tokens_from_messages("gpt-4", &messages).map(|tokens| tokens as u64)
         })
         .boxed()
     }

@@ -386,7 +386,7 @@ impl LanguageModel for MistralLanguageModel {
 pub fn into_mistral(
     request: LanguageModelRequest,
     model: String,
-    max_output_tokens: Option<u32>,
+    max_output_tokens: Option<u64>,
 ) -> mistral::Request {
     let stream = true;

@@ -46,7 +46,7 @@ pub struct AvailableModel {
     /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
     pub display_name: Option<String>,
     /// The Context Length parameter to the model (aka num_ctx or n_ctx)
-    pub max_tokens: usize,
+    pub max_tokens: u64,
     /// The number of seconds to keep the connection open after the last request
     pub keep_alive: Option<KeepAlive>,
     /// Whether the model supports tools

@@ -377,7 +377,7 @@ impl LanguageModel for OllamaLanguageModel {
         format!("ollama/{}", self.model.id())
     }

-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }

@@ -385,7 +385,7 @@ impl LanguageModel for OllamaLanguageModel {
         &self,
         request: LanguageModelRequest,
         _cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         // There is no endpoint for this _yet_ in Ollama
         // see: https://github.com/ollama/ollama/issues/1716 and https://github.com/ollama/ollama/issues/3582
         let token_count = request

@@ -395,7 +395,7 @@ impl LanguageModel for OllamaLanguageModel {
             .sum::<usize>()
             / 4;

-        async move { Ok(token_count) }.boxed()
+        async move { Ok(token_count as u64) }.boxed()
     }

     fn stream_completion(

@@ -43,9 +43,9 @@ pub struct OpenAiSettings {
 pub struct AvailableModel {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
-    pub max_output_tokens: Option<u32>,
-    pub max_completion_tokens: Option<u32>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
+    pub max_completion_tokens: Option<u64>,
 }

 pub struct OpenAiLanguageModelProvider {

@@ -312,11 +312,11 @@ impl LanguageModel for OpenAiLanguageModel {
         format!("openai/{}", self.model.id())
     }

-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }

-    fn max_output_tokens(&self) -> Option<u32> {
+    fn max_output_tokens(&self) -> Option<u64> {
         self.model.max_output_tokens()
     }

@@ -324,7 +324,7 @@ impl LanguageModel for OpenAiLanguageModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         count_open_ai_tokens(request, self.model.clone(), cx)
     }

@@ -355,7 +355,7 @@ impl LanguageModel for OpenAiLanguageModel {
 pub fn into_open_ai(
     request: LanguageModelRequest,
     model: &Model,
-    max_output_tokens: Option<u32>,
+    max_output_tokens: Option<u64>,
 ) -> open_ai::Request {
     let stream = !model.id().starts_with("o1-");

@@ -606,7 +606,7 @@ pub fn count_open_ai_tokens(
     request: LanguageModelRequest,
     model: Model,
     cx: &App,
-) -> BoxFuture<'static, Result<usize>> {
+) -> BoxFuture<'static, Result<u64>> {
     cx.background_spawn(async move {
         let messages = request
             .messages

@@ -652,6 +652,7 @@ pub fn count_open_ai_tokens(
             | Model::O3Mini
             | Model::O4Mini => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
         }
+        .map(|tokens| tokens as u64)
     })
     .boxed()
 }

@@ -40,9 +40,9 @@ pub struct OpenRouterSettings {
 pub struct AvailableModel {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
-    pub max_output_tokens: Option<u32>,
-    pub max_completion_tokens: Option<u32>,
+    pub max_tokens: u64,
+    pub max_output_tokens: Option<u64>,
+    pub max_completion_tokens: Option<u64>,
     pub supports_tools: Option<bool>,
     pub supports_images: Option<bool>,
 }

@@ -331,11 +331,11 @@ impl LanguageModel for OpenRouterLanguageModel {
         format!("openrouter/{}", self.model.id())
     }

-    fn max_token_count(&self) -> usize {
+    fn max_token_count(&self) -> u64 {
         self.model.max_token_count()
     }

-    fn max_output_tokens(&self) -> Option<u32> {
+    fn max_output_tokens(&self) -> Option<u64> {
         self.model.max_output_tokens()
     }

@@ -355,7 +355,7 @@ impl LanguageModel for OpenRouterLanguageModel {
         &self,
         request: LanguageModelRequest,
         cx: &App,
-    ) -> BoxFuture<'static, Result<usize>> {
+    ) -> BoxFuture<'static, Result<u64>> {
         count_open_router_tokens(request, self.model.clone(), cx)
     }

@@ -386,7 +386,7 @@ impl LanguageModel for OpenRouterLanguageModel {
 pub fn into_open_router(
     request: LanguageModelRequest,
     model: &Model,
-    max_output_tokens: Option<u32>,
+    max_output_tokens: Option<u64>,
 ) -> open_router::Request {
     let mut messages = Vec::new();
     for message in request.messages {

@@ -640,7 +640,7 @@ pub fn count_open_router_tokens(
     request: LanguageModelRequest,
     _model: open_router::Model,
     cx: &App,
-) -> BoxFuture<'static, Result<usize>> {
+) -> BoxFuture<'static, Result<u64>> {
     cx.background_spawn(async move {
         let messages = request
             .messages

@@ -657,7 +657,7 @@ pub fn count_open_router_tokens(
                 })
                 .collect::<Vec<_>>();

-        tiktoken_rs::num_tokens_from_messages("gpt-4o", &messages)
+        tiktoken_rs::num_tokens_from_messages("gpt-4o", &messages).map(|tokens| tokens as u64)
     })
     .boxed()
 }

@@ -46,7 +46,7 @@ impl From<Role> for String {
 pub struct Model {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
+    pub max_tokens: u64,
     pub supports_tool_calls: bool,
     pub supports_images: bool,
 }

@@ -55,7 +55,7 @@ impl Model {
     pub fn new(
         name: &str,
         display_name: Option<&str>,
-        max_tokens: Option<usize>,
+        max_tokens: Option<u64>,
         supports_tool_calls: bool,
         supports_images: bool,
     ) -> Self {

@@ -76,7 +76,7 @@ impl Model {
         self.display_name.as_ref().unwrap_or(&self.name)
     }

-    pub fn max_token_count(&self) -> usize {
+    pub fn max_token_count(&self) -> u64 {
         self.max_tokens
     }

@@ -256,9 +256,9 @@ pub struct FunctionChunk {

 #[derive(Serialize, Deserialize, Debug)]
 pub struct Usage {
-    pub prompt_tokens: u32,
-    pub completion_tokens: u32,
-    pub total_tokens: u32,
+    pub prompt_tokens: u64,
+    pub completion_tokens: u64,
+    pub total_tokens: u64,
 }

 #[derive(Debug, Default, Clone, Deserialize, PartialEq)]

@@ -306,8 +306,8 @@ pub struct ModelEntry {
     pub compatibility_type: CompatibilityType,
     pub quantization: Option<String>,
     pub state: ModelState,
-    pub max_context_length: Option<usize>,
-    pub loaded_context_length: Option<usize>,
+    pub max_context_length: Option<u64>,
+    pub loaded_context_length: Option<u64>,
     #[serde(default)]
     pub capabilities: Capabilities,
 }

@@ -70,9 +70,9 @@ pub enum Model {
         name: String,
         /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
         display_name: Option<String>,
-        max_tokens: usize,
-        max_output_tokens: Option<u32>,
-        max_completion_tokens: Option<u32>,
+        max_tokens: u64,
+        max_output_tokens: Option<u64>,
+        max_completion_tokens: Option<u64>,
         supports_tools: Option<bool>,
         supports_images: Option<bool>,
     },

@@ -130,7 +130,7 @@ impl Model {
         }
     }

-    pub fn max_token_count(&self) -> usize {
+    pub fn max_token_count(&self) -> u64 {
         match self {
             Self::CodestralLatest => 256000,
             Self::MistralLargeLatest => 131000,

@@ -145,7 +145,7 @@ impl Model {
         }
     }

-    pub fn max_output_tokens(&self) -> Option<u32> {
+    pub fn max_output_tokens(&self) -> Option<u64> {
         match self {
             Self::Custom {
                 max_output_tokens, ..

@@ -193,7 +193,7 @@ pub struct Request {
     pub messages: Vec<RequestMessage>,
     pub stream: bool,
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub max_tokens: Option<u32>,
+    pub max_tokens: Option<u64>,
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub temperature: Option<f32>,
     #[serde(default, skip_serializing_if = "Option::is_none")]

@@ -360,9 +360,9 @@ pub struct Response {

 #[derive(Serialize, Deserialize, Debug)]
 pub struct Usage {
-    pub prompt_tokens: u32,
-    pub completion_tokens: u32,
-    pub total_tokens: u32,
+    pub prompt_tokens: u64,
+    pub completion_tokens: u64,
+    pub total_tokens: u64,
 }

 #[derive(Serialize, Deserialize, Debug)]

@@ -35,18 +35,18 @@ impl Default for KeepAlive {
 pub struct Model {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
+    pub max_tokens: u64,
     pub keep_alive: Option<KeepAlive>,
     pub supports_tools: Option<bool>,
     pub supports_vision: Option<bool>,
     pub supports_thinking: Option<bool>,
 }

-fn get_max_tokens(name: &str) -> usize {
+fn get_max_tokens(name: &str) -> u64 {
     /// Default context length for unknown models.
-    const DEFAULT_TOKENS: usize = 4096;
+    const DEFAULT_TOKENS: u64 = 4096;
     /// Magic number. Lets many Ollama models work with ~16GB of ram.
-    const MAXIMUM_TOKENS: usize = 16384;
+    const MAXIMUM_TOKENS: u64 = 16384;

     match name.split(':').next().unwrap() {
         "phi" | "tinyllama" | "granite-code" => 2048,

@@ -67,7 +67,7 @@ impl Model {
     pub fn new(
         name: &str,
         display_name: Option<&str>,
-        max_tokens: Option<usize>,
+        max_tokens: Option<u64>,
         supports_tools: Option<bool>,
         supports_vision: Option<bool>,
         supports_thinking: Option<bool>,

@@ -93,7 +93,7 @@ impl Model {
         self.display_name.as_ref().unwrap_or(&self.name)
     }

-    pub fn max_token_count(&self) -> usize {
+    pub fn max_token_count(&self) -> u64 {
         self.max_tokens
     }
 }

@@ -165,7 +165,7 @@ impl ChatRequest {
 // https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
 #[derive(Serialize, Default, Debug)]
 pub struct ChatOptions {
-    pub num_ctx: Option<usize>,
+    pub num_ctx: Option<u64>,
     pub num_predict: Option<isize>,
     pub stop: Option<Vec<String>>,
     pub temperature: Option<f32>,

@@ -183,8 +183,8 @@ pub struct ChatResponseDelta {
     pub done_reason: Option<String>,
     #[allow(unused)]
     pub done: bool,
-    pub prompt_eval_count: Option<u32>,
-    pub eval_count: Option<u32>,
+    pub prompt_eval_count: Option<u64>,
+    pub eval_count: Option<u64>,
 }

 #[derive(Serialize, Deserialize)]

@@ -80,9 +80,9 @@ pub enum Model {
         name: String,
         /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
         display_name: Option<String>,
-        max_tokens: usize,
-        max_output_tokens: Option<u32>,
-        max_completion_tokens: Option<u32>,
+        max_tokens: u64,
+        max_output_tokens: Option<u64>,
+        max_completion_tokens: Option<u64>,
     },
 }

@@ -147,7 +147,7 @@ impl Model {
         }
     }

-    pub fn max_token_count(&self) -> usize {
+    pub fn max_token_count(&self) -> u64 {
         match self {
             Self::ThreePointFiveTurbo => 16_385,
             Self::Four => 8_192,

@@ -165,7 +165,7 @@ impl Model {
         }
     }

-    pub fn max_output_tokens(&self) -> Option<u32> {
+    pub fn max_output_tokens(&self) -> Option<u64> {
         match self {
             Self::Custom {
                 max_output_tokens, ..

@@ -209,7 +209,7 @@ pub struct Request {
     pub messages: Vec<RequestMessage>,
     pub stream: bool,
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub max_completion_tokens: Option<u32>,
+    pub max_completion_tokens: Option<u64>,
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub stop: Vec<String>,
     pub temperature: f32,

@@ -50,7 +50,7 @@ impl From<Role> for String {
 pub struct Model {
     pub name: String,
     pub display_name: Option<String>,
-    pub max_tokens: usize,
+    pub max_tokens: u64,
     pub supports_tools: Option<bool>,
     pub supports_images: Option<bool>,
 }

@@ -73,7 +73,7 @@ impl Model {
     pub fn new(
         name: &str,
         display_name: Option<&str>,
-        max_tokens: Option<usize>,
+        max_tokens: Option<u64>,
         supports_tools: Option<bool>,
         supports_images: Option<bool>,
     ) -> Self {

@@ -94,11 +94,11 @@ impl Model {
         self.display_name.as_ref().unwrap_or(&self.name)
     }

-    pub fn max_token_count(&self) -> usize {
+    pub fn max_token_count(&self) -> u64 {
         self.max_tokens
     }

-    pub fn max_output_tokens(&self) -> Option<u32> {
+    pub fn max_output_tokens(&self) -> Option<u64> {
         None
     }

@@ -117,7 +117,7 @@ pub struct Request {
     pub messages: Vec<RequestMessage>,
     pub stream: bool,
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub max_tokens: Option<u32>,
+    pub max_tokens: Option<u64>,
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub stop: Vec<String>,
     pub temperature: f32,

@@ -318,9 +318,9 @@ pub struct FunctionChunk {

 #[derive(Serialize, Deserialize, Debug)]
 pub struct Usage {
-    pub prompt_tokens: u32,
-    pub completion_tokens: u32,
-    pub total_tokens: u32,
+    pub prompt_tokens: u64,
+    pub completion_tokens: u64,
+    pub total_tokens: u64,
 }

 #[derive(Serialize, Deserialize, Debug)]

@@ -369,7 +369,7 @@ pub struct ModelEntry {
     pub created: usize,
     pub description: String,
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub context_length: Option<usize>,
+    pub context_length: Option<u64>,
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub supported_parameters: Vec<String>,
     #[serde(default, skip_serializing_if = "Option::is_none")]

@@ -154,7 +154,7 @@ pub struct RulesLibrary {
 struct RuleEditor {
     title_editor: Entity<Editor>,
     body_editor: Entity<Editor>,
-    token_count: Option<usize>,
+    token_count: Option<u64>,
     pending_token_count: Task<Option<()>>,
     next_title_and_body_to_save: Option<(String, Rope)>,
     pending_save: Option<Task<Option<()>>>,