copilot: Add support for Gemini 2.0 Flash model to Copilot Chat (#24952)

Co-authored-by: Peter Tripp <peter@zed.dev>
Author: Richard Hao
Date: 2025-02-18 03:25:38 +08:00 (committed by GitHub)
parent 3e9722685b
commit f833a01a7e
3 changed files with 43 additions and 3 deletions

@@ -25,6 +25,7 @@ use strum::IntoEnumIterator;
 use ui::prelude::*;
 
 use super::anthropic::count_anthropic_tokens;
+use super::google::count_google_tokens;
 use super::open_ai::count_open_ai_tokens;
 
 const PROVIDER_ID: &str = "copilot_chat";
@@ -174,13 +175,16 @@ impl LanguageModel for CopilotChatLanguageModel {
     ) -> BoxFuture<'static, Result<usize>> {
         match self.model {
             CopilotChatModel::Claude3_5Sonnet => count_anthropic_tokens(request, cx),
+            CopilotChatModel::Gemini20Flash => count_google_tokens(request, cx),
             _ => {
                 let model = match self.model {
                     CopilotChatModel::Gpt4o => open_ai::Model::FourOmni,
                     CopilotChatModel::Gpt4 => open_ai::Model::Four,
                     CopilotChatModel::Gpt3_5Turbo => open_ai::Model::ThreePointFiveTurbo,
                     CopilotChatModel::O1 | CopilotChatModel::O3Mini => open_ai::Model::Four,
-                    CopilotChatModel::Claude3_5Sonnet => unreachable!(),
+                    CopilotChatModel::Claude3_5Sonnet | CopilotChatModel::Gemini20Flash => {
+                        unreachable!()
+                    }
                 };
                 count_open_ai_tokens(request, model, cx)
             }
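A note on the nested match in the hunk above: the outer match peels off the variants that have dedicated token counters (Claude3_5Sonnet, Gemini20Flash), so by the time the inner match runs, those variants cannot occur; naming them again with `unreachable!()` only satisfies Rust's exhaustiveness check. Below is a minimal standalone sketch of this pattern, using an illustrative enum and placeholder counting logic rather than Zed's real types:

```rust
// Illustrative stand-in for CopilotChatModel and its token-count dispatch.
#[derive(Clone, Copy)]
enum ChatModel {
    Gpt4o,
    Claude3_5Sonnet,
    Gemini20Flash,
}

fn count_tokens(model: ChatModel, text: &str) -> usize {
    match model {
        // Variants with their own counters are handled first...
        ChatModel::Claude3_5Sonnet | ChatModel::Gemini20Flash => text.len() / 4,
        _ => {
            // ...so re-matching here only names them for exhaustiveness;
            // control flow can never actually reach this arm with them.
            let tokenizer_model = match model {
                ChatModel::Gpt4o => "gpt-4o",
                ChatModel::Claude3_5Sonnet | ChatModel::Gemini20Flash => unreachable!(),
            };
            // Placeholder for count_open_ai_tokens(request, model, cx).
            let _ = tokenizer_model;
            text.split_whitespace().count()
        }
    }
}

fn main() {
    println!("{}", count_tokens(ChatModel::Gpt4o, "hello from the sketch"));
}
```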

@@ -11,7 +11,7 @@ use language_model::LanguageModelCompletionEvent;
 use language_model::{
     LanguageModel, LanguageModelId, LanguageModelName, LanguageModelProvider,
     LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
-    LanguageModelRequest, RateLimiter,
+    LanguageModelRequest, RateLimiter, Role,
 };
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
@@ -324,6 +324,36 @@ impl LanguageModel for GoogleLanguageModel {
     }
 }
 
+pub fn count_google_tokens(
+    request: LanguageModelRequest,
+    cx: &App,
+) -> BoxFuture<'static, Result<usize>> {
+    // We can't use GoogleLanguageModelProvider to count tokens here: GitHub
+    // Copilot has no direct access to google_ai, so we fall back to tiktoken_rs.
+    cx.background_executor()
+        .spawn(async move {
+            let messages = request
+                .messages
+                .into_iter()
+                .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
+                    role: match message.role {
+                        Role::User => "user".into(),
+                        Role::Assistant => "assistant".into(),
+                        Role::System => "system".into(),
+                    },
+                    content: Some(message.string_contents()),
+                    name: None,
+                    function_call: None,
+                })
+                .collect::<Vec<_>>();
+
+            // Tiktoken doesn't yet support these models, so we manually use the
+            // same tokenizer as GPT-4.
+            tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
+        })
+        .boxed()
+}
+
 struct ConfigurationView {
     api_key_editor: Entity<Editor>,
     state: gpui::Entity<State>,
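For readers who want to try the approximation itself, here is a minimal standalone sketch of the same tiktoken_rs fallback outside gpui, assuming the `tiktoken_rs` and `anyhow` crates; the message contents are illustrative:

```rust
// Standalone sketch: approximate a Gemini chat's token count with the
// GPT-4 (cl100k_base) tokenizer, mirroring what count_google_tokens does.
use tiktoken_rs::{num_tokens_from_messages, ChatCompletionRequestMessage};

fn main() -> anyhow::Result<()> {
    let messages = vec![
        ChatCompletionRequestMessage {
            role: "system".into(),
            content: Some("You are a helpful assistant.".into()),
            name: None,
            function_call: None,
        },
        ChatCompletionRequestMessage {
            role: "user".into(),
            content: Some("Summarize this diff for me.".into()),
            name: None,
            function_call: None,
        },
    ];

    // "gpt-4" selects the cl100k_base tokenizer, so the result is an
    // approximation for Gemini, not an exact count.
    let tokens = num_tokens_from_messages("gpt-4", &messages)?;
    println!("approximate token count: {tokens}");
    Ok(())
}
```

Since Gemini's real tokenizer isn't available through this code path, the count should be treated as a rough budget estimate rather than an exact figure.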