Merge branch 'main' into ollama-inline-completions
# Conflicts:
#	crates/edit_prediction_button/src/edit_prediction_button.rs
#	crates/editor/src/editor.rs
commit 8bd784f5ea
271 changed files with 13577 additions and 5147 deletions
```diff
@@ -136,6 +136,7 @@ impl State {
         cx: &mut Context<Self>,
     ) -> Self {
         let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);
+        let mut current_user = user_store.read(cx).watch_current_user();
         Self {
             client: client.clone(),
             llm_api_token: LlmApiToken::default(),
```
```diff
@@ -151,22 +152,14 @@ impl State {
             let (client, llm_api_token) = this
                 .read_with(cx, |this, _cx| (client.clone(), this.llm_api_token.clone()))?;
 
-            loop {
-                let is_authenticated = user_store
-                    .read_with(cx, |user_store, _cx| user_store.current_user().is_some())?;
-                if is_authenticated {
-                    break;
-                }
-
-                cx.background_executor()
-                    .timer(Duration::from_millis(100))
-                    .await;
+            while current_user.borrow().is_none() {
+                current_user.next().await;
             }
 
-            let response = Self::fetch_models(client, llm_api_token).await?;
-            this.update(cx, |this, cx| {
-                this.update_models(response, cx);
-            })
+            let response =
+                Self::fetch_models(client.clone(), llm_api_token.clone()).await?;
+            this.update(cx, |this, cx| this.update_models(response, cx))?;
+            anyhow::Ok(())
         })
         .await
         .context("failed to fetch Zed models")
```
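This hunk swaps a 100 ms polling loop for a wait on the `current_user` watch channel created in the first hunk. Below is a minimal sketch of that pattern, using tokio's `watch` channel as a stand-in for Zed's internal receiver type (the real diff calls `next()` on it; `changed()` plays that role here):

```rust
// Sketch only: Zed's `watch_current_user()` returns its own receiver type;
// tokio::sync::watch stands in here to show the shape of the change.
use tokio::sync::watch;

async fn wait_for_user(mut current_user: watch::Receiver<Option<String>>) {
    // Suspend until a user is published, instead of re-checking every 100 ms.
    while current_user.borrow().is_none() {
        if current_user.changed().await.is_err() {
            return; // sender dropped; no user will ever arrive
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = watch::channel(None);
    let waiter = tokio::spawn(wait_for_user(rx));
    tx.send(Some("signed-in-user".to_string())).unwrap();
    waiter.await.unwrap();
}
```

The upside over the removed `timer(Duration::from_millis(100))` loop is that the task wakes exactly when the user value changes, with no polling latency and no wasted wakeups.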
```diff
@@ -1267,8 +1260,16 @@ impl Render for ConfigurationView {
 }
 
 impl Component for ZedAiConfiguration {
     fn name() -> &'static str {
         "AI Configuration Content"
     }
 
+    fn sort_name() -> &'static str {
+        "AI Configuration Content"
+    }
+
     fn scope() -> ComponentScope {
-        ComponentScope::Agent
+        ComponentScope::Onboarding
     }
 
     fn preview(_window: &mut Window, _cx: &mut App) -> Option<AnyElement> {
```
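For context, `sort_name` lets a component sort under a different key than its display name in the component preview gallery. A toy reconstruction of the shape of that hook (not Zed's actual `Component` trait, which has more methods):

```rust
// Toy trait, not Zed's real `Component`: it only illustrates how a
// `sort_name` hook with a `name`-based default might be shaped.
trait Component {
    fn name() -> &'static str;

    // Defaults to the display name; implementors override it when the
    // gallery should order the component under a different key.
    fn sort_name() -> &'static str {
        Self::name()
    }
}

struct ZedAiConfiguration;

impl Component for ZedAiConfiguration {
    fn name() -> &'static str {
        "AI Configuration Content"
    }
}

fn main() {
    assert_eq!(ZedAiConfiguration::sort_name(), "AI Configuration Content");
}
```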
```diff
@@ -674,6 +674,10 @@ pub fn count_open_ai_tokens(
             | Model::O3
             | Model::O3Mini
             | Model::O4Mini => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
+            // GPT-5 models don't have tiktoken support yet; fall back on gpt-4o tokenizer
+            Model::Five | Model::FiveMini | Model::FiveNano => {
+                tiktoken_rs::num_tokens_from_messages("gpt-4o", &messages)
+            }
         }
         .map(|tokens| tokens as u64)
     })
```
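This hunk hard-codes the gpt-4o tokenizer for GPT-5 models because tiktoken-rs has no entry for them yet. A hedged sketch of the same fallback done dynamically rather than per-variant (it assumes `num_tokens_from_messages` errors on unrecognized model ids, and the message field layout follows tiktoken-rs 0.5.x; verify both against the pinned version):

```rust
// Sketch under assumptions: unknown model ids make
// `num_tokens_from_messages` return Err, and fields match tiktoken-rs 0.5.x.
use tiktoken_rs::{num_tokens_from_messages, ChatCompletionRequestMessage};

fn count_tokens(
    model_id: &str,
    messages: &[ChatCompletionRequestMessage],
) -> anyhow::Result<u64> {
    // Prefer the model's own tokenizer; fall back to gpt-4o's when the id
    // is unknown, mirroring the hard-coded GPT-5 arm in the diff.
    let tokens = num_tokens_from_messages(model_id, messages)
        .or_else(|_| num_tokens_from_messages("gpt-4o", messages))?;
    Ok(tokens as u64)
}

fn main() -> anyhow::Result<()> {
    let messages = vec![ChatCompletionRequestMessage {
        role: "user".to_string(),
        content: Some("Hello!".to_string()),
        name: None,
        function_call: None,
    }];
    println!("{} tokens", count_tokens("gpt-5", &messages)?);
    Ok(())
}
```

The explicit match in the diff trades this flexibility for an exhaustiveness check: adding a new `Model` variant forces the author to decide which tokenizer it should use.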