Ensure thread's model is initialized once settings are loaded

Also, avoid showing the token threshold warning when the thread has no model.

Co-authored-by: Michael Sloan <mgsloan@gmail.com>
Max Brunsfeld 2025-04-28 17:27:56 -07:00
parent 17903a0999
commit bc665b2a76
3 changed files with 33 additions and 11 deletions
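
The first file below is the assistant panel. It gains a _default_model_subscription field so that, when the language model registry's default model changes (for example, once settings finish loading), the active thread's configured model gets initialized. A minimal sketch of the GPUI idiom involved, assuming it runs inside the panel's constructor: cx.subscribe returns a Subscription guard that is cancelled when dropped, which is why the value is kept in the underscore-prefixed struct field.

// Sketch only: subscribe to the global LanguageModelRegistry and react to
// DefaultModelChanged. Storing the returned Subscription keeps it alive.
let _default_model_subscription = cx.subscribe(
    &LanguageModelRegistry::global(cx),
    |_this, _registry, event: &language_model::Event, _cx| {
        if let language_model::Event::DefaultModelChanged = event {
            // Re-initialize the active thread's model here (see the hunk below).
        }
    },
);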

@@ -268,6 +268,7 @@ pub struct AssistantPanel {
    thread: Entity<ActiveThread>,
    message_editor: Entity<MessageEditor>,
    _active_thread_subscriptions: Vec<Subscription>,
    _default_model_subscription: Subscription,
    context_store: Entity<assistant_context_editor::ContextStore>,
    prompt_store: Option<Entity<PromptStore>>,
    configuration: Option<Entity<AssistantConfiguration>>,
@@ -408,6 +409,20 @@ impl AssistantPanel {
            }
        });
        let _default_model_subscription = cx.subscribe(
            &LanguageModelRegistry::global(cx),
            |this, _, event: &language_model::Event, cx| match event {
                language_model::Event::DefaultModelChanged => {
                    this.thread
                        .read(cx)
                        .thread()
                        .clone()
                        .update(cx, |thread, cx| thread.get_or_init_configured_model(cx));
                }
                _ => {}
            },
        );
        Self {
            active_view,
            workspace,
@@ -423,6 +438,7 @@ impl AssistantPanel {
                active_thread_subscription,
                message_editor_subscription,
            ],
            _default_model_subscription,
            context_store,
            prompt_store,
            configuration: None,
@@ -1274,7 +1290,8 @@ impl AssistantPanel {
        let is_generating = thread.is_generating();
        let message_editor = self.message_editor.read(cx);
        let conversation_token_usage = thread.total_token_usage();
        let conversation_token_usage = thread.total_token_usage()?;
        let (total_token_usage, is_estimating) = if let Some((editing_message_id, unsent_tokens)) =
            self.thread.read(cx).editing_message_id()
        {
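
The second change is in the message editor's render path. total_token_usage() can now be None, so the ratio used to decide whether to show a warning falls back to TokenUsageRatio::Normal when the thread has no model. The map_or call in the hunk below is equivalent to this spelled-out match (a sketch, not part of the change):

// Equivalent to the map_or fallback in the diff: with no configured model
// there is no usage to measure, so treat the ratio as Normal.
let token_usage_ratio = match thread.total_token_usage() {
    Some(usage) => usage.ratio(),
    None => TokenUsageRatio::Normal, // no model configured, so never warn
};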

@@ -1134,8 +1134,11 @@ impl Focusable for MessageEditor
impl Render for MessageEditor {
    fn render(&mut self, window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let thread = self.thread.read(cx);
        let total_token_usage = thread.total_token_usage();
        let token_usage_ratio = total_token_usage.ratio();
        let token_usage_ratio = thread
            .total_token_usage()
            .map_or(TokenUsageRatio::Normal, |total_token_usage| {
                total_token_usage.ratio()
            });
        let action_log = self.thread.read(cx).action_log();
        let changed_buffers = action_log.read(cx).changed_buffers(cx);
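
The third file holds the Thread and TotalTokenUsage types. Two related changes land there: ratio() treats a zero maximum as "no limit known" and reports Normal instead of Exceeded, and total_token_usage() returns Option<TotalTokenUsage>, yielding None when no model is configured rather than a zeroed default. A test-style sketch of the resulting ratio() behavior, assuming the total and max fields can be filled in directly from a test in the same module and that the release-mode warning threshold of 0.8 applies:

#[cfg(test)]
mod token_usage_ratio_sketch {
    use super::*;

    #[test]
    fn no_warning_without_a_model() {
        // Unknown maximum (no selected model): never warn.
        let no_model = TotalTokenUsage { total: 1_000, max: 0 };
        assert!(matches!(no_model.ratio(), TokenUsageRatio::Normal));

        // With a real maximum, the existing thresholds still apply.
        let over_limit = TotalTokenUsage { total: 9_000, max: 8_000 };
        assert!(matches!(over_limit.ratio(), TokenUsageRatio::Exceeded));
    }
}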

@@ -272,7 +272,11 @@ impl TotalTokenUsage {
        #[cfg(not(debug_assertions))]
        let warning_threshold: f32 = 0.8;
        if self.total >= self.max {
        // When the maximum is unknown because there is no selected model,
        // avoid showing the token limit warning.
        if self.max == 0 {
            TokenUsageRatio::Normal
        } else if self.total >= self.max {
            TokenUsageRatio::Exceeded
        } else if self.total as f32 / self.max as f32 >= warning_threshold {
            TokenUsageRatio::Warning
@@ -2323,19 +2327,17 @@ impl Thread {
        }
    }
    pub fn total_token_usage(&self) -> TotalTokenUsage {
        let Some(model) = self.configured_model.as_ref() else {
            return TotalTokenUsage::default();
        };
    pub fn total_token_usage(&self) -> Option<TotalTokenUsage> {
        let model = self.configured_model.as_ref()?;
        let max = model.model.max_token_count();
        if let Some(exceeded_error) = &self.exceeded_window_error {
            if model.model.id() == exceeded_error.model_id {
                return TotalTokenUsage {
                return Some(TotalTokenUsage {
                    total: exceeded_error.token_count,
                    max,
                };
                });
            }
        }
@@ -2344,7 +2346,7 @@
            .unwrap_or_default()
            .total_tokens() as usize;
        TotalTokenUsage { total, max }
        Some(TotalTokenUsage { total, max })
    }
    fn token_usage_at_last_message(&self) -> Option<TokenUsage> {
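
A note on the design choice in the last two hunks: returning Option instead of TotalTokenUsage::default() makes the "no model selected" case explicit at each call site. The assistant panel propagates it with ?, while the message editor substitutes a Normal ratio, so neither path can end up rendering a token-limit warning against a maximum of zero.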