Ensure thread's model is initialized once settings are loaded

Also, avoid showing token threshold warning when thread has no model.

Co-authored-by: Michael Sloan <mgsloan@gmail.com>
This commit is contained in:
Max Brunsfeld 2025-04-28 17:27:56 -07:00
parent 17903a0999
commit bc665b2a76
3 changed files with 33 additions and 11 deletions

View file

@@ -268,6 +268,7 @@ pub struct AssistantPanel {
     thread: Entity<ActiveThread>,
     message_editor: Entity<MessageEditor>,
     _active_thread_subscriptions: Vec<Subscription>,
+    _default_model_subscription: Subscription,
     context_store: Entity<assistant_context_editor::ContextStore>,
     prompt_store: Option<Entity<PromptStore>>,
     configuration: Option<Entity<AssistantConfiguration>>,
@@ -408,6 +409,20 @@ impl AssistantPanel {
             }
         });

+        let _default_model_subscription = cx.subscribe(
+            &LanguageModelRegistry::global(cx),
+            |this, _, event: &language_model::Event, cx| match event {
+                language_model::Event::DefaultModelChanged => {
+                    this.thread
+                        .read(cx)
+                        .thread()
+                        .clone()
+                        .update(cx, |thread, cx| thread.get_or_init_configured_model(cx));
+                }
+                _ => {}
+            },
+        );
+
         Self {
             active_view,
             workspace,
@@ -423,6 +438,7 @@ impl AssistantPanel {
                 active_thread_subscription,
                 message_editor_subscription,
             ],
+            _default_model_subscription,
             context_store,
             prompt_store,
             configuration: None,
@@ -1274,7 +1290,8 @@ impl AssistantPanel {
         let is_generating = thread.is_generating();
         let message_editor = self.message_editor.read(cx);

-        let conversation_token_usage = thread.total_token_usage();
+        let conversation_token_usage = thread.total_token_usage()?;
         let (total_token_usage, is_estimating) = if let Some((editing_message_id, unsent_tokens)) =
             self.thread.read(cx).editing_message_id()
         {

View file

@@ -1134,8 +1134,11 @@ impl Render for MessageEditor {
     fn render(&mut self, window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
         let thread = self.thread.read(cx);
-        let total_token_usage = thread.total_token_usage();
-        let token_usage_ratio = total_token_usage.ratio();
+        let token_usage_ratio = thread
+            .total_token_usage()
+            .map_or(TokenUsageRatio::Normal, |total_token_usage| {
+                total_token_usage.ratio()
+            });

         let action_log = self.thread.read(cx).action_log();
         let changed_buffers = action_log.read(cx).changed_buffers(cx);

View file

@ -272,7 +272,11 @@ impl TotalTokenUsage {
#[cfg(not(debug_assertions))] #[cfg(not(debug_assertions))]
let warning_threshold: f32 = 0.8; let warning_threshold: f32 = 0.8;
if self.total >= self.max { // When the maximum is unknown because there is no selected model,
// avoid showing the token limit warning.
if self.max == 0 {
TokenUsageRatio::Normal
} else if self.total >= self.max {
TokenUsageRatio::Exceeded TokenUsageRatio::Exceeded
} else if self.total as f32 / self.max as f32 >= warning_threshold { } else if self.total as f32 / self.max as f32 >= warning_threshold {
TokenUsageRatio::Warning TokenUsageRatio::Warning
@@ -2323,19 +2327,17 @@ impl Thread {
         }
     }

-    pub fn total_token_usage(&self) -> TotalTokenUsage {
-        let Some(model) = self.configured_model.as_ref() else {
-            return TotalTokenUsage::default();
-        };
+    pub fn total_token_usage(&self) -> Option<TotalTokenUsage> {
+        let model = self.configured_model.as_ref()?;

         let max = model.model.max_token_count();

         if let Some(exceeded_error) = &self.exceeded_window_error {
             if model.model.id() == exceeded_error.model_id {
-                return TotalTokenUsage {
+                return Some(TotalTokenUsage {
                     total: exceeded_error.token_count,
                     max,
-                };
+                });
             }
         }
@@ -2344,7 +2346,7 @@ impl Thread {
             .unwrap_or_default()
             .total_tokens() as usize;

-        TotalTokenUsage { total, max }
+        Some(TotalTokenUsage { total, max })
     }

     fn token_usage_at_last_message(&self) -> Option<TokenUsage> {