Ensure thread's model is initialized once settings are loaded

Also, avoid showing the token threshold warning when the thread has no model.

Co-authored-by: Michael Sloan <mgsloan@gmail.com>
Max Brunsfeld 2025-04-28 17:27:56 -07:00
parent 17903a0999
commit bc665b2a76
3 changed files with 33 additions and 11 deletions

@@ -272,7 +272,11 @@ impl TotalTokenUsage {
         #[cfg(not(debug_assertions))]
         let warning_threshold: f32 = 0.8;
 
-        if self.total >= self.max {
+        // When the maximum is unknown because there is no selected model,
+        // avoid showing the token limit warning.
+        if self.max == 0 {
+            TokenUsageRatio::Normal
+        } else if self.total >= self.max {
             TokenUsageRatio::Exceeded
         } else if self.total as f32 / self.max as f32 >= warning_threshold {
             TokenUsageRatio::Warning
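
A minimal sketch of how the new guard could be exercised in a test, assuming the surrounding method is named ratio() and that TokenUsageRatio derives PartialEq and Debug (neither is shown in this hunk):

#[test]
fn no_warning_without_a_selected_model() {
    // Hypothetical test: the field and variant names come from the diff above,
    // but the method name `ratio()` is an assumption.
    // With no selected model the maximum is unknown (0), so no warning is shown.
    let usage = TotalTokenUsage { total: 1_500, max: 0 };
    assert_eq!(usage.ratio(), TokenUsageRatio::Normal);

    // With a known maximum, reaching it still reports Exceeded.
    let usage = TotalTokenUsage { total: 1_000, max: 1_000 };
    assert_eq!(usage.ratio(), TokenUsageRatio::Exceeded);
}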
@@ -2323,19 +2327,17 @@ impl Thread {
         }
     }
 
-    pub fn total_token_usage(&self) -> TotalTokenUsage {
-        let Some(model) = self.configured_model.as_ref() else {
-            return TotalTokenUsage::default();
-        };
+    pub fn total_token_usage(&self) -> Option<TotalTokenUsage> {
+        let model = self.configured_model.as_ref()?;
 
         let max = model.model.max_token_count();
 
         if let Some(exceeded_error) = &self.exceeded_window_error {
             if model.model.id() == exceeded_error.model_id {
-                return TotalTokenUsage {
+                return Some(TotalTokenUsage {
                     total: exceeded_error.token_count,
                     max,
-                };
+                });
             }
         }
@@ -2344,7 +2346,7 @@ impl Thread {
             .unwrap_or_default()
             .total_tokens() as usize;
 
-        TotalTokenUsage { total, max }
+        Some(TotalTokenUsage { total, max })
     }
 
     fn token_usage_at_last_message(&self) -> Option<TokenUsage> {
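
Since total_token_usage() now returns an Option, call sites that previously received a zeroed TotalTokenUsage when no model was configured have to decide how to treat the missing value. A minimal sketch of one possible caller; the function name render_token_count is hypothetical and not part of this change:

// Hypothetical call site, not taken from the changed files.
fn render_token_count(thread: &Thread) -> Option<String> {
    // `?` skips the indicator entirely when no model is configured,
    // instead of rendering a meaningless "0 / 0".
    let usage = thread.total_token_usage()?;
    Some(format!("{} / {}", usage.total, usage.max))
}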