agent: Fix max token count mismatch when not using burn mode (#34025)

Closes #31854

Release Notes:

- agent: Fixed an issue where the maximum token count would be displayed
incorrectly when burn mode was not being used.
This commit is contained in:
Bennet Bo Fenner 2025-07-07 23:13:24 +02:00 committed by GitHub
parent a9107dfaeb
commit 66a1c356bf
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
7 changed files with 64 additions and 17 deletions

View file

@ -730,6 +730,13 @@ impl LanguageModel for CloudLanguageModel {
self.model.max_token_count as u64
}
fn max_token_count_in_burn_mode(&self) -> Option<u64> {
    // Burn mode ("max mode") can raise the usable context window. Report the
    // raised limit only for models that actually support max mode; otherwise
    // callers should fall back to the regular `max_token_count`.
    if self.model.supports_max_mode {
        self.model
            .max_token_count_in_max_mode
            .map(|count| count as u64)
    } else {
        None
    }
}
fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
match &self.model.provider {
zed_llm_client::LanguageModelProvider::Anthropic => {