agent: Log errors from token counting APIs (#29626)

Release Notes:

- N/A
This commit is contained in:
Michael Sloan 2025-04-29 15:55:54 -06:00 committed by GitHub
parent f7a3e00074
commit 33abf1ee7c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 100 additions and 86 deletions

View file

@@ -702,7 +702,7 @@ struct EditMessageState {
editor: Entity<Editor>, editor: Entity<Editor>,
last_estimated_token_count: Option<usize>, last_estimated_token_count: Option<usize>,
_subscription: Subscription, _subscription: Subscription,
_update_token_count_task: Option<Task<anyhow::Result<()>>>, _update_token_count_task: Option<Task<()>>,
} }
impl ActiveThread { impl ActiveThread {
@@ -1268,58 +1268,65 @@ impl ActiveThread {
.await; .await;
} }
let token_count = if let Some(task) = cx.update(|cx| { let token_count = if let Some(task) = cx
let Some(message) = thread.read(cx).message(message_id) else { .update(|cx| {
log::error!("Message that was being edited no longer exists"); let Some(message) = thread.read(cx).message(message_id) else {
return None; log::error!("Message that was being edited no longer exists");
}; return None;
let message_text = editor.read(cx).text(cx); };
let message_text = editor.read(cx).text(cx);
if message_text.is_empty() && message.loaded_context.is_empty() { if message_text.is_empty() && message.loaded_context.is_empty() {
return None; return None;
} }
let mut request_message = LanguageModelRequestMessage { let mut request_message = LanguageModelRequestMessage {
role: language_model::Role::User, role: language_model::Role::User,
content: Vec::new(), content: Vec::new(),
cache: false, cache: false,
}; };
message message
.loaded_context .loaded_context
.add_to_request_message(&mut request_message); .add_to_request_message(&mut request_message);
if !message_text.is_empty() { if !message_text.is_empty() {
request_message request_message
.content .content
.push(MessageContent::Text(message_text)); .push(MessageContent::Text(message_text));
} }
let request = language_model::LanguageModelRequest { let request = language_model::LanguageModelRequest {
thread_id: None, thread_id: None,
prompt_id: None, prompt_id: None,
mode: None, mode: None,
messages: vec![request_message], messages: vec![request_message],
tools: vec![], tools: vec![],
stop: vec![], stop: vec![],
temperature: None, temperature: None,
}; };
Some(configured_model.model.count_tokens(request, cx)) Some(configured_model.model.count_tokens(request, cx))
})? { })
task.await? .ok()
.flatten()
{
task.await.log_err()
} else { } else {
0 Some(0)
}; };
this.update(cx, |this, cx| { if let Some(token_count) = token_count {
let Some((_message_id, state)) = this.editing_message.as_mut() else { this.update(cx, |this, cx| {
return; let Some((_message_id, state)) = this.editing_message.as_mut() else {
}; return;
};
state.last_estimated_token_count = Some(token_count); state.last_estimated_token_count = Some(token_count);
cx.emit(ActiveThreadEvent::EditingMessageTokenCountChanged); cx.emit(ActiveThreadEvent::EditingMessageTokenCountChanged);
}) })
.ok();
};
})); }));
} }

View file

@@ -63,7 +63,7 @@ pub struct MessageEditor {
edits_expanded: bool, edits_expanded: bool,
editor_is_expanded: bool, editor_is_expanded: bool,
last_estimated_token_count: Option<usize>, last_estimated_token_count: Option<usize>,
update_token_count_task: Option<Task<anyhow::Result<()>>>, update_token_count_task: Option<Task<()>>,
_subscriptions: Vec<Subscription>, _subscriptions: Vec<Subscription>,
} }
@@ -1088,57 +1088,64 @@ impl MessageEditor {
.await; .await;
} }
let token_count = if let Some(task) = this.update(cx, |this, cx| { let token_count = if let Some(task) = this
let loaded_context = this .update(cx, |this, cx| {
.last_loaded_context let loaded_context = this
.as_ref() .last_loaded_context
.map(|context_load_result| &context_load_result.loaded_context); .as_ref()
let message_text = editor.read(cx).text(cx); .map(|context_load_result| &context_load_result.loaded_context);
let message_text = editor.read(cx).text(cx);
if message_text.is_empty() if message_text.is_empty()
&& loaded_context.map_or(true, |loaded_context| loaded_context.is_empty()) && loaded_context.map_or(true, |loaded_context| loaded_context.is_empty())
{ {
return None; return None;
} }
let mut request_message = LanguageModelRequestMessage { let mut request_message = LanguageModelRequestMessage {
role: language_model::Role::User, role: language_model::Role::User,
content: Vec::new(), content: Vec::new(),
cache: false, cache: false,
}; };
if let Some(loaded_context) = loaded_context { if let Some(loaded_context) = loaded_context {
loaded_context.add_to_request_message(&mut request_message); loaded_context.add_to_request_message(&mut request_message);
} }
if !message_text.is_empty() { if !message_text.is_empty() {
request_message request_message
.content .content
.push(MessageContent::Text(message_text)); .push(MessageContent::Text(message_text));
} }
let request = language_model::LanguageModelRequest { let request = language_model::LanguageModelRequest {
thread_id: None, thread_id: None,
prompt_id: None, prompt_id: None,
mode: None, mode: None,
messages: vec![request_message], messages: vec![request_message],
tools: vec![], tools: vec![],
stop: vec![], stop: vec![],
temperature: None, temperature: None,
}; };
Some(model.model.count_tokens(request, cx)) Some(model.model.count_tokens(request, cx))
})? { })
task.await? .ok()
.flatten()
{
task.await.log_err()
} else { } else {
0 Some(0)
}; };
this.update(cx, |this, cx| { if let Some(token_count) = token_count {
this.last_estimated_token_count = Some(token_count); this.update(cx, |this, cx| {
cx.emit(MessageEditorEvent::EstimatedTokenCount); this.last_estimated_token_count = Some(token_count);
this.update_token_count_task.take(); cx.emit(MessageEditorEvent::EstimatedTokenCount);
}) this.update_token_count_task.take();
})
.ok();
}
})); }));
} }
} }