agent: Add several UX improvements (#29828)

Still a work in progress.

Release Notes:

- N/A

---------

Co-authored-by: Bennet Bo Fenner <bennetbo@gmx.de>
Co-authored-by: Nathan Sobo <nathan@zed.dev>
Co-authored-by: Cole Miller <m@cole-miller.net>
Co-authored-by: Nathan Sobo <1789+nathansobo@users.noreply.github.com>
Co-authored-by: Cole Miller <53574922+cole-miller@users.noreply.github.com>
Danilo Leal, 2025-05-02 22:00:55 -03:00, committed by GitHub
commit 10a7f2a972 (parent 5053562e28)
11 changed files with 219 additions and 109 deletions

@@ -358,6 +358,7 @@ pub struct Thread {
     feedback: Option<ThreadFeedback>,
     message_feedback: HashMap<MessageId, ThreadFeedback>,
     last_auto_capture_at: Option<Instant>,
+    last_received_chunk_at: Option<Instant>,
     request_callback: Option<
         Box<dyn FnMut(&LanguageModelRequest, &[Result<LanguageModelCompletionEvent, String>])>,
     >,
@@ -419,6 +420,7 @@ impl Thread {
             feedback: None,
             message_feedback: HashMap::default(),
             last_auto_capture_at: None,
+            last_received_chunk_at: None,
             request_callback: None,
             remaining_turns: u32::MAX,
             configured_model,
@@ -525,6 +527,7 @@ impl Thread {
             feedback: None,
             message_feedback: HashMap::default(),
             last_auto_capture_at: None,
+            last_received_chunk_at: None,
             request_callback: None,
             remaining_turns: u32::MAX,
             configured_model,
@@ -632,6 +635,19 @@ impl Thread {
         !self.pending_completions.is_empty() || !self.all_tools_finished()
     }
 
+    /// Indicates whether streaming of language model events is stale.
+    /// When `is_generating()` is false, this method returns `None`.
+    pub fn is_generation_stale(&self) -> Option<bool> {
+        const STALE_THRESHOLD: u128 = 250;
+
+        self.last_received_chunk_at
+            .map(|instant| instant.elapsed().as_millis() > STALE_THRESHOLD)
+    }
+
+    fn received_chunk(&mut self) {
+        self.last_received_chunk_at = Some(Instant::now());
+    }
+
     pub fn queue_state(&self) -> Option<QueueState> {
         self.pending_completions
             .first()
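The new staleness check is self-contained enough to model outside the diff: an `Option<Instant>` that is `None` while idle, refreshed on every chunk, and compared against a fixed 250 ms threshold. A minimal runnable sketch of that pattern (the `StreamTracker` name and the `main` driver are illustrative; the two method bodies mirror the hunk above):

```rust
use std::time::{Duration, Instant};

/// Minimal model of the staleness tracking added to `Thread`.
struct StreamTracker {
    last_received_chunk_at: Option<Instant>,
}

impl StreamTracker {
    /// `None` while nothing is streaming; `Some(true)` once no chunk
    /// has arrived for more than 250 ms.
    fn is_generation_stale(&self) -> Option<bool> {
        const STALE_THRESHOLD: u128 = 250;
        self.last_received_chunk_at
            .map(|instant| instant.elapsed().as_millis() > STALE_THRESHOLD)
    }

    fn received_chunk(&mut self) {
        self.last_received_chunk_at = Some(Instant::now());
    }
}

fn main() {
    let mut tracker = StreamTracker { last_received_chunk_at: None };
    assert_eq!(tracker.is_generation_stale(), None); // idle

    tracker.received_chunk();
    assert_eq!(tracker.is_generation_stale(), Some(false)); // fresh chunk

    std::thread::sleep(Duration::from_millis(300));
    assert_eq!(tracker.is_generation_stale(), Some(true)); // stream went quiet
}
```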
@@ -1328,6 +1344,8 @@ impl Thread {
             prompt_id: prompt_id.clone(),
         };
 
+        self.last_received_chunk_at = Some(Instant::now());
+
         let task = cx.spawn(async move |thread, cx| {
             let stream_completion_future = model.stream_completion_with_usage(request, &cx);
             let initial_token_usage =
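Priming the timestamp before the task spawns matters: `Option::map` over `None` yields `None`, so without this line a model that is slow to produce its first token would be indistinguishable from not generating at all. A toy illustration of the difference (the `staleness` helper is hypothetical, not the commit's code):

```rust
use std::time::Instant;

// Hypothetical standalone version of the check, for illustration only.
fn staleness(last_chunk: Option<Instant>, threshold_ms: u128) -> Option<bool> {
    last_chunk.map(|t| t.elapsed().as_millis() > threshold_ms)
}

fn main() {
    // Not primed: a slow first token reads as "not generating".
    assert_eq!(staleness(None, 250), None);

    // Primed at request start: the wait for the first token counts as stale.
    let primed = Some(Instant::now() - std::time::Duration::from_millis(400));
    assert_eq!(staleness(primed, 250), Some(true));
}
```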
@@ -1398,6 +1416,8 @@ impl Thread {
                             current_token_usage = token_usage;
                         }
                         LanguageModelCompletionEvent::Text(chunk) => {
+                            thread.received_chunk();
+
                             cx.emit(ThreadEvent::ReceivedTextChunk);
                             if let Some(last_message) = thread.messages.last_mut() {
                                 if last_message.role == Role::Assistant
@@ -1426,6 +1446,8 @@ impl Thread {
                             text: chunk,
                             signature,
                         } => {
+                            thread.received_chunk();
+
                             if let Some(last_message) = thread.messages.last_mut() {
                                 if last_message.role == Role::Assistant
                                     && !thread.tool_use.has_tool_results(last_message.id)
@@ -1512,6 +1534,7 @@ impl Thread {
                 }
 
                 thread.update(cx, |thread, cx| {
+                    thread.last_received_chunk_at = None;
                     thread
                         .pending_completions
                         .retain(|completion| completion.id != pending_completion_id);
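Taken together, the hunks give the timestamp a full lifecycle: primed when the request is sent, refreshed on every text or thinking chunk, and reset to `None` once the pending completion is retired. One plausible consumer is a UI indicator that distinguishes the three resulting states; in the sketch below, `indicator_label` and the label strings are hypothetical, and only the `Option<bool>` contract comes from the commit:

```rust
/// Maps the `Option<bool>` returned by a check like `is_generation_stale`
/// onto a label a panel could render. Hypothetical helper, not commit code.
fn indicator_label(staleness: Option<bool>) -> Option<&'static str> {
    match staleness {
        None => None,                             // idle: nothing to show
        Some(false) => Some("Generating…"),       // chunks arriving steadily
        Some(true) => Some("Waiting for model…"), // stream has gone quiet
    }
}

fn main() {
    assert_eq!(indicator_label(None), None);
    assert_eq!(indicator_label(Some(false)), Some("Generating…"));
    assert_eq!(indicator_label(Some(true)), Some("Waiting for model…"));
}
```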