Add support for queuing status updates in cloud language model provider (#29818)

This sets us up to display queue position information to the user, once
our language model backend is updated to support request queuing.

The stream of JSON events returned by the LLM backend will need to look like this:

```json
{"queue": {"status": "queued", "position": 1}}
{"queue": {"status": "started"}}
{"event": {"THE_UPSTREAM_MODEL_PROVIDER_EVENT": "..."}} 
```
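For illustration, here is a minimal serde sketch of how a client might decode that stream, assuming one JSON event per line; the type and variant names are hypothetical, not the provider's actual definitions:

```rust
use serde::Deserialize;
use serde_json::Value;

// Hypothetical mirror of the wire format above (names are illustrative).
#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
enum WireEvent {
    // {"queue": {"status": "queued", "position": 1}} or {"queue": {"status": "started"}}
    Queue(QueueStatus),
    // {"event": {...}}: the upstream model provider's event, passed through untouched.
    Event(Value),
}

#[derive(Debug, Deserialize)]
#[serde(tag = "status", rename_all = "snake_case")]
enum QueueStatus {
    Queued { position: usize },
    Started,
}

// Decode a single line of the streamed response.
fn parse_line(line: &str) -> serde_json::Result<WireEvent> {
    serde_json::from_str(line)
}
```

Running the three example lines through `parse_line` would yield `Queue(Queued { position: 1 })`, `Queue(Started)`, and `Event(…)`, respectively.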

Release Notes:

- N/A

---------

Co-authored-by: Marshall Bowers <git@maxdeviant.com>

Commit 04772bf17d (parent 4d1df7bcd7)
Max Brunsfeld, 2025-05-02 13:36:39 -07:00
9 changed files with 492 additions and 430 deletions

`active_thread.rs`:

```diff
@@ -4,8 +4,8 @@ use crate::context_store::ContextStore;
 use crate::context_strip::{ContextStrip, ContextStripEvent, SuggestContextKind};
 use crate::message_editor::insert_message_creases;
 use crate::thread::{
-    LastRestoreCheckpoint, MessageCrease, MessageId, MessageSegment, Thread, ThreadError,
-    ThreadEvent, ThreadFeedback,
+    LastRestoreCheckpoint, MessageCrease, MessageId, MessageSegment, QueueState, Thread,
+    ThreadError, ThreadEvent, ThreadFeedback,
 };
 use crate::thread_store::{RulesLoadingError, ThreadStore};
 use crate::tool_use::{PendingToolUseStatus, ToolUse};
@@ -1733,8 +1733,27 @@ impl ActiveThread {
         let show_feedback = thread.is_turn_end(ix);
-        let generating_label = (is_generating && is_last_message)
-            .then(|| AnimatedLabel::new("Generating").size(LabelSize::Small));
+        let generating_label = is_last_message
+            .then(|| match (thread.queue_state(), is_generating) {
+                (Some(QueueState::Sending), _) => Some(
+                    AnimatedLabel::new("Sending")
+                        .size(LabelSize::Small)
+                        .into_any_element(),
+                ),
+                (Some(QueueState::Queued { position }), _) => Some(
+                    Label::new(format!("Queue position: {position}"))
+                        .size(LabelSize::Small)
+                        .color(Color::Muted)
+                        .into_any_element(),
+                ),
+                (_, true) => Some(
+                    AnimatedLabel::new("Generating")
+                        .size(LabelSize::Small)
+                        .into_any_element(),
+                ),
+                _ => None,
+            })
+            .flatten();
 
         let editing_message_state = self
             .editing_message
@@ -2105,7 +2124,7 @@ impl ActiveThread {
                 parent.child(self.render_rules_item(cx))
             })
             .child(styled_message)
-            .when(generating_label.is_some(), |this| {
+            .when_some(generating_label, |this, generating_label| {
                 this.child(
                     h_flex()
                         .h_8()
@@ -2113,7 +2132,7 @@
                         .mb_4()
                         .ml_4()
                         .py_1p5()
-                        .child(generating_label.unwrap()),
+                        .child(generating_label),
                 )
             })
             .when(show_feedback, move |parent| {
```
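The precedence in the new `generating_label` match is: an explicit queue state (`Sending` or `Queued`) wins over the generic generating indicator, while `Started` falls through to it. A self-contained sketch of the same decision (the `label_text` helper is hypothetical, not part of this change):

```rust
// Standalone restatement of the label precedence above: queue state takes
// priority over the generic "Generating" indicator, and `Started` falls
// through to it.
#[derive(Clone, Copy)]
enum QueueState {
    Sending,
    Queued { position: usize },
    Started,
}

fn label_text(queue_state: Option<QueueState>, is_generating: bool) -> Option<String> {
    match (queue_state, is_generating) {
        (Some(QueueState::Sending), _) => Some("Sending".into()),
        (Some(QueueState::Queued { position }), _) => Some(format!("Queue position: {position}")),
        (_, true) => Some("Generating".into()),
        _ => None,
    }
}

fn main() {
    assert_eq!(
        label_text(Some(QueueState::Queued { position: 3 }), true).as_deref(),
        Some("Queue position: 3")
    );
    // `Started` has no dedicated label; it shows "Generating" while generating.
    assert_eq!(
        label_text(Some(QueueState::Started), true).as_deref(),
        Some("Generating")
    );
    assert_eq!(label_text(None, false), None);
}
```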

`thread.rs`:

```diff
@@ -320,6 +320,13 @@ fn default_completion_mode(cx: &App) -> CompletionMode {
     }
 }
 
+#[derive(Debug, Clone, Copy)]
+pub enum QueueState {
+    Sending,
+    Queued { position: usize },
+    Started,
+}
+
 /// A thread of conversation with the LLM.
 pub struct Thread {
     id: ThreadId,
@@ -625,6 +632,12 @@ impl Thread {
         !self.pending_completions.is_empty() || !self.all_tools_finished()
     }
 
+    pub fn queue_state(&self) -> Option<QueueState> {
+        self.pending_completions
+            .first()
+            .map(|pending_completion| pending_completion.queue_state)
+    }
+
     pub fn tools(&self) -> &Entity<ToolWorkingSet> {
         &self.tools
     }
@@ -1470,6 +1483,20 @@ impl Thread {
                                 });
                             }
                         }
+                        LanguageModelCompletionEvent::QueueUpdate(queue_event) => {
+                            if let Some(completion) = thread
+                                .pending_completions
+                                .iter_mut()
+                                .find(|completion| completion.id == pending_completion_id)
+                            {
+                                completion.queue_state = match queue_event {
+                                    language_model::QueueState::Queued { position } => {
+                                        QueueState::Queued { position }
+                                    }
+                                    language_model::QueueState::Started => QueueState::Started,
+                                }
+                            }
+                        }
                     }
 
                     thread.touch_updated_at();
@@ -1590,6 +1617,7 @@ impl Thread {
         self.pending_completions.push(PendingCompletion {
             id: pending_completion_id,
+            queue_state: QueueState::Sending,
             _task: task,
         });
     }
@@ -2499,6 +2527,7 @@ impl EventEmitter<ThreadEvent> for Thread {}
 
 struct PendingCompletion {
     id: usize,
+    queue_state: QueueState,
     _task: Task<()>,
 }
```
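For context, a sketch of how the cloud provider side might map the wire format at the top of this PR onto the event the thread consumes. This builds on the illustrative `WireEvent`/`QueueStatus` types from the earlier sketch; only `LanguageModelCompletionEvent::QueueUpdate` and `language_model::QueueState` come from the actual change:

```rust
// Sketch of the provider-side mapping from the wire format to the event the
// thread handles above. `WireEvent`/`QueueStatus` are the illustrative types
// from the earlier JSON sketch; `LanguageModelCompletionEvent::QueueUpdate`
// and `language_model::QueueState` are the real types this change introduces.
fn to_completion_event(event: WireEvent) -> Option<LanguageModelCompletionEvent> {
    match event {
        WireEvent::Queue(QueueStatus::Queued { position }) => Some(
            LanguageModelCompletionEvent::QueueUpdate(language_model::QueueState::Queued {
                position,
            }),
        ),
        WireEvent::Queue(QueueStatus::Started) => Some(
            LanguageModelCompletionEvent::QueueUpdate(language_model::QueueState::Started),
        ),
        // Upstream provider events flow through the pre-existing event mapping.
        WireEvent::Event(_) => None,
    }
}
```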