Show progress as the agent locates which range it needs to edit (#31582)

Release Notes:

- Improved latency when the agent starts streaming edits.

---------

Co-authored-by: Ben Brandt <benjamin.j.brandt@gmail.com>
This commit is contained in:
Antonio Scandurra 2025-05-28 14:32:54 +02:00 committed by GitHub
parent 94a5fe265d
commit 4f78165ee8
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
13 changed files with 1342 additions and 660 deletions

View file

@@ -107,14 +107,18 @@ impl FakeLanguageModel {
self.current_completion_txs.lock().len()
}
pub fn stream_completion_response(&self, request: &LanguageModelRequest, chunk: String) {
pub fn stream_completion_response(
&self,
request: &LanguageModelRequest,
chunk: impl Into<String>,
) {
let current_completion_txs = self.current_completion_txs.lock();
let tx = current_completion_txs
.iter()
.find(|(req, _)| req == request)
.map(|(_, tx)| tx)
.unwrap();
tx.unbounded_send(chunk).unwrap();
tx.unbounded_send(chunk.into()).unwrap();
}
pub fn end_completion_stream(&self, request: &LanguageModelRequest) {
@@ -123,7 +127,7 @@ impl FakeLanguageModel {
.retain(|(req, _)| req != request);
}
pub fn stream_last_completion_response(&self, chunk: String) {
pub fn stream_last_completion_response(&self, chunk: impl Into<String>) {
self.stream_completion_response(self.pending_completions().last().unwrap(), chunk);
}