Lay the groundwork for collaborating on assistant panel (#13991)
This pull request introduces collaboration for the assistant panel by turning `Context` into a CRDT. `ContextStore` is responsible for sending and applying operations, as well as synchronizing missed changes while the connection was lost. Contexts are shared on a per-project basis, and only the host can share them for now. Shared contexts can be accessed via the `History` tab in the assistant panel. <img width="1819" alt="image" src="https://github.com/zed-industries/zed/assets/482957/c7ae46d2-cde3-4b03-b74a-6e9b1555c154"> Please note that this doesn't implement following yet, which is scheduled for a subsequent pull request. Release Notes: - N/A
This commit is contained in:
parent
1662993811
commit
8944af7406
25 changed files with 4232 additions and 2120 deletions
|
@ -595,6 +595,14 @@ impl Server {
|
|||
.add_message_handler(user_message_handler(acknowledge_channel_message))
|
||||
.add_message_handler(user_message_handler(acknowledge_buffer_version))
|
||||
.add_request_handler(user_handler(get_supermaven_api_key))
|
||||
.add_request_handler(user_handler(
|
||||
forward_mutating_project_request::<proto::OpenContext>,
|
||||
))
|
||||
.add_request_handler(user_handler(
|
||||
forward_mutating_project_request::<proto::SynchronizeContexts>,
|
||||
))
|
||||
.add_message_handler(broadcast_project_message_from_host::<proto::AdvertiseContexts>)
|
||||
.add_message_handler(update_context)
|
||||
.add_streaming_request_handler({
|
||||
let app_state = app_state.clone();
|
||||
move |request, response, session| {
|
||||
|
@ -3056,6 +3064,53 @@ async fn update_buffer(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Relays an assistant-panel context CRDT operation from one collaborator to
/// every other participant in the same shared project.
///
/// The capability required of the sender is derived from the operation itself:
/// operations that cannot mutate shared state (selection updates, empty
/// variants) are accepted from read-only participants, while anything that
/// edits the context requires write access.
async fn update_context(message: proto::UpdateContext, session: Session) -> Result<()> {
    let project_id = ProjectId::from_proto(message.project_id);

    // Classify the operation to decide which capability the sender must hold.
    let operation = message.operation.as_ref().context("invalid operation")?;
    let capability = match operation.variant.as_ref() {
        Some(proto::context_operation::Variant::BufferOperation(buffer_op)) => {
            if let Some(buffer_op) = buffer_op.operation.as_ref() {
                match buffer_op.variant {
                    // Selection updates (and an absent variant) don't change
                    // the buffer's contents, so read-only access suffices.
                    None | Some(proto::operation::Variant::UpdateSelections(_)) => {
                        Capability::ReadOnly
                    }
                    // Any other buffer operation (edits, undo/redo, ...) is a
                    // mutation and requires write access.
                    _ => Capability::ReadWrite,
                }
            } else {
                Capability::ReadWrite
            }
        }
        // Non-buffer context operations mutate shared context state.
        Some(_) => Capability::ReadWrite,
        None => Capability::ReadOnly,
    };

    // Fetch the host and guest connections for this project, verifying in the
    // same call that the sender holds `capability`. NOTE(review): the returned
    // guard presumably keeps the participant set stable while we broadcast —
    // confirm against the db layer's contract for
    // `connections_for_buffer_update`.
    let guard = session
        .db()
        .await
        .connections_for_buffer_update(
            project_id,
            session.principal_id(),
            session.connection_id,
            capability,
        )
        .await?;

    let (host, guests) = &*guard;

    // Forward the raw message unchanged to every participant (guests plus the
    // host) except the original sender, which `broadcast` excludes via the
    // first argument.
    broadcast(
        Some(session.connection_id),
        guests.iter().chain([host]).copied(),
        |connection_id| {
            session
                .peer
                .forward_send(session.connection_id, connection_id, message.clone())
        },
    );

    Ok(())
}
|
||||
|
||||
/// Notify other participants that a project has been updated.
|
||||
async fn broadcast_project_message_from_host<T: EntityMessage<Entity = ShareProject>>(
|
||||
request: T,
|
||||
|
|
|
@ -6,6 +6,7 @@ use crate::{
|
|||
},
|
||||
};
|
||||
use anyhow::{anyhow, Result};
|
||||
use assistant::ContextStore;
|
||||
use call::{room, ActiveCall, ParticipantLocation, Room};
|
||||
use client::{User, RECEIVE_TIMEOUT};
|
||||
use collections::{HashMap, HashSet};
|
||||
|
@ -6449,3 +6450,123 @@ async fn test_preview_tabs(cx: &mut TestAppContext) {
|
|||
assert!(!pane.can_navigate_forward());
|
||||
});
|
||||
}
|
||||
|
||||
// Verifies that assistant-panel contexts (CRDT-backed) converge between a
// host (client A) and a guest (client B):
//   1. concurrent online edits converge on both sides,
//   2. edits made while the host is disconnected are synchronized after
//      reconnection, and
//   3. the guest's copy becomes read-only once the host disconnects for good.
//
// NOTE(review): the expected strings encode the CRDT's deterministic
// tie-breaking order for concurrent insertions at offset 0 (guest edits end
// up before host edits here) — presumably by replica/lamport ordering;
// stable across the 10 iterations, but confirm against the text CRDT's
// ordering rules before relying on it elsewhere.
#[gpui::test(iterations = 10)]
async fn test_context_collaboration_with_reconnect(
    executor: BackgroundExecutor,
    cx_a: &mut TestAppContext,
    cx_b: &mut TestAppContext,
) {
    // Two clients in one room; A shares a (trivial, empty-tree) project that
    // B joins.
    let mut server = TestServer::start(executor.clone()).await;
    let client_a = server.create_client(cx_a, "user_a").await;
    let client_b = server.create_client(cx_b, "user_b").await;
    server
        .create_room(&mut [(&client_a, cx_a), (&client_b, cx_b)])
        .await;
    let active_call_a = cx_a.read(ActiveCall::global);

    client_a.fs().insert_tree("/a", Default::default()).await;
    let (project_a, _) = client_a.build_local_project("/a", cx_a).await;
    let project_id = active_call_a
        .update(cx_a, |call, cx| call.share_project(project_a.clone(), cx))
        .await
        .unwrap();
    let project_b = client_b.build_dev_server_project(project_id, cx_b).await;

    // Client A sees that a guest has joined.
    executor.run_until_parked();

    project_a.read_with(cx_a, |project, _| {
        assert_eq!(project.collaborators().len(), 1);
    });
    project_b.read_with(cx_b, |project, _| {
        assert_eq!(project.collaborators().len(), 1);
    });

    // Each client gets its own ContextStore bound to its view of the project.
    let context_store_a = cx_a
        .update(|cx| ContextStore::new(project_a.clone(), cx))
        .await
        .unwrap();
    let context_store_b = cx_b
        .update(|cx| ContextStore::new(project_b.clone(), cx))
        .await
        .unwrap();

    // Client A creates a new context.
    let context_a = context_store_a.update(cx_a, |store, cx| store.create(cx));
    executor.run_until_parked();

    // Client B retrieves host's contexts and joins one.
    let context_b = context_store_b
        .update(cx_b, |store, cx| {
            let host_contexts = store.host_contexts().to_vec();
            assert_eq!(host_contexts.len(), 1);
            store.open_remote_context(host_contexts[0].id.clone(), cx)
        })
        .await
        .unwrap();

    // Host and guest make changes
    context_a.update(cx_a, |context, cx| {
        context.buffer().update(cx, |buffer, cx| {
            buffer.edit([(0..0, "Host change\n")], None, cx)
        })
    });
    context_b.update(cx_b, |context, cx| {
        context.buffer().update(cx, |buffer, cx| {
            buffer.edit([(0..0, "Guest change\n")], None, cx)
        })
    });
    executor.run_until_parked();
    // Both replicas converge to the same text after the concurrent edits.
    assert_eq!(
        context_a.read_with(cx_a, |context, cx| context.buffer().read(cx).text()),
        "Guest change\nHost change\n"
    );
    assert_eq!(
        context_b.read_with(cx_b, |context, cx| context.buffer().read(cx).text()),
        "Guest change\nHost change\n"
    );

    // Disconnect client A and make some changes while disconnected.
    server.disconnect_client(client_a.peer_id().unwrap());
    server.forbid_connections();
    context_a.update(cx_a, |context, cx| {
        context.buffer().update(cx, |buffer, cx| {
            buffer.edit([(0..0, "Host offline change\n")], None, cx)
        })
    });
    context_b.update(cx_b, |context, cx| {
        context.buffer().update(cx, |buffer, cx| {
            buffer.edit([(0..0, "Guest offline change\n")], None, cx)
        })
    });
    executor.run_until_parked();
    // While partitioned, each side only sees its own offline edit.
    assert_eq!(
        context_a.read_with(cx_a, |context, cx| context.buffer().read(cx).text()),
        "Host offline change\nGuest change\nHost change\n"
    );
    assert_eq!(
        context_b.read_with(cx_b, |context, cx| context.buffer().read(cx).text()),
        "Guest offline change\nGuest change\nHost change\n"
    );

    // Allow client A to reconnect and verify that contexts converge.
    server.allow_connections();
    // Advance past the receive timeout so A's client notices the connection
    // is usable again and resynchronizes missed operations.
    executor.advance_clock(RECEIVE_TIMEOUT);
    assert_eq!(
        context_a.read_with(cx_a, |context, cx| context.buffer().read(cx).text()),
        "Guest offline change\nHost offline change\nGuest change\nHost change\n"
    );
    assert_eq!(
        context_b.read_with(cx_b, |context, cx| context.buffer().read(cx).text()),
        "Guest offline change\nHost offline change\nGuest change\nHost change\n"
    );

    // Client A disconnects without being able to reconnect. Context B becomes readonly.
    server.forbid_connections();
    server.disconnect_client(client_a.peer_id().unwrap());
    // Advance past both timeouts so the reconnect attempt is given up for good.
    executor.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
    context_b.read_with(cx_b, |context, cx| {
        assert!(context.buffer().read(cx).read_only());
    });
}
|
||||
|
|
|
@ -294,6 +294,8 @@ impl TestServer {
|
|||
menu::init();
|
||||
dev_server_projects::init(client.clone(), cx);
|
||||
settings::KeymapFile::load_asset(os_keymap, cx).unwrap();
|
||||
assistant::FakeCompletionProvider::setup_test(cx);
|
||||
assistant::context_store::init(&client);
|
||||
});
|
||||
|
||||
client
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue