Lay the groundwork for collaborating on assistant panel (#13991)

This pull request introduces collaboration for the assistant panel by
turning `Context` into a CRDT. `ContextStore` is responsible for sending
and applying operations, as well as synchronizing changes that were
missed while the connection was lost.
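As a rough mental model, here is a minimal, self-contained sketch of the operation-based pattern described above; the types, the per-replica `seq` scheme, and all names are illustrative assumptions, not Zed's actual API:

```rust
use std::collections::HashMap;

// Hypothetical operation: `seq` is its position in the originating replica's stream.
struct Operation {
    replica_id: u16,
    seq: u64,
    edit: String, // placeholder payload
}

#[derive(Default)]
struct ContextStoreSketch {
    applied: HashMap<u16, u64>, // highest seq applied, per replica
    deferred: Vec<Operation>,   // ops whose predecessors haven't arrived yet
    log: Vec<Operation>,        // ops in the order they were applied
}

impl ContextStoreSketch {
    // Apply an incoming operation, deferring it if a predecessor is missing.
    fn apply(&mut self, op: Operation) {
        self.deferred.push(op);
        loop {
            let mut progressed = false;
            for op in std::mem::take(&mut self.deferred) {
                let next = self.applied.get(&op.replica_id).copied().unwrap_or(0) + 1;
                if op.seq == next {
                    self.applied.insert(op.replica_id, op.seq);
                    self.log.push(op);
                    progressed = true;
                } else if op.seq > next {
                    self.deferred.push(op); // still missing a predecessor
                } // seq < next: duplicate delivery, drop it
            }
            if !progressed {
                break;
            }
        }
    }
}

fn main() {
    let mut store = ContextStoreSketch::default();
    let op = |seq, edit: &str| Operation { replica_id: 1, seq, edit: edit.into() };
    store.apply(op(2, "world")); // arrives out of order, so it's deferred
    assert_eq!(store.log.len(), 0);
    store.apply(op(1, "hello")); // unblocks the deferred op
    assert_eq!(store.log.len(), 2);
}
```

Because every replica eventually applies the same set of operations regardless of delivery order, all copies of a context converge.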

Contexts are shared on a per-project basis, and only the host can share
them for now. Shared contexts can be accessed via the `History` tab in
the assistant panel.

<img width="1819" alt="image"
src="https://github.com/zed-industries/zed/assets/482957/c7ae46d2-cde3-4b03-b74a-6e9b1555c154">


Please note that this doesn't implement following yet, which is
scheduled for a subsequent pull request.

Release Notes:

- N/A
Commit 8944af7406 (parent 1662993811)
Antonio Scandurra, 2024-07-10 17:36:22 +02:00, committed by GitHub
25 changed files with 4232 additions and 2120 deletions


@@ -1,12 +1,15 @@
+use std::fmt::Debug;
+
 use clock::ReplicaId;
+use collections::{BTreeMap, HashSet};
 
 pub struct Network<T: Clone, R: rand::Rng> {
-    inboxes: std::collections::BTreeMap<ReplicaId, Vec<Envelope<T>>>,
-    all_messages: Vec<T>,
+    inboxes: BTreeMap<ReplicaId, Vec<Envelope<T>>>,
+    disconnected_peers: HashSet<ReplicaId>,
     rng: R,
 }
 
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 struct Envelope<T: Clone> {
     message: T,
 }
@@ -14,8 +17,8 @@ struct Envelope<T: Clone> {
 impl<T: Clone, R: rand::Rng> Network<T, R> {
     pub fn new(rng: R) -> Self {
         Network {
-            inboxes: Default::default(),
-            all_messages: Vec::new(),
+            inboxes: BTreeMap::default(),
+            disconnected_peers: HashSet::default(),
             rng,
         }
     }
@@ -24,6 +27,24 @@ impl<T: Clone, R: rand::Rng> Network<T, R> {
         self.inboxes.insert(id, Vec::new());
     }
 
+    pub fn disconnect_peer(&mut self, id: ReplicaId) {
+        self.disconnected_peers.insert(id);
+        self.inboxes.get_mut(&id).unwrap().clear();
+    }
+
+    pub fn reconnect_peer(&mut self, id: ReplicaId, replicate_from: ReplicaId) {
+        assert!(self.disconnected_peers.remove(&id));
+        self.replicate(replicate_from, id);
+    }
+
+    pub fn is_disconnected(&self, id: ReplicaId) -> bool {
+        self.disconnected_peers.contains(&id)
+    }
+
+    pub fn contains_disconnected_peers(&self) -> bool {
+        !self.disconnected_peers.is_empty()
+    }
+
     pub fn replicate(&mut self, old_replica_id: ReplicaId, new_replica_id: ReplicaId) {
         self.inboxes
             .insert(new_replica_id, self.inboxes[&old_replica_id].clone());
@@ -34,8 +55,13 @@ impl<T: Clone, R: rand::Rng> Network<T, R> {
     }
 
     pub fn broadcast(&mut self, sender: ReplicaId, messages: Vec<T>) {
+        // Drop messages from disconnected peers.
+        if self.disconnected_peers.contains(&sender) {
+            return;
+        }
+
         for (replica, inbox) in self.inboxes.iter_mut() {
-            if *replica != sender {
+            if *replica != sender && !self.disconnected_peers.contains(replica) {
                 for message in &messages {
                     // Insert one or more duplicates of this message, potentially *before* the previous
                     // message sent by this peer to simulate out-of-order delivery.
@@ -51,7 +77,6 @@ impl<T: Clone, R: rand::Rng> Network<T, R> {
                 }
             }
         }
-        self.all_messages.extend(messages);
     }
 
     pub fn has_unreceived(&self, receiver: ReplicaId) -> bool {
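To make the new disconnection semantics concrete, here is a stripped-down, self-contained stand-in for the harness above (`String` messages, `u16` replica IDs, no RNG-driven duplication or reordering; `add_peer` is an assumed name, since that method's signature isn't visible in these hunks):

```rust
use std::collections::{BTreeMap, HashSet};

struct Network {
    inboxes: BTreeMap<u16, Vec<String>>,
    disconnected_peers: HashSet<u16>,
}

impl Network {
    fn new() -> Self {
        Network {
            inboxes: BTreeMap::new(),
            disconnected_peers: HashSet::new(),
        }
    }

    fn add_peer(&mut self, id: u16) {
        self.inboxes.insert(id, Vec::new());
    }

    fn disconnect_peer(&mut self, id: u16) {
        self.disconnected_peers.insert(id);
        self.inboxes.get_mut(&id).unwrap().clear();
    }

    // Deliver to every connected peer except the sender; drop everything
    // sent by or addressed to disconnected peers, as in the diff above.
    fn broadcast(&mut self, sender: u16, messages: Vec<String>) {
        if self.disconnected_peers.contains(&sender) {
            return;
        }
        for (replica, inbox) in self.inboxes.iter_mut() {
            if *replica != sender && !self.disconnected_peers.contains(replica) {
                inbox.extend(messages.iter().cloned());
            }
        }
    }
}

fn main() {
    let mut network = Network::new();
    for id in 0..3 {
        network.add_peer(id);
    }
    network.broadcast(0, vec!["op-1".into()]);
    network.disconnect_peer(2); // clears peer 2's inbox
    network.broadcast(0, vec!["op-2".into()]); // peer 2 misses this entirely
    assert_eq!(network.inboxes[&1].len(), 2);
    assert_eq!(network.inboxes[&2].len(), 0);
}
```

This is why `reconnect_peer` re-seeds the inbox via `replicate`: a peer that was offline has permanently lost the broadcasts it missed and must catch up from another replica's state, mirroring how `ContextStore` resynchronizes after a dropped connection.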


@@ -1265,6 +1265,10 @@ impl Buffer {
         }
     }
 
+    pub fn has_deferred_ops(&self) -> bool {
+        !self.deferred_ops.is_empty()
+    }
+
     pub fn peek_undo_stack(&self) -> Option<&HistoryEntry> {
         self.history.undo_stack.last()
     }
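The new `has_deferred_ops` accessor looks aimed at letting callers (e.g. sync tests) check that no operations are still parked awaiting missing predecessors. A hypothetical assertion, not from this diff, assuming `host` and `guest` are `Buffer` replicas that have exchanged all outstanding operations:

```rust
// Hypothetical post-sync check on two assumed Buffer replicas.
assert!(!host.has_deferred_ops());
assert!(!guest.has_deferred_ops());
assert_eq!(host.text(), guest.text()); // replicas converged
```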