Remove ShareWorktree message

Instead, create an empty worktree on guests when a worktree is first *registered*, then update it via an initial UpdateWorktree message.

This prevents the host from referencing, in definition RPC responses, a worktree that the guest hasn't observed yet. We could have waited until the entire worktree was shared, but that could take a long time, so instead we create an empty worktree on guests and proceed from there.
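
As an illustration of this flow (not the actual server code; `Entry`, the string-valued diagnostic summary, and the `register_worktree`/`update_worktree` helpers below are simplified stand-ins), a minimal sketch of how an empty worktree is seeded at registration time and then filled in by UpdateWorktree messages:

use std::collections::{BTreeMap, HashMap};
use std::path::PathBuf;

// Simplified stand-in for proto::Entry; illustrative only.
#[derive(Clone)]
struct Entry {
    id: u64,
    path: PathBuf,
}

// Mirrors the shape of the server-side WorktreeShare in the diff below:
// it now starts empty and is populated by later UpdateWorktree messages.
#[derive(Default)]
struct WorktreeShare {
    entries: HashMap<u64, Entry>,
    diagnostic_summaries: BTreeMap<PathBuf, String>, // String stands in for proto::DiagnosticSummary
}

#[derive(Default)]
struct ProjectShare {
    worktrees: HashMap<u64, WorktreeShare>,
}

impl ProjectShare {
    // On registration: seed an empty share immediately so the worktree can be
    // referenced (e.g. in definition responses) before any entries arrive.
    fn register_worktree(&mut self, worktree_id: u64) {
        self.worktrees.insert(worktree_id, WorktreeShare::default());
    }

    // On UpdateWorktree (including the initial one): apply entry changes incrementally.
    fn update_worktree(&mut self, worktree_id: u64, removed: &[u64], updated: &[Entry]) {
        if let Some(worktree) = self.worktrees.get_mut(&worktree_id) {
            for entry_id in removed {
                worktree.entries.remove(entry_id);
            }
            for entry in updated {
                worktree.entries.insert(entry.id, entry.clone());
            }
        }
    }
}

fn main() {
    let mut share = ProjectShare::default();
    share.register_worktree(1); // the worktree exists (empty) as soon as it is registered
    share.update_worktree(
        1,
        &[],
        &[Entry { id: 7, path: PathBuf::from("src/lib.rs") }],
    );
    assert_eq!(share.worktrees[&1].entries.len(), 1);
    assert!(share.worktrees[&1].diagnostic_summaries.is_empty());
    println!("entry 7 path: {:?}", share.worktrees[&1].entries[&7].path);
}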

We still have randomized test failures as of this commit:

SEED=9519 MAX_PEERS=2 ITERATIONS=10000 OPERATIONS=7 ct -p zed-server test_random_collaboration

Co-Authored-By: Max Brunsfeld <maxbrunsfeld@gmail.com>
Co-Authored-By: Antonio Scandurra <me@as-cii.com>
Nathan Sobo 2022-02-23 11:56:09 -07:00
parent d1b4384f80
commit 17c9aa1819
5 changed files with 63 additions and 55 deletions


@@ -334,16 +334,16 @@ impl Server {
             replica_id: 0,
             user_id: joined.project.host_user_id.to_proto(),
         });
-        let worktrees = joined
-            .project
+        let worktrees = share
             .worktrees
             .iter()
-            .filter_map(|(id, worktree)| {
-                worktree.share.as_ref().map(|share| proto::Worktree {
+            .filter_map(|(id, shared_worktree)| {
+                let worktree = joined.project.worktrees.get(&id)?;
+                Some(proto::Worktree {
                     id: *id,
                     root_name: worktree.root_name.clone(),
-                    entries: share.entries.values().cloned().collect(),
-                    diagnostic_summaries: share
+                    entries: shared_worktree.entries.values().cloned().collect(),
+                    diagnostic_summaries: shared_worktree
                         .diagnostic_summaries
                         .values()
                         .cloned()
@@ -437,7 +437,6 @@ impl Server {
             Worktree {
                 authorized_user_ids: contact_user_ids.clone(),
                 root_name: request.payload.root_name.clone(),
-                share: None,
                 weak: request.payload.weak,
             },
         )?;
@@ -1164,7 +1163,7 @@ mod tests {
         cell::Cell,
         env,
         ops::Deref,
-        path::Path,
+        path::{Path, PathBuf},
         rc::Rc,
         sync::{
             atomic::{AtomicBool, Ordering::SeqCst},
@@ -2115,16 +2114,14 @@ mod tests {
                 let worktree = store
                     .project(project_id)
                     .unwrap()
+                    .share
+                    .as_ref()
+                    .unwrap()
                     .worktrees
                     .get(&worktree_id.to_proto())
                     .unwrap();
-                !worktree
-                    .share
-                    .as_ref()
-                    .unwrap()
-                    .diagnostic_summaries
-                    .is_empty()
+                !worktree.diagnostic_summaries.is_empty()
             })
             .await;


@@ -30,7 +30,6 @@ pub struct Project {
 pub struct Worktree {
     pub authorized_user_ids: Vec<UserId>,
     pub root_name: String,
-    pub share: Option<WorktreeShare>,
     pub weak: bool,
 }
@@ -38,8 +37,10 @@ pub struct Worktree {
 pub struct ProjectShare {
     pub guests: HashMap<ConnectionId, (ReplicaId, UserId)>,
     pub active_replica_ids: HashSet<ReplicaId>,
+    pub worktrees: HashMap<u64, WorktreeShare>,
 }
 
+#[derive(Default)]
 pub struct WorktreeShare {
     pub entries: HashMap<u64, proto::Entry>,
     pub diagnostic_summaries: BTreeMap<PathBuf, proto::DiagnosticSummary>,
@@ -74,11 +75,6 @@ pub struct LeftProject {
     pub authorized_user_ids: Vec<UserId>,
 }
 
-pub struct SharedWorktree {
-    pub authorized_user_ids: Vec<UserId>,
-    pub connection_ids: Vec<ConnectionId>,
-}
-
 impl Store {
     pub fn add_connection(&mut self, connection_id: ConnectionId, user_id: UserId) {
         self.connections.insert(
@@ -272,6 +268,9 @@ impl Store {
             connection.projects.insert(project_id);
         }
         project.worktrees.insert(worktree_id, worktree);
+        if let Ok(share) = project.share_mut() {
+            share.worktrees.insert(worktree_id, Default::default());
+        }
 
         #[cfg(test)]
         self.check_invariants();
@@ -326,8 +325,9 @@
             .ok_or_else(|| anyhow!("no such worktree"))?;
         let mut guest_connection_ids = Vec::new();
-        if let Some(share) = &project.share {
+        if let Ok(share) = project.share_mut() {
             guest_connection_ids.extend(share.guests.keys());
+            share.worktrees.remove(&worktree_id);
         }
         for authorized_user_id in &worktree.authorized_user_ids {
@@ -349,7 +349,11 @@
     pub fn share_project(&mut self, project_id: u64, connection_id: ConnectionId) -> bool {
         if let Some(project) = self.projects.get_mut(&project_id) {
             if project.host_connection_id == connection_id {
-                project.share = Some(ProjectShare::default());
+                let mut share = ProjectShare::default();
+                for worktree_id in project.worktrees.keys() {
+                    share.worktrees.insert(*worktree_id, Default::default());
+                }
+                project.share = Some(share);
                 return true;
             }
         }
@@ -380,10 +384,6 @@
             }
         }
 
-        for worktree in project.worktrees.values_mut() {
-            worktree.share.take();
-        }
-
         #[cfg(test)]
         self.check_invariants();
@@ -407,17 +407,16 @@
             .projects
             .get_mut(&project_id)
             .ok_or_else(|| anyhow!("no such project"))?;
-        let worktree = project
-            .worktrees
-            .get_mut(&worktree_id)
-            .ok_or_else(|| anyhow!("no such worktree"))?;
         if project.host_connection_id == connection_id {
-            if let Some(share) = worktree.share.as_mut() {
-                share
-                    .diagnostic_summaries
-                    .insert(summary.path.clone().into(), summary);
-                return Ok(project.connection_ids());
-            }
+            let worktree = project
+                .share_mut()?
+                .worktrees
+                .get_mut(&worktree_id)
+                .ok_or_else(|| anyhow!("no such worktree"))?;
+            worktree
+                .diagnostic_summaries
+                .insert(summary.path.clone().into(), summary);
+            return Ok(project.connection_ids());
         }
         Err(anyhow!("no such worktree"))?
@@ -508,18 +507,16 @@ impl Store {
         updated_entries: &[proto::Entry],
     ) -> tide::Result<Vec<ConnectionId>> {
         let project = self.write_project(project_id, connection_id)?;
-        let share = project
+        let worktree = project
+            .share_mut()?
             .worktrees
             .get_mut(&worktree_id)
-            .ok_or_else(|| anyhow!("no such worktree"))?
-            .share
-            .as_mut()
-            .ok_or_else(|| anyhow!("worktree is not shared"))?;
+            .ok_or_else(|| anyhow!("no such worktree"))?;
         for entry_id in removed_entries {
-            share.entries.remove(&entry_id);
+            worktree.entries.remove(&entry_id);
         }
         for entry in updated_entries {
-            share.entries.insert(entry.id, entry.clone());
+            worktree.entries.insert(entry.id, entry.clone());
        }
         Ok(project.connection_ids())
     }