Remove ShareWorktree message

Instead, create an empty worktree on guests when a worktree is first *registered*, then update it via an initial UpdateWorktree message.

This prevents the host from referencing, in definition RPC responses, a worktree that the guest hasn't yet observed. We could have waited until the entire worktree was shared, but that could take a long time, so instead we create an empty worktree on guests and proceed from there.
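
As a rough sketch of the ordering this buys us (a toy model, not zed's actual RPC types — only worktree_id, root_name, and weak come from the diff below, the rest is made up):

#[derive(Debug)]
enum HostMessage {
    RegisterWorktree { worktree_id: u64, root_name: String, weak: bool },
    UpdateWorktree { worktree_id: u64, is_initial: bool },
    UpdateDiagnosticSummary { worktree_id: u64, path: String },
}

fn share_sequence(worktree_id: u64, diagnostic_paths: &[&str]) -> Vec<HostMessage> {
    // Guests learn the worktree id at registration time, so any definition RPC
    // response that mentions it can already be resolved, even before entries arrive.
    let mut msgs = vec![
        HostMessage::RegisterWorktree { worktree_id, root_name: "zed".into(), weak: false },
        HostMessage::UpdateWorktree { worktree_id, is_initial: true },
    ];
    for path in diagnostic_paths {
        msgs.push(HostMessage::UpdateDiagnosticSummary { worktree_id, path: (*path).into() });
    }
    msgs
}

fn main() {
    for msg in share_sequence(1, &["src/main.rs"]) {
        println!("{:?}", msg);
    }
}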

We still have randomized test failures as of this commit:

SEED=9519 MAX_PEERS=2 ITERATIONS=10000 OPERATIONS=7 ct -p zed-server test_random_collaboration

Co-Authored-By: Max Brunsfeld <maxbrunsfeld@gmail.com>
Co-Authored-By: Antonio Scandurra <me@as-cii.com>
Nathan Sobo 2022-02-23 11:56:09 -07:00
parent d1b4384f80
commit 17c9aa1819
5 changed files with 63 additions and 55 deletions


@@ -169,7 +169,7 @@ impl DiagnosticSummary {
         this
     }
 
-    pub fn to_proto(&self, path: Arc<Path>) -> proto::DiagnosticSummary {
+    pub fn to_proto(&self, path: &Path) -> proto::DiagnosticSummary {
         proto::DiagnosticSummary {
             path: path.to_string_lossy().to_string(),
             error_count: self.error_count as u32,
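
This hunk loosens DiagnosticSummary::to_proto to borrow its path. A standalone illustration of why &Path is enough here (a simplified stand-in, assuming nothing about zed beyond what the hunk shows):

use std::path::Path;
use std::sync::Arc;

// Taking &Path lets callers that own an Arc<Path> pass a borrow instead of
// bumping the refcount with .clone().
fn to_proto_path(path: &Path) -> String {
    path.to_string_lossy().to_string()
}

fn main() {
    let shared: Arc<Path> = Arc::from(Path::new("src/lib.rs"));
    // Arc<Path> derefs to Path, so `&shared` coerces to `&Path` at the call site.
    let proto_path = to_proto_path(&shared);
    assert_eq!(proto_path, "src/lib.rs");
}
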
@@ -195,7 +195,7 @@ impl Project {
         client.add_entity_message_handler(Self::handle_disk_based_diagnostics_updated);
         client.add_entity_message_handler(Self::handle_disk_based_diagnostics_updating);
         client.add_entity_message_handler(Self::handle_remove_collaborator);
-        client.add_entity_message_handler(Self::handle_share_worktree);
+        client.add_entity_message_handler(Self::handle_register_worktree);
         client.add_entity_message_handler(Self::handle_unregister_worktree);
         client.add_entity_message_handler(Self::handle_unshare_project);
         client.add_entity_message_handler(Self::handle_update_buffer_file);
@@ -2347,19 +2347,22 @@ impl Project {
         })
     }
 
-    async fn handle_share_worktree(
+    async fn handle_register_worktree(
         this: ModelHandle<Self>,
-        envelope: TypedEnvelope<proto::ShareWorktree>,
+        envelope: TypedEnvelope<proto::RegisterWorktree>,
         client: Arc<Client>,
         mut cx: AsyncAppContext,
     ) -> Result<()> {
         this.update(&mut cx, |this, cx| {
             let remote_id = this.remote_id().ok_or_else(|| anyhow!("invalid project"))?;
             let replica_id = this.replica_id();
-            let worktree = envelope
-                .payload
-                .worktree
-                .ok_or_else(|| anyhow!("invalid worktree"))?;
+            let worktree = proto::Worktree {
+                id: envelope.payload.worktree_id,
+                root_name: envelope.payload.root_name,
+                entries: Default::default(),
+                diagnostic_summaries: Default::default(),
+                weak: envelope.payload.weak,
+            };
             let (worktree, load_task) =
                 Worktree::remote(remote_id, replica_id, worktree, client, cx);
             this.add_worktree(&worktree, cx);
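
On the guest, handle_register_worktree now builds a proto::Worktree with empty entries and diagnostic summaries and hands it to Worktree::remote. A minimal stand-in for that idea (hypothetical types, not zed's Worktree): registration yields an empty snapshot, and the first UpdateWorktree fills it the same way any later incremental update would.

use std::collections::BTreeMap;

#[derive(Default)]
struct RemoteWorktreeSnapshot {
    root_name: String,
    entries: BTreeMap<u64, String>, // entry id -> path, heavily simplified
}

impl RemoteWorktreeSnapshot {
    // What a guest creates when it sees RegisterWorktree: a name and nothing else.
    fn registered(root_name: &str) -> Self {
        Self { root_name: root_name.to_string(), ..Default::default() }
    }

    // Hypothetical update shape: (id, path) pairs to upsert, ids to remove.
    fn apply_update(&mut self, updated: &[(u64, &str)], removed: &[u64]) {
        for (id, path) in updated {
            self.entries.insert(*id, path.to_string());
        }
        for id in removed {
            self.entries.remove(id);
        }
    }
}

fn main() {
    let mut snapshot = RemoteWorktreeSnapshot::registered("zed");
    assert!(snapshot.entries.is_empty()); // usable by RPC responses right away
    snapshot.apply_update(&[(1, "Cargo.toml"), (2, "src/main.rs")], &[]);
    assert_eq!(snapshot.entries.len(), 2);
    assert_eq!(snapshot.root_name, "zed");
}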


@@ -771,12 +771,10 @@ impl LocalWorktree {
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) =
            smol::channel::unbounded::<LocalSnapshot>();
        let (mut share_tx, mut share_rx) = oneshot::channel();
        let maintain_remote_snapshot = cx.background().spawn({
            let rpc = rpc.clone();
            let snapshot = snapshot.clone();
            let diagnostic_summaries = self.diagnostic_summaries.clone();
            let weak = self.weak;
            async move {
                if let Err(error) = rpc
                    .request(proto::UpdateWorktree {
@@ -799,6 +797,14 @@
                     let _ = share_tx.try_send(Ok(()));
                 }
 
+                for (path, summary) in diagnostic_summaries.iter() {
+                    rpc.send(proto::UpdateDiagnosticSummary {
+                        project_id,
+                        worktree_id,
+                        summary: Some(summary.to_proto(&path.0)),
+                    })?;
+                }
+
                 let mut prev_snapshot = snapshot;
                 while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                     let message =
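
After the initial UpdateWorktree request succeeds, the background task sends one UpdateDiagnosticSummary per path and then forwards incremental snapshots from the channel. A stripped-down sketch of that channel-fed loop, using the same smol::channel API the diff relies on (the Snapshot type and its version field are placeholders):

#[derive(Debug)]
struct Snapshot {
    version: u64,
}

fn main() {
    smol::block_on(async {
        let (tx, rx) = smol::channel::unbounded::<Snapshot>();

        let consumer = smol::spawn(async move {
            let mut prev = Snapshot { version: 0 };
            while let Ok(snapshot) = rx.recv().await {
                // In the real task, the delta between `prev` and `snapshot`
                // becomes a proto::UpdateWorktree message sent over RPC.
                println!("sending update {} -> {}", prev.version, snapshot.version);
                prev = snapshot;
            }
            // The loop ends once every sender has been dropped.
        });

        for version in 1..=3 {
            tx.send(Snapshot { version }).await.unwrap();
        }
        drop(tx);
        consumer.await;
    });
}
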
@@ -819,7 +825,10 @@
         }
 
         async move {
-            share_rx.next().await;
+            share_rx
+                .next()
+                .await
+                .unwrap_or_else(|| Err(anyhow!("share ended")))
         }
     }
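
The share_rx change turns a silently dropped sender into an explicit error: if the background task dies before it signals success, next() yields None, which now maps to Err instead of being ignored. A self-contained sketch of that pattern using futures::channel::mpsc (zed's actual channel type here may differ):

use anyhow::{anyhow, Result};
use futures::{channel::mpsc, StreamExt};

async fn wait_for_share(mut share_rx: mpsc::Receiver<Result<()>>) -> Result<()> {
    share_rx
        .next()
        .await
        // None means the sender was dropped without ever reporting a result.
        .unwrap_or_else(|| Err(anyhow!("share ended")))
}

fn main() {
    let (share_tx, share_rx) = mpsc::channel::<Result<()>>(1);
    drop(share_tx); // simulate the sharing task dying before it signals success
    let result = futures::executor::block_on(wait_for_share(share_rx));
    assert!(result.is_err());
}
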
@@ -1014,6 +1023,7 @@
 }
 
 impl LocalSnapshot {
+    #[cfg(test)]
     pub(crate) fn to_proto(
         &self,
         diagnostic_summaries: &TreeMap<PathKey, DiagnosticSummary>,
@@ -1031,7 +1041,7 @@
                 .collect(),
             diagnostic_summaries: diagnostic_summaries
                 .iter()
-                .map(|(path, summary)| summary.to_proto(path.0.clone()))
+                .map(|(path, summary)| summary.to_proto(&path.0))
                 .collect(),
             weak,
         }
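
With ShareWorktree gone, no production path needs to serialize a full LocalSnapshot anymore, so to_proto is gated behind #[cfg(test)] and only compiled for tests. A minimal example of that gating pattern (placeholder types, not zed's):

pub struct Snapshot {
    entries: Vec<String>,
}

impl Snapshot {
    // Compiled only when testing; release builds no longer need a
    // full-snapshot serialization now that worktrees stream incrementally.
    #[cfg(test)]
    pub(crate) fn to_proto(&self) -> Vec<String> {
        self.entries.clone()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn serializes_full_snapshot() {
        let snapshot = Snapshot { entries: vec!["Cargo.toml".into()] };
        assert_eq!(snapshot.to_proto(), vec!["Cargo.toml".to_string()]);
    }
}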