Remove update_id from worktree update messages

We no longer need it: worktree updates are now sent as foreground
messages, so they are delivered in the order they were sent and neither
the client nor the server has to re-sequence them by update id.
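
For context, the receiver-side buffering this commit deletes is the classic
reorder-buffer pattern: stamp each message with a sequence number, deliver
in-sequence messages immediately, and park early arrivals until the gap is
filled. A minimal standalone sketch of that pattern (hypothetical
ReorderBuffer type, not Zed's actual code):

// Sketch of the reorder-buffer pattern removed below (hypothetical
// standalone type, not Zed's actual code). A receiver only needs this
// when the transport can deliver messages out of order.
use std::collections::VecDeque;

struct ReorderBuffer<T> {
    next_id: u64,
    pending: VecDeque<(u64, T)>,
}

impl<T> ReorderBuffer<T> {
    fn new() -> Self {
        Self { next_id: 0, pending: VecDeque::new() }
    }

    // Accept a stamped message; return everything now deliverable in order.
    fn receive(&mut self, id: u64, message: T) -> Vec<T> {
        if id > self.next_id {
            // Early arrival: park it in the id-sorted pending queue.
            let ix = match self.pending.binary_search_by_key(&id, |(id, _)| *id) {
                Ok(ix) | Err(ix) => ix,
            };
            self.pending.insert(ix, (id, message));
            Vec::new()
        } else {
            // In sequence: deliver it, plus any parked messages it unblocks.
            let mut ready = vec![message];
            self.next_id += 1;
            while self.pending.front().map_or(false, |(id, _)| *id == self.next_id) {
                ready.push(self.pending.pop_front().unwrap().1);
                self.next_id += 1;
            }
            ready
        }
    }
}

fn main() {
    let mut buffer = ReorderBuffer::new();
    assert!(buffer.receive(1, "second").is_empty()); // early, parked
    assert_eq!(buffer.receive(0, "first"), vec!["first", "second"]); // gap filled
}

Because foreground messages arrive in send order, all of this collapses to a
single unbounded_send on the receiving side, and the matching next_update_id
bookkeeping can be dropped from the sender, the protocol, and the server.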

Co-Authored-By: Nathan Sobo <nathan@zed.dev>
Co-Authored-By: Max Brunsfeld <max@zed.dev>
Antonio Scandurra 2022-02-23 18:35:25 +01:00
parent f3c6320eeb
commit 8440644dc9
5 changed files with 14 additions and 67 deletions

@@ -3885,7 +3885,6 @@ mod tests {
         &initial_snapshot,
         1,
         1,
-        0,
         true,
     );
     remote

@@ -84,7 +84,6 @@ pub struct RemoteWorktree {
     queued_operations: Vec<(u64, Operation)>,
     diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
     weak: bool,
-    next_update_id: u64,
     pending_updates: VecDeque<proto::UpdateWorktree>,
 }

@@ -239,7 +238,6 @@ impl Worktree {
                 }),
             ),
             weak,
-            next_update_id: worktree.next_update_id,
             pending_updates: Default::default(),
         })
     });
@@ -792,20 +790,13 @@ impl LocalWorktree {
                 let _ = share_tx.try_send(Ok(()));
             }

-            let mut update_id = 0;
             let mut prev_snapshot = snapshot;
             while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
-                let message = snapshot.build_update(
-                    &prev_snapshot,
-                    project_id,
-                    worktree_id,
-                    update_id,
-                    false,
-                );
+                let message =
+                    snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                 match rpc.request(message).await {
                     Ok(_) => {
                         prev_snapshot = snapshot;
-                        update_id += 1;
                     }
                     Err(err) => log::error!("error sending snapshot diff {}", err),
                 }
@@ -844,30 +835,9 @@ impl RemoteWorktree {
         &mut self,
         envelope: TypedEnvelope<proto::UpdateWorktree>,
     ) -> Result<()> {
-        let update = envelope.payload;
-        if update.id > self.next_update_id {
-            let ix = match self
-                .pending_updates
-                .binary_search_by_key(&update.id, |pending| pending.id)
-            {
-                Ok(ix) | Err(ix) => ix,
-            };
-            self.pending_updates.insert(ix, update);
-        } else {
-            let tx = self.updates_tx.clone();
-            self.next_update_id += 1;
-            tx.unbounded_send(update)
-                .expect("consumer runs to completion");
-            while let Some(update) = self.pending_updates.front() {
-                if update.id == self.next_update_id {
-                    self.next_update_id += 1;
-                    tx.unbounded_send(self.pending_updates.pop_front().unwrap())
-                        .expect("consumer runs to completion");
-                } else {
-                    break;
-                }
-            }
-        }
+        self.updates_tx
+            .unbounded_send(envelope.payload)
+            .expect("consumer runs to completion");
         Ok(())
     }

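
The simplified handler forwards each envelope straight into updates_tx and
leans entirely on the channel's FIFO guarantee. A quick self-contained
demonstration of that property, as a sketch assuming the futures crate's
unbounded mpsc channel (which the unbounded_send call suggests; not Zed's
code):

// FIFO demo: an unbounded mpsc channel yields items in send order, so
// forwarding payloads without re-sequencing them is safe.
// (Assumes the futures crate; hypothetical example, not Zed's code.)
use futures::channel::mpsc;
use futures::executor::block_on;
use futures::StreamExt;

fn main() {
    let (tx, rx) = mpsc::unbounded();
    for id in 0..3u64 {
        tx.unbounded_send(id).expect("consumer runs to completion");
    }
    drop(tx); // close the channel so collect() terminates
    let received: Vec<u64> = block_on(rx.collect());
    assert_eq!(received, vec![0, 1, 2]); // receive order == send order
}
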
@@ -1058,7 +1028,6 @@ impl LocalSnapshot {
                 .map(|(path, summary)| summary.to_proto(path.0.clone()))
                 .collect(),
             weak,
-            next_update_id: 0,
         }
     }

@@ -1067,7 +1036,6 @@ impl LocalSnapshot {
         other: &Self,
         project_id: u64,
         worktree_id: u64,
-        update_id: u64,
         include_ignored: bool,
     ) -> proto::UpdateWorktree {
         let mut updated_entries = Vec::new();
@@ -1120,7 +1088,6 @@
         }

         proto::UpdateWorktree {
-            id: update_id as u64,
             project_id,
             worktree_id,
             root_name: self.root_name().to_string(),
@@ -2461,7 +2428,7 @@ mod tests {
         fmt::Write,
         time::{SystemTime, UNIX_EPOCH},
     };
-    use util::{post_inc, test::temp_tree};
+    use util::test::temp_tree;

     #[gpui::test]
     async fn test_traversal(cx: gpui::TestAppContext) {
@@ -2646,7 +2613,6 @@ mod tests {
             new_scanner.snapshot().to_vec(true)
         );

-        let mut update_id = 0;
         for mut prev_snapshot in snapshots {
             let include_ignored = rng.gen::<bool>();
             if !include_ignored {
@@ -2667,13 +2633,9 @@
             prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
         }

-        let update = scanner.snapshot().build_update(
-            &prev_snapshot,
-            0,
-            0,
-            post_inc(&mut update_id),
-            include_ignored,
-        );
+        let update = scanner
+            .snapshot()
+            .build_update(&prev_snapshot, 0, 0, include_ignored);
         prev_snapshot.apply_remote_update(update).unwrap();
         assert_eq!(
             prev_snapshot.to_vec(true),

@@ -145,12 +145,11 @@ message ShareWorktree {
 }

 message UpdateWorktree {
-    uint64 id = 1;
-    uint64 project_id = 2;
-    uint64 worktree_id = 3;
-    string root_name = 4;
-    repeated Entry updated_entries = 5;
-    repeated uint64 removed_entries = 6;
+    uint64 project_id = 1;
+    uint64 worktree_id = 2;
+    string root_name = 3;
+    repeated Entry updated_entries = 4;
+    repeated uint64 removed_entries = 5;
 }

 message AddProjectCollaborator {
@@ -494,7 +493,6 @@ message Worktree {
     repeated Entry entries = 3;
     repeated DiagnosticSummary diagnostic_summaries = 4;
     bool weak = 5;
-    uint64 next_update_id = 6;
 }

 message File {

@@ -350,7 +350,6 @@ impl Server {
                     .cloned()
                     .collect(),
                 weak: worktree.weak,
-                next_update_id: share.next_update_id as u64,
             })
         })
         .collect();
@@ -489,7 +488,6 @@ impl Server {
             request.sender_id,
             entries,
             diagnostic_summaries,
-            worktree.next_update_id,
         )?;

         broadcast(
@@ -513,7 +511,6 @@ impl Server {
             request.sender_id,
             request.payload.project_id,
             request.payload.worktree_id,
-            request.payload.id,
             &request.payload.removed_entries,
             &request.payload.updated_entries,
         )?;

@@ -43,7 +43,6 @@ pub struct ProjectShare {
 pub struct WorktreeShare {
     pub entries: HashMap<u64, proto::Entry>,
     pub diagnostic_summaries: BTreeMap<PathBuf, proto::DiagnosticSummary>,
-    pub next_update_id: u64,
 }

 #[derive(Default)]
@@ -404,7 +403,6 @@ impl Store {
         connection_id: ConnectionId,
         entries: HashMap<u64, proto::Entry>,
         diagnostic_summaries: BTreeMap<PathBuf, proto::DiagnosticSummary>,
-        next_update_id: u64,
     ) -> tide::Result<SharedWorktree> {
         let project = self
             .projects
@@ -418,7 +416,6 @@ impl Store {
         worktree.share = Some(WorktreeShare {
             entries,
             diagnostic_summaries,
-            next_update_id,
         });
         Ok(SharedWorktree {
             authorized_user_ids: project.authorized_user_ids(),
@@ -537,7 +534,6 @@ impl Store {
         connection_id: ConnectionId,
         project_id: u64,
         worktree_id: u64,
-        update_id: u64,
         removed_entries: &[u64],
         updated_entries: &[proto::Entry],
     ) -> tide::Result<Vec<ConnectionId>> {
@@ -549,11 +545,6 @@ impl Store {
             .share
             .as_mut()
             .ok_or_else(|| anyhow!("worktree is not shared"))?;
-        if share.next_update_id != update_id {
-            return Err(anyhow!("received worktree updates out-of-order"))?;
-        }
-        share.next_update_id = update_id + 1;
-
         for entry_id in removed_entries {
             share.entries.remove(&entry_id);
         }