Remove update_id from worktree update messages

We don't need this anymore because worktree updates are now sent as foreground
messages, which are already delivered in order, so the explicit sequencing is redundant.

Co-Authored-By: Nathan Sobo <nathan@zed.dev>
Co-Authored-By: Max Brunsfeld <max@zed.dev>
This commit is contained in:
Antonio Scandurra 2022-02-23 18:35:25 +01:00
parent f3c6320eeb
commit 8440644dc9
5 changed files with 14 additions and 67 deletions

View file

@@ -3885,7 +3885,6 @@ mod tests {
&initial_snapshot,
1,
1,
0,
true,
);
remote

View file

@@ -84,7 +84,6 @@ pub struct RemoteWorktree {
queued_operations: Vec<(u64, Operation)>,
diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
weak: bool,
next_update_id: u64,
pending_updates: VecDeque<proto::UpdateWorktree>,
}
@@ -239,7 +238,6 @@ impl Worktree {
}),
),
weak,
next_update_id: worktree.next_update_id,
pending_updates: Default::default(),
})
});
@@ -792,20 +790,13 @@ impl LocalWorktree {
let _ = share_tx.try_send(Ok(()));
}
let mut update_id = 0;
let mut prev_snapshot = snapshot;
while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
let message = snapshot.build_update(
&prev_snapshot,
project_id,
worktree_id,
update_id,
false,
);
let message =
snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
match rpc.request(message).await {
Ok(_) => {
prev_snapshot = snapshot;
update_id += 1;
}
Err(err) => log::error!("error sending snapshot diff {}", err),
}
@@ -844,30 +835,9 @@ impl RemoteWorktree {
&mut self,
envelope: TypedEnvelope<proto::UpdateWorktree>,
) -> Result<()> {
let update = envelope.payload;
if update.id > self.next_update_id {
let ix = match self
.pending_updates
.binary_search_by_key(&update.id, |pending| pending.id)
{
Ok(ix) | Err(ix) => ix,
};
self.pending_updates.insert(ix, update);
} else {
let tx = self.updates_tx.clone();
self.next_update_id += 1;
tx.unbounded_send(update)
.expect("consumer runs to completion");
while let Some(update) = self.pending_updates.front() {
if update.id == self.next_update_id {
self.next_update_id += 1;
tx.unbounded_send(self.pending_updates.pop_front().unwrap())
.expect("consumer runs to completion");
} else {
break;
}
}
}
self.updates_tx
.unbounded_send(envelope.payload)
.expect("consumer runs to completion");
Ok(())
}
@@ -1058,7 +1028,6 @@ impl LocalSnapshot {
.map(|(path, summary)| summary.to_proto(path.0.clone()))
.collect(),
weak,
next_update_id: 0,
}
}
@@ -1067,7 +1036,6 @@ impl LocalSnapshot {
other: &Self,
project_id: u64,
worktree_id: u64,
update_id: u64,
include_ignored: bool,
) -> proto::UpdateWorktree {
let mut updated_entries = Vec::new();
@@ -1120,7 +1088,6 @@ impl LocalSnapshot {
}
proto::UpdateWorktree {
id: update_id as u64,
project_id,
worktree_id,
root_name: self.root_name().to_string(),
@@ -2461,7 +2428,7 @@ mod tests {
fmt::Write,
time::{SystemTime, UNIX_EPOCH},
};
use util::{post_inc, test::temp_tree};
use util::test::temp_tree;
#[gpui::test]
async fn test_traversal(cx: gpui::TestAppContext) {
@@ -2646,7 +2613,6 @@ mod tests {
new_scanner.snapshot().to_vec(true)
);
let mut update_id = 0;
for mut prev_snapshot in snapshots {
let include_ignored = rng.gen::<bool>();
if !include_ignored {
@@ -2667,13 +2633,9 @@ mod tests {
prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
}
let update = scanner.snapshot().build_update(
&prev_snapshot,
0,
0,
post_inc(&mut update_id),
include_ignored,
);
let update = scanner
.snapshot()
.build_update(&prev_snapshot, 0, 0, include_ignored);
prev_snapshot.apply_remote_update(update).unwrap();
assert_eq!(
prev_snapshot.to_vec(true),

View file

@@ -145,12 +145,11 @@ message ShareWorktree {
}
message UpdateWorktree {
uint64 id = 1;
uint64 project_id = 2;
uint64 worktree_id = 3;
string root_name = 4;
repeated Entry updated_entries = 5;
repeated uint64 removed_entries = 6;
uint64 project_id = 1;
uint64 worktree_id = 2;
string root_name = 3;
repeated Entry updated_entries = 4;
repeated uint64 removed_entries = 5;
}
message AddProjectCollaborator {
@@ -494,7 +493,6 @@ message Worktree {
repeated Entry entries = 3;
repeated DiagnosticSummary diagnostic_summaries = 4;
bool weak = 5;
uint64 next_update_id = 6;
}
message File {

View file

@@ -350,7 +350,6 @@ impl Server {
.cloned()
.collect(),
weak: worktree.weak,
next_update_id: share.next_update_id as u64,
})
})
.collect();
@@ -489,7 +488,6 @@ impl Server {
request.sender_id,
entries,
diagnostic_summaries,
worktree.next_update_id,
)?;
broadcast(
@@ -513,7 +511,6 @@ impl Server {
request.sender_id,
request.payload.project_id,
request.payload.worktree_id,
request.payload.id,
&request.payload.removed_entries,
&request.payload.updated_entries,
)?;

View file

@@ -43,7 +43,6 @@ pub struct ProjectShare {
pub struct WorktreeShare {
pub entries: HashMap<u64, proto::Entry>,
pub diagnostic_summaries: BTreeMap<PathBuf, proto::DiagnosticSummary>,
pub next_update_id: u64,
}
#[derive(Default)]
@@ -404,7 +403,6 @@ impl Store {
connection_id: ConnectionId,
entries: HashMap<u64, proto::Entry>,
diagnostic_summaries: BTreeMap<PathBuf, proto::DiagnosticSummary>,
next_update_id: u64,
) -> tide::Result<SharedWorktree> {
let project = self
.projects
@@ -418,7 +416,6 @@ impl Store {
worktree.share = Some(WorktreeShare {
entries,
diagnostic_summaries,
next_update_id,
});
Ok(SharedWorktree {
authorized_user_ids: project.authorized_user_ids(),
@@ -537,7 +534,6 @@ impl Store {
connection_id: ConnectionId,
project_id: u64,
worktree_id: u64,
update_id: u64,
removed_entries: &[u64],
updated_entries: &[proto::Entry],
) -> tide::Result<Vec<ConnectionId>> {
@ -549,11 +545,6 @@ impl Store {
.share
.as_mut()
.ok_or_else(|| anyhow!("worktree is not shared"))?;
if share.next_update_id != update_id {
return Err(anyhow!("received worktree updates out-of-order"))?;
}
share.next_update_id = update_id + 1;
for entry_id in removed_entries {
share.entries.remove(&entry_id);
}