Ensure worktree updates are observed in order on the server

Antonio Scandurra 2022-02-16 14:05:10 +01:00
parent 3f6feb1c12
commit c3ba8f59ed
4 changed files with 19 additions and 8 deletions

@@ -43,7 +43,7 @@ use std::{
     time::{Duration, SystemTime},
 };
 use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap};
-use util::{post_inc, ResultExt};
+use util::ResultExt;
 
 lazy_static! {
     static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
@@ -796,11 +796,14 @@ impl LocalWorktree {
                 &prev_snapshot,
                 project_id,
                 worktree_id,
-                post_inc(&mut update_id),
+                update_id,
                 false,
             );
-            match rpc.send(message) {
-                Ok(()) => prev_snapshot = snapshot,
+            match rpc.request(message).await {
+                Ok(_) => {
+                    prev_snapshot = snapshot;
+                    update_id += 1;
+                }
                 Err(err) => log::error!("error sending snapshot diff {}", err),
             }
         }
@@ -2451,7 +2454,7 @@ mod tests {
         fmt::Write,
         time::{SystemTime, UNIX_EPOCH},
     };
-    use util::test::temp_tree;
+    use util::{post_inc, test::temp_tree};
 
     #[gpui::test]
     async fn test_traversal(cx: gpui::TestAppContext) {
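
The client-side half of the fix: instead of fire-and-forget `rpc.send` with `post_inc(&mut update_id)`, the worktree now awaits the server's acknowledgment via `rpc.request(message).await` and only advances `prev_snapshot` and `update_id` once that Ack arrives, so a later diff can never reach the server before an earlier one. A minimal, self-contained sketch of that pattern follows; the `UpdateWorktree`, `Ack`, and `request` names here are stand-ins, not the actual zed types.

```rust
// Sketch only: "send update N, wait for the Ack, then move on to N + 1".
#[derive(Debug)]
struct UpdateWorktree {
    id: u64,
}

struct Ack;

// Stand-in for `rpc.request(message).await`: Ok(Ack) means the server has
// applied this update and it is safe to send the next one.
fn request(message: &UpdateWorktree) -> Result<Ack, String> {
    println!("server applied update {}", message.id);
    Ok(Ack)
}

fn main() {
    let snapshots = ["a", "b", "c"]; // pretend worktree snapshots
    let mut prev_snapshot = "";
    let mut update_id = 0;

    for snapshot in snapshots {
        // In the real code this is the diff between `prev_snapshot` and
        // `snapshot`, tagged with the current `update_id`.
        let message = UpdateWorktree { id: update_id };
        match request(&message) {
            Ok(_) => {
                // Only after the Ack do we advance the baseline and the id,
                // so updates leave the client strictly in order.
                prev_snapshot = snapshot;
                update_id += 1;
            }
            Err(err) => eprintln!("error sending snapshot diff {}", err),
        }
    }
    println!("synced {} updates, latest snapshot {:?}", update_id, prev_snapshot);
}
```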

@@ -199,6 +199,7 @@ request_messages!(
     (ShareProject, Ack),
     (ShareWorktree, Ack),
     (UpdateBuffer, Ack),
+    (UpdateWorktree, Ack),
 );
 
 entity_messages!(

@@ -75,7 +75,7 @@ impl Server {
             .add_request_handler(Server::register_worktree)
             .add_message_handler(Server::unregister_worktree)
             .add_request_handler(Server::share_worktree)
-            .add_message_handler(Server::update_worktree)
+            .add_request_handler(Server::update_worktree)
             .add_message_handler(Server::update_diagnostic_summary)
             .add_message_handler(Server::disk_based_diagnostics_updating)
             .add_message_handler(Server::disk_based_diagnostics_updated)
@@ -497,11 +497,12 @@ impl Server {
     async fn update_worktree(
         mut self: Arc<Server>,
         request: TypedEnvelope<proto::UpdateWorktree>,
-    ) -> tide::Result<()> {
+    ) -> tide::Result<proto::Ack> {
         let connection_ids = self.state_mut().update_worktree(
             request.sender_id,
             request.payload.project_id,
             request.payload.worktree_id,
+            request.payload.id,
             &request.payload.removed_entries,
             &request.payload.updated_entries,
         )?;
@@ -511,7 +512,7 @@ impl Server {
                 .forward_send(request.sender_id, connection_id, request.payload.clone())
         })?;
 
-        Ok(())
+        Ok(proto::Ack {})
     }
 
     async fn update_diagnostic_summary(

@@ -537,6 +537,7 @@ impl Store {
         connection_id: ConnectionId,
         project_id: u64,
         worktree_id: u64,
+        update_id: u64,
         removed_entries: &[u64],
         updated_entries: &[proto::Entry],
     ) -> tide::Result<Vec<ConnectionId>> {
@@ -548,6 +549,11 @@ impl Store {
             .share
             .as_mut()
             .ok_or_else(|| anyhow!("worktree is not shared"))?;
+        if share.next_update_id != update_id {
+            return Err(anyhow!("received worktree updates out-of-order"))?;
+        }
+        share.next_update_id = update_id + 1;
+
         for entry_id in removed_entries {
             share.entries.remove(&entry_id);
         }