Merge pull request #1265 from zed-industries/worktree-performance
Fix problems that arise when large numbers of files change on disk
commit 0b2452f666
9 changed files with 393 additions and 453 deletions
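The hunks below mostly follow from one API change: `Worktree::remote` no longer hands back a separate load task for the caller to detach; the remote worktree is constructed up front and its entries arrive later through `update_from_remote`. Here is a minimal, self-contained sketch of that caller-side pattern, using toy stand-in types rather than the real zed `Worktree`/gpui API:

```rust
// Toy stand-ins for the real types; only the shape of the change is shown.
struct RemoteWorktree {
    root_name: String,
    entries: Vec<String>,
}

impl RemoteWorktree {
    // New-style constructor: no eagerly loaded entries and no separate
    // load task for the caller to manage.
    fn remote(root_name: &str) -> Self {
        Self {
            root_name: root_name.to_string(),
            entries: Vec::new(),
        }
    }

    // Entries are filled in by streamed updates instead of an initial load task.
    fn update_from_remote(&mut self, update: Vec<String>) {
        self.entries.extend(update);
    }
}

fn main() {
    // Before (per the removed lines): let (worktree, load_task) = ...; load_task.detach();
    // After: construct the worktree and apply updates as they arrive.
    let mut worktree = RemoteWorktree::remote("the-root");
    worktree.update_from_remote(vec!["a/file1".into()]);
    assert_eq!(worktree.entries.len(), 1);
    println!("{} has {} entries", worktree.root_name, worktree.entries.len());
}
```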
@@ -507,10 +507,9 @@ impl Project {
         let mut worktrees = Vec::new();
         for worktree in response.worktrees {
-            let (worktree, load_task) = cx
+            let worktree = cx
                 .update(|cx| Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx));
             worktrees.push(worktree);
-            load_task.detach();
         }

         let (opened_buffer_tx, opened_buffer_rx) = watch::channel();
@@ -1102,7 +1101,7 @@ impl Project {
             .ok_or_else(|| anyhow!("missing entry in response"))?;
         worktree
             .update(&mut cx, |worktree, cx| {
-                worktree.as_remote().unwrap().insert_entry(
+                worktree.as_remote_mut().unwrap().insert_entry(
                     entry,
                     response.worktree_scan_id as usize,
                     cx,
@@ -1145,7 +1144,7 @@ impl Project {
             .ok_or_else(|| anyhow!("missing entry in response"))?;
         worktree
             .update(&mut cx, |worktree, cx| {
-                worktree.as_remote().unwrap().insert_entry(
+                worktree.as_remote_mut().unwrap().insert_entry(
                     entry,
                     response.worktree_scan_id as usize,
                     cx,
@@ -1188,7 +1187,7 @@ impl Project {
             .ok_or_else(|| anyhow!("missing entry in response"))?;
         worktree
             .update(&mut cx, |worktree, cx| {
-                worktree.as_remote().unwrap().insert_entry(
+                worktree.as_remote_mut().unwrap().insert_entry(
                     entry,
                     response.worktree_scan_id as usize,
                     cx,
@@ -1221,7 +1220,7 @@ impl Project {
             .await?;
         worktree
             .update(&mut cx, move |worktree, cx| {
-                worktree.as_remote().unwrap().delete_entry(
+                worktree.as_remote_mut().unwrap().delete_entry(
                     entry_id,
                     response.worktree_scan_id as usize,
                     cx,
@@ -1352,12 +1351,13 @@ impl Project {
             let client = self.client.clone();
             cx.foreground()
                 .spawn(async move {
-                    share.await?;
                     client.send(proto::RespondToJoinProjectRequest {
                         requester_id,
                         project_id,
                         allow,
-                    })
+                    })?;
+                    share.await?;
+                    anyhow::Ok(())
                 })
                 .detach_and_log_err(cx);
         }
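In the hunk above, the response is sent before sharing is awaited, and the async block now ends with `anyhow::Ok(())` so the two `?` operators have a concrete `Result` to propagate into. A small sketch of that shape, assuming the `anyhow` and `futures` crates and hypothetical `send_response`/`share_project` functions standing in for `client.send` and the share task:

```rust
use anyhow::Result;

// Hypothetical stand-in for client.send(...), which returns a Result synchronously.
fn send_response() -> Result<()> {
    Ok(())
}

// Hypothetical stand-in for the share future.
async fn share_project() -> Result<()> {
    Ok(())
}

async fn respond_then_share() -> Result<()> {
    send_response()?; // reply to the join request first
    share_project().await?; // then wait for sharing to finish
    anyhow::Ok(()) // gives the block a concrete Ok value, as in the hunk above
}

fn main() -> Result<()> {
    // A tiny executor just to drive the future in this self-contained sketch.
    futures::executor::block_on(respond_then_share())
}
```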
@@ -4552,18 +4552,9 @@ impl Project {
                 {
                     this.worktrees.push(WorktreeHandle::Strong(old_worktree));
                 } else {
-                    let worktree = proto::Worktree {
-                        id: worktree.id,
-                        root_name: worktree.root_name,
-                        entries: Default::default(),
-                        diagnostic_summaries: Default::default(),
-                        visible: worktree.visible,
-                        scan_id: 0,
-                    };
-                    let (worktree, load_task) =
+                    let worktree =
                         Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx);
                     this.add_worktree(&worktree, cx);
-                    load_task.detach();
                 }
             }

@@ -4587,8 +4578,8 @@ impl Project {
             if let Some(worktree) = this.worktree_for_id(worktree_id, cx) {
                 worktree.update(cx, |worktree, _| {
                     let worktree = worktree.as_remote_mut().unwrap();
-                    worktree.update_from_remote(envelope)
-                })?;
+                    worktree.update_from_remote(envelope.payload);
+                });
             }
             Ok(())
         })
@@ -8125,7 +8116,10 @@ mod tests {
     }

     #[gpui::test(retries = 5)]
-    async fn test_rescan_and_remote_updates(cx: &mut gpui::TestAppContext) {
+    async fn test_rescan_and_remote_updates(
+        deterministic: Arc<Deterministic>,
+        cx: &mut gpui::TestAppContext,
+    ) {
         let dir = temp_tree(json!({
             "a": {
                 "file1": "",
@@ -8169,17 +8163,24 @@ mod tests {
         // Create a remote copy of this worktree.
         let tree = project.read_with(cx, |project, cx| project.worktrees(cx).next().unwrap());
         let initial_snapshot = tree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
-        let (remote, load_task) = cx.update(|cx| {
+        let remote = cx.update(|cx| {
             Worktree::remote(
                 1,
                 1,
-                initial_snapshot.to_proto(&Default::default(), true),
+                proto::WorktreeMetadata {
+                    id: initial_snapshot.id().to_proto(),
+                    root_name: initial_snapshot.root_name().into(),
+                    visible: true,
+                },
                 rpc.clone(),
                 cx,
             )
         });
-        // tree
-        load_task.await;
+        remote.update(cx, |remote, _| {
+            let update = initial_snapshot.build_initial_update(1);
+            remote.as_remote_mut().unwrap().update_from_remote(update);
+        });
+        deterministic.run_until_parked();

         cx.read(|cx| {
             assert!(!buffer2.read(cx).is_dirty());
@@ -8245,19 +8246,16 @@ mod tests {
         // Update the remote worktree. Check that it becomes consistent with the
         // local worktree.
         remote.update(cx, |remote, cx| {
-            let update_message = tree.read(cx).as_local().unwrap().snapshot().build_update(
+            let update = tree.read(cx).as_local().unwrap().snapshot().build_update(
                 &initial_snapshot,
                 1,
                 1,
                 true,
             );
-            remote
-                .as_remote_mut()
-                .unwrap()
-                .snapshot
-                .apply_remote_update(update_message)
-                .unwrap();
-
+            remote.as_remote_mut().unwrap().update_from_remote(update);
+        });
+        deterministic.run_until_parked();
+        remote.read_with(cx, |remote, _| {
             assert_eq!(
                 remote
                     .paths()
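The updated test no longer applies updates synchronously to the remote snapshot; it feeds them through `update_from_remote` and then drains the deterministic executor with `run_until_parked()` before asserting. A self-contained sketch of that queue-then-drain pattern, with toy types standing in for gpui's `Deterministic` and the `Worktree` model:

```rust
// Toy model of a remote worktree whose updates are applied asynchronously.
#[derive(Default)]
struct RemoteTree {
    paths: Vec<String>,
    pending: Vec<Vec<String>>, // queued updates, applied by "background" work
}

impl RemoteTree {
    fn update_from_remote(&mut self, update: Vec<String>) {
        // Only enqueue here; nothing is visible until the queue is drained.
        self.pending.push(update);
    }

    // Stand-in for deterministic.run_until_parked(): run all queued work.
    fn run_until_parked(&mut self) {
        for update in self.pending.drain(..) {
            self.paths = update;
        }
    }
}

fn main() {
    let mut remote = RemoteTree::default();
    remote.update_from_remote(vec!["a/file1".into()]);
    // Asserting before draining would observe stale state.
    assert!(remote.paths.is_empty());
    remote.run_until_parked();
    assert_eq!(remote.paths, vec!["a/file1".to_string()]);
}
```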