Implement rejoining projects as guest when rejoining a room
Co-authored-by: Julia Risley <julia@zed.dev>
parent 55ebfe8321
commit 6542b30d1f
7 changed files with 447 additions and 166 deletions
@@ -1453,14 +1453,124 @@ impl Database {
             .exec(&*tx)
             .await?;

         // TODO: handle left projects
+        let mut rejoined_projects = Vec::new();
+        for rejoined_project in &rejoin_room.rejoined_projects {
+            let project_id = ProjectId::from_proto(rejoined_project.id);
+            let Some(project) = project::Entity::find_by_id(project_id)
+                .one(&*tx)
+                .await? else {
+                continue
+            };
+
+            let db_worktrees = project.find_related(worktree::Entity).all(&*tx).await?;
+            let mut worktrees = Vec::new();
+            for db_worktree in db_worktrees {
+                let mut worktree = RejoinedWorktree {
+                    id: db_worktree.id as u64,
+                    abs_path: db_worktree.abs_path,
+                    root_name: db_worktree.root_name,
+                    visible: db_worktree.visible,
+                    updated_entries: Default::default(),
+                    removed_entries: Default::default(),
+                    diagnostic_summaries: Default::default(),
+                    scan_id: db_worktree.scan_id as u64,
+                    is_complete: db_worktree.is_complete,
+                };
+
+                let rejoined_worktree = rejoined_project
+                    .worktrees
+                    .iter()
+                    .find(|worktree| worktree.id == db_worktree.id as u64);
+
+                let entry_filter = if let Some(rejoined_worktree) = rejoined_worktree {
+                    Condition::all()
+                        .add(worktree_entry::Column::WorktreeId.eq(worktree.id))
+                        .add(worktree_entry::Column::ScanId.gt(rejoined_worktree.scan_id))
+                } else {
+                    Condition::all()
+                        .add(worktree_entry::Column::WorktreeId.eq(worktree.id))
+                        .add(worktree_entry::Column::IsDeleted.eq(false))
+                };
+
+                let mut db_entries = worktree_entry::Entity::find()
+                    .filter(entry_filter)
+                    .stream(&*tx)
+                    .await?;
+
+                while let Some(db_entry) = db_entries.next().await {
+                    let db_entry = db_entry?;
+
+                    if db_entry.is_deleted {
+                        worktree.removed_entries.push(db_entry.id as u64);
+                    } else {
+                        worktree.updated_entries.push(proto::Entry {
+                            id: db_entry.id as u64,
+                            is_dir: db_entry.is_dir,
+                            path: db_entry.path,
+                            inode: db_entry.inode as u64,
+                            mtime: Some(proto::Timestamp {
+                                seconds: db_entry.mtime_seconds as u64,
+                                nanos: db_entry.mtime_nanos as u32,
+                            }),
+                            is_symlink: db_entry.is_symlink,
+                            is_ignored: db_entry.is_ignored,
+                        });
+                    }
+                }
+
+                worktrees.push(worktree);
+            }
+
+            let language_servers = project
+                .find_related(language_server::Entity)
+                .all(&*tx)
+                .await?
+                .into_iter()
+                .map(|language_server| proto::LanguageServer {
+                    id: language_server.id as u64,
+                    name: language_server.name,
+                })
+                .collect::<Vec<_>>();
+
+            let mut collaborators = project
+                .find_related(project_collaborator::Entity)
+                .all(&*tx)
+                .await?
+                .into_iter()
+                .map(|collaborator| ProjectCollaborator {
+                    connection_id: collaborator.connection(),
+                    user_id: collaborator.user_id,
+                    replica_id: collaborator.replica_id,
+                    is_host: collaborator.is_host,
+                })
+                .collect::<Vec<_>>();
+
+            let old_connection_id;
+            if let Some(self_collaborator_ix) = collaborators
+                .iter()
+                .position(|collaborator| collaborator.user_id == user_id)
+            {
+                let self_collaborator = collaborators.swap_remove(self_collaborator_ix);
+                old_connection_id = self_collaborator.connection_id;
+            } else {
+                continue;
+            }
+
+            rejoined_projects.push(RejoinedProject {
+                id: project_id,
+                old_connection_id,
+                collaborators,
+                worktrees,
+                language_servers,
+            });
+        }
+
         let room = self.get_room(room_id, &tx).await?;
         Ok((
             room_id,
             RejoinedRoom {
                 room,
-                // TODO: handle rejoined projects
-                rejoined_projects: Default::default(),
+                rejoined_projects,
                 reshared_projects,
             },
         ))
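The block above is the heart of the change: when a guest rejoins, the server does not resend whole worktrees. If the guest reports the last scan_id it saw for a worktree, entries are filtered with ScanId.gt(scan_id); live rows become updated_entries and tombstoned rows become removed_entries, while a worktree the guest has never seen gets all live rows. A minimal, self-contained sketch of that partitioning (plain structs standing in for the sea-orm rows, not the server code itself):

// Illustrative only: in-memory stand-ins for worktree_entry rows.
struct Entry {
    id: u64,
    scan_id: u64,
    is_deleted: bool,
}

/// Split entries into (updated, removed) the way the rejoin query does:
/// only rows whose scan_id is newer than the guest's last-seen scan_id are sent,
/// and soft-deleted rows are reported as removals rather than updates.
fn rejoin_delta(entries: &[Entry], last_seen_scan_id: Option<u64>) -> (Vec<u64>, Vec<u64>) {
    let mut updated = Vec::new();
    let mut removed = Vec::new();
    for entry in entries {
        let include = match last_seen_scan_id {
            // Guest rejoined a known worktree: send only rows that changed since.
            Some(scan_id) => entry.scan_id > scan_id,
            // Guest has never seen this worktree: send all live rows.
            None => !entry.is_deleted,
        };
        if include {
            if entry.is_deleted {
                removed.push(entry.id);
            } else {
                updated.push(entry.id);
            }
        }
    }
    (updated, removed)
}

fn main() {
    let entries = vec![
        Entry { id: 1, scan_id: 3, is_deleted: false },
        Entry { id: 2, scan_id: 7, is_deleted: true },
        Entry { id: 3, scan_id: 8, is_deleted: false },
    ];
    // Guest last saw scan 5: entry 1 is unchanged, 2 was removed, 3 was updated.
    assert_eq!(rejoin_delta(&entries, Some(5)), (vec![3], vec![2]));
    // Fresh join: only live entries matter.
    assert_eq!(rejoin_delta(&entries, None), (vec![1, 3], Vec::<u64>::new()));
    println!("ok");
}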
@@ -2079,6 +2189,8 @@ impl Database {
                    mtime_nanos: ActiveValue::set(mtime.nanos as i32),
                    is_symlink: ActiveValue::set(entry.is_symlink),
                    is_ignored: ActiveValue::set(entry.is_ignored),
+                    is_deleted: ActiveValue::set(false),
+                    scan_id: ActiveValue::set(update.scan_id as i64),
                }
            }))
            .on_conflict(
@@ -2103,7 +2215,7 @@ impl Database {
        }

        if !update.removed_entries.is_empty() {
-            worktree_entry::Entity::delete_many()
+            worktree_entry::Entity::update_many()
                .filter(
                    worktree_entry::Column::ProjectId
                        .eq(project_id)
@@ -2113,6 +2225,11 @@ impl Database {
                            .is_in(update.removed_entries.iter().map(|id| *id as i64)),
                    ),
                )
+                .set(worktree_entry::ActiveModel {
+                    is_deleted: ActiveValue::Set(true),
+                    scan_id: ActiveValue::Set(update.scan_id as i64),
+                    ..Default::default()
+                })
                .exec(&*tx)
                .await?;
        }
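With the three hunks above, removing entries no longer deletes rows: delete_many becomes update_many, removed entries are tombstoned with is_deleted = true and the scan_id of the removal, and the upsert path re-inserts live entries with is_deleted = false. A rough in-memory sketch of the invariant this maintains (illustrative names, not the database layer):

use std::collections::HashMap;

// Illustrative stand-in for a worktree_entry row.
struct EntryRow {
    scan_id: u64,
    is_deleted: bool,
}

#[derive(Default)]
struct WorktreeEntries {
    rows: HashMap<u64, EntryRow>, // keyed by entry id
}

impl WorktreeEntries {
    // Mirrors the upsert hunk: inserted/updated entries are live and carry the
    // scan_id of the update that produced them.
    fn upsert(&mut self, id: u64, scan_id: u64) {
        self.rows.insert(id, EntryRow { scan_id, is_deleted: false });
    }

    // Mirrors the update_many hunk: removals keep the row around as a tombstone,
    // so a rejoining guest can be told the entry is gone.
    fn remove(&mut self, id: u64, scan_id: u64) {
        if let Some(row) = self.rows.get_mut(&id) {
            row.is_deleted = true;
            row.scan_id = scan_id;
        }
    }
}

fn main() {
    let mut entries = WorktreeEntries::default();
    entries.upsert(1, 1);
    entries.upsert(2, 1);
    entries.remove(1, 2);
    // The removed entry is still present, marked deleted at scan 2.
    assert!(entries.rows[&1].is_deleted);
    assert_eq!(entries.rows[&1].scan_id, 2);
    assert!(!entries.rows[&2].is_deleted);
    println!("ok");
}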
@@ -2935,6 +3052,7 @@ pub struct RejoinedProject {
     pub language_servers: Vec<proto::LanguageServer>,
 }

 #[derive(Debug)]
 pub struct RejoinedWorktree {
     pub id: u64,
     pub abs_path: String,
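For reference, the construction in the first hunk implies roughly this shape for RejoinedWorktree. Only id and abs_path are visible in this hunk, so the remaining field types are inferred from how they are filled in and should be read as assumptions, not the exact definition:

// Stand-ins so the sketch is self-contained; in the real code these are protobuf types.
mod proto {
    pub struct Entry;
    pub struct DiagnosticSummary;
}

// Field list taken from the RejoinedWorktree construction above; types beyond
// `id` and `abs_path` are educated guesses.
#[derive(Debug)]
pub struct RejoinedWorktree {
    pub id: u64,
    pub abs_path: String,
    pub root_name: String,
    pub visible: bool,
    pub updated_entries: Vec<proto::Entry>,
    pub removed_entries: Vec<u64>,
    pub diagnostic_summaries: Vec<proto::DiagnosticSummary>,
    pub scan_id: u64,
    pub is_complete: bool,
}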
@@ -17,6 +17,8 @@ pub struct Model {
     pub mtime_nanos: i32,
     pub is_symlink: bool,
     pub is_ignored: bool,
+    pub is_deleted: bool,
+    pub scan_id: i64,
 }

 #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
@@ -1307,7 +1307,7 @@ async fn test_host_disconnect(
 }

 #[gpui::test(iterations = 10)]
-async fn test_host_reconnect(
+async fn test_project_reconnect(
     deterministic: Arc<Deterministic>,
     cx_a: &mut TestAppContext,
     cx_b: &mut TestAppContext,
@@ -1336,9 +1336,12 @@ async fn test_host_reconnect(
                }
            },
            "dir2": {
-                "x": "x-contents",
-                "y": "y-contents",
-                "z": "z-contents",
+                "x.txt": "x-contents",
+                "y.txt": "y-contents",
+                "z.txt": "z-contents",
            },
+            "dir3": {
+                "w.txt": "w-contents",
+            },
        }),
    )
@@ -1348,7 +1351,16 @@ async fn test_host_reconnect(
        .insert_tree(
            "/root-2",
            json!({
                "1.txt": "1-contents",
                "2.txt": "2-contents",
            }),
        )
        .await;
+    client_a
+        .fs
+        .insert_tree(
+            "/root-3",
+            json!({
+                "3.txt": "3-contents",
+            }),
+        )
+        .await;
@@ -1356,6 +1368,7 @@ async fn test_host_reconnect(
     let active_call_a = cx_a.read(ActiveCall::global);
     let (project_a1, _) = client_a.build_local_project("/root-1/dir1", cx_a).await;
     let (project_a2, _) = client_a.build_local_project("/root-2", cx_a).await;
+    let (project_a3, _) = client_a.build_local_project("/root-3", cx_a).await;
     let worktree_a1 =
         project_a1.read_with(cx_a, |project, cx| project.worktrees(cx).next().unwrap());
     let project1_id = active_call_a
@@ -1366,9 +1379,14 @@ async fn test_host_reconnect(
        .update(cx_a, |call, cx| call.share_project(project_a2.clone(), cx))
        .await
        .unwrap();
+    let project3_id = active_call_a
+        .update(cx_a, |call, cx| call.share_project(project_a3.clone(), cx))
+        .await
+        .unwrap();

     let project_b1 = client_b.build_remote_project(project1_id, cx_b).await;
     let project_b2 = client_b.build_remote_project(project2_id, cx_b).await;
+    let project_b3 = client_b.build_remote_project(project3_id, cx_b).await;
     deterministic.run_until_parked();

     let worktree1_id = worktree_a1.read_with(cx_a, |worktree, _| {
@@ -1473,7 +1491,7 @@ async fn test_host_reconnect(
                .paths()
                .map(|p| p.to_str().unwrap())
                .collect::<Vec<_>>(),
-            vec!["x", "y", "z"]
+            vec!["x.txt", "y.txt", "z.txt"]
        );
    });
    project_b1.read_with(cx_b, |project, cx| {
@@ -1510,10 +1528,98 @@ async fn test_host_reconnect(
                .paths()
                .map(|p| p.to_str().unwrap())
                .collect::<Vec<_>>(),
-            vec!["x", "y", "z"]
+            vec!["x.txt", "y.txt", "z.txt"]
        );
    });
    project_b2.read_with(cx_b, |project, _| assert!(project.is_read_only()));
+    project_b3.read_with(cx_b, |project, _| assert!(!project.is_read_only()));
+
+    // Drop client B's connection.
+    server.forbid_connections();
+    server.disconnect_client(client_b.peer_id().unwrap());
+    deterministic.advance_clock(RECEIVE_TIMEOUT);
+
+    // While client B is disconnected, add and remove files from client A's project
+    client_a
+        .fs
+        .insert_file("/root-1/dir1/subdir2/j.txt", "j-contents".into())
+        .await;
+    client_a
+        .fs
+        .remove_file("/root-1/dir1/subdir2/i.txt".as_ref(), Default::default())
+        .await
+        .unwrap();
+
+    // While client B is disconnected, add and remove worktrees from client A's project.
+    let (worktree_a3, _) = project_a1
+        .update(cx_a, |p, cx| {
+            p.find_or_create_local_worktree("/root-1/dir3", true, cx)
+        })
+        .await
+        .unwrap();
+    worktree_a3
+        .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
+        .await;
+    let worktree3_id = worktree_a3.read_with(cx_a, |tree, _| {
+        assert!(tree.as_local().unwrap().is_shared());
+        tree.id()
+    });
+    project_a1
+        .update(cx_a, |project, cx| {
+            project.remove_worktree(worktree2_id, cx)
+        })
+        .await;
+    deterministic.run_until_parked();
+
+    // While disconnected, close project 3
+    cx_a.update(|_| drop(project_a3));
+
+    // Client B reconnects. They re-join the room and the remaining shared project.
+    server.allow_connections();
+    client_b
+        .authenticate_and_connect(false, &cx_b.to_async())
+        .await
+        .unwrap();
+    deterministic.run_until_parked();
+    project_b1.read_with(cx_b, |project, cx| {
+        assert!(!project.is_read_only());
+        assert_eq!(
+            project
+                .worktree_for_id(worktree1_id, cx)
+                .unwrap()
+                .read(cx)
+                .snapshot()
+                .paths()
+                .map(|p| p.to_str().unwrap())
+                .collect::<Vec<_>>(),
+            vec![
+                "a.txt",
+                "b.txt",
+                "subdir1",
+                "subdir1/c.txt",
+                "subdir1/d.txt",
+                "subdir1/e.txt",
+                "subdir2",
+                "subdir2/f.txt",
+                "subdir2/g.txt",
+                "subdir2/h.txt",
+                "subdir2/j.txt"
+            ]
+        );
+        assert!(project.worktree_for_id(worktree2_id, cx).is_none());
+        assert_eq!(
+            project
+                .worktree_for_id(worktree3_id, cx)
+                .unwrap()
+                .read(cx)
+                .snapshot()
+                .paths()
+                .map(|p| p.to_str().unwrap())
+                .collect::<Vec<_>>(),
+            vec!["w.txt"]
+        );
+    });
+    project_b3.read_with(cx_b, |project, _| assert!(project.is_read_only()));
 }

 #[gpui::test(iterations = 10)]
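End to end, the test exercises the path where a rejoining guest receives each worktree's updated_entries and removed_entries and folds them into its local snapshot. A simplified model of that application step (a plain map keyed by entry id, not Zed's actual Worktree implementation):

use std::collections::BTreeMap;

// Simplified stand-ins for proto::Entry and the guest's worktree snapshot.
#[derive(Debug, Clone, PartialEq)]
struct Entry {
    id: u64,
    path: String,
}

#[derive(Default)]
struct WorktreeSnapshot {
    entries: BTreeMap<u64, Entry>, // keyed by entry id
}

impl WorktreeSnapshot {
    // Apply a rejoin delta: removals drop entries, updates insert or replace them.
    fn apply_rejoin(&mut self, updated: Vec<Entry>, removed: Vec<u64>) {
        for id in removed {
            self.entries.remove(&id);
        }
        for entry in updated {
            self.entries.insert(entry.id, entry);
        }
    }
}

fn main() {
    let mut snapshot = WorktreeSnapshot::default();
    snapshot.apply_rejoin(
        vec![
            Entry { id: 1, path: "subdir2/i.txt".into() },
            Entry { id: 2, path: "subdir2/h.txt".into() },
        ],
        vec![],
    );

    // While the guest was away, i.txt was removed and j.txt was created,
    // mirroring what the test does on the host's file system.
    snapshot.apply_rejoin(vec![Entry { id: 3, path: "subdir2/j.txt".into() }], vec![1]);

    let paths: Vec<_> = snapshot.entries.values().map(|e| e.path.as_str()).collect();
    assert_eq!(paths, vec!["subdir2/h.txt", "subdir2/j.txt"]);
    println!("ok");
}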