Remove project join requests
parent b35e8f0164
commit be8990ea78
11 changed files with 284 additions and 1156 deletions
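In short: hosts now share and unshare a project explicitly, and guests join a shared project directly. The request/accept/decline round-trip (RequestJoinProject, RespondToJoinProjectRequest, the JoinProjectResponse decline reasons) disappears, as does the per-project `online` flag and the parked-receipt bookkeeping in the collab server's Store. A rough model of the new lifecycle, using hypothetical stand-in types rather than the real Project/Store API:

    // Sketch only: `Project` here is a toy stand-in, not the real collab type.
    #[derive(Debug, PartialEq)]
    enum JoinError {
        NotShared,
    }

    #[derive(Default)]
    struct Project {
        shared: bool,
        guest_count: usize,
    }

    impl Project {
        fn share(&mut self) {
            self.shared = true;
        }

        fn unshare(&mut self) {
            // Guests are dropped immediately; there is no one left to ask.
            self.shared = false;
            self.guest_count = 0;
        }

        fn join(&mut self) -> Result<(), JoinError> {
            // No pending state: a join either succeeds now or fails now.
            if self.shared {
                self.guest_count += 1;
                Ok(())
            } else {
                Err(JoinError::NotShared)
            }
        }
    }

    fn main() {
        let mut project = Project::default();
        assert_eq!(project.join(), Err(JoinError::NotShared));
        project.share();
        assert_eq!(project.join(), Ok(()));
        project.unshare();
        assert_eq!(project.guest_count, 0);
    }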
@@ -7,7 +7,7 @@ use ::rpc::Peer;
 use anyhow::anyhow;
 use call::Room;
 use client::{
-    self, proto, test::FakeHttpClient, Channel, ChannelDetails, ChannelList, Client, Connection,
+    self, test::FakeHttpClient, Channel, ChannelDetails, ChannelList, Client, Connection,
     Credentials, EstablishConnectionError, UserStore, RECEIVE_TIMEOUT,
 };
 use collections::{BTreeMap, HashMap, HashSet};
@@ -40,7 +40,6 @@ use serde_json::json;
 use settings::{Formatter, Settings};
-use sqlx::types::time::OffsetDateTime;
 use std::{
     cell::RefCell,
     env,
     ops::Deref,
     path::{Path, PathBuf},
@@ -459,12 +458,15 @@ async fn test_unshare_project(
         .await
         .unwrap();
 
-    // When client B leaves the project, it gets automatically unshared.
-    cx_b.update(|_| drop(project_b));
+    // When client A unshares the project, client B's project becomes read-only.
+    project_a
+        .update(cx_a, |project, cx| project.unshare(cx))
+        .unwrap();
     deterministic.run_until_parked();
     assert!(worktree_a.read_with(cx_a, |tree, _| !tree.as_local().unwrap().is_shared()));
+    assert!(project_b.read_with(cx_b, |project, _| project.is_read_only()));
 
-    // When client B joins again, the project gets re-shared.
+    // Client B can join again after client A re-shares.
     let project_b2 = client_b.build_remote_project(&project_a, cx_a, cx_b).await;
     assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared()));
     project_b2
@@ -515,7 +517,7 @@ async fn test_host_disconnect(
 
     let (project_a, worktree_id) = client_a.build_local_project("/a", cx_a).await;
     let worktree_a = project_a.read_with(cx_a, |project, cx| project.worktrees(cx).next().unwrap());
-    let project_id = project_a.read_with(cx_a, |project, _| project.remote_id().unwrap());
+    project_a.read_with(cx_a, |project, _| project.remote_id().unwrap());
 
     let project_b = client_b.build_remote_project(&project_a, cx_a, cx_b).await;
     assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared()));
@@ -539,20 +541,6 @@ async fn test_host_disconnect(
     editor_b.update(cx_b, |editor, cx| editor.insert("X", cx));
     assert!(cx_b.is_window_edited(workspace_b.window_id()));
 
-    // Request to join that project as client C
-    let project_c = cx_c.spawn(|cx| {
-        Project::remote(
-            project_id,
-            client_c.client.clone(),
-            client_c.user_store.clone(),
-            client_c.project_store.clone(),
-            client_c.language_registry.clone(),
-            FakeFs::new(cx.background()),
-            cx,
-        )
-    });
-    deterministic.run_until_parked();
-
     // Drop client A's connection. Collaborators should disappear and the project should not be shown as shared.
     server.disconnect_client(client_a.current_user_id(cx_a));
     cx_a.foreground().advance_clock(rpc::RECEIVE_TIMEOUT);
@@ -564,10 +552,6 @@ async fn test_host_disconnect(
         .condition(cx_b, |project, _| project.is_read_only())
         .await;
     assert!(worktree_a.read_with(cx_a, |tree, _| !tree.as_local().unwrap().is_shared()));
-    assert!(matches!(
-        project_c.await.unwrap_err(),
-        project::JoinProjectError::HostWentOffline
-    ));
 
     // Ensure client B's edited state is reset and that the whole window is blurred.
     cx_b.read(|cx| {
@@ -598,139 +582,6 @@ async fn test_host_disconnect(
         .unwrap();
 }
 
-#[gpui::test(iterations = 10)]
-async fn test_decline_join_request(
-    deterministic: Arc<Deterministic>,
-    cx_a: &mut TestAppContext,
-    cx_b: &mut TestAppContext,
-) {
-    cx_a.foreground().forbid_parking();
-    let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await;
-    let client_a = server.create_client(cx_a, "user_a").await;
-    let client_b = server.create_client(cx_b, "user_b").await;
-    server
-        .make_contacts(vec![(&client_a, cx_a), (&client_b, cx_b)])
-        .await;
-
-    client_a.fs.insert_tree("/a", json!({})).await;
-
-    let (project_a, _) = client_a.build_local_project("/a", cx_a).await;
-    let project_id = project_a.read_with(cx_a, |project, _| project.remote_id().unwrap());
-
-    // Request to join that project as client B
-    let project_b = cx_b.spawn(|cx| {
-        Project::remote(
-            project_id,
-            client_b.client.clone(),
-            client_b.user_store.clone(),
-            client_b.project_store.clone(),
-            client_b.language_registry.clone(),
-            FakeFs::new(cx.background()),
-            cx,
-        )
-    });
-    deterministic.run_until_parked();
-    project_a.update(cx_a, |project, cx| {
-        project.respond_to_join_request(client_b.user_id().unwrap(), false, cx)
-    });
-    assert!(matches!(
-        project_b.await.unwrap_err(),
-        project::JoinProjectError::HostDeclined
-    ));
-
-    // Request to join the project again as client B
-    let project_b = cx_b.spawn(|cx| {
-        Project::remote(
-            project_id,
-            client_b.client.clone(),
-            client_b.user_store.clone(),
-            client_b.project_store.clone(),
-            client_b.language_registry.clone(),
-            FakeFs::new(cx.background()),
-            cx,
-        )
-    });
-
-    // Close the project on the host
-    deterministic.run_until_parked();
-    cx_a.update(|_| drop(project_a));
-    deterministic.run_until_parked();
-    assert!(matches!(
-        project_b.await.unwrap_err(),
-        project::JoinProjectError::HostClosedProject
-    ));
-}
-
-#[gpui::test(iterations = 10)]
-async fn test_cancel_join_request(
-    deterministic: Arc<Deterministic>,
-    cx_a: &mut TestAppContext,
-    cx_b: &mut TestAppContext,
-) {
-    cx_a.foreground().forbid_parking();
-    let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await;
-    let client_a = server.create_client(cx_a, "user_a").await;
-    let client_b = server.create_client(cx_b, "user_b").await;
-    server
-        .make_contacts(vec![(&client_a, cx_a), (&client_b, cx_b)])
-        .await;
-
-    client_a.fs.insert_tree("/a", json!({})).await;
-    let (project_a, _) = client_a.build_local_project("/a", cx_a).await;
-    let project_id = project_a.read_with(cx_a, |project, _| project.remote_id().unwrap());
-
-    let user_b = client_a
-        .user_store
-        .update(cx_a, |store, cx| {
-            store.get_user(client_b.user_id().unwrap(), cx)
-        })
-        .await
-        .unwrap();
-
-    let project_a_events = Rc::new(RefCell::new(Vec::new()));
-    project_a.update(cx_a, {
-        let project_a_events = project_a_events.clone();
-        move |_, cx| {
-            cx.subscribe(&cx.handle(), move |_, _, event, _| {
-                project_a_events.borrow_mut().push(event.clone());
-            })
-            .detach();
-        }
-    });
-
-    // Request to join that project as client B
-    let project_b = cx_b.spawn(|cx| {
-        Project::remote(
-            project_id,
-            client_b.client.clone(),
-            client_b.user_store.clone(),
-            client_b.project_store.clone(),
-            client_b.language_registry.clone(),
-            FakeFs::new(cx.background()),
-            cx,
-        )
-    });
-    deterministic.run_until_parked();
-    assert_eq!(
-        &*project_a_events.borrow(),
-        &[project::Event::ContactRequestedJoin(user_b.clone())]
-    );
-    project_a_events.borrow_mut().clear();
-
-    // Cancel the join request by leaving the project
-    client_b
-        .client
-        .send(proto::LeaveProject { project_id })
-        .unwrap();
-    drop(project_b);
-
-    deterministic.run_until_parked();
-    assert_eq!(
-        &*project_a_events.borrow(),
-        &[project::Event::ContactCancelledJoinRequest(user_b)]
-    );
-}
-
 #[gpui::test(iterations = 10)]
 async fn test_propagate_saves_and_fs_changes(
     cx_a: &mut TestAppContext,
@@ -4586,7 +4437,6 @@ async fn test_random_collaboration(
     let host = server.create_client(&mut host_cx, "host").await;
     let host_project = host_cx.update(|cx| {
         Project::local(
-            true,
             host.client.clone(),
             host.user_store.clone(),
             host.project_store.clone(),
@@ -4738,6 +4588,11 @@ async fn test_random_collaboration(
         .await;
     host_language_registry.add(Arc::new(language));
 
+    host_project
+        .update(&mut host_cx, |project, cx| project.share(cx))
+        .await
+        .unwrap();
+
     let op_start_signal = futures::channel::mpsc::unbounded();
     user_ids.push(host.current_user_id(&host_cx));
     op_start_signals.push(op_start_signal.0);
@@ -5097,7 +4952,7 @@ impl TestServer {
 
         let fs = FakeFs::new(cx.background());
         let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http, cx));
-        let project_store = cx.add_model(|_| ProjectStore::new(project::Db::open_fake()));
+        let project_store = cx.add_model(|_| ProjectStore::new());
         let app_state = Arc::new(workspace::AppState {
             client: client.clone(),
             user_store: user_store.clone(),
@@ -5283,7 +5138,6 @@ impl TestClient {
     ) -> (ModelHandle<Project>, WorktreeId) {
         let project = cx.update(|cx| {
            Project::local(
-                true,
                self.client.clone(),
                self.user_store.clone(),
                self.project_store.clone(),
@@ -5316,7 +5170,10 @@ impl TestClient {
         let host_project_id = host_project
             .read_with(host_cx, |project, _| project.next_remote_id())
             .await;
-        let guest_user_id = self.user_id().unwrap();
+        host_project
+            .update(host_cx, |project, cx| project.share(cx))
+            .await
+            .unwrap();
         let languages = host_project.read_with(host_cx, |project, _| project.languages().clone());
         let project_b = guest_cx.spawn(|cx| {
             Project::remote(
@@ -5329,10 +5186,7 @@ impl TestClient {
                 cx,
             )
         });
-        host_cx.foreground().run_until_parked();
-        host_project.update(host_cx, |project, cx| {
-            project.respond_to_join_request(guest_user_id, true, cx)
-        });
+
         let project = project_b.await.unwrap();
         project
     }
@@ -5369,18 +5223,6 @@ impl TestClient {
     ) -> anyhow::Result<()> {
         let fs = project.read_with(cx, |project, _| project.fs().clone());
 
-        cx.update(|cx| {
-            cx.subscribe(&project, move |project, event, cx| {
-                if let project::Event::ContactRequestedJoin(user) = event {
-                    log::info!("Host: accepting join request from {}", user.github_login);
-                    project.update(cx, |project, cx| {
-                        project.respond_to_join_request(user.id, true, cx)
-                    });
-                }
-            })
-            .detach();
-        });
-
         while op_start_signal.next().await.is_some() {
             let distribution = rng.lock().gen_range::<usize, _>(0..100);
             let files = fs.as_fake().files().await;
@@ -88,11 +88,6 @@ impl<R: RequestMessage> Response<R> {
         self.server.peer.respond(self.receipt, payload)?;
         Ok(())
     }
-
-    fn into_receipt(self) -> Receipt<R> {
-        self.responded.store(true, SeqCst);
-        self.receipt
-    }
 }
 
 pub struct Server {
@@ -160,7 +155,7 @@ impl Server {
             .add_request_handler(Server::unregister_project)
             .add_request_handler(Server::join_project)
             .add_message_handler(Server::leave_project)
-            .add_message_handler(Server::respond_to_join_project_request)
+            .add_message_handler(Server::unshare_project)
             .add_message_handler(Server::update_project)
             .add_message_handler(Server::register_project_activity)
             .add_request_handler(Server::update_worktree)
@@ -491,21 +486,6 @@ impl Server {
                     },
                 )
             });
-
-            for (_, receipts) in project.join_requests {
-                for receipt in receipts {
-                    self.peer.respond(
-                        receipt,
-                        proto::JoinProjectResponse {
-                            variant: Some(proto::join_project_response::Variant::Decline(
-                                proto::join_project_response::Decline {
-                                    reason: proto::join_project_response::decline::Reason::WentOffline as i32
-                                },
-                            )),
-                        },
-                    )?;
-                }
-            }
         }
 
         for project_id in removed_connection.guest_project_ids {
@@ -519,16 +499,6 @@ impl Server {
                     },
                 )
             });
-            if project.guests.is_empty() {
-                self.peer
-                    .send(
-                        project.host_connection_id,
-                        proto::ProjectUnshared {
-                            project_id: project_id.to_proto(),
-                        },
-                    )
-                    .trace_err();
-            }
         }
     }
 
@@ -727,11 +697,9 @@ impl Server {
             .await
             .user_id_for_connection(request.sender_id)?;
         let project_id = self.app_state.db.register_project(user_id).await?;
-        self.store().await.register_project(
-            request.sender_id,
-            project_id,
-            request.payload.online,
-        )?;
+        self.store()
+            .await
+            .register_project(request.sender_id, project_id)?;
 
         response.send(proto::RegisterProjectResponse {
             project_id: project_id.to_proto(),
@@ -746,11 +714,10 @@ impl Server {
         response: Response<proto::UnregisterProject>,
     ) -> Result<()> {
         let project_id = ProjectId::from_proto(request.payload.project_id);
-        let (user_id, project) = {
-            let mut state = self.store().await;
-            let project = state.unregister_project(project_id, request.sender_id)?;
-            (state.user_id_for_connection(request.sender_id)?, project)
-        };
+        let project = self
+            .store()
+            .await
+            .unregister_project(project_id, request.sender_id)?;
         self.app_state.db.unregister_project(project_id).await?;
 
         broadcast(
@@ -765,32 +732,27 @@ impl Server {
                 )
             },
         );
-        for (_, receipts) in project.join_requests {
-            for receipt in receipts {
-                self.peer.respond(
-                    receipt,
-                    proto::JoinProjectResponse {
-                        variant: Some(proto::join_project_response::Variant::Decline(
-                            proto::join_project_response::Decline {
-                                reason: proto::join_project_response::decline::Reason::Closed
-                                    as i32,
-                            },
-                        )),
-                    },
-                )?;
-            }
-        }
-
-        // Send out the `UpdateContacts` message before responding to the unregister
-        // request. This way, when the project's host can keep track of the project's
-        // remote id until after they've received the `UpdateContacts` message for
-        // themself.
-        self.update_user_contacts(user_id).await?;
         response.send(proto::Ack {})?;
 
         Ok(())
     }
+
+    async fn unshare_project(
+        self: Arc<Server>,
+        message: TypedEnvelope<proto::UnshareProject>,
+    ) -> Result<()> {
+        let project_id = ProjectId::from_proto(message.payload.project_id);
+        let project = self
+            .store()
+            .await
+            .unshare_project(project_id, message.sender_id)?;
+        broadcast(message.sender_id, project.guest_connection_ids, |conn_id| {
+            self.peer.send(conn_id, message.payload.clone())
+        });
+
+        Ok(())
+    }
 
     async fn update_user_contacts(self: &Arc<Server>, user_id: UserId) -> Result<()> {
         let contacts = self.app_state.db.get_contacts(user_id).await?;
         let store = self.store().await;
@@ -849,167 +811,93 @@ impl Server {
             return Err(anyhow!("no such project"))?;
         }
 
-        self.store().await.request_join_project(
-            guest_user_id,
-            project_id,
-            response.into_receipt(),
-        )?;
-        self.peer.send(
-            host_connection_id,
-            proto::RequestJoinProject {
-                project_id: project_id.to_proto(),
-                requester_id: guest_user_id.to_proto(),
-            },
-        )?;
-        Ok(())
-    }
-
-    async fn respond_to_join_project_request(
-        self: Arc<Server>,
-        request: TypedEnvelope<proto::RespondToJoinProjectRequest>,
-    ) -> Result<()> {
-        let host_user_id;
-
-        {
-            let mut state = self.store().await;
-            let project_id = ProjectId::from_proto(request.payload.project_id);
-            let project = state.project(project_id)?;
-            if project.host_connection_id != request.sender_id {
-                Err(anyhow!("no such connection"))?;
-            }
-
-            host_user_id = project.host.user_id;
-            let guest_user_id = UserId::from_proto(request.payload.requester_id);
-
-            if !request.payload.allow {
-                let receipts = state
-                    .deny_join_project_request(request.sender_id, guest_user_id, project_id)
-                    .ok_or_else(|| anyhow!("no such request"))?;
-                for receipt in receipts {
-                    self.peer.respond(
-                        receipt,
-                        proto::JoinProjectResponse {
-                            variant: Some(proto::join_project_response::Variant::Decline(
-                                proto::join_project_response::Decline {
-                                    reason: proto::join_project_response::decline::Reason::Declined
-                                        as i32,
-                                },
-                            )),
-                        },
-                    )?;
-                }
-                return Ok(());
-            }
-
-            let (receipts_with_replica_ids, project) = state
-                .accept_join_project_request(request.sender_id, guest_user_id, project_id)
-                .ok_or_else(|| anyhow!("no such request"))?;
-
-            let peer_count = project.guests.len();
-            let mut collaborators = Vec::with_capacity(peer_count);
-            collaborators.push(proto::Collaborator {
-                peer_id: project.host_connection_id.0,
-                replica_id: 0,
-                user_id: project.host.user_id.to_proto(),
-            });
-            let worktrees = project
-                .worktrees
-                .iter()
-                .map(|(id, worktree)| proto::WorktreeMetadata {
-                    id: *id,
-                    root_name: worktree.root_name.clone(),
-                    visible: worktree.visible,
-                })
-                .collect::<Vec<_>>();
-
-            // Add all guests other than the requesting user's own connections as collaborators
-            for (guest_conn_id, guest) in &project.guests {
-                if receipts_with_replica_ids
-                    .iter()
-                    .all(|(receipt, _)| receipt.sender_id != *guest_conn_id)
-                {
-                    collaborators.push(proto::Collaborator {
-                        peer_id: guest_conn_id.0,
-                        replica_id: guest.replica_id as u32,
-                        user_id: guest.user_id.to_proto(),
-                    });
-                }
-            }
-
-            for conn_id in project.connection_ids() {
-                for (receipt, replica_id) in &receipts_with_replica_ids {
-                    if conn_id != receipt.sender_id {
-                        self.peer.send(
-                            conn_id,
-                            proto::AddProjectCollaborator {
-                                project_id: project_id.to_proto(),
-                                collaborator: Some(proto::Collaborator {
-                                    peer_id: receipt.sender_id.0,
-                                    replica_id: *replica_id as u32,
-                                    user_id: guest_user_id.to_proto(),
-                                }),
-                            },
-                        )?;
-                    }
-                }
-            }
-
-            // First, we send the metadata associated with each worktree.
-            for (receipt, replica_id) in &receipts_with_replica_ids {
-                self.peer.respond(
-                    *receipt,
-                    proto::JoinProjectResponse {
-                        variant: Some(proto::join_project_response::Variant::Accept(
-                            proto::join_project_response::Accept {
-                                worktrees: worktrees.clone(),
-                                replica_id: *replica_id as u32,
-                                collaborators: collaborators.clone(),
-                                language_servers: project.language_servers.clone(),
-                            },
-                        )),
-                    },
-                )?;
-            }
-
-            for (worktree_id, worktree) in &project.worktrees {
-                #[cfg(any(test, feature = "test-support"))]
-                const MAX_CHUNK_SIZE: usize = 2;
-                #[cfg(not(any(test, feature = "test-support")))]
-                const MAX_CHUNK_SIZE: usize = 256;
-
-                // Stream this worktree's entries.
-                let message = proto::UpdateWorktree {
-                    project_id: project_id.to_proto(),
-                    worktree_id: *worktree_id,
-                    root_name: worktree.root_name.clone(),
-                    updated_entries: worktree.entries.values().cloned().collect(),
-                    removed_entries: Default::default(),
-                    scan_id: worktree.scan_id,
-                    is_last_update: worktree.is_complete,
-                };
-                for update in proto::split_worktree_update(message, MAX_CHUNK_SIZE) {
-                    for (receipt, _) in &receipts_with_replica_ids {
-                        self.peer.send(receipt.sender_id, update.clone())?;
-                    }
-                }
-
-                // Stream this worktree's diagnostics.
-                for summary in worktree.diagnostic_summaries.values() {
-                    for (receipt, _) in &receipts_with_replica_ids {
-                        self.peer.send(
-                            receipt.sender_id,
-                            proto::UpdateDiagnosticSummary {
-                                project_id: project_id.to_proto(),
-                                worktree_id: *worktree_id,
-                                summary: Some(summary.clone()),
-                            },
-                        )?;
-                    }
-                }
-            }
-        }
+        let mut store = self.store().await;
+        let (project, replica_id) = store.join_project(request.sender_id, project_id)?;
+        let peer_count = project.guests.len();
+        let mut collaborators = Vec::with_capacity(peer_count);
+        collaborators.push(proto::Collaborator {
+            peer_id: project.host_connection_id.0,
+            replica_id: 0,
+            user_id: project.host.user_id.to_proto(),
+        });
+        let worktrees = project
+            .worktrees
+            .iter()
+            .map(|(id, worktree)| proto::WorktreeMetadata {
+                id: *id,
+                root_name: worktree.root_name.clone(),
+                visible: worktree.visible,
+            })
+            .collect::<Vec<_>>();
+
+        // Add all guests other than the requesting user's own connections as collaborators
+        for (guest_conn_id, guest) in &project.guests {
+            if request.sender_id != *guest_conn_id {
+                collaborators.push(proto::Collaborator {
+                    peer_id: guest_conn_id.0,
+                    replica_id: guest.replica_id as u32,
+                    user_id: guest.user_id.to_proto(),
+                });
+            }
+        }
+
+        for conn_id in project.connection_ids() {
+            if conn_id != request.sender_id {
+                self.peer.send(
+                    conn_id,
+                    proto::AddProjectCollaborator {
+                        project_id: project_id.to_proto(),
+                        collaborator: Some(proto::Collaborator {
+                            peer_id: request.sender_id.0,
+                            replica_id: replica_id as u32,
+                            user_id: guest_user_id.to_proto(),
+                        }),
+                    },
+                )?;
+            }
+        }
+
+        // First, we send the metadata associated with each worktree.
+        response.send(proto::JoinProjectResponse {
+            worktrees: worktrees.clone(),
+            replica_id: replica_id as u32,
+            collaborators: collaborators.clone(),
+            language_servers: project.language_servers.clone(),
+        })?;
+
+        for (worktree_id, worktree) in &project.worktrees {
+            #[cfg(any(test, feature = "test-support"))]
+            const MAX_CHUNK_SIZE: usize = 2;
+            #[cfg(not(any(test, feature = "test-support")))]
+            const MAX_CHUNK_SIZE: usize = 256;
+
+            // Stream this worktree's entries.
+            let message = proto::UpdateWorktree {
+                project_id: project_id.to_proto(),
+                worktree_id: *worktree_id,
+                root_name: worktree.root_name.clone(),
+                updated_entries: worktree.entries.values().cloned().collect(),
+                removed_entries: Default::default(),
+                scan_id: worktree.scan_id,
+                is_last_update: worktree.is_complete,
+            };
+            for update in proto::split_worktree_update(message, MAX_CHUNK_SIZE) {
+                self.peer.send(request.sender_id, update.clone())?;
+            }
+
+            // Stream this worktree's diagnostics.
+            for summary in worktree.diagnostic_summaries.values() {
+                self.peer.send(
+                    request.sender_id,
+                    proto::UpdateDiagnosticSummary {
+                        project_id: project_id.to_proto(),
+                        worktree_id: *worktree_id,
+                        summary: Some(summary.clone()),
+                    },
+                )?;
+            }
+        }
 
-        self.update_user_contacts(host_user_id).await?;
         Ok(())
     }
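Both versions of this handler stream worktree entries in bounded chunks (MAX_CHUNK_SIZE is 2 under test, 256 otherwise) so a large worktree never becomes one giant message; the new code simply sends each chunk to the single joining connection instead of fanning out over pending receipts. A stand-in for proto::split_worktree_update to show the chunking idea (the real message type and field set differ):

    // Hypothetical trimmed-down message; the real proto::UpdateWorktree has more fields.
    struct UpdateWorktree {
        updated_entries: Vec<u64>,
        is_last_update: bool,
    }

    fn split_worktree_update(message: UpdateWorktree, max_chunk_size: usize) -> Vec<UpdateWorktree> {
        let chunk_count = message.updated_entries.chunks(max_chunk_size).count().max(1);
        message
            .updated_entries
            .chunks(max_chunk_size)
            .enumerate()
            .map(|(ix, chunk)| UpdateWorktree {
                updated_entries: chunk.to_vec(),
                // Only the final chunk may claim to be the last update.
                is_last_update: message.is_last_update && ix == chunk_count - 1,
            })
            .collect()
    }

    fn main() {
        let message = UpdateWorktree {
            updated_entries: (0..5).collect(),
            is_last_update: true,
        };
        let updates = split_worktree_update(message, 2);
        assert_eq!(updates.len(), 3); // chunks of 2, 2, and 1
        assert!(!updates[0].is_last_update);
        assert!(updates[2].is_last_update);
    }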
@@ -1041,27 +929,8 @@ impl Server {
                     )
                 });
             }
-
-            if let Some(requester_id) = project.cancel_request {
-                self.peer.send(
-                    project.host_connection_id,
-                    proto::JoinProjectRequestCancelled {
-                        project_id: project_id.to_proto(),
-                        requester_id: requester_id.to_proto(),
-                    },
-                )?;
-            }
-
-            if project.unshare {
-                self.peer.send(
-                    project.host_connection_id,
-                    proto::ProjectUnshared {
-                        project_id: project_id.to_proto(),
-                    },
-                )?;
-            }
         }
         self.update_user_contacts(project.host_user_id).await?;
 
         Ok(())
     }
@@ -1070,61 +939,18 @@ impl Server {
         request: TypedEnvelope<proto::UpdateProject>,
     ) -> Result<()> {
         let project_id = ProjectId::from_proto(request.payload.project_id);
         let user_id;
         {
             let mut state = self.store().await;
             user_id = state.user_id_for_connection(request.sender_id)?;
             let guest_connection_ids = state
                 .read_project(project_id, request.sender_id)?
                 .guest_connection_ids();
-            let unshared_project = state.update_project(
-                project_id,
-                &request.payload.worktrees,
-                request.payload.online,
-                request.sender_id,
-            )?;
-
-            if let Some(unshared_project) = unshared_project {
-                broadcast(
-                    request.sender_id,
-                    unshared_project.guests.keys().copied(),
-                    |conn_id| {
-                        self.peer.send(
-                            conn_id,
-                            proto::UnregisterProject {
-                                project_id: project_id.to_proto(),
-                            },
-                        )
-                    },
-                );
-                for (_, receipts) in unshared_project.pending_join_requests {
-                    for receipt in receipts {
-                        self.peer.respond(
-                            receipt,
-                            proto::JoinProjectResponse {
-                                variant: Some(proto::join_project_response::Variant::Decline(
-                                    proto::join_project_response::Decline {
-                                        reason:
-                                            proto::join_project_response::decline::Reason::Closed
-                                                as i32,
-                                    },
-                                )),
-                            },
-                        )?;
-                    }
-                }
-            } else {
-                broadcast(request.sender_id, guest_connection_ids, |connection_id| {
-                    self.peer.forward_send(
-                        request.sender_id,
-                        connection_id,
-                        request.payload.clone(),
-                    )
-                });
-            }
+            state.update_project(project_id, &request.payload.worktrees, request.sender_id)?;
+            broadcast(request.sender_id, guest_connection_ids, |connection_id| {
+                self.peer
+                    .forward_send(request.sender_id, connection_id, request.payload.clone())
+            });
         };
 
         self.update_user_contacts(user_id).await?;
         Ok(())
     }
@@ -1146,32 +972,21 @@ impl Server {
     ) -> Result<()> {
         let project_id = ProjectId::from_proto(request.payload.project_id);
         let worktree_id = request.payload.worktree_id;
-        let (connection_ids, metadata_changed) = {
-            let mut store = self.store().await;
-            let (connection_ids, metadata_changed) = store.update_worktree(
-                request.sender_id,
-                project_id,
-                worktree_id,
-                &request.payload.root_name,
-                &request.payload.removed_entries,
-                &request.payload.updated_entries,
-                request.payload.scan_id,
-                request.payload.is_last_update,
-            )?;
-            (connection_ids, metadata_changed)
-        };
+        let connection_ids = self.store().await.update_worktree(
+            request.sender_id,
+            project_id,
+            worktree_id,
+            &request.payload.root_name,
+            &request.payload.removed_entries,
+            &request.payload.updated_entries,
+            request.payload.scan_id,
+            request.payload.is_last_update,
+        )?;
 
         broadcast(request.sender_id, connection_ids, |connection_id| {
             self.peer
                 .forward_send(request.sender_id, connection_id, request.payload.clone())
         });
-        if metadata_changed {
-            let user_id = self
-                .store()
-                .await
-                .user_id_for_connection(request.sender_id)?;
-            self.update_user_contacts(user_id).await?;
-        }
         response.send(proto::Ack {})?;
         Ok(())
     }
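The Store hunks below are the data-model side of the change: Project loses its `online` flag and its map of parked join-request receipts, ConnectionState loses `requested_projects`, and UnsharedProject shrinks to just the guest connections that need to be told. Schematically (hypothetical trimmed-down shapes, not the real structs):

    use std::collections::HashMap;

    // Before: pending joins were tracked per user, each entry holding the RPC
    // receipts to answer once the host accepted or declined.
    struct ProjectBefore {
        online: bool,
        guests: HashMap<u64, u32>,             // connection id -> replica id
        join_requests: HashMap<u64, Vec<u64>>, // user id -> parked receipt ids
    }

    // After: no pending state; a connection either appears in `guests` or it doesn't.
    struct ProjectAfter {
        guests: HashMap<u64, u32>,
    }

    fn main() {
        let _before = ProjectBefore {
            online: true,
            guests: HashMap::new(),
            join_requests: HashMap::new(),
        };
        let _after = ProjectAfter {
            guests: HashMap::new(),
        };
    }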
@@ -1,7 +1,7 @@
 use crate::db::{self, ChannelId, ProjectId, UserId};
 use anyhow::{anyhow, Result};
-use collections::{btree_map, hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet};
-use rpc::{proto, ConnectionId, Receipt};
+use collections::{btree_map, BTreeMap, BTreeSet, HashMap, HashSet};
+use rpc::{proto, ConnectionId};
 use serde::Serialize;
 use std::{mem, path::PathBuf, str, time::Duration};
 use time::OffsetDateTime;
@@ -32,7 +32,6 @@ struct ConnectionState {
     user_id: UserId,
     admin: bool,
     projects: BTreeSet<ProjectId>,
-    requested_projects: HashSet<ProjectId>,
     channels: HashSet<ChannelId>,
 }
 
@@ -45,12 +44,9 @@ pub struct Call {
 
 #[derive(Serialize)]
 pub struct Project {
-    pub online: bool,
     pub host_connection_id: ConnectionId,
     pub host: Collaborator,
     pub guests: HashMap<ConnectionId, Collaborator>,
-    #[serde(skip)]
-    pub join_requests: HashMap<UserId, Vec<Receipt<proto::JoinProject>>>,
     pub active_replica_ids: HashSet<ReplicaId>,
     pub worktrees: BTreeMap<u64, Worktree>,
     pub language_servers: Vec<proto::LanguageServer>,
@@ -98,13 +94,10 @@ pub struct LeftProject {
     pub host_connection_id: ConnectionId,
     pub connection_ids: Vec<ConnectionId>,
     pub remove_collaborator: bool,
-    pub cancel_request: Option<UserId>,
-    pub unshare: bool,
 }
 
 pub struct UnsharedProject {
-    pub guests: HashMap<ConnectionId, Collaborator>,
-    pub pending_join_requests: HashMap<UserId, Vec<Receipt<proto::JoinProject>>>,
+    pub guest_connection_ids: Vec<ConnectionId>,
 }
 
 #[derive(Copy, Clone)]
@@ -159,7 +152,6 @@ impl Store {
                 user_id,
                 admin,
                 projects: Default::default(),
-                requested_projects: Default::default(),
                 channels: Default::default(),
             },
         );
@@ -578,7 +570,6 @@ impl Store {
         &mut self,
         host_connection_id: ConnectionId,
         project_id: ProjectId,
-        online: bool,
     ) -> Result<()> {
         let connection = self
             .connections
@@ -588,7 +579,6 @@ impl Store {
         self.projects.insert(
             project_id,
             Project {
-                online,
                 host_connection_id,
                 host: Collaborator {
                     user_id: connection.user_id,
@@ -597,7 +587,6 @@ impl Store {
                     admin: connection.admin,
                 },
                 guests: Default::default(),
-                join_requests: Default::default(),
                 active_replica_ids: Default::default(),
                 worktrees: Default::default(),
                 language_servers: Default::default(),
@@ -610,9 +599,8 @@ impl Store {
         &mut self,
         project_id: ProjectId,
         worktrees: &[proto::WorktreeMetadata],
-        online: bool,
         connection_id: ConnectionId,
-    ) -> Result<Option<UnsharedProject>> {
+    ) -> Result<()> {
         let project = self
             .projects
             .get_mut(&project_id)
@@ -634,32 +622,7 @@ impl Store {
                 }
             }
 
-            if online != project.online {
-                project.online = online;
-                if project.online {
-                    Ok(None)
-                } else {
-                    for connection_id in project.guest_connection_ids() {
-                        if let Some(connection) = self.connections.get_mut(&connection_id) {
-                            connection.projects.remove(&project_id);
-                        }
-                    }
-
-                    project.active_replica_ids.clear();
-                    project.language_servers.clear();
-                    for worktree in project.worktrees.values_mut() {
-                        worktree.diagnostic_summaries.clear();
-                        worktree.entries.clear();
-                    }
-
-                    Ok(Some(UnsharedProject {
-                        guests: mem::take(&mut project.guests),
-                        pending_join_requests: mem::take(&mut project.join_requests),
-                    }))
-                }
-            } else {
-                Ok(None)
-            }
+            Ok(())
         } else {
             Err(anyhow!("no such project"))?
         }
@@ -685,22 +648,6 @@ impl Store {
                 }
             }
 
-            for requester_user_id in project.join_requests.keys() {
-                if let Some(requester_user_connection_state) =
-                    self.connected_users.get_mut(requester_user_id)
-                {
-                    for requester_connection_id in
-                        &requester_user_connection_state.connection_ids
-                    {
-                        if let Some(requester_connection) =
-                            self.connections.get_mut(requester_connection_id)
-                        {
-                            requester_connection.requested_projects.remove(&project_id);
-                        }
-                    }
-                }
-            }
-
             Ok(project)
         } else {
             Err(anyhow!("no such project"))?
@@ -710,6 +657,37 @@ impl Store {
         }
     }
 
+    pub fn unshare_project(
+        &mut self,
+        project_id: ProjectId,
+        connection_id: ConnectionId,
+    ) -> Result<UnsharedProject> {
+        let project = self
+            .projects
+            .get_mut(&project_id)
+            .ok_or_else(|| anyhow!("no such project"))?;
+        anyhow::ensure!(
+            project.host_connection_id == connection_id,
+            "no such project"
+        );
+
+        let guest_connection_ids = project.guest_connection_ids();
+        project.active_replica_ids.clear();
+        project.guests.clear();
+        project.language_servers.clear();
+        project.worktrees.clear();
+
+        for connection_id in &guest_connection_ids {
+            if let Some(connection) = self.connections.get_mut(connection_id) {
+                connection.projects.remove(&project_id);
+            }
+        }
+
+        Ok(UnsharedProject {
+            guest_connection_ids,
+        })
+    }
+
     pub fn update_diagnostic_summary(
         &mut self,
         project_id: ProjectId,
@@ -753,91 +731,37 @@ impl Store {
             Err(anyhow!("no such project"))?
         }
     }
 
-    pub fn request_join_project(
+    pub fn join_project(
         &mut self,
-        requester_id: UserId,
+        requester_connection_id: ConnectionId,
         project_id: ProjectId,
-        receipt: Receipt<proto::JoinProject>,
-    ) -> Result<()> {
+    ) -> Result<(&Project, ReplicaId)> {
         let connection = self
             .connections
-            .get_mut(&receipt.sender_id)
+            .get_mut(&requester_connection_id)
             .ok_or_else(|| anyhow!("no such connection"))?;
         let project = self
             .projects
             .get_mut(&project_id)
             .ok_or_else(|| anyhow!("no such project"))?;
-        if project.online {
-            connection.requested_projects.insert(project_id);
-            project
-                .join_requests
-                .entry(requester_id)
-                .or_default()
-                .push(receipt);
-            Ok(())
-        } else {
-            Err(anyhow!("no such project"))
-        }
-    }
-
-    pub fn deny_join_project_request(
-        &mut self,
-        responder_connection_id: ConnectionId,
-        requester_id: UserId,
-        project_id: ProjectId,
-    ) -> Option<Vec<Receipt<proto::JoinProject>>> {
-        let project = self.projects.get_mut(&project_id)?;
-        if responder_connection_id != project.host_connection_id {
-            return None;
-        }
-
-        let receipts = project.join_requests.remove(&requester_id)?;
-        for receipt in &receipts {
-            let requester_connection = self.connections.get_mut(&receipt.sender_id)?;
-            requester_connection.requested_projects.remove(&project_id);
-        }
-        project.host.last_activity = Some(OffsetDateTime::now_utc());
-
-        Some(receipts)
-    }
-
-    #[allow(clippy::type_complexity)]
-    pub fn accept_join_project_request(
-        &mut self,
-        responder_connection_id: ConnectionId,
-        requester_id: UserId,
-        project_id: ProjectId,
-    ) -> Option<(Vec<(Receipt<proto::JoinProject>, ReplicaId)>, &Project)> {
-        let project = self.projects.get_mut(&project_id)?;
-        if responder_connection_id != project.host_connection_id {
-            return None;
-        }
-
-        let receipts = project.join_requests.remove(&requester_id)?;
-        let mut receipts_with_replica_ids = Vec::new();
-        for receipt in receipts {
-            let requester_connection = self.connections.get_mut(&receipt.sender_id)?;
-            requester_connection.requested_projects.remove(&project_id);
-            requester_connection.projects.insert(project_id);
-            let mut replica_id = 1;
-            while project.active_replica_ids.contains(&replica_id) {
-                replica_id += 1;
-            }
-            project.active_replica_ids.insert(replica_id);
-            project.guests.insert(
-                receipt.sender_id,
-                Collaborator {
-                    replica_id,
-                    user_id: requester_id,
-                    last_activity: Some(OffsetDateTime::now_utc()),
-                    admin: requester_connection.admin,
-                },
-            );
-            receipts_with_replica_ids.push((receipt, replica_id));
-        }
+        connection.projects.insert(project_id);
+        let mut replica_id = 1;
+        while project.active_replica_ids.contains(&replica_id) {
+            replica_id += 1;
+        }
+        project.active_replica_ids.insert(replica_id);
+        project.guests.insert(
+            requester_connection_id,
+            Collaborator {
+                replica_id,
+                user_id: connection.user_id,
+                last_activity: Some(OffsetDateTime::now_utc()),
+                admin: connection.admin,
+            },
+        );
 
         project.host.last_activity = Some(OffsetDateTime::now_utc());
-        Some((receipts_with_replica_ids, project))
+        Ok((project, replica_id))
     }
 
     pub fn leave_project(
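Store::join_project keeps the replica-id scheme from the old accept path: replica 0 is the host, and each joining guest takes the lowest id not currently active, so ids are recycled as guests leave. The allocation on its own (simplified; the real code stores full Collaborator records keyed by connection):

    use std::collections::HashSet;

    // Replica 0 is reserved for the host; guests get the lowest free id.
    fn assign_replica_id(active_replica_ids: &mut HashSet<u32>) -> u32 {
        let mut replica_id = 1;
        while active_replica_ids.contains(&replica_id) {
            replica_id += 1;
        }
        active_replica_ids.insert(replica_id);
        replica_id
    }

    fn main() {
        let mut active = HashSet::new();
        assert_eq!(assign_replica_id(&mut active), 1);
        assert_eq!(assign_replica_id(&mut active), 2);
        active.remove(&1); // a guest leaves...
        assert_eq!(assign_replica_id(&mut active), 1); // ...and its id is reused
    }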
@@ -845,7 +769,6 @@ impl Store {
         connection_id: ConnectionId,
         project_id: ProjectId,
     ) -> Result<LeftProject> {
-        let user_id = self.user_id_for_connection(connection_id)?;
         let project = self
             .projects
             .get_mut(&project_id)
@@ -859,39 +782,14 @@ impl Store {
             false
         };
 
-        // If the connection leaving the project has a pending request, remove it.
-        // If that user has no other pending requests on other connections, indicate that the request should be cancelled.
-        let mut cancel_request = None;
-        if let Entry::Occupied(mut entry) = project.join_requests.entry(user_id) {
-            entry
-                .get_mut()
-                .retain(|receipt| receipt.sender_id != connection_id);
-            if entry.get().is_empty() {
-                entry.remove();
-                cancel_request = Some(user_id);
-            }
-        }
-
         if let Some(connection) = self.connections.get_mut(&connection_id) {
             connection.projects.remove(&project_id);
         }
 
-        let connection_ids = project.connection_ids();
-        let unshare = connection_ids.len() <= 1 && project.join_requests.is_empty();
-        if unshare {
-            project.language_servers.clear();
-            for worktree in project.worktrees.values_mut() {
-                worktree.diagnostic_summaries.clear();
-                worktree.entries.clear();
-            }
-        }
-
         Ok(LeftProject {
             host_connection_id: project.host_connection_id,
             host_user_id: project.host.user_id,
-            connection_ids,
-            cancel_request,
-            unshare,
+            connection_ids: project.connection_ids(),
             remove_collaborator,
         })
     }
@@ -907,15 +805,11 @@ impl Store {
         updated_entries: &[proto::Entry],
         scan_id: u64,
         is_last_update: bool,
-    ) -> Result<(Vec<ConnectionId>, bool)> {
+    ) -> Result<Vec<ConnectionId>> {
         let project = self.write_project(project_id, connection_id)?;
-        if !project.online {
-            return Err(anyhow!("project is not online"));
-        }
-
         let connection_ids = project.connection_ids();
         let mut worktree = project.worktrees.entry(worktree_id).or_default();
-        let metadata_changed = worktree_root_name != worktree.root_name;
         worktree.root_name = worktree_root_name.to_string();
 
         for entry_id in removed_entries {
@@ -928,7 +822,7 @@ impl Store {
 
         worktree.scan_id = scan_id;
         worktree.is_complete = is_last_update;
-        Ok((connection_ids, metadata_changed))
+        Ok(connection_ids)
     }
 
     pub fn project_connection_ids(