From f639c4c3d14f19128c244610e93c0589d174aa0a Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 11 Nov 2022 10:41:44 +0100 Subject: [PATCH 001/240] Add schema for reconnection support --- .../20221109000000_test_schema.sql | 56 +++++++++++++++++-- .../20221111092550_reconnection_support.sql | 47 ++++++++++++++++ 2 files changed, 97 insertions(+), 6 deletions(-) create mode 100644 crates/collab/migrations/20221111092550_reconnection_support.sql diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 63d2661de5..731910027e 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -1,4 +1,4 @@ -CREATE TABLE IF NOT EXISTS "users" ( +CREATE TABLE "users" ( "id" INTEGER PRIMARY KEY, "github_login" VARCHAR, "admin" BOOLEAN, @@ -16,14 +16,14 @@ CREATE UNIQUE INDEX "index_invite_code_users" ON "users" ("invite_code"); CREATE INDEX "index_users_on_email_address" ON "users" ("email_address"); CREATE INDEX "index_users_on_github_user_id" ON "users" ("github_user_id"); -CREATE TABLE IF NOT EXISTS "access_tokens" ( +CREATE TABLE "access_tokens" ( "id" INTEGER PRIMARY KEY, "user_id" INTEGER REFERENCES users (id), "hash" VARCHAR(128) ); CREATE INDEX "index_access_tokens_user_id" ON "access_tokens" ("user_id"); -CREATE TABLE IF NOT EXISTS "contacts" ( +CREATE TABLE "contacts" ( "id" INTEGER PRIMARY KEY, "user_id_a" INTEGER REFERENCES users (id) NOT NULL, "user_id_b" INTEGER REFERENCES users (id) NOT NULL, @@ -34,8 +34,52 @@ CREATE TABLE IF NOT EXISTS "contacts" ( CREATE UNIQUE INDEX "index_contacts_user_ids" ON "contacts" ("user_id_a", "user_id_b"); CREATE INDEX "index_contacts_user_id_b" ON "contacts" ("user_id_b"); -CREATE TABLE IF NOT EXISTS "projects" ( +CREATE TABLE "rooms" ( "id" INTEGER PRIMARY KEY, - "host_user_id" INTEGER REFERENCES users (id) NOT NULL, - "unregistered" BOOLEAN NOT NULL DEFAULT false + "version" INTEGER NOT NULL, + "live_kit_room" VARCHAR NOT NULL ); + +CREATE TABLE "projects" ( + "id" INTEGER PRIMARY KEY, + "room_id" INTEGER REFERENCES rooms (id), + "host_user_id" INTEGER REFERENCES users (id) NOT NULL +); + +CREATE TABLE "project_collaborators" ( + "id" INTEGER PRIMARY KEY, + "project_id" INTEGER NOT NULL REFERENCES projects (id), + "connection_id" INTEGER NOT NULL, + "user_id" INTEGER NOT NULL, + "replica_id" INTEGER NOT NULL, + "is_host" BOOLEAN NOT NULL +); +CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id"); + +CREATE TABLE "worktrees" ( + "id" INTEGER NOT NULL, + "project_id" INTEGER NOT NULL REFERENCES projects (id), + "root_name" VARCHAR NOT NULL, + PRIMARY KEY(project_id, id) +); +CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id"); + +CREATE TABLE "room_participants" ( + "id" INTEGER PRIMARY KEY, + "room_id" INTEGER NOT NULL REFERENCES rooms (id), + "user_id" INTEGER NOT NULL REFERENCES users (id), + "connection_id" INTEGER, + "location_kind" INTEGER, + "location_project_id" INTEGER REFERENCES projects (id) +); +CREATE UNIQUE INDEX "index_room_participants_on_user_id_and_room_id" ON "room_participants" ("user_id", "room_id"); + +CREATE TABLE "calls" ( + "id" INTEGER PRIMARY KEY, + "room_id" INTEGER NOT NULL REFERENCES rooms (id), + "calling_user_id" INTEGER NOT NULL REFERENCES users (id), + "called_user_id" INTEGER NOT NULL REFERENCES users (id), + "answering_connection_id" INTEGER, + "initial_project_id" INTEGER 
REFERENCES projects (id) +); +CREATE UNIQUE INDEX "index_calls_on_calling_user_id" ON "calls" ("calling_user_id"); diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql new file mode 100644 index 0000000000..9474beff42 --- /dev/null +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -0,0 +1,47 @@ +CREATE TABLE IF NOT EXISTS "rooms" ( + "id" SERIAL PRIMARY KEY, + "version" INTEGER NOT NULL, + "live_kit_room" VARCHAR NOT NULL +); + +ALTER TABLE "projects" + ADD "room_id" INTEGER REFERENCES rooms (id), + DROP COLUMN "unregistered"; + +CREATE TABLE "project_collaborators" ( + "id" SERIAL PRIMARY KEY, + "project_id" INTEGER NOT NULL REFERENCES projects (id), + "connection_id" INTEGER NOT NULL, + "user_id" INTEGER NOT NULL, + "replica_id" INTEGER NOT NULL, + "is_host" BOOLEAN NOT NULL +); +CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id"); + +CREATE TABLE IF NOT EXISTS "worktrees" ( + "id" INTEGER NOT NULL, + "project_id" INTEGER NOT NULL REFERENCES projects (id), + "root_name" VARCHAR NOT NULL, + PRIMARY KEY(project_id, id) +); +CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id"); + +CREATE TABLE IF NOT EXISTS "room_participants" ( + "id" SERIAL PRIMARY KEY, + "room_id" INTEGER NOT NULL REFERENCES rooms (id), + "user_id" INTEGER NOT NULL REFERENCES users (id), + "connection_id" INTEGER, + "location_kind" INTEGER, + "location_project_id" INTEGER REFERENCES projects (id) +); +CREATE UNIQUE INDEX "index_room_participants_on_user_id_and_room_id" ON "room_participants" ("user_id", "room_id"); + +CREATE TABLE IF NOT EXISTS "calls" ( + "id" SERIAL PRIMARY KEY, + "room_id" INTEGER NOT NULL REFERENCES rooms (id), + "calling_user_id" INTEGER NOT NULL REFERENCES users (id), + "called_user_id" INTEGER NOT NULL REFERENCES users (id), + "answering_connection_id" INTEGER, + "initial_project_id" INTEGER REFERENCES projects (id) +); +CREATE UNIQUE INDEX "index_calls_on_calling_user_id" ON "calls" ("calling_user_id"); From 28aa1567ce8d814a9a3ffbcd1b566a1b343907d4 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 7 Nov 2022 15:40:02 +0100 Subject: [PATCH 002/240] Include `sender_user_id` when handling a server message/request --- crates/collab/src/rpc.rs | 465 +++++++++++++++++++++++---------------- 1 file changed, 276 insertions(+), 189 deletions(-) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 7bc2b43b9b..757c765838 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -68,8 +68,15 @@ lazy_static! 
{ .unwrap(); } -type MessageHandler = - Box, Box) -> BoxFuture<'static, ()>>; +type MessageHandler = Box< + dyn Send + Sync + Fn(Arc, UserId, Box) -> BoxFuture<'static, ()>, +>; + +struct Message { + sender_user_id: UserId, + sender_connection_id: ConnectionId, + payload: T, +} struct Response { server: Arc, @@ -193,15 +200,15 @@ impl Server { Arc::new(server) } - fn add_message_handler(&mut self, handler: F) -> &mut Self + fn add_handler(&mut self, handler: F) -> &mut Self where - F: 'static + Send + Sync + Fn(Arc, TypedEnvelope) -> Fut, + F: 'static + Send + Sync + Fn(Arc, UserId, TypedEnvelope) -> Fut, Fut: 'static + Send + Future>, M: EnvelopedMessage, { let prev_handler = self.handlers.insert( TypeId::of::(), - Box::new(move |server, envelope| { + Box::new(move |server, sender_user_id, envelope| { let envelope = envelope.into_any().downcast::>().unwrap(); let span = info_span!( "handle message", @@ -213,7 +220,7 @@ impl Server { "message received" ); }); - let future = (handler)(server, *envelope); + let future = (handler)(server, sender_user_id, *envelope); async move { if let Err(error) = future.await { tracing::error!(%error, "error handling message"); @@ -229,26 +236,50 @@ impl Server { self } + fn add_message_handler(&mut self, handler: F) -> &mut Self + where + F: 'static + Send + Sync + Fn(Arc, Message) -> Fut, + Fut: 'static + Send + Future>, + M: EnvelopedMessage, + { + self.add_handler(move |server, sender_user_id, envelope| { + handler( + server, + Message { + sender_user_id, + sender_connection_id: envelope.sender_id, + payload: envelope.payload, + }, + ) + }); + self + } + /// Handle a request while holding a lock to the store. This is useful when we're registering /// a connection but we want to respond on the connection before anybody else can send on it. 
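    /// Illustrative only, not part of this patch: registration follows the same shape
    /// as the other handlers wired up on the server, e.g.
    ///
    /// ```ignore
    /// server.add_request_handler(Server::ping);
    /// ```
    ///
    /// The handler receives a `Message<M>` (sender user id, sender connection id, and
    /// the decoded payload) and must complete its `Response<M>` via `response.send(...)`
    /// before returning `Ok`, otherwise the request is reported as unanswered.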
fn add_request_handler(&mut self, handler: F) -> &mut Self where - F: 'static + Send + Sync + Fn(Arc, TypedEnvelope, Response) -> Fut, + F: 'static + Send + Sync + Fn(Arc, Message, Response) -> Fut, Fut: Send + Future>, M: RequestMessage, { let handler = Arc::new(handler); - self.add_message_handler(move |server, envelope| { + self.add_handler(move |server, sender_user_id, envelope| { let receipt = envelope.receipt(); let handler = handler.clone(); async move { + let request = Message { + sender_user_id, + sender_connection_id: envelope.sender_id, + payload: envelope.payload, + }; let responded = Arc::new(AtomicBool::default()); let response = Response { server: server.clone(), responded: responded.clone(), - receipt: envelope.receipt(), + receipt, }; - match (handler)(server.clone(), envelope, response).await { + match (handler)(server.clone(), request, response).await { Ok(()) => { if responded.load(std::sync::atomic::Ordering::SeqCst) { Ok(()) @@ -361,7 +392,7 @@ impl Server { let span_enter = span.enter(); if let Some(handler) = this.handlers.get(&message.payload_type_id()) { let is_background = message.is_background(); - let handle_message = (handler)(this.clone(), message); + let handle_message = (handler)(this.clone(), user_id, message); drop(span_enter); let handle_message = handle_message.instrument(span); @@ -516,7 +547,7 @@ impl Server { async fn ping( self: Arc, - _: TypedEnvelope, + _: Message, response: Response, ) -> Result<()> { response.send(proto::Ack {})?; @@ -525,15 +556,13 @@ impl Server { async fn create_room( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { - let user_id; let room; { let mut store = self.store().await; - user_id = store.user_id_for_connection(request.sender_id)?; - room = store.create_room(request.sender_id)?.clone(); + room = store.create_room(request.sender_connection_id)?.clone(); } let live_kit_connection_info = @@ -544,7 +573,10 @@ impl Server { .trace_err() { if let Some(token) = live_kit - .room_token(&room.live_kit_room, &request.sender_id.to_string()) + .room_token( + &room.live_kit_room, + &request.sender_connection_id.to_string(), + ) .trace_err() { Some(proto::LiveKitConnectionInfo { @@ -565,21 +597,19 @@ impl Server { room: Some(room), live_kit_connection_info, })?; - self.update_user_contacts(user_id).await?; + self.update_user_contacts(request.sender_user_id).await?; Ok(()) } async fn join_room( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { - let user_id; { let mut store = self.store().await; - user_id = store.user_id_for_connection(request.sender_id)?; let (room, recipient_connection_ids) = - store.join_room(request.payload.id, request.sender_id)?; + store.join_room(request.payload.id, request.sender_connection_id)?; for recipient_id in recipient_connection_ids { self.peer .send(recipient_id, proto::CallCanceled {}) @@ -589,7 +619,10 @@ impl Server { let live_kit_connection_info = if let Some(live_kit) = self.app_state.live_kit_client.as_ref() { if let Some(token) = live_kit - .room_token(&room.live_kit_room, &request.sender_id.to_string()) + .room_token( + &room.live_kit_room, + &request.sender_connection_id.to_string(), + ) .trace_err() { Some(proto::LiveKitConnectionInfo { @@ -609,18 +642,17 @@ impl Server { })?; self.room_updated(room); } - self.update_user_contacts(user_id).await?; + self.update_user_contacts(request.sender_user_id).await?; Ok(()) } - async fn leave_room(self: Arc, message: TypedEnvelope) -> Result<()> { + async fn 
leave_room(self: Arc, message: Message) -> Result<()> { let mut contacts_to_update = HashSet::default(); let room_left; { let mut store = self.store().await; - let user_id = store.user_id_for_connection(message.sender_id)?; - let left_room = store.leave_room(message.payload.id, message.sender_id)?; - contacts_to_update.insert(user_id); + let left_room = store.leave_room(message.payload.id, message.sender_connection_id)?; + contacts_to_update.insert(message.sender_user_id); for project in left_room.unshared_projects { for connection_id in project.connection_ids() { @@ -640,13 +672,13 @@ impl Server { connection_id, proto::RemoveProjectCollaborator { project_id: project.id.to_proto(), - peer_id: message.sender_id.0, + peer_id: message.sender_connection_id.0, }, )?; } self.peer.send( - message.sender_id, + message.sender_connection_id, proto::UnshareProject { project_id: project.id.to_proto(), }, @@ -655,7 +687,7 @@ impl Server { } self.room_updated(&left_room.room); - room_left = self.room_left(&left_room.room, message.sender_id); + room_left = self.room_left(&left_room.room, message.sender_connection_id); for connection_id in left_room.canceled_call_connection_ids { self.peer @@ -675,13 +707,10 @@ impl Server { async fn call( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { - let caller_user_id = self - .store() - .await - .user_id_for_connection(request.sender_id)?; + let caller_user_id = request.sender_user_id; let recipient_user_id = UserId::from_proto(request.payload.recipient_user_id); let initial_project_id = request .payload @@ -703,7 +732,7 @@ impl Server { room_id, recipient_user_id, initial_project_id, - request.sender_id, + request.sender_connection_id, )?; self.room_updated(room); recipient_connection_ids @@ -740,7 +769,7 @@ impl Server { async fn cancel_call( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { let recipient_user_id = UserId::from_proto(request.payload.recipient_user_id); @@ -749,7 +778,7 @@ impl Server { let (room, recipient_connection_ids) = store.cancel_call( request.payload.room_id, recipient_user_id, - request.sender_id, + request.sender_connection_id, )?; for recipient_id in recipient_connection_ids { self.peer @@ -763,16 +792,12 @@ impl Server { Ok(()) } - async fn decline_call( - self: Arc, - message: TypedEnvelope, - ) -> Result<()> { - let recipient_user_id; + async fn decline_call(self: Arc, message: Message) -> Result<()> { + let recipient_user_id = message.sender_user_id; { let mut store = self.store().await; - recipient_user_id = store.user_id_for_connection(message.sender_id)?; let (room, recipient_connection_ids) = - store.decline_call(message.payload.room_id, message.sender_id)?; + store.decline_call(message.payload.room_id, message.sender_connection_id)?; for recipient_id in recipient_connection_ids { self.peer .send(recipient_id, proto::CallCanceled {}) @@ -786,7 +811,7 @@ impl Server { async fn update_participant_location( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { let room_id = request.payload.room_id; @@ -795,7 +820,8 @@ impl Server { .location .ok_or_else(|| anyhow!("invalid location"))?; let mut store = self.store().await; - let room = store.update_participant_location(room_id, location, request.sender_id)?; + let room = + store.update_participant_location(room_id, location, request.sender_connection_id)?; self.room_updated(room); response.send(proto::Ack {})?; Ok(()) @@ -839,20 +865,20 @@ impl 
Server { async fn share_project( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { - let user_id = self - .store() - .await - .user_id_for_connection(request.sender_id)?; - let project_id = self.app_state.db.register_project(user_id).await?; + let project_id = self + .app_state + .db + .register_project(request.sender_user_id) + .await?; let mut store = self.store().await; let room = store.share_project( request.payload.room_id, project_id, request.payload.worktrees, - request.sender_id, + request.sender_connection_id, )?; response.send(proto::ShareProjectResponse { project_id: project_id.to_proto(), @@ -864,13 +890,13 @@ impl Server { async fn unshare_project( self: Arc, - message: TypedEnvelope, + message: Message, ) -> Result<()> { let project_id = ProjectId::from_proto(message.payload.project_id); let mut store = self.store().await; - let (room, project) = store.unshare_project(project_id, message.sender_id)?; + let (room, project) = store.unshare_project(project_id, message.sender_connection_id)?; broadcast( - message.sender_id, + message.sender_connection_id, project.guest_connection_ids(), |conn_id| self.peer.send(conn_id, message.payload.clone()), ); @@ -911,26 +937,24 @@ impl Server { async fn join_project( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); - + let guest_user_id = request.sender_user_id; let host_user_id; - let guest_user_id; let host_connection_id; { let state = self.store().await; let project = state.project(project_id)?; host_user_id = project.host.user_id; host_connection_id = project.host_connection_id; - guest_user_id = state.user_id_for_connection(request.sender_id)?; }; tracing::info!(%project_id, %host_user_id, %host_connection_id, "join project"); let mut store = self.store().await; - let (project, replica_id) = store.join_project(request.sender_id, project_id)?; + let (project, replica_id) = store.join_project(request.sender_connection_id, project_id)?; let peer_count = project.guests.len(); let mut collaborators = Vec::with_capacity(peer_count); collaborators.push(proto::Collaborator { @@ -951,7 +975,7 @@ impl Server { // Add all guests other than the requesting user's own connections as collaborators for (guest_conn_id, guest) in &project.guests { - if request.sender_id != *guest_conn_id { + if request.sender_connection_id != *guest_conn_id { collaborators.push(proto::Collaborator { peer_id: guest_conn_id.0, replica_id: guest.replica_id as u32, @@ -961,14 +985,14 @@ impl Server { } for conn_id in project.connection_ids() { - if conn_id != request.sender_id { + if conn_id != request.sender_connection_id { self.peer .send( conn_id, proto::AddProjectCollaborator { project_id: project_id.to_proto(), collaborator: Some(proto::Collaborator { - peer_id: request.sender_id.0, + peer_id: request.sender_connection_id.0, replica_id: replica_id as u32, user_id: guest_user_id.to_proto(), }), @@ -1004,13 +1028,14 @@ impl Server { is_last_update: worktree.is_complete, }; for update in proto::split_worktree_update(message, MAX_CHUNK_SIZE) { - self.peer.send(request.sender_id, update.clone())?; + self.peer + .send(request.sender_connection_id, update.clone())?; } // Stream this worktree's diagnostics. 
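            // (Sent as a series of messages rather than one: the snapshot above is
            // chunked by `split_worktree_update` at MAX_CHUNK_SIZE entries so a large
            // worktree can't produce an oversized frame, and each summary below goes
            // out as its own `UpdateDiagnosticSummary`, letting the joining guest
            // converge on the host's diagnostic state incrementally.)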
for summary in worktree.diagnostic_summaries.values() { self.peer.send( - request.sender_id, + request.sender_connection_id, proto::UpdateDiagnosticSummary { project_id: project_id.to_proto(), worktree_id: *worktree_id, @@ -1022,7 +1047,7 @@ impl Server { for language_server in &project.language_servers { self.peer.send( - request.sender_id, + request.sender_connection_id, proto::UpdateLanguageServer { project_id: project_id.to_proto(), language_server_id: language_server.id, @@ -1038,11 +1063,8 @@ impl Server { Ok(()) } - async fn leave_project( - self: Arc, - request: TypedEnvelope, - ) -> Result<()> { - let sender_id = request.sender_id; + async fn leave_project(self: Arc, request: Message) -> Result<()> { + let sender_id = request.sender_connection_id; let project_id = ProjectId::from_proto(request.payload.project_id); let project; { @@ -1073,20 +1095,30 @@ impl Server { async fn update_project( self: Arc, - request: TypedEnvelope, + request: Message, ) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); { let mut state = self.store().await; let guest_connection_ids = state - .read_project(project_id, request.sender_id)? + .read_project(project_id, request.sender_connection_id)? .guest_connection_ids(); - let room = - state.update_project(project_id, &request.payload.worktrees, request.sender_id)?; - broadcast(request.sender_id, guest_connection_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); + let room = state.update_project( + project_id, + &request.payload.worktrees, + request.sender_connection_id, + )?; + broadcast( + request.sender_connection_id, + guest_connection_ids, + |connection_id| { + self.peer.forward_send( + request.sender_connection_id, + connection_id, + request.payload.clone(), + ) + }, + ); self.room_updated(room); }; @@ -1095,13 +1127,13 @@ impl Server { async fn update_worktree( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); let worktree_id = request.payload.worktree_id; let connection_ids = self.store().await.update_worktree( - request.sender_id, + request.sender_connection_id, project_id, worktree_id, &request.payload.root_name, @@ -1111,17 +1143,24 @@ impl Server { request.payload.is_last_update, )?; - broadcast(request.sender_id, connection_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); + broadcast( + request.sender_connection_id, + connection_ids, + |connection_id| { + self.peer.forward_send( + request.sender_connection_id, + connection_id, + request.payload.clone(), + ) + }, + ); response.send(proto::Ack {})?; Ok(()) } async fn update_diagnostic_summary( self: Arc, - request: TypedEnvelope, + request: Message, ) -> Result<()> { let summary = request .payload @@ -1131,55 +1170,76 @@ impl Server { let receiver_ids = self.store().await.update_diagnostic_summary( ProjectId::from_proto(request.payload.project_id), request.payload.worktree_id, - request.sender_id, + request.sender_connection_id, summary, )?; - broadcast(request.sender_id, receiver_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); + broadcast( + request.sender_connection_id, + receiver_ids, + |connection_id| { + self.peer.forward_send( + request.sender_connection_id, + connection_id, + request.payload.clone(), + ) + }, + ); Ok(()) } async fn 
start_language_server( self: Arc, - request: TypedEnvelope, + request: Message, ) -> Result<()> { let receiver_ids = self.store().await.start_language_server( ProjectId::from_proto(request.payload.project_id), - request.sender_id, + request.sender_connection_id, request .payload .server .clone() .ok_or_else(|| anyhow!("invalid language server"))?, )?; - broadcast(request.sender_id, receiver_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); + broadcast( + request.sender_connection_id, + receiver_ids, + |connection_id| { + self.peer.forward_send( + request.sender_connection_id, + connection_id, + request.payload.clone(), + ) + }, + ); Ok(()) } async fn update_language_server( self: Arc, - request: TypedEnvelope, + request: Message, ) -> Result<()> { let receiver_ids = self.store().await.project_connection_ids( ProjectId::from_proto(request.payload.project_id), - request.sender_id, + request.sender_connection_id, )?; - broadcast(request.sender_id, receiver_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); + broadcast( + request.sender_connection_id, + receiver_ids, + |connection_id| { + self.peer.forward_send( + request.sender_connection_id, + connection_id, + request.payload.clone(), + ) + }, + ); Ok(()) } async fn forward_project_request( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> where @@ -1189,17 +1249,21 @@ impl Server { let host_connection_id = self .store() .await - .read_project(project_id, request.sender_id)? + .read_project(project_id, request.sender_connection_id)? .host_connection_id; let payload = self .peer - .forward_request(request.sender_id, host_connection_id, request.payload) + .forward_request( + request.sender_connection_id, + host_connection_id, + request.payload, + ) .await?; // Ensure project still exists by the time we get the response from the host. self.store() .await - .read_project(project_id, request.sender_id)?; + .read_project(project_id, request.sender_connection_id)?; response.send(payload)?; Ok(()) @@ -1207,26 +1271,26 @@ impl Server { async fn save_buffer( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); let host = self .store() .await - .read_project(project_id, request.sender_id)? + .read_project(project_id, request.sender_connection_id)? .host_connection_id; let response_payload = self .peer - .forward_request(request.sender_id, host, request.payload.clone()) + .forward_request(request.sender_connection_id, host, request.payload.clone()) .await?; let mut guests = self .store() .await - .read_project(project_id, request.sender_id)? + .read_project(project_id, request.sender_connection_id)? 
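            // `read_project` re-checks that the sender still has access to the project
            // after the host responded; the requester is then dropped from `guests`
            // below, since it receives the host's reply via the direct response rather
            // than the broadcast.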
.connection_ids(); - guests.retain(|guest_connection_id| *guest_connection_id != request.sender_id); + guests.retain(|guest_connection_id| *guest_connection_id != request.sender_connection_id); broadcast(host, guests, |conn_id| { self.peer .forward_send(host, conn_id, response_payload.clone()) @@ -1237,10 +1301,10 @@ impl Server { async fn create_buffer_for_peer( self: Arc, - request: TypedEnvelope, + request: Message, ) -> Result<()> { self.peer.forward_send( - request.sender_id, + request.sender_connection_id, ConnectionId(request.payload.peer_id), request.payload, )?; @@ -1249,76 +1313,101 @@ impl Server { async fn update_buffer( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); let receiver_ids = { let store = self.store().await; - store.project_connection_ids(project_id, request.sender_id)? + store.project_connection_ids(project_id, request.sender_connection_id)? }; - broadcast(request.sender_id, receiver_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); + broadcast( + request.sender_connection_id, + receiver_ids, + |connection_id| { + self.peer.forward_send( + request.sender_connection_id, + connection_id, + request.payload.clone(), + ) + }, + ); response.send(proto::Ack {})?; Ok(()) } async fn update_buffer_file( self: Arc, - request: TypedEnvelope, + request: Message, ) -> Result<()> { let receiver_ids = self.store().await.project_connection_ids( ProjectId::from_proto(request.payload.project_id), - request.sender_id, + request.sender_connection_id, )?; - broadcast(request.sender_id, receiver_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); + broadcast( + request.sender_connection_id, + receiver_ids, + |connection_id| { + self.peer.forward_send( + request.sender_connection_id, + connection_id, + request.payload.clone(), + ) + }, + ); Ok(()) } async fn buffer_reloaded( self: Arc, - request: TypedEnvelope, + request: Message, ) -> Result<()> { let receiver_ids = self.store().await.project_connection_ids( ProjectId::from_proto(request.payload.project_id), - request.sender_id, + request.sender_connection_id, )?; - broadcast(request.sender_id, receiver_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); + broadcast( + request.sender_connection_id, + receiver_ids, + |connection_id| { + self.peer.forward_send( + request.sender_connection_id, + connection_id, + request.payload.clone(), + ) + }, + ); Ok(()) } - async fn buffer_saved( - self: Arc, - request: TypedEnvelope, - ) -> Result<()> { + async fn buffer_saved(self: Arc, request: Message) -> Result<()> { let receiver_ids = self.store().await.project_connection_ids( ProjectId::from_proto(request.payload.project_id), - request.sender_id, + request.sender_connection_id, )?; - broadcast(request.sender_id, receiver_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); + broadcast( + request.sender_connection_id, + receiver_ids, + |connection_id| { + self.peer.forward_send( + request.sender_connection_id, + connection_id, + request.payload.clone(), + ) + }, + ); Ok(()) } async fn follow( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); let leader_id = 
ConnectionId(request.payload.leader_id); - let follower_id = request.sender_id; + let follower_id = request.sender_connection_id; { let store = self.store().await; if !store @@ -1331,7 +1420,7 @@ impl Server { let mut response_payload = self .peer - .forward_request(request.sender_id, leader_id, request.payload) + .forward_request(request.sender_connection_id, leader_id, request.payload) .await?; response_payload .views @@ -1340,28 +1429,29 @@ impl Server { Ok(()) } - async fn unfollow(self: Arc, request: TypedEnvelope) -> Result<()> { + async fn unfollow(self: Arc, request: Message) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); let leader_id = ConnectionId(request.payload.leader_id); let store = self.store().await; if !store - .project_connection_ids(project_id, request.sender_id)? + .project_connection_ids(project_id, request.sender_connection_id)? .contains(&leader_id) { Err(anyhow!("no such peer"))?; } self.peer - .forward_send(request.sender_id, leader_id, request.payload)?; + .forward_send(request.sender_connection_id, leader_id, request.payload)?; Ok(()) } async fn update_followers( self: Arc, - request: TypedEnvelope, + request: Message, ) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); let store = self.store().await; - let connection_ids = store.project_connection_ids(project_id, request.sender_id)?; + let connection_ids = + store.project_connection_ids(project_id, request.sender_connection_id)?; let leader_id = request .payload .variant @@ -1374,8 +1464,11 @@ impl Server { for follower_id in &request.payload.follower_ids { let follower_id = ConnectionId(*follower_id); if connection_ids.contains(&follower_id) && Some(follower_id.0) != leader_id { - self.peer - .forward_send(request.sender_id, follower_id, request.payload.clone())?; + self.peer.forward_send( + request.sender_connection_id, + follower_id, + request.payload.clone(), + )?; } } Ok(()) @@ -1383,7 +1476,7 @@ impl Server { async fn get_users( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { let user_ids = request @@ -1410,13 +1503,9 @@ impl Server { async fn fuzzy_search_users( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { - let user_id = self - .store() - .await - .user_id_for_connection(request.sender_id)?; let query = request.payload.query; let db = &self.app_state.db; let users = match query.len() { @@ -1430,7 +1519,7 @@ impl Server { }; let users = users .into_iter() - .filter(|user| user.id != user_id) + .filter(|user| user.id != request.sender_user_id) .map(|user| proto::User { id: user.id.to_proto(), avatar_url: format!("https://github.com/{}.png?size=128", user.github_login), @@ -1443,13 +1532,10 @@ impl Server { async fn request_contact( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { - let requester_id = self - .store() - .await - .user_id_for_connection(request.sender_id)?; + let requester_id = request.sender_user_id; let responder_id = UserId::from_proto(request.payload.responder_id); if requester_id == responder_id { return Err(anyhow!("cannot add yourself as a contact"))?; @@ -1485,13 +1571,10 @@ impl Server { async fn respond_to_contact_request( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { - let responder_id = self - .store() - .await - .user_id_for_connection(request.sender_id)?; + let responder_id = request.sender_user_id; let requester_id = 
UserId::from_proto(request.payload.requester_id); if request.payload.response == proto::ContactRequestResponse::Dismiss as i32 { self.app_state @@ -1541,13 +1624,10 @@ impl Server { async fn remove_contact( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { - let requester_id = self - .store() - .await - .user_id_for_connection(request.sender_id)?; + let requester_id = request.sender_user_id; let responder_id = UserId::from_proto(request.payload.user_id); self.app_state .db @@ -1578,33 +1658,40 @@ impl Server { async fn update_diff_base( self: Arc, - request: TypedEnvelope, + request: Message, ) -> Result<()> { let receiver_ids = self.store().await.project_connection_ids( ProjectId::from_proto(request.payload.project_id), - request.sender_id, + request.sender_connection_id, )?; - broadcast(request.sender_id, receiver_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); + broadcast( + request.sender_connection_id, + receiver_ids, + |connection_id| { + self.peer.forward_send( + request.sender_connection_id, + connection_id, + request.payload.clone(), + ) + }, + ); Ok(()) } async fn get_private_user_info( self: Arc, - request: TypedEnvelope, + request: Message, response: Response, ) -> Result<()> { - let user_id = self - .store() - .await - .user_id_for_connection(request.sender_id)?; - let metrics_id = self.app_state.db.get_user_metrics_id(user_id).await?; + let metrics_id = self + .app_state + .db + .get_user_metrics_id(request.sender_user_id) + .await?; let user = self .app_state .db - .get_user_by_id(user_id) + .get_user_by_id(request.sender_user_id) .await? .ok_or_else(|| anyhow!("user not found"))?; response.send(proto::GetPrivateUserInfoResponse { From 6871bbbc718d8d60951712f03462ce9c69d20c4a Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 11 Nov 2022 12:06:43 +0100 Subject: [PATCH 003/240] Start moving `Store` state into the database --- crates/call/src/call.rs | 20 +- crates/call/src/room.rs | 8 +- .../20221109000000_test_schema.sql | 2 +- .../20221111092550_reconnection_support.sql | 2 +- crates/collab/src/db.rs | 354 +++++++++++++++++- crates/collab/src/integration_tests.rs | 14 +- crates/collab/src/rpc.rs | 115 +++--- crates/collab/src/rpc/store.rs | 248 +----------- .../src/incoming_call_notification.rs | 6 +- crates/rpc/proto/zed.proto | 13 +- crates/rpc/src/rpc.rs | 2 +- 11 files changed, 447 insertions(+), 337 deletions(-) diff --git a/crates/call/src/call.rs b/crates/call/src/call.rs index 6b72eb61da..803fbb906a 100644 --- a/crates/call/src/call.rs +++ b/crates/call/src/call.rs @@ -22,7 +22,7 @@ pub fn init(client: Arc, user_store: ModelHandle, cx: &mut Mu #[derive(Clone)] pub struct IncomingCall { pub room_id: u64, - pub caller: Arc, + pub calling_user: Arc, pub participants: Vec>, pub initial_project: Option, } @@ -78,9 +78,9 @@ impl ActiveCall { user_store.get_users(envelope.payload.participant_user_ids, cx) }) .await?, - caller: user_store + calling_user: user_store .update(&mut cx, |user_store, cx| { - user_store.get_user(envelope.payload.caller_user_id, cx) + user_store.get_user(envelope.payload.calling_user_id, cx) }) .await?, initial_project: envelope.payload.initial_project, @@ -110,13 +110,13 @@ impl ActiveCall { pub fn invite( &mut self, - recipient_user_id: u64, + called_user_id: u64, initial_project: Option>, cx: &mut ModelContext, ) -> Task> { let client = self.client.clone(); let user_store = self.user_store.clone(); - if 
!self.pending_invites.insert(recipient_user_id) { + if !self.pending_invites.insert(called_user_id) { return Task::ready(Err(anyhow!("user was already invited"))); } @@ -136,13 +136,13 @@ impl ActiveCall { }; room.update(&mut cx, |room, cx| { - room.call(recipient_user_id, initial_project_id, cx) + room.call(called_user_id, initial_project_id, cx) }) .await?; } else { let room = cx .update(|cx| { - Room::create(recipient_user_id, initial_project, client, user_store, cx) + Room::create(called_user_id, initial_project, client, user_store, cx) }) .await?; @@ -155,7 +155,7 @@ impl ActiveCall { let result = invite.await; this.update(&mut cx, |this, cx| { - this.pending_invites.remove(&recipient_user_id); + this.pending_invites.remove(&called_user_id); cx.notify(); }); result @@ -164,7 +164,7 @@ impl ActiveCall { pub fn cancel_invite( &mut self, - recipient_user_id: u64, + called_user_id: u64, cx: &mut ModelContext, ) -> Task> { let room_id = if let Some(room) = self.room() { @@ -178,7 +178,7 @@ impl ActiveCall { client .request(proto::CancelCall { room_id, - recipient_user_id, + called_user_id, }) .await?; anyhow::Ok(()) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index 7d5153950d..3e55dc4ce9 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -149,7 +149,7 @@ impl Room { } pub(crate) fn create( - recipient_user_id: u64, + called_user_id: u64, initial_project: Option>, client: Arc, user_store: ModelHandle, @@ -182,7 +182,7 @@ impl Room { match room .update(&mut cx, |room, cx| { room.leave_when_empty = true; - room.call(recipient_user_id, initial_project_id, cx) + room.call(called_user_id, initial_project_id, cx) }) .await { @@ -487,7 +487,7 @@ impl Room { pub(crate) fn call( &mut self, - recipient_user_id: u64, + called_user_id: u64, initial_project_id: Option, cx: &mut ModelContext, ) -> Task> { @@ -503,7 +503,7 @@ impl Room { let result = client .request(proto::Call { room_id, - recipient_user_id, + called_user_id, initial_project_id, }) .await; diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 731910027e..9302657523 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -82,4 +82,4 @@ CREATE TABLE "calls" ( "answering_connection_id" INTEGER, "initial_project_id" INTEGER REFERENCES projects (id) ); -CREATE UNIQUE INDEX "index_calls_on_calling_user_id" ON "calls" ("calling_user_id"); +CREATE UNIQUE INDEX "index_calls_on_called_user_id" ON "calls" ("called_user_id"); diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index 9474beff42..8f932acff3 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -44,4 +44,4 @@ CREATE TABLE IF NOT EXISTS "calls" ( "answering_connection_id" INTEGER, "initial_project_id" INTEGER REFERENCES projects (id) ); -CREATE UNIQUE INDEX "index_calls_on_calling_user_id" ON "calls" ("calling_user_id"); +CREATE UNIQUE INDEX "index_calls_on_called_user_id" ON "calls" ("called_user_id"); diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 10da609d57..b7d6f995b0 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -3,6 +3,7 @@ use anyhow::anyhow; use axum::http::StatusCode; use collections::HashMap; use futures::StreamExt; +use 
rpc::{proto, ConnectionId}; use serde::{Deserialize, Serialize}; use sqlx::{ migrate::{Migrate as _, Migration, MigrationSource}, @@ -565,6 +566,7 @@ where for<'a> i64: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, for<'a> bool: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, for<'a> Uuid: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, + for<'a> Option: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, for<'a> sqlx::types::JsonValue: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, for<'a> OffsetDateTime: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, for<'a> PrimitiveDateTime: sqlx::Decode<'a, D> + sqlx::Decode<'a, D>, @@ -882,42 +884,352 @@ where }) } - // projects - - /// Registers a new project for the given user. - pub async fn register_project(&self, host_user_id: UserId) -> Result { + pub async fn create_room( + &self, + user_id: UserId, + connection_id: ConnectionId, + ) -> Result { test_support!(self, { - Ok(sqlx::query_scalar( + let mut tx = self.pool.begin().await?; + let live_kit_room = nanoid::nanoid!(30); + let room_id = sqlx::query_scalar( " - INSERT INTO projects(host_user_id) - VALUES ($1) + INSERT INTO rooms (live_kit_room, version) + VALUES ($1, $2) RETURNING id ", ) - .bind(host_user_id) - .fetch_one(&self.pool) + .bind(&live_kit_room) + .bind(0) + .fetch_one(&mut tx) .await - .map(ProjectId)?) + .map(RoomId)?; + + sqlx::query( + " + INSERT INTO room_participants (room_id, user_id, connection_id) + VALUES ($1, $2, $3) + ", + ) + .bind(room_id) + .bind(user_id) + .bind(connection_id.0 as i32) + .execute(&mut tx) + .await?; + + sqlx::query( + " + INSERT INTO calls (room_id, calling_user_id, called_user_id, answering_connection_id) + VALUES ($1, $2, $3, $4) + ", + ) + .bind(room_id) + .bind(user_id) + .bind(user_id) + .bind(connection_id.0 as i32) + .execute(&mut tx) + .await?; + + self.commit_room_transaction(room_id, tx).await }) } - /// Unregisters a project for the given project id. 
-    pub async fn unregister_project(&self, project_id: ProjectId) -> Result<()> {
+    pub async fn call(
+        &self,
+        room_id: RoomId,
+        calling_user_id: UserId,
+        called_user_id: UserId,
+        initial_project_id: Option<ProjectId>,
+    ) -> Result<proto::Room> {
         test_support!(self, {
+            let mut tx = self.pool.begin().await?;
             sqlx::query(
                 "
-                UPDATE projects
-                SET unregistered = TRUE
-                WHERE id = $1
+                INSERT INTO calls (room_id, calling_user_id, called_user_id, initial_project_id)
+                VALUES ($1, $2, $3, $4)
+                ",
+            )
+            .bind(room_id)
+            .bind(calling_user_id)
+            .bind(called_user_id)
+            .bind(initial_project_id)
+            .execute(&mut tx)
+            .await?;
+
+            sqlx::query(
+                "
+                INSERT INTO room_participants (room_id, user_id)
+                VALUES ($1, $2)
                 ",
             )
-            .bind(project_id)
-            .execute(&self.pool)
+            .bind(room_id)
+            .bind(called_user_id)
+            .execute(&mut tx)
             .await?;
-            Ok(())
+
+            self.commit_room_transaction(room_id, tx).await
         })
     }
+
+    pub async fn call_failed(
+        &self,
+        room_id: RoomId,
+        called_user_id: UserId,
+    ) -> Result<proto::Room> {
+        test_support!(self, {
+            let mut tx = self.pool.begin().await?;
+            sqlx::query(
+                "
+                DELETE FROM calls
+                WHERE room_id = $1 AND called_user_id = $2
+                ",
+            )
+            .bind(room_id)
+            .bind(called_user_id)
+            .execute(&mut tx)
+            .await?;
+
+            sqlx::query(
+                "
+                DELETE FROM room_participants
+                WHERE room_id = $1 AND user_id = $2
+                ",
+            )
+            .bind(room_id)
+            .bind(called_user_id)
+            .execute(&mut tx)
+            .await?;
+
+            self.commit_room_transaction(room_id, tx).await
+        })
+    }
+
+    pub async fn update_room_participant_location(
+        &self,
+        room_id: RoomId,
+        user_id: UserId,
+        location: proto::ParticipantLocation,
+    ) -> Result<proto::Room> {
+        test_support!(self, {
+            let mut tx = self.pool.begin().await?;
+
+            let location_kind;
+            let location_project_id;
+            match location
+                .variant
+                .ok_or_else(|| anyhow!("invalid location"))?
+            {
+                proto::participant_location::Variant::SharedProject(project) => {
+                    location_kind = 0;
+                    location_project_id = Some(ProjectId::from_proto(project.id));
+                }
+                proto::participant_location::Variant::UnsharedProject(_) => {
+                    location_kind = 1;
+                    location_project_id = None;
+                }
+                proto::participant_location::Variant::External(_) => {
+                    location_kind = 2;
+                    location_project_id = None;
+                }
+            }
+
+            sqlx::query(
+                "
+                UPDATE room_participants
+                SET location_kind = $1, location_project_id = $2
+                WHERE room_id = $3 AND user_id = $4
+                ",
+            )
+            .bind(location_kind)
+            .bind(location_project_id)
+            .bind(room_id)
+            .bind(user_id)
+            .execute(&mut tx)
+            .await?;
+
+            self.commit_room_transaction(room_id, tx).await
+        })
+    }
+
+    async fn commit_room_transaction(
+        &self,
+        room_id: RoomId,
+        mut tx: sqlx::Transaction<'_, D>,
+    ) -> Result<proto::Room> {
+        sqlx::query(
+            "
+            UPDATE rooms
+            SET version = version + 1
+            WHERE id = $1
+            ",
+        )
+        .bind(room_id)
+        .execute(&mut tx)
+        .await?;
+
+        let room: Room = sqlx::query_as(
+            "
+            SELECT *
+            FROM rooms
+            WHERE id = $1
+            ",
+        )
+        .bind(room_id)
+        .fetch_one(&mut tx)
+        .await?;
+
+        let mut db_participants =
+            sqlx::query_as::<_, (UserId, Option<i32>, Option<i32>, Option<ProjectId>)>(
+                "
+                SELECT user_id, connection_id, location_kind, location_project_id
+                FROM room_participants
+                WHERE room_id = $1
+                ",
+            )
+            .bind(room_id)
+            .fetch(&mut tx);
+
+        let mut participants = Vec::new();
+        let mut pending_participant_user_ids = Vec::new();
+        while let Some(participant) = db_participants.next().await {
+            let (user_id, connection_id, _location_kind, _location_project_id) = participant?;
+            if let Some(connection_id) = connection_id {
+                participants.push(proto::Participant {
+                    user_id: user_id.to_proto(),
+                    peer_id: connection_id as u32,
+                    projects: Default::default(),
+                    location:
Some(proto::ParticipantLocation { + variant: Some(proto::participant_location::Variant::External( + Default::default(), + )), + }), + }); + } else { + pending_participant_user_ids.push(user_id.to_proto()); + } + } + drop(db_participants); + + for participant in &mut participants { + let mut entries = sqlx::query_as::<_, (ProjectId, String)>( + " + SELECT projects.id, worktrees.root_name + FROM projects + LEFT JOIN worktrees ON projects.id = worktrees.project_id + WHERE room_id = $1 AND host_user_id = $2 + ", + ) + .bind(room_id) + .fetch(&mut tx); + + let mut projects = HashMap::default(); + while let Some(entry) = entries.next().await { + let (project_id, worktree_root_name) = entry?; + let participant_project = + projects + .entry(project_id) + .or_insert(proto::ParticipantProject { + id: project_id.to_proto(), + worktree_root_names: Default::default(), + }); + participant_project + .worktree_root_names + .push(worktree_root_name); + } + + participant.projects = projects.into_values().collect(); + } + + tx.commit().await?; + + Ok(proto::Room { + id: room.id.to_proto(), + version: room.version as u64, + live_kit_room: room.live_kit_room, + participants, + pending_participant_user_ids, + }) + } + + // projects + + pub async fn share_project( + &self, + user_id: UserId, + connection_id: ConnectionId, + room_id: RoomId, + worktrees: &[proto::WorktreeMetadata], + ) -> Result<(ProjectId, proto::Room)> { + test_support!(self, { + let mut tx = self.pool.begin().await?; + let project_id = sqlx::query_scalar( + " + INSERT INTO projects (host_user_id, room_id) + VALUES ($1) + RETURNING id + ", + ) + .bind(user_id) + .bind(room_id) + .fetch_one(&mut tx) + .await + .map(ProjectId)?; + + for worktree in worktrees { + sqlx::query( + " + INSERT INTO worktrees (id, project_id, root_name) + ", + ) + .bind(worktree.id as i32) + .bind(project_id) + .bind(&worktree.root_name) + .execute(&mut tx) + .await?; + } + + sqlx::query( + " + INSERT INTO project_collaborators ( + project_id, + connection_id, + user_id, + replica_id, + is_host + ) + VALUES ($1, $2, $3, $4, $5) ", ) .bind(project_id) - .execute(&self.pool) + .bind(connection_id.0 as i32) + .bind(user_id) + .bind(0) + .bind(true) + .execute(&mut tx) .await?; - Ok(()) + + let room = self.commit_room_transaction(room_id, tx).await?; + Ok((project_id, room)) }) } + pub async fn unshare_project(&self, project_id: ProjectId) -> Result<()> { + todo!() + // test_support!(self, { + // sqlx::query( + // " + // UPDATE projects + // SET unregistered = TRUE + // WHERE id = $1 + // ", + // ) + // .bind(project_id) + // .execute(&self.pool) + // .await?; + // Ok(()) + // }) + } + // contacts pub async fn get_contacts(&self, user_id: UserId) -> Result> { @@ -1246,6 +1558,14 @@ pub struct User { pub connected_once: bool, } +id_type!(RoomId); +#[derive(Clone, Debug, Default, FromRow, Serialize, PartialEq)] +pub struct Room { + pub id: RoomId, + pub version: i32, + pub live_kit_room: String, +} + id_type!(ProjectId); #[derive(Clone, Debug, Default, FromRow, Serialize, PartialEq)] pub struct Project { diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 0a6c01a691..6d3cff1718 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -104,7 +104,7 @@ async fn test_basic_calls( // User B receives the call. 
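    // (`incoming()` is observed as a watch-style stream of Option<IncomingCall>: a ring
    // yields Some(call) and a cancel or decline resets it to None, which is why user C's
    // declined call below is asserted with `next().await.unwrap().is_none()`.)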
let mut incoming_call_b = active_call_b.read_with(cx_b, |call, _| call.incoming()); let call_b = incoming_call_b.next().await.unwrap().unwrap(); - assert_eq!(call_b.caller.github_login, "user_a"); + assert_eq!(call_b.calling_user.github_login, "user_a"); // User B connects via another client and also receives a ring on the newly-connected client. let _client_b2 = server.create_client(cx_b2, "user_b").await; @@ -112,7 +112,7 @@ async fn test_basic_calls( let mut incoming_call_b2 = active_call_b2.read_with(cx_b2, |call, _| call.incoming()); deterministic.run_until_parked(); let call_b2 = incoming_call_b2.next().await.unwrap().unwrap(); - assert_eq!(call_b2.caller.github_login, "user_a"); + assert_eq!(call_b2.calling_user.github_login, "user_a"); // User B joins the room using the first client. active_call_b @@ -165,7 +165,7 @@ async fn test_basic_calls( // User C receives the call, but declines it. let call_c = incoming_call_c.next().await.unwrap().unwrap(); - assert_eq!(call_c.caller.github_login, "user_b"); + assert_eq!(call_c.calling_user.github_login, "user_b"); active_call_c.update(cx_c, |call, _| call.decline_incoming().unwrap()); assert!(incoming_call_c.next().await.unwrap().is_none()); @@ -308,7 +308,7 @@ async fn test_room_uniqueness( // User B receives the call from user A. let mut incoming_call_b = active_call_b.read_with(cx_b, |call, _| call.incoming()); let call_b1 = incoming_call_b.next().await.unwrap().unwrap(); - assert_eq!(call_b1.caller.github_login, "user_a"); + assert_eq!(call_b1.calling_user.github_login, "user_a"); // Ensure calling users A and B from client C fails. active_call_c @@ -367,7 +367,7 @@ async fn test_room_uniqueness( .unwrap(); deterministic.run_until_parked(); let call_b2 = incoming_call_b.next().await.unwrap().unwrap(); - assert_eq!(call_b2.caller.github_login, "user_c"); + assert_eq!(call_b2.calling_user.github_login, "user_c"); } #[gpui::test(iterations = 10)] @@ -695,7 +695,7 @@ async fn test_share_project( let incoming_call_b = active_call_b.read_with(cx_b, |call, _| call.incoming()); deterministic.run_until_parked(); let call = incoming_call_b.borrow().clone().unwrap(); - assert_eq!(call.caller.github_login, "user_a"); + assert_eq!(call.calling_user.github_login, "user_a"); let initial_project = call.initial_project.unwrap(); active_call_b .update(cx_b, |call, cx| call.accept_incoming(cx)) @@ -766,7 +766,7 @@ async fn test_share_project( let incoming_call_c = active_call_c.read_with(cx_c, |call, _| call.incoming()); deterministic.run_until_parked(); let call = incoming_call_c.borrow().clone().unwrap(); - assert_eq!(call.caller.github_login, "user_b"); + assert_eq!(call.calling_user.github_login, "user_b"); let initial_project = call.initial_project.unwrap(); active_call_c .update(cx_c, |call, cx| call.accept_incoming(cx)) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 757c765838..75ff703b1f 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -2,7 +2,7 @@ mod store; use crate::{ auth, - db::{self, ProjectId, User, UserId}, + db::{self, ProjectId, RoomId, User, UserId}, AppState, Result, }; use anyhow::anyhow; @@ -486,7 +486,7 @@ impl Server { for project_id in projects_to_unshare { self.app_state .db - .unregister_project(project_id) + .unshare_project(project_id) .await .trace_err(); } @@ -559,11 +559,11 @@ impl Server { request: Message, response: Response, ) -> Result<()> { - let room; - { - let mut store = self.store().await; - room = store.create_room(request.sender_connection_id)?.clone(); - } + let 
room = self + .app_state + .db + .create_room(request.sender_user_id, request.sender_connection_id) + .await?; let live_kit_connection_info = if let Some(live_kit) = self.app_state.live_kit_client.as_ref() { @@ -710,8 +710,9 @@ impl Server { request: Message, response: Response, ) -> Result<()> { - let caller_user_id = request.sender_user_id; - let recipient_user_id = UserId::from_proto(request.payload.recipient_user_id); + let room_id = RoomId::from_proto(request.payload.room_id); + let calling_user_id = request.sender_user_id; + let called_user_id = UserId::from_proto(request.payload.called_user_id); let initial_project_id = request .payload .initial_project_id @@ -719,31 +720,44 @@ impl Server { if !self .app_state .db - .has_contact(caller_user_id, recipient_user_id) + .has_contact(calling_user_id, called_user_id) .await? { return Err(anyhow!("cannot call a user who isn't a contact"))?; } - let room_id = request.payload.room_id; - let mut calls = { - let mut store = self.store().await; - let (room, recipient_connection_ids, incoming_call) = store.call( - room_id, - recipient_user_id, - initial_project_id, - request.sender_connection_id, - )?; - self.room_updated(room); - recipient_connection_ids - .into_iter() - .map(|recipient_connection_id| { - self.peer - .request(recipient_connection_id, incoming_call.clone()) - }) - .collect::>() + let room = self + .app_state + .db + .call(room_id, calling_user_id, called_user_id, initial_project_id) + .await?; + self.room_updated(&room); + self.update_user_contacts(called_user_id).await?; + + let incoming_call = proto::IncomingCall { + room_id: room_id.to_proto(), + calling_user_id: calling_user_id.to_proto(), + participant_user_ids: room + .participants + .iter() + .map(|participant| participant.user_id) + .collect(), + initial_project: room.participants.iter().find_map(|participant| { + let initial_project_id = initial_project_id?.to_proto(); + participant + .projects + .iter() + .find(|project| project.id == initial_project_id) + .cloned() + }), }; - self.update_user_contacts(recipient_user_id).await?; + + let mut calls = self + .store() + .await + .connection_ids_for_user(called_user_id) + .map(|connection_id| self.peer.request(connection_id, incoming_call.clone())) + .collect::>(); while let Some(call_response) = calls.next().await { match call_response.as_ref() { @@ -757,12 +771,13 @@ impl Server { } } - { - let mut store = self.store().await; - let room = store.call_failed(room_id, recipient_user_id)?; - self.room_updated(&room); - } - self.update_user_contacts(recipient_user_id).await?; + let room = self + .app_state + .db + .call_failed(room_id, called_user_id) + .await?; + self.room_updated(&room); + self.update_user_contacts(called_user_id).await?; Err(anyhow!("failed to ring call recipient"))? 
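        // This failure path runs only if every connection of the called user declined or
        // errored: `call_failed` above has already deleted the pending `calls` and
        // `room_participants` rows in one room transaction and re-broadcast the bumped
        // room version, so the caller observes a room without the callee by the time
        // this error arrives.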
} @@ -772,7 +787,7 @@ impl Server { request: Message, response: Response, ) -> Result<()> { - let recipient_user_id = UserId::from_proto(request.payload.recipient_user_id); + let recipient_user_id = UserId::from_proto(request.payload.called_user_id); { let mut store = self.store().await; let (room, recipient_connection_ids) = store.cancel_call( @@ -814,15 +829,17 @@ impl Server { request: Message, response: Response, ) -> Result<()> { - let room_id = request.payload.room_id; + let room_id = RoomId::from_proto(request.payload.room_id); let location = request .payload .location .ok_or_else(|| anyhow!("invalid location"))?; - let mut store = self.store().await; - let room = - store.update_participant_location(room_id, location, request.sender_connection_id)?; - self.room_updated(room); + let room = self + .app_state + .db + .update_room_participant_location(room_id, request.sender_user_id, location) + .await?; + self.room_updated(&room); response.send(proto::Ack {})?; Ok(()) } @@ -868,22 +885,20 @@ impl Server { request: Message, response: Response, ) -> Result<()> { - let project_id = self + let (project_id, room) = self .app_state .db - .register_project(request.sender_user_id) + .share_project( + request.sender_user_id, + request.sender_connection_id, + RoomId::from_proto(request.payload.room_id), + &request.payload.worktrees, + ) .await?; - let mut store = self.store().await; - let room = store.share_project( - request.payload.room_id, - project_id, - request.payload.worktrees, - request.sender_connection_id, - )?; response.send(proto::ShareProjectResponse { project_id: project_id.to_proto(), })?; - self.room_updated(room); + self.room_updated(&room); Ok(()) } diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index 81ef594ccd..72da82ea8c 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -1,12 +1,10 @@ use crate::db::{self, ProjectId, UserId}; use anyhow::{anyhow, Result}; use collections::{btree_map, BTreeMap, BTreeSet, HashMap, HashSet}; -use nanoid::nanoid; use rpc::{proto, ConnectionId}; use serde::Serialize; use std::{borrow::Cow, mem, path::PathBuf, str}; use tracing::instrument; -use util::post_inc; pub type RoomId = u64; @@ -34,7 +32,7 @@ struct ConnectionState { #[derive(Copy, Clone, Eq, PartialEq, Serialize)] pub struct Call { - pub caller_user_id: UserId, + pub calling_user_id: UserId, pub room_id: RoomId, pub connection_id: Option, pub initial_project_id: Option, @@ -147,7 +145,7 @@ impl Store { let room = self.room(active_call.room_id)?; Some(proto::IncomingCall { room_id: active_call.room_id, - caller_user_id: active_call.caller_user_id.to_proto(), + calling_user_id: active_call.calling_user_id.to_proto(), participant_user_ids: room .participants .iter() @@ -285,47 +283,6 @@ impl Store { } } - pub fn create_room(&mut self, creator_connection_id: ConnectionId) -> Result<&proto::Room> { - let connection = self - .connections - .get_mut(&creator_connection_id) - .ok_or_else(|| anyhow!("no such connection"))?; - let connected_user = self - .connected_users - .get_mut(&connection.user_id) - .ok_or_else(|| anyhow!("no such connection"))?; - anyhow::ensure!( - connected_user.active_call.is_none(), - "can't create a room with an active call" - ); - - let room_id = post_inc(&mut self.next_room_id); - let room = proto::Room { - id: room_id, - participants: vec![proto::Participant { - user_id: connection.user_id.to_proto(), - peer_id: creator_connection_id.0, - projects: Default::default(), - location: 
Some(proto::ParticipantLocation { - variant: Some(proto::participant_location::Variant::External( - proto::participant_location::External {}, - )), - }), - }], - pending_participant_user_ids: Default::default(), - live_kit_room: nanoid!(30), - }; - - self.rooms.insert(room_id, room); - connected_user.active_call = Some(Call { - caller_user_id: connection.user_id, - room_id, - connection_id: Some(creator_connection_id), - initial_project_id: None, - }); - Ok(self.rooms.get(&room_id).unwrap()) - } - pub fn join_room( &mut self, room_id: RoomId, @@ -424,7 +381,7 @@ impl Store { .get_mut(&UserId::from_proto(*pending_participant_user_id)) { if let Some(call) = connected_user.active_call.as_ref() { - if call.caller_user_id == user_id { + if call.calling_user_id == user_id { connected_user.active_call.take(); canceled_call_connection_ids .extend(connected_user.connection_ids.iter().copied()); @@ -462,101 +419,10 @@ impl Store { &self.rooms } - pub fn call( - &mut self, - room_id: RoomId, - recipient_user_id: UserId, - initial_project_id: Option, - from_connection_id: ConnectionId, - ) -> Result<(&proto::Room, Vec, proto::IncomingCall)> { - let caller_user_id = self.user_id_for_connection(from_connection_id)?; - - let recipient_connection_ids = self - .connection_ids_for_user(recipient_user_id) - .collect::>(); - let mut recipient = self - .connected_users - .get_mut(&recipient_user_id) - .ok_or_else(|| anyhow!("no such connection"))?; - anyhow::ensure!( - recipient.active_call.is_none(), - "recipient is already on another call" - ); - - let room = self - .rooms - .get_mut(&room_id) - .ok_or_else(|| anyhow!("no such room"))?; - anyhow::ensure!( - room.participants - .iter() - .any(|participant| participant.peer_id == from_connection_id.0), - "no such room" - ); - anyhow::ensure!( - room.pending_participant_user_ids - .iter() - .all(|user_id| UserId::from_proto(*user_id) != recipient_user_id), - "cannot call the same user more than once" - ); - room.pending_participant_user_ids - .push(recipient_user_id.to_proto()); - - if let Some(initial_project_id) = initial_project_id { - let project = self - .projects - .get(&initial_project_id) - .ok_or_else(|| anyhow!("no such project"))?; - anyhow::ensure!(project.room_id == room_id, "no such project"); - } - - recipient.active_call = Some(Call { - caller_user_id, - room_id, - connection_id: None, - initial_project_id, - }); - - Ok(( - room, - recipient_connection_ids, - proto::IncomingCall { - room_id, - caller_user_id: caller_user_id.to_proto(), - participant_user_ids: room - .participants - .iter() - .map(|participant| participant.user_id) - .collect(), - initial_project: initial_project_id - .and_then(|id| Self::build_participant_project(id, &self.projects)), - }, - )) - } - - pub fn call_failed(&mut self, room_id: RoomId, to_user_id: UserId) -> Result<&proto::Room> { - let mut recipient = self - .connected_users - .get_mut(&to_user_id) - .ok_or_else(|| anyhow!("no such connection"))?; - anyhow::ensure!(recipient - .active_call - .map_or(false, |call| call.room_id == room_id - && call.connection_id.is_none())); - recipient.active_call = None; - let room = self - .rooms - .get_mut(&room_id) - .ok_or_else(|| anyhow!("no such room"))?; - room.pending_participant_user_ids - .retain(|user_id| UserId::from_proto(*user_id) != to_user_id); - Ok(room) - } - pub fn cancel_call( &mut self, room_id: RoomId, - recipient_user_id: UserId, + called_user_id: UserId, canceller_connection_id: ConnectionId, ) -> Result<(&proto::Room, HashSet)> { let canceller_user_id = 
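Both the in-memory `Call` being removed in this file and the DB rows that replace it in the later patches encode the same two-state machine; a sketch of the correspondence:

    // How ring state is represented before and after this series:
    enum CallState {
        Ringing,       // store: call.connection_id == None
                       // db:    room_participants.connection_id IS NULL
        Answered(u32), // store: Some(connection_id)
                       // db:    connection_id = <the answering connection>
    }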
self.user_id_for_connection(canceller_connection_id)?; @@ -566,7 +432,7 @@ impl Store { .ok_or_else(|| anyhow!("no such connection"))?; let recipient = self .connected_users - .get(&recipient_user_id) + .get(&called_user_id) .ok_or_else(|| anyhow!("no such connection"))?; let canceller_active_call = canceller .active_call @@ -595,9 +461,9 @@ impl Store { .get_mut(&room_id) .ok_or_else(|| anyhow!("no such room"))?; room.pending_participant_user_ids - .retain(|user_id| UserId::from_proto(*user_id) != recipient_user_id); + .retain(|user_id| UserId::from_proto(*user_id) != called_user_id); - let recipient = self.connected_users.get_mut(&recipient_user_id).unwrap(); + let recipient = self.connected_users.get_mut(&called_user_id).unwrap(); recipient.active_call.take(); Ok((room, recipient.connection_ids.clone())) @@ -608,10 +474,10 @@ impl Store { room_id: RoomId, recipient_connection_id: ConnectionId, ) -> Result<(&proto::Room, Vec)> { - let recipient_user_id = self.user_id_for_connection(recipient_connection_id)?; + let called_user_id = self.user_id_for_connection(recipient_connection_id)?; let recipient = self .connected_users - .get_mut(&recipient_user_id) + .get_mut(&called_user_id) .ok_or_else(|| anyhow!("no such connection"))?; if let Some(active_call) = recipient.active_call { anyhow::ensure!(active_call.room_id == room_id, "no such room"); @@ -621,112 +487,20 @@ impl Store { ); recipient.active_call.take(); let recipient_connection_ids = self - .connection_ids_for_user(recipient_user_id) + .connection_ids_for_user(called_user_id) .collect::>(); let room = self .rooms .get_mut(&active_call.room_id) .ok_or_else(|| anyhow!("no such room"))?; room.pending_participant_user_ids - .retain(|user_id| UserId::from_proto(*user_id) != recipient_user_id); + .retain(|user_id| UserId::from_proto(*user_id) != called_user_id); Ok((room, recipient_connection_ids)) } else { Err(anyhow!("user is not being called")) } } - pub fn update_participant_location( - &mut self, - room_id: RoomId, - location: proto::ParticipantLocation, - connection_id: ConnectionId, - ) -> Result<&proto::Room> { - let room = self - .rooms - .get_mut(&room_id) - .ok_or_else(|| anyhow!("no such room"))?; - if let Some(proto::participant_location::Variant::SharedProject(project)) = - location.variant.as_ref() - { - anyhow::ensure!( - room.participants - .iter() - .flat_map(|participant| &participant.projects) - .any(|participant_project| participant_project.id == project.id), - "no such project" - ); - } - - let participant = room - .participants - .iter_mut() - .find(|participant| participant.peer_id == connection_id.0) - .ok_or_else(|| anyhow!("no such room"))?; - participant.location = Some(location); - - Ok(room) - } - - pub fn share_project( - &mut self, - room_id: RoomId, - project_id: ProjectId, - worktrees: Vec, - host_connection_id: ConnectionId, - ) -> Result<&proto::Room> { - let connection = self - .connections - .get_mut(&host_connection_id) - .ok_or_else(|| anyhow!("no such connection"))?; - - let room = self - .rooms - .get_mut(&room_id) - .ok_or_else(|| anyhow!("no such room"))?; - let participant = room - .participants - .iter_mut() - .find(|participant| participant.peer_id == host_connection_id.0) - .ok_or_else(|| anyhow!("no such room"))?; - - connection.projects.insert(project_id); - self.projects.insert( - project_id, - Project { - id: project_id, - room_id, - host_connection_id, - host: Collaborator { - user_id: connection.user_id, - replica_id: 0, - admin: connection.admin, - }, - guests: Default::default(), - 
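`Store::update_participant_location` is superseded by `Db::update_room_participant_location`, called from the handler earlier in this patch. Its body is not included in these hunks; given the `location_kind` and `location_project_id` columns in the schema, it plausibly amounts to:

    UPDATE room_participants
    SET location_kind = $1, location_project_id = $2
    WHERE room_id = $3 AND user_id = $4;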
active_replica_ids: Default::default(), - worktrees: worktrees - .into_iter() - .map(|worktree| { - ( - worktree.id, - Worktree { - root_name: worktree.root_name, - visible: worktree.visible, - ..Default::default() - }, - ) - }) - .collect(), - language_servers: Default::default(), - }, - ); - - participant - .projects - .extend(Self::build_participant_project(project_id, &self.projects)); - - Ok(room) - } - pub fn unshare_project( &mut self, project_id: ProjectId, diff --git a/crates/collab_ui/src/incoming_call_notification.rs b/crates/collab_ui/src/incoming_call_notification.rs index e5c4b27d7e..a51fb4891d 100644 --- a/crates/collab_ui/src/incoming_call_notification.rs +++ b/crates/collab_ui/src/incoming_call_notification.rs @@ -74,7 +74,7 @@ impl IncomingCallNotification { let active_call = ActiveCall::global(cx); if action.accept { let join = active_call.update(cx, |active_call, cx| active_call.accept_incoming(cx)); - let caller_user_id = self.call.caller.id; + let caller_user_id = self.call.calling_user.id; let initial_project_id = self.call.initial_project.as_ref().map(|project| project.id); cx.spawn_weak(|_, mut cx| async move { join.await?; @@ -105,7 +105,7 @@ impl IncomingCallNotification { .as_ref() .unwrap_or(&default_project); Flex::row() - .with_children(self.call.caller.avatar.clone().map(|avatar| { + .with_children(self.call.calling_user.avatar.clone().map(|avatar| { Image::new(avatar) .with_style(theme.caller_avatar) .aligned() @@ -115,7 +115,7 @@ impl IncomingCallNotification { Flex::column() .with_child( Label::new( - self.call.caller.github_login.clone(), + self.call.calling_user.github_login.clone(), theme.caller_username.text.clone(), ) .contained() diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index ded708370d..07e6fae3a8 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -164,9 +164,10 @@ message LeaveRoom { message Room { uint64 id = 1; - repeated Participant participants = 2; - repeated uint64 pending_participant_user_ids = 3; - string live_kit_room = 4; + uint64 version = 2; + repeated Participant participants = 3; + repeated uint64 pending_participant_user_ids = 4; + string live_kit_room = 5; } message Participant { @@ -199,13 +200,13 @@ message ParticipantLocation { message Call { uint64 room_id = 1; - uint64 recipient_user_id = 2; + uint64 called_user_id = 2; optional uint64 initial_project_id = 3; } message IncomingCall { uint64 room_id = 1; - uint64 caller_user_id = 2; + uint64 calling_user_id = 2; repeated uint64 participant_user_ids = 3; optional ParticipantProject initial_project = 4; } @@ -214,7 +215,7 @@ message CallCanceled {} message CancelCall { uint64 room_id = 1; - uint64 recipient_user_id = 2; + uint64 called_user_id = 2; } message DeclineCall { diff --git a/crates/rpc/src/rpc.rs b/crates/rpc/src/rpc.rs index b6aef64677..5ca5711d9c 100644 --- a/crates/rpc/src/rpc.rs +++ b/crates/rpc/src/rpc.rs @@ -6,4 +6,4 @@ pub use conn::Connection; pub use peer::*; mod macros; -pub const PROTOCOL_VERSION: u32 = 39; +pub const PROTOCOL_VERSION: u32 = 40; From 58947c5c7269ec5de2421cd018abe0d254626695 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 11 Nov 2022 14:28:26 +0100 Subject: [PATCH 004/240] Move incoming calls into `Db` --- crates/collab/src/db.rs | 89 +++++++++++++++++++++++++++++++--- crates/collab/src/rpc.rs | 31 +++--------- crates/collab/src/rpc/store.rs | 48 +----------------- 3 files changed, 89 insertions(+), 79 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs 
index b7d6f995b0..506606274d 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -940,7 +940,7 @@ where calling_user_id: UserId, called_user_id: UserId, initial_project_id: Option, - ) -> Result { + ) -> Result<(proto::Room, proto::IncomingCall)> { test_support!(self, { let mut tx = self.pool.begin().await?; sqlx::query( @@ -967,10 +967,67 @@ where .execute(&mut tx) .await?; - self.commit_room_transaction(room_id, tx).await + let room = self.commit_room_transaction(room_id, tx).await?; + let incoming_call = + Self::build_incoming_call(&room, calling_user_id, initial_project_id); + Ok((room, incoming_call)) }) } + pub async fn incoming_call_for_user( + &self, + user_id: UserId, + ) -> Result> { + test_support!(self, { + let mut tx = self.pool.begin().await?; + let call = sqlx::query_as::<_, Call>( + " + SELECT * + FROM calls + WHERE called_user_id = $1 AND answering_connection_id IS NULL + ", + ) + .bind(user_id) + .fetch_optional(&mut tx) + .await?; + + if let Some(call) = call { + let room = self.get_room(call.room_id, &mut tx).await?; + Ok(Some(Self::build_incoming_call( + &room, + call.calling_user_id, + call.initial_project_id, + ))) + } else { + Ok(None) + } + }) + } + + fn build_incoming_call( + room: &proto::Room, + calling_user_id: UserId, + initial_project_id: Option, + ) -> proto::IncomingCall { + proto::IncomingCall { + room_id: room.id, + calling_user_id: calling_user_id.to_proto(), + participant_user_ids: room + .participants + .iter() + .map(|participant| participant.user_id) + .collect(), + initial_project: room.participants.iter().find_map(|participant| { + let initial_project_id = initial_project_id?.to_proto(); + participant + .projects + .iter() + .find(|project| project.id == initial_project_id) + .cloned() + }), + } + } + pub async fn call_failed( &self, room_id: RoomId, @@ -1066,7 +1123,17 @@ where .bind(room_id) .execute(&mut tx) .await?; + let room = self.get_room(room_id, &mut tx).await?; + tx.commit().await?; + Ok(room) + } + + async fn get_room( + &self, + room_id: RoomId, + tx: &mut sqlx::Transaction<'_, D>, + ) -> Result { let room: Room = sqlx::query_as( " SELECT * @@ -1075,7 +1142,7 @@ where ", ) .bind(room_id) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await?; let mut db_participants = @@ -1087,7 +1154,7 @@ where ", ) .bind(room_id) - .fetch(&mut tx); + .fetch(&mut *tx); let mut participants = Vec::new(); let mut pending_participant_user_ids = Vec::new(); @@ -1120,7 +1187,7 @@ where ", ) .bind(room_id) - .fetch(&mut tx); + .fetch(&mut *tx); let mut projects = HashMap::default(); while let Some(entry) = entries.next().await { @@ -1139,9 +1206,6 @@ where participant.projects = projects.into_values().collect(); } - - tx.commit().await?; - Ok(proto::Room { id: room.id.to_proto(), version: room.version as u64, @@ -1566,6 +1630,15 @@ pub struct Room { pub live_kit_room: String, } +#[derive(Clone, Debug, Default, FromRow, PartialEq)] +pub struct Call { + pub room_id: RoomId, + pub calling_user_id: UserId, + pub called_user_id: UserId, + pub answering_connection_id: Option, + pub initial_project_id: Option, +} + id_type!(ProjectId); #[derive(Clone, Debug, Default, FromRow, Serialize, PartialEq)] pub struct Project { diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 75ff703b1f..64affdb825 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -346,11 +346,7 @@ impl Server { { let mut store = this.store().await; - let incoming_call = store.add_connection(connection_id, user_id, user.admin); - if let 
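A detail worth noting in the `get_room` refactor above: the helper borrows the caller's open transaction, and each query reborrows it with `&mut *tx`, so the caller can keep using the transaction (and commit it) afterward. The pattern in isolation, assuming Postgres:

    // Sketch: reusing one open sqlx transaction across helper calls.
    async fn count_rooms(
        tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    ) -> anyhow::Result<i64> {
        let count = sqlx::query_scalar::<_, i64>("SELECT COUNT(*) FROM rooms")
            .fetch_one(&mut *tx) // reborrow; `tx` stays usable after this call
            .await?;
        Ok(count)
    }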
Some(incoming_call) = incoming_call { - this.peer.send(connection_id, incoming_call)?; - } - + store.add_connection(connection_id, user_id, user.admin); this.peer.send(connection_id, store.build_initial_contacts_update(contacts))?; if let Some((code, count)) = invite_code { @@ -360,6 +356,11 @@ impl Server { })?; } } + + if let Some(incoming_call) = this.app_state.db.incoming_call_for_user(user_id).await? { + this.peer.send(connection_id, incoming_call)?; + } + this.update_user_contacts(user_id).await?; let handle_io = handle_io.fuse(); @@ -726,7 +727,7 @@ impl Server { return Err(anyhow!("cannot call a user who isn't a contact"))?; } - let room = self + let (room, incoming_call) = self .app_state .db .call(room_id, calling_user_id, called_user_id, initial_project_id) @@ -734,24 +735,6 @@ impl Server { self.room_updated(&room); self.update_user_contacts(called_user_id).await?; - let incoming_call = proto::IncomingCall { - room_id: room_id.to_proto(), - calling_user_id: calling_user_id.to_proto(), - participant_user_ids: room - .participants - .iter() - .map(|participant| participant.user_id) - .collect(), - initial_project: room.participants.iter().find_map(|participant| { - let initial_project_id = initial_project_id?.to_proto(); - participant - .projects - .iter() - .find(|project| project.id == initial_project_id) - .cloned() - }), - }; - let mut calls = self .store() .await diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index 72da82ea8c..f16910fac5 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -122,12 +122,7 @@ impl Store { } #[instrument(skip(self))] - pub fn add_connection( - &mut self, - connection_id: ConnectionId, - user_id: UserId, - admin: bool, - ) -> Option { + pub fn add_connection(&mut self, connection_id: ConnectionId, user_id: UserId, admin: bool) { self.connections.insert( connection_id, ConnectionState { @@ -138,27 +133,6 @@ impl Store { ); let connected_user = self.connected_users.entry(user_id).or_default(); connected_user.connection_ids.insert(connection_id); - if let Some(active_call) = connected_user.active_call { - if active_call.connection_id.is_some() { - None - } else { - let room = self.room(active_call.room_id)?; - Some(proto::IncomingCall { - room_id: active_call.room_id, - calling_user_id: active_call.calling_user_id.to_proto(), - participant_user_ids: room - .participants - .iter() - .map(|participant| participant.user_id) - .collect(), - initial_project: active_call - .initial_project_id - .and_then(|id| Self::build_participant_project(id, &self.projects)), - }) - } - } else { - None - } } #[instrument(skip(self))] @@ -411,10 +385,6 @@ impl Store { }) } - pub fn room(&self, room_id: RoomId) -> Option<&proto::Room> { - self.rooms.get(&room_id) - } - pub fn rooms(&self) -> &BTreeMap { &self.rooms } @@ -740,22 +710,6 @@ impl Store { Ok(connection_ids) } - fn build_participant_project( - project_id: ProjectId, - projects: &BTreeMap, - ) -> Option { - Some(proto::ParticipantProject { - id: project_id.to_proto(), - worktree_root_names: projects - .get(&project_id)? 
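Moving the pending-call lookup out of `Store::add_connection` and into `Db::incoming_call_for_user` means ring state no longer lives only in memory: a reconnecting client is re-rung from `room_participants` rather than from `connected_users`. The connect-time sequence above, sketched as a free function (names taken from the surrounding code):

    async fn on_connect(
        server: &Server,
        connection_id: ConnectionId,
        user_id: UserId,
    ) -> Result<()> {
        server.store().await.add_connection(connection_id, user_id, false);
        if let Some(call) = server.app_state.db.incoming_call_for_user(user_id).await? {
            server.peer.send(connection_id, call)?; // re-ring from the DB
        }
        Ok(())
    }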
- .worktrees - .values() - .filter(|worktree| worktree.visible) - .map(|worktree| worktree.root_name.clone()) - .collect(), - }) - } - pub fn project_connection_ids( &self, project_id: ProjectId, From cc58607c3b0d23d5907008d0f8eb1e9cfc0a8bab Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 11 Nov 2022 14:43:40 +0100 Subject: [PATCH 005/240] Move `Store::join_room` into `Db::join_room` --- crates/collab/src/db.rs | 85 ++++++++++++++++++++++++++++++++++ crates/collab/src/rpc.rs | 71 +++++++++++++++------------- crates/collab/src/rpc/store.rs | 51 -------------------- 3 files changed, 125 insertions(+), 82 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 506606274d..7cc0dc35fe 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1061,6 +1061,91 @@ where }) } + pub async fn join_room( + &self, + room_id: RoomId, + user_id: UserId, + connection_id: ConnectionId, + ) -> Result { + test_support!(self, { + let mut tx = self.pool.begin().await?; + sqlx::query( + " + UPDATE calls + SET answering_connection_id = $1 + WHERE room_id = $2 AND called_user_id = $3 + RETURNING 1 + ", + ) + .bind(connection_id.0 as i32) + .bind(room_id) + .bind(user_id) + .fetch_one(&mut tx) + .await?; + + sqlx::query( + " + UPDATE room_participants + SET connection_id = $1 + WHERE room_id = $2 AND user_id = $3 + RETURNING 1 + ", + ) + .bind(connection_id.0 as i32) + .bind(room_id) + .bind(user_id) + .fetch_one(&mut tx) + .await?; + + self.commit_room_transaction(room_id, tx).await + }) + + // let connection = self + // .connections + // .get_mut(&connection_id) + // .ok_or_else(|| anyhow!("no such connection"))?; + // let user_id = connection.user_id; + // let recipient_connection_ids = self.connection_ids_for_user(user_id).collect::>(); + + // let connected_user = self + // .connected_users + // .get_mut(&user_id) + // .ok_or_else(|| anyhow!("no such connection"))?; + // let active_call = connected_user + // .active_call + // .as_mut() + // .ok_or_else(|| anyhow!("not being called"))?; + // anyhow::ensure!( + // active_call.room_id == room_id && active_call.connection_id.is_none(), + // "not being called on this room" + // ); + + // let room = self + // .rooms + // .get_mut(&room_id) + // .ok_or_else(|| anyhow!("no such room"))?; + // anyhow::ensure!( + // room.pending_participant_user_ids + // .contains(&user_id.to_proto()), + // anyhow!("no such room") + // ); + // room.pending_participant_user_ids + // .retain(|pending| *pending != user_id.to_proto()); + // room.participants.push(proto::Participant { + // user_id: user_id.to_proto(), + // peer_id: connection_id.0, + // projects: Default::default(), + // location: Some(proto::ParticipantLocation { + // variant: Some(proto::participant_location::Variant::External( + // proto::participant_location::External {}, + // )), + // }), + // }); + // active_call.connection_id = Some(connection_id); + + // Ok((room, recipient_connection_ids)) + } + pub async fn update_room_participant_location( &self, room_id: RoomId, diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 64affdb825..c7c222ee1c 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -607,42 +607,51 @@ impl Server { request: Message, response: Response, ) -> Result<()> { + let room = self + .app_state + .db + .join_room( + RoomId::from_proto(request.payload.id), + request.sender_user_id, + request.sender_connection_id, + ) + .await?; + for recipient_id in self + .store() + .await + 
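The `RETURNING 1` / `fetch_one` pairing in `join_room` above doubles as a guard: if no row matches (the user was never called to that room), `fetch_one` fails with `RowNotFound` and the transaction never commits. The same guard in isolation, with an extra `connection_id IS NULL` clause added here for illustration:

    async fn answer_call(
        tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
        room_id: i32,
        user_id: i32,
        connection_id: i32,
    ) -> anyhow::Result<()> {
        sqlx::query(
            "UPDATE room_participants
             SET connection_id = $1
             WHERE room_id = $2 AND user_id = $3 AND connection_id IS NULL
             RETURNING 1",
        )
        .bind(connection_id)
        .bind(room_id)
        .bind(user_id)
        .fetch_one(&mut *tx) // errors with RowNotFound if nothing matched
        .await?;
        Ok(())
    }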
.connection_ids_for_user(request.sender_user_id) { - let mut store = self.store().await; - let (room, recipient_connection_ids) = - store.join_room(request.payload.id, request.sender_connection_id)?; - for recipient_id in recipient_connection_ids { - self.peer - .send(recipient_id, proto::CallCanceled {}) - .trace_err(); - } + self.peer + .send(recipient_id, proto::CallCanceled {}) + .trace_err(); + } - let live_kit_connection_info = - if let Some(live_kit) = self.app_state.live_kit_client.as_ref() { - if let Some(token) = live_kit - .room_token( - &room.live_kit_room, - &request.sender_connection_id.to_string(), - ) - .trace_err() - { - Some(proto::LiveKitConnectionInfo { - server_url: live_kit.url().into(), - token, - }) - } else { - None - } + let live_kit_connection_info = + if let Some(live_kit) = self.app_state.live_kit_client.as_ref() { + if let Some(token) = live_kit + .room_token( + &room.live_kit_room, + &request.sender_connection_id.to_string(), + ) + .trace_err() + { + Some(proto::LiveKitConnectionInfo { + server_url: live_kit.url().into(), + token, + }) } else { None - }; + } + } else { + None + }; + + self.room_updated(&room); + response.send(proto::JoinRoomResponse { + room: Some(room), + live_kit_connection_info, + })?; - response.send(proto::JoinRoomResponse { - room: Some(room.clone()), - live_kit_connection_info, - })?; - self.room_updated(room); - } self.update_user_contacts(request.sender_user_id).await?; Ok(()) } diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index f16910fac5..dfd534dbe9 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -257,57 +257,6 @@ impl Store { } } - pub fn join_room( - &mut self, - room_id: RoomId, - connection_id: ConnectionId, - ) -> Result<(&proto::Room, Vec)> { - let connection = self - .connections - .get_mut(&connection_id) - .ok_or_else(|| anyhow!("no such connection"))?; - let user_id = connection.user_id; - let recipient_connection_ids = self.connection_ids_for_user(user_id).collect::>(); - - let connected_user = self - .connected_users - .get_mut(&user_id) - .ok_or_else(|| anyhow!("no such connection"))?; - let active_call = connected_user - .active_call - .as_mut() - .ok_or_else(|| anyhow!("not being called"))?; - anyhow::ensure!( - active_call.room_id == room_id && active_call.connection_id.is_none(), - "not being called on this room" - ); - - let room = self - .rooms - .get_mut(&room_id) - .ok_or_else(|| anyhow!("no such room"))?; - anyhow::ensure!( - room.pending_participant_user_ids - .contains(&user_id.to_proto()), - anyhow!("no such room") - ); - room.pending_participant_user_ids - .retain(|pending| *pending != user_id.to_proto()); - room.participants.push(proto::Participant { - user_id: user_id.to_proto(), - peer_id: connection_id.0, - projects: Default::default(), - location: Some(proto::ParticipantLocation { - variant: Some(proto::participant_location::Variant::External( - proto::participant_location::External {}, - )), - }), - }); - active_call.connection_id = Some(connection_id); - - Ok((room, recipient_connection_ids)) - } - pub fn leave_room(&mut self, room_id: RoomId, connection_id: ConnectionId) -> Result { let connection = self .connections From c213c98ea40dca5408f1f4250bc338dc49953905 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 11 Nov 2022 15:22:04 +0100 Subject: [PATCH 006/240] Remove `calls` table and use just `room_participants` --- crates/call/src/room.rs | 7 +- .../20221109000000_test_schema.sql | 16 +- 
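Patch 006 below collapses the `calls` table into `room_participants`: a ring and a joined participant become the same row, distinguished only by `connection_id`. In terms of the statements that follow:

    -- Ringing: insert the called user with no connection yet.
    INSERT INTO room_participants (room_id, user_id, calling_user_id, initial_project_id)
    VALUES ($1, $2, $3, $4);

    -- Answering: fill in the connection on that same row.
    UPDATE room_participants SET connection_id = $5
    WHERE room_id = $1 AND user_id = $2;

    -- Declining, canceling, or failing: delete the unanswered row.
    DELETE FROM room_participants
    WHERE room_id = $1 AND user_id = $2 AND connection_id IS NULL;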
.../20221111092550_reconnection_support.sql | 15 +- crates/collab/src/db.rs | 165 +++------- crates/collab/src/rpc/store.rs | 289 +++++++++--------- crates/rpc/proto/zed.proto | 8 +- 6 files changed, 208 insertions(+), 292 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index 3e55dc4ce9..4f3079e72c 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -294,6 +294,11 @@ impl Room { .position(|participant| Some(participant.user_id) == self.client.user_id()); let local_participant = local_participant_ix.map(|ix| room.participants.swap_remove(ix)); + let pending_participant_user_ids = room + .pending_participants + .iter() + .map(|p| p.user_id) + .collect::>(); let remote_participant_user_ids = room .participants .iter() @@ -303,7 +308,7 @@ impl Room { self.user_store.update(cx, move |user_store, cx| { ( user_store.get_users(remote_participant_user_ids, cx), - user_store.get_users(room.pending_participant_user_ids, cx), + user_store.get_users(pending_participant_user_ids, cx), ) }); self.pending_room_update = Some(cx.spawn(|this, mut cx| async move { diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 9302657523..5b38ebf8b1 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -70,16 +70,8 @@ CREATE TABLE "room_participants" ( "user_id" INTEGER NOT NULL REFERENCES users (id), "connection_id" INTEGER, "location_kind" INTEGER, - "location_project_id" INTEGER REFERENCES projects (id) + "location_project_id" INTEGER REFERENCES projects (id), + "initial_project_id" INTEGER REFERENCES projects (id), + "calling_user_id" INTEGER NOT NULL REFERENCES users (id) ); -CREATE UNIQUE INDEX "index_room_participants_on_user_id_and_room_id" ON "room_participants" ("user_id", "room_id"); - -CREATE TABLE "calls" ( - "id" INTEGER PRIMARY KEY, - "room_id" INTEGER NOT NULL REFERENCES rooms (id), - "calling_user_id" INTEGER NOT NULL REFERENCES users (id), - "called_user_id" INTEGER NOT NULL REFERENCES users (id), - "answering_connection_id" INTEGER, - "initial_project_id" INTEGER REFERENCES projects (id) -); -CREATE UNIQUE INDEX "index_calls_on_called_user_id" ON "calls" ("called_user_id"); +CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id"); diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index 8f932acff3..621512bf43 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -32,16 +32,9 @@ CREATE TABLE IF NOT EXISTS "room_participants" ( "user_id" INTEGER NOT NULL REFERENCES users (id), "connection_id" INTEGER, "location_kind" INTEGER, - "location_project_id" INTEGER REFERENCES projects (id) + "location_project_id" INTEGER REFERENCES projects (id), + "initial_project_id" INTEGER REFERENCES projects (id), + "calling_user_id" INTEGER NOT NULL REFERENCES users (id) ); -CREATE UNIQUE INDEX "index_room_participants_on_user_id_and_room_id" ON "room_participants" ("user_id", "room_id"); +CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id"); -CREATE TABLE IF NOT EXISTS "calls" ( - "id" SERIAL PRIMARY KEY, - "room_id" INTEGER NOT NULL REFERENCES rooms (id), - "calling_user_id" INTEGER NOT NULL REFERENCES users (id), - 
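The index change in these migrations is load-bearing: the old unique index covered ("user_id", "room_id"), while the new one covers "user_id" alone, so the database itself now enforces that a user participates in, or is being called to, at most one room at a time:

    CREATE UNIQUE INDEX "index_room_participants_on_user_id"
        ON "room_participants" ("user_id");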
"called_user_id" INTEGER NOT NULL REFERENCES users (id), - "answering_connection_id" INTEGER, - "initial_project_id" INTEGER REFERENCES projects (id) -); -CREATE UNIQUE INDEX "index_calls_on_called_user_id" ON "calls" ("called_user_id"); diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 7cc0dc35fe..a98621d894 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -907,26 +907,14 @@ where sqlx::query( " - INSERT INTO room_participants (room_id, user_id, connection_id) - VALUES ($1, $2, $3) - ", - ) - .bind(room_id) - .bind(user_id) - .bind(connection_id.0 as i32) - .execute(&mut tx) - .await?; - - sqlx::query( - " - INSERT INTO calls (room_id, calling_user_id, called_user_id, answering_connection_id) + INSERT INTO room_participants (room_id, user_id, connection_id, calling_user_id) VALUES ($1, $2, $3, $4) ", ) .bind(room_id) .bind(user_id) - .bind(user_id) .bind(connection_id.0 as i32) + .bind(user_id) .execute(&mut tx) .await?; @@ -945,31 +933,20 @@ where let mut tx = self.pool.begin().await?; sqlx::query( " - INSERT INTO calls (room_id, calling_user_id, called_user_id, initial_project_id) + INSERT INTO room_participants (room_id, user_id, calling_user_id, initial_project_id) VALUES ($1, $2, $3, $4) ", ) .bind(room_id) - .bind(calling_user_id) .bind(called_user_id) + .bind(calling_user_id) .bind(initial_project_id) .execute(&mut tx) .await?; - sqlx::query( - " - INSERT INTO room_participants (room_id, user_id) - VALUES ($1, $2) - ", - ) - .bind(room_id) - .bind(called_user_id) - .execute(&mut tx) - .await?; - let room = self.commit_room_transaction(room_id, tx).await?; - let incoming_call = - Self::build_incoming_call(&room, calling_user_id, initial_project_id); + let incoming_call = Self::build_incoming_call(&room, called_user_id) + .ok_or_else(|| anyhow!("failed to build incoming call"))?; Ok((room, incoming_call)) }) } @@ -980,24 +957,20 @@ where ) -> Result> { test_support!(self, { let mut tx = self.pool.begin().await?; - let call = sqlx::query_as::<_, Call>( + let room_id = sqlx::query_scalar::<_, RoomId>( " - SELECT * - FROM calls - WHERE called_user_id = $1 AND answering_connection_id IS NULL + SELECT room_id + FROM room_participants + WHERE user_id = $1 AND connection_id IS NULL ", ) .bind(user_id) .fetch_optional(&mut tx) .await?; - if let Some(call) = call { - let room = self.get_room(call.room_id, &mut tx).await?; - Ok(Some(Self::build_incoming_call( - &room, - call.calling_user_id, - call.initial_project_id, - ))) + if let Some(room_id) = room_id { + let room = self.get_room(room_id, &mut tx).await?; + Ok(Self::build_incoming_call(&room, user_id)) } else { Ok(None) } @@ -1006,26 +979,30 @@ where fn build_incoming_call( room: &proto::Room, - calling_user_id: UserId, - initial_project_id: Option, - ) -> proto::IncomingCall { - proto::IncomingCall { + called_user_id: UserId, + ) -> Option { + let pending_participant = room + .pending_participants + .iter() + .find(|participant| participant.user_id == called_user_id.to_proto())?; + + Some(proto::IncomingCall { room_id: room.id, - calling_user_id: calling_user_id.to_proto(), + calling_user_id: pending_participant.calling_user_id, participant_user_ids: room .participants .iter() .map(|participant| participant.user_id) .collect(), initial_project: room.participants.iter().find_map(|participant| { - let initial_project_id = initial_project_id?.to_proto(); + let initial_project_id = pending_participant.initial_project_id?; participant .projects .iter() .find(|project| project.id == initial_project_id) 
.cloned() }), - } + }) } pub async fn call_failed( @@ -1035,17 +1012,6 @@ where ) -> Result { test_support!(self, { let mut tx = self.pool.begin().await?; - sqlx::query( - " - DELETE FROM calls - WHERE room_id = $1 AND called_user_id = $2 - ", - ) - .bind(room_id) - .bind(called_user_id) - .execute(&mut tx) - .await?; - sqlx::query( " DELETE FROM room_participants @@ -1069,20 +1035,6 @@ where ) -> Result { test_support!(self, { let mut tx = self.pool.begin().await?; - sqlx::query( - " - UPDATE calls - SET answering_connection_id = $1 - WHERE room_id = $2 AND called_user_id = $3 - RETURNING 1 - ", - ) - .bind(connection_id.0 as i32) - .bind(room_id) - .bind(user_id) - .fetch_one(&mut tx) - .await?; - sqlx::query( " UPDATE room_participants @@ -1096,54 +1048,8 @@ where .bind(user_id) .fetch_one(&mut tx) .await?; - self.commit_room_transaction(room_id, tx).await }) - - // let connection = self - // .connections - // .get_mut(&connection_id) - // .ok_or_else(|| anyhow!("no such connection"))?; - // let user_id = connection.user_id; - // let recipient_connection_ids = self.connection_ids_for_user(user_id).collect::>(); - - // let connected_user = self - // .connected_users - // .get_mut(&user_id) - // .ok_or_else(|| anyhow!("no such connection"))?; - // let active_call = connected_user - // .active_call - // .as_mut() - // .ok_or_else(|| anyhow!("not being called"))?; - // anyhow::ensure!( - // active_call.room_id == room_id && active_call.connection_id.is_none(), - // "not being called on this room" - // ); - - // let room = self - // .rooms - // .get_mut(&room_id) - // .ok_or_else(|| anyhow!("no such room"))?; - // anyhow::ensure!( - // room.pending_participant_user_ids - // .contains(&user_id.to_proto()), - // anyhow!("no such room") - // ); - // room.pending_participant_user_ids - // .retain(|pending| *pending != user_id.to_proto()); - // room.participants.push(proto::Participant { - // user_id: user_id.to_proto(), - // peer_id: connection_id.0, - // projects: Default::default(), - // location: Some(proto::ParticipantLocation { - // variant: Some(proto::participant_location::Variant::External( - // proto::participant_location::External {}, - // )), - // }), - // }); - // active_call.connection_id = Some(connection_id); - - // Ok((room, recipient_connection_ids)) } pub async fn update_room_participant_location( @@ -1231,9 +1137,9 @@ where .await?; let mut db_participants = - sqlx::query_as::<_, (UserId, Option, Option, Option)>( + sqlx::query_as::<_, (UserId, Option, Option, Option, UserId, Option)>( " - SELECT user_id, connection_id, location_kind, location_project_id + SELECT user_id, connection_id, location_kind, location_project_id, calling_user_id, initial_project_id FROM room_participants WHERE room_id = $1 ", @@ -1242,9 +1148,16 @@ where .fetch(&mut *tx); let mut participants = Vec::new(); - let mut pending_participant_user_ids = Vec::new(); + let mut pending_participants = Vec::new(); while let Some(participant) = db_participants.next().await { - let (user_id, connection_id, _location_kind, _location_project_id) = participant?; + let ( + user_id, + connection_id, + _location_kind, + _location_project_id, + calling_user_id, + initial_project_id, + ) = participant?; if let Some(connection_id) = connection_id { participants.push(proto::Participant { user_id: user_id.to_proto(), @@ -1257,7 +1170,11 @@ where }), }); } else { - pending_participant_user_ids.push(user_id.to_proto()); + pending_participants.push(proto::PendingParticipant { + user_id: user_id.to_proto(), + calling_user_id: 
calling_user_id.to_proto(), + initial_project_id: initial_project_id.map(|id| id.to_proto()), + }); } } drop(db_participants); @@ -1296,7 +1213,7 @@ where version: room.version as u64, live_kit_room: room.live_kit_room, participants, - pending_participant_user_ids, + pending_participants, }) } diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index dfd534dbe9..610a653dc9 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -258,80 +258,81 @@ impl Store { } pub fn leave_room(&mut self, room_id: RoomId, connection_id: ConnectionId) -> Result { - let connection = self - .connections - .get_mut(&connection_id) - .ok_or_else(|| anyhow!("no such connection"))?; - let user_id = connection.user_id; + todo!() + // let connection = self + // .connections + // .get_mut(&connection_id) + // .ok_or_else(|| anyhow!("no such connection"))?; + // let user_id = connection.user_id; - let connected_user = self - .connected_users - .get(&user_id) - .ok_or_else(|| anyhow!("no such connection"))?; - anyhow::ensure!( - connected_user - .active_call - .map_or(false, |call| call.room_id == room_id - && call.connection_id == Some(connection_id)), - "cannot leave a room before joining it" - ); + // let connected_user = self + // .connected_users + // .get(&user_id) + // .ok_or_else(|| anyhow!("no such connection"))?; + // anyhow::ensure!( + // connected_user + // .active_call + // .map_or(false, |call| call.room_id == room_id + // && call.connection_id == Some(connection_id)), + // "cannot leave a room before joining it" + // ); - // Given that users can only join one room at a time, we can safely unshare - // and leave all projects associated with the connection. - let mut unshared_projects = Vec::new(); - let mut left_projects = Vec::new(); - for project_id in connection.projects.clone() { - if let Ok((_, project)) = self.unshare_project(project_id, connection_id) { - unshared_projects.push(project); - } else if let Ok(project) = self.leave_project(project_id, connection_id) { - left_projects.push(project); - } - } - self.connected_users.get_mut(&user_id).unwrap().active_call = None; + // // Given that users can only join one room at a time, we can safely unshare + // // and leave all projects associated with the connection. 
+ // let mut unshared_projects = Vec::new(); + // let mut left_projects = Vec::new(); + // for project_id in connection.projects.clone() { + // if let Ok((_, project)) = self.unshare_project(project_id, connection_id) { + // unshared_projects.push(project); + // } else if let Ok(project) = self.leave_project(project_id, connection_id) { + // left_projects.push(project); + // } + // } + // self.connected_users.get_mut(&user_id).unwrap().active_call = None; - let room = self - .rooms - .get_mut(&room_id) - .ok_or_else(|| anyhow!("no such room"))?; - room.participants - .retain(|participant| participant.peer_id != connection_id.0); + // let room = self + // .rooms + // .get_mut(&room_id) + // .ok_or_else(|| anyhow!("no such room"))?; + // room.participants + // .retain(|participant| participant.peer_id != connection_id.0); - let mut canceled_call_connection_ids = Vec::new(); - room.pending_participant_user_ids - .retain(|pending_participant_user_id| { - if let Some(connected_user) = self - .connected_users - .get_mut(&UserId::from_proto(*pending_participant_user_id)) - { - if let Some(call) = connected_user.active_call.as_ref() { - if call.calling_user_id == user_id { - connected_user.active_call.take(); - canceled_call_connection_ids - .extend(connected_user.connection_ids.iter().copied()); - false - } else { - true - } - } else { - true - } - } else { - true - } - }); + // let mut canceled_call_connection_ids = Vec::new(); + // room.pending_participant_user_ids + // .retain(|pending_participant_user_id| { + // if let Some(connected_user) = self + // .connected_users + // .get_mut(&UserId::from_proto(*pending_participant_user_id)) + // { + // if let Some(call) = connected_user.active_call.as_ref() { + // if call.calling_user_id == user_id { + // connected_user.active_call.take(); + // canceled_call_connection_ids + // .extend(connected_user.connection_ids.iter().copied()); + // false + // } else { + // true + // } + // } else { + // true + // } + // } else { + // true + // } + // }); - let room = if room.participants.is_empty() { - Cow::Owned(self.rooms.remove(&room_id).unwrap()) - } else { - Cow::Borrowed(self.rooms.get(&room_id).unwrap()) - }; + // let room = if room.participants.is_empty() { + // Cow::Owned(self.rooms.remove(&room_id).unwrap()) + // } else { + // Cow::Borrowed(self.rooms.get(&room_id).unwrap()) + // }; - Ok(LeftRoom { - room, - unshared_projects, - left_projects, - canceled_call_connection_ids, - }) + // Ok(LeftRoom { + // room, + // unshared_projects, + // left_projects, + // canceled_call_connection_ids, + // }) } pub fn rooms(&self) -> &BTreeMap { @@ -344,48 +345,49 @@ impl Store { called_user_id: UserId, canceller_connection_id: ConnectionId, ) -> Result<(&proto::Room, HashSet)> { - let canceller_user_id = self.user_id_for_connection(canceller_connection_id)?; - let canceller = self - .connected_users - .get(&canceller_user_id) - .ok_or_else(|| anyhow!("no such connection"))?; - let recipient = self - .connected_users - .get(&called_user_id) - .ok_or_else(|| anyhow!("no such connection"))?; - let canceller_active_call = canceller - .active_call - .as_ref() - .ok_or_else(|| anyhow!("no active call"))?; - let recipient_active_call = recipient - .active_call - .as_ref() - .ok_or_else(|| anyhow!("no active call for recipient"))?; + todo!() + // let canceller_user_id = self.user_id_for_connection(canceller_connection_id)?; + // let canceller = self + // .connected_users + // .get(&canceller_user_id) + // .ok_or_else(|| anyhow!("no such connection"))?; + // let recipient = 
self + // .connected_users + // .get(&called_user_id) + // .ok_or_else(|| anyhow!("no such connection"))?; + // let canceller_active_call = canceller + // .active_call + // .as_ref() + // .ok_or_else(|| anyhow!("no active call"))?; + // let recipient_active_call = recipient + // .active_call + // .as_ref() + // .ok_or_else(|| anyhow!("no active call for recipient"))?; - anyhow::ensure!( - canceller_active_call.room_id == room_id, - "users are on different calls" - ); - anyhow::ensure!( - recipient_active_call.room_id == room_id, - "users are on different calls" - ); - anyhow::ensure!( - recipient_active_call.connection_id.is_none(), - "recipient has already answered" - ); - let room_id = recipient_active_call.room_id; - let room = self - .rooms - .get_mut(&room_id) - .ok_or_else(|| anyhow!("no such room"))?; - room.pending_participant_user_ids - .retain(|user_id| UserId::from_proto(*user_id) != called_user_id); + // anyhow::ensure!( + // canceller_active_call.room_id == room_id, + // "users are on different calls" + // ); + // anyhow::ensure!( + // recipient_active_call.room_id == room_id, + // "users are on different calls" + // ); + // anyhow::ensure!( + // recipient_active_call.connection_id.is_none(), + // "recipient has already answered" + // ); + // let room_id = recipient_active_call.room_id; + // let room = self + // .rooms + // .get_mut(&room_id) + // .ok_or_else(|| anyhow!("no such room"))?; + // room.pending_participant_user_ids + // .retain(|user_id| UserId::from_proto(*user_id) != called_user_id); - let recipient = self.connected_users.get_mut(&called_user_id).unwrap(); - recipient.active_call.take(); + // let recipient = self.connected_users.get_mut(&called_user_id).unwrap(); + // recipient.active_call.take(); - Ok((room, recipient.connection_ids.clone())) + // Ok((room, recipient.connection_ids.clone())) } pub fn decline_call( @@ -393,31 +395,32 @@ impl Store { room_id: RoomId, recipient_connection_id: ConnectionId, ) -> Result<(&proto::Room, Vec)> { - let called_user_id = self.user_id_for_connection(recipient_connection_id)?; - let recipient = self - .connected_users - .get_mut(&called_user_id) - .ok_or_else(|| anyhow!("no such connection"))?; - if let Some(active_call) = recipient.active_call { - anyhow::ensure!(active_call.room_id == room_id, "no such room"); - anyhow::ensure!( - active_call.connection_id.is_none(), - "cannot decline a call after joining room" - ); - recipient.active_call.take(); - let recipient_connection_ids = self - .connection_ids_for_user(called_user_id) - .collect::>(); - let room = self - .rooms - .get_mut(&active_call.room_id) - .ok_or_else(|| anyhow!("no such room"))?; - room.pending_participant_user_ids - .retain(|user_id| UserId::from_proto(*user_id) != called_user_id); - Ok((room, recipient_connection_ids)) - } else { - Err(anyhow!("user is not being called")) - } + todo!() + // let called_user_id = self.user_id_for_connection(recipient_connection_id)?; + // let recipient = self + // .connected_users + // .get_mut(&called_user_id) + // .ok_or_else(|| anyhow!("no such connection"))?; + // if let Some(active_call) = recipient.active_call { + // anyhow::ensure!(active_call.room_id == room_id, "no such room"); + // anyhow::ensure!( + // active_call.connection_id.is_none(), + // "cannot decline a call after joining room" + // ); + // recipient.active_call.take(); + // let recipient_connection_ids = self + // .connection_ids_for_user(called_user_id) + // .collect::>(); + // let room = self + // .rooms + // .get_mut(&active_call.room_id) + // 
.ok_or_else(|| anyhow!("no such room"))?; + // room.pending_participant_user_ids + // .retain(|user_id| UserId::from_proto(*user_id) != called_user_id); + // Ok((room, recipient_connection_ids)) + // } else { + // Err(anyhow!("user is not being called")) + // } } pub fn unshare_project( @@ -767,13 +770,13 @@ impl Store { } for (room_id, room) in &self.rooms { - for pending_user_id in &room.pending_participant_user_ids { - assert!( - self.connected_users - .contains_key(&UserId::from_proto(*pending_user_id)), - "call is active on a user that has disconnected" - ); - } + // for pending_user_id in &room.pending_participant_user_ids { + // assert!( + // self.connected_users + // .contains_key(&UserId::from_proto(*pending_user_id)), + // "call is active on a user that has disconnected" + // ); + // } for participant in &room.participants { assert!( @@ -793,10 +796,10 @@ impl Store { } } - assert!( - !room.pending_participant_user_ids.is_empty() || !room.participants.is_empty(), - "room can't be empty" - ); + // assert!( + // !room.pending_participant_user_ids.is_empty() || !room.participants.is_empty(), + // "room can't be empty" + // ); } for (project_id, project) in &self.projects { diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index 07e6fae3a8..c1daf75823 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -166,7 +166,7 @@ message Room { uint64 id = 1; uint64 version = 2; repeated Participant participants = 3; - repeated uint64 pending_participant_user_ids = 4; + repeated PendingParticipant pending_participants = 4; string live_kit_room = 5; } @@ -177,6 +177,12 @@ message Participant { ParticipantLocation location = 4; } +message PendingParticipant { + uint64 user_id = 1; + uint64 calling_user_id = 2; + optional uint64 initial_project_id = 3; +} + message ParticipantProject { uint64 id = 1; repeated string worktree_root_names = 2; From 0d1d267213b7494730dd9ae6abbdbb00e2bed34d Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 11 Nov 2022 15:41:56 +0100 Subject: [PATCH 007/240] Move `Store::decline_call` to `Db::decline_call` --- crates/collab/src/db.rs | 18 ++++++++++++++++ crates/collab/src/rpc.rs | 28 +++++++++++++++---------- crates/collab/src/rpc/store.rs | 38 +++------------------------------- 3 files changed, 38 insertions(+), 46 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index a98621d894..10f1dd0442 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1027,6 +1027,24 @@ where }) } + pub async fn decline_call(&self, room_id: RoomId, user_id: UserId) -> Result { + test_support!(self, { + let mut tx = self.pool.begin().await?; + sqlx::query( + " + DELETE FROM room_participants + WHERE room_id = $1 AND user_id = $2 AND connection_id IS NULL + ", + ) + .bind(room_id) + .bind(user_id) + .execute(&mut tx) + .await?; + + self.commit_room_transaction(room_id, tx).await + }) + } + pub async fn join_room( &self, room_id: RoomId, diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index c7c222ee1c..652ac5917b 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -800,19 +800,25 @@ impl Server { } async fn decline_call(self: Arc, message: Message) -> Result<()> { - let recipient_user_id = message.sender_user_id; + let room = self + .app_state + .db + .decline_call( + RoomId::from_proto(message.payload.room_id), + message.sender_user_id, + ) + .await?; + for recipient_id in self + .store() + .await + .connection_ids_for_user(message.sender_user_id) { - let mut 
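Because `Db::decline_call` above filters on `connection_id IS NULL`, a decline that races with an accept deletes nothing instead of kicking a participant who just joined. A variant (not in this patch) that would also surface "nothing to decline" as an error, reusing the `RETURNING`-based guard from `join_room`:

    sqlx::query(
        "DELETE FROM room_participants
         WHERE room_id = $1 AND user_id = $2 AND connection_id IS NULL
         RETURNING 1",
    )
    .bind(room_id)
    .bind(user_id)
    .fetch_one(&mut tx) // RowNotFound if there was no pending call
    .await?;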
store = self.store().await; - let (room, recipient_connection_ids) = - store.decline_call(message.payload.room_id, message.sender_connection_id)?; - for recipient_id in recipient_connection_ids { - self.peer - .send(recipient_id, proto::CallCanceled {}) - .trace_err(); - } - self.room_updated(room); + self.peer + .send(recipient_id, proto::CallCanceled {}) + .trace_err(); } - self.update_user_contacts(recipient_user_id).await?; + self.room_updated(&room); + self.update_user_contacts(message.sender_user_id).await?; Ok(()) } diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index 610a653dc9..d64464f601 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -162,8 +162,9 @@ impl Store { result.room = Some(Cow::Owned(left_room.room.into_owned())); result.canceled_call_connection_ids = left_room.canceled_call_connection_ids; } else if connected_user.connection_ids.len() == 1 { - let (room, _) = self.decline_call(room_id, connection_id)?; - result.room = Some(Cow::Owned(room.clone())); + todo!() + // let (room, _) = self.decline_call(room_id, connection_id)?; + // result.room = Some(Cow::Owned(room.clone())); } } @@ -390,39 +391,6 @@ impl Store { // Ok((room, recipient.connection_ids.clone())) } - pub fn decline_call( - &mut self, - room_id: RoomId, - recipient_connection_id: ConnectionId, - ) -> Result<(&proto::Room, Vec)> { - todo!() - // let called_user_id = self.user_id_for_connection(recipient_connection_id)?; - // let recipient = self - // .connected_users - // .get_mut(&called_user_id) - // .ok_or_else(|| anyhow!("no such connection"))?; - // if let Some(active_call) = recipient.active_call { - // anyhow::ensure!(active_call.room_id == room_id, "no such room"); - // anyhow::ensure!( - // active_call.connection_id.is_none(), - // "cannot decline a call after joining room" - // ); - // recipient.active_call.take(); - // let recipient_connection_ids = self - // .connection_ids_for_user(called_user_id) - // .collect::>(); - // let room = self - // .rooms - // .get_mut(&active_call.room_id) - // .ok_or_else(|| anyhow!("no such room"))?; - // room.pending_participant_user_ids - // .retain(|user_id| UserId::from_proto(*user_id) != called_user_id); - // Ok((room, recipient_connection_ids)) - // } else { - // Err(anyhow!("user is not being called")) - // } - } - pub fn unshare_project( &mut self, project_id: ProjectId, From 1135aeecb8b9640111bc1e0c5566c8b3b64b7e4e Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 11 Nov 2022 16:59:54 +0100 Subject: [PATCH 008/240] WIP: Move `Store::leave_room` to `Db::leave_room` --- .../20221109000000_test_schema.sql | 4 +- .../20221111092550_reconnection_support.sql | 4 +- crates/collab/src/db.rs | 112 ++++++++++++++++++ crates/collab/src/rpc.rs | 73 ++++++------ crates/collab/src/rpc/store.rs | 96 +-------------- 5 files changed, 162 insertions(+), 127 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 5b38ebf8b1..44495f16ce 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -48,7 +48,7 @@ CREATE TABLE "projects" ( CREATE TABLE "project_collaborators" ( "id" INTEGER PRIMARY KEY, - "project_id" INTEGER NOT NULL REFERENCES projects (id), + "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "connection_id" INTEGER NOT NULL, "user_id" INTEGER NOT NULL, "replica_id" INTEGER 
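The `ON DELETE CASCADE` added to `project_collaborators` and `worktrees` in these migrations is what lets the new `Db::leave_room` (below) drop a departing host's projects with a single statement, taking the dependent collaborator and worktree rows with them:

    DELETE FROM projects WHERE room_id = $1 AND user_id = $2;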
NOT NULL, @@ -58,7 +58,7 @@ CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborato CREATE TABLE "worktrees" ( "id" INTEGER NOT NULL, - "project_id" INTEGER NOT NULL REFERENCES projects (id), + "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "root_name" VARCHAR NOT NULL, PRIMARY KEY(project_id, id) ); diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index 621512bf43..ed6da2b7b1 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -10,7 +10,7 @@ ALTER TABLE "projects" CREATE TABLE "project_collaborators" ( "id" SERIAL PRIMARY KEY, - "project_id" INTEGER NOT NULL REFERENCES projects (id), + "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "connection_id" INTEGER NOT NULL, "user_id" INTEGER NOT NULL, "replica_id" INTEGER NOT NULL, @@ -20,7 +20,7 @@ CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborato CREATE TABLE IF NOT EXISTS "worktrees" ( "id" INTEGER NOT NULL, - "project_id" INTEGER NOT NULL REFERENCES projects (id), + "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "root_name" VARCHAR NOT NULL, PRIMARY KEY(project_id, id) ); diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 10f1dd0442..fc5e3c242b 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1070,6 +1070,97 @@ where }) } + pub async fn leave_room( + &self, + room_id: RoomId, + connection_id: ConnectionId, + ) -> Result { + test_support!(self, { + let mut tx = self.pool.begin().await?; + + // Leave room. + let user_id: UserId = sqlx::query_scalar( + " + DELETE FROM room_participants + WHERE room_id = $1 AND connection_id = $2 + RETURNING user_id + ", + ) + .bind(room_id) + .bind(connection_id.0 as i32) + .fetch_one(&mut tx) + .await?; + + // Cancel pending calls initiated by the leaving user. 
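The query just below binds `room_id` and `connection_id`, but its only placeholder, `$1`, is compared against `calling_user_id`; the commit is still marked WIP. A version whose binds line up with its placeholders would presumably be:

    DELETE FROM room_participants
    WHERE room_id = $1 AND calling_user_id = $2 AND connection_id IS NULL
    RETURNING user_id
    -- bound with .bind(room_id).bind(user_id)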
+ let canceled_calls_to_user_ids: Vec = sqlx::query_scalar( + " + DELETE FROM room_participants + WHERE calling_user_id = $1 AND connection_id IS NULL + RETURNING user_id + ", + ) + .bind(room_id) + .bind(connection_id.0 as i32) + .fetch_all(&mut tx) + .await?; + + let mut project_collaborators = sqlx::query_as::<_, ProjectCollaborator>( + " + SELECT project_collaborators.* + FROM projects, project_collaborators + WHERE + projects.room_id = $1 AND + projects.user_id = $2 AND + projects.id = project_collaborators.project_id + ", + ) + .bind(room_id) + .bind(user_id) + .fetch(&mut tx); + + let mut left_projects = HashMap::default(); + while let Some(collaborator) = project_collaborators.next().await { + let collaborator = collaborator?; + let left_project = + left_projects + .entry(collaborator.project_id) + .or_insert(LeftProject { + id: collaborator.project_id, + host_user_id: Default::default(), + connection_ids: Default::default(), + }); + + let collaborator_connection_id = ConnectionId(collaborator.connection_id as u32); + if collaborator_connection_id != connection_id || collaborator.is_host { + left_project.connection_ids.push(collaborator_connection_id); + } + + if collaborator.is_host { + left_project.host_user_id = collaborator.user_id; + } + } + drop(project_collaborators); + + sqlx::query( + " + DELETE FROM projects + WHERE room_id = $1 AND user_id = $2 + ", + ) + .bind(room_id) + .bind(user_id) + .execute(&mut tx) + .await?; + + let room = self.commit_room_transaction(room_id, tx).await?; + Ok(LeftRoom { + room, + left_projects, + canceled_calls_to_user_ids, + }) + }) + } + pub async fn update_room_participant_location( &self, room_id: RoomId, @@ -1667,6 +1758,27 @@ pub struct Project { pub unregistered: bool, } +#[derive(Clone, Debug, Default, FromRow, PartialEq)] +pub struct ProjectCollaborator { + pub project_id: ProjectId, + pub connection_id: i32, + pub user_id: UserId, + pub replica_id: i32, + pub is_host: bool, +} + +pub struct LeftProject { + pub id: ProjectId, + pub host_user_id: UserId, + pub connection_ids: Vec, +} + +pub struct LeftRoom { + pub room: proto::Room, + pub left_projects: HashMap, + pub canceled_calls_to_user_ids: Vec, +} + #[derive(Clone, Debug, PartialEq, Eq)] pub enum Contact { Accepted { diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 652ac5917b..1221964601 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -658,14 +658,20 @@ impl Server { async fn leave_room(self: Arc, message: Message) -> Result<()> { let mut contacts_to_update = HashSet::default(); - let room_left; - { - let mut store = self.store().await; - let left_room = store.leave_room(message.payload.id, message.sender_connection_id)?; - contacts_to_update.insert(message.sender_user_id); - for project in left_room.unshared_projects { - for connection_id in project.connection_ids() { + let left_room = self + .app_state + .db + .leave_room( + RoomId::from_proto(message.payload.id), + message.sender_connection_id, + ) + .await?; + contacts_to_update.insert(message.sender_user_id); + + for project in left_room.left_projects.into_values() { + if project.host_user_id == message.sender_user_id { + for connection_id in project.connection_ids { self.peer.send( connection_id, proto::UnshareProject { @@ -673,41 +679,42 @@ impl Server { }, )?; } - } - - for project in left_room.left_projects { - if project.remove_collaborator { - for connection_id in project.connection_ids { - self.peer.send( - connection_id, - proto::RemoveProjectCollaborator { - project_id: 
project.id.to_proto(), - peer_id: message.sender_connection_id.0, - }, - )?; - } - + } else { + for connection_id in project.connection_ids { self.peer.send( - message.sender_connection_id, - proto::UnshareProject { + connection_id, + proto::RemoveProjectCollaborator { project_id: project.id.to_proto(), + peer_id: message.sender_connection_id.0, }, )?; } - } - self.room_updated(&left_room.room); - room_left = self.room_left(&left_room.room, message.sender_connection_id); - - for connection_id in left_room.canceled_call_connection_ids { - self.peer - .send(connection_id, proto::CallCanceled {}) - .trace_err(); - contacts_to_update.extend(store.user_id_for_connection(connection_id).ok()); + self.peer.send( + message.sender_connection_id, + proto::UnshareProject { + project_id: project.id.to_proto(), + }, + )?; } } - room_left.await.trace_err(); + self.room_updated(&left_room.room); + { + let store = self.store().await; + for user_id in left_room.canceled_calls_to_user_ids { + for connection_id in store.connection_ids_for_user(user_id) { + self.peer + .send(connection_id, proto::CallCanceled {}) + .trace_err(); + } + contacts_to_update.insert(user_id); + } + } + + self.room_left(&left_room.room, message.sender_connection_id) + .await + .trace_err(); for user_id in contacts_to_update { self.update_user_contacts(user_id).await?; } diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index d64464f601..4ea2c7b38e 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -90,13 +90,6 @@ pub struct LeftProject { pub remove_collaborator: bool, } -pub struct LeftRoom<'a> { - pub room: Cow<'a, proto::Room>, - pub unshared_projects: Vec, - pub left_projects: Vec, - pub canceled_call_connection_ids: Vec, -} - #[derive(Copy, Clone)] pub struct Metrics { pub connections: usize, @@ -156,11 +149,12 @@ impl Store { if let Some(active_call) = connected_user.active_call.as_ref() { let room_id = active_call.room_id; if active_call.connection_id == Some(connection_id) { - let left_room = self.leave_room(room_id, connection_id)?; - result.hosted_projects = left_room.unshared_projects; - result.guest_projects = left_room.left_projects; - result.room = Some(Cow::Owned(left_room.room.into_owned())); - result.canceled_call_connection_ids = left_room.canceled_call_connection_ids; + todo!() + // let left_room = self.leave_room(room_id, connection_id)?; + // result.hosted_projects = left_room.unshared_projects; + // result.guest_projects = left_room.left_projects; + // result.room = Some(Cow::Owned(left_room.room.into_owned())); + // result.canceled_call_connection_ids = left_room.canceled_call_connection_ids; } else if connected_user.connection_ids.len() == 1 { todo!() // let (room, _) = self.decline_call(room_id, connection_id)?; @@ -258,84 +252,6 @@ impl Store { } } - pub fn leave_room(&mut self, room_id: RoomId, connection_id: ConnectionId) -> Result { - todo!() - // let connection = self - // .connections - // .get_mut(&connection_id) - // .ok_or_else(|| anyhow!("no such connection"))?; - // let user_id = connection.user_id; - - // let connected_user = self - // .connected_users - // .get(&user_id) - // .ok_or_else(|| anyhow!("no such connection"))?; - // anyhow::ensure!( - // connected_user - // .active_call - // .map_or(false, |call| call.room_id == room_id - // && call.connection_id == Some(connection_id)), - // "cannot leave a room before joining it" - // ); - - // // Given that users can only join one room at a time, we can safely unshare - // // and leave all 
projects associated with the connection. - // let mut unshared_projects = Vec::new(); - // let mut left_projects = Vec::new(); - // for project_id in connection.projects.clone() { - // if let Ok((_, project)) = self.unshare_project(project_id, connection_id) { - // unshared_projects.push(project); - // } else if let Ok(project) = self.leave_project(project_id, connection_id) { - // left_projects.push(project); - // } - // } - // self.connected_users.get_mut(&user_id).unwrap().active_call = None; - - // let room = self - // .rooms - // .get_mut(&room_id) - // .ok_or_else(|| anyhow!("no such room"))?; - // room.participants - // .retain(|participant| participant.peer_id != connection_id.0); - - // let mut canceled_call_connection_ids = Vec::new(); - // room.pending_participant_user_ids - // .retain(|pending_participant_user_id| { - // if let Some(connected_user) = self - // .connected_users - // .get_mut(&UserId::from_proto(*pending_participant_user_id)) - // { - // if let Some(call) = connected_user.active_call.as_ref() { - // if call.calling_user_id == user_id { - // connected_user.active_call.take(); - // canceled_call_connection_ids - // .extend(connected_user.connection_ids.iter().copied()); - // false - // } else { - // true - // } - // } else { - // true - // } - // } else { - // true - // } - // }); - - // let room = if room.participants.is_empty() { - // Cow::Owned(self.rooms.remove(&room_id).unwrap()) - // } else { - // Cow::Borrowed(self.rooms.get(&room_id).unwrap()) - // }; - - // Ok(LeftRoom { - // room, - // unshared_projects, - // left_projects, - // canceled_call_connection_ids, - // }) - } - pub fn rooms(&self) -> &BTreeMap { &self.rooms } From 9f39dcf7cf1dc589efe93b4815976ffc95118cb1 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 11 Nov 2022 18:53:23 +0100 Subject: [PATCH 009/240] Get basic calls test passing again --- crates/collab/src/db.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index fc5e3c242b..e092bd9501 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1110,7 +1110,7 @@ where FROM projects, project_collaborators WHERE projects.room_id = $1 AND - projects.user_id = $2 AND + projects.host_user_id = $2 AND projects.id = project_collaborators.project_id ", ) @@ -1144,7 +1144,7 @@ where sqlx::query( " DELETE FROM projects - WHERE room_id = $1 AND user_id = $2 + WHERE room_id = $1 AND host_user_id = $2 ", ) .bind(room_id) From 11caba4a4c8b536fb6c0d3d0eea3f08c57cfce67 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 11 Nov 2022 18:54:08 +0100 Subject: [PATCH 010/240] Remove stray log statement --- crates/collab/src/integration_tests.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 6d3cff1718..3a4c2368e8 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -71,8 +71,6 @@ async fn test_basic_calls( deterministic.forbid_parking(); let mut server = TestServer::start(cx_a.background()).await; - let start = std::time::Instant::now(); - let client_a = server.create_client(cx_a, "user_a").await; let client_b = server.create_client(cx_b, "user_b").await; let client_c = server.create_client(cx_c, "user_c").await; @@ -258,8 +256,6 @@ async fn test_basic_calls( pending: Default::default() } ); - - eprintln!("finished test {:?}", start.elapsed()); } #[gpui::test(iterations = 10)] From 2145965749b0edff3972fff1124d31cf3ff55348 
Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 11 Nov 2022 19:36:20 +0100 Subject: [PATCH 011/240] WIP --- .../20221109000000_test_schema.sql | 3 +- .../20221111092550_reconnection_support.sql | 4 +- crates/collab/src/db.rs | 13 +----- crates/collab/src/rpc.rs | 8 +--- crates/collab/src/rpc/store.rs | 41 +------------------ 5 files changed, 10 insertions(+), 59 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 44495f16ce..477cc5d607 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -68,10 +68,11 @@ CREATE TABLE "room_participants" ( "id" INTEGER PRIMARY KEY, "room_id" INTEGER NOT NULL REFERENCES rooms (id), "user_id" INTEGER NOT NULL REFERENCES users (id), - "connection_id" INTEGER, + "answering_connection_id" INTEGER, "location_kind" INTEGER, "location_project_id" INTEGER REFERENCES projects (id), "initial_project_id" INTEGER REFERENCES projects (id), "calling_user_id" INTEGER NOT NULL REFERENCES users (id) + "calling_connection_id" INTEGER NOT NULL ); CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id"); diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index ed6da2b7b1..48e6b50b06 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -34,7 +34,7 @@ CREATE TABLE IF NOT EXISTS "room_participants" ( "location_kind" INTEGER, "location_project_id" INTEGER REFERENCES projects (id), "initial_project_id" INTEGER REFERENCES projects (id), - "calling_user_id" INTEGER NOT NULL REFERENCES users (id) + "calling_user_id" INTEGER NOT NULL REFERENCES users (id), + "calling_connection_id" INTEGER NOT NULL ); CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id"); - diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index e092bd9501..3ffdc602da 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1342,7 +1342,7 @@ where INSERT INTO projects (host_user_id, room_id) VALUES ($1) RETURNING id - ", + ", ) .bind(user_id) .bind(room_id) @@ -1354,7 +1354,7 @@ where sqlx::query( " INSERT INTO worktrees (id, project_id, root_name) - ", + ", ) .bind(worktree.id as i32) .bind(project_id) @@ -1741,15 +1741,6 @@ pub struct Room { pub live_kit_room: String, } -#[derive(Clone, Debug, Default, FromRow, PartialEq)] -pub struct Call { - pub room_id: RoomId, - pub calling_user_id: UserId, - pub called_user_id: UserId, - pub answering_connection_id: Option, - pub initial_project_id: Option, -} - id_type!(ProjectId); #[derive(Clone, Debug, Default, FromRow, Serialize, PartialEq)] pub struct Project { diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 1221964601..5b713226b1 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -431,12 +431,8 @@ impl Server { let mut contacts_to_update = HashSet::default(); let mut room_left = None; { - let mut store = self.store().await; - - #[cfg(test)] - let removed_connection = store.remove_connection(connection_id).unwrap(); - #[cfg(not(test))] - let removed_connection = store.remove_connection(connection_id)?; + let removed_connection = self.store().await.remove_connection(connection_id)?; + 
self.app_state.db.remove_connection(connection_id); for project in removed_connection.hosted_projects { projects_to_unshare.push(project.id); diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index 4ea2c7b38e..de44492409 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -72,16 +72,6 @@ pub struct Worktree { pub type ReplicaId = u16; -#[derive(Default)] -pub struct RemovedConnectionState<'a> { - pub user_id: UserId, - pub hosted_projects: Vec, - pub guest_projects: Vec, - pub contact_ids: HashSet, - pub room: Option>, - pub canceled_call_connection_ids: Vec, -} - pub struct LeftProject { pub id: ProjectId, pub host_user_id: UserId, @@ -129,47 +119,20 @@ impl Store { } #[instrument(skip(self))] - pub fn remove_connection( - &mut self, - connection_id: ConnectionId, - ) -> Result { + pub fn remove_connection(&mut self, connection_id: ConnectionId) -> Result<()> { let connection = self .connections .get_mut(&connection_id) .ok_or_else(|| anyhow!("no such connection"))?; let user_id = connection.user_id; - - let mut result = RemovedConnectionState { - user_id, - ..Default::default() - }; - - let connected_user = self.connected_users.get(&user_id).unwrap(); - if let Some(active_call) = connected_user.active_call.as_ref() { - let room_id = active_call.room_id; - if active_call.connection_id == Some(connection_id) { - todo!() - // let left_room = self.leave_room(room_id, connection_id)?; - // result.hosted_projects = left_room.unshared_projects; - // result.guest_projects = left_room.left_projects; - // result.room = Some(Cow::Owned(left_room.room.into_owned())); - // result.canceled_call_connection_ids = left_room.canceled_call_connection_ids; - } else if connected_user.connection_ids.len() == 1 { - todo!() - // let (room, _) = self.decline_call(room_id, connection_id)?; - // result.room = Some(Cow::Owned(room.clone())); - } - } - let connected_user = self.connected_users.get_mut(&user_id).unwrap(); connected_user.connection_ids.remove(&connection_id); if connected_user.connection_ids.is_empty() { self.connected_users.remove(&user_id); } self.connections.remove(&connection_id).unwrap(); - - Ok(result) + Ok(()) } pub fn user_id_for_connection(&self, connection_id: ConnectionId) -> Result { From 1da5be6e8fee5b42752100cd8729ccf2355f47b8 Mon Sep 17 00:00:00 2001 From: Joseph Lyons Date: Sat, 12 Nov 2022 21:39:08 -0500 Subject: [PATCH 012/240] Update release urls to match new zed.dev url format --- .github/workflows/release_actions.yml | 2 +- crates/auto_update/src/auto_update.rs | 9 ++++++++- crates/zed/src/main.rs | 15 --------------- 3 files changed, 9 insertions(+), 17 deletions(-) diff --git a/.github/workflows/release_actions.yml b/.github/workflows/release_actions.yml index 65866baf7f..3866ee6c7b 100644 --- a/.github/workflows/release_actions.yml +++ b/.github/workflows/release_actions.yml @@ -14,7 +14,7 @@ jobs: content: | 📣 Zed ${{ github.event.release.tag_name }} was just released! - Restart your Zed or head to https://zed.dev/releases to grab it. + Restart your Zed or head to https://zed.dev/releases/latest to grab it. 
```md
### Changelog
diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs
index d73523c8bd..bda45053b1 100644
--- a/crates/auto_update/src/auto_update.rs
+++ b/crates/auto_update/src/auto_update.rs
@@ -70,7 +70,14 @@ pub fn init(db: project::Db, http_client: Arc, cx: &mut MutableA
         }
     });
     cx.add_global_action(move |_: &ViewReleaseNotes, cx| {
-        cx.platform().open_url(&format!("{server_url}/releases"));
+        let latest_release_url = if cx.has_global::<ReleaseChannel>()
+            && *cx.global::<ReleaseChannel>() == ReleaseChannel::Preview
+        {
+            format!("{server_url}/releases/preview/latest")
+        } else {
+            format!("{server_url}/releases/latest")
+        };
+        cx.platform().open_url(&latest_release_url);
     });
     cx.add_action(UpdateNotification::dismiss);
 }
diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs
index e849632a2d..c6862e66e4 100644
--- a/crates/zed/src/main.rs
+++ b/crates/zed/src/main.rs
@@ -213,21 +213,6 @@ fn init_paths() {
     std::fs::create_dir_all(&*zed::paths::LANGUAGES_DIR).expect("could not create languages path");
     std::fs::create_dir_all(&*zed::paths::DB_DIR).expect("could not create database path");
     std::fs::create_dir_all(&*zed::paths::LOGS_DIR).expect("could not create logs path");
-
-    // Copy setting files from legacy locations. TODO: remove this after a few releases.
-    thread::spawn(|| {
-        if std::fs::metadata(&*zed::paths::legacy::SETTINGS).is_ok()
-            && std::fs::metadata(&*zed::paths::SETTINGS).is_err()
-        {
-            std::fs::copy(&*zed::paths::legacy::SETTINGS, &*zed::paths::SETTINGS).log_err();
-        }
-
-        if std::fs::metadata(&*zed::paths::legacy::KEYMAP).is_ok()
-            && std::fs::metadata(&*zed::paths::KEYMAP).is_err()
-        {
-            std::fs::copy(&*zed::paths::legacy::KEYMAP, &*zed::paths::KEYMAP).log_err();
-        }
-    });
 }
 
 fn init_logger() {
From 9902211af18da0979055de6d1c611e58973deed9 Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Mon, 14 Nov 2022 10:13:36 +0100
Subject: [PATCH 013/240] Leave room when connection is dropped

---
 crates/call/src/room.rs                       |   4 +-
 .../20221109000000_test_schema.sql            |   5 +-
 .../20221111092550_reconnection_support.sql   |   3 +-
 crates/collab/src/db.rs                       | 187 ++++++++--------
 crates/collab/src/rpc.rs                      | 206 +++++++-----------
 crates/collab/src/rpc/store.rs                |  10 +-
 crates/rpc/proto/zed.proto                    |   4 +-
 7 files changed, 184 insertions(+), 235 deletions(-)

diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs
index 4f3079e72c..0ecd6082d6 100644
--- a/crates/call/src/room.rs
+++ b/crates/call/src/room.rs
@@ -53,7 +53,7 @@ impl Entity for Room {
 
     fn release(&mut self, _: &mut MutableAppContext) {
         if self.status.is_online() {
-            self.client.send(proto::LeaveRoom { id: self.id }).log_err();
+            self.client.send(proto::LeaveRoom {}).log_err();
         }
     }
 }
@@ -241,7 +241,7 @@ impl Room {
         self.participant_user_ids.clear();
         self.subscriptions.clear();
         self.live_kit.take();
-        self.client.send(proto::LeaveRoom { id: self.id })?;
+        self.client.send(proto::LeaveRoom {})?;
         Ok(())
     }
 
diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql
index 477cc5d607..2cef514e5a 100644
--- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql
+++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql
@@ -43,7 +43,8 @@ CREATE TABLE "rooms" (
 CREATE TABLE "projects" (
     "id" INTEGER PRIMARY KEY,
     "room_id" INTEGER REFERENCES rooms (id),
-    "host_user_id" INTEGER REFERENCES users (id) NOT NULL
+    "host_user_id" INTEGER REFERENCES users (id) NOT NULL,
+    "host_connection_id" INTEGER NOT NULL
 );
 
 CREATE TABLE
"project_collaborators" ( @@ -72,7 +73,7 @@ CREATE TABLE "room_participants" ( "location_kind" INTEGER, "location_project_id" INTEGER REFERENCES projects (id), "initial_project_id" INTEGER REFERENCES projects (id), - "calling_user_id" INTEGER NOT NULL REFERENCES users (id) + "calling_user_id" INTEGER NOT NULL REFERENCES users (id), "calling_connection_id" INTEGER NOT NULL ); CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id"); diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index 48e6b50b06..7b82ce9ce7 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -6,6 +6,7 @@ CREATE TABLE IF NOT EXISTS "rooms" ( ALTER TABLE "projects" ADD "room_id" INTEGER REFERENCES rooms (id), + ADD "host_connection_id" INTEGER, DROP COLUMN "unregistered"; CREATE TABLE "project_collaborators" ( @@ -30,7 +31,7 @@ CREATE TABLE IF NOT EXISTS "room_participants" ( "id" SERIAL PRIMARY KEY, "room_id" INTEGER NOT NULL REFERENCES rooms (id), "user_id" INTEGER NOT NULL REFERENCES users (id), - "connection_id" INTEGER, + "answering_connection_id" INTEGER, "location_kind" INTEGER, "location_project_id" INTEGER REFERENCES projects (id), "initial_project_id" INTEGER REFERENCES projects (id), diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 3ffdc602da..f32bdf96ef 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -907,14 +907,15 @@ where sqlx::query( " - INSERT INTO room_participants (room_id, user_id, connection_id, calling_user_id) - VALUES ($1, $2, $3, $4) + INSERT INTO room_participants (room_id, user_id, answering_connection_id, calling_user_id, calling_connection_id) + VALUES ($1, $2, $3, $4, $5) ", ) .bind(room_id) .bind(user_id) .bind(connection_id.0 as i32) .bind(user_id) + .bind(connection_id.0 as i32) .execute(&mut tx) .await?; @@ -926,6 +927,7 @@ where &self, room_id: RoomId, calling_user_id: UserId, + calling_connection_id: ConnectionId, called_user_id: UserId, initial_project_id: Option, ) -> Result<(proto::Room, proto::IncomingCall)> { @@ -933,13 +935,14 @@ where let mut tx = self.pool.begin().await?; sqlx::query( " - INSERT INTO room_participants (room_id, user_id, calling_user_id, initial_project_id) - VALUES ($1, $2, $3, $4) + INSERT INTO room_participants (room_id, user_id, calling_user_id, calling_connection_id, initial_project_id) + VALUES ($1, $2, $3, $4, $5) ", ) .bind(room_id) .bind(called_user_id) .bind(calling_user_id) + .bind(calling_connection_id.0 as i32) .bind(initial_project_id) .execute(&mut tx) .await?; @@ -961,7 +964,7 @@ where " SELECT room_id FROM room_participants - WHERE user_id = $1 AND connection_id IS NULL + WHERE user_id = $1 AND answering_connection_id IS NULL ", ) .bind(user_id) @@ -1033,7 +1036,7 @@ where sqlx::query( " DELETE FROM room_participants - WHERE room_id = $1 AND user_id = $2 AND connection_id IS NULL + WHERE room_id = $1 AND user_id = $2 AND answering_connection_id IS NULL ", ) .bind(room_id) @@ -1056,7 +1059,7 @@ where sqlx::query( " UPDATE room_participants - SET connection_id = $1 + SET answering_connection_id = $1 WHERE room_id = $2 AND user_id = $3 RETURNING 1 ", @@ -1070,101 +1073,100 @@ where }) } - pub async fn leave_room( - &self, - room_id: RoomId, - connection_id: ConnectionId, - ) -> Result { + pub async fn leave_room(&self, connection_id: ConnectionId) -> Result> { test_support!(self, { 
let mut tx = self.pool.begin().await?;

            // Leave room.
            let room_id = sqlx::query_scalar::<_, RoomId>(
                "
                DELETE FROM room_participants
                WHERE answering_connection_id = $1
                RETURNING room_id
                ",
            )
            .bind(connection_id.0 as i32)
            .fetch_optional(&mut tx)
            .await?;

            if let Some(room_id) = room_id {
                // Cancel pending calls initiated by the leaving user.
                let canceled_calls_to_user_ids: Vec<UserId> = sqlx::query_scalar(
                    "
                    DELETE FROM room_participants
                    WHERE calling_connection_id = $1 AND answering_connection_id IS NULL
                    RETURNING user_id
                    ",
                )
                .bind(connection_id.0 as i32)
                .fetch_all(&mut tx)
                .await?;

                let mut project_collaborators = sqlx::query_as::<_, ProjectCollaborator>(
                    "
                    SELECT project_collaborators.*
                    FROM projects, project_collaborators
                    WHERE
                        projects.room_id = $1 AND
                        projects.host_connection_id = $2 AND
                        projects.id = project_collaborators.project_id
                    ",
                )
                .bind(room_id)
                .bind(connection_id.0 as i32)
                .fetch(&mut tx);

                let mut left_projects = HashMap::default();
                while let Some(collaborator) = project_collaborators.next().await {
                    let collaborator = collaborator?;
                    let left_project =
                        left_projects
                            .entry(collaborator.project_id)
                            .or_insert(LeftProject {
                                id: collaborator.project_id,
                                host_user_id: Default::default(),
                                connection_ids: Default::default(),
                            });

                    let collaborator_connection_id =
                        ConnectionId(collaborator.connection_id as u32);
                    if collaborator_connection_id != connection_id || collaborator.is_host {
                        left_project.connection_ids.push(collaborator_connection_id);
                    }

                    if collaborator.is_host {
                        left_project.host_user_id = collaborator.user_id;
                    }
                }
                drop(project_collaborators);

                sqlx::query(
                    "
DELETE FROM projects - WHERE room_id = $1 AND host_user_id = $2 - ", - ) - .bind(room_id) - .bind(user_id) - .execute(&mut tx) - .await?; - - let room = self.commit_room_transaction(room_id, tx).await?; - Ok(LeftRoom { - room, - left_projects, - canceled_calls_to_user_ids, - }) }) } pub async fn update_room_participant_location( &self, room_id: RoomId, - user_id: UserId, + connection_id: ConnectionId, location: proto::ParticipantLocation, ) -> Result { test_support!(self, { @@ -1194,13 +1196,13 @@ where " UPDATE room_participants SET location_kind = $1 AND location_project_id = $2 - WHERE room_id = $1 AND user_id = $2 + WHERE room_id = $3 AND answering_connection_id = $4 ", ) .bind(location_kind) .bind(location_project_id) .bind(room_id) - .bind(user_id) + .bind(connection_id.0 as i32) .execute(&mut tx) .await?; @@ -1248,7 +1250,7 @@ where let mut db_participants = sqlx::query_as::<_, (UserId, Option, Option, Option, UserId, Option)>( " - SELECT user_id, connection_id, location_kind, location_project_id, calling_user_id, initial_project_id + SELECT user_id, answering_connection_id, location_kind, location_project_id, calling_user_id, initial_project_id FROM room_participants WHERE room_id = $1 ", @@ -1261,16 +1263,16 @@ where while let Some(participant) = db_participants.next().await { let ( user_id, - connection_id, + answering_connection_id, _location_kind, _location_project_id, calling_user_id, initial_project_id, ) = participant?; - if let Some(connection_id) = connection_id { + if let Some(answering_connection_id) = answering_connection_id { participants.push(proto::Participant { user_id: user_id.to_proto(), - peer_id: connection_id as u32, + peer_id: answering_connection_id as u32, projects: Default::default(), location: Some(proto::ParticipantLocation { variant: Some(proto::participant_location::Variant::External( @@ -1339,12 +1341,13 @@ where let mut tx = self.pool.begin().await?; let project_id = sqlx::query_scalar( " - INSERT INTO projects (host_user_id, room_id) - VALUES ($1) + INSERT INTO projects (host_user_id, host_connection_id, room_id) + VALUES ($1, $2, $3) RETURNING id ", ) .bind(user_id) + .bind(connection_id.0 as i32) .bind(room_id) .fetch_one(&mut tx) .await @@ -1366,11 +1369,11 @@ where sqlx::query( " INSERT INTO project_collaborators ( - project_id, - connection_id, - user_id, - replica_id, - is_host + project_id, + connection_id, + user_id, + replica_id, + is_host ) VALUES ($1, $2, $3, $4, $5) ", diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 5b713226b1..e69393c642 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -415,7 +415,7 @@ impl Server { drop(foreground_message_handlers); tracing::info!(%user_id, %login, %connection_id, %address, "signing out"); - if let Err(error) = this.sign_out(connection_id).await { + if let Err(error) = this.sign_out(connection_id, user_id).await { tracing::error!(%user_id, %login, %connection_id, %address, ?error, "error signing out"); } @@ -424,69 +424,15 @@ impl Server { } #[instrument(skip(self), err)] - async fn sign_out(self: &mut Arc, connection_id: ConnectionId) -> Result<()> { + async fn sign_out( + self: &mut Arc, + connection_id: ConnectionId, + user_id: UserId, + ) -> Result<()> { self.peer.disconnect(connection_id); - - let mut projects_to_unshare = Vec::new(); - let mut contacts_to_update = HashSet::default(); - let mut room_left = None; - { - let removed_connection = self.store().await.remove_connection(connection_id)?; - self.app_state.db.remove_connection(connection_id); - - 
for project in removed_connection.hosted_projects { - projects_to_unshare.push(project.id); - broadcast(connection_id, project.guests.keys().copied(), |conn_id| { - self.peer.send( - conn_id, - proto::UnshareProject { - project_id: project.id.to_proto(), - }, - ) - }); - } - - for project in removed_connection.guest_projects { - broadcast(connection_id, project.connection_ids, |conn_id| { - self.peer.send( - conn_id, - proto::RemoveProjectCollaborator { - project_id: project.id.to_proto(), - peer_id: connection_id.0, - }, - ) - }); - } - - if let Some(room) = removed_connection.room { - self.room_updated(&room); - room_left = Some(self.room_left(&room, connection_id)); - } - - contacts_to_update.insert(removed_connection.user_id); - for connection_id in removed_connection.canceled_call_connection_ids { - self.peer - .send(connection_id, proto::CallCanceled {}) - .trace_err(); - contacts_to_update.extend(store.user_id_for_connection(connection_id).ok()); - } - }; - - if let Some(room_left) = room_left { - room_left.await.trace_err(); - } - - for user_id in contacts_to_update { - self.update_user_contacts(user_id).await.trace_err(); - } - - for project_id in projects_to_unshare { - self.app_state - .db - .unshare_project(project_id) - .await - .trace_err(); - } + self.store().await.remove_connection(connection_id)?; + self.leave_room_for_connection(connection_id, user_id) + .await?; Ok(()) } @@ -653,66 +599,90 @@ impl Server { } async fn leave_room(self: Arc, message: Message) -> Result<()> { + self.leave_room_for_connection(message.sender_connection_id, message.sender_user_id) + .await + } + + async fn leave_room_for_connection( + self: &Arc, + connection_id: ConnectionId, + user_id: UserId, + ) -> Result<()> { let mut contacts_to_update = HashSet::default(); - let left_room = self - .app_state - .db - .leave_room( - RoomId::from_proto(message.payload.id), - message.sender_connection_id, - ) - .await?; - contacts_to_update.insert(message.sender_user_id); + let Some(left_room) = self.app_state.db.leave_room(connection_id).await? 
else { + return Err(anyhow!("no room to leave"))?; + }; + contacts_to_update.insert(user_id); for project in left_room.left_projects.into_values() { - if project.host_user_id == message.sender_user_id { + if project.host_user_id == user_id { for connection_id in project.connection_ids { - self.peer.send( + self.peer + .send( + connection_id, + proto::UnshareProject { + project_id: project.id.to_proto(), + }, + ) + .trace_err(); + } + } else { + for connection_id in project.connection_ids { + self.peer + .send( + connection_id, + proto::RemoveProjectCollaborator { + project_id: project.id.to_proto(), + peer_id: connection_id.0, + }, + ) + .trace_err(); + } + + self.peer + .send( connection_id, proto::UnshareProject { project_id: project.id.to_proto(), }, - )?; - } - } else { - for connection_id in project.connection_ids { - self.peer.send( - connection_id, - proto::RemoveProjectCollaborator { - project_id: project.id.to_proto(), - peer_id: message.sender_connection_id.0, - }, - )?; - } - - self.peer.send( - message.sender_connection_id, - proto::UnshareProject { - project_id: project.id.to_proto(), - }, - )?; + ) + .trace_err(); } } self.room_updated(&left_room.room); { let store = self.store().await; - for user_id in left_room.canceled_calls_to_user_ids { - for connection_id in store.connection_ids_for_user(user_id) { + for canceled_user_id in left_room.canceled_calls_to_user_ids { + for connection_id in store.connection_ids_for_user(canceled_user_id) { self.peer .send(connection_id, proto::CallCanceled {}) .trace_err(); } - contacts_to_update.insert(user_id); + contacts_to_update.insert(canceled_user_id); } } - self.room_left(&left_room.room, message.sender_connection_id) - .await - .trace_err(); - for user_id in contacts_to_update { - self.update_user_contacts(user_id).await?; + for contact_user_id in contacts_to_update { + self.update_user_contacts(contact_user_id).await?; + } + + if let Some(live_kit) = self.app_state.live_kit_client.as_ref() { + live_kit + .remove_participant( + left_room.room.live_kit_room.clone(), + connection_id.to_string(), + ) + .await + .trace_err(); + + if left_room.room.participants.is_empty() { + live_kit + .delete_room(left_room.room.live_kit_room) + .await + .trace_err(); + } } Ok(()) @@ -725,6 +695,7 @@ impl Server { ) -> Result<()> { let room_id = RoomId::from_proto(request.payload.room_id); let calling_user_id = request.sender_user_id; + let calling_connection_id = request.sender_connection_id; let called_user_id = UserId::from_proto(request.payload.called_user_id); let initial_project_id = request .payload @@ -742,7 +713,13 @@ impl Server { let (room, incoming_call) = self .app_state .db - .call(room_id, calling_user_id, called_user_id, initial_project_id) + .call( + room_id, + calling_user_id, + calling_connection_id, + called_user_id, + initial_project_id, + ) .await?; self.room_updated(&room); self.update_user_contacts(called_user_id).await?; @@ -838,7 +815,7 @@ impl Server { let room = self .app_state .db - .update_room_participant_location(room_id, request.sender_user_id, location) + .update_room_participant_location(room_id, request.sender_connection_id, location) .await?; self.room_updated(&room); response.send(proto::Ack {})?; @@ -858,29 +835,6 @@ impl Server { } } - fn room_left( - &self, - room: &proto::Room, - connection_id: ConnectionId, - ) -> impl Future> { - let client = self.app_state.live_kit_client.clone(); - let room_name = room.live_kit_room.clone(); - let participant_count = room.participants.len(); - async move { - if let 
Some(client) = client { - client - .remove_participant(room_name.clone(), connection_id.to_string()) - .await?; - - if participant_count == 0 { - client.delete_room(room_name).await?; - } - } - - Ok(()) - } - } - async fn share_project( self: Arc, request: Message, diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index de44492409..3896b8f7a4 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -3,7 +3,7 @@ use anyhow::{anyhow, Result}; use collections::{btree_map, BTreeMap, BTreeSet, HashMap, HashSet}; use rpc::{proto, ConnectionId}; use serde::Serialize; -use std::{borrow::Cow, mem, path::PathBuf, str}; +use std::{mem, path::PathBuf, str}; use tracing::instrument; pub type RoomId = u64; @@ -135,14 +135,6 @@ impl Store { Ok(()) } - pub fn user_id_for_connection(&self, connection_id: ConnectionId) -> Result { - Ok(self - .connections - .get(&connection_id) - .ok_or_else(|| anyhow!("unknown connection"))? - .user_id) - } - pub fn connection_ids_for_user( &self, user_id: UserId, diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index c1daf75823..a93c0b593f 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -158,9 +158,7 @@ message JoinRoomResponse { optional LiveKitConnectionInfo live_kit_connection_info = 2; } -message LeaveRoom { - uint64 id = 1; -} +message LeaveRoom {} message Room { uint64 id = 1; From 0310e27347cc19e45198df270842cab2b668f34b Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 14 Nov 2022 10:53:11 +0100 Subject: [PATCH 014/240] Fix query errors in `Db::share_project` --- crates/collab/src/db.rs | 11 +++++++---- crates/collab/src/rpc.rs | 3 ++- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index f32bdf96ef..d329bf23e5 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1296,10 +1296,11 @@ where SELECT projects.id, worktrees.root_name FROM projects LEFT JOIN worktrees ON projects.id = worktrees.project_id - WHERE room_id = $1 AND host_user_id = $2 + WHERE room_id = $1 AND host_connection_id = $2 ", ) .bind(room_id) + .bind(participant.peer_id as i32) .fetch(&mut *tx); let mut projects = HashMap::default(); @@ -1341,14 +1342,14 @@ where let mut tx = self.pool.begin().await?; let project_id = sqlx::query_scalar( " - INSERT INTO projects (host_user_id, host_connection_id, room_id) + INSERT INTO projects (room_id, host_user_id, host_connection_id) VALUES ($1, $2, $3) RETURNING id ", ) + .bind(room_id) .bind(user_id) .bind(connection_id.0 as i32) - .bind(room_id) .fetch_one(&mut tx) .await .map(ProjectId)?; @@ -1356,7 +1357,8 @@ where for worktree in worktrees { sqlx::query( " - INSERT INTO worktrees (id, project_id, root_name) + INSERT INTO worktrees (id, project_id, root_name) + VALUES ($1, $2, $3) ", ) .bind(worktree.id as i32) @@ -1387,6 +1389,7 @@ where .await?; let room = self.commit_room_transaction(room_id, tx).await?; + dbg!(&room); Ok((project_id, room)) }) } diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index e69393c642..038724c25a 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -849,7 +849,8 @@ impl Server { RoomId::from_proto(request.payload.room_id), &request.payload.worktrees, ) - .await?; + .await + .unwrap(); response.send(proto::ShareProjectResponse { project_id: project_id.to_proto(), })?; From 59e8600e4c43e412f6088eb80dfe4a78f5fb3969 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 14 Nov 2022 11:12:23 +0100 
Subject: [PATCH 015/240] Implement `Db::cancel_call`

---
 crates/collab/src/db.rs        | 29 ++++++++++++++++++-
 crates/collab/src/rpc.rs       | 44 ++++++++++++++---------------
 crates/collab/src/rpc/store.rs | 51 ----------------------------------
 3 files changed, 50 insertions(+), 74 deletions(-)

diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs
index d329bf23e5..50a333bced 100644
--- a/crates/collab/src/db.rs
+++ b/crates/collab/src/db.rs
@@ -1048,6 +1048,30 @@ where
         })
     }
 
+    pub async fn cancel_call(
+        &self,
+        room_id: RoomId,
+        calling_connection_id: ConnectionId,
+        called_user_id: UserId,
+    ) -> Result<proto::Room> {
+        test_support!(self, {
+            let mut tx = self.pool.begin().await?;
+            sqlx::query(
+                "
+                DELETE FROM room_participants
+                WHERE room_id = $1 AND user_id = $2 AND calling_connection_id = $3 AND answering_connection_id IS NULL
+                ",
+            )
+            .bind(room_id)
+            .bind(called_user_id)
+            .bind(calling_connection_id.0 as i32)
+            .execute(&mut tx)
+            .await?;
+
+            self.commit_room_transaction(room_id, tx).await
+        })
+    }
+
     pub async fn join_room(
         &self,
         room_id: RoomId,
@@ -1073,7 +1097,10 @@ where
         })
     }
 
-    pub async fn leave_room(&self, connection_id: ConnectionId) -> Result<Option<LeftRoom>> {
+    pub async fn leave_room_for_connection(
+        &self,
+        connection_id: ConnectionId,
+    ) -> Result<Option<LeftRoom>> {
         test_support!(self, {
             let mut tx = self.pool.begin().await?;
 
diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs
index 038724c25a..3e519d91ae 100644
--- a/crates/collab/src/rpc.rs
+++ b/crates/collab/src/rpc.rs
@@ -558,13 +558,13 @@ impl Server {
                 request.sender_connection_id,
             )
             .await?;
-        for recipient_id in self
+        for connection_id in self
             .store()
             .await
             .connection_ids_for_user(request.sender_user_id)
         {
             self.peer
-                .send(recipient_id, proto::CallCanceled {})
+                .send(connection_id, proto::CallCanceled {})
                 .trace_err();
         }
 
@@ -610,7 +610,7 @@ impl Server {
     ) -> Result<()> {
         let mut contacts_to_update = HashSet::default();
 
-        let Some(left_room) = self.app_state.db.leave_room(connection_id).await? else {
+        let Some(left_room) = self.app_state.db.leave_room_for_connection(connection_id).await? else {
             return Err(anyhow!("no room to leave"))?;
         };
         contacts_to_update.insert(user_id);
@@ -751,7 +751,7 @@ impl Server {
         self.room_updated(&room);
         self.update_user_contacts(called_user_id).await?;
 
-        Err(anyhow!("failed to ring call recipient"))?
+        Err(anyhow!("failed to ring user"))?
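+        // This tail is reached only when none of the called user's
+        // connections acknowledged the ring above.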
} async fn cancel_call( @@ -759,23 +759,23 @@ impl Server { request: Message, response: Response, ) -> Result<()> { - let recipient_user_id = UserId::from_proto(request.payload.called_user_id); - { - let mut store = self.store().await; - let (room, recipient_connection_ids) = store.cancel_call( - request.payload.room_id, - recipient_user_id, - request.sender_connection_id, - )?; - for recipient_id in recipient_connection_ids { - self.peer - .send(recipient_id, proto::CallCanceled {}) - .trace_err(); - } - self.room_updated(room); - response.send(proto::Ack {})?; + let called_user_id = UserId::from_proto(request.payload.called_user_id); + let room_id = RoomId::from_proto(request.payload.room_id); + + let room = self + .app_state + .db + .cancel_call(room_id, request.sender_connection_id, called_user_id) + .await?; + for connection_id in self.store().await.connection_ids_for_user(called_user_id) { + self.peer + .send(connection_id, proto::CallCanceled {}) + .trace_err(); } - self.update_user_contacts(recipient_user_id).await?; + self.room_updated(&room); + response.send(proto::Ack {})?; + + self.update_user_contacts(called_user_id).await?; Ok(()) } @@ -788,13 +788,13 @@ impl Server { message.sender_user_id, ) .await?; - for recipient_id in self + for connection_id in self .store() .await .connection_ids_for_user(message.sender_user_id) { self.peer - .send(recipient_id, proto::CallCanceled {}) + .send(connection_id, proto::CallCanceled {}) .trace_err(); } self.room_updated(&room); diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index 3896b8f7a4..a9793e9fb6 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -211,57 +211,6 @@ impl Store { &self.rooms } - pub fn cancel_call( - &mut self, - room_id: RoomId, - called_user_id: UserId, - canceller_connection_id: ConnectionId, - ) -> Result<(&proto::Room, HashSet)> { - todo!() - // let canceller_user_id = self.user_id_for_connection(canceller_connection_id)?; - // let canceller = self - // .connected_users - // .get(&canceller_user_id) - // .ok_or_else(|| anyhow!("no such connection"))?; - // let recipient = self - // .connected_users - // .get(&called_user_id) - // .ok_or_else(|| anyhow!("no such connection"))?; - // let canceller_active_call = canceller - // .active_call - // .as_ref() - // .ok_or_else(|| anyhow!("no active call"))?; - // let recipient_active_call = recipient - // .active_call - // .as_ref() - // .ok_or_else(|| anyhow!("no active call for recipient"))?; - - // anyhow::ensure!( - // canceller_active_call.room_id == room_id, - // "users are on different calls" - // ); - // anyhow::ensure!( - // recipient_active_call.room_id == room_id, - // "users are on different calls" - // ); - // anyhow::ensure!( - // recipient_active_call.connection_id.is_none(), - // "recipient has already answered" - // ); - // let room_id = recipient_active_call.room_id; - // let room = self - // .rooms - // .get_mut(&room_id) - // .ok_or_else(|| anyhow!("no such room"))?; - // room.pending_participant_user_ids - // .retain(|user_id| UserId::from_proto(*user_id) != called_user_id); - - // let recipient = self.connected_users.get_mut(&called_user_id).unwrap(); - // recipient.active_call.take(); - - // Ok((room, recipient.connection_ids.clone())) - } - pub fn unshare_project( &mut self, project_id: ProjectId, From 65c5adff058c757142bcd8041806015b08d114a3 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 14 Nov 2022 11:32:26 +0100 Subject: [PATCH 016/240] Automatically decline call when user 
drops their last connection --- crates/collab/src/db.rs | 30 ++++++++++++++++++++---------- crates/collab/src/rpc.rs | 33 +++++++++++++++++++++++++-------- 2 files changed, 45 insertions(+), 18 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 50a333bced..39bc2775a0 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1030,19 +1030,26 @@ where }) } - pub async fn decline_call(&self, room_id: RoomId, user_id: UserId) -> Result { + pub async fn decline_call( + &self, + expected_room_id: Option, + user_id: UserId, + ) -> Result { test_support!(self, { let mut tx = self.pool.begin().await?; - sqlx::query( + let room_id = sqlx::query_scalar( " DELETE FROM room_participants - WHERE room_id = $1 AND user_id = $2 AND answering_connection_id IS NULL + WHERE user_id = $1 AND answering_connection_id IS NULL + RETURNING room_id ", ) - .bind(room_id) .bind(user_id) - .execute(&mut tx) + .fetch_one(&mut tx) .await?; + if expected_room_id.map_or(false, |expected_room_id| expected_room_id != room_id) { + return Err(anyhow!("declining call on unexpected room"))?; + } self.commit_room_transaction(room_id, tx).await }) @@ -1050,23 +1057,26 @@ where pub async fn cancel_call( &self, - room_id: RoomId, + expected_room_id: Option, calling_connection_id: ConnectionId, called_user_id: UserId, ) -> Result { test_support!(self, { let mut tx = self.pool.begin().await?; - sqlx::query( + let room_id = sqlx::query_scalar( " DELETE FROM room_participants - WHERE room_id = $1 AND user_id = $2 AND calling_connection_id = $3 AND answering_connection_id IS NULL + WHERE user_id = $1 AND calling_connection_id = $2 AND answering_connection_id IS NULL + RETURNING room_id ", ) - .bind(room_id) .bind(called_user_id) .bind(calling_connection_id.0 as i32) - .execute(&mut tx) + .fetch_one(&mut tx) .await?; + if expected_room_id.map_or(false, |expected_room_id| expected_room_id != room_id) { + return Err(anyhow!("canceling call on unexpected room"))?; + } self.commit_room_transaction(room_id, tx).await }) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 3e519d91ae..d9c8c616f3 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -430,9 +430,29 @@ impl Server { user_id: UserId, ) -> Result<()> { self.peer.disconnect(connection_id); - self.store().await.remove_connection(connection_id)?; + let decline_calls = { + let mut store = self.store().await; + store.remove_connection(connection_id)?; + let mut connections = store.connection_ids_for_user(user_id); + connections.next().is_none() + }; + self.leave_room_for_connection(connection_id, user_id) - .await?; + .await + .trace_err(); + if decline_calls { + if let Some(room) = self + .app_state + .db + .decline_call(None, user_id) + .await + .trace_err() + { + self.room_updated(&room); + } + } + + self.update_user_contacts(user_id).await?; Ok(()) } @@ -761,11 +781,10 @@ impl Server { ) -> Result<()> { let called_user_id = UserId::from_proto(request.payload.called_user_id); let room_id = RoomId::from_proto(request.payload.room_id); - let room = self .app_state .db - .cancel_call(room_id, request.sender_connection_id, called_user_id) + .cancel_call(Some(room_id), request.sender_connection_id, called_user_id) .await?; for connection_id in self.store().await.connection_ids_for_user(called_user_id) { self.peer @@ -780,13 +799,11 @@ impl Server { } async fn decline_call(self: Arc, message: Message) -> Result<()> { + let room_id = RoomId::from_proto(message.payload.room_id); let room = self .app_state .db - 
.decline_call( - RoomId::from_proto(message.payload.room_id), - message.sender_user_id, - ) + .decline_call(Some(room_id), message.sender_user_id) .await?; for connection_id in self .store() From 40073f6100acff471c903c011695117f9751a3d1 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 14 Nov 2022 15:32:49 +0100 Subject: [PATCH 017/240] Wait for acknowledgment before sending the next project update --- crates/call/src/room.rs | 2 + crates/collab/src/db.rs | 16 +- crates/collab/src/integration_tests.rs | 12 +- crates/collab/src/rpc.rs | 6 +- crates/collab_ui/src/collab_ui.rs | 1 - crates/project/src/project.rs | 218 ++++++++----------------- crates/project/src/worktree.rs | 19 --- crates/rpc/proto/zed.proto | 7 +- crates/rpc/src/proto.rs | 9 +- crates/workspace/src/workspace.rs | 8 +- crates/zed/src/main.rs | 4 +- 11 files changed, 94 insertions(+), 208 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index 0ecd6082d6..c1b0dc191d 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -287,6 +287,8 @@ impl Room { mut room: proto::Room, cx: &mut ModelContext, ) -> Result<()> { + // TODO: honor room version. + // Filter ourselves out from the room's participants. let local_participant_ix = room .participants diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 39bc2775a0..a12985b94b 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1145,8 +1145,8 @@ where FROM projects, project_collaborators WHERE projects.room_id = $1 AND - projects.host_connection_id = $2 AND - projects.id = project_collaborators.project_id + projects.id = project_collaborators.project_id AND + project_collaborators.connection_id = $2 ", ) .bind(room_id) @@ -1370,9 +1370,9 @@ where pub async fn share_project( &self, + room_id: RoomId, user_id: UserId, connection_id: ConnectionId, - room_id: RoomId, worktrees: &[proto::WorktreeMetadata], ) -> Result<(ProjectId, proto::Room)> { test_support!(self, { @@ -1426,11 +1426,19 @@ where .await?; let room = self.commit_room_transaction(room_id, tx).await?; - dbg!(&room); Ok((project_id, room)) }) } + // pub async fn join_project( + // &self, + // user_id: UserId, + // connection_id: ConnectionId, + // project_id: ProjectId, + // ) -> Result<(Project, ReplicaId)> { + // todo!() + // } + pub async fn unshare_project(&self, project_id: ProjectId) -> Result<()> { todo!() // test_support!(self, { diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 3a4c2368e8..b54f03ce53 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -30,9 +30,7 @@ use language::{ use live_kit_client::MacOSDisplay; use lsp::{self, FakeLanguageServer}; use parking_lot::Mutex; -use project::{ - search::SearchQuery, DiagnosticSummary, Project, ProjectPath, ProjectStore, WorktreeId, -}; +use project::{search::SearchQuery, DiagnosticSummary, Project, ProjectPath, WorktreeId}; use rand::prelude::*; use serde_json::json; use settings::{Formatter, Settings}; @@ -2280,7 +2278,6 @@ async fn test_leaving_project( project_id, client_b.client.clone(), client_b.user_store.clone(), - client_b.project_store.clone(), client_b.language_registry.clone(), FakeFs::new(cx.background()), cx, @@ -5792,11 +5789,9 @@ impl TestServer { let fs = FakeFs::new(cx.background()); let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http, cx)); - let project_store = cx.add_model(|_| ProjectStore::new()); let app_state = Arc::new(workspace::AppState { client: 
client.clone(), user_store: user_store.clone(), - project_store: project_store.clone(), languages: Arc::new(LanguageRegistry::new(Task::ready(()))), themes: ThemeRegistry::new((), cx.font_cache()), fs: fs.clone(), @@ -5823,7 +5818,6 @@ impl TestServer { remote_projects: Default::default(), next_root_dir_id: 0, user_store, - project_store, fs, language_registry: Arc::new(LanguageRegistry::test()), buffers: Default::default(), @@ -5929,7 +5923,6 @@ struct TestClient { remote_projects: Vec>, next_root_dir_id: usize, pub user_store: ModelHandle, - pub project_store: ModelHandle, language_registry: Arc, fs: Arc, buffers: HashMap, HashSet>>, @@ -5999,7 +5992,6 @@ impl TestClient { Project::local( self.client.clone(), self.user_store.clone(), - self.project_store.clone(), self.language_registry.clone(), self.fs.clone(), cx, @@ -6027,7 +6019,6 @@ impl TestClient { host_project_id, self.client.clone(), self.user_store.clone(), - self.project_store.clone(), self.language_registry.clone(), FakeFs::new(cx.background()), cx, @@ -6157,7 +6148,6 @@ impl TestClient { remote_project_id, client.client.clone(), client.user_store.clone(), - client.project_store.clone(), client.language_registry.clone(), FakeFs::new(cx.background()), cx.to_async(), diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index d9c8c616f3..bed6ebf9cd 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -151,7 +151,7 @@ impl Server { .add_message_handler(Server::unshare_project) .add_request_handler(Server::join_project) .add_message_handler(Server::leave_project) - .add_message_handler(Server::update_project) + .add_request_handler(Server::update_project) .add_request_handler(Server::update_worktree) .add_message_handler(Server::start_language_server) .add_message_handler(Server::update_language_server) @@ -861,9 +861,9 @@ impl Server { .app_state .db .share_project( + RoomId::from_proto(request.payload.room_id), request.sender_user_id, request.sender_connection_id, - RoomId::from_proto(request.payload.room_id), &request.payload.worktrees, ) .await @@ -1084,6 +1084,7 @@ impl Server { async fn update_project( self: Arc, request: Message, + response: Response, ) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); { @@ -1108,6 +1109,7 @@ impl Server { }, ); self.room_updated(room); + response.send(proto::Ack {})?; }; Ok(()) diff --git a/crates/collab_ui/src/collab_ui.rs b/crates/collab_ui/src/collab_ui.rs index f5f508ce5b..dc8a171698 100644 --- a/crates/collab_ui/src/collab_ui.rs +++ b/crates/collab_ui/src/collab_ui.rs @@ -43,7 +43,6 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { project_id, app_state.client.clone(), app_state.user_store.clone(), - app_state.project_store.clone(), app_state.languages.clone(), app_state.fs.clone(), cx.clone(), diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 3c28f6b512..d01571f44b 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -70,10 +70,6 @@ pub trait Item: Entity { fn entry_id(&self, cx: &AppContext) -> Option; } -pub struct ProjectStore { - projects: Vec>, -} - // Language server state is stored across 3 collections: // language_servers => // a mapping from unique server id to LanguageServerState which can either be a task for a @@ -102,7 +98,6 @@ pub struct Project { next_entry_id: Arc, next_diagnostic_group_id: usize, user_store: ModelHandle, - project_store: ModelHandle, fs: Arc, client_state: Option, collaborators: HashMap, @@ -152,6 +147,8 @@ enum 
WorktreeHandle { enum ProjectClientState { Local { remote_id: u64, + metadata_changed: watch::Sender<()>, + _maintain_metadata: Task<()>, _detect_unshare: Task>, }, Remote { @@ -376,7 +373,7 @@ impl Project { client.add_model_message_handler(Self::handle_start_language_server); client.add_model_message_handler(Self::handle_update_language_server); client.add_model_message_handler(Self::handle_remove_collaborator); - client.add_model_message_handler(Self::handle_update_project); + client.add_model_message_handler(Self::handle_project_updated); client.add_model_message_handler(Self::handle_unshare_project); client.add_model_message_handler(Self::handle_create_buffer_for_peer); client.add_model_message_handler(Self::handle_update_buffer_file); @@ -412,46 +409,39 @@ impl Project { pub fn local( client: Arc, user_store: ModelHandle, - project_store: ModelHandle, languages: Arc, fs: Arc, cx: &mut MutableAppContext, ) -> ModelHandle { - cx.add_model(|cx: &mut ModelContext| { - let handle = cx.weak_handle(); - project_store.update(cx, |store, cx| store.add_project(handle, cx)); - - Self { - worktrees: Default::default(), - collaborators: Default::default(), - opened_buffers: Default::default(), - shared_buffers: Default::default(), - incomplete_buffers: Default::default(), - loading_buffers: Default::default(), - loading_local_worktrees: Default::default(), - buffer_snapshots: Default::default(), - client_state: None, - opened_buffer: watch::channel(), - client_subscriptions: Vec::new(), - _subscriptions: vec![cx.observe_global::(Self::on_settings_changed)], - _maintain_buffer_languages: Self::maintain_buffer_languages(&languages, cx), - active_entry: None, - languages, - client, - user_store, - project_store, - fs, - next_entry_id: Default::default(), - next_diagnostic_group_id: Default::default(), - language_servers: Default::default(), - language_server_ids: Default::default(), - language_server_statuses: Default::default(), - last_workspace_edits_by_language_server: Default::default(), - language_server_settings: Default::default(), - buffers_being_formatted: Default::default(), - next_language_server_id: 0, - nonce: StdRng::from_entropy().gen(), - } + cx.add_model(|cx: &mut ModelContext| Self { + worktrees: Default::default(), + collaborators: Default::default(), + opened_buffers: Default::default(), + shared_buffers: Default::default(), + incomplete_buffers: Default::default(), + loading_buffers: Default::default(), + loading_local_worktrees: Default::default(), + buffer_snapshots: Default::default(), + client_state: None, + opened_buffer: watch::channel(), + client_subscriptions: Vec::new(), + _subscriptions: vec![cx.observe_global::(Self::on_settings_changed)], + _maintain_buffer_languages: Self::maintain_buffer_languages(&languages, cx), + active_entry: None, + languages, + client, + user_store, + fs, + next_entry_id: Default::default(), + next_diagnostic_group_id: Default::default(), + language_servers: Default::default(), + language_server_ids: Default::default(), + language_server_statuses: Default::default(), + last_workspace_edits_by_language_server: Default::default(), + language_server_settings: Default::default(), + buffers_being_formatted: Default::default(), + next_language_server_id: 0, + nonce: StdRng::from_entropy().gen(), }) } @@ -459,7 +449,6 @@ impl Project { remote_id: u64, client: Arc, user_store: ModelHandle, - project_store: ModelHandle, languages: Arc, fs: Arc, mut cx: AsyncAppContext, @@ -482,9 +471,6 @@ impl Project { } let this = cx.add_model(|cx: &mut 
ModelContext| { - let handle = cx.weak_handle(); - project_store.update(cx, |store, cx| store.add_project(handle, cx)); - let mut this = Self { worktrees: Vec::new(), loading_buffers: Default::default(), @@ -497,7 +483,6 @@ impl Project { _maintain_buffer_languages: Self::maintain_buffer_languages(&languages, cx), languages, user_store: user_store.clone(), - project_store, fs, next_entry_id: Default::default(), next_diagnostic_group_id: Default::default(), @@ -593,9 +578,7 @@ impl Project { let http_client = client::test::FakeHttpClient::with_404_response(); let client = cx.update(|cx| client::Client::new(http_client.clone(), cx)); let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx)); - let project_store = cx.add_model(|_| ProjectStore::new()); - let project = - cx.update(|cx| Project::local(client, user_store, project_store, languages, fs, cx)); + let project = cx.update(|cx| Project::local(client, user_store, languages, fs, cx)); for path in root_paths { let (tree, _) = project .update(cx, |project, cx| { @@ -676,10 +659,6 @@ impl Project { self.user_store.clone() } - pub fn project_store(&self) -> ModelHandle { - self.project_store.clone() - } - #[cfg(any(test, feature = "test-support"))] pub fn check_invariants(&self, cx: &AppContext) { if self.is_local() { @@ -752,51 +731,12 @@ impl Project { } fn metadata_changed(&mut self, cx: &mut ModelContext) { - if let Some(ProjectClientState::Local { remote_id, .. }) = &self.client_state { - let project_id = *remote_id; - // Broadcast worktrees only if the project is online. - let worktrees = self - .worktrees - .iter() - .filter_map(|worktree| { - worktree - .upgrade(cx) - .map(|worktree| worktree.read(cx).as_local().unwrap().metadata_proto()) - }) - .collect(); - self.client - .send(proto::UpdateProject { - project_id, - worktrees, - }) - .log_err(); - - let worktrees = self.visible_worktrees(cx).collect::>(); - let scans_complete = futures::future::join_all( - worktrees - .iter() - .filter_map(|worktree| Some(worktree.read(cx).as_local()?.scan_complete())), - ); - - let worktrees = worktrees.into_iter().map(|handle| handle.downgrade()); - - cx.spawn_weak(move |_, cx| async move { - scans_complete.await; - cx.read(|cx| { - for worktree in worktrees { - if let Some(worktree) = worktree - .upgrade(cx) - .and_then(|worktree| worktree.read(cx).as_local()) - { - worktree.send_extension_counts(project_id); - } - } - }) - }) - .detach(); + if let Some(ProjectClientState::Local { + metadata_changed, .. 
+ }) = &mut self.client_state + { + *metadata_changed.borrow_mut() = (); } - - self.project_store.update(cx, |_, cx| cx.notify()); cx.notify(); } @@ -1092,8 +1032,32 @@ impl Project { cx.notify(); let mut status = self.client.status(); + let (metadata_changed_tx, mut metadata_changed_rx) = watch::channel(); self.client_state = Some(ProjectClientState::Local { remote_id: project_id, + metadata_changed: metadata_changed_tx, + _maintain_metadata: cx.spawn_weak(move |this, cx| async move { + while let Some(()) = metadata_changed_rx.next().await { + let Some(this) = this.upgrade(&cx) else { break }; + this.read_with(&cx, |this, cx| { + let worktrees = this + .worktrees + .iter() + .filter_map(|worktree| { + worktree.upgrade(cx).map(|worktree| { + worktree.read(cx).as_local().unwrap().metadata_proto() + }) + }) + .collect(); + this.client.request(proto::UpdateProject { + project_id, + worktrees, + }) + }) + .await + .log_err(); + } + }), _detect_unshare: cx.spawn_weak(move |this, mut cx| { async move { let is_connected = status.next().await.map_or(false, |s| s.is_connected()); @@ -1632,10 +1596,6 @@ impl Project { operations: vec![language::proto::serialize_operation(operation)], }); cx.background().spawn(request).detach_and_log_err(cx); - } else if let Some(project_id) = self.remote_id() { - let _ = self - .client - .send(proto::RegisterProjectActivity { project_id }); } } BufferEvent::Edited { .. } => { @@ -4573,9 +4533,9 @@ impl Project { }) } - async fn handle_update_project( + async fn handle_project_updated( this: ModelHandle, - envelope: TypedEnvelope, + envelope: TypedEnvelope, client: Arc, mut cx: AsyncAppContext, ) -> Result<()> { @@ -5832,48 +5792,6 @@ impl Project { } } -impl ProjectStore { - pub fn new() -> Self { - Self { - projects: Default::default(), - } - } - - pub fn projects<'a>( - &'a self, - cx: &'a AppContext, - ) -> impl 'a + Iterator> { - self.projects - .iter() - .filter_map(|project| project.upgrade(cx)) - } - - fn add_project(&mut self, project: WeakModelHandle, cx: &mut ModelContext) { - if let Err(ix) = self - .projects - .binary_search_by_key(&project.id(), WeakModelHandle::id) - { - self.projects.insert(ix, project); - } - cx.notify(); - } - - fn prune_projects(&mut self, cx: &mut ModelContext) { - let mut did_change = false; - self.projects.retain(|project| { - if project.is_upgradable(cx) { - true - } else { - did_change = true; - false - } - }); - if did_change { - cx.notify(); - } - } -} - impl WorktreeHandle { pub fn upgrade(&self, cx: &AppContext) -> Option> { match self { @@ -5952,16 +5870,10 @@ impl<'a> Iterator for PathMatchCandidateSetIter<'a> { } } -impl Entity for ProjectStore { - type Event = (); -} - impl Entity for Project { type Event = Event; - fn release(&mut self, cx: &mut gpui::MutableAppContext) { - self.project_store.update(cx, ProjectStore::prune_projects); - + fn release(&mut self, _: &mut gpui::MutableAppContext) { match &self.client_state { Some(ProjectClientState::Local { remote_id, .. 
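// NOTE: metadata_changed is now just a signal into a watch channel, and the
// _maintain_metadata task above is its only consumer, so a burst of worktree
// changes collapses into (ideally) one UpdateProject request. A runnable
// sketch of the same shape using tokio's watch channel as a stand-in for
// postage::watch (names and crate choice are illustrative, not the patch's):
#[tokio::main]
async fn main() {
    let (tx, mut rx) = tokio::sync::watch::channel(());
    let task = tokio::spawn(async move {
        while rx.changed().await.is_ok() {
            // one UpdateProject request per burst of metadata changes
            println!("flushing project metadata");
        }
    });
    for _ in 0..3 {
        tx.send(()).unwrap(); // repeated signals overwrite the same slot
    }
    drop(tx); // channel closes; the maintenance loop ends
    task.await.unwrap();
}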
}) => { self.client diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index db8fb8e3ff..9e4ec3ffb9 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -1051,25 +1051,6 @@ impl LocalWorktree { pub fn is_shared(&self) -> bool { self.share.is_some() } - - pub fn send_extension_counts(&self, project_id: u64) { - let mut extensions = Vec::new(); - let mut counts = Vec::new(); - - for (extension, count) in self.extension_counts() { - extensions.push(extension.to_string_lossy().to_string()); - counts.push(*count as u32); - } - - self.client - .send(proto::UpdateWorktreeExtensions { - project_id, - worktree_id: self.id().to_proto(), - extensions, - counts, - }) - .log_err(); - } } impl RemoteWorktree { diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index a93c0b593f..94880ce9f5 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -48,9 +48,8 @@ message Envelope { OpenBufferForSymbolResponse open_buffer_for_symbol_response = 40; UpdateProject update_project = 41; - RegisterProjectActivity register_project_activity = 42; + ProjectUpdated project_updated = 42; UpdateWorktree update_worktree = 43; - UpdateWorktreeExtensions update_worktree_extensions = 44; CreateProjectEntry create_project_entry = 45; RenameProjectEntry rename_project_entry = 46; @@ -258,8 +257,10 @@ message UpdateProject { repeated WorktreeMetadata worktrees = 2; } -message RegisterProjectActivity { +message ProjectUpdated { uint64 project_id = 1; + repeated WorktreeMetadata worktrees = 2; + uint64 room_version = 3; } message JoinProject { diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index 11bbaaf5ff..31f53564a8 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -140,12 +140,12 @@ messages!( (OpenBufferResponse, Background), (PerformRename, Background), (PerformRenameResponse, Background), + (Ping, Foreground), (PrepareRename, Background), (PrepareRenameResponse, Background), (ProjectEntryResponse, Foreground), + (ProjectUpdated, Foreground), (RemoveContact, Foreground), - (Ping, Foreground), - (RegisterProjectActivity, Foreground), (ReloadBuffers, Foreground), (ReloadBuffersResponse, Foreground), (RemoveProjectCollaborator, Foreground), @@ -175,7 +175,6 @@ messages!( (UpdateParticipantLocation, Foreground), (UpdateProject, Foreground), (UpdateWorktree, Foreground), - (UpdateWorktreeExtensions, Background), (UpdateDiffBase, Background), (GetPrivateUserInfo, Foreground), (GetPrivateUserInfoResponse, Foreground), @@ -231,6 +230,7 @@ request_messages!( (Test, Test), (UpdateBuffer, Ack), (UpdateParticipantLocation, Ack), + (UpdateProject, Ack), (UpdateWorktree, Ack), ); @@ -261,8 +261,8 @@ entity_messages!( OpenBufferByPath, OpenBufferForSymbol, PerformRename, + ProjectUpdated, PrepareRename, - RegisterProjectActivity, ReloadBuffers, RemoveProjectCollaborator, RenameProjectEntry, @@ -278,7 +278,6 @@ entity_messages!( UpdateLanguageServer, UpdateProject, UpdateWorktree, - UpdateWorktreeExtensions, UpdateDiffBase ); diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 2dbf923484..9db524ee9b 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -33,7 +33,7 @@ use log::{error, warn}; pub use pane::*; pub use pane_group::*; use postage::prelude::Stream; -use project::{Project, ProjectEntryId, ProjectPath, ProjectStore, Worktree, WorktreeId}; +use project::{Project, ProjectEntryId, ProjectPath, Worktree, WorktreeId}; use 
searchable::SearchableItemHandle; use serde::Deserialize; use settings::{Autosave, DockAnchor, Settings}; @@ -337,7 +337,6 @@ pub struct AppState { pub themes: Arc, pub client: Arc, pub user_store: ModelHandle, - pub project_store: ModelHandle, pub fs: Arc, pub build_window_options: fn() -> WindowOptions<'static>, pub initialize_workspace: fn(&mut Workspace, &Arc, &mut ViewContext), @@ -1039,7 +1038,6 @@ impl AppState { let languages = Arc::new(LanguageRegistry::test()); let http_client = client::test::FakeHttpClient::with_404_response(); let client = Client::new(http_client.clone(), cx); - let project_store = cx.add_model(|_| ProjectStore::new()); let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx)); let themes = ThemeRegistry::new((), cx.font_cache().clone()); Arc::new(Self { @@ -1048,7 +1046,6 @@ impl AppState { fs, languages, user_store, - project_store, initialize_workspace: |_, _, _| {}, build_window_options: Default::default, default_item_factory: |_, _| unimplemented!(), @@ -1301,7 +1298,6 @@ impl Workspace { Project::local( app_state.client.clone(), app_state.user_store.clone(), - app_state.project_store.clone(), app_state.languages.clone(), app_state.fs.clone(), cx, @@ -2965,7 +2961,6 @@ pub fn open_paths( let project = Project::local( app_state.client.clone(), app_state.user_store.clone(), - app_state.project_store.clone(), app_state.languages.clone(), app_state.fs.clone(), cx, @@ -2997,7 +2992,6 @@ fn open_new(app_state: &Arc, cx: &mut MutableAppContext) { Project::local( app_state.client.clone(), app_state.user_store.clone(), - app_state.project_store.clone(), app_state.languages.clone(), app_state.fs.clone(), cx, diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index e849632a2d..5a7ee2dbae 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -23,7 +23,7 @@ use isahc::{config::Configurable, Request}; use language::LanguageRegistry; use log::LevelFilter; use parking_lot::Mutex; -use project::{Fs, HomeDir, ProjectStore}; +use project::{Fs, HomeDir}; use serde_json::json; use settings::{ self, settings_file::SettingsFile, KeymapFileContent, Settings, SettingsFileContent, @@ -146,7 +146,6 @@ fn main() { }) .detach(); - let project_store = cx.add_model(|_| ProjectStore::new()); let db = cx.background().block(db); client.start_telemetry(db.clone()); client.report_event("start app", Default::default()); @@ -156,7 +155,6 @@ fn main() { themes, client: client.clone(), user_store, - project_store, fs, build_window_options, initialize_workspace, From d7369ace6a2e911464c9d2099258203823934586 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 14 Nov 2022 15:35:39 +0100 Subject: [PATCH 018/240] Skip applying room updates if they're older than the local room state --- crates/call/src/room.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index c1b0dc191d..4ba8d8effc 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -34,6 +34,7 @@ pub enum Event { pub struct Room { id: u64, + version: u64, live_kit: Option, status: RoomStatus, local_participant: LocalParticipant, @@ -61,6 +62,7 @@ impl Entity for Room { impl Room { fn new( id: u64, + version: u64, live_kit_connection_info: Option, client: Arc, user_store: ModelHandle, @@ -133,6 +135,7 @@ impl Room { Self { id, + version, live_kit: live_kit_room, status: RoomStatus::Online, participant_user_ids: Default::default(), @@ -161,6 +164,7 @@ impl Room { let room = cx.add_model(|cx| { 
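// NOTE: this patch threads a `version` through Room::new here and enforces it
// with the guard a few hunks below (`if this.version >= room.version`). A
// self-contained sketch of that idea: server snapshots carry a monotonically
// increasing version, and the client applies one only if it is strictly
// newer, so late-arriving updates cannot regress local state.
struct LocalRoom {
    version: u64,
    participants: Vec<u64>,
}

impl LocalRoom {
    fn apply(&mut self, version: u64, participants: Vec<u64>) -> bool {
        if self.version >= version {
            return false; // stale snapshot: drop it, matching the early return below
        }
        self.participants = participants;
        self.version = version;
        true
    }
}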
Self::new( room_proto.id, + room_proto.version, response.live_kit_connection_info, client, user_store, @@ -205,6 +209,7 @@ impl Room { let room = cx.add_model(|cx| { Self::new( room_id, + 0, response.live_kit_connection_info, client, user_store, @@ -287,8 +292,6 @@ impl Room { mut room: proto::Room, cx: &mut ModelContext, ) -> Result<()> { - // TODO: honor room version. - // Filter ourselves out from the room's participants. let local_participant_ix = room .participants @@ -318,6 +321,10 @@ impl Room { futures::join!(remote_participants, pending_participants); this.update(&mut cx, |this, cx| { + if this.version >= room.version { + return; + } + this.participant_user_ids.clear(); if let Some(participant) = local_participant { @@ -422,6 +429,7 @@ impl Room { let _ = this.leave(cx); } + this.version = room.version; this.check_invariants(); cx.notify(); }); From b9af2ae66e31b6caa81de664ceb8d37e552d4599 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 14 Nov 2022 17:16:50 +0100 Subject: [PATCH 019/240] Switch to serializable isolation Co-Authored-By: Nathan Sobo --- .../20221109000000_test_schema.sql | 1 + crates/collab/src/db.rs | 404 ++++++++++++------ crates/collab/src/lib.rs | 8 +- crates/collab/src/rpc.rs | 45 +- 4 files changed, 298 insertions(+), 160 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 2cef514e5a..d262d6a8bd 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -56,6 +56,7 @@ CREATE TABLE "project_collaborators" ( "is_host" BOOLEAN NOT NULL ); CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id"); +CREATE UNIQUE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id", "replica_id"); CREATE TABLE "worktrees" ( "id" INTEGER NOT NULL, diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index a12985b94b..b561ba045d 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -2,7 +2,7 @@ use crate::{Error, Result}; use anyhow::anyhow; use axum::http::StatusCode; use collections::HashMap; -use futures::StreamExt; +use futures::{future::BoxFuture, FutureExt, StreamExt}; use rpc::{proto, ConnectionId}; use serde::{Deserialize, Serialize}; use sqlx::{ @@ -10,7 +10,7 @@ use sqlx::{ types::Uuid, FromRow, }; -use std::{path::Path, time::Duration}; +use std::{future::Future, path::Path, time::Duration}; use time::{OffsetDateTime, PrimitiveDateTime}; #[cfg(test)] @@ -27,27 +27,34 @@ pub struct Db { runtime: Option, } -macro_rules! 
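// NOTE: the BeginTransaction split introduced below exists because Postgres
// defaults to READ COMMITTED and must be raised to SERIALIZABLE per
// transaction, while SQLite transactions are serializable by construction, so
// its impl is a bare begin(). A hedged standalone sketch of the Postgres arm,
// consistent with how the patch issues the statement:
async fn begin_serializable(
    pool: &sqlx::PgPool,
) -> sqlx::Result<sqlx::Transaction<'static, sqlx::Postgres>> {
    let mut tx = pool.begin().await?;
    // opt in to SERIALIZABLE for this transaction only
    sqlx::query("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;")
        .execute(&mut tx)
        .await?;
    Ok(tx)
}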
test_support { - ($self:ident, { $($token:tt)* }) => {{ - let body = async { - $($token)* - }; +pub trait BeginTransaction: Send + Sync { + type Database: sqlx::Database; - if cfg!(test) { - #[cfg(not(test))] - unreachable!(); + fn begin_transaction(&self) -> BoxFuture>>; +} - #[cfg(test)] - if let Some(background) = $self.background.as_ref() { - background.simulate_random_delay().await; - } +// In Postgres, serializable transactions are opt-in +impl BeginTransaction for Db { + type Database = sqlx::Postgres; - #[cfg(test)] - $self.runtime.as_ref().unwrap().block_on(body) - } else { - body.await + fn begin_transaction(&self) -> BoxFuture>> { + async move { + let mut tx = self.pool.begin().await?; + sqlx::Executor::execute(&mut tx, "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;") + .await?; + Ok(tx) } - }}; + .boxed() + } +} + +// In Sqlite, transactions are inherently serializable. +impl BeginTransaction for Db { + type Database = sqlx::Sqlite; + + fn begin_transaction(&self) -> BoxFuture>> { + async move { Ok(self.pool.begin().await?) }.boxed() + } } pub trait RowsAffected { @@ -88,7 +95,8 @@ impl Db { } pub async fn get_users_by_ids(&self, ids: Vec) -> Result> { - test_support!(self, { + self.transact(|tx| async { + let mut tx = tx; let query = " SELECT users.* FROM users @@ -96,13 +104,14 @@ impl Db { "; Ok(sqlx::query_as(query) .bind(&serde_json::json!(ids)) - .fetch_all(&self.pool) + .fetch_all(&mut tx) .await?) }) + .await } pub async fn get_user_metrics_id(&self, id: UserId) -> Result { - test_support!(self, { + self.transact(|mut tx| async move { let query = " SELECT metrics_id FROM users @@ -110,9 +119,10 @@ impl Db { "; Ok(sqlx::query_scalar(query) .bind(id) - .fetch_one(&self.pool) + .fetch_one(&mut tx) .await?) }) + .await } pub async fn create_user( @@ -121,7 +131,7 @@ impl Db { admin: bool, params: NewUserParams, ) -> Result { - test_support!(self, { + self.transact(|mut tx| async { let query = " INSERT INTO users (email_address, github_login, github_user_id, admin, metrics_id) VALUES ($1, $2, $3, $4, $5) @@ -131,12 +141,13 @@ impl Db { let (user_id, metrics_id): (UserId, String) = sqlx::query_as(query) .bind(email_address) - .bind(params.github_login) - .bind(params.github_user_id) + .bind(¶ms.github_login) + .bind(¶ms.github_user_id) .bind(admin) .bind(Uuid::new_v4().to_string()) - .fetch_one(&self.pool) + .fetch_one(&mut tx) .await?; + tx.commit().await?; Ok(NewUserResult { user_id, metrics_id, @@ -144,6 +155,7 @@ impl Db { inviting_user_id: None, }) }) + .await } pub async fn fuzzy_search_users(&self, _name_query: &str, _limit: u32) -> Result> { @@ -209,7 +221,8 @@ impl Db { } pub async fn fuzzy_search_users(&self, name_query: &str, limit: u32) -> Result> { - test_support!(self, { + self.transact(|tx| async { + let mut tx = tx; let like_string = Self::fuzzy_like_string(name_query); let query = " SELECT users.* @@ -222,27 +235,28 @@ impl Db { .bind(like_string) .bind(name_query) .bind(limit as i32) - .fetch_all(&self.pool) + .fetch_all(&mut tx) .await?) }) + .await } pub async fn get_users_by_ids(&self, ids: Vec) -> Result> { - test_support!(self, { + let ids = ids.iter().map(|id| id.0).collect::>(); + self.transact(|tx| async { + let mut tx = tx; let query = " SELECT users.* FROM users WHERE users.id = ANY ($1) "; - Ok(sqlx::query_as(query) - .bind(&ids.into_iter().map(|id| id.0).collect::>()) - .fetch_all(&self.pool) - .await?) + Ok(sqlx::query_as(query).bind(&ids).fetch_all(&mut tx).await?) 
}) + .await } pub async fn get_user_metrics_id(&self, id: UserId) -> Result { - test_support!(self, { + self.transact(|mut tx| async move { let query = " SELECT metrics_id::text FROM users @@ -250,9 +264,10 @@ impl Db { "; Ok(sqlx::query_scalar(query) .bind(id) - .fetch_one(&self.pool) + .fetch_one(&mut tx) .await?) }) + .await } pub async fn create_user( @@ -261,7 +276,7 @@ impl Db { admin: bool, params: NewUserParams, ) -> Result { - test_support!(self, { + self.transact(|mut tx| async { let query = " INSERT INTO users (email_address, github_login, github_user_id, admin) VALUES ($1, $2, $3, $4) @@ -271,11 +286,13 @@ impl Db { let (user_id, metrics_id): (UserId, String) = sqlx::query_as(query) .bind(email_address) - .bind(params.github_login) + .bind(¶ms.github_login) .bind(params.github_user_id) .bind(admin) - .fetch_one(&self.pool) + .fetch_one(&mut tx) .await?; + tx.commit().await?; + Ok(NewUserResult { user_id, metrics_id, @@ -283,6 +300,7 @@ impl Db { inviting_user_id: None, }) }) + .await } pub async fn create_user_from_invite( @@ -290,9 +308,7 @@ impl Db { invite: &Invite, user: NewUserParams, ) -> Result> { - test_support!(self, { - let mut tx = self.pool.begin().await?; - + self.transact(|mut tx| async { let (signup_id, existing_user_id, inviting_user_id, signup_device_id): ( i32, Option, @@ -393,10 +409,11 @@ impl Db { signup_device_id, })) }) + .await } pub async fn create_signup(&self, signup: Signup) -> Result<()> { - test_support!(self, { + self.transact(|mut tx| async { sqlx::query( " INSERT INTO signups @@ -425,10 +442,12 @@ impl Db { .bind(&signup.editor_features) .bind(&signup.programming_languages) .bind(&signup.device_id) - .execute(&self.pool) + .execute(&mut tx) .await?; + tx.commit().await?; Ok(()) }) + .await } pub async fn create_invite_from_code( @@ -437,9 +456,7 @@ impl Db { email_address: &str, device_id: Option<&str>, ) -> Result { - test_support!(self, { - let mut tx = self.pool.begin().await?; - + self.transact(|mut tx| async { let existing_user: Option = sqlx::query_scalar( " SELECT id @@ -516,10 +533,11 @@ impl Db { email_confirmation_code, }) }) + .await } pub async fn record_sent_invites(&self, invites: &[Invite]) -> Result<()> { - test_support!(self, { + self.transact(|mut tx| async { let emails = invites .iter() .map(|s| s.email_address.as_str()) @@ -532,15 +550,18 @@ impl Db { ", ) .bind(&emails) - .execute(&self.pool) + .execute(&mut tx) .await?; + tx.commit().await?; Ok(()) }) + .await } } impl Db where + Self: BeginTransaction, D: sqlx::Database + sqlx::migrate::MigrateDatabase, D::Connection: sqlx::migrate::Migrate, for<'a> >::Arguments: sqlx::IntoArguments<'a, D>, @@ -627,18 +648,21 @@ where // users pub async fn get_all_users(&self, page: u32, limit: u32) -> Result> { - test_support!(self, { + self.transact(|tx| async { + let mut tx = tx; let query = "SELECT * FROM users ORDER BY github_login ASC LIMIT $1 OFFSET $2"; Ok(sqlx::query_as(query) .bind(limit as i32) .bind((page * limit) as i32) - .fetch_all(&self.pool) + .fetch_all(&mut tx) .await?) }) + .await } pub async fn get_user_by_id(&self, id: UserId) -> Result> { - test_support!(self, { + self.transact(|tx| async { + let mut tx = tx; let query = " SELECT users.* FROM users @@ -647,16 +671,18 @@ where "; Ok(sqlx::query_as(query) .bind(&id) - .fetch_optional(&self.pool) + .fetch_optional(&mut tx) .await?) 
}) + .await } pub async fn get_users_with_no_invites( &self, invited_by_another_user: bool, ) -> Result> { - test_support!(self, { + self.transact(|tx| async { + let mut tx = tx; let query = format!( " SELECT users.* @@ -667,8 +693,9 @@ where if invited_by_another_user { " NOT" } else { "" } ); - Ok(sqlx::query_as(&query).fetch_all(&self.pool).await?) + Ok(sqlx::query_as(&query).fetch_all(&mut tx).await?) }) + .await } pub async fn get_user_by_github_account( @@ -676,7 +703,8 @@ where github_login: &str, github_user_id: Option, ) -> Result> { - test_support!(self, { + self.transact(|tx| async { + let mut tx = tx; if let Some(github_user_id) = github_user_id { let mut user = sqlx::query_as::<_, User>( " @@ -688,7 +716,7 @@ where ) .bind(github_login) .bind(github_user_id) - .fetch_optional(&self.pool) + .fetch_optional(&mut tx) .await?; if user.is_none() { @@ -702,7 +730,7 @@ where ) .bind(github_user_id) .bind(github_login) - .fetch_optional(&self.pool) + .fetch_optional(&mut tx) .await?; } @@ -716,58 +744,62 @@ where ", ) .bind(github_login) - .fetch_optional(&self.pool) + .fetch_optional(&mut tx) .await?; Ok(user) } }) + .await } pub async fn set_user_is_admin(&self, id: UserId, is_admin: bool) -> Result<()> { - test_support!(self, { + self.transact(|mut tx| async { let query = "UPDATE users SET admin = $1 WHERE id = $2"; - Ok(sqlx::query(query) + sqlx::query(query) .bind(is_admin) .bind(id.0) - .execute(&self.pool) - .await - .map(drop)?) + .execute(&mut tx) + .await?; + tx.commit().await?; + Ok(()) }) + .await } pub async fn set_user_connected_once(&self, id: UserId, connected_once: bool) -> Result<()> { - test_support!(self, { + self.transact(|mut tx| async move { let query = "UPDATE users SET connected_once = $1 WHERE id = $2"; - Ok(sqlx::query(query) + sqlx::query(query) .bind(connected_once) .bind(id.0) - .execute(&self.pool) - .await - .map(drop)?) + .execute(&mut tx) + .await?; + tx.commit().await?; + Ok(()) }) + .await } pub async fn destroy_user(&self, id: UserId) -> Result<()> { - test_support!(self, { + self.transact(|mut tx| async move { let query = "DELETE FROM access_tokens WHERE user_id = $1;"; sqlx::query(query) .bind(id.0) - .execute(&self.pool) + .execute(&mut tx) .await .map(drop)?; let query = "DELETE FROM users WHERE id = $1;"; - Ok(sqlx::query(query) - .bind(id.0) - .execute(&self.pool) - .await - .map(drop)?) + sqlx::query(query).bind(id.0).execute(&mut tx).await?; + tx.commit().await?; + Ok(()) }) + .await } // signups pub async fn get_waitlist_summary(&self) -> Result { - test_support!(self, { + self.transact(|mut tx| async move { Ok(sqlx::query_as( " SELECT @@ -784,13 +816,14 @@ where ) AS unsent ", ) - .fetch_one(&self.pool) + .fetch_one(&mut tx) .await?) }) + .await } pub async fn get_unsent_invites(&self, count: usize) -> Result> { - test_support!(self, { + self.transact(|mut tx| async move { Ok(sqlx::query_as( " SELECT @@ -803,16 +836,16 @@ where ", ) .bind(count as i32) - .fetch_all(&self.pool) + .fetch_all(&mut tx) .await?) 
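// NOTE: a pattern worth naming in the conversion above: every query now runs
// on the transaction handed to `transact`. Read paths simply return, letting
// the dropped transaction roll back harmlessly, while write paths must end
// with an explicit tx.commit() or their effects vanish. Sketch with stand-in
// names:
async fn set_flag(
    mut tx: sqlx::Transaction<'static, sqlx::Postgres>,
    id: i32,
    flag: bool,
) -> anyhow::Result<()> {
    sqlx::query("UPDATE users SET admin = $1 WHERE id = $2")
        .bind(flag)
        .bind(id)
        .execute(&mut tx)
        .await?;
    tx.commit().await?; // the write-path obligation
    Ok(())
}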
}) + .await } // invite codes pub async fn set_invite_count_for_user(&self, id: UserId, count: u32) -> Result<()> { - test_support!(self, { - let mut tx = self.pool.begin().await?; + self.transact(|mut tx| async move { if count > 0 { sqlx::query( " @@ -841,10 +874,11 @@ where tx.commit().await?; Ok(()) }) + .await } pub async fn get_invite_code_for_user(&self, id: UserId) -> Result> { - test_support!(self, { + self.transact(|mut tx| async move { let result: Option<(String, i32)> = sqlx::query_as( " SELECT invite_code, invite_count @@ -853,7 +887,7 @@ where ", ) .bind(id) - .fetch_optional(&self.pool) + .fetch_optional(&mut tx) .await?; if let Some((code, count)) = result { Ok(Some((code, count.try_into().map_err(anyhow::Error::new)?))) @@ -861,10 +895,12 @@ where Ok(None) } }) + .await } pub async fn get_user_for_invite_code(&self, code: &str) -> Result { - test_support!(self, { + self.transact(|tx| async { + let mut tx = tx; sqlx::query_as( " SELECT * @@ -873,7 +909,7 @@ where ", ) .bind(code) - .fetch_optional(&self.pool) + .fetch_optional(&mut tx) .await? .ok_or_else(|| { Error::Http( @@ -882,6 +918,7 @@ where ) }) }) + .await } pub async fn create_room( @@ -889,8 +926,7 @@ where user_id: UserId, connection_id: ConnectionId, ) -> Result { - test_support!(self, { - let mut tx = self.pool.begin().await?; + self.transact(|mut tx| async move { let live_kit_room = nanoid::nanoid!(30); let room_id = sqlx::query_scalar( " @@ -920,7 +956,7 @@ where .await?; self.commit_room_transaction(room_id, tx).await - }) + }).await } pub async fn call( @@ -931,8 +967,7 @@ where called_user_id: UserId, initial_project_id: Option, ) -> Result<(proto::Room, proto::IncomingCall)> { - test_support!(self, { - let mut tx = self.pool.begin().await?; + self.transact(|mut tx| async move { sqlx::query( " INSERT INTO room_participants (room_id, user_id, calling_user_id, calling_connection_id, initial_project_id) @@ -951,15 +986,14 @@ where let incoming_call = Self::build_incoming_call(&room, called_user_id) .ok_or_else(|| anyhow!("failed to build incoming call"))?; Ok((room, incoming_call)) - }) + }).await } pub async fn incoming_call_for_user( &self, user_id: UserId, ) -> Result> { - test_support!(self, { - let mut tx = self.pool.begin().await?; + self.transact(|mut tx| async move { let room_id = sqlx::query_scalar::<_, RoomId>( " SELECT room_id @@ -978,6 +1012,7 @@ where Ok(None) } }) + .await } fn build_incoming_call( @@ -1013,8 +1048,7 @@ where room_id: RoomId, called_user_id: UserId, ) -> Result { - test_support!(self, { - let mut tx = self.pool.begin().await?; + self.transact(|mut tx| async move { sqlx::query( " DELETE FROM room_participants @@ -1028,6 +1062,7 @@ where self.commit_room_transaction(room_id, tx).await }) + .await } pub async fn decline_call( @@ -1035,8 +1070,7 @@ where expected_room_id: Option, user_id: UserId, ) -> Result { - test_support!(self, { - let mut tx = self.pool.begin().await?; + self.transact(|mut tx| async move { let room_id = sqlx::query_scalar( " DELETE FROM room_participants @@ -1053,6 +1087,7 @@ where self.commit_room_transaction(room_id, tx).await }) + .await } pub async fn cancel_call( @@ -1061,8 +1096,7 @@ where calling_connection_id: ConnectionId, called_user_id: UserId, ) -> Result { - test_support!(self, { - let mut tx = self.pool.begin().await?; + self.transact(|mut tx| async move { let room_id = sqlx::query_scalar( " DELETE FROM room_participants @@ -1079,7 +1113,7 @@ where } self.commit_room_transaction(room_id, tx).await - }) + }).await } pub async fn join_room( @@ 
-1088,8 +1122,7 @@ where user_id: UserId, connection_id: ConnectionId, ) -> Result { - test_support!(self, { - let mut tx = self.pool.begin().await?; + self.transact(|mut tx| async move { sqlx::query( " UPDATE room_participants @@ -1105,15 +1138,14 @@ where .await?; self.commit_room_transaction(room_id, tx).await }) + .await } pub async fn leave_room_for_connection( &self, connection_id: ConnectionId, ) -> Result> { - test_support!(self, { - let mut tx = self.pool.begin().await?; - + self.transact(|mut tx| async move { // Leave room. let room_id = sqlx::query_scalar::<_, RoomId>( " @@ -1198,6 +1230,7 @@ where Ok(None) } }) + .await } pub async fn update_room_participant_location( @@ -1206,13 +1239,13 @@ where connection_id: ConnectionId, location: proto::ParticipantLocation, ) -> Result { - test_support!(self, { - let mut tx = self.pool.begin().await?; - + self.transact(|tx| async { + let mut tx = tx; let location_kind; let location_project_id; match location .variant + .as_ref() .ok_or_else(|| anyhow!("invalid location"))? { proto::participant_location::Variant::SharedProject(project) => { @@ -1245,6 +1278,7 @@ where self.commit_room_transaction(room_id, tx).await }) + .await } async fn commit_room_transaction( @@ -1375,8 +1409,7 @@ where connection_id: ConnectionId, worktrees: &[proto::WorktreeMetadata], ) -> Result<(ProjectId, proto::Room)> { - test_support!(self, { - let mut tx = self.pool.begin().await?; + self.transact(|mut tx| async move { let project_id = sqlx::query_scalar( " INSERT INTO projects (room_id, host_user_id, host_connection_id) @@ -1428,16 +1461,65 @@ where let room = self.commit_room_transaction(room_id, tx).await?; Ok((project_id, room)) }) + .await } - // pub async fn join_project( - // &self, - // user_id: UserId, - // connection_id: ConnectionId, - // project_id: ProjectId, - // ) -> Result<(Project, ReplicaId)> { - // todo!() - // } + pub async fn update_project( + &self, + project_id: ProjectId, + connection_id: ConnectionId, + worktrees: &[proto::WorktreeMetadata], + ) -> Result<(proto::Room, Vec)> { + self.transact(|mut tx| async move { + let room_id: RoomId = sqlx::query_scalar( + " + SELECT room_id + FROM projects + WHERE id = $1 AND host_connection_id = $2 + ", + ) + .bind(project_id) + .bind(connection_id.0 as i32) + .fetch_one(&mut tx) + .await?; + + for worktree in worktrees { + sqlx::query( + " + INSERT INTO worktrees (project_id, id, root_name) + VALUES ($1, $2, $3) + ON CONFLICT (project_id, id) DO UPDATE SET root_name = excluded.root_name + ", + ) + .bind(project_id) + .bind(worktree.id as i32) + .bind(&worktree.root_name) + .execute(&mut tx) + .await?; + } + + let mut params = "?,".repeat(worktrees.len()); + if !worktrees.is_empty() { + params.pop(); + } + let query = format!( + " + DELETE FROM worktrees + WHERE id NOT IN ({params}) + ", + ); + + let mut query = sqlx::query(&query); + for worktree in worktrees { + query = query.bind(worktree.id as i32); + } + query.execute(&mut tx).await?; + + let room = self.commit_room_transaction(room_id, tx).await?; + todo!() + }) + .await + } pub async fn unshare_project(&self, project_id: ProjectId) -> Result<()> { todo!() @@ -1459,7 +1541,7 @@ where // contacts pub async fn get_contacts(&self, user_id: UserId) -> Result> { - test_support!(self, { + self.transact(|mut tx| async move { let query = " SELECT user_id_a, user_id_b, a_to_b, accepted, should_notify FROM contacts @@ -1468,7 +1550,7 @@ where let mut rows = sqlx::query_as::<_, (UserId, UserId, bool, bool, bool)>(query) .bind(user_id) - 
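// NOTE: one catch in the update_project hunk above: the pruning statement is
// `DELETE FROM worktrees WHERE id NOT IN ({params})` with no project scope,
// so as written it would also drop worktree rows belonging to other projects
// (and with an empty worktree list it renders an invalid `NOT IN ()`). A
// scoped variant would read, as a sketch:
//
//     DELETE FROM worktrees
//     WHERE project_id = $1 AND id NOT IN ({params})
//
// The function is still work in progress at this point in the series (it ends
// in todo!() until a later patch fills in the guest connection ids).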
.fetch(&self.pool); + .fetch(&mut tx); let mut contacts = Vec::new(); while let Some(row) = rows.next().await { @@ -1507,10 +1589,11 @@ where Ok(contacts) }) + .await } pub async fn has_contact(&self, user_id_1: UserId, user_id_2: UserId) -> Result { - test_support!(self, { + self.transact(|mut tx| async move { let (id_a, id_b) = if user_id_1 < user_id_2 { (user_id_1, user_id_2) } else { @@ -1525,14 +1608,15 @@ where Ok(sqlx::query_scalar::<_, i32>(query) .bind(id_a.0) .bind(id_b.0) - .fetch_optional(&self.pool) + .fetch_optional(&mut tx) .await? .is_some()) }) + .await } pub async fn send_contact_request(&self, sender_id: UserId, receiver_id: UserId) -> Result<()> { - test_support!(self, { + self.transact(|mut tx| async move { let (id_a, id_b, a_to_b) = if sender_id < receiver_id { (sender_id, receiver_id, true) } else { @@ -1554,7 +1638,7 @@ where .bind(id_a.0) .bind(id_b.0) .bind(a_to_b) - .execute(&self.pool) + .execute(&mut tx) .await?; if result.rows_affected() == 1 { @@ -1562,11 +1646,11 @@ where } else { Err(anyhow!("contact already requested"))? } - }) + }).await } pub async fn remove_contact(&self, requester_id: UserId, responder_id: UserId) -> Result<()> { - test_support!(self, { + self.transact(|mut tx| async move { let (id_a, id_b) = if responder_id < requester_id { (responder_id, requester_id) } else { @@ -1579,7 +1663,7 @@ where let result = sqlx::query(query) .bind(id_a.0) .bind(id_b.0) - .execute(&self.pool) + .execute(&mut tx) .await?; if result.rows_affected() == 1 { @@ -1588,6 +1672,7 @@ where Err(anyhow!("no such contact"))? } }) + .await } pub async fn dismiss_contact_notification( @@ -1595,7 +1680,7 @@ where user_id: UserId, contact_user_id: UserId, ) -> Result<()> { - test_support!(self, { + self.transact(|mut tx| async move { let (id_a, id_b, a_to_b) = if user_id < contact_user_id { (user_id, contact_user_id, true) } else { @@ -1617,7 +1702,7 @@ where .bind(id_a.0) .bind(id_b.0) .bind(a_to_b) - .execute(&self.pool) + .execute(&mut tx) .await?; if result.rows_affected() == 0 { @@ -1626,6 +1711,7 @@ where Ok(()) }) + .await } pub async fn respond_to_contact_request( @@ -1634,7 +1720,7 @@ where requester_id: UserId, accept: bool, ) -> Result<()> { - test_support!(self, { + self.transact(|mut tx| async move { let (id_a, id_b, a_to_b) = if responder_id < requester_id { (responder_id, requester_id, false) } else { @@ -1650,7 +1736,7 @@ where .bind(id_a.0) .bind(id_b.0) .bind(a_to_b) - .execute(&self.pool) + .execute(&mut tx) .await? } else { let query = " @@ -1661,7 +1747,7 @@ where .bind(id_a.0) .bind(id_b.0) .bind(a_to_b) - .execute(&self.pool) + .execute(&mut tx) .await? }; if result.rows_affected() == 1 { @@ -1670,6 +1756,7 @@ where Err(anyhow!("no such contact request"))? } }) + .await } // access tokens @@ -1680,7 +1767,8 @@ where access_token_hash: &str, max_access_token_count: usize, ) -> Result<()> { - test_support!(self, { + self.transact(|tx| async { + let mut tx = tx; let insert_query = " INSERT INTO access_tokens (user_id, hash) VALUES ($1, $2); @@ -1696,7 +1784,6 @@ where ) "; - let mut tx = self.pool.begin().await?; sqlx::query(insert_query) .bind(user_id.0) .bind(access_token_hash) @@ -1710,10 +1797,11 @@ where .await?; Ok(tx.commit().await?) 
}) + .await } pub async fn get_access_token_hashes(&self, user_id: UserId) -> Result> { - test_support!(self, { + self.transact(|mut tx| async move { let query = " SELECT hash FROM access_tokens @@ -1722,9 +1810,51 @@ where "; Ok(sqlx::query_scalar(query) .bind(user_id.0) - .fetch_all(&self.pool) + .fetch_all(&mut tx) .await?) }) + .await + } + + async fn transact(&self, f: F) -> Result + where + F: Send + Fn(sqlx::Transaction<'static, D>) -> Fut, + Fut: Send + Future>, + { + let body = async { + loop { + let tx = self.begin_transaction().await?; + match f(tx).await { + Ok(result) => return Ok(result), + Err(error) => match error { + Error::Database(error) + if error + .as_database_error() + .and_then(|error| error.code()) + .as_deref() + == Some("hey") => + { + // Retry (don't break the loop) + } + error @ _ => return Err(error), + }, + } + } + }; + + #[cfg(test)] + { + if let Some(background) = self.background.as_ref() { + background.simulate_random_delay().await; + } + + self.runtime.as_ref().unwrap().block_on(body) + } + + #[cfg(not(test))] + { + body.await + } } } diff --git a/crates/collab/src/lib.rs b/crates/collab/src/lib.rs index 518530c539..be21999a45 100644 --- a/crates/collab/src/lib.rs +++ b/crates/collab/src/lib.rs @@ -4,6 +4,7 @@ pub type Result = std::result::Result; pub enum Error { Http(StatusCode, String), + Database(sqlx::Error), Internal(anyhow::Error), } @@ -15,7 +16,7 @@ impl From for Error { impl From for Error { fn from(error: sqlx::Error) -> Self { - Self::Internal(error.into()) + Self::Database(error) } } @@ -41,6 +42,9 @@ impl IntoResponse for Error { fn into_response(self) -> axum::response::Response { match self { Error::Http(code, message) => (code, message).into_response(), + Error::Database(error) => { + (StatusCode::INTERNAL_SERVER_ERROR, format!("{}", &error)).into_response() + } Error::Internal(error) => { (StatusCode::INTERNAL_SERVER_ERROR, format!("{}", &error)).into_response() } @@ -52,6 +56,7 @@ impl std::fmt::Debug for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Error::Http(code, message) => (code, message).fmt(f), + Error::Database(error) => error.fmt(f), Error::Internal(error) => error.fmt(f), } } @@ -61,6 +66,7 @@ impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Error::Http(code, message) => write!(f, "{code}: {message}"), + Error::Database(error) => error.fmt(f), Error::Internal(error) => error.fmt(f), } } diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index bed6ebf9cd..d8ca51e6cd 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1087,30 +1087,31 @@ impl Server { response: Response, ) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); - { - let mut state = self.store().await; - let guest_connection_ids = state - .read_project(project_id, request.sender_connection_id)? 
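// NOTE: the retry arm in `transact` above matches a database error code of
// "hey", which reads as a placeholder left in this commit. Under SERIALIZABLE
// isolation, Postgres rejects conflicting transactions with SQLSTATE 40001
// (serialization_failure), so a working retry condition would look like:
//
//     let is_serialization_conflict = error
//         .as_database_error()
//         .and_then(|e| e.code())
//         .as_deref()
//         == Some("40001");
//
// The loop shape itself (begin, run the closure, retry on conflict) is what
// makes the switch to serializable isolation in this patch workable.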
- .guest_connection_ids(); - let room = state.update_project( + let (room, guest_connection_ids) = self + .app_state + .db + .update_project( project_id, + request.sender_connection_id, &request.payload.worktrees, - request.sender_connection_id, - )?; - broadcast( - request.sender_connection_id, - guest_connection_ids, - |connection_id| { - self.peer.forward_send( - request.sender_connection_id, - connection_id, - request.payload.clone(), - ) - }, - ); - self.room_updated(room); - response.send(proto::Ack {})?; - }; + ) + .await?; + broadcast( + request.sender_connection_id, + guest_connection_ids, + |connection_id| { + self.peer.send( + connection_id, + proto::ProjectUpdated { + project_id: project_id.to_proto(), + worktrees: request.payload.worktrees.clone(), + room_version: room.version, + }, + ) + }, + ); + self.room_updated(&room); + response.send(proto::Ack {})?; Ok(()) } From 42bb5f0e9f7552a861a76a4cfda02462536aba89 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 15 Nov 2022 08:48:16 +0100 Subject: [PATCH 020/240] Add random delay after returning results from the database --- crates/collab/src/db.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index b561ba045d..fb91e92808 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1848,7 +1848,13 @@ where background.simulate_random_delay().await; } - self.runtime.as_ref().unwrap().block_on(body) + let result = self.runtime.as_ref().unwrap().block_on(body); + + if let Some(background) = self.background.as_ref() { + background.simulate_random_delay().await; + } + + result } #[cfg(not(test))] From 3e8fcb04f71f877a641f19f884f1d4f8cc3da188 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 15 Nov 2022 09:00:56 +0100 Subject: [PATCH 021/240] Finish implementing `Db::update_project` --- crates/collab/src/db.rs | 17 +++++++++++- crates/collab/src/rpc.rs | 9 +++---- crates/collab/src/rpc/store.rs | 49 ---------------------------------- crates/project/src/project.rs | 6 ++--- crates/rpc/proto/zed.proto | 7 ----- crates/rpc/src/proto.rs | 2 -- 6 files changed, 22 insertions(+), 68 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index fb91e92808..ba014624af 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1515,8 +1515,23 @@ where } query.execute(&mut tx).await?; + let mut guest_connection_ids = Vec::new(); + { + let mut db_guest_connection_ids = sqlx::query_scalar::<_, i32>( + " + SELECT connection_id + FROM project_collaborators + WHERE project_id = $1 AND is_host = FALSE + ", + ) + .fetch(&mut tx); + while let Some(connection_id) = db_guest_connection_ids.next().await { + guest_connection_ids.push(ConnectionId(connection_id? 
as u32)); + } + } + let room = self.commit_room_transaction(room_id, tx).await?; - todo!() + Ok((room, guest_connection_ids)) }) .await } diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index d8ca51e6cd..daf898ddf6 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1100,13 +1100,10 @@ impl Server { request.sender_connection_id, guest_connection_ids, |connection_id| { - self.peer.send( + self.peer.forward_send( + request.sender_connection_id, connection_id, - proto::ProjectUpdated { - project_id: project_id.to_proto(), - worktrees: request.payload.worktrees.clone(), - room_version: room.version, - }, + request.payload.clone(), ) }, ); diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index a9793e9fb6..a9a15e7b2a 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -253,55 +253,6 @@ impl Store { } } - pub fn update_project( - &mut self, - project_id: ProjectId, - worktrees: &[proto::WorktreeMetadata], - connection_id: ConnectionId, - ) -> Result<&proto::Room> { - let project = self - .projects - .get_mut(&project_id) - .ok_or_else(|| anyhow!("no such project"))?; - if project.host_connection_id == connection_id { - let mut old_worktrees = mem::take(&mut project.worktrees); - for worktree in worktrees { - if let Some(old_worktree) = old_worktrees.remove(&worktree.id) { - project.worktrees.insert(worktree.id, old_worktree); - } else { - project.worktrees.insert( - worktree.id, - Worktree { - root_name: worktree.root_name.clone(), - visible: worktree.visible, - ..Default::default() - }, - ); - } - } - - let room = self - .rooms - .get_mut(&project.room_id) - .ok_or_else(|| anyhow!("no such room"))?; - let participant_project = room - .participants - .iter_mut() - .flat_map(|participant| &mut participant.projects) - .find(|project| project.id == project_id.to_proto()) - .ok_or_else(|| anyhow!("no such project"))?; - participant_project.worktree_root_names = worktrees - .iter() - .filter(|worktree| worktree.visible) - .map(|worktree| worktree.root_name.clone()) - .collect(); - - Ok(room) - } else { - Err(anyhow!("no such project"))? 
- } - } - pub fn update_diagnostic_summary( &mut self, project_id: ProjectId, diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index d01571f44b..c59b19de8f 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -373,7 +373,7 @@ impl Project { client.add_model_message_handler(Self::handle_start_language_server); client.add_model_message_handler(Self::handle_update_language_server); client.add_model_message_handler(Self::handle_remove_collaborator); - client.add_model_message_handler(Self::handle_project_updated); + client.add_model_message_handler(Self::handle_update_project); client.add_model_message_handler(Self::handle_unshare_project); client.add_model_message_handler(Self::handle_create_buffer_for_peer); client.add_model_message_handler(Self::handle_update_buffer_file); @@ -4533,9 +4533,9 @@ impl Project { }) } - async fn handle_project_updated( + async fn handle_update_project( this: ModelHandle, - envelope: TypedEnvelope, + envelope: TypedEnvelope, client: Arc, mut cx: AsyncAppContext, ) -> Result<()> { diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index 94880ce9f5..e688cad1f8 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -48,7 +48,6 @@ message Envelope { OpenBufferForSymbolResponse open_buffer_for_symbol_response = 40; UpdateProject update_project = 41; - ProjectUpdated project_updated = 42; UpdateWorktree update_worktree = 43; CreateProjectEntry create_project_entry = 45; @@ -257,12 +256,6 @@ message UpdateProject { repeated WorktreeMetadata worktrees = 2; } -message ProjectUpdated { - uint64 project_id = 1; - repeated WorktreeMetadata worktrees = 2; - uint64 room_version = 3; -} - message JoinProject { uint64 project_id = 1; } diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index 31f53564a8..6d9bc9a0aa 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -144,7 +144,6 @@ messages!( (PrepareRename, Background), (PrepareRenameResponse, Background), (ProjectEntryResponse, Foreground), - (ProjectUpdated, Foreground), (RemoveContact, Foreground), (ReloadBuffers, Foreground), (ReloadBuffersResponse, Foreground), @@ -261,7 +260,6 @@ entity_messages!( OpenBufferByPath, OpenBufferForSymbol, PerformRename, - ProjectUpdated, PrepareRename, ReloadBuffers, RemoveProjectCollaborator, From 6cbf19722620c6836226a36ba5c6107d2f6d64d5 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 15 Nov 2022 10:41:21 +0100 Subject: [PATCH 022/240] Determine whether a contact is busy via the database --- .../20221109000000_test_schema.sql | 2 +- .../20221111092550_reconnection_support.sql | 1 + crates/collab/src/db.rs | 38 ++++++++++++--- crates/collab/src/db_tests.rs | 46 +++++++++++++------ crates/collab/src/rpc.rs | 10 ++-- crates/collab/src/rpc/store.rs | 22 ++++----- 6 files changed, 81 insertions(+), 38 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index d262d6a8bd..d6759fb524 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -56,7 +56,7 @@ CREATE TABLE "project_collaborators" ( "is_host" BOOLEAN NOT NULL ); CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id"); -CREATE UNIQUE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id", "replica_id"); +CREATE UNIQUE INDEX 
"index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id"); CREATE TABLE "worktrees" ( "id" INTEGER NOT NULL, diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index 7b82ce9ce7..617e282a0a 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -18,6 +18,7 @@ CREATE TABLE "project_collaborators" ( "is_host" BOOLEAN NOT NULL ); CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id"); +CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id"); CREATE TABLE IF NOT EXISTS "worktrees" ( "id" INTEGER NOT NULL, diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index ba014624af..1df96870d6 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1558,24 +1558,25 @@ where pub async fn get_contacts(&self, user_id: UserId) -> Result> { self.transact(|mut tx| async move { let query = " - SELECT user_id_a, user_id_b, a_to_b, accepted, should_notify + SELECT user_id_a, user_id_b, a_to_b, accepted, should_notify, (room_participants.id IS NOT NULL) as busy FROM contacts + LEFT JOIN room_participants ON room_participants.user_id = $1 WHERE user_id_a = $1 OR user_id_b = $1; "; - let mut rows = sqlx::query_as::<_, (UserId, UserId, bool, bool, bool)>(query) + let mut rows = sqlx::query_as::<_, (UserId, UserId, bool, bool, bool, bool)>(query) .bind(user_id) .fetch(&mut tx); let mut contacts = Vec::new(); while let Some(row) = rows.next().await { - let (user_id_a, user_id_b, a_to_b, accepted, should_notify) = row?; - + let (user_id_a, user_id_b, a_to_b, accepted, should_notify, busy) = row?; if user_id_a == user_id { if accepted { contacts.push(Contact::Accepted { user_id: user_id_b, should_notify: should_notify && a_to_b, + busy }); } else if a_to_b { contacts.push(Contact::Outgoing { user_id: user_id_b }) @@ -1589,6 +1590,7 @@ where contacts.push(Contact::Accepted { user_id: user_id_a, should_notify: should_notify && !a_to_b, + busy }); } else if a_to_b { contacts.push(Contact::Incoming { @@ -1607,6 +1609,23 @@ where .await } + pub async fn is_user_busy(&self, user_id: UserId) -> Result { + self.transact(|mut tx| async move { + Ok(sqlx::query_scalar::<_, i32>( + " + SELECT 1 + FROM room_participants + WHERE room_participants.user_id = $1 + ", + ) + .bind(user_id) + .fetch_optional(&mut tx) + .await? + .is_some()) + }) + .await + } + pub async fn has_contact(&self, user_id_1: UserId, user_id_2: UserId) -> Result { self.transact(|mut tx| async move { let (id_a, id_b) = if user_id_1 < user_id_2 { @@ -1657,6 +1676,7 @@ where .await?; if result.rows_affected() == 1 { + tx.commit().await?; Ok(()) } else { Err(anyhow!("contact already requested"))? @@ -1682,6 +1702,7 @@ where .await?; if result.rows_affected() == 1 { + tx.commit().await?; Ok(()) } else { Err(anyhow!("no such contact"))? @@ -1721,10 +1742,11 @@ where .await?; if result.rows_affected() == 0 { - Err(anyhow!("no such contact request"))?; + Err(anyhow!("no such contact request"))? + } else { + tx.commit().await?; + Ok(()) } - - Ok(()) }) .await } @@ -1766,6 +1788,7 @@ where .await? }; if result.rows_affected() == 1 { + tx.commit().await?; Ok(()) } else { Err(anyhow!("no such contact request"))? 
@@ -1977,6 +2000,7 @@ pub enum Contact { Accepted { user_id: UserId, should_notify: bool, + busy: bool, }, Outgoing { user_id: UserId, diff --git a/crates/collab/src/db_tests.rs b/crates/collab/src/db_tests.rs index 8eda7d34e2..444e60ddeb 100644 --- a/crates/collab/src/db_tests.rs +++ b/crates/collab/src/db_tests.rs @@ -258,7 +258,8 @@ test_both_dbs!(test_add_contacts_postgres, test_add_contacts_sqlite, db, { db.get_contacts(user_1).await.unwrap(), &[Contact::Accepted { user_id: user_2, - should_notify: true + should_notify: true, + busy: false, }], ); assert!(db.has_contact(user_1, user_2).await.unwrap()); @@ -268,6 +269,7 @@ test_both_dbs!(test_add_contacts_postgres, test_add_contacts_sqlite, db, { &[Contact::Accepted { user_id: user_1, should_notify: false, + busy: false, }] ); @@ -284,6 +286,7 @@ test_both_dbs!(test_add_contacts_postgres, test_add_contacts_sqlite, db, { &[Contact::Accepted { user_id: user_2, should_notify: true, + busy: false, }] ); @@ -296,6 +299,7 @@ test_both_dbs!(test_add_contacts_postgres, test_add_contacts_sqlite, db, { &[Contact::Accepted { user_id: user_2, should_notify: false, + busy: false, }] ); @@ -309,10 +313,12 @@ test_both_dbs!(test_add_contacts_postgres, test_add_contacts_sqlite, db, { Contact::Accepted { user_id: user_2, should_notify: false, + busy: false, }, Contact::Accepted { user_id: user_3, - should_notify: false + should_notify: false, + busy: false, } ] ); @@ -320,7 +326,8 @@ test_both_dbs!(test_add_contacts_postgres, test_add_contacts_sqlite, db, { db.get_contacts(user_3).await.unwrap(), &[Contact::Accepted { user_id: user_1, - should_notify: false + should_notify: false, + busy: false, }], ); @@ -335,14 +342,16 @@ test_both_dbs!(test_add_contacts_postgres, test_add_contacts_sqlite, db, { db.get_contacts(user_2).await.unwrap(), &[Contact::Accepted { user_id: user_1, - should_notify: false + should_notify: false, + busy: false, }] ); assert_eq!( db.get_contacts(user_3).await.unwrap(), &[Contact::Accepted { user_id: user_1, - should_notify: false + should_notify: false, + busy: false, }], ); }); @@ -504,14 +513,16 @@ async fn test_invite_codes() { db.get_contacts(user1).await.unwrap(), [Contact::Accepted { user_id: user2, - should_notify: true + should_notify: true, + busy: false, }] ); assert_eq!( db.get_contacts(user2).await.unwrap(), [Contact::Accepted { user_id: user1, - should_notify: false + should_notify: false, + busy: false, }] ); assert_eq!( @@ -550,11 +561,13 @@ async fn test_invite_codes() { [ Contact::Accepted { user_id: user2, - should_notify: true + should_notify: true, + busy: false, }, Contact::Accepted { user_id: user3, - should_notify: true + should_notify: true, + busy: false, } ] ); @@ -562,7 +575,8 @@ async fn test_invite_codes() { db.get_contacts(user3).await.unwrap(), [Contact::Accepted { user_id: user1, - should_notify: false + should_notify: false, + busy: false, }] ); assert_eq!( @@ -607,15 +621,18 @@ async fn test_invite_codes() { [ Contact::Accepted { user_id: user2, - should_notify: true + should_notify: true, + busy: false, }, Contact::Accepted { user_id: user3, - should_notify: true + should_notify: true, + busy: false, }, Contact::Accepted { user_id: user4, - should_notify: true + should_notify: true, + busy: false, } ] ); @@ -623,7 +640,8 @@ async fn test_invite_codes() { db.get_contacts(user4).await.unwrap(), [Contact::Accepted { user_id: user1, - should_notify: false + should_notify: false, + busy: false, }] ); assert_eq!( diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 
daf898ddf6..627a22426a 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -465,7 +465,7 @@ impl Server { if let Some(user) = self.app_state.db.get_user_by_id(inviter_id).await? { if let Some(code) = &user.invite_code { let store = self.store().await; - let invitee_contact = store.contact_for_user(invitee_id, true); + let invitee_contact = store.contact_for_user(invitee_id, true, false); for connection_id in store.connection_ids_for_user(inviter_id) { self.peer.send( connection_id, @@ -895,8 +895,9 @@ impl Server { async fn update_user_contacts(self: &Arc, user_id: UserId) -> Result<()> { let contacts = self.app_state.db.get_contacts(user_id).await?; + let busy = self.app_state.db.is_user_busy(user_id).await?; let store = self.store().await; - let updated_contact = store.contact_for_user(user_id, false); + let updated_contact = store.contact_for_user(user_id, false, busy); for contact in contacts { if let db::Contact::Accepted { user_id: contact_user_id, @@ -1575,6 +1576,7 @@ impl Server { .db .respond_to_contact_request(responder_id, requester_id, accept) .await?; + let busy = self.app_state.db.is_user_busy(requester_id).await?; let store = self.store().await; // Update responder with new contact @@ -1582,7 +1584,7 @@ impl Server { if accept { update .contacts - .push(store.contact_for_user(requester_id, false)); + .push(store.contact_for_user(requester_id, false, busy)); } update .remove_incoming_requests @@ -1596,7 +1598,7 @@ impl Server { if accept { update .contacts - .push(store.contact_for_user(responder_id, true)); + .push(store.contact_for_user(responder_id, true, busy)); } update .remove_outgoing_requests diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index a9a15e7b2a..4be9354788 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -3,7 +3,7 @@ use anyhow::{anyhow, Result}; use collections::{btree_map, BTreeMap, BTreeSet, HashMap, HashSet}; use rpc::{proto, ConnectionId}; use serde::Serialize; -use std::{mem, path::PathBuf, str}; +use std::{path::PathBuf, str}; use tracing::instrument; pub type RoomId = u64; @@ -156,14 +156,6 @@ impl Store { .is_empty() } - fn is_user_busy(&self, user_id: UserId) -> bool { - self.connected_users - .get(&user_id) - .unwrap_or(&Default::default()) - .active_call - .is_some() - } - pub fn build_initial_contacts_update( &self, contacts: Vec, @@ -175,10 +167,11 @@ impl Store { db::Contact::Accepted { user_id, should_notify, + busy, } => { update .contacts - .push(self.contact_for_user(user_id, should_notify)); + .push(self.contact_for_user(user_id, should_notify, busy)); } db::Contact::Outgoing { user_id } => { update.outgoing_requests.push(user_id.to_proto()) @@ -198,11 +191,16 @@ impl Store { update } - pub fn contact_for_user(&self, user_id: UserId, should_notify: bool) -> proto::Contact { + pub fn contact_for_user( + &self, + user_id: UserId, + should_notify: bool, + busy: bool, + ) -> proto::Contact { proto::Contact { user_id: user_id.to_proto(), online: self.is_user_online(user_id), - busy: self.is_user_busy(user_id), + busy, should_notify, } } From be523617c98bbe63d5bf002fa4dd7e12872afdf7 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 15 Nov 2022 11:44:26 +0100 Subject: [PATCH 023/240] Start reworking `join_project` to use the database --- .../20221109000000_test_schema.sql | 3 + crates/collab/src/db.rs | 152 ++++++++++++++++-- crates/collab/src/rpc.rs | 43 +++-- 3 files changed, 164 insertions(+), 34 deletions(-) diff --git 
a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index d6759fb524..1a09dff780 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -62,6 +62,9 @@ CREATE TABLE "worktrees" ( "id" INTEGER NOT NULL, "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "root_name" VARCHAR NOT NULL, + "visible" BOOL NOT NULL, + "scan_id" INTEGER NOT NULL, + "is_complete" BOOL NOT NULL, PRIMARY KEY(project_id, id) ); CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id"); diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 1df96870d6..88b6f20953 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1,7 +1,7 @@ use crate::{Error, Result}; use anyhow::anyhow; use axum::http::StatusCode; -use collections::HashMap; +use collections::{BTreeMap, HashMap, HashSet}; use futures::{future::BoxFuture, FutureExt, StreamExt}; use rpc::{proto, ConnectionId}; use serde::{Deserialize, Serialize}; @@ -10,7 +10,11 @@ use sqlx::{ types::Uuid, FromRow, }; -use std::{future::Future, path::Path, time::Duration}; +use std::{ + future::Future, + path::{Path, PathBuf}, + time::Duration, +}; use time::{OffsetDateTime, PrimitiveDateTime}; #[cfg(test)] @@ -1404,13 +1408,26 @@ where pub async fn share_project( &self, - room_id: RoomId, - user_id: UserId, + expected_room_id: RoomId, connection_id: ConnectionId, worktrees: &[proto::WorktreeMetadata], ) -> Result<(ProjectId, proto::Room)> { self.transact(|mut tx| async move { - let project_id = sqlx::query_scalar( + let (room_id, user_id) = sqlx::query_as::<_, (RoomId, UserId)>( + " + SELECT room_id, user_id + FROM room_participants + WHERE answering_connection_id = $1 + ", + ) + .bind(connection_id.0 as i32) + .fetch_one(&mut tx) + .await?; + if room_id != expected_room_id { + return Err(anyhow!("shared project on unexpected room"))?; + } + + let project_id: ProjectId = sqlx::query_scalar( " INSERT INTO projects (room_id, host_user_id, host_connection_id) VALUES ($1, $2, $3) @@ -1421,8 +1438,7 @@ where .bind(user_id) .bind(connection_id.0 as i32) .fetch_one(&mut tx) - .await - .map(ProjectId)?; + .await?; for worktree in worktrees { sqlx::query( @@ -1536,6 +1552,111 @@ where .await } + pub async fn join_project( + &self, + project_id: ProjectId, + connection_id: ConnectionId, + ) -> Result<(Project, i32)> { + self.transact(|mut tx| async move { + let (room_id, user_id) = sqlx::query_as::<_, (RoomId, UserId)>( + " + SELECT room_id, user_id + FROM room_participants + WHERE answering_connection_id = $1 + ", + ) + .bind(connection_id.0 as i32) + .fetch_one(&mut tx) + .await?; + + // Ensure project id was shared on this room. 
+ sqlx::query( + " + SELECT 1 + FROM projects + WHERE project_id = $1 AND room_id = $2 + ", + ) + .bind(project_id) + .bind(room_id) + .fetch_one(&mut tx) + .await?; + + let replica_ids = sqlx::query_scalar::<_, i32>( + " + SELECT replica_id + FROM project_collaborators + WHERE project_id = $1 + ", + ) + .bind(project_id) + .fetch_all(&mut tx) + .await?; + let replica_ids = HashSet::from_iter(replica_ids); + let mut replica_id = 1; + while replica_ids.contains(&replica_id) { + replica_id += 1; + } + + sqlx::query( + " + INSERT INTO project_collaborators ( + project_id, + connection_id, + user_id, + replica_id, + is_host + ) + VALUES ($1, $2, $3, $4, $5) + ", + ) + .bind(project_id) + .bind(connection_id.0 as i32) + .bind(user_id) + .bind(replica_id) + .bind(false) + .execute(&mut tx) + .await?; + + tx.commit().await?; + todo!() + }) + .await + // sqlx::query( + // " + // SELECT replica_id + // FROM project_collaborators + // WHERE project_id = $ + // ", + // ) + // .bind(project_id) + // .bind(connection_id.0 as i32) + // .bind(user_id) + // .bind(0) + // .bind(true) + // .execute(&mut tx) + // .await?; + // sqlx::query( + // " + // INSERT INTO project_collaborators ( + // project_id, + // connection_id, + // user_id, + // replica_id, + // is_host + // ) + // VALUES ($1, $2, $3, $4, $5) + // ", + // ) + // .bind(project_id) + // .bind(connection_id.0 as i32) + // .bind(user_id) + // .bind(0) + // .bind(true) + // .execute(&mut tx) + // .await?; + } + pub async fn unshare_project(&self, project_id: ProjectId) -> Result<()> { todo!() // test_support!(self, { @@ -1967,11 +2088,11 @@ pub struct Room { } id_type!(ProjectId); -#[derive(Clone, Debug, Default, FromRow, Serialize, PartialEq)] pub struct Project { pub id: ProjectId, - pub host_user_id: UserId, - pub unregistered: bool, + pub collaborators: Vec, + pub worktrees: BTreeMap, + pub language_servers: Vec, } #[derive(Clone, Debug, Default, FromRow, PartialEq)] @@ -1983,6 +2104,17 @@ pub struct ProjectCollaborator { pub is_host: bool, } +#[derive(Default)] +pub struct Worktree { + pub abs_path: PathBuf, + pub root_name: String, + pub visible: bool, + pub entries: BTreeMap, + pub diagnostic_summaries: BTreeMap, + pub scan_id: u64, + pub is_complete: bool, +} + pub struct LeftProject { pub id: ProjectId, pub host_user_id: UserId, diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 627a22426a..02d8f25f38 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -862,7 +862,6 @@ impl Server { .db .share_project( RoomId::from_proto(request.payload.room_id), - request.sender_user_id, request.sender_connection_id, &request.payload.worktrees, ) @@ -942,15 +941,21 @@ impl Server { tracing::info!(%project_id, %host_user_id, %host_connection_id, "join project"); - let mut store = self.store().await; - let (project, replica_id) = store.join_project(request.sender_connection_id, project_id)?; - let peer_count = project.guests.len(); - let mut collaborators = Vec::with_capacity(peer_count); - collaborators.push(proto::Collaborator { - peer_id: project.host_connection_id.0, - replica_id: 0, - user_id: project.host.user_id.to_proto(), - }); + let (project, replica_id) = self + .app_state + .db + .join_project(project_id, request.sender_connection_id) + .await?; + + let collaborators = project + .collaborators + .iter() + .map(|collaborator| proto::Collaborator { + peer_id: collaborator.connection_id as u32, + replica_id: collaborator.replica_id as u32, + user_id: collaborator.user_id.to_proto(), + }) + .collect::>(); let 
worktrees = project .worktrees .iter() @@ -962,22 +967,12 @@ impl Server { }) .collect::>(); - // Add all guests other than the requesting user's own connections as collaborators - for (guest_conn_id, guest) in &project.guests { - if request.sender_connection_id != *guest_conn_id { - collaborators.push(proto::Collaborator { - peer_id: guest_conn_id.0, - replica_id: guest.replica_id as u32, - user_id: guest.user_id.to_proto(), - }); - } - } - - for conn_id in project.connection_ids() { - if conn_id != request.sender_connection_id { + for collaborator in &project.collaborators { + let connection_id = ConnectionId(collaborator.connection_id as u32); + if connection_id != request.sender_connection_id { self.peer .send( - conn_id, + connection_id, proto::AddProjectCollaborator { project_id: project_id.to_proto(), collaborator: Some(proto::Collaborator { From 974ef967a313868b49f70fe1ea5491adcd9b276d Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 15 Nov 2022 16:37:32 +0100 Subject: [PATCH 024/240] Move `Store::join_project` to `Db::join_project` Co-Authored-By: Nathan Sobo --- .../20221109000000_test_schema.sql | 59 +++- .../20221111092550_reconnection_support.sql | 42 ++- crates/collab/src/db.rs | 277 +++++++++++++----- crates/collab/src/integration_tests.rs | 8 +- crates/collab/src/rpc.rs | 37 +-- crates/collab/src/rpc/store.rs | 49 ---- crates/rpc/proto/zed.proto | 7 - 7 files changed, 312 insertions(+), 167 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 1a09dff780..cffb549a89 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -47,9 +47,55 @@ CREATE TABLE "projects" ( "host_connection_id" INTEGER NOT NULL ); +CREATE TABLE "worktrees" ( + "id" INTEGER NOT NULL, + "project_id" INTEGER NOT NULL REFERENCES projects (id), + "root_name" VARCHAR NOT NULL, + "abs_path" VARCHAR NOT NULL, + "visible" BOOL NOT NULL, + "scan_id" INTEGER NOT NULL, + "is_complete" BOOL NOT NULL, + PRIMARY KEY(project_id, id) +); +CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id"); + +CREATE TABLE "worktree_entries" ( + "id" INTEGER NOT NULL, + "project_id" INTEGER NOT NULL REFERENCES projects (id), + "worktree_id" INTEGER NOT NULL REFERENCES worktrees (id), + "is_dir" BOOL NOT NULL, + "path" VARCHAR NOT NULL, + "inode" INTEGER NOT NULL, + "mtime_seconds" INTEGER NOT NULL, + "mtime_nanos" INTEGER NOT NULL, + "is_symlink" BOOL NOT NULL, + "is_ignored" BOOL NOT NULL, + PRIMARY KEY(project_id, worktree_id, id) +); +CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id"); + +CREATE TABLE "worktree_diagnostic_summaries" ( + "path" VARCHAR NOT NULL, + "project_id" INTEGER NOT NULL REFERENCES projects (id), + "worktree_id" INTEGER NOT NULL REFERENCES worktrees (id), + "language_server_id" INTEGER NOT NULL, + "error_count" INTEGER NOT NULL, + "warning_count" INTEGER NOT NULL, + PRIMARY KEY(project_id, worktree_id, path) +); +CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id"); + +CREATE TABLE "language_servers" ( + "id" INTEGER NOT NULL, + "project_id" INTEGER NOT NULL REFERENCES projects (id), + "name" VARCHAR NOT NULL, + PRIMARY KEY(project_id, id) +); +CREATE INDEX "index_language_servers_on_project_id" ON "language_servers" 
("project_id"); + CREATE TABLE "project_collaborators" ( "id" INTEGER PRIMARY KEY, - "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, + "project_id" INTEGER NOT NULL REFERENCES projects (id), "connection_id" INTEGER NOT NULL, "user_id" INTEGER NOT NULL, "replica_id" INTEGER NOT NULL, @@ -58,17 +104,6 @@ CREATE TABLE "project_collaborators" ( CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id"); CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id"); -CREATE TABLE "worktrees" ( - "id" INTEGER NOT NULL, - "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, - "root_name" VARCHAR NOT NULL, - "visible" BOOL NOT NULL, - "scan_id" INTEGER NOT NULL, - "is_complete" BOOL NOT NULL, - PRIMARY KEY(project_id, id) -); -CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id"); - CREATE TABLE "room_participants" ( "id" INTEGER PRIMARY KEY, "room_id" INTEGER NOT NULL REFERENCES rooms (id), diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index 617e282a0a..a5b49ad763 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -20,14 +20,52 @@ CREATE TABLE "project_collaborators" ( CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id"); CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id"); -CREATE TABLE IF NOT EXISTS "worktrees" ( +CREATE TABLE "worktrees" ( "id" INTEGER NOT NULL, - "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, + "project_id" INTEGER NOT NULL REFERENCES projects (id), "root_name" VARCHAR NOT NULL, + "abs_path" VARCHAR NOT NULL, + "visible" BOOL NOT NULL, + "scan_id" INTEGER NOT NULL, + "is_complete" BOOL NOT NULL, PRIMARY KEY(project_id, id) ); CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id"); +CREATE TABLE "worktree_entries" ( + "id" INTEGER NOT NULL, + "project_id" INTEGER NOT NULL REFERENCES projects (id), + "worktree_id" INTEGER NOT NULL REFERENCES worktrees (id), + "is_dir" BOOL NOT NULL, + "path" VARCHAR NOT NULL, + "inode" INTEGER NOT NULL, + "mtime_seconds" INTEGER NOT NULL, + "mtime_nanos" INTEGER NOT NULL, + "is_symlink" BOOL NOT NULL, + "is_ignored" BOOL NOT NULL, + PRIMARY KEY(project_id, worktree_id, id) +); +CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id"); + +CREATE TABLE "worktree_diagnostic_summaries" ( + "path" VARCHAR NOT NULL, + "project_id" INTEGER NOT NULL REFERENCES projects (id), + "worktree_id" INTEGER NOT NULL REFERENCES worktrees (id), + "language_server_id" INTEGER NOT NULL, + "error_count" INTEGER NOT NULL, + "warning_count" INTEGER NOT NULL, + PRIMARY KEY(project_id, worktree_id, path) +); +CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id"); + +CREATE TABLE "language_servers" ( + "id" INTEGER NOT NULL, + "project_id" INTEGER NOT NULL REFERENCES projects (id), + "name" VARCHAR NOT NULL, + PRIMARY KEY(project_id, id) +); +CREATE INDEX "index_language_servers_on_project_id" ON "language_servers" ("project_id"); + CREATE TABLE IF NOT EXISTS 
"room_participants" ( "id" SERIAL PRIMARY KEY, "room_id" INTEGER NOT NULL REFERENCES rooms (id), diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 88b6f20953..6db4ad101b 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -10,11 +10,7 @@ use sqlx::{ types::Uuid, FromRow, }; -use std::{ - future::Future, - path::{Path, PathBuf}, - time::Duration, -}; +use std::{future::Future, path::Path, time::Duration}; use time::{OffsetDateTime, PrimitiveDateTime}; #[cfg(test)] @@ -1443,13 +1439,17 @@ where for worktree in worktrees { sqlx::query( " - INSERT INTO worktrees (id, project_id, root_name) - VALUES ($1, $2, $3) + INSERT INTO worktrees (project_id, id, root_name, abs_path, visible, scan_id, is_complete) + VALUES ($1, $2, $3, $4, $5, $6, $7) ", ) - .bind(worktree.id as i32) .bind(project_id) + .bind(worktree.id as i32) .bind(&worktree.root_name) + .bind(&*String::from_utf8_lossy(&worktree.abs_path)) + .bind(worktree.visible) + .bind(0) + .bind(false) .execute(&mut tx) .await?; } @@ -1502,32 +1502,36 @@ where for worktree in worktrees { sqlx::query( " - INSERT INTO worktrees (project_id, id, root_name) - VALUES ($1, $2, $3) + INSERT INTO worktrees (project_id, id, root_name, abs_path, visible, scan_id, is_complete) + VALUES ($1, $2, $3, $4, $5, $6, $7) ON CONFLICT (project_id, id) DO UPDATE SET root_name = excluded.root_name ", ) .bind(project_id) .bind(worktree.id as i32) .bind(&worktree.root_name) + .bind(String::from_utf8_lossy(&worktree.abs_path).as_ref()) + .bind(worktree.visible) + .bind(0) + .bind(false) .execute(&mut tx) .await?; } - let mut params = "?,".repeat(worktrees.len()); + let mut params = "(?, ?),".repeat(worktrees.len()); if !worktrees.is_empty() { params.pop(); } let query = format!( " DELETE FROM worktrees - WHERE id NOT IN ({params}) + WHERE (project_id, id) NOT IN ({params}) ", ); let mut query = sqlx::query(&query); for worktree in worktrees { - query = query.bind(worktree.id as i32); + query = query.bind(project_id).bind(WorktreeId(worktree.id as i32)); } query.execute(&mut tx).await?; @@ -1556,7 +1560,7 @@ where &self, project_id: ProjectId, connection_id: ConnectionId, - ) -> Result<(Project, i32)> { + ) -> Result<(Project, ReplicaId)> { self.transact(|mut tx| async move { let (room_id, user_id) = sqlx::query_as::<_, (RoomId, UserId)>( " @@ -1574,7 +1578,7 @@ where " SELECT 1 FROM projects - WHERE project_id = $1 AND room_id = $2 + WHERE id = $1 AND room_id = $2 ", ) .bind(project_id) @@ -1582,9 +1586,9 @@ where .fetch_one(&mut tx) .await?; - let replica_ids = sqlx::query_scalar::<_, i32>( + let mut collaborators = sqlx::query_as::<_, ProjectCollaborator>( " - SELECT replica_id + SELECT * FROM project_collaborators WHERE project_id = $1 ", @@ -1592,11 +1596,21 @@ where .bind(project_id) .fetch_all(&mut tx) .await?; - let replica_ids = HashSet::from_iter(replica_ids); - let mut replica_id = 1; + let replica_ids = collaborators + .iter() + .map(|c| c.replica_id) + .collect::>(); + let mut replica_id = ReplicaId(1); while replica_ids.contains(&replica_id) { - replica_id += 1; + replica_id.0 += 1; } + let new_collaborator = ProjectCollaborator { + project_id, + connection_id: connection_id.0 as i32, + user_id, + replica_id, + is_host: false, + }; sqlx::query( " @@ -1610,51 +1624,140 @@ where VALUES ($1, $2, $3, $4, $5) ", ) - .bind(project_id) - .bind(connection_id.0 as i32) - .bind(user_id) - .bind(replica_id) - .bind(false) + .bind(new_collaborator.project_id) + .bind(new_collaborator.connection_id) + .bind(new_collaborator.user_id) + 
.bind(new_collaborator.replica_id) + .bind(new_collaborator.is_host) .execute(&mut tx) .await?; + collaborators.push(new_collaborator); + + let worktree_rows = sqlx::query_as::<_, WorktreeRow>( + " + SELECT * + FROM worktrees + WHERE project_id = $1 + ", + ) + .bind(project_id) + .fetch_all(&mut tx) + .await?; + let mut worktrees = worktree_rows + .into_iter() + .map(|worktree_row| { + ( + worktree_row.id, + Worktree { + id: worktree_row.id, + abs_path: worktree_row.abs_path, + root_name: worktree_row.root_name, + visible: worktree_row.visible, + entries: Default::default(), + diagnostic_summaries: Default::default(), + scan_id: worktree_row.scan_id as u64, + is_complete: worktree_row.is_complete, + }, + ) + }) + .collect::>(); + + let mut params = "(?, ?),".repeat(worktrees.len()); + if !worktrees.is_empty() { + params.pop(); + } + + // Populate worktree entries. + { + let query = format!( + " + SELECT * + FROM worktree_entries + WHERE (project_id, worktree_id) IN ({params}) + ", + ); + let mut entries = sqlx::query_as::<_, WorktreeEntry>(&query); + for worktree_id in worktrees.keys() { + entries = entries.bind(project_id).bind(*worktree_id); + } + let mut entries = entries.fetch(&mut tx); + while let Some(entry) = entries.next().await { + let entry = entry?; + if let Some(worktree) = worktrees.get_mut(&entry.worktree_id) { + worktree.entries.push(proto::Entry { + id: entry.id as u64, + is_dir: entry.is_dir, + path: entry.path.into_bytes(), + inode: entry.inode as u64, + mtime: Some(proto::Timestamp { + seconds: entry.mtime_seconds as u64, + nanos: entry.mtime_nanos as u32, + }), + is_symlink: entry.is_symlink, + is_ignored: entry.is_ignored, + }); + } + } + } + + // Populate worktree diagnostic summaries. + { + let query = format!( + " + SELECT * + FROM worktree_diagnostic_summaries + WHERE (project_id, worktree_id) IN ({params}) + ", + ); + let mut summaries = sqlx::query_as::<_, WorktreeDiagnosticSummary>(&query); + for worktree_id in worktrees.keys() { + summaries = summaries.bind(project_id).bind(*worktree_id); + } + let mut summaries = summaries.fetch(&mut tx); + while let Some(summary) = summaries.next().await { + let summary = summary?; + if let Some(worktree) = worktrees.get_mut(&summary.worktree_id) { + worktree + .diagnostic_summaries + .push(proto::DiagnosticSummary { + path: summary.path, + language_server_id: summary.language_server_id as u64, + error_count: summary.error_count as u32, + warning_count: summary.warning_count as u32, + }); + } + } + } + + // Populate language servers. 
+ let language_servers = sqlx::query_as::<_, LanguageServer>( + " + SELECT * + FROM language_servers + WHERE project_id = $1 + ", + ) + .bind(project_id) + .fetch_all(&mut tx) + .await?; tx.commit().await?; - todo!() + Ok(( + Project { + collaborators, + worktrees, + language_servers: language_servers + .into_iter() + .map(|language_server| proto::LanguageServer { + id: language_server.id.to_proto(), + name: language_server.name, + }) + .collect(), + }, + replica_id as ReplicaId, + )) }) .await - // sqlx::query( - // " - // SELECT replica_id - // FROM project_collaborators - // WHERE project_id = $ - // ", - // ) - // .bind(project_id) - // .bind(connection_id.0 as i32) - // .bind(user_id) - // .bind(0) - // .bind(true) - // .execute(&mut tx) - // .await?; - // sqlx::query( - // " - // INSERT INTO project_collaborators ( - // project_id, - // connection_id, - // user_id, - // replica_id, - // is_host - // ) - // VALUES ($1, $2, $3, $4, $5) - // ", - // ) - // .bind(project_id) - // .bind(connection_id.0 as i32) - // .bind(user_id) - // .bind(0) - // .bind(true) - // .execute(&mut tx) - // .await?; } pub async fn unshare_project(&self, project_id: ProjectId) -> Result<()> { @@ -2089,32 +2192,72 @@ pub struct Room { id_type!(ProjectId); pub struct Project { - pub id: ProjectId, pub collaborators: Vec, - pub worktrees: BTreeMap, + pub worktrees: BTreeMap, pub language_servers: Vec, } +id_type!(ReplicaId); #[derive(Clone, Debug, Default, FromRow, PartialEq)] pub struct ProjectCollaborator { pub project_id: ProjectId, pub connection_id: i32, pub user_id: UserId, - pub replica_id: i32, + pub replica_id: ReplicaId, pub is_host: bool, } -#[derive(Default)] -pub struct Worktree { - pub abs_path: PathBuf, +id_type!(WorktreeId); +#[derive(Clone, Debug, Default, FromRow, PartialEq)] +struct WorktreeRow { + pub id: WorktreeId, + pub abs_path: String, pub root_name: String, pub visible: bool, - pub entries: BTreeMap, - pub diagnostic_summaries: BTreeMap, + pub scan_id: i64, + pub is_complete: bool, +} + +pub struct Worktree { + pub id: WorktreeId, + pub abs_path: String, + pub root_name: String, + pub visible: bool, + pub entries: Vec, + pub diagnostic_summaries: Vec, pub scan_id: u64, pub is_complete: bool, } +#[derive(Clone, Debug, Default, FromRow, PartialEq)] +struct WorktreeEntry { + id: i64, + worktree_id: WorktreeId, + is_dir: bool, + path: String, + inode: i64, + mtime_seconds: i64, + mtime_nanos: i32, + is_symlink: bool, + is_ignored: bool, +} + +#[derive(Clone, Debug, Default, FromRow, PartialEq)] +struct WorktreeDiagnosticSummary { + worktree_id: WorktreeId, + path: String, + language_server_id: i64, + error_count: i32, + warning_count: i32, +} + +id_type!(LanguageServerId); +#[derive(Clone, Debug, Default, FromRow, PartialEq)] +struct LanguageServer { + id: LanguageServerId, + name: String, +} + pub struct LeftProject { pub id: ProjectId, pub host_user_id: UserId, diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index b54f03ce53..1236af42cb 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1,5 +1,5 @@ use crate::{ - db::{NewUserParams, ProjectId, SqliteTestDb as TestDb, UserId}, + db::{NewUserParams, SqliteTestDb as TestDb, UserId}, rpc::{Executor, Server}, AppState, }; @@ -2401,12 +2401,6 @@ async fn test_collaborating_with_diagnostics( // Wait for server to see the diagnostics update. 
deterministic.run_until_parked(); - { - let store = server.store.lock().await; - let project = store.project(ProjectId::from_proto(project_id)).unwrap(); - let worktree = project.worktrees.get(&worktree_id.to_proto()).unwrap(); - assert!(!worktree.diagnostic_summaries.is_empty()); - } // Ensure client B observes the new diagnostics. project_b.read_with(cx_b, |project, cx| { diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 02d8f25f38..3c7d4ec61b 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -42,7 +42,6 @@ use std::{ marker::PhantomData, net::SocketAddr, ops::{Deref, DerefMut}, - os::unix::prelude::OsStrExt, rc::Rc, sync::{ atomic::{AtomicBool, Ordering::SeqCst}, @@ -930,16 +929,8 @@ impl Server { ) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); let guest_user_id = request.sender_user_id; - let host_user_id; - let host_connection_id; - { - let state = self.store().await; - let project = state.project(project_id)?; - host_user_id = project.host.user_id; - host_connection_id = project.host_connection_id; - }; - tracing::info!(%project_id, %host_user_id, %host_connection_id, "join project"); + tracing::info!(%project_id, "join project"); let (project, replica_id) = self .app_state @@ -952,7 +943,7 @@ impl Server { .iter() .map(|collaborator| proto::Collaborator { peer_id: collaborator.connection_id as u32, - replica_id: collaborator.replica_id as u32, + replica_id: collaborator.replica_id.0 as u32, user_id: collaborator.user_id.to_proto(), }) .collect::>(); @@ -960,10 +951,10 @@ impl Server { .worktrees .iter() .map(|(id, worktree)| proto::WorktreeMetadata { - id: *id, + id: id.to_proto(), root_name: worktree.root_name.clone(), visible: worktree.visible, - abs_path: worktree.abs_path.as_os_str().as_bytes().to_vec(), + abs_path: worktree.abs_path.as_bytes().to_vec(), }) .collect::>(); @@ -977,7 +968,7 @@ impl Server { project_id: project_id.to_proto(), collaborator: Some(proto::Collaborator { peer_id: request.sender_connection_id.0, - replica_id: replica_id as u32, + replica_id: replica_id.0 as u32, user_id: guest_user_id.to_proto(), }), }, @@ -989,12 +980,12 @@ impl Server { // First, we send the metadata associated with each worktree. response.send(proto::JoinProjectResponse { worktrees: worktrees.clone(), - replica_id: replica_id as u32, + replica_id: replica_id.0 as u32, collaborators: collaborators.clone(), language_servers: project.language_servers.clone(), })?; - for (worktree_id, worktree) in &project.worktrees { + for (worktree_id, worktree) in project.worktrees { #[cfg(any(test, feature = "test-support"))] const MAX_CHUNK_SIZE: usize = 2; #[cfg(not(any(test, feature = "test-support")))] @@ -1003,10 +994,10 @@ impl Server { // Stream this worktree's entries. let message = proto::UpdateWorktree { project_id: project_id.to_proto(), - worktree_id: *worktree_id, - abs_path: worktree.abs_path.as_os_str().as_bytes().to_vec(), - root_name: worktree.root_name.clone(), - updated_entries: worktree.entries.values().cloned().collect(), + worktree_id: worktree_id.to_proto(), + abs_path: worktree.abs_path.as_bytes().to_vec(), + root_name: worktree.root_name, + updated_entries: worktree.entries, removed_entries: Default::default(), scan_id: worktree.scan_id, is_last_update: worktree.is_complete, @@ -1017,13 +1008,13 @@ impl Server { } // Stream this worktree's diagnostics. 
- for summary in worktree.diagnostic_summaries.values() { + for summary in worktree.diagnostic_summaries { self.peer.send( request.sender_connection_id, proto::UpdateDiagnosticSummary { project_id: project_id.to_proto(), - worktree_id: *worktree_id, - summary: Some(summary.clone()), + worktree_id: worktree.id.to_proto(), + summary: Some(summary), }, )?; } diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index 4be9354788..a93182d50b 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -294,49 +294,6 @@ impl Store { Err(anyhow!("no such project"))? } - pub fn join_project( - &mut self, - requester_connection_id: ConnectionId, - project_id: ProjectId, - ) -> Result<(&Project, ReplicaId)> { - let connection = self - .connections - .get_mut(&requester_connection_id) - .ok_or_else(|| anyhow!("no such connection"))?; - let user = self - .connected_users - .get(&connection.user_id) - .ok_or_else(|| anyhow!("no such connection"))?; - let active_call = user.active_call.ok_or_else(|| anyhow!("no such project"))?; - anyhow::ensure!( - active_call.connection_id == Some(requester_connection_id), - "no such project" - ); - - let project = self - .projects - .get_mut(&project_id) - .ok_or_else(|| anyhow!("no such project"))?; - anyhow::ensure!(project.room_id == active_call.room_id, "no such project"); - - connection.projects.insert(project_id); - let mut replica_id = 1; - while project.active_replica_ids.contains(&replica_id) { - replica_id += 1; - } - project.active_replica_ids.insert(replica_id); - project.guests.insert( - requester_connection_id, - Collaborator { - replica_id, - user_id: connection.user_id, - admin: connection.admin, - }, - ); - - Ok((project, replica_id)) - } - pub fn leave_project( &mut self, project_id: ProjectId, @@ -409,12 +366,6 @@ impl Store { .connection_ids()) } - pub fn project(&self, project_id: ProjectId) -> Result<&Project> { - self.projects - .get(&project_id) - .ok_or_else(|| anyhow!("no such project")) - } - pub fn read_project( &self, project_id: ProjectId, diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index e688cad1f8..8aed5ef5cf 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -282,13 +282,6 @@ message UpdateWorktree { bytes abs_path = 8; } -message UpdateWorktreeExtensions { - uint64 project_id = 1; - uint64 worktree_id = 2; - repeated string extensions = 3; - repeated uint32 counts = 4; -} - message CreateProjectEntry { uint64 project_id = 1; uint64 worktree_id = 2; From 4b1dcf2d55002ded81dfccc4ed93193c51be184c Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 15 Nov 2022 16:46:17 +0100 Subject: [PATCH 025/240] Always use strings to represent paths over the wire Previously, the protocol used a mix of strings and bytes without any consistency. When we go to multiple platforms, we won't be able to mix encodings of paths anyway. We don't know this is the right approach, but it at least makes things consistent and easy to read in the database, on the wire, etc. Really, we should be using entry ids etc to refer to entries on the wire anyway, but there's a chance this is the wrong decision. 
Co-Authored-By: Nathan Sobo --- crates/call/src/room.rs | 4 ++-- crates/collab/src/db.rs | 6 +++--- crates/collab/src/rpc.rs | 4 ++-- crates/project/src/project.rs | 14 ++++++-------- crates/project/src/project_tests.rs | 6 +++++- crates/project/src/worktree.rs | 21 ++++++++------------- crates/rpc/proto/zed.proto | 12 ++++++------ 7 files changed, 32 insertions(+), 35 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index 4ba8d8effc..8c1b0d9de0 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -10,7 +10,7 @@ use gpui::{AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext use live_kit_client::{LocalTrackPublication, LocalVideoTrack, RemoteVideoTrackUpdate}; use postage::stream::Stream; use project::Project; -use std::{mem, os::unix::prelude::OsStrExt, sync::Arc}; +use std::{mem, sync::Arc}; use util::{post_inc, ResultExt}; #[derive(Clone, Debug, PartialEq, Eq)] @@ -553,7 +553,7 @@ impl Room { id: worktree.id().to_proto(), root_name: worktree.root_name().into(), visible: worktree.is_visible(), - abs_path: worktree.abs_path().as_os_str().as_bytes().to_vec(), + abs_path: worktree.abs_path().to_string_lossy().into(), } }) .collect(), diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 6db4ad101b..4cd3ce3a7c 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1446,7 +1446,7 @@ where .bind(project_id) .bind(worktree.id as i32) .bind(&worktree.root_name) - .bind(&*String::from_utf8_lossy(&worktree.abs_path)) + .bind(&worktree.abs_path) .bind(worktree.visible) .bind(0) .bind(false) @@ -1510,7 +1510,7 @@ where .bind(project_id) .bind(worktree.id as i32) .bind(&worktree.root_name) - .bind(String::from_utf8_lossy(&worktree.abs_path).as_ref()) + .bind(&worktree.abs_path) .bind(worktree.visible) .bind(0) .bind(false) @@ -1687,7 +1687,7 @@ where worktree.entries.push(proto::Entry { id: entry.id as u64, is_dir: entry.is_dir, - path: entry.path.into_bytes(), + path: entry.path, inode: entry.inode as u64, mtime: Some(proto::Timestamp { seconds: entry.mtime_seconds as u64, diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 3c7d4ec61b..5fcb8d5f9c 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -954,7 +954,7 @@ impl Server { id: id.to_proto(), root_name: worktree.root_name.clone(), visible: worktree.visible, - abs_path: worktree.abs_path.as_bytes().to_vec(), + abs_path: worktree.abs_path.clone(), }) .collect::>(); @@ -995,7 +995,7 @@ impl Server { let message = proto::UpdateWorktree { project_id: project_id.to_proto(), worktree_id: worktree_id.to_proto(), - abs_path: worktree.abs_path.as_bytes().to_vec(), + abs_path: worktree.abs_path.clone(), root_name: worktree.root_name, updated_entries: worktree.entries, removed_entries: Default::default(), diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index c59b19de8f..9ac10d1406 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -44,12 +44,10 @@ use std::{ cell::RefCell, cmp::{self, Ordering}, convert::TryInto, - ffi::OsString, hash::Hash, mem, num::NonZeroU32, ops::Range, - os::unix::{ffi::OsStrExt, prelude::OsStringExt}, path::{Component, Path, PathBuf}, rc::Rc, str, @@ -837,7 +835,7 @@ impl Project { .request(proto::CreateProjectEntry { worktree_id: project_path.worktree_id.to_proto(), project_id, - path: project_path.path.as_os_str().as_bytes().to_vec(), + path: project_path.path.to_string_lossy().into(), is_directory, }) .await?; @@ -881,7 +879,7 @@ impl Project { 
.request(proto::CopyProjectEntry { project_id, entry_id: entry_id.to_proto(), - new_path: new_path.as_os_str().as_bytes().to_vec(), + new_path: new_path.to_string_lossy().into(), }) .await?; let entry = response @@ -924,7 +922,7 @@ impl Project { .request(proto::RenameProjectEntry { project_id, entry_id: entry_id.to_proto(), - new_path: new_path.as_os_str().as_bytes().to_vec(), + new_path: new_path.to_string_lossy().into(), }) .await?; let entry = response @@ -4606,7 +4604,7 @@ impl Project { let entry = worktree .update(&mut cx, |worktree, cx| { let worktree = worktree.as_local_mut().unwrap(); - let path = PathBuf::from(OsString::from_vec(envelope.payload.path)); + let path = PathBuf::from(envelope.payload.path); worktree.create_entry(path, envelope.payload.is_directory, cx) }) .await?; @@ -4630,7 +4628,7 @@ impl Project { let worktree_scan_id = worktree.read_with(&cx, |worktree, _| worktree.scan_id()); let entry = worktree .update(&mut cx, |worktree, cx| { - let new_path = PathBuf::from(OsString::from_vec(envelope.payload.new_path)); + let new_path = PathBuf::from(envelope.payload.new_path); worktree .as_local_mut() .unwrap() @@ -4658,7 +4656,7 @@ impl Project { let worktree_scan_id = worktree.read_with(&cx, |worktree, _| worktree.scan_id()); let entry = worktree .update(&mut cx, |worktree, cx| { - let new_path = PathBuf::from(OsString::from_vec(envelope.payload.new_path)); + let new_path = PathBuf::from(envelope.payload.new_path); worktree .as_local_mut() .unwrap() diff --git a/crates/project/src/project_tests.rs b/crates/project/src/project_tests.rs index ca274b18b8..77d2a610d5 100644 --- a/crates/project/src/project_tests.rs +++ b/crates/project/src/project_tests.rs @@ -2166,7 +2166,11 @@ async fn test_rescan_and_remote_updates( proto::WorktreeMetadata { id: initial_snapshot.id().to_proto(), root_name: initial_snapshot.root_name().into(), - abs_path: initial_snapshot.abs_path().as_os_str().as_bytes().to_vec(), + abs_path: initial_snapshot + .abs_path() + .as_os_str() + .to_string_lossy() + .into(), visible: true, }, rpc.clone(), diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 9e4ec3ffb9..ddd4a7a6c8 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -40,7 +40,6 @@ use std::{ future::Future, mem, ops::{Deref, DerefMut}, - os::unix::prelude::{OsStrExt, OsStringExt}, path::{Path, PathBuf}, sync::{atomic::AtomicUsize, Arc}, task::Poll, @@ -221,7 +220,7 @@ impl Worktree { let root_name = worktree.root_name.clone(); let visible = worktree.visible; - let abs_path = PathBuf::from(OsString::from_vec(worktree.abs_path)); + let abs_path = PathBuf::from(worktree.abs_path); let snapshot = Snapshot { id: WorktreeId(remote_id as usize), abs_path: Arc::from(abs_path.deref()), @@ -656,7 +655,7 @@ impl LocalWorktree { id: self.id().to_proto(), root_name: self.root_name().to_string(), visible: self.visible, - abs_path: self.abs_path().as_os_str().as_bytes().to_vec(), + abs_path: self.abs_path().as_os_str().to_string_lossy().into(), } } @@ -990,7 +989,7 @@ impl LocalWorktree { let update = proto::UpdateWorktree { project_id, worktree_id, - abs_path: snapshot.abs_path().as_os_str().as_bytes().to_vec(), + abs_path: snapshot.abs_path().to_string_lossy().into(), root_name: snapshot.root_name().to_string(), updated_entries: snapshot .entries_by_path @@ -1381,7 +1380,7 @@ impl LocalSnapshot { proto::UpdateWorktree { project_id, worktree_id: self.id().to_proto(), - abs_path: self.abs_path().as_os_str().as_bytes().to_vec(), + abs_path: 
self.abs_path().to_string_lossy().into(), root_name, updated_entries: self.entries_by_path.iter().map(Into::into).collect(), removed_entries: Default::default(), @@ -1449,7 +1448,7 @@ impl LocalSnapshot { proto::UpdateWorktree { project_id, worktree_id, - abs_path: self.abs_path().as_os_str().as_bytes().to_vec(), + abs_path: self.abs_path().to_string_lossy().into(), root_name: self.root_name().to_string(), updated_entries, removed_entries, @@ -2928,7 +2927,7 @@ impl<'a> From<&'a Entry> for proto::Entry { Self { id: entry.id.to_proto(), is_dir: entry.is_dir(), - path: entry.path.as_os_str().as_bytes().to_vec(), + path: entry.path.to_string_lossy().into(), inode: entry.inode, mtime: Some(entry.mtime.into()), is_symlink: entry.is_symlink, @@ -2946,14 +2945,10 @@ impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry { EntryKind::Dir } else { let mut char_bag = *root_char_bag; - char_bag.extend( - String::from_utf8_lossy(&entry.path) - .chars() - .map(|c| c.to_ascii_lowercase()), - ); + char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase())); EntryKind::File(char_bag) }; - let path: Arc = PathBuf::from(OsString::from_vec(entry.path)).into(); + let path: Arc = PathBuf::from(entry.path).into(); Ok(Entry { id: ProjectEntryId::from_proto(entry.id), kind, diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index 8aed5ef5cf..30c1c89e8f 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -279,26 +279,26 @@ message UpdateWorktree { repeated uint64 removed_entries = 5; uint64 scan_id = 6; bool is_last_update = 7; - bytes abs_path = 8; + string abs_path = 8; } message CreateProjectEntry { uint64 project_id = 1; uint64 worktree_id = 2; - bytes path = 3; + string path = 3; bool is_directory = 4; } message RenameProjectEntry { uint64 project_id = 1; uint64 entry_id = 2; - bytes new_path = 3; + string new_path = 3; } message CopyProjectEntry { uint64 project_id = 1; uint64 entry_id = 2; - bytes new_path = 3; + string new_path = 3; } message DeleteProjectEntry { @@ -884,7 +884,7 @@ message File { message Entry { uint64 id = 1; bool is_dir = 2; - bytes path = 3; + string path = 3; uint64 inode = 4; Timestamp mtime = 5; bool is_symlink = 6; @@ -1068,7 +1068,7 @@ message WorktreeMetadata { uint64 id = 1; string root_name = 2; bool visible = 3; - bytes abs_path = 4; + string abs_path = 4; } message UpdateDiffBase { From e9eadcaa6a61247f59d5bba629e5db64bfeef49f Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 15 Nov 2022 17:18:28 +0100 Subject: [PATCH 026/240] Move `Store::update_worktree` to `Db::update_worktree` --- .../20221109000000_test_schema.sql | 12 +- crates/collab/src/db.rs | 126 ++++++++++++++++++ crates/collab/src/rpc.rs | 17 +-- crates/collab/src/rpc/store.rs | 51 +------ 4 files changed, 139 insertions(+), 67 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index cffb549a89..24edd69d31 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -61,8 +61,8 @@ CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id"); CREATE TABLE "worktree_entries" ( "id" INTEGER NOT NULL, - "project_id" INTEGER NOT NULL REFERENCES projects (id), - "worktree_id" INTEGER NOT NULL REFERENCES worktrees (id), + "project_id" INTEGER NOT NULL, + "worktree_id" INTEGER NOT NULL, "is_dir" BOOL NOT NULL, "path" VARCHAR NOT NULL, "inode" INTEGER NOT NULL, 
@@ -71,17 +71,19 @@ CREATE TABLE "worktree_entries" ( "is_symlink" BOOL NOT NULL, "is_ignored" BOOL NOT NULL, PRIMARY KEY(project_id, worktree_id, id) + FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ); CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id"); CREATE TABLE "worktree_diagnostic_summaries" ( "path" VARCHAR NOT NULL, - "project_id" INTEGER NOT NULL REFERENCES projects (id), - "worktree_id" INTEGER NOT NULL REFERENCES worktrees (id), + "project_id" INTEGER NOT NULL, + "worktree_id" INTEGER NOT NULL, "language_server_id" INTEGER NOT NULL, "error_count" INTEGER NOT NULL, "warning_count" INTEGER NOT NULL, - PRIMARY KEY(project_id, worktree_id, path) + PRIMARY KEY(project_id, worktree_id, path), + FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ); CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id"); diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 4cd3ce3a7c..d61cdd334d 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1556,6 +1556,132 @@ where .await } + pub async fn update_worktree( + &self, + update: &proto::UpdateWorktree, + connection_id: ConnectionId, + ) -> Result> { + self.transact(|mut tx| async move { + let project_id = ProjectId::from_proto(update.project_id); + let worktree_id = WorktreeId::from_proto(update.worktree_id); + + // Ensure the update comes from the host. + sqlx::query( + " + SELECT 1 + FROM projects + WHERE id = $1 AND host_connection_id = $2 + ", + ) + .bind(project_id) + .bind(connection_id.0 as i32) + .fetch_one(&mut tx) + .await?; + + // Update metadata. + sqlx::query( + " + UPDATE worktrees + SET + root_name = $1, + scan_id = $2, + is_complete = $3, + abs_path = $4 + WHERE project_id = $5 AND id = $6 + RETURNING 1 + ", + ) + .bind(&update.root_name) + .bind(update.scan_id as i64) + .bind(update.is_last_update) + .bind(&update.abs_path) + .bind(project_id) + .bind(worktree_id) + .fetch_one(&mut tx) + .await?; + + if !update.updated_entries.is_empty() { + let mut params = + "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?),".repeat(update.updated_entries.len()); + params.pop(); + + let query = format!( + " + INSERT INTO worktree_entries ( + project_id, + worktree_id, + id, + is_dir, + path, + inode, + mtime_seconds, + mtime_nanos, + is_symlink, + is_ignored + ) + VALUES {params} + " + ); + let mut query = sqlx::query(&query); + for entry in &update.updated_entries { + let mtime = entry.mtime.clone().unwrap_or_default(); + query = query + .bind(project_id) + .bind(worktree_id) + .bind(entry.id as i64) + .bind(entry.is_dir) + .bind(&entry.path) + .bind(entry.inode as i64) + .bind(mtime.seconds as i64) + .bind(mtime.nanos as i32) + .bind(entry.is_symlink) + .bind(entry.is_ignored); + } + query.execute(&mut tx).await?; + } + + if !update.removed_entries.is_empty() { + let mut params = "(?, ?, ?),".repeat(update.removed_entries.len()); + params.pop(); + let query = format!( + " + DELETE FROM worktree_entries + WHERE (project_id, worktree_id, entry_id) IN ({params}) + " + ); + + let mut query = sqlx::query(&query); + for entry_id in &update.removed_entries { + query = query + .bind(project_id) + .bind(worktree_id) + .bind(*entry_id as i64); + } + query.execute(&mut tx).await?; + } + + let connection_ids = sqlx::query_scalar::<_, i32>( + " + SELECT connection_id + FROM project_collaborators + WHERE project_id = $1 AND 
connection_id != $2 + ", + ) + .bind(project_id) + .bind(connection_id.0 as i32) + .fetch_all(&mut tx) + .await?; + + tx.commit().await?; + + Ok(connection_ids + .into_iter() + .map(|connection_id| ConnectionId(connection_id as u32)) + .collect()) + }) + .await + } + pub async fn join_project( &self, project_id: ProjectId, diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 5fcb8d5f9c..1943f18ceb 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1105,18 +1105,11 @@ impl Server { request: Message, response: Response, ) -> Result<()> { - let project_id = ProjectId::from_proto(request.payload.project_id); - let worktree_id = request.payload.worktree_id; - let connection_ids = self.store().await.update_worktree( - request.sender_connection_id, - project_id, - worktree_id, - &request.payload.root_name, - &request.payload.removed_entries, - &request.payload.updated_entries, - request.payload.scan_id, - request.payload.is_last_update, - )?; + let connection_ids = self + .app_state + .db + .update_worktree(&request.payload, request.sender_connection_id) + .await?; broadcast( request.sender_connection_id, diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index a93182d50b..e3abc8dd3c 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -3,7 +3,7 @@ use anyhow::{anyhow, Result}; use collections::{btree_map, BTreeMap, BTreeSet, HashMap, HashSet}; use rpc::{proto, ConnectionId}; use serde::Serialize; -use std::{path::PathBuf, str}; +use std::path::PathBuf; use tracing::instrument; pub type RoomId = u64; @@ -325,37 +325,6 @@ impl Store { }) } - #[allow(clippy::too_many_arguments)] - pub fn update_worktree( - &mut self, - connection_id: ConnectionId, - project_id: ProjectId, - worktree_id: u64, - worktree_root_name: &str, - removed_entries: &[u64], - updated_entries: &[proto::Entry], - scan_id: u64, - is_last_update: bool, - ) -> Result> { - let project = self.write_project(project_id, connection_id)?; - - let connection_ids = project.connection_ids(); - let mut worktree = project.worktrees.entry(worktree_id).or_default(); - worktree.root_name = worktree_root_name.to_string(); - - for entry_id in removed_entries { - worktree.entries.remove(entry_id); - } - - for entry in updated_entries { - worktree.entries.insert(entry.id, entry.clone()); - } - - worktree.scan_id = scan_id; - worktree.is_complete = is_last_update; - Ok(connection_ids) - } - pub fn project_connection_ids( &self, project_id: ProjectId, @@ -384,24 +353,6 @@ impl Store { } } - fn write_project( - &mut self, - project_id: ProjectId, - connection_id: ConnectionId, - ) -> Result<&mut Project> { - let project = self - .projects - .get_mut(&project_id) - .ok_or_else(|| anyhow!("no such project"))?; - if project.host_connection_id == connection_id - || project.guests.contains_key(&connection_id) - { - Ok(project) - } else { - Err(anyhow!("no such project"))? 
- } - } - #[cfg(test)] pub fn check_invariants(&self) { for (connection_id, connection) in &self.connections { From ad67f5e4de5c086c1c42642f6d1656d8e599c344 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 15 Nov 2022 17:49:37 +0100 Subject: [PATCH 027/240] Always use the database to retrieve collaborators for a project --- crates/collab/src/db.rs | 58 +++++++++++ crates/collab/src/rpc.rs | 174 +++++++++++++++++++-------------- crates/collab/src/rpc/store.rs | 28 ------ 3 files changed, 160 insertions(+), 100 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index d61cdd334d..e503188e1d 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1886,6 +1886,64 @@ where .await } + pub async fn project_collaborators( + &self, + project_id: ProjectId, + connection_id: ConnectionId, + ) -> Result> { + self.transact(|mut tx| async move { + let collaborators = sqlx::query_as::<_, ProjectCollaborator>( + " + SELECT * + FROM project_collaborators + WHERE project_id = $1 + ", + ) + .bind(project_id) + .fetch_all(&mut tx) + .await?; + + if collaborators + .iter() + .any(|collaborator| collaborator.connection_id == connection_id.0 as i32) + { + Ok(collaborators) + } else { + Err(anyhow!("no such project"))? + } + }) + .await + } + + pub async fn project_connection_ids( + &self, + project_id: ProjectId, + connection_id: ConnectionId, + ) -> Result> { + self.transact(|mut tx| async move { + let connection_ids = sqlx::query_scalar::<_, i32>( + " + SELECT connection_id + FROM project_collaborators + WHERE project_id = $1 + ", + ) + .bind(project_id) + .fetch_all(&mut tx) + .await?; + + if connection_ids.contains(&(connection_id.0 as i32)) { + Ok(connection_ids + .into_iter() + .map(|connection_id| ConnectionId(connection_id as u32)) + .collect()) + } else { + Err(anyhow!("no such project"))? + } + }) + .await + } + pub async fn unshare_project(&self, project_id: ProjectId) -> Result<()> { todo!() // test_support!(self, { diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 1943f18ceb..f0116f04f9 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1187,13 +1187,15 @@ impl Server { self: Arc, request: Message, ) -> Result<()> { - let receiver_ids = self.store().await.project_connection_ids( - ProjectId::from_proto(request.payload.project_id), - request.sender_connection_id, - )?; + let project_id = ProjectId::from_proto(request.payload.project_id); + let project_connection_ids = self + .app_state + .db + .project_connection_ids(project_id, request.sender_connection_id) + .await?; broadcast( request.sender_connection_id, - receiver_ids, + project_connection_ids, |connection_id| { self.peer.forward_send( request.sender_connection_id, @@ -1214,25 +1216,25 @@ impl Server { T: EntityMessage + RequestMessage, { let project_id = ProjectId::from_proto(request.payload.remote_entity_id()); - let host_connection_id = self - .store() - .await - .read_project(project_id, request.sender_connection_id)? - .host_connection_id; + let collaborators = self + .app_state + .db + .project_collaborators(project_id, request.sender_connection_id) + .await?; + let host = collaborators + .iter() + .find(|collaborator| collaborator.is_host) + .ok_or_else(|| anyhow!("host not found"))?; + let payload = self .peer .forward_request( request.sender_connection_id, - host_connection_id, + ConnectionId(host.connection_id as u32), request.payload, ) .await?; - // Ensure project still exists by the time we get the response from the host. 
- self.store() - .await - .read_project(project_id, request.sender_connection_id)?; - response.send(payload)?; Ok(()) } @@ -1243,25 +1245,39 @@ impl Server { response: Response, ) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); - let host = self - .store() - .await - .read_project(project_id, request.sender_connection_id)? - .host_connection_id; + let collaborators = self + .app_state + .db + .project_collaborators(project_id, request.sender_connection_id) + .await?; + let host = collaborators + .into_iter() + .find(|collaborator| collaborator.is_host) + .ok_or_else(|| anyhow!("host not found"))?; + let host_connection_id = ConnectionId(host.connection_id as u32); let response_payload = self .peer - .forward_request(request.sender_connection_id, host, request.payload.clone()) + .forward_request( + request.sender_connection_id, + host_connection_id, + request.payload.clone(), + ) .await?; - let mut guests = self - .store() - .await - .read_project(project_id, request.sender_connection_id)? - .connection_ids(); - guests.retain(|guest_connection_id| *guest_connection_id != request.sender_connection_id); - broadcast(host, guests, |conn_id| { + let mut collaborators = self + .app_state + .db + .project_collaborators(project_id, request.sender_connection_id) + .await?; + collaborators.retain(|collaborator| { + collaborator.connection_id != request.sender_connection_id.0 as i32 + }); + let project_connection_ids = collaborators + .into_iter() + .map(|collaborator| ConnectionId(collaborator.connection_id as u32)); + broadcast(host_connection_id, project_connection_ids, |conn_id| { self.peer - .forward_send(host, conn_id, response_payload.clone()) + .forward_send(host_connection_id, conn_id, response_payload.clone()) }); response.send(response_payload)?; Ok(()) @@ -1285,14 +1301,15 @@ impl Server { response: Response, ) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); - let receiver_ids = { - let store = self.store().await; - store.project_connection_ids(project_id, request.sender_connection_id)? 
- }; + let project_connection_ids = self + .app_state + .db + .project_connection_ids(project_id, request.sender_connection_id) + .await?; broadcast( request.sender_connection_id, - receiver_ids, + project_connection_ids, |connection_id| { self.peer.forward_send( request.sender_connection_id, @@ -1309,13 +1326,16 @@ impl Server { self: Arc, request: Message, ) -> Result<()> { - let receiver_ids = self.store().await.project_connection_ids( - ProjectId::from_proto(request.payload.project_id), - request.sender_connection_id, - )?; + let project_id = ProjectId::from_proto(request.payload.project_id); + let project_connection_ids = self + .app_state + .db + .project_connection_ids(project_id, request.sender_connection_id) + .await?; + broadcast( request.sender_connection_id, - receiver_ids, + project_connection_ids, |connection_id| { self.peer.forward_send( request.sender_connection_id, @@ -1331,13 +1351,15 @@ impl Server { self: Arc, request: Message, ) -> Result<()> { - let receiver_ids = self.store().await.project_connection_ids( - ProjectId::from_proto(request.payload.project_id), - request.sender_connection_id, - )?; + let project_id = ProjectId::from_proto(request.payload.project_id); + let project_connection_ids = self + .app_state + .db + .project_connection_ids(project_id, request.sender_connection_id) + .await?; broadcast( request.sender_connection_id, - receiver_ids, + project_connection_ids, |connection_id| { self.peer.forward_send( request.sender_connection_id, @@ -1350,13 +1372,15 @@ impl Server { } async fn buffer_saved(self: Arc, request: Message) -> Result<()> { - let receiver_ids = self.store().await.project_connection_ids( - ProjectId::from_proto(request.payload.project_id), - request.sender_connection_id, - )?; + let project_id = ProjectId::from_proto(request.payload.project_id); + let project_connection_ids = self + .app_state + .db + .project_connection_ids(project_id, request.sender_connection_id) + .await?; broadcast( request.sender_connection_id, - receiver_ids, + project_connection_ids, |connection_id| { self.peer.forward_send( request.sender_connection_id, @@ -1376,14 +1400,14 @@ impl Server { let project_id = ProjectId::from_proto(request.payload.project_id); let leader_id = ConnectionId(request.payload.leader_id); let follower_id = request.sender_connection_id; - { - let store = self.store().await; - if !store - .project_connection_ids(project_id, follower_id)? - .contains(&leader_id) - { - Err(anyhow!("no such peer"))?; - } + let project_connection_ids = self + .app_state + .db + .project_connection_ids(project_id, request.sender_connection_id) + .await?; + + if !project_connection_ids.contains(&leader_id) { + Err(anyhow!("no such peer"))?; } let mut response_payload = self @@ -1400,11 +1424,12 @@ impl Server { async fn unfollow(self: Arc, request: Message) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); let leader_id = ConnectionId(request.payload.leader_id); - let store = self.store().await; - if !store - .project_connection_ids(project_id, request.sender_connection_id)? 
- .contains(&leader_id) - { + let project_connection_ids = self + .app_state + .db + .project_connection_ids(project_id, request.sender_connection_id) + .await?; + if !project_connection_ids.contains(&leader_id) { Err(anyhow!("no such peer"))?; } self.peer @@ -1417,9 +1442,12 @@ impl Server { request: Message, ) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); - let store = self.store().await; - let connection_ids = - store.project_connection_ids(project_id, request.sender_connection_id)?; + let project_connection_ids = self + .app_state + .db + .project_connection_ids(project_id, request.sender_connection_id) + .await?; + let leader_id = request .payload .variant @@ -1431,7 +1459,7 @@ impl Server { }); for follower_id in &request.payload.follower_ids { let follower_id = ConnectionId(*follower_id); - if connection_ids.contains(&follower_id) && Some(follower_id.0) != leader_id { + if project_connection_ids.contains(&follower_id) && Some(follower_id.0) != leader_id { self.peer.forward_send( request.sender_connection_id, follower_id, @@ -1629,13 +1657,15 @@ impl Server { self: Arc, request: Message, ) -> Result<()> { - let receiver_ids = self.store().await.project_connection_ids( - ProjectId::from_proto(request.payload.project_id), - request.sender_connection_id, - )?; + let project_id = ProjectId::from_proto(request.payload.project_id); + let project_connection_ids = self + .app_state + .db + .project_connection_ids(project_id, request.sender_connection_id) + .await?; broadcast( request.sender_connection_id, - receiver_ids, + project_connection_ids, |connection_id| { self.peer.forward_send( request.sender_connection_id, diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index e3abc8dd3c..f694440a50 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -325,34 +325,6 @@ impl Store { }) } - pub fn project_connection_ids( - &self, - project_id: ProjectId, - acting_connection_id: ConnectionId, - ) -> Result> { - Ok(self - .read_project(project_id, acting_connection_id)? - .connection_ids()) - } - - pub fn read_project( - &self, - project_id: ProjectId, - connection_id: ConnectionId, - ) -> Result<&Project> { - let project = self - .projects - .get(&project_id) - .ok_or_else(|| anyhow!("no such project"))?; - if project.host_connection_id == connection_id - || project.guests.contains_key(&connection_id) - { - Ok(project) - } else { - Err(anyhow!("no such project"))? 
- } - } - #[cfg(test)] pub fn check_invariants(&self) { for (connection_id, connection) in &self.connections { From 0817f905a2baf20b034844beb38459a63916ccc2 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 15 Nov 2022 18:02:07 +0100 Subject: [PATCH 028/240] Fix syntax error in schema --- .../20221109000000_test_schema.sql | 2 +- .../20221111092550_reconnection_support.sql | 40 ++++++++++--------- 2 files changed, 22 insertions(+), 20 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 24edd69d31..ccb09af454 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -70,7 +70,7 @@ CREATE TABLE "worktree_entries" ( "mtime_nanos" INTEGER NOT NULL, "is_symlink" BOOL NOT NULL, "is_ignored" BOOL NOT NULL, - PRIMARY KEY(project_id, worktree_id, id) + PRIMARY KEY(project_id, worktree_id, id), FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ); CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id"); diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index a5b49ad763..e0e594d46e 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -9,17 +9,6 @@ ALTER TABLE "projects" ADD "host_connection_id" INTEGER, DROP COLUMN "unregistered"; -CREATE TABLE "project_collaborators" ( - "id" SERIAL PRIMARY KEY, - "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, - "connection_id" INTEGER NOT NULL, - "user_id" INTEGER NOT NULL, - "replica_id" INTEGER NOT NULL, - "is_host" BOOLEAN NOT NULL -); -CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id"); -CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id"); - CREATE TABLE "worktrees" ( "id" INTEGER NOT NULL, "project_id" INTEGER NOT NULL REFERENCES projects (id), @@ -34,8 +23,8 @@ CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id"); CREATE TABLE "worktree_entries" ( "id" INTEGER NOT NULL, - "project_id" INTEGER NOT NULL REFERENCES projects (id), - "worktree_id" INTEGER NOT NULL REFERENCES worktrees (id), + "project_id" INTEGER NOT NULL, + "worktree_id" INTEGER NOT NULL, "is_dir" BOOL NOT NULL, "path" VARCHAR NOT NULL, "inode" INTEGER NOT NULL, @@ -43,18 +32,20 @@ CREATE TABLE "worktree_entries" ( "mtime_nanos" INTEGER NOT NULL, "is_symlink" BOOL NOT NULL, "is_ignored" BOOL NOT NULL, - PRIMARY KEY(project_id, worktree_id, id) + PRIMARY KEY(project_id, worktree_id, id), + FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ); CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id"); CREATE TABLE "worktree_diagnostic_summaries" ( "path" VARCHAR NOT NULL, - "project_id" INTEGER NOT NULL REFERENCES projects (id), - "worktree_id" INTEGER NOT NULL REFERENCES worktrees (id), + "project_id" INTEGER NOT NULL, + "worktree_id" INTEGER NOT NULL, "language_server_id" INTEGER NOT NULL, "error_count" INTEGER NOT NULL, "warning_count" INTEGER NOT NULL, - PRIMARY KEY(project_id, worktree_id, path) + PRIMARY KEY(project_id, worktree_id, path), + FOREIGN 
KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ); CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id"); @@ -66,8 +57,19 @@ CREATE TABLE "language_servers" ( ); CREATE INDEX "index_language_servers_on_project_id" ON "language_servers" ("project_id"); -CREATE TABLE IF NOT EXISTS "room_participants" ( - "id" SERIAL PRIMARY KEY, +CREATE TABLE "project_collaborators" ( + "id" INTEGER PRIMARY KEY, + "project_id" INTEGER NOT NULL REFERENCES projects (id), + "connection_id" INTEGER NOT NULL, + "user_id" INTEGER NOT NULL, + "replica_id" INTEGER NOT NULL, + "is_host" BOOLEAN NOT NULL +); +CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id"); +CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id"); + +CREATE TABLE "room_participants" ( + "id" INTEGER PRIMARY KEY, "room_id" INTEGER NOT NULL REFERENCES rooms (id), "user_id" INTEGER NOT NULL REFERENCES users (id), "answering_connection_id" INTEGER, From 31902363968c95ebb364d42ebac2871a3761bbc7 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 16 Nov 2022 08:57:19 +0100 Subject: [PATCH 029/240] Update worktree entry instead of erroring when it already exists --- crates/collab/src/db.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index e503188e1d..44cc382ee0 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1620,6 +1620,14 @@ where is_ignored ) VALUES {params} + ON CONFLICT (project_id, worktree_id, id) DO UPDATE SET + is_dir = excluded.is_dir, + path = excluded.path, + inode = excluded.inode, + mtime_seconds = excluded.mtime_seconds, + mtime_nanos = excluded.mtime_nanos, + is_symlink = excluded.is_symlink, + is_ignored = excluded.is_ignored " ); let mut query = sqlx::query(&query); From c151c87e12e58d3dd121857ccaeaa267a99bec52 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 16 Nov 2022 10:36:48 +0100 Subject: [PATCH 030/240] Correctly leave projects when leaving room --- crates/collab/src/db.rs | 108 ++++++++++++++++++++++++++------------- crates/collab/src/rpc.rs | 72 +++++++++++++------------- 2 files changed, 107 insertions(+), 73 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 44cc382ee0..78b6547ef2 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1171,44 +1171,68 @@ where .fetch_all(&mut tx) .await?; - let mut project_collaborators = sqlx::query_as::<_, ProjectCollaborator>( + let project_ids = sqlx::query_scalar::<_, ProjectId>( " - SELECT project_collaborators.* - FROM projects, project_collaborators - WHERE - projects.room_id = $1 AND - projects.id = project_collaborators.project_id AND - project_collaborators.connection_id = $2 + SELECT project_id + FROM project_collaborators + WHERE connection_id = $1 ", ) - .bind(room_id) .bind(connection_id.0 as i32) - .fetch(&mut tx); + .fetch_all(&mut tx) + .await?; + // Leave projects. 
let mut left_projects = HashMap::default(); - while let Some(collaborator) = project_collaborators.next().await { - let collaborator = collaborator?; - let left_project = - left_projects - .entry(collaborator.project_id) - .or_insert(LeftProject { - id: collaborator.project_id, - host_user_id: Default::default(), - connection_ids: Default::default(), - }); - - let collaborator_connection_id = - ConnectionId(collaborator.connection_id as u32); - if collaborator_connection_id != connection_id || collaborator.is_host { - left_project.connection_ids.push(collaborator_connection_id); + if !project_ids.is_empty() { + let mut params = "?,".repeat(project_ids.len()); + params.pop(); + let query = format!( + " + SELECT * + FROM project_collaborators + WHERE project_id IN ({params}) + " + ); + let mut query = sqlx::query_as::<_, ProjectCollaborator>(&query); + for project_id in project_ids { + query = query.bind(project_id); } - if collaborator.is_host { - left_project.host_user_id = collaborator.user_id; + let mut project_collaborators = query.fetch(&mut tx); + while let Some(collaborator) = project_collaborators.next().await { + let collaborator = collaborator?; + let left_project = + left_projects + .entry(collaborator.project_id) + .or_insert(LeftProject { + id: collaborator.project_id, + host_user_id: Default::default(), + connection_ids: Default::default(), + }); + + let collaborator_connection_id = + ConnectionId(collaborator.connection_id as u32); + if collaborator_connection_id != connection_id { + left_project.connection_ids.push(collaborator_connection_id); + } + + if collaborator.is_host { + left_project.host_user_id = collaborator.user_id; + } } } - drop(project_collaborators); + sqlx::query( + " + DELETE FROM project_collaborators + WHERE connection_id = $1 + ", + ) + .bind(connection_id.0 as i32) + .execute(&mut tx) + .await?; + // Unshare projects. 
sqlx::query( " DELETE FROM projects @@ -1265,15 +1289,16 @@ where sqlx::query( " UPDATE room_participants - SET location_kind = $1 AND location_project_id = $2 + SET location_kind = $1, location_project_id = $2 WHERE room_id = $3 AND answering_connection_id = $4 + RETURNING 1 ", ) .bind(location_kind) .bind(location_project_id) .bind(room_id) .bind(connection_id.0 as i32) - .execute(&mut tx) + .fetch_one(&mut tx) .await?; self.commit_room_transaction(room_id, tx).await @@ -1335,21 +1360,32 @@ where let ( user_id, answering_connection_id, - _location_kind, - _location_project_id, + location_kind, + location_project_id, calling_user_id, initial_project_id, ) = participant?; if let Some(answering_connection_id) = answering_connection_id { + let location = match (location_kind, location_project_id) { + (Some(0), Some(project_id)) => { + Some(proto::participant_location::Variant::SharedProject( + proto::participant_location::SharedProject { + id: project_id.to_proto(), + }, + )) + } + (Some(1), _) => Some(proto::participant_location::Variant::UnsharedProject( + Default::default(), + )), + _ => Some(proto::participant_location::Variant::External( + Default::default(), + )), + }; participants.push(proto::Participant { user_id: user_id.to_proto(), peer_id: answering_connection_id as u32, projects: Default::default(), - location: Some(proto::ParticipantLocation { - variant: Some(proto::participant_location::Variant::External( - Default::default(), - )), - }), + location: Some(proto::ParticipantLocation { variant: location }), }); } else { pending_participants.push(proto::PendingParticipant { diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index f0116f04f9..9f7d21a1a9 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -624,19 +624,19 @@ impl Server { async fn leave_room_for_connection( self: &Arc, - connection_id: ConnectionId, - user_id: UserId, + leaving_connection_id: ConnectionId, + leaving_user_id: UserId, ) -> Result<()> { let mut contacts_to_update = HashSet::default(); - let Some(left_room) = self.app_state.db.leave_room_for_connection(connection_id).await? else { + let Some(left_room) = self.app_state.db.leave_room_for_connection(leaving_connection_id).await? 
else { return Err(anyhow!("no room to leave"))?; }; - contacts_to_update.insert(user_id); + contacts_to_update.insert(leaving_user_id); for project in left_room.left_projects.into_values() { - if project.host_user_id == user_id { - for connection_id in project.connection_ids { + for connection_id in project.connection_ids { + if project.host_user_id == leaving_user_id { self.peer .send( connection_id, @@ -645,29 +645,27 @@ impl Server { }, ) .trace_err(); - } - } else { - for connection_id in project.connection_ids { + } else { self.peer .send( connection_id, proto::RemoveProjectCollaborator { project_id: project.id.to_proto(), - peer_id: connection_id.0, + peer_id: leaving_connection_id.0, }, ) .trace_err(); } - - self.peer - .send( - connection_id, - proto::UnshareProject { - project_id: project.id.to_proto(), - }, - ) - .trace_err(); } + + self.peer + .send( + leaving_connection_id, + proto::UnshareProject { + project_id: project.id.to_proto(), + }, + ) + .trace_err(); } self.room_updated(&left_room.room); @@ -691,7 +689,7 @@ impl Server { live_kit .remove_participant( left_room.room.live_kit_room.clone(), - connection_id.to_string(), + leaving_connection_id.to_string(), ) .await .trace_err(); @@ -941,6 +939,9 @@ impl Server { let collaborators = project .collaborators .iter() + .filter(|collaborator| { + collaborator.connection_id != request.sender_connection_id.0 as i32 + }) .map(|collaborator| proto::Collaborator { peer_id: collaborator.connection_id as u32, replica_id: collaborator.replica_id.0 as u32, @@ -958,23 +959,20 @@ impl Server { }) .collect::>(); - for collaborator in &project.collaborators { - let connection_id = ConnectionId(collaborator.connection_id as u32); - if connection_id != request.sender_connection_id { - self.peer - .send( - connection_id, - proto::AddProjectCollaborator { - project_id: project_id.to_proto(), - collaborator: Some(proto::Collaborator { - peer_id: request.sender_connection_id.0, - replica_id: replica_id.0 as u32, - user_id: guest_user_id.to_proto(), - }), - }, - ) - .trace_err(); - } + for collaborator in &collaborators { + self.peer + .send( + ConnectionId(collaborator.peer_id), + proto::AddProjectCollaborator { + project_id: project_id.to_proto(), + collaborator: Some(proto::Collaborator { + peer_id: request.sender_connection_id.0, + replica_id: replica_id.0 as u32, + user_id: guest_user_id.to_proto(), + }), + }, + ) + .trace_err(); } // First, we send the metadata associated with each worktree. 
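The unshare step in the patch above deletes rows from "projects" directly, and the next patch makes that safe by adding ON DELETE CASCADE to the child tables' foreign keys, so worktrees, entries, diagnostic summaries, language servers, and collaborators are swept out with their project. A minimal sketch of that behavior, assuming sqlx with its SQLite driver and a toy two-table schema (the cascade_demo helper below is illustrative only, not code from this series):

use sqlx::{Connection, SqliteConnection};

// Toy parent/child schema whose foreign key declares ON DELETE CASCADE,
// mirroring the constraint the following migration adds.
async fn cascade_demo() -> sqlx::Result<()> {
    let mut conn = SqliteConnection::connect("sqlite::memory:").await?;
    // SQLite only enforces foreign keys once this pragma is enabled.
    sqlx::query("PRAGMA foreign_keys = ON")
        .execute(&mut conn)
        .await?;
    sqlx::query("CREATE TABLE projects (id INTEGER PRIMARY KEY)")
        .execute(&mut conn)
        .await?;
    sqlx::query(
        "CREATE TABLE worktrees (
            id INTEGER NOT NULL,
            project_id INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
            PRIMARY KEY (project_id, id)
        )",
    )
    .execute(&mut conn)
    .await?;
    sqlx::query("INSERT INTO projects (id) VALUES (1)")
        .execute(&mut conn)
        .await?;
    sqlx::query("INSERT INTO worktrees (id, project_id) VALUES (10, 1)")
        .execute(&mut conn)
        .await?;

    // One delete on the parent row is enough; the child row disappears
    // via the cascade instead of a hand-written cleanup statement.
    sqlx::query("DELETE FROM projects WHERE id = 1")
        .execute(&mut conn)
        .await?;
    let remaining: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM worktrees")
        .fetch_one(&mut conn)
        .await?;
    assert_eq!(remaining, 0);
    Ok(())
}

This is also why the room-leave path above can stay small: the schema, not the server code, owns the cleanup invariant for unshared projects.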
From f9567ae1166559df1bf4d66397159c24c46a3d15 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 16 Nov 2022 10:41:36 +0100 Subject: [PATCH 031/240] Cascade deletes when project is deleted --- .../migrations.sqlite/20221109000000_test_schema.sql | 10 +++++----- .../migrations/20221111092550_reconnection_support.sql | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index ccb09af454..9914831bba 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -49,7 +49,7 @@ CREATE TABLE "projects" ( CREATE TABLE "worktrees" ( "id" INTEGER NOT NULL, - "project_id" INTEGER NOT NULL REFERENCES projects (id), + "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "root_name" VARCHAR NOT NULL, "abs_path" VARCHAR NOT NULL, "visible" BOOL NOT NULL, @@ -71,7 +71,7 @@ CREATE TABLE "worktree_entries" ( "is_symlink" BOOL NOT NULL, "is_ignored" BOOL NOT NULL, PRIMARY KEY(project_id, worktree_id, id), - FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) + FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE ); CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id"); @@ -83,13 +83,13 @@ CREATE TABLE "worktree_diagnostic_summaries" ( "error_count" INTEGER NOT NULL, "warning_count" INTEGER NOT NULL, PRIMARY KEY(project_id, worktree_id, path), - FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) + FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE ); CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id"); CREATE TABLE "language_servers" ( "id" INTEGER NOT NULL, - "project_id" INTEGER NOT NULL REFERENCES projects (id), + "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "name" VARCHAR NOT NULL, PRIMARY KEY(project_id, id) ); @@ -97,7 +97,7 @@ CREATE INDEX "index_language_servers_on_project_id" ON "language_servers" ("proj CREATE TABLE "project_collaborators" ( "id" INTEGER PRIMARY KEY, - "project_id" INTEGER NOT NULL REFERENCES projects (id), + "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "connection_id" INTEGER NOT NULL, "user_id" INTEGER NOT NULL, "replica_id" INTEGER NOT NULL, diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index e0e594d46e..8cd53726fd 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -11,7 +11,7 @@ ALTER TABLE "projects" CREATE TABLE "worktrees" ( "id" INTEGER NOT NULL, - "project_id" INTEGER NOT NULL REFERENCES projects (id), + "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "root_name" VARCHAR NOT NULL, "abs_path" VARCHAR NOT NULL, "visible" BOOL NOT NULL, @@ -33,7 +33,7 @@ CREATE TABLE "worktree_entries" ( "is_symlink" BOOL NOT NULL, "is_ignored" BOOL NOT NULL, PRIMARY KEY(project_id, worktree_id, id), - FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) + FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE 
CASCADE ); CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id"); @@ -45,13 +45,13 @@ CREATE TABLE "worktree_diagnostic_summaries" ( "error_count" INTEGER NOT NULL, "warning_count" INTEGER NOT NULL, PRIMARY KEY(project_id, worktree_id, path), - FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) + FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE ); CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id"); CREATE TABLE "language_servers" ( "id" INTEGER NOT NULL, - "project_id" INTEGER NOT NULL REFERENCES projects (id), + "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "name" VARCHAR NOT NULL, PRIMARY KEY(project_id, id) ); @@ -59,7 +59,7 @@ CREATE INDEX "index_language_servers_on_project_id" ON "language_servers" ("proj CREATE TABLE "project_collaborators" ( "id" INTEGER PRIMARY KEY, - "project_id" INTEGER NOT NULL REFERENCES projects (id), + "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "connection_id" INTEGER NOT NULL, "user_id" INTEGER NOT NULL, "replica_id" INTEGER NOT NULL, From eeb32fa88809f04a1b40730ce1fedb171de7b551 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 16 Nov 2022 11:07:39 +0100 Subject: [PATCH 032/240] Improve queries for composite primary keys --- crates/collab/src/db.rs | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 78b6547ef2..785965905a 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1554,20 +1554,20 @@ where .await?; } - let mut params = "(?, ?),".repeat(worktrees.len()); + let mut params = "?,".repeat(worktrees.len()); if !worktrees.is_empty() { params.pop(); } let query = format!( " DELETE FROM worktrees - WHERE (project_id, id) NOT IN ({params}) + WHERE project_id = ? AND worktree_id NOT IN ({params}) ", ); - let mut query = sqlx::query(&query); + let mut query = sqlx::query(&query).bind(project_id); for worktree in worktrees { - query = query.bind(project_id).bind(WorktreeId(worktree.id as i32)); + query = query.bind(WorktreeId(worktree.id as i32)); } query.execute(&mut tx).await?; @@ -1685,21 +1685,18 @@ where } if !update.removed_entries.is_empty() { - let mut params = "(?, ?, ?),".repeat(update.removed_entries.len()); + let mut params = "?,".repeat(update.removed_entries.len()); params.pop(); let query = format!( " DELETE FROM worktree_entries - WHERE (project_id, worktree_id, entry_id) IN ({params}) + WHERE project_id = ? AND worktree_id = ? AND entry_id IN ({params}) " ); - let mut query = sqlx::query(&query); + let mut query = sqlx::query(&query).bind(project_id).bind(worktree_id); for entry_id in &update.removed_entries { - query = query - .bind(project_id) - .bind(worktree_id) - .bind(*entry_id as i64); + query = query.bind(*entry_id as i64); } query.execute(&mut tx).await?; } @@ -1832,7 +1829,7 @@ where }) .collect::>(); - let mut params = "(?, ?),".repeat(worktrees.len()); + let mut params = "?,".repeat(worktrees.len()); if !worktrees.is_empty() { params.pop(); } @@ -1843,12 +1840,12 @@ where " SELECT * FROM worktree_entries - WHERE (project_id, worktree_id) IN ({params}) + WHERE project_id = ? 
AND worktree_id IN ({params}) ", ); - let mut entries = sqlx::query_as::<_, WorktreeEntry>(&query); + let mut entries = sqlx::query_as::<_, WorktreeEntry>(&query).bind(project_id); for worktree_id in worktrees.keys() { - entries = entries.bind(project_id).bind(*worktree_id); + entries = entries.bind(*worktree_id); } let mut entries = entries.fetch(&mut tx); while let Some(entry) = entries.next().await { @@ -1876,12 +1873,13 @@ where " SELECT * FROM worktree_diagnostic_summaries - WHERE (project_id, worktree_id) IN ({params}) + WHERE project_id = $1 AND worktree_id IN ({params}) ", ); - let mut summaries = sqlx::query_as::<_, WorktreeDiagnosticSummary>(&query); + let mut summaries = + sqlx::query_as::<_, WorktreeDiagnosticSummary>(&query).bind(project_id); for worktree_id in worktrees.keys() { - summaries = summaries.bind(project_id).bind(*worktree_id); + summaries = summaries.bind(*worktree_id); } let mut summaries = summaries.fetch(&mut tx); while let Some(summary) = summaries.next().await { From 117458f4f6c567ae691d4a1b716a6f5e8daef717 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 16 Nov 2022 14:58:11 +0100 Subject: [PATCH 033/240] Send worktree updates after project metadata has been sent --- crates/collab/src/db.rs | 3 +- crates/project/src/project.rs | 75 +++++++++++++++++++++---------- crates/workspace/src/workspace.rs | 5 ++- 3 files changed, 56 insertions(+), 27 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 785965905a..f058d3bfe1 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1561,7 +1561,7 @@ where let query = format!( " DELETE FROM worktrees - WHERE project_id = ? AND worktree_id NOT IN ({params}) + WHERE project_id = ? AND id NOT IN ({params}) ", ); @@ -1580,6 +1580,7 @@ where WHERE project_id = $1 AND is_host = FALSE ", ) + .bind(project_id) .fetch(&mut tx); while let Some(connection_id) = db_guest_connection_ids.next().await { guest_connection_ids.push(ConnectionId(connection_id? as u32)); diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 9ac10d1406..436b2d92a2 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -10,7 +10,11 @@ use anyhow::{anyhow, Context, Result}; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use clock::ReplicaId; use collections::{hash_map, BTreeMap, HashMap, HashSet}; -use futures::{future::Shared, AsyncWriteExt, Future, FutureExt, StreamExt, TryFutureExt}; +use futures::{ + channel::{mpsc, oneshot}, + future::Shared, + AsyncWriteExt, Future, FutureExt, StreamExt, TryFutureExt, +}; use gpui::{ AnyModelHandle, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, @@ -145,7 +149,7 @@ enum WorktreeHandle { enum ProjectClientState { Local { remote_id: u64, - metadata_changed: watch::Sender<()>, + metadata_changed: mpsc::UnboundedSender>, _maintain_metadata: Task<()>, _detect_unshare: Task>, }, @@ -533,7 +537,7 @@ impl Project { nonce: StdRng::from_entropy().gen(), }; for worktree in worktrees { - this.add_worktree(&worktree, cx); + let _ = this.add_worktree(&worktree, cx); } this }); @@ -728,14 +732,22 @@ impl Project { } } - fn metadata_changed(&mut self, cx: &mut ModelContext) { + fn metadata_changed(&mut self, cx: &mut ModelContext) -> impl Future { + let (tx, rx) = oneshot::channel(); if let Some(ProjectClientState::Local { metadata_changed, .. 
}) = &mut self.client_state { - *metadata_changed.borrow_mut() = (); + let _ = metadata_changed.unbounded_send(tx); } cx.notify(); + + async move { + // If the project is shared, this will resolve when the `_maintain_metadata` task has + // a chance to update the metadata. Otherwise, it will resolve right away because `tx` + // will get dropped. + let _ = rx.await; + } } pub fn collaborators(&self) -> &HashMap { @@ -1025,17 +1037,22 @@ impl Project { self.client_subscriptions .push(self.client.add_model_for_remote_entity(project_id, cx)); - self.metadata_changed(cx); + let _ = self.metadata_changed(cx); cx.emit(Event::RemoteIdChanged(Some(project_id))); cx.notify(); let mut status = self.client.status(); - let (metadata_changed_tx, mut metadata_changed_rx) = watch::channel(); + let (metadata_changed_tx, mut metadata_changed_rx) = mpsc::unbounded(); self.client_state = Some(ProjectClientState::Local { remote_id: project_id, metadata_changed: metadata_changed_tx, _maintain_metadata: cx.spawn_weak(move |this, cx| async move { - while let Some(()) = metadata_changed_rx.next().await { + while let Some(tx) = metadata_changed_rx.next().await { + let mut txs = vec![tx]; + while let Ok(Some(next_tx)) = metadata_changed_rx.try_next() { + txs.push(next_tx); + } + let Some(this) = this.upgrade(&cx) else { break }; this.read_with(&cx, |this, cx| { let worktrees = this @@ -1054,6 +1071,10 @@ impl Project { }) .await .log_err(); + + for tx in txs { + let _ = tx.send(()); + } } }), _detect_unshare: cx.spawn_weak(move |this, mut cx| { @@ -1105,7 +1126,7 @@ impl Project { } } - self.metadata_changed(cx); + let _ = self.metadata_changed(cx); cx.notify(); self.client.send(proto::UnshareProject { project_id: remote_id, @@ -4162,12 +4183,13 @@ impl Project { }); let worktree = worktree?; - let project_id = project.update(&mut cx, |project, cx| { - project.add_worktree(&worktree, cx); - project.remote_id() - }); + project + .update(&mut cx, |project, cx| project.add_worktree(&worktree, cx)) + .await; - if let Some(project_id) = project_id { + if let Some(project_id) = + project.read_with(&cx, |project, _| project.remote_id()) + { worktree .update(&mut cx, |worktree, cx| { worktree.as_local_mut().unwrap().share(project_id, cx) @@ -4191,7 +4213,11 @@ impl Project { }) } - pub fn remove_worktree(&mut self, id_to_remove: WorktreeId, cx: &mut ModelContext) { + pub fn remove_worktree( + &mut self, + id_to_remove: WorktreeId, + cx: &mut ModelContext, + ) -> impl Future { self.worktrees.retain(|worktree| { if let Some(worktree) = worktree.upgrade(cx) { let id = worktree.read(cx).id(); @@ -4205,11 +4231,14 @@ impl Project { false } }); - self.metadata_changed(cx); - cx.notify(); + self.metadata_changed(cx) } - fn add_worktree(&mut self, worktree: &ModelHandle, cx: &mut ModelContext) { + fn add_worktree( + &mut self, + worktree: &ModelHandle, + cx: &mut ModelContext, + ) -> impl Future { cx.observe(worktree, |_, _, cx| cx.notify()).detach(); if worktree.read(cx).is_local() { cx.subscribe(worktree, |this, worktree, event, cx| match event { @@ -4233,15 +4262,13 @@ impl Project { .push(WorktreeHandle::Weak(worktree.downgrade())); } - self.metadata_changed(cx); cx.observe_release(worktree, |this, worktree, cx| { - this.remove_worktree(worktree.id(), cx); - cx.notify(); + let _ = this.remove_worktree(worktree.id(), cx); }) .detach(); cx.emit(Event::WorktreeAdded); - cx.notify(); + self.metadata_changed(cx) } fn update_local_worktree_buffers( @@ -4558,11 +4585,11 @@ impl Project { } else { let worktree = 
Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx); - this.add_worktree(&worktree, cx); + let _ = this.add_worktree(&worktree, cx); } } - this.metadata_changed(cx); + let _ = this.metadata_changed(cx); for (id, _) in old_worktrees_by_id { cx.emit(Event::WorktreeRemoved(id)); } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 9db524ee9b..2296741ed3 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -1531,7 +1531,8 @@ impl Workspace { RemoveWorktreeFromProject(worktree_id): &RemoveWorktreeFromProject, cx: &mut ViewContext, ) { - self.project + let _ = self + .project .update(cx, |project, cx| project.remove_worktree(*worktree_id, cx)); } @@ -3177,7 +3178,7 @@ mod tests { // Remove a project folder project.update(cx, |project, cx| { - project.remove_worktree(worktree_id, cx); + let _ = project.remove_worktree(worktree_id, cx); }); assert_eq!( cx.current_window_title(window_id).as_deref(), From 95369f92ebb91f645dfee1eccf0f981081ef50ab Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 16 Nov 2022 15:41:33 +0100 Subject: [PATCH 034/240] Move `Store::update_diagnostic_summary` to `Db` --- crates/collab/src/db.rs | 115 +++++++++++++++++++++++++-------- crates/collab/src/rpc.rs | 22 +++---- crates/collab/src/rpc/store.rs | 25 ------- 3 files changed, 97 insertions(+), 65 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index f058d3bfe1..3d913bb47d 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1724,6 +1724,81 @@ where .await } + pub async fn update_diagnostic_summary( + &self, + update: &proto::UpdateDiagnosticSummary, + connection_id: ConnectionId, + ) -> Result> { + self.transact(|mut tx| async { + let project_id = ProjectId::from_proto(update.project_id); + let worktree_id = WorktreeId::from_proto(update.worktree_id); + let summary = update + .summary + .as_ref() + .ok_or_else(|| anyhow!("invalid summary"))?; + + // Ensure the update comes from the host. + sqlx::query( + " + SELECT 1 + FROM projects + WHERE id = $1 AND host_connection_id = $2 + ", + ) + .bind(project_id) + .bind(connection_id.0 as i32) + .fetch_one(&mut tx) + .await?; + + // Update summary. + sqlx::query( + " + INSERT INTO worktree_diagnostic_summaries ( + project_id, + worktree_id, + path, + language_server_id, + error_count, + warning_count + ) + VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT (project_id, worktree_id, path) DO UPDATE SET + language_server_id = excluded.language_server_id, + error_count = excluded.error_count, + warning_count = excluded.warning_count + ", + ) + .bind(project_id) + .bind(worktree_id) + .bind(&summary.path) + .bind(summary.language_server_id as i64) + .bind(summary.error_count as i32) + .bind(summary.warning_count as i32) + .execute(&mut tx) + .await?; + + let connection_ids = sqlx::query_scalar::<_, i32>( + " + SELECT connection_id + FROM project_collaborators + WHERE project_id = $1 AND connection_id != $2 + ", + ) + .bind(project_id) + .bind(connection_id.0 as i32) + .fetch_all(&mut tx) + .await?; + + tx.commit().await?; + + Ok(connection_ids + .into_iter() + .map(|connection_id| ConnectionId(connection_id as u32)) + .collect()) + }) + .await + } + pub async fn join_project( &self, project_id: ProjectId, @@ -1830,25 +1905,17 @@ where }) .collect::>(); - let mut params = "?,".repeat(worktrees.len()); - if !worktrees.is_empty() { - params.pop(); - } - // Populate worktree entries. 
{ - let query = format!( + let mut entries = sqlx::query_as::<_, WorktreeEntry>( " - SELECT * - FROM worktree_entries - WHERE project_id = ? AND worktree_id IN ({params}) + SELECT * + FROM worktree_entries + WHERE project_id = $1 ", - ); - let mut entries = sqlx::query_as::<_, WorktreeEntry>(&query).bind(project_id); - for worktree_id in worktrees.keys() { - entries = entries.bind(*worktree_id); - } - let mut entries = entries.fetch(&mut tx); + ) + .bind(project_id) + .fetch(&mut tx); while let Some(entry) = entries.next().await { let entry = entry?; if let Some(worktree) = worktrees.get_mut(&entry.worktree_id) { @@ -1870,19 +1937,15 @@ where // Populate worktree diagnostic summaries. { - let query = format!( + let mut summaries = sqlx::query_as::<_, WorktreeDiagnosticSummary>( " - SELECT * - FROM worktree_diagnostic_summaries - WHERE project_id = $1 AND worktree_id IN ({params}) + SELECT * + FROM worktree_diagnostic_summaries + WHERE project_id = $1 ", - ); - let mut summaries = - sqlx::query_as::<_, WorktreeDiagnosticSummary>(&query).bind(project_id); - for worktree_id in worktrees.keys() { - summaries = summaries.bind(*worktree_id); - } - let mut summaries = summaries.fetch(&mut tx); + ) + .bind(project_id) + .fetch(&mut tx); while let Some(summary) = summaries.next().await { let summary = summary?; if let Some(worktree) = worktrees.get_mut(&summary.worktree_id) { diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 9f7d21a1a9..ac971f8f03 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1103,7 +1103,7 @@ impl Server { request: Message, response: Response, ) -> Result<()> { - let connection_ids = self + let guest_connection_ids = self .app_state .db .update_worktree(&request.payload, request.sender_connection_id) @@ -1111,7 +1111,7 @@ impl Server { broadcast( request.sender_connection_id, - connection_ids, + guest_connection_ids, |connection_id| { self.peer.forward_send( request.sender_connection_id, @@ -1128,21 +1128,15 @@ impl Server { self: Arc, request: Message, ) -> Result<()> { - let summary = request - .payload - .summary - .clone() - .ok_or_else(|| anyhow!("invalid summary"))?; - let receiver_ids = self.store().await.update_diagnostic_summary( - ProjectId::from_proto(request.payload.project_id), - request.payload.worktree_id, - request.sender_connection_id, - summary, - )?; + let guest_connection_ids = self + .app_state + .db + .update_diagnostic_summary(&request.payload, request.sender_connection_id) + .await?; broadcast( request.sender_connection_id, - receiver_ids, + guest_connection_ids, |connection_id| { self.peer.forward_send( request.sender_connection_id, diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index f694440a50..1be778e83a 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -251,31 +251,6 @@ impl Store { } } - pub fn update_diagnostic_summary( - &mut self, - project_id: ProjectId, - worktree_id: u64, - connection_id: ConnectionId, - summary: proto::DiagnosticSummary, - ) -> Result> { - let project = self - .projects - .get_mut(&project_id) - .ok_or_else(|| anyhow!("no such project"))?; - if project.host_connection_id == connection_id { - let worktree = project - .worktrees - .get_mut(&worktree_id) - .ok_or_else(|| anyhow!("no such worktree"))?; - worktree - .diagnostic_summaries - .insert(summary.path.clone().into(), summary); - return Ok(project.connection_ids()); - } - - Err(anyhow!("no such worktree"))? 
- } - pub fn start_language_server( &mut self, project_id: ProjectId, From 9bc57c0c61df9e8c3cf6429fb530cac77dac7577 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 16 Nov 2022 15:48:26 +0100 Subject: [PATCH 035/240] Move `Store::start_language_server` to `Db` --- .../20221109000000_test_schema.sql | 6 +- .../20221111092550_reconnection_support.sql | 8 +-- crates/collab/src/db.rs | 62 +++++++++++++++++++ crates/collab/src/rpc.rs | 17 +++-- crates/collab/src/rpc/store.rs | 18 ------ 5 files changed, 76 insertions(+), 35 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 9914831bba..66925fddd5 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -48,8 +48,8 @@ CREATE TABLE "projects" ( ); CREATE TABLE "worktrees" ( - "id" INTEGER NOT NULL, "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, + "id" INTEGER NOT NULL, "root_name" VARCHAR NOT NULL, "abs_path" VARCHAR NOT NULL, "visible" BOOL NOT NULL, @@ -60,9 +60,9 @@ CREATE TABLE "worktrees" ( CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id"); CREATE TABLE "worktree_entries" ( - "id" INTEGER NOT NULL, "project_id" INTEGER NOT NULL, "worktree_id" INTEGER NOT NULL, + "id" INTEGER NOT NULL, "is_dir" BOOL NOT NULL, "path" VARCHAR NOT NULL, "inode" INTEGER NOT NULL, @@ -76,9 +76,9 @@ CREATE TABLE "worktree_entries" ( CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id"); CREATE TABLE "worktree_diagnostic_summaries" ( - "path" VARCHAR NOT NULL, "project_id" INTEGER NOT NULL, "worktree_id" INTEGER NOT NULL, + "path" VARCHAR NOT NULL, "language_server_id" INTEGER NOT NULL, "error_count" INTEGER NOT NULL, "warning_count" INTEGER NOT NULL, diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index 8cd53726fd..4f4ad6aede 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -10,8 +10,8 @@ ALTER TABLE "projects" DROP COLUMN "unregistered"; CREATE TABLE "worktrees" ( - "id" INTEGER NOT NULL, "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, + "id" INTEGER NOT NULL, "root_name" VARCHAR NOT NULL, "abs_path" VARCHAR NOT NULL, "visible" BOOL NOT NULL, @@ -22,9 +22,9 @@ CREATE TABLE "worktrees" ( CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id"); CREATE TABLE "worktree_entries" ( - "id" INTEGER NOT NULL, "project_id" INTEGER NOT NULL, "worktree_id" INTEGER NOT NULL, + "id" INTEGER NOT NULL, "is_dir" BOOL NOT NULL, "path" VARCHAR NOT NULL, "inode" INTEGER NOT NULL, @@ -38,9 +38,9 @@ CREATE TABLE "worktree_entries" ( CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id"); CREATE TABLE "worktree_diagnostic_summaries" ( - "path" VARCHAR NOT NULL, "project_id" INTEGER NOT NULL, "worktree_id" INTEGER NOT NULL, + "path" VARCHAR NOT NULL, "language_server_id" INTEGER NOT NULL, "error_count" INTEGER NOT NULL, "warning_count" INTEGER NOT NULL, @@ -50,8 +50,8 @@ CREATE TABLE "worktree_diagnostic_summaries" ( CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id"); 
CREATE TABLE "language_servers" ( - "id" INTEGER NOT NULL, "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, + "id" INTEGER NOT NULL, "name" VARCHAR NOT NULL, PRIMARY KEY(project_id, id) ); diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 3d913bb47d..9163e71aa4 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1799,6 +1799,68 @@ where .await } + pub async fn start_language_server( + &self, + update: &proto::StartLanguageServer, + connection_id: ConnectionId, + ) -> Result> { + self.transact(|mut tx| async { + let project_id = ProjectId::from_proto(update.project_id); + let server = update + .server + .as_ref() + .ok_or_else(|| anyhow!("invalid language server"))?; + + // Ensure the update comes from the host. + sqlx::query( + " + SELECT 1 + FROM projects + WHERE id = $1 AND host_connection_id = $2 + ", + ) + .bind(project_id) + .bind(connection_id.0 as i32) + .fetch_one(&mut tx) + .await?; + + // Add the newly-started language server. + sqlx::query( + " + INSERT INTO language_servers (project_id, id, name) + VALUES ($1, $2, $3) + ON CONFLICT (project_id, id) DO UPDATE SET + name = excluded.name + ", + ) + .bind(project_id) + .bind(server.id as i64) + .bind(&server.name) + .execute(&mut tx) + .await?; + + let connection_ids = sqlx::query_scalar::<_, i32>( + " + SELECT connection_id + FROM project_collaborators + WHERE project_id = $1 AND connection_id != $2 + ", + ) + .bind(project_id) + .bind(connection_id.0 as i32) + .fetch_all(&mut tx) + .await?; + + tx.commit().await?; + + Ok(connection_ids + .into_iter() + .map(|connection_id| ConnectionId(connection_id as u32)) + .collect()) + }) + .await + } + pub async fn join_project( &self, project_id: ProjectId, diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index ac971f8f03..5e3018160c 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1152,18 +1152,15 @@ impl Server { self: Arc, request: Message, ) -> Result<()> { - let receiver_ids = self.store().await.start_language_server( - ProjectId::from_proto(request.payload.project_id), - request.sender_connection_id, - request - .payload - .server - .clone() - .ok_or_else(|| anyhow!("invalid language server"))?, - )?; + let guest_connection_ids = self + .app_state + .db + .start_language_server(&request.payload, request.sender_connection_id) + .await?; + broadcast( request.sender_connection_id, - receiver_ids, + guest_connection_ids, |connection_id| { self.peer.forward_send( request.sender_connection_id, diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index 1be778e83a..57dd726d3f 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -251,24 +251,6 @@ impl Store { } } - pub fn start_language_server( - &mut self, - project_id: ProjectId, - connection_id: ConnectionId, - language_server: proto::LanguageServer, - ) -> Result> { - let project = self - .projects - .get_mut(&project_id) - .ok_or_else(|| anyhow!("no such project"))?; - if project.host_connection_id == connection_id { - project.language_servers.push(language_server); - return Ok(project.connection_ids()); - } - - Err(anyhow!("no such project"))? 
- } - pub fn leave_project( &mut self, project_id: ProjectId, From faf265328e9adc46423766f9275a7a7a668a99de Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 16 Nov 2022 16:03:01 +0100 Subject: [PATCH 036/240] Wait for acknowledgment before sending the next diagnostic summary --- crates/collab/src/rpc.rs | 5 ++- crates/project/src/worktree.rs | 57 ++++++++++++++++++---------------- crates/rpc/src/proto.rs | 1 + 3 files changed, 35 insertions(+), 28 deletions(-) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 5e3018160c..db8f25fdb2 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -154,7 +154,7 @@ impl Server { .add_request_handler(Server::update_worktree) .add_message_handler(Server::start_language_server) .add_message_handler(Server::update_language_server) - .add_message_handler(Server::update_diagnostic_summary) + .add_request_handler(Server::update_diagnostic_summary) .add_request_handler(Server::forward_project_request::) .add_request_handler(Server::forward_project_request::) .add_request_handler(Server::forward_project_request::) @@ -1127,6 +1127,7 @@ impl Server { async fn update_diagnostic_summary( self: Arc, request: Message, + response: Response, ) -> Result<()> { let guest_connection_ids = self .app_state @@ -1145,6 +1146,8 @@ impl Server { ) }, ); + + response.send(proto::Ack {})?; Ok(()) } diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index ddd4a7a6c8..836ac55b66 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -166,7 +166,9 @@ enum ScanState { struct ShareState { project_id: u64, snapshots_tx: watch::Sender, - _maintain_remote_snapshot: Option>>, + diagnostic_summaries_tx: mpsc::UnboundedSender<(Arc, DiagnosticSummary)>, + _maintain_remote_snapshot: Task>, + _maintain_remote_diagnostic_summaries: Task<()>, } pub enum Event { @@ -524,18 +526,9 @@ impl LocalWorktree { let updated = !old_summary.is_empty() || !new_summary.is_empty(); if updated { if let Some(share) = self.share.as_ref() { - self.client - .send(proto::UpdateDiagnosticSummary { - project_id: share.project_id, - worktree_id: self.id().to_proto(), - summary: Some(proto::DiagnosticSummary { - path: worktree_path.to_string_lossy().to_string(), - language_server_id: language_server_id as u64, - error_count: new_summary.error_count as u32, - warning_count: new_summary.warning_count as u32, - }), - }) - .log_err(); + let _ = share + .diagnostic_summaries_tx + .unbounded_send((worktree_path.clone(), new_summary)); } } @@ -967,22 +960,10 @@ impl LocalWorktree { let _ = share_tx.send(Ok(())); } else { let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot()); - let rpc = self.client.clone(); let worktree_id = cx.model_id() as u64; - for (path, summary) in self.diagnostic_summaries.iter() { - if let Err(e) = rpc.send(proto::UpdateDiagnosticSummary { - project_id, - worktree_id, - summary: Some(summary.to_proto(&path.0)), - }) { - return Task::ready(Err(e)); - } - } - let maintain_remote_snapshot = cx.background().spawn({ - let rpc = rpc; - + let rpc = self.client.clone(); async move { let mut prev_snapshot = match snapshots_rx.recv().await { Some(snapshot) => { @@ -1029,10 +1010,32 @@ impl LocalWorktree { } .log_err() }); + + let (diagnostic_summaries_tx, mut diagnostic_summaries_rx) = mpsc::unbounded(); + for (path, summary) in self.diagnostic_summaries.iter() { + let _ = diagnostic_summaries_tx.unbounded_send((path.0.clone(), summary.clone())); + } + let 
maintain_remote_diagnostic_summaries = cx.background().spawn({ + let rpc = self.client.clone(); + async move { + while let Some((path, summary)) = diagnostic_summaries_rx.next().await { + rpc.request(proto::UpdateDiagnosticSummary { + project_id, + worktree_id, + summary: Some(summary.to_proto(&path)), + }) + .await + .log_err(); + } + } + }); + self.share = Some(ShareState { project_id, snapshots_tx, - _maintain_remote_snapshot: Some(maintain_remote_snapshot), + diagnostic_summaries_tx, + _maintain_remote_snapshot: maintain_remote_snapshot, + _maintain_remote_diagnostic_summaries: maintain_remote_diagnostic_summaries, }); } diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index 6d9bc9a0aa..50f3c57f2a 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -228,6 +228,7 @@ request_messages!( (ShareProject, ShareProjectResponse), (Test, Test), (UpdateBuffer, Ack), + (UpdateDiagnosticSummary, Ack), (UpdateParticipantLocation, Ack), (UpdateProject, Ack), (UpdateWorktree, Ack), From adf43c87dd2f4f8a76e97ff842d2f8eac82aef4c Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 16 Nov 2022 17:19:06 +0100 Subject: [PATCH 037/240] Batch some of the new queries in `Db` Co-Authored-By: Nathan Sobo --- crates/collab/src/db.rs | 162 ++++++++++++++++++++++++---------------- 1 file changed, 97 insertions(+), 65 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 9163e71aa4..d517bdd1df 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1354,7 +1354,7 @@ where .bind(room_id) .fetch(&mut *tx); - let mut participants = Vec::new(); + let mut participants = HashMap::default(); let mut pending_participants = Vec::new(); while let Some(participant) = db_participants.next().await { let ( @@ -1381,12 +1381,15 @@ where Default::default(), )), }; - participants.push(proto::Participant { - user_id: user_id.to_proto(), - peer_id: answering_connection_id as u32, - projects: Default::default(), - location: Some(proto::ParticipantLocation { variant: location }), - }); + participants.insert( + answering_connection_id, + proto::Participant { + user_id: user_id.to_proto(), + peer_id: answering_connection_id as u32, + projects: Default::default(), + location: Some(proto::ParticipantLocation { variant: location }), + }, + ); } else { pending_participants.push(proto::PendingParticipant { user_id: user_id.to_proto(), @@ -1397,41 +1400,42 @@ where } drop(db_participants); - for participant in &mut participants { - let mut entries = sqlx::query_as::<_, (ProjectId, String)>( - " - SELECT projects.id, worktrees.root_name - FROM projects - LEFT JOIN worktrees ON projects.id = worktrees.project_id - WHERE room_id = $1 AND host_connection_id = $2 - ", - ) - .bind(room_id) - .bind(participant.peer_id as i32) - .fetch(&mut *tx); + let mut rows = sqlx::query_as::<_, (i32, ProjectId, Option)>( + " + SELECT host_connection_id, projects.id, worktrees.root_name + FROM projects + LEFT JOIN worktrees ON projects.id = worktrees.project_id + WHERE room_id = $1 + ", + ) + .bind(room_id) + .fetch(&mut *tx); - let mut projects = HashMap::default(); - while let Some(entry) = entries.next().await { - let (project_id, worktree_root_name) = entry?; - let participant_project = - projects - .entry(project_id) - .or_insert(proto::ParticipantProject { - id: project_id.to_proto(), - worktree_root_names: Default::default(), - }); - participant_project - .worktree_root_names - .push(worktree_root_name); + while let Some(row) = rows.next().await { + let (connection_id, 
project_id, worktree_root_name) = row?;
+            if let Some(participant) = participants.get_mut(&connection_id) {
+                let project = if let Some(project) = participant
+                    .projects
+                    .iter_mut()
+                    .find(|project| project.id == project_id.to_proto())
+                {
+                    project
+                } else {
+                    participant.projects.push(proto::ParticipantProject {
+                        id: project_id.to_proto(),
+                        worktree_root_names: Default::default(),
+                    });
+                    participant.projects.last_mut().unwrap()
+                };
+                project.worktree_root_names.extend(worktree_root_name);
+            }
         }
-
-            participant.projects = projects.into_values().collect();
-        }
+
         Ok(proto::Room {
             id: room.id.to_proto(),
             version: room.version as u64,
             live_kit_room: room.live_kit_room,
-            participants,
+            participants: participants.into_values().collect(),
             pending_participants,
         })
     }
@@ -1472,22 +1476,36 @@ where
         .fetch_one(&mut tx)
         .await?;
 
-        for worktree in worktrees {
-            sqlx::query(
+        if !worktrees.is_empty() {
+            let mut params = "(?, ?, ?, ?, ?, ?, ?),".repeat(worktrees.len());
+            params.pop();
+            let query = format!(
                 "
-                INSERT INTO worktrees (project_id, id, root_name, abs_path, visible, scan_id, is_complete)
-                VALUES ($1, $2, $3, $4, $5, $6, $7)
-                ",
-            )
-            .bind(project_id)
-            .bind(worktree.id as i32)
-            .bind(&worktree.root_name)
-            .bind(&worktree.abs_path)
-            .bind(worktree.visible)
-            .bind(0)
-            .bind(false)
-            .execute(&mut tx)
-            .await?;
+                INSERT INTO worktrees (
+                    project_id,
+                    id,
+                    root_name,
+                    abs_path,
+                    visible,
+                    scan_id,
+                    is_complete
+                )
+                VALUES {params}
+                "
+            );
+
+            let mut query = sqlx::query(&query);
+            for worktree in worktrees {
+                query = query
+                    .bind(project_id)
+                    .bind(worktree.id as i32)
+                    .bind(&worktree.root_name)
+                    .bind(&worktree.abs_path)
+                    .bind(worktree.visible)
+                    .bind(0)
+                    .bind(false);
+            }
+            query.execute(&mut tx).await?;
         }
 
         sqlx::query(
@@ -1535,23 +1553,37 @@ where
         .fetch_one(&mut tx)
         .await?;
 
-        for worktree in worktrees {
-            sqlx::query(
+        if !worktrees.is_empty() {
+            let mut params = "(?, ?, ?, ?, ?, ?, ?),".repeat(worktrees.len());
+            params.pop();
+            let query = format!(
                 "
-                INSERT INTO worktrees (project_id, id, root_name, abs_path, visible, scan_id, is_complete)
-                VALUES ($1, $2, $3, $4, $5, $6, $7)
+                INSERT INTO worktrees (
+                    project_id,
+                    id,
+                    root_name,
+                    abs_path,
+                    visible,
+                    scan_id,
+                    is_complete
+                )
+                VALUES {params}
                 ON CONFLICT (project_id, id) DO UPDATE SET root_name = excluded.root_name
-                ",
-            )
-            .bind(project_id)
-            .bind(worktree.id as i32)
-            .bind(&worktree.root_name)
-            .bind(&worktree.abs_path)
-            .bind(worktree.visible)
-            .bind(0)
-            .bind(false)
-            .execute(&mut tx)
-            .await?;
+                "
+            );
+
+            let mut query = sqlx::query(&query);
+            for worktree in worktrees {
+                query = query
+                    .bind(project_id)
+                    .bind(worktree.id as i32)
+                    .bind(&worktree.root_name)
+                    .bind(&worktree.abs_path)
+                    .bind(worktree.visible)
+                    .bind(0)
+                    .bind(false);
+            }
+            query.execute(&mut tx).await?;
         }
 
         let mut params = "?,".repeat(worktrees.len());

From c1291a093b65f7db4042759557be10c539b02479 Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Wed, 16 Nov 2022 19:50:57 +0100
Subject: [PATCH 038/240] WIP: Allow subscribing to remote entity before
 creating a model

Co-Authored-By: Nathan Sobo
Co-Authored-By: Max Brunsfeld
---
 crates/client/src/client.rs   | 287 +++++++++++++++++++++-------------
 crates/project/src/project.rs |  30 ++--
 2 files changed, 193 insertions(+), 124 deletions(-)

diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs
index c943b27417..bad85384be 100644
--- a/crates/client/src/client.rs
+++ b/crates/client/src/client.rs
@@ -17,8 +17,7 @@ use gpui::{
     actions,
    serde_json::{self, Value},
     AnyModelHandle, AnyViewHandle, AnyWeakModelHandle, AnyWeakViewHandle, AppContext,
-    AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task, View, ViewContext,
-    ViewHandle,
+    AsyncAppContext, Entity, ModelHandle, MutableAppContext, Task, View, ViewContext, ViewHandle,
 };
 use http::HttpClient;
 use lazy_static::lazy_static;
@@ -34,6 +33,7 @@ use std::{
     convert::TryFrom,
     fmt::Write as _,
     future::Future,
+    marker::PhantomData,
     path::PathBuf,
     sync::{Arc, Weak},
     time::{Duration, Instant},
@@ -172,7 +172,7 @@ struct ClientState {
     entity_id_extractors: HashMap<TypeId, fn(&dyn AnyTypedEnvelope) -> u64>,
     _reconnect_task: Option<Task<()>>,
     reconnect_interval: Duration,
-    entities_by_type_and_remote_id: HashMap<(TypeId, u64), AnyWeakEntityHandle>,
+    entities_by_type_and_remote_id: HashMap<(TypeId, u64), WeakSubscriber>,
     models_by_message_type: HashMap<TypeId, AnyWeakModelHandle>,
     entity_types_by_message_type: HashMap<TypeId, TypeId>,
     #[allow(clippy::type_complexity)]
@@ -182,7 +182,7 @@ struct ClientState {
         dyn Send
             + Sync
             + Fn(
-                AnyEntityHandle,
+                Subscriber,
                 Box<dyn AnyTypedEnvelope>,
                 &Arc<Client>,
                 AsyncAppContext,
@@ -191,12 +191,13 @@ struct ClientState {
     >,
 }
 
-enum AnyWeakEntityHandle {
+enum WeakSubscriber {
     Model(AnyWeakModelHandle),
     View(AnyWeakViewHandle),
+    Pending(Vec<Box<dyn AnyTypedEnvelope>>),
 }
 
-enum AnyEntityHandle {
+enum Subscriber {
     Model(AnyModelHandle),
     View(AnyViewHandle),
 }
@@ -254,6 +255,54 @@ impl Drop for Subscription {
     }
 }
 
+pub struct PendingEntitySubscription<T: Entity> {
+    client: Arc<Client>,
+    remote_id: u64,
+    _entity_type: PhantomData<T>,
+    consumed: bool,
+}
+
+impl<T: Entity> PendingEntitySubscription<T> {
+    pub fn set_model(mut self, model: &ModelHandle<T>, cx: &mut AsyncAppContext) -> Subscription {
+        self.consumed = true;
+        let mut state = self.client.state.write();
+        let id = (TypeId::of::<T>(), self.remote_id);
+        let Some(WeakSubscriber::Pending(messages)) =
+            state.entities_by_type_and_remote_id.remove(&id)
+        else {
+            unreachable!()
+        };
+
+        state
+            .entities_by_type_and_remote_id
+            .insert(id, WeakSubscriber::Model(model.downgrade().into()));
+        drop(state);
+        for message in messages {
+            self.client.handle_message(message, cx);
+        }
+        Subscription::Entity {
+            client: Arc::downgrade(&self.client),
+            id,
+        }
+    }
+}
+
+impl<T: Entity> Drop for PendingEntitySubscription<T> {
+    fn drop(&mut self) {
+        if !self.consumed {
+            let mut state = self.client.state.write();
+            if let Some(WeakSubscriber::Pending(messages)) = state
+                .entities_by_type_and_remote_id
+                .remove(&(TypeId::of::<T>(), self.remote_id))
+            {
+                for message in messages {
+                    log::info!("unhandled message {}", message.payload_type_name());
+                }
+            }
+        }
+    }
+}
+
 impl Client {
     pub fn new(http: Arc<dyn HttpClient>, cx: &AppContext) -> Arc<Self> {
         Arc::new(Self {
@@ -387,26 +436,28 @@ impl Client {
         self.state
            .write()
            .entities_by_type_and_remote_id
-            .insert(id, AnyWeakEntityHandle::View(cx.weak_handle().into()));
+            .insert(id, WeakSubscriber::View(cx.weak_handle().into()));
         Subscription::Entity {
             client: Arc::downgrade(self),
             id,
         }
     }
 
-    pub fn add_model_for_remote_entity<T: Entity>(
+    pub fn subscribe_to_entity<T: Entity>(
         self: &Arc<Self>,
         remote_id: u64,
-        cx: &mut ModelContext<T>,
-    ) -> Subscription {
+    ) -> PendingEntitySubscription<T> {
         let id = (TypeId::of::<T>(), remote_id);
         self.state
             .write()
             .entities_by_type_and_remote_id
-            .insert(id, AnyWeakEntityHandle::Model(cx.weak_handle().into()));
-        Subscription::Entity {
-            client: Arc::downgrade(self),
-            id,
+            .insert(id, WeakSubscriber::Pending(Default::default()));
+
+        PendingEntitySubscription {
+            client: self.clone(),
+            remote_id,
+            consumed: false,
+            _entity_type: PhantomData,
         }
     }
 
@@ -434,7 +485,7 @@ impl Client {
         let prev_handler = state.message_handlers.insert(
message_type_id, Arc::new(move |handle, envelope, client, cx| { - let handle = if let AnyEntityHandle::Model(handle) = handle { + let handle = if let Subscriber::Model(handle) = handle { handle } else { unreachable!(); @@ -488,7 +539,7 @@ impl Client { F: 'static + Future>, { self.add_entity_message_handler::(move |handle, message, client, cx| { - if let AnyEntityHandle::View(handle) = handle { + if let Subscriber::View(handle) = handle { handler(handle.downcast::().unwrap(), message, client, cx) } else { unreachable!(); @@ -507,7 +558,7 @@ impl Client { F: 'static + Future>, { self.add_entity_message_handler::(move |handle, message, client, cx| { - if let AnyEntityHandle::Model(handle) = handle { + if let Subscriber::Model(handle) = handle { handler(handle.downcast::().unwrap(), message, client, cx) } else { unreachable!(); @@ -522,7 +573,7 @@ impl Client { H: 'static + Send + Sync - + Fn(AnyEntityHandle, TypedEnvelope, Arc, AsyncAppContext) -> F, + + Fn(Subscriber, TypedEnvelope, Arc, AsyncAppContext) -> F, F: 'static + Future>, { let model_type_id = TypeId::of::(); @@ -784,94 +835,8 @@ impl Client { let cx = cx.clone(); let this = self.clone(); async move { - let mut message_id = 0_usize; while let Some(message) = incoming.next().await { - let mut state = this.state.write(); - message_id += 1; - let type_name = message.payload_type_name(); - let payload_type_id = message.payload_type_id(); - let sender_id = message.original_sender_id().map(|id| id.0); - - let model = state - .models_by_message_type - .get(&payload_type_id) - .and_then(|model| model.upgrade(&cx)) - .map(AnyEntityHandle::Model) - .or_else(|| { - let entity_type_id = - *state.entity_types_by_message_type.get(&payload_type_id)?; - let entity_id = state - .entity_id_extractors - .get(&message.payload_type_id()) - .map(|extract_entity_id| { - (extract_entity_id)(message.as_ref()) - })?; - - let entity = state - .entities_by_type_and_remote_id - .get(&(entity_type_id, entity_id))?; - if let Some(entity) = entity.upgrade(&cx) { - Some(entity) - } else { - state - .entities_by_type_and_remote_id - .remove(&(entity_type_id, entity_id)); - None - } - }); - - let model = if let Some(model) = model { - model - } else { - log::info!("unhandled message {}", type_name); - continue; - }; - - let handler = state.message_handlers.get(&payload_type_id).cloned(); - // Dropping the state prevents deadlocks if the handler interacts with rpc::Client. - // It also ensures we don't hold the lock while yielding back to the executor, as - // that might cause the executor thread driving this future to block indefinitely. - drop(state); - - if let Some(handler) = handler { - let future = handler(model, message, &this, cx.clone()); - let client_id = this.id; - log::debug!( - "rpc message received. client_id:{}, message_id:{}, sender_id:{:?}, type:{}", - client_id, - message_id, - sender_id, - type_name - ); - cx.foreground() - .spawn(async move { - match future.await { - Ok(()) => { - log::debug!( - "rpc message handled. client_id:{}, message_id:{}, sender_id:{:?}, type:{}", - client_id, - message_id, - sender_id, - type_name - ); - } - Err(error) => { - log::error!( - "error handling message. client_id:{}, message_id:{}, sender_id:{:?}, type:{}, error:{:?}", - client_id, - message_id, - sender_id, - type_name, - error - ); - } - } - }) - .detach(); - } else { - log::info!("unhandled message {}", type_name); - } - + this.handle_message(message, &cx); // Don't starve the main thread when receiving lots of messages at once. 
smol::future::yield_now().await; } @@ -1218,6 +1183,97 @@ impl Client { self.peer.respond_with_error(receipt, error) } + fn handle_message( + self: &Arc, + message: Box, + cx: &AsyncAppContext, + ) { + let mut state = self.state.write(); + let type_name = message.payload_type_name(); + let payload_type_id = message.payload_type_id(); + let sender_id = message.original_sender_id().map(|id| id.0); + + let mut subscriber = None; + + if let Some(message_model) = state + .models_by_message_type + .get(&payload_type_id) + .and_then(|model| model.upgrade(cx)) + { + subscriber = Some(Subscriber::Model(message_model)); + } else if let Some((extract_entity_id, entity_type_id)) = + state.entity_id_extractors.get(&payload_type_id).zip( + state + .entity_types_by_message_type + .get(&payload_type_id) + .copied(), + ) + { + let entity_id = (extract_entity_id)(message.as_ref()); + + match state + .entities_by_type_and_remote_id + .get_mut(&(entity_type_id, entity_id)) + { + Some(WeakSubscriber::Pending(pending)) => { + pending.push(message); + return; + } + Some(weak_subscriber @ _) => subscriber = weak_subscriber.upgrade(cx), + _ => {} + } + } + + let subscriber = if let Some(subscriber) = subscriber { + subscriber + } else { + log::info!("unhandled message {}", type_name); + return; + }; + + let handler = state.message_handlers.get(&payload_type_id).cloned(); + // Dropping the state prevents deadlocks if the handler interacts with rpc::Client. + // It also ensures we don't hold the lock while yielding back to the executor, as + // that might cause the executor thread driving this future to block indefinitely. + drop(state); + + if let Some(handler) = handler { + let future = handler(subscriber, message, &self, cx.clone()); + let client_id = self.id; + log::debug!( + "rpc message received. client_id:{}, sender_id:{:?}, type:{}", + client_id, + sender_id, + type_name + ); + cx.foreground() + .spawn(async move { + match future.await { + Ok(()) => { + log::debug!( + "rpc message handled. client_id:{}, sender_id:{:?}, type:{}", + client_id, + sender_id, + type_name + ); + } + Err(error) => { + log::error!( + "error handling message. 
client_id:{}, sender_id:{:?}, type:{}, error:{:?}", + client_id, + sender_id, + type_name, + error + ); + } + } + }) + .detach(); + } else { + log::info!("unhandled message {}", type_name); + } + } + pub fn start_telemetry(&self, db: Db) { self.telemetry.start(db.clone()); } @@ -1231,11 +1287,12 @@ impl Client { } } -impl AnyWeakEntityHandle { - fn upgrade(&self, cx: &AsyncAppContext) -> Option { +impl WeakSubscriber { + fn upgrade(&self, cx: &AsyncAppContext) -> Option { match self { - AnyWeakEntityHandle::Model(handle) => handle.upgrade(cx).map(AnyEntityHandle::Model), - AnyWeakEntityHandle::View(handle) => handle.upgrade(cx).map(AnyEntityHandle::View), + WeakSubscriber::Model(handle) => handle.upgrade(cx).map(Subscriber::Model), + WeakSubscriber::View(handle) => handle.upgrade(cx).map(Subscriber::View), + WeakSubscriber::Pending(_) => None, } } } @@ -1480,11 +1537,17 @@ mod tests { subscription: None, }); - let _subscription1 = model1.update(cx, |_, cx| client.add_model_for_remote_entity(1, cx)); - let _subscription2 = model2.update(cx, |_, cx| client.add_model_for_remote_entity(2, cx)); + let _subscription1 = client + .subscribe_to_entity(1) + .set_model(&model1, &mut cx.to_async()); + let _subscription2 = client + .subscribe_to_entity(2) + .set_model(&model2, &mut cx.to_async()); // Ensure dropping a subscription for the same entity type still allows receiving of // messages for other entity IDs of the same type. - let subscription3 = model3.update(cx, |_, cx| client.add_model_for_remote_entity(3, cx)); + let subscription3 = client + .subscribe_to_entity(3) + .set_model(&model3, &mut cx.to_async()); drop(subscription3); server.send(proto::JoinProject { project_id: 1 }); diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 436b2d92a2..503ae8d4b2 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -457,22 +457,23 @@ impl Project { ) -> Result, JoinProjectError> { client.authenticate_and_connect(true, &cx).await?; + let subscription = client.subscribe_to_entity(remote_id); let response = client .request(proto::JoinProject { project_id: remote_id, }) .await?; + let this = cx.add_model(|cx| { + let replica_id = response.replica_id as ReplicaId; - let replica_id = response.replica_id as ReplicaId; + let mut worktrees = Vec::new(); + for worktree in response.worktrees { + let worktree = cx.update(|cx| { + Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx) + }); + worktrees.push(worktree); + } - let mut worktrees = Vec::new(); - for worktree in response.worktrees { - let worktree = cx - .update(|cx| Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx)); - worktrees.push(worktree); - } - - let this = cx.add_model(|cx: &mut ModelContext| { let mut this = Self { worktrees: Vec::new(), loading_buffers: Default::default(), @@ -488,7 +489,7 @@ impl Project { fs, next_entry_id: Default::default(), next_diagnostic_group_id: Default::default(), - client_subscriptions: vec![client.add_model_for_remote_entity(remote_id, cx)], + client_subscriptions: Default::default(), _subscriptions: Default::default(), client: client.clone(), client_state: Some(ProjectClientState::Remote { @@ -541,6 +542,7 @@ impl Project { } this }); + let subscription = subscription.set_model(&this, &mut cx); let user_ids = response .collaborators @@ -558,6 +560,7 @@ impl Project { this.update(&mut cx, |this, _| { this.collaborators = collaborators; + this.client_subscriptions.push(subscription); }); Ok(this) @@ -1035,8 +1038,11 @@ impl 
Project { }); } - self.client_subscriptions - .push(self.client.add_model_for_remote_entity(project_id, cx)); + self.client_subscriptions.push( + self.client + .subscribe_to_entity(project_id) + .set_model(&cx.handle(), &mut cx.to_async()), + ); let _ = self.metadata_changed(cx); cx.emit(Event::RemoteIdChanged(Some(project_id))); cx.notify(); From bdb521cb6beda3618bdaf868e0ca874d26f726cb Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Wed, 16 Nov 2022 14:24:26 -0700 Subject: [PATCH 039/240] Fix typo in query --- crates/collab/src/db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index d517bdd1df..41cde3bf42 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1567,7 +1567,7 @@ where scan_id, is_complete ) - VALUES ${params} + VALUES {params} ON CONFLICT (project_id, id) DO UPDATE SET root_name = excluded.root_name " ); From e5f05c9f3b1f5ffa595769c235d192e1a3e5981c Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Wed, 16 Nov 2022 14:24:26 -0700 Subject: [PATCH 040/240] Move leave_project from Store to db module --- crates/collab/src/db.rs | 70 ++++++++++++++++++++++++++++++++-- crates/collab/src/rpc.rs | 27 ++++++------- crates/collab/src/rpc/store.rs | 31 --------------- crates/rpc/src/peer.rs | 2 +- 4 files changed, 82 insertions(+), 48 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 41cde3bf42..24b0feb2e9 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1209,6 +1209,7 @@ where id: collaborator.project_id, host_user_id: Default::default(), connection_ids: Default::default(), + host_connection_id: Default::default(), }); let collaborator_connection_id = @@ -1219,6 +1220,8 @@ where if collaborator.is_host { left_project.host_user_id = collaborator.user_id; + left_project.host_connection_id = + ConnectionId(collaborator.connection_id as u32); } } } @@ -1474,7 +1477,8 @@ where .bind(user_id) .bind(connection_id.0 as i32) .fetch_one(&mut tx) - .await?; + .await + .unwrap(); if !worktrees.is_empty() { let mut params = "(?, ?, ?, ?, ?, ?, ?),".repeat(worktrees.len()); @@ -1505,7 +1509,7 @@ where .bind(0) .bind(false); } - query.execute(&mut tx).await?; + query.execute(&mut tx).await.unwrap(); } sqlx::query( @@ -1526,7 +1530,8 @@ where .bind(0) .bind(true) .execute(&mut tx) - .await?; + .await + .unwrap(); let room = self.commit_room_transaction(room_id, tx).await?; Ok((project_id, room)) @@ -2086,6 +2091,64 @@ where .await } + pub async fn leave_project( + &self, + project_id: ProjectId, + connection_id: ConnectionId, + ) -> Result { + self.transact(|mut tx| async move { + let result = sqlx::query( + " + DELETE FROM project_collaborators + WHERE project_id = $1 AND connection_id = $2 + ", + ) + .bind(project_id) + .bind(connection_id.0 as i32) + .execute(&mut tx) + .await?; + + if result.rows_affected() != 1 { + Err(anyhow!("not a collaborator on this project"))?; + } + + let connection_ids = sqlx::query_scalar::<_, i32>( + " + SELECT connection_id + FROM project_collaborators + WHERE project_id = $1 + ", + ) + .bind(project_id) + .fetch_all(&mut tx) + .await? 
+ .into_iter() + .map(|id| ConnectionId(id as u32)) + .collect(); + + let (host_user_id, host_connection_id) = sqlx::query_as::<_, (i32, i32)>( + " + SELECT host_user_id, host_connection_id + FROM projects + WHERE id = $1 + ", + ) + .bind(project_id) + .fetch_one(&mut tx) + .await?; + + tx.commit().await?; + + Ok(LeftProject { + id: project_id, + host_user_id: UserId(host_user_id), + host_connection_id: ConnectionId(host_connection_id as u32), + connection_ids, + }) + }) + .await + } + pub async fn project_collaborators( &self, project_id: ProjectId, @@ -2645,6 +2708,7 @@ struct LanguageServer { pub struct LeftProject { pub id: ProjectId, pub host_user_id: UserId, + pub host_connection_id: ConnectionId, pub connection_ids: Vec, } diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index db8f25fdb2..c32bdb5008 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1041,8 +1041,11 @@ impl Server { let project_id = ProjectId::from_proto(request.payload.project_id); let project; { - let mut store = self.store().await; - project = store.leave_project(project_id, sender_id)?; + project = self + .app_state + .db + .leave_project(project_id, sender_id) + .await?; tracing::info!( %project_id, host_user_id = %project.host_user_id, @@ -1050,17 +1053,15 @@ impl Server { "leave project" ); - if project.remove_collaborator { - broadcast(sender_id, project.connection_ids, |conn_id| { - self.peer.send( - conn_id, - proto::RemoveProjectCollaborator { - project_id: project_id.to_proto(), - peer_id: sender_id.0, - }, - ) - }); - } + broadcast(sender_id, project.connection_ids, |conn_id| { + self.peer.send( + conn_id, + proto::RemoveProjectCollaborator { + project_id: project_id.to_proto(), + peer_id: sender_id.0, + }, + ) + }); } Ok(()) diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index 57dd726d3f..9c93f0daca 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -251,37 +251,6 @@ impl Store { } } - pub fn leave_project( - &mut self, - project_id: ProjectId, - connection_id: ConnectionId, - ) -> Result { - let project = self - .projects - .get_mut(&project_id) - .ok_or_else(|| anyhow!("no such project"))?; - - // If the connection leaving the project is a collaborator, remove it. 
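// Design note on the `db::leave_project` introduced above: the DELETE doubles
// as the membership check, so there is no separate read-then-check step to
// race against. The pattern in isolation (sqlx 0.6 style, as used throughout
// this series; `tx`, `project_id`, and `connection_id` as in the code above):
//
//     let result = sqlx::query(
//         "
//         DELETE FROM project_collaborators
//         WHERE project_id = $1 AND connection_id = $2
//         ",
//     )
//     .bind(project_id)
//     .bind(connection_id.0 as i32)
//     .execute(&mut tx)
//     .await?;
//     if result.rows_affected() == 0 {
//         return Err(anyhow!("not a collaborator on this project"));
//     }
//
// The `!= 1` guard used at this point in the series is changed to `== 0` in a
// later patch below; the two behave the same as long as a connection has at
// most one collaborator row per project.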
- let remove_collaborator = if let Some(guest) = project.guests.remove(&connection_id) { - project.active_replica_ids.remove(&guest.replica_id); - true - } else { - false - }; - - if let Some(connection) = self.connections.get_mut(&connection_id) { - connection.projects.remove(&project_id); - } - - Ok(LeftProject { - id: project.id, - host_connection_id: project.host_connection_id, - host_user_id: project.host.user_id, - connection_ids: project.connection_ids(), - remove_collaborator, - }) - } - #[cfg(test)] pub fn check_invariants(&self) { for (connection_id, connection) in &self.connections { diff --git a/crates/rpc/src/peer.rs b/crates/rpc/src/peer.rs index 4dbade4fec..66ba6a4029 100644 --- a/crates/rpc/src/peer.rs +++ b/crates/rpc/src/peer.rs @@ -24,7 +24,7 @@ use std::{ }; use tracing::instrument; -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Serialize)] +#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Serialize)] pub struct ConnectionId(pub u32); impl fmt::Display for ConnectionId { From 94fe93c6eee43605f837a9944221085b9a0015f4 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Wed, 16 Nov 2022 14:24:26 -0700 Subject: [PATCH 041/240] Move unshare_project to db module --- crates/collab/src/db.rs | 83 ++++++++++++++++++++-------------- crates/collab/src/rpc.rs | 13 ++++-- crates/collab/src/rpc/store.rs | 66 +-------------------------- 3 files changed, 59 insertions(+), 103 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 24b0feb2e9..bc74a8e530 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1330,6 +1330,27 @@ where Ok(room) } + async fn get_guest_connection_ids( + &self, + project_id: ProjectId, + tx: &mut sqlx::Transaction<'_, D>, + ) -> Result> { + let mut guest_connection_ids = Vec::new(); + let mut db_guest_connection_ids = sqlx::query_scalar::<_, i32>( + " + SELECT connection_id + FROM project_collaborators + WHERE project_id = $1 AND is_host = FALSE + ", + ) + .bind(project_id) + .fetch(tx); + while let Some(connection_id) = db_guest_connection_ids.next().await { + guest_connection_ids.push(ConnectionId(connection_id? as u32)); + } + Ok(guest_connection_ids) + } + async fn get_room( &self, room_id: RoomId, @@ -1539,6 +1560,31 @@ where .await } + pub async fn unshare_project( + &self, + project_id: ProjectId, + connection_id: ConnectionId, + ) -> Result<(proto::Room, Vec)> { + self.transact(|mut tx| async move { + let guest_connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; + let room_id: RoomId = sqlx::query_scalar( + " + DELETE FROM projects + WHERE id = $1 AND host_connection_id = $2 + RETURNING room_id + ", + ) + .bind(project_id) + .bind(connection_id.0 as i32) + .fetch_one(&mut tx) + .await?; + let room = self.commit_room_transaction(room_id, tx).await?; + + Ok((room, guest_connection_ids)) + }) + .await + } + pub async fn update_project( &self, project_id: ProjectId, @@ -1608,23 +1654,9 @@ where } query.execute(&mut tx).await?; - let mut guest_connection_ids = Vec::new(); - { - let mut db_guest_connection_ids = sqlx::query_scalar::<_, i32>( - " - SELECT connection_id - FROM project_collaborators - WHERE project_id = $1 AND is_host = FALSE - ", - ) - .bind(project_id) - .fetch(&mut tx); - while let Some(connection_id) = db_guest_connection_ids.next().await { - guest_connection_ids.push(ConnectionId(connection_id? 
as u32)); - } - } - + let guest_connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; let room = self.commit_room_transaction(room_id, tx).await?; + Ok((room, guest_connection_ids)) }) .await @@ -2108,7 +2140,7 @@ where .execute(&mut tx) .await?; - if result.rows_affected() != 1 { + if result.rows_affected() == 0 { Err(anyhow!("not a collaborator on this project"))?; } @@ -2207,23 +2239,6 @@ where .await } - pub async fn unshare_project(&self, project_id: ProjectId) -> Result<()> { - todo!() - // test_support!(self, { - // sqlx::query( - // " - // UPDATE projects - // SET unregistered = TRUE - // WHERE id = $1 - // ", - // ) - // .bind(project_id) - // .execute(&self.pool) - // .await?; - // Ok(()) - // }) - } - // contacts pub async fn get_contacts(&self, user_id: UserId) -> Result> { diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index c32bdb5008..45330ca858 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -877,14 +877,19 @@ impl Server { message: Message, ) -> Result<()> { let project_id = ProjectId::from_proto(message.payload.project_id); - let mut store = self.store().await; - let (room, project) = store.unshare_project(project_id, message.sender_connection_id)?; + + let (room, guest_connection_ids) = self + .app_state + .db + .unshare_project(project_id, message.sender_connection_id) + .await?; + broadcast( message.sender_connection_id, - project.guest_connection_ids(), + guest_connection_ids, |conn_id| self.peer.send(conn_id, message.payload.clone()), ); - self.room_updated(room); + self.room_updated(&room); Ok(()) } diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index 9c93f0daca..1aa9c709b7 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -1,6 +1,6 @@ use crate::db::{self, ProjectId, UserId}; use anyhow::{anyhow, Result}; -use collections::{btree_map, BTreeMap, BTreeSet, HashMap, HashSet}; +use collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use rpc::{proto, ConnectionId}; use serde::Serialize; use std::path::PathBuf; @@ -72,14 +72,6 @@ pub struct Worktree { pub type ReplicaId = u16; -pub struct LeftProject { - pub id: ProjectId, - pub host_user_id: UserId, - pub host_connection_id: ConnectionId, - pub connection_ids: Vec, - pub remove_collaborator: bool, -} - #[derive(Copy, Clone)] pub struct Metrics { pub connections: usize, @@ -209,48 +201,6 @@ impl Store { &self.rooms } - pub fn unshare_project( - &mut self, - project_id: ProjectId, - connection_id: ConnectionId, - ) -> Result<(&proto::Room, Project)> { - match self.projects.entry(project_id) { - btree_map::Entry::Occupied(e) => { - if e.get().host_connection_id == connection_id { - let project = e.remove(); - - if let Some(host_connection) = self.connections.get_mut(&connection_id) { - host_connection.projects.remove(&project_id); - } - - for guest_connection in project.guests.keys() { - if let Some(connection) = self.connections.get_mut(guest_connection) { - connection.projects.remove(&project_id); - } - } - - let room = self - .rooms - .get_mut(&project.room_id) - .ok_or_else(|| anyhow!("no such room"))?; - let participant = room - .participants - .iter_mut() - .find(|participant| participant.peer_id == connection_id.0) - .ok_or_else(|| anyhow!("no such room"))?; - participant - .projects - .retain(|project| project.id != project_id.to_proto()); - - Ok((room, project)) - } else { - Err(anyhow!("no such project"))? 
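// The new `db::unshare_project` above leans on `DELETE .. RETURNING`: a single
// statement both verifies that the caller is the host (via the WHERE clause)
// and yields the room id needed for the subsequent broadcast. Sketch of the
// failure path (sqlx 0.6 style, names as in the code above):
//
//     let room_id: RoomId = sqlx::query_scalar(
//         "
//         DELETE FROM projects
//         WHERE id = $1 AND host_connection_id = $2
//         RETURNING room_id
//         ",
//     )
//     .bind(project_id)
//     .bind(connection_id.0 as i32)
//     .fetch_one(&mut tx)
//     .await?; // a non-host caller matches no row, so fetch_one returns Err
//
// Compared with the old in-memory Store version, this also removes the need to
// walk room participants by hand: `commit_room_transaction` rebuilds the room
// payload from the database afterwards.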
- } - } - btree_map::Entry::Vacant(_) => Err(anyhow!("no such project"))?, - } - } - #[cfg(test)] pub fn check_invariants(&self) { for (connection_id, connection) in &self.connections { @@ -373,17 +323,3 @@ impl Store { } } } - -impl Project { - pub fn guest_connection_ids(&self) -> Vec { - self.guests.keys().copied().collect() - } - - pub fn connection_ids(&self) -> Vec { - self.guests - .keys() - .copied() - .chain(Some(self.host_connection_id)) - .collect() - } -} From 9eee22ff0ab6856a195568409e53b6d91a48f094 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Wed, 16 Nov 2022 14:24:26 -0700 Subject: [PATCH 042/240] Fix column name in query --- crates/collab/src/db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index bc74a8e530..6741afab7e 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1760,7 +1760,7 @@ where let query = format!( " DELETE FROM worktree_entries - WHERE project_id = ? AND worktree_id = ? AND entry_id IN ({params}) + WHERE project_id = ? AND worktree_id = ? AND id IN ({params}) " ); From 532a5992394d96dfaf9bb8921aab8036368a23b6 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 17 Nov 2022 11:38:00 +0100 Subject: [PATCH 043/240] Use `Db::get_guest_connection_ids` in other db methods --- crates/collab/src/db.rs | 57 +++++------------------------------------ 1 file changed, 6 insertions(+), 51 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 6741afab7e..9485d1aae0 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1771,24 +1771,9 @@ where query.execute(&mut tx).await?; } - let connection_ids = sqlx::query_scalar::<_, i32>( - " - SELECT connection_id - FROM project_collaborators - WHERE project_id = $1 AND connection_id != $2 - ", - ) - .bind(project_id) - .bind(connection_id.0 as i32) - .fetch_all(&mut tx) - .await?; - + let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; tx.commit().await?; - - Ok(connection_ids - .into_iter() - .map(|connection_id| ConnectionId(connection_id as u32)) - .collect()) + Ok(connection_ids) }) .await } @@ -1846,24 +1831,9 @@ where .execute(&mut tx) .await?; - let connection_ids = sqlx::query_scalar::<_, i32>( - " - SELECT connection_id - FROM project_collaborators - WHERE project_id = $1 AND connection_id != $2 - ", - ) - .bind(project_id) - .bind(connection_id.0 as i32) - .fetch_all(&mut tx) - .await?; - + let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; tx.commit().await?; - - Ok(connection_ids - .into_iter() - .map(|connection_id| ConnectionId(connection_id as u32)) - .collect()) + Ok(connection_ids) }) .await } @@ -1908,24 +1878,9 @@ where .execute(&mut tx) .await?; - let connection_ids = sqlx::query_scalar::<_, i32>( - " - SELECT connection_id - FROM project_collaborators - WHERE project_id = $1 AND connection_id != $2 - ", - ) - .bind(project_id) - .bind(connection_id.0 as i32) - .fetch_all(&mut tx) - .await?; - + let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; tx.commit().await?; - - Ok(connection_ids - .into_iter() - .map(|connection_id| ConnectionId(connection_id as u32)) - .collect()) + Ok(connection_ids) }) .await } From 71eeeedc05f7ed6978f2ebfc6f169a7bc9cc8907 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 17 Nov 2022 12:21:51 +0100 Subject: [PATCH 044/240] Don't replace newer diagnostics with older ones --- .../20221109000000_test_schema.sql | 1 + 
.../20221111092550_reconnection_support.sql | 1 + crates/collab/src/db.rs | 11 ++++++++--- crates/collab/src/integration_tests.rs | 12 ++++++++---- crates/project/src/project.rs | 4 ++++ crates/project/src/worktree.rs | 16 ++++++++++------ crates/rpc/proto/zed.proto | 1 + 7 files changed, 33 insertions(+), 13 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 66925fddd5..bb216eb32d 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -82,6 +82,7 @@ CREATE TABLE "worktree_diagnostic_summaries" ( "language_server_id" INTEGER NOT NULL, "error_count" INTEGER NOT NULL, "warning_count" INTEGER NOT NULL, + "version" INTEGER NOT NULL, PRIMARY KEY(project_id, worktree_id, path), FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE ); diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index 4f4ad6aede..5696dc4a44 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -44,6 +44,7 @@ CREATE TABLE "worktree_diagnostic_summaries" ( "language_server_id" INTEGER NOT NULL, "error_count" INTEGER NOT NULL, "warning_count" INTEGER NOT NULL, + "version" INTEGER NOT NULL, PRIMARY KEY(project_id, worktree_id, path), FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE ); diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 9485d1aae0..2823b49255 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1813,13 +1813,15 @@ where path, language_server_id, error_count, - warning_count + warning_count, + version ) - VALUES ($1, $2, $3, $4, $5, $6) + VALUES ($1, $2, $3, $4, $5, $6, $7) ON CONFLICT (project_id, worktree_id, path) DO UPDATE SET language_server_id = excluded.language_server_id, error_count = excluded.error_count, - warning_count = excluded.warning_count + warning_count = excluded.warning_count, + version = excluded.version ", ) .bind(project_id) @@ -1828,6 +1830,7 @@ where .bind(summary.language_server_id as i64) .bind(summary.error_count as i32) .bind(summary.warning_count as i32) + .bind(summary.version as i32) .execute(&mut tx) .await?; @@ -2042,6 +2045,7 @@ where language_server_id: summary.language_server_id as u64, error_count: summary.error_count as u32, warning_count: summary.warning_count as u32, + version: summary.version as u32, }); } } @@ -2666,6 +2670,7 @@ struct WorktreeDiagnosticSummary { language_server_id: i64, error_count: i32, warning_count: i32, + version: i32, } id_type!(LanguageServerId); diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 1236af42cb..d730b5d4e7 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -2412,9 +2412,10 @@ async fn test_collaborating_with_diagnostics( path: Arc::from(Path::new("a.rs")), }, DiagnosticSummary { + language_server_id: 0, error_count: 1, warning_count: 0, - ..Default::default() + version: 2, }, )] ) @@ -2444,9 +2445,10 @@ async fn test_collaborating_with_diagnostics( path: Arc::from(Path::new("a.rs")), }, DiagnosticSummary { + language_server_id: 0, error_count: 1, warning_count: 0, - ..Default::default() + version: 2, }, )] ); @@ -2484,9 +2486,10 @@ async 
fn test_collaborating_with_diagnostics( path: Arc::from(Path::new("a.rs")), }, DiagnosticSummary { + language_server_id: 0, error_count: 1, warning_count: 1, - ..Default::default() + version: 3, }, )] ); @@ -2500,9 +2503,10 @@ async fn test_collaborating_with_diagnostics( path: Arc::from(Path::new("a.rs")), }, DiagnosticSummary { + language_server_id: 0, error_count: 1, warning_count: 1, - ..Default::default() + version: 3, }, )] ); diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 503ae8d4b2..9d7323f989 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -223,6 +223,7 @@ pub struct DiagnosticSummary { pub language_server_id: usize, pub error_count: usize, pub warning_count: usize, + pub version: usize, } #[derive(Debug, Clone)] @@ -293,12 +294,14 @@ pub struct ProjectTransaction(pub HashMap, language::Transac impl DiagnosticSummary { fn new<'a, T: 'a>( language_server_id: usize, + version: usize, diagnostics: impl IntoIterator>, ) -> Self { let mut this = Self { language_server_id, error_count: 0, warning_count: 0, + version, }; for entry in diagnostics { @@ -324,6 +327,7 @@ impl DiagnosticSummary { language_server_id: self.language_server_id as u64, error_count: self.error_count as u32, warning_count: self.warning_count as u32, + version: self.version as u32, } } } diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 836ac55b66..04e77cf09a 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -366,6 +366,7 @@ impl Worktree { Worktree::Remote(worktree) => &worktree.diagnostic_summaries, } .iter() + .filter(|(_, summary)| !summary.is_empty()) .map(|(path, summary)| (path.0.clone(), *summary)) } @@ -516,7 +517,8 @@ impl LocalWorktree { .diagnostic_summaries .remove(&PathKey(worktree_path.clone())) .unwrap_or_default(); - let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics); + let new_summary = + DiagnosticSummary::new(language_server_id, old_summary.version + 1, &diagnostics); if !new_summary.is_empty() { self.diagnostic_summaries .insert(PathKey(worktree_path.clone()), new_summary); @@ -1106,15 +1108,17 @@ impl RemoteWorktree { path: Arc, summary: &proto::DiagnosticSummary, ) { - let summary = DiagnosticSummary { + let old_summary = self.diagnostic_summaries.get(&PathKey(path.clone())); + let new_summary = DiagnosticSummary { language_server_id: summary.language_server_id as usize, error_count: summary.error_count as usize, warning_count: summary.warning_count as usize, + version: summary.version as usize, }; - if summary.is_empty() { - self.diagnostic_summaries.remove(&PathKey(path)); - } else { - self.diagnostic_summaries.insert(PathKey(path), summary); + if old_summary.map_or(true, |old_summary| { + new_summary.version >= old_summary.version + }) { + self.diagnostic_summaries.insert(PathKey(path), new_summary); } } diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index 30c1c89e8f..b6d4b83b3b 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -652,6 +652,7 @@ message DiagnosticSummary { uint64 language_server_id = 2; uint32 error_count = 3; uint32 warning_count = 4; + uint32 version = 5; } message UpdateLanguageServer { From 3b34d858b5b5143a0549179a502f6a25e8e905ce Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 17 Nov 2022 13:33:26 +0100 Subject: [PATCH 045/240] Remove unwrap from `Server::share_project` --- crates/collab/src/rpc.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) 
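Handlers such as `share_project` return `Result<()>`, so replacing the `.unwrap()` with `?` is enough to turn a failed database call into an error response for the one offending request instead of a server panic. A minimal, self-contained illustration of the difference (only `anyhow = "1"` assumed, no collab types):

use anyhow::{anyhow, Result};

fn share(fail: bool) -> Result<u64> {
    if fail {
        Err(anyhow!("database error"))
    } else {
        Ok(42)
    }
}

fn handler(fail: bool) -> Result<()> {
    // With `.unwrap()` a failure here aborts the whole process; with `?` it
    // becomes an error the RPC layer can report to just this one caller.
    let project_id = share(fail)?;
    println!("shared project {project_id}");
    Ok(())
}

fn main() {
    assert!(handler(false).is_ok());
    assert!(handler(true).is_err()); // the server process keeps running
}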
diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 45330ca858..70419623ef 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -862,8 +862,7 @@ impl Server { request.sender_connection_id, &request.payload.worktrees, ) - .await - .unwrap(); + .await?; response.send(proto::ShareProjectResponse { project_id: project_id.to_proto(), })?; From fe93263ad450a1460ccb5edfde1ca868d132e8c6 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 17 Nov 2022 14:12:00 +0100 Subject: [PATCH 046/240] Wait for previous `UpdateFollowers` message ack before sending new ones --- crates/collab/src/integration_tests.rs | 82 +++++++++++++++++--------- crates/collab/src/rpc.rs | 4 +- crates/rpc/src/proto.rs | 1 + crates/workspace/src/workspace.rs | 76 +++++++++++++++--------- 4 files changed, 106 insertions(+), 57 deletions(-) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index d730b5d4e7..5118510024 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -4672,7 +4672,7 @@ async fn test_following( cx_a: &mut TestAppContext, cx_b: &mut TestAppContext, ) { - cx_a.foreground().forbid_parking(); + deterministic.forbid_parking(); cx_a.update(editor::init); cx_b.update(editor::init); @@ -4791,11 +4791,14 @@ async fn test_following( workspace_a.update(cx_a, |workspace, cx| { workspace.activate_item(&editor_a1, cx) }); - workspace_b - .condition(cx_b, |workspace, cx| { - workspace.active_item(cx).unwrap().id() == editor_b1.id() - }) - .await; + deterministic.run_until_parked(); + assert_eq!( + workspace_b.read_with(cx_b, |workspace, cx| workspace + .active_item(cx) + .unwrap() + .id()), + editor_b1.id() + ); // When client A navigates back and forth, client B does so as well. workspace_a @@ -4803,49 +4806,74 @@ async fn test_following( workspace::Pane::go_back(workspace, None, cx) }) .await; - workspace_b - .condition(cx_b, |workspace, cx| { - workspace.active_item(cx).unwrap().id() == editor_b2.id() - }) - .await; + deterministic.run_until_parked(); + assert_eq!( + workspace_b.read_with(cx_b, |workspace, cx| workspace + .active_item(cx) + .unwrap() + .id()), + editor_b2.id() + ); workspace_a .update(cx_a, |workspace, cx| { workspace::Pane::go_forward(workspace, None, cx) }) .await; - workspace_b - .condition(cx_b, |workspace, cx| { - workspace.active_item(cx).unwrap().id() == editor_b1.id() + workspace_a + .update(cx_a, |workspace, cx| { + workspace::Pane::go_back(workspace, None, cx) }) .await; + workspace_a + .update(cx_a, |workspace, cx| { + workspace::Pane::go_forward(workspace, None, cx) + }) + .await; + deterministic.run_until_parked(); + assert_eq!( + workspace_b.read_with(cx_b, |workspace, cx| workspace + .active_item(cx) + .unwrap() + .id()), + editor_b1.id() + ); // Changes to client A's editor are reflected on client B. 
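// How the ordering fix in this patch works, reduced to a standalone sketch:
// all follower updates are funneled through a channel, and a single task
// awaits each request's ack before sending the next (futures = "0.3",
// smol = "1"; the real consumer is the `_update_followers` task added to
// Workspace further down in this patch):
//
//     use futures::{channel::mpsc, StreamExt};
//
//     fn main() {
//         let (tx, mut rx) = mpsc::unbounded::<u32>();
//         let consumer = smol::spawn(async move {
//             while let Some(update) = rx.next().await {
//                 // Stands in for `client.request(UpdateFollowers { .. }).await`:
//                 // update N + 1 is never sent before update N is acked.
//                 println!("sent update {update} and awaited its ack");
//             }
//         });
//         smol::block_on(async {
//             for i in 0..3 {
//                 tx.unbounded_send(i).unwrap();
//             }
//             drop(tx); // close the channel so the consumer can finish
//             consumer.await;
//         });
//     }
//
// Note that this ack-waiting approach is reverted a few patches later in this
// same series.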
editor_a1.update(cx_a, |editor, cx| { editor.change_selections(None, cx, |s| s.select_ranges([1..1, 2..2])); }); - editor_b1 - .condition(cx_b, |editor, cx| { - editor.selections.ranges(cx) == vec![1..1, 2..2] - }) - .await; + deterministic.run_until_parked(); + assert_eq!( + editor_b1.read_with(cx_b, |editor, cx| editor.selections.ranges(cx)), + vec![1..1, 2..2] + ); editor_a1.update(cx_a, |editor, cx| editor.set_text("TWO", cx)); - editor_b1 - .condition(cx_b, |editor, cx| editor.text(cx) == "TWO") - .await; + deterministic.run_until_parked(); + assert_eq!( + editor_b1.read_with(cx_b, |editor, cx| editor.text(cx)), + "TWO" + ); editor_a1.update(cx_a, |editor, cx| { editor.change_selections(None, cx, |s| s.select_ranges([3..3])); editor.set_scroll_position(vec2f(0., 100.), cx); }); - editor_b1 - .condition(cx_b, |editor, cx| { - editor.selections.ranges(cx) == vec![3..3] - }) - .await; + deterministic.run_until_parked(); + assert_eq!( + editor_b1.read_with(cx_b, |editor, cx| editor.selections.ranges(cx)), + vec![3..3] + ); // After unfollowing, client B stops receiving updates from client A. + assert_eq!( + workspace_b.read_with(cx_b, |workspace, cx| workspace + .active_item(cx) + .unwrap() + .id()), + editor_b1.id() + ); workspace_b.update(cx_b, |workspace, cx| { workspace.unfollow(&workspace.active_pane().clone(), cx) }); diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 70419623ef..a07a8b37c8 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -192,7 +192,7 @@ impl Server { .add_request_handler(Server::respond_to_contact_request) .add_request_handler(Server::follow) .add_message_handler(Server::unfollow) - .add_message_handler(Server::update_followers) + .add_request_handler(Server::update_followers) .add_message_handler(Server::update_diff_base) .add_request_handler(Server::get_private_user_info); @@ -1437,6 +1437,7 @@ impl Server { async fn update_followers( self: Arc, request: Message, + response: Response, ) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); let project_connection_ids = self @@ -1464,6 +1465,7 @@ impl Server { )?; } } + response.send(proto::Ack {})?; Ok(()) } diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index 50f3c57f2a..8a59818fa3 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -229,6 +229,7 @@ request_messages!( (Test, Test), (UpdateBuffer, Ack), (UpdateDiagnosticSummary, Ack), + (UpdateFollowers, Ack), (UpdateParticipantLocation, Ack), (UpdateProject, Ack), (UpdateWorktree, Ack), diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 2296741ed3..5f14427fee 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -18,7 +18,10 @@ use collections::{hash_map, HashMap, HashSet}; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; -use futures::{channel::oneshot, FutureExt, StreamExt}; +use futures::{ + channel::{mpsc, oneshot}, + FutureExt, StreamExt, +}; use gpui::{ actions, elements::*, @@ -711,14 +714,13 @@ impl ItemHandle for ViewHandle { if let Some(followed_item) = self.to_followable_item_handle(cx) { if let Some(message) = followed_item.to_state_proto(cx) { - workspace.update_followers( - proto::update_followers::Variant::CreateView(proto::View { + workspace.update_followers(proto::update_followers::Variant::CreateView( + proto::View { id: followed_item.id() as u64, variant: Some(message), leader_id: 
workspace.leader_for_pane(&pane).map(|id| id.0), - }), - cx, - ); + }, + )); } } @@ -762,7 +764,7 @@ impl ItemHandle for ViewHandle { cx.after_window_update({ let pending_update = pending_update.clone(); let pending_update_scheduled = pending_update_scheduled.clone(); - move |this, cx| { + move |this, _| { pending_update_scheduled.store(false, SeqCst); this.update_followers( proto::update_followers::Variant::UpdateView( @@ -772,7 +774,6 @@ impl ItemHandle for ViewHandle { leader_id: leader_id.map(|id| id.0), }, ), - cx, ); } }); @@ -1081,9 +1082,11 @@ pub struct Workspace { leader_state: LeaderState, follower_states_by_leader: FollowerStatesByLeader, last_leaders_by_pane: HashMap, PeerId>, + follower_updates: mpsc::UnboundedSender, window_edited: bool, active_call: Option<(ModelHandle, Vec)>, _observe_current_user: Task<()>, + _update_followers: Task>, } #[derive(Default)] @@ -1166,6 +1169,34 @@ impl Workspace { } }); + let (follower_updates_tx, mut follower_updates_rx) = mpsc::unbounded(); + let _update_followers = cx.spawn_weak(|this, cx| async move { + while let Some(update) = follower_updates_rx.next().await { + let this = this.upgrade(&cx)?; + let update_followers = this.read_with(&cx, |this, cx| { + if let Some(project_id) = this.project.read(cx).remote_id() { + if this.leader_state.followers.is_empty() { + None + } else { + Some(this.client.request(proto::UpdateFollowers { + project_id, + follower_ids: + this.leader_state.followers.iter().map(|f| f.0).collect(), + variant: Some(update), + })) + } + } else { + None + } + }); + + if let Some(update_followers) = update_followers { + update_followers.await.log_err(); + } + } + None + }); + let handle = cx.handle(); let weak_handle = cx.weak_handle(); @@ -1224,10 +1255,12 @@ impl Workspace { project, leader_state: Default::default(), follower_states_by_leader: Default::default(), + follower_updates: follower_updates_tx, last_leaders_by_pane: Default::default(), window_edited: false, active_call, _observe_current_user, + _update_followers, }; this.project_remote_id_changed(this.project.read(cx).remote_id(), cx); cx.defer(|this, cx| this.update_window_title(cx)); @@ -1967,13 +2000,12 @@ impl Workspace { cx.notify(); } - self.update_followers( - proto::update_followers::Variant::UpdateActiveView(proto::UpdateActiveView { + self.update_followers(proto::update_followers::Variant::UpdateActiveView( + proto::UpdateActiveView { id: self.active_item(cx).map(|item| item.id() as u64), leader_id: self.leader_for_pane(&pane).map(|id| id.0), - }), - cx, - ); + }, + )); } fn handle_pane_event( @@ -2594,22 +2626,8 @@ impl Workspace { Ok(()) } - fn update_followers( - &self, - update: proto::update_followers::Variant, - cx: &AppContext, - ) -> Option<()> { - let project_id = self.project.read(cx).remote_id()?; - if !self.leader_state.followers.is_empty() { - self.client - .send(proto::UpdateFollowers { - project_id, - follower_ids: self.leader_state.followers.iter().map(|f| f.0).collect(), - variant: Some(update), - }) - .log_err(); - } - None + fn update_followers(&self, update: proto::update_followers::Variant) { + let _ = self.follower_updates.unbounded_send(update); } pub fn leader_for_pane(&self, pane: &ViewHandle) -> Option { From 6415809b610e4bfb158ab6ea257929fb410bbb16 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 17 Nov 2022 15:34:12 +0100 Subject: [PATCH 047/240] Fix errors in Postgres schema --- .../collab/migrations/20221111092550_reconnection_support.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index 5696dc4a44..50a4a7154b 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -59,7 +59,7 @@ CREATE TABLE "language_servers" ( CREATE INDEX "index_language_servers_on_project_id" ON "language_servers" ("project_id"); CREATE TABLE "project_collaborators" ( - "id" INTEGER PRIMARY KEY, + "id" SERIAL PRIMARY KEY, "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "connection_id" INTEGER NOT NULL, "user_id" INTEGER NOT NULL, @@ -70,7 +70,7 @@ CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborato CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id"); CREATE TABLE "room_participants" ( - "id" INTEGER PRIMARY KEY, + "id" SERIAL PRIMARY KEY, "room_id" INTEGER NOT NULL REFERENCES rooms (id), "user_id" INTEGER NOT NULL REFERENCES users (id), "answering_connection_id" INTEGER, From 0f4598a2435f34f15ed739a7dd75419eff05d4c5 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 17 Nov 2022 15:34:35 +0100 Subject: [PATCH 048/240] Fix seed script --- crates/collab/src/bin/seed.rs | 58 ++--------------------------------- 1 file changed, 2 insertions(+), 56 deletions(-) diff --git a/crates/collab/src/bin/seed.rs b/crates/collab/src/bin/seed.rs index cabea7d013..3b635540b3 100644 --- a/crates/collab/src/bin/seed.rs +++ b/crates/collab/src/bin/seed.rs @@ -1,9 +1,7 @@ use collab::{Error, Result}; -use db::{Db, PostgresDb, UserId}; -use rand::prelude::*; +use db::{DefaultDb, UserId}; use serde::{de::DeserializeOwned, Deserialize}; use std::fmt::Write; -use time::{Duration, OffsetDateTime}; #[allow(unused)] #[path = "../db.rs"] @@ -18,9 +16,8 @@ struct GitHubUser { #[tokio::main] async fn main() { - let mut rng = StdRng::from_entropy(); let database_url = std::env::var("DATABASE_URL").expect("missing DATABASE_URL env var"); - let db = PostgresDb::new(&database_url, 5) + let db = DefaultDb::new(&database_url, 5) .await .expect("failed to connect to postgres database"); let github_token = std::env::var("GITHUB_TOKEN").expect("missing GITHUB_TOKEN env var"); @@ -104,57 +101,6 @@ async fn main() { ); } } - - let zed_org_id = if let Some(org) = db - .find_org_by_slug("zed") - .await - .expect("failed to fetch org") - { - org.id - } else { - db.create_org("Zed", "zed") - .await - .expect("failed to insert org") - }; - - let general_channel_id = if let Some(channel) = db - .get_org_channels(zed_org_id) - .await - .expect("failed to fetch channels") - .iter() - .find(|c| c.name == "General") - { - channel.id - } else { - let channel_id = db - .create_org_channel(zed_org_id, "General") - .await - .expect("failed to insert channel"); - - let now = OffsetDateTime::now_utc(); - let max_seconds = Duration::days(100).as_seconds_f64(); - let mut timestamps = (0..1000) - .map(|_| now - Duration::seconds_f64(rng.gen_range(0_f64..=max_seconds))) - .collect::>(); - timestamps.sort(); - for timestamp in timestamps { - let sender_id = *zed_user_ids.choose(&mut rng).unwrap(); - let body = lipsum::lipsum_words(rng.gen_range(1..=50)); - db.create_channel_message(channel_id, sender_id, &body, timestamp, rng.gen()) - .await - .expect("failed to insert message"); - } - channel_id - }; - - for user_id in zed_user_ids { - db.add_org_member(zed_org_id, user_id, true) - .await 
- .expect("failed to insert org membership"); - db.add_channel_member(general_channel_id, user_id, true) - .await - .expect("failed to insert channel membership"); - } } async fn fetch_github( From 7dae21cb36f3dbf6182b0db0f9752567438c95d5 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 17 Nov 2022 15:35:03 +0100 Subject: [PATCH 049/240] :art: --- crates/collab/src/db.rs | 14 +++++--------- crates/collab/src/rpc.rs | 2 +- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 2823b49255..55c71ea92e 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -49,6 +49,7 @@ impl BeginTransaction for Db { } // In Sqlite, transactions are inherently serializable. +#[cfg(test)] impl BeginTransaction for Db { type Database = sqlx::Sqlite; @@ -1141,10 +1142,7 @@ where .await } - pub async fn leave_room_for_connection( - &self, - connection_id: ConnectionId, - ) -> Result> { + pub async fn leave_room(&self, connection_id: ConnectionId) -> Result> { self.transact(|mut tx| async move { // Leave room. let room_id = sqlx::query_scalar::<_, RoomId>( @@ -1498,8 +1496,7 @@ where .bind(user_id) .bind(connection_id.0 as i32) .fetch_one(&mut tx) - .await - .unwrap(); + .await?; if !worktrees.is_empty() { let mut params = "(?, ?, ?, ?, ?, ?, ?),".repeat(worktrees.len()); @@ -1530,7 +1527,7 @@ where .bind(0) .bind(false); } - query.execute(&mut tx).await.unwrap(); + query.execute(&mut tx).await?; } sqlx::query( @@ -1551,8 +1548,7 @@ where .bind(0) .bind(true) .execute(&mut tx) - .await - .unwrap(); + .await?; let room = self.commit_room_transaction(room_id, tx).await?; Ok((project_id, room)) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index a07a8b37c8..9e0335ef1b 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -629,7 +629,7 @@ impl Server { ) -> Result<()> { let mut contacts_to_update = HashSet::default(); - let Some(left_room) = self.app_state.db.leave_room_for_connection(leaving_connection_id).await? else { + let Some(left_room) = self.app_state.db.leave_room(leaving_connection_id).await? 
else { return Err(anyhow!("no room to leave"))?; }; contacts_to_update.insert(leaving_user_id); From 8621c88a3ce088808b64fe03a4771dac7c62de7a Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 17 Nov 2022 16:56:43 +0100 Subject: [PATCH 050/240] Use int8 for `scan_id` and `inode` in Postgres --- .../collab/migrations/20221111092550_reconnection_support.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index 50a4a7154b..de29f0c878 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -15,7 +15,7 @@ CREATE TABLE "worktrees" ( "root_name" VARCHAR NOT NULL, "abs_path" VARCHAR NOT NULL, "visible" BOOL NOT NULL, - "scan_id" INTEGER NOT NULL, + "scan_id" INT8 NOT NULL, "is_complete" BOOL NOT NULL, PRIMARY KEY(project_id, id) ); @@ -27,7 +27,7 @@ CREATE TABLE "worktree_entries" ( "id" INTEGER NOT NULL, "is_dir" BOOL NOT NULL, "path" VARCHAR NOT NULL, - "inode" INTEGER NOT NULL, + "inode" INT8 NOT NULL, "mtime_seconds" INTEGER NOT NULL, "mtime_nanos" INTEGER NOT NULL, "is_symlink" BOOL NOT NULL, From e7e45be6e141ac50db80cf66d1445afb8163d681 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 17 Nov 2022 16:57:32 +0100 Subject: [PATCH 051/240] Revert "Wait for previous `UpdateFollowers` message ack before sending new ones" This reverts commit fe93263ad450a1460ccb5edfde1ca868d132e8c6. --- crates/collab/src/integration_tests.rs | 82 +++++++++----------------- crates/collab/src/rpc.rs | 4 +- crates/rpc/src/proto.rs | 1 - crates/workspace/src/workspace.rs | 76 +++++++++--------------- 4 files changed, 57 insertions(+), 106 deletions(-) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 5118510024..d730b5d4e7 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -4672,7 +4672,7 @@ async fn test_following( cx_a: &mut TestAppContext, cx_b: &mut TestAppContext, ) { - deterministic.forbid_parking(); + cx_a.foreground().forbid_parking(); cx_a.update(editor::init); cx_b.update(editor::init); @@ -4791,14 +4791,11 @@ async fn test_following( workspace_a.update(cx_a, |workspace, cx| { workspace.activate_item(&editor_a1, cx) }); - deterministic.run_until_parked(); - assert_eq!( - workspace_b.read_with(cx_b, |workspace, cx| workspace - .active_item(cx) - .unwrap() - .id()), - editor_b1.id() - ); + workspace_b + .condition(cx_b, |workspace, cx| { + workspace.active_item(cx).unwrap().id() == editor_b1.id() + }) + .await; // When client A navigates back and forth, client B does so as well. 
workspace_a @@ -4806,74 +4803,49 @@ async fn test_following( workspace::Pane::go_back(workspace, None, cx) }) .await; - deterministic.run_until_parked(); - assert_eq!( - workspace_b.read_with(cx_b, |workspace, cx| workspace - .active_item(cx) - .unwrap() - .id()), - editor_b2.id() - ); + workspace_b + .condition(cx_b, |workspace, cx| { + workspace.active_item(cx).unwrap().id() == editor_b2.id() + }) + .await; workspace_a .update(cx_a, |workspace, cx| { workspace::Pane::go_forward(workspace, None, cx) }) .await; - workspace_a - .update(cx_a, |workspace, cx| { - workspace::Pane::go_back(workspace, None, cx) + workspace_b + .condition(cx_b, |workspace, cx| { + workspace.active_item(cx).unwrap().id() == editor_b1.id() }) .await; - workspace_a - .update(cx_a, |workspace, cx| { - workspace::Pane::go_forward(workspace, None, cx) - }) - .await; - deterministic.run_until_parked(); - assert_eq!( - workspace_b.read_with(cx_b, |workspace, cx| workspace - .active_item(cx) - .unwrap() - .id()), - editor_b1.id() - ); // Changes to client A's editor are reflected on client B. editor_a1.update(cx_a, |editor, cx| { editor.change_selections(None, cx, |s| s.select_ranges([1..1, 2..2])); }); - deterministic.run_until_parked(); - assert_eq!( - editor_b1.read_with(cx_b, |editor, cx| editor.selections.ranges(cx)), - vec![1..1, 2..2] - ); + editor_b1 + .condition(cx_b, |editor, cx| { + editor.selections.ranges(cx) == vec![1..1, 2..2] + }) + .await; editor_a1.update(cx_a, |editor, cx| editor.set_text("TWO", cx)); - deterministic.run_until_parked(); - assert_eq!( - editor_b1.read_with(cx_b, |editor, cx| editor.text(cx)), - "TWO" - ); + editor_b1 + .condition(cx_b, |editor, cx| editor.text(cx) == "TWO") + .await; editor_a1.update(cx_a, |editor, cx| { editor.change_selections(None, cx, |s| s.select_ranges([3..3])); editor.set_scroll_position(vec2f(0., 100.), cx); }); - deterministic.run_until_parked(); - assert_eq!( - editor_b1.read_with(cx_b, |editor, cx| editor.selections.ranges(cx)), - vec![3..3] - ); + editor_b1 + .condition(cx_b, |editor, cx| { + editor.selections.ranges(cx) == vec![3..3] + }) + .await; // After unfollowing, client B stops receiving updates from client A. 
- assert_eq!( - workspace_b.read_with(cx_b, |workspace, cx| workspace - .active_item(cx) - .unwrap() - .id()), - editor_b1.id() - ); workspace_b.update(cx_b, |workspace, cx| { workspace.unfollow(&workspace.active_pane().clone(), cx) }); diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 9e0335ef1b..4375056c9a 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -192,7 +192,7 @@ impl Server { .add_request_handler(Server::respond_to_contact_request) .add_request_handler(Server::follow) .add_message_handler(Server::unfollow) - .add_request_handler(Server::update_followers) + .add_message_handler(Server::update_followers) .add_message_handler(Server::update_diff_base) .add_request_handler(Server::get_private_user_info); @@ -1437,7 +1437,6 @@ impl Server { async fn update_followers( self: Arc, request: Message, - response: Response, ) -> Result<()> { let project_id = ProjectId::from_proto(request.payload.project_id); let project_connection_ids = self @@ -1465,7 +1464,6 @@ impl Server { )?; } } - response.send(proto::Ack {})?; Ok(()) } diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index 8a59818fa3..50f3c57f2a 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -229,7 +229,6 @@ request_messages!( (Test, Test), (UpdateBuffer, Ack), (UpdateDiagnosticSummary, Ack), - (UpdateFollowers, Ack), (UpdateParticipantLocation, Ack), (UpdateProject, Ack), (UpdateWorktree, Ack), diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 5f14427fee..2296741ed3 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -18,10 +18,7 @@ use collections::{hash_map, HashMap, HashSet}; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; -use futures::{ - channel::{mpsc, oneshot}, - FutureExt, StreamExt, -}; +use futures::{channel::oneshot, FutureExt, StreamExt}; use gpui::{ actions, elements::*, @@ -714,13 +711,14 @@ impl ItemHandle for ViewHandle { if let Some(followed_item) = self.to_followable_item_handle(cx) { if let Some(message) = followed_item.to_state_proto(cx) { - workspace.update_followers(proto::update_followers::Variant::CreateView( - proto::View { + workspace.update_followers( + proto::update_followers::Variant::CreateView(proto::View { id: followed_item.id() as u64, variant: Some(message), leader_id: workspace.leader_for_pane(&pane).map(|id| id.0), - }, - )); + }), + cx, + ); } } @@ -764,7 +762,7 @@ impl ItemHandle for ViewHandle { cx.after_window_update({ let pending_update = pending_update.clone(); let pending_update_scheduled = pending_update_scheduled.clone(); - move |this, _| { + move |this, cx| { pending_update_scheduled.store(false, SeqCst); this.update_followers( proto::update_followers::Variant::UpdateView( @@ -774,6 +772,7 @@ impl ItemHandle for ViewHandle { leader_id: leader_id.map(|id| id.0), }, ), + cx, ); } }); @@ -1082,11 +1081,9 @@ pub struct Workspace { leader_state: LeaderState, follower_states_by_leader: FollowerStatesByLeader, last_leaders_by_pane: HashMap, PeerId>, - follower_updates: mpsc::UnboundedSender, window_edited: bool, active_call: Option<(ModelHandle, Vec)>, _observe_current_user: Task<()>, - _update_followers: Task>, } #[derive(Default)] @@ -1169,34 +1166,6 @@ impl Workspace { } }); - let (follower_updates_tx, mut follower_updates_rx) = mpsc::unbounded(); - let _update_followers = cx.spawn_weak(|this, cx| async move { - while let Some(update) = follower_updates_rx.next().await { - let 
this = this.upgrade(&cx)?; - let update_followers = this.read_with(&cx, |this, cx| { - if let Some(project_id) = this.project.read(cx).remote_id() { - if this.leader_state.followers.is_empty() { - None - } else { - Some(this.client.request(proto::UpdateFollowers { - project_id, - follower_ids: - this.leader_state.followers.iter().map(|f| f.0).collect(), - variant: Some(update), - })) - } - } else { - None - } - }); - - if let Some(update_followers) = update_followers { - update_followers.await.log_err(); - } - } - None - }); - let handle = cx.handle(); let weak_handle = cx.weak_handle(); @@ -1255,12 +1224,10 @@ impl Workspace { project, leader_state: Default::default(), follower_states_by_leader: Default::default(), - follower_updates: follower_updates_tx, last_leaders_by_pane: Default::default(), window_edited: false, active_call, _observe_current_user, - _update_followers, }; this.project_remote_id_changed(this.project.read(cx).remote_id(), cx); cx.defer(|this, cx| this.update_window_title(cx)); @@ -2000,12 +1967,13 @@ impl Workspace { cx.notify(); } - self.update_followers(proto::update_followers::Variant::UpdateActiveView( - proto::UpdateActiveView { + self.update_followers( + proto::update_followers::Variant::UpdateActiveView(proto::UpdateActiveView { id: self.active_item(cx).map(|item| item.id() as u64), leader_id: self.leader_for_pane(&pane).map(|id| id.0), - }, - )); + }), + cx, + ); } fn handle_pane_event( @@ -2626,8 +2594,22 @@ impl Workspace { Ok(()) } - fn update_followers(&self, update: proto::update_followers::Variant) { - let _ = self.follower_updates.unbounded_send(update); + fn update_followers( + &self, + update: proto::update_followers::Variant, + cx: &AppContext, + ) -> Option<()> { + let project_id = self.project.read(cx).remote_id()?; + if !self.leader_state.followers.is_empty() { + self.client + .send(proto::UpdateFollowers { + project_id, + follower_ids: self.leader_state.followers.iter().map(|f| f.0).collect(), + variant: Some(update), + }) + .log_err(); + } + None } pub fn leader_for_pane(&self, pane: &ViewHandle) -> Option { From 4f39181c4cbd7b1845aa9ec3ff0fea59c80d4c86 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 17 Nov 2022 16:57:40 +0100 Subject: [PATCH 052/240] Revert "Don't replace newer diagnostics with older ones" This reverts commit 71eeeedc05f7ed6978f2ebfc6f169a7bc9cc8907. 
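For reference, the idea being backed out by this revert, reduced to a self-contained sketch: tag each diagnostic summary with a monotonically increasing version so that a late-arriving, older update can never clobber a newer one.

use std::collections::HashMap;

#[derive(Clone, Copy, Debug, PartialEq)]
struct Summary {
    error_count: usize,
    version: usize,
}

fn apply(summaries: &mut HashMap<&'static str, Summary>, path: &'static str, new: Summary) {
    match summaries.get(path) {
        // An update older than what we already have is dropped on the floor.
        Some(old) if new.version < old.version => {}
        _ => {
            summaries.insert(path, new);
        }
    }
}

fn main() {
    let mut summaries = HashMap::new();
    apply(&mut summaries, "a.rs", Summary { error_count: 1, version: 2 });
    // A delayed, older update arrives after the newer one:
    apply(&mut summaries, "a.rs", Summary { error_count: 0, version: 1 });
    assert_eq!(summaries["a.rs"].version, 2); // the newer summary still wins
}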
--- .../20221109000000_test_schema.sql | 1 - .../20221111092550_reconnection_support.sql | 1 - crates/collab/src/db.rs | 11 +++-------- crates/collab/src/integration_tests.rs | 12 ++++-------- crates/project/src/project.rs | 4 ---- crates/project/src/worktree.rs | 16 ++++++---------- crates/rpc/proto/zed.proto | 1 - 7 files changed, 13 insertions(+), 33 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index bb216eb32d..66925fddd5 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -82,7 +82,6 @@ CREATE TABLE "worktree_diagnostic_summaries" ( "language_server_id" INTEGER NOT NULL, "error_count" INTEGER NOT NULL, "warning_count" INTEGER NOT NULL, - "version" INTEGER NOT NULL, PRIMARY KEY(project_id, worktree_id, path), FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE ); diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index de29f0c878..2b8f7824cb 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -44,7 +44,6 @@ CREATE TABLE "worktree_diagnostic_summaries" ( "language_server_id" INTEGER NOT NULL, "error_count" INTEGER NOT NULL, "warning_count" INTEGER NOT NULL, - "version" INTEGER NOT NULL, PRIMARY KEY(project_id, worktree_id, path), FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE ); diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 55c71ea92e..c97c82c656 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1809,15 +1809,13 @@ where path, language_server_id, error_count, - warning_count, - version + warning_count ) - VALUES ($1, $2, $3, $4, $5, $6, $7) + VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (project_id, worktree_id, path) DO UPDATE SET language_server_id = excluded.language_server_id, error_count = excluded.error_count, - warning_count = excluded.warning_count, - version = excluded.version + warning_count = excluded.warning_count ", ) .bind(project_id) @@ -1826,7 +1824,6 @@ where .bind(summary.language_server_id as i64) .bind(summary.error_count as i32) .bind(summary.warning_count as i32) - .bind(summary.version as i32) .execute(&mut tx) .await?; @@ -2041,7 +2038,6 @@ where language_server_id: summary.language_server_id as u64, error_count: summary.error_count as u32, warning_count: summary.warning_count as u32, - version: summary.version as u32, }); } } @@ -2666,7 +2662,6 @@ struct WorktreeDiagnosticSummary { language_server_id: i64, error_count: i32, warning_count: i32, - version: i32, } id_type!(LanguageServerId); diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index d730b5d4e7..1236af42cb 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -2412,10 +2412,9 @@ async fn test_collaborating_with_diagnostics( path: Arc::from(Path::new("a.rs")), }, DiagnosticSummary { - language_server_id: 0, error_count: 1, warning_count: 0, - version: 2, + ..Default::default() }, )] ) @@ -2445,10 +2444,9 @@ async fn test_collaborating_with_diagnostics( path: Arc::from(Path::new("a.rs")), }, DiagnosticSummary { - language_server_id: 0, error_count: 1, warning_count: 0, - version: 2, + 
..Default::default() }, )] ); @@ -2486,10 +2484,9 @@ async fn test_collaborating_with_diagnostics( path: Arc::from(Path::new("a.rs")), }, DiagnosticSummary { - language_server_id: 0, error_count: 1, warning_count: 1, - version: 3, + ..Default::default() }, )] ); @@ -2503,10 +2500,9 @@ async fn test_collaborating_with_diagnostics( path: Arc::from(Path::new("a.rs")), }, DiagnosticSummary { - language_server_id: 0, error_count: 1, warning_count: 1, - version: 3, + ..Default::default() }, )] ); diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 9d7323f989..503ae8d4b2 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -223,7 +223,6 @@ pub struct DiagnosticSummary { pub language_server_id: usize, pub error_count: usize, pub warning_count: usize, - pub version: usize, } #[derive(Debug, Clone)] @@ -294,14 +293,12 @@ pub struct ProjectTransaction(pub HashMap, language::Transac impl DiagnosticSummary { fn new<'a, T: 'a>( language_server_id: usize, - version: usize, diagnostics: impl IntoIterator>, ) -> Self { let mut this = Self { language_server_id, error_count: 0, warning_count: 0, - version, }; for entry in diagnostics { @@ -327,7 +324,6 @@ impl DiagnosticSummary { language_server_id: self.language_server_id as u64, error_count: self.error_count as u32, warning_count: self.warning_count as u32, - version: self.version as u32, } } } diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 04e77cf09a..836ac55b66 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -366,7 +366,6 @@ impl Worktree { Worktree::Remote(worktree) => &worktree.diagnostic_summaries, } .iter() - .filter(|(_, summary)| !summary.is_empty()) .map(|(path, summary)| (path.0.clone(), *summary)) } @@ -517,8 +516,7 @@ impl LocalWorktree { .diagnostic_summaries .remove(&PathKey(worktree_path.clone())) .unwrap_or_default(); - let new_summary = - DiagnosticSummary::new(language_server_id, old_summary.version + 1, &diagnostics); + let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics); if !new_summary.is_empty() { self.diagnostic_summaries .insert(PathKey(worktree_path.clone()), new_summary); @@ -1108,17 +1106,15 @@ impl RemoteWorktree { path: Arc, summary: &proto::DiagnosticSummary, ) { - let old_summary = self.diagnostic_summaries.get(&PathKey(path.clone())); - let new_summary = DiagnosticSummary { + let summary = DiagnosticSummary { language_server_id: summary.language_server_id as usize, error_count: summary.error_count as usize, warning_count: summary.warning_count as usize, - version: summary.version as usize, }; - if old_summary.map_or(true, |old_summary| { - new_summary.version >= old_summary.version - }) { - self.diagnostic_summaries.insert(PathKey(path), new_summary); + if summary.is_empty() { + self.diagnostic_summaries.remove(&PathKey(path)); + } else { + self.diagnostic_summaries.insert(PathKey(path), summary); } } diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index b6d4b83b3b..30c1c89e8f 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -652,7 +652,6 @@ message DiagnosticSummary { uint64 language_server_id = 2; uint32 error_count = 3; uint32 warning_count = 4; - uint32 version = 5; } message UpdateLanguageServer { From c34a5f3177ee471f631e5d657c7d62673971ca05 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 17 Nov 2022 17:11:06 +0100 Subject: [PATCH 053/240] Introduce a new `Session` struct to server message handlers 
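Handlers stop taking a `Message` wrapper around each payload and instead receive the bare payload plus a `Session` describing who sent it. A toy sketch of the calling convention (the stand-in types below replace the crate's `db::UserId`, `rpc::ConnectionId`, and generated protobuf messages; only the shape of the change is the point):

    #[derive(Clone, Copy, Debug)]
    struct UserId(i32);
    #[derive(Clone, Copy, Debug)]
    struct ConnectionId(u32);

    // Old shape: sender metadata traveled inside a per-message wrapper, so
    // every handler dug through `message.payload` and `message.sender_*`.
    #[allow(dead_code)]
    struct Message<T> {
        sender_user_id: UserId,
        sender_connection_id: ConnectionId,
        payload: T,
    }

    // New shape: the dispatcher builds one `Session` per incoming message
    // and hands the payload to the handler bare.
    struct Session {
        user_id: UserId,
        connection_id: ConnectionId,
    }

    struct DeclineCall {
        room_id: u64,
    }

    fn decline_call(request: DeclineCall, session: Session) {
        println!(
            "user {:?} on {:?} declined the call in room {}",
            session.user_id, session.connection_id, request.room_id
        );
    }

    fn main() {
        decline_call(
            DeclineCall { room_id: 1 },
            Session { user_id: UserId(7), connection_id: ConnectionId(42) },
        );
    }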
Co-Authored-By: Nathan Sobo
---
 crates/collab/src/rpc.rs | 498 ++++++++++++++++++---------------------
 1 file changed, 232 insertions(+), 266 deletions(-)

diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs
index 4375056c9a..19d45e221d 100644
--- a/crates/collab/src/rpc.rs
+++ b/crates/collab/src/rpc.rs
@@ -68,21 +68,20 @@ lazy_static! {
 }
 
 type MessageHandler = Box<
-    dyn Send + Sync + Fn(Arc<Server>, UserId, Box<dyn AnyTypedEnvelope>) -> BoxFuture<'static, ()>,
+    dyn Send + Sync + Fn(Arc<Server>, Box<dyn AnyTypedEnvelope>, Session) -> BoxFuture<'static, ()>,
 >;
 
-struct Message<T> {
-    sender_user_id: UserId,
-    sender_connection_id: ConnectionId,
-    payload: T,
-}
-
 struct Response<R: RequestMessage> {
     server: Arc<Server>,
     receipt: Receipt<R>,
     responded: Arc<AtomicBool>,
 }
 
+struct Session {
+    user_id: UserId,
+    connection_id: ConnectionId,
+}
+
 impl<R: RequestMessage> Response<R> {
     fn send(self, payload: R::Response) -> Result<()> {
         self.responded.store(true, SeqCst);
@@ -201,13 +200,13 @@ impl Server {
 
     fn add_handler<F, Fut, M>(&mut self, handler: F) -> &mut Self
     where
-        F: 'static + Send + Sync + Fn(Arc<Server>, UserId, TypedEnvelope<M>) -> Fut,
+        F: 'static + Send + Sync + Fn(Arc<Server>, TypedEnvelope<M>, Session) -> Fut,
         Fut: 'static + Send + Future<Output = Result<()>>,
         M: EnvelopedMessage,
     {
         let prev_handler = self.handlers.insert(
             TypeId::of::<M>(),
-            Box::new(move |server, sender_user_id, envelope| {
+            Box::new(move |server, envelope, session| {
                 let envelope = envelope.into_any().downcast::<TypedEnvelope<M>>().unwrap();
                 let span = info_span!(
                     "handle message",
@@ -219,7 +218,7 @@ impl Server {
                         "message received"
                     );
                 });
-                let future = (handler)(server, sender_user_id, *envelope);
+                let future = (handler)(server, *envelope, session);
                 async move {
                     if let Err(error) = future.await {
                         tracing::error!(%error, "error handling message");
@@ -237,19 +236,12 @@ impl Server {
 
     fn add_message_handler<F, Fut, M>(&mut self, handler: F) -> &mut Self
     where
-        F: 'static + Send + Sync + Fn(Arc<Server>, Message<M>) -> Fut,
+        F: 'static + Send + Sync + Fn(Arc<Server>, M, Session) -> Fut,
         Fut: 'static + Send + Future<Output = Result<()>>,
         M: EnvelopedMessage,
     {
-        self.add_handler(move |server, sender_user_id, envelope| {
-            handler(
-                server,
-                Message {
-                    sender_user_id,
-                    sender_connection_id: envelope.sender_id,
-                    payload: envelope.payload,
-                },
-            )
+        self.add_handler(move |server, envelope, session| {
+            handler(server, envelope.payload, session)
         });
         self
     }
@@ -258,27 +250,22 @@ impl Server {
     /// Handle a request while holding a lock to the store. This is useful when we're registering
     /// a connection but we want to respond on the connection before anybody else can send on it.
fn add_request_handler(&mut self, handler: F) -> &mut Self where - F: 'static + Send + Sync + Fn(Arc, Message, Response) -> Fut, + F: 'static + Send + Sync + Fn(Arc, M, Response, Session) -> Fut, Fut: Send + Future>, M: RequestMessage, { let handler = Arc::new(handler); - self.add_handler(move |server, sender_user_id, envelope| { + self.add_handler(move |server, envelope, session| { let receipt = envelope.receipt(); let handler = handler.clone(); async move { - let request = Message { - sender_user_id, - sender_connection_id: envelope.sender_id, - payload: envelope.payload, - }; let responded = Arc::new(AtomicBool::default()); let response = Response { server: server.clone(), responded: responded.clone(), receipt, }; - match (handler)(server.clone(), request, response).await { + match (handler)(server.clone(), envelope.payload, response, session).await { Ok(()) => { if responded.load(std::sync::atomic::Ordering::SeqCst) { Ok(()) @@ -392,7 +379,11 @@ impl Server { let span_enter = span.enter(); if let Some(handler) = this.handlers.get(&message.payload_type_id()) { let is_background = message.is_background(); - let handle_message = (handler)(this.clone(), user_id, message); + let session = Session { + user_id, + connection_id, + }; + let handle_message = (handler)(this.clone(), message, session); drop(span_enter); let handle_message = handle_message.instrument(span); @@ -509,8 +500,9 @@ impl Server { async fn ping( self: Arc, - _: Message, + _: proto::Ping, response: Response, + _session: Session, ) -> Result<()> { response.send(proto::Ack {})?; Ok(()) @@ -518,13 +510,14 @@ impl Server { async fn create_room( self: Arc, - request: Message, + _request: proto::CreateRoom, response: Response, + session: Session, ) -> Result<()> { let room = self .app_state .db - .create_room(request.sender_user_id, request.sender_connection_id) + .create_room(session.user_id, session.connection_id) .await?; let live_kit_connection_info = @@ -535,10 +528,7 @@ impl Server { .trace_err() { if let Some(token) = live_kit - .room_token( - &room.live_kit_room, - &request.sender_connection_id.to_string(), - ) + .room_token(&room.live_kit_room, &session.connection_id.to_string()) .trace_err() { Some(proto::LiveKitConnectionInfo { @@ -559,29 +549,26 @@ impl Server { room: Some(room), live_kit_connection_info, })?; - self.update_user_contacts(request.sender_user_id).await?; + self.update_user_contacts(session.user_id).await?; Ok(()) } async fn join_room( self: Arc, - request: Message, + request: proto::JoinRoom, response: Response, + session: Session, ) -> Result<()> { let room = self .app_state .db .join_room( - RoomId::from_proto(request.payload.id), - request.sender_user_id, - request.sender_connection_id, + RoomId::from_proto(request.id), + session.user_id, + session.connection_id, ) .await?; - for connection_id in self - .store() - .await - .connection_ids_for_user(request.sender_user_id) - { + for connection_id in self.store().await.connection_ids_for_user(session.user_id) { self.peer .send(connection_id, proto::CallCanceled {}) .trace_err(); @@ -590,10 +577,7 @@ impl Server { let live_kit_connection_info = if let Some(live_kit) = self.app_state.live_kit_client.as_ref() { if let Some(token) = live_kit - .room_token( - &room.live_kit_room, - &request.sender_connection_id.to_string(), - ) + .room_token(&room.live_kit_room, &session.connection_id.to_string()) .trace_err() { Some(proto::LiveKitConnectionInfo { @@ -613,12 +597,16 @@ impl Server { live_kit_connection_info, })?; - 
self.update_user_contacts(request.sender_user_id).await?; + self.update_user_contacts(session.user_id).await?; Ok(()) } - async fn leave_room(self: Arc, message: Message) -> Result<()> { - self.leave_room_for_connection(message.sender_connection_id, message.sender_user_id) + async fn leave_room( + self: Arc, + _message: proto::LeaveRoom, + session: Session, + ) -> Result<()> { + self.leave_room_for_connection(session.connection_id, session.user_id) .await } @@ -707,17 +695,15 @@ impl Server { async fn call( self: Arc, - request: Message, + request: proto::Call, response: Response, + session: Session, ) -> Result<()> { - let room_id = RoomId::from_proto(request.payload.room_id); - let calling_user_id = request.sender_user_id; - let calling_connection_id = request.sender_connection_id; - let called_user_id = UserId::from_proto(request.payload.called_user_id); - let initial_project_id = request - .payload - .initial_project_id - .map(ProjectId::from_proto); + let room_id = RoomId::from_proto(request.room_id); + let calling_user_id = session.user_id; + let calling_connection_id = session.connection_id; + let called_user_id = UserId::from_proto(request.called_user_id); + let initial_project_id = request.initial_project_id.map(ProjectId::from_proto); if !self .app_state .db @@ -773,15 +759,16 @@ impl Server { async fn cancel_call( self: Arc, - request: Message, + request: proto::CancelCall, response: Response, + session: Session, ) -> Result<()> { - let called_user_id = UserId::from_proto(request.payload.called_user_id); - let room_id = RoomId::from_proto(request.payload.room_id); + let called_user_id = UserId::from_proto(request.called_user_id); + let room_id = RoomId::from_proto(request.room_id); let room = self .app_state .db - .cancel_call(Some(room_id), request.sender_connection_id, called_user_id) + .cancel_call(Some(room_id), session.connection_id, called_user_id) .await?; for connection_id in self.store().await.connection_ids_for_user(called_user_id) { self.peer @@ -795,41 +782,41 @@ impl Server { Ok(()) } - async fn decline_call(self: Arc, message: Message) -> Result<()> { - let room_id = RoomId::from_proto(message.payload.room_id); + async fn decline_call( + self: Arc, + message: proto::DeclineCall, + session: Session, + ) -> Result<()> { + let room_id = RoomId::from_proto(message.room_id); let room = self .app_state .db - .decline_call(Some(room_id), message.sender_user_id) + .decline_call(Some(room_id), session.user_id) .await?; - for connection_id in self - .store() - .await - .connection_ids_for_user(message.sender_user_id) - { + for connection_id in self.store().await.connection_ids_for_user(session.user_id) { self.peer .send(connection_id, proto::CallCanceled {}) .trace_err(); } self.room_updated(&room); - self.update_user_contacts(message.sender_user_id).await?; + self.update_user_contacts(session.user_id).await?; Ok(()) } async fn update_participant_location( self: Arc, - request: Message, + request: proto::UpdateParticipantLocation, response: Response, + session: Session, ) -> Result<()> { - let room_id = RoomId::from_proto(request.payload.room_id); + let room_id = RoomId::from_proto(request.room_id); let location = request - .payload .location .ok_or_else(|| anyhow!("invalid location"))?; let room = self .app_state .db - .update_room_participant_location(room_id, request.sender_connection_id, location) + .update_room_participant_location(room_id, session.connection_id, location) .await?; self.room_updated(&room); response.send(proto::Ack {})?; @@ -851,16 +838,17 @@ impl 
Server { async fn share_project( self: Arc, - request: Message, + request: proto::ShareProject, response: Response, + session: Session, ) -> Result<()> { let (project_id, room) = self .app_state .db .share_project( - RoomId::from_proto(request.payload.room_id), - request.sender_connection_id, - &request.payload.worktrees, + RoomId::from_proto(request.room_id), + session.connection_id, + &request.worktrees, ) .await?; response.send(proto::ShareProjectResponse { @@ -873,21 +861,20 @@ impl Server { async fn unshare_project( self: Arc, - message: Message, + message: proto::UnshareProject, + session: Session, ) -> Result<()> { - let project_id = ProjectId::from_proto(message.payload.project_id); + let project_id = ProjectId::from_proto(message.project_id); let (room, guest_connection_ids) = self .app_state .db - .unshare_project(project_id, message.sender_connection_id) + .unshare_project(project_id, session.connection_id) .await?; - broadcast( - message.sender_connection_id, - guest_connection_ids, - |conn_id| self.peer.send(conn_id, message.payload.clone()), - ); + broadcast(session.connection_id, guest_connection_ids, |conn_id| { + self.peer.send(conn_id, message.clone()) + }); self.room_updated(&room); Ok(()) @@ -926,26 +913,25 @@ impl Server { async fn join_project( self: Arc, - request: Message, + request: proto::JoinProject, response: Response, + session: Session, ) -> Result<()> { - let project_id = ProjectId::from_proto(request.payload.project_id); - let guest_user_id = request.sender_user_id; + let project_id = ProjectId::from_proto(request.project_id); + let guest_user_id = session.user_id; tracing::info!(%project_id, "join project"); let (project, replica_id) = self .app_state .db - .join_project(project_id, request.sender_connection_id) + .join_project(project_id, session.connection_id) .await?; let collaborators = project .collaborators .iter() - .filter(|collaborator| { - collaborator.connection_id != request.sender_connection_id.0 as i32 - }) + .filter(|collaborator| collaborator.connection_id != session.connection_id.0 as i32) .map(|collaborator| proto::Collaborator { peer_id: collaborator.connection_id as u32, replica_id: collaborator.replica_id.0 as u32, @@ -970,7 +956,7 @@ impl Server { proto::AddProjectCollaborator { project_id: project_id.to_proto(), collaborator: Some(proto::Collaborator { - peer_id: request.sender_connection_id.0, + peer_id: session.connection_id.0, replica_id: replica_id.0 as u32, user_id: guest_user_id.to_proto(), }), @@ -1005,14 +991,13 @@ impl Server { is_last_update: worktree.is_complete, }; for update in proto::split_worktree_update(message, MAX_CHUNK_SIZE) { - self.peer - .send(request.sender_connection_id, update.clone())?; + self.peer.send(session.connection_id, update.clone())?; } // Stream this worktree's diagnostics. 
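// (These summaries were loaded with the rest of the project state in
// `join_project` above and are forwarded to the joining guest one message
// at a time, mirroring the chunked worktree-entry stream in the loop above.)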
for summary in worktree.diagnostic_summaries { self.peer.send( - request.sender_connection_id, + session.connection_id, proto::UpdateDiagnosticSummary { project_id: project_id.to_proto(), worktree_id: worktree.id.to_proto(), @@ -1024,7 +1009,7 @@ impl Server { for language_server in &project.language_servers { self.peer.send( - request.sender_connection_id, + session.connection_id, proto::UpdateLanguageServer { project_id: project_id.to_proto(), language_server_id: language_server.id, @@ -1040,9 +1025,13 @@ impl Server { Ok(()) } - async fn leave_project(self: Arc, request: Message) -> Result<()> { - let sender_id = request.sender_connection_id; - let project_id = ProjectId::from_proto(request.payload.project_id); + async fn leave_project( + self: Arc, + request: proto::LeaveProject, + session: Session, + ) -> Result<()> { + let sender_id = session.connection_id; + let project_id = ProjectId::from_proto(request.project_id); let project; { project = self @@ -1073,28 +1062,22 @@ impl Server { async fn update_project( self: Arc, - request: Message, + request: proto::UpdateProject, response: Response, + session: Session, ) -> Result<()> { - let project_id = ProjectId::from_proto(request.payload.project_id); + let project_id = ProjectId::from_proto(request.project_id); let (room, guest_connection_ids) = self .app_state .db - .update_project( - project_id, - request.sender_connection_id, - &request.payload.worktrees, - ) + .update_project(project_id, session.connection_id, &request.worktrees) .await?; broadcast( - request.sender_connection_id, + session.connection_id, guest_connection_ids, |connection_id| { - self.peer.forward_send( - request.sender_connection_id, - connection_id, - request.payload.clone(), - ) + self.peer + .forward_send(session.connection_id, connection_id, request.clone()) }, ); self.room_updated(&room); @@ -1105,24 +1088,22 @@ impl Server { async fn update_worktree( self: Arc, - request: Message, + request: proto::UpdateWorktree, response: Response, + session: Session, ) -> Result<()> { let guest_connection_ids = self .app_state .db - .update_worktree(&request.payload, request.sender_connection_id) + .update_worktree(&request, session.connection_id) .await?; broadcast( - request.sender_connection_id, + session.connection_id, guest_connection_ids, |connection_id| { - self.peer.forward_send( - request.sender_connection_id, - connection_id, - request.payload.clone(), - ) + self.peer + .forward_send(session.connection_id, connection_id, request.clone()) }, ); response.send(proto::Ack {})?; @@ -1131,24 +1112,22 @@ impl Server { async fn update_diagnostic_summary( self: Arc, - request: Message, + request: proto::UpdateDiagnosticSummary, response: Response, + session: Session, ) -> Result<()> { let guest_connection_ids = self .app_state .db - .update_diagnostic_summary(&request.payload, request.sender_connection_id) + .update_diagnostic_summary(&request, session.connection_id) .await?; broadcast( - request.sender_connection_id, + session.connection_id, guest_connection_ids, |connection_id| { - self.peer.forward_send( - request.sender_connection_id, - connection_id, - request.payload.clone(), - ) + self.peer + .forward_send(session.connection_id, connection_id, request.clone()) }, ); @@ -1158,23 +1137,21 @@ impl Server { async fn start_language_server( self: Arc, - request: Message, + request: proto::StartLanguageServer, + session: Session, ) -> Result<()> { let guest_connection_ids = self .app_state .db - .start_language_server(&request.payload, request.sender_connection_id) + 
.start_language_server(&request, session.connection_id) .await?; broadcast( - request.sender_connection_id, + session.connection_id, guest_connection_ids, |connection_id| { - self.peer.forward_send( - request.sender_connection_id, - connection_id, - request.payload.clone(), - ) + self.peer + .forward_send(session.connection_id, connection_id, request.clone()) }, ); Ok(()) @@ -1182,23 +1159,21 @@ impl Server { async fn update_language_server( self: Arc, - request: Message, + request: proto::UpdateLanguageServer, + session: Session, ) -> Result<()> { - let project_id = ProjectId::from_proto(request.payload.project_id); + let project_id = ProjectId::from_proto(request.project_id); let project_connection_ids = self .app_state .db - .project_connection_ids(project_id, request.sender_connection_id) + .project_connection_ids(project_id, session.connection_id) .await?; broadcast( - request.sender_connection_id, + session.connection_id, project_connection_ids, |connection_id| { - self.peer.forward_send( - request.sender_connection_id, - connection_id, - request.payload.clone(), - ) + self.peer + .forward_send(session.connection_id, connection_id, request.clone()) }, ); Ok(()) @@ -1206,17 +1181,18 @@ impl Server { async fn forward_project_request( self: Arc, - request: Message, + request: T, response: Response, + session: Session, ) -> Result<()> where T: EntityMessage + RequestMessage, { - let project_id = ProjectId::from_proto(request.payload.remote_entity_id()); + let project_id = ProjectId::from_proto(request.remote_entity_id()); let collaborators = self .app_state .db - .project_collaborators(project_id, request.sender_connection_id) + .project_collaborators(project_id, session.connection_id) .await?; let host = collaborators .iter() @@ -1226,9 +1202,9 @@ impl Server { let payload = self .peer .forward_request( - request.sender_connection_id, + session.connection_id, ConnectionId(host.connection_id as u32), - request.payload, + request, ) .await?; @@ -1238,14 +1214,15 @@ impl Server { async fn save_buffer( self: Arc, - request: Message, + request: proto::SaveBuffer, response: Response, + session: Session, ) -> Result<()> { - let project_id = ProjectId::from_proto(request.payload.project_id); + let project_id = ProjectId::from_proto(request.project_id); let collaborators = self .app_state .db - .project_collaborators(project_id, request.sender_connection_id) + .project_collaborators(project_id, session.connection_id) .await?; let host = collaborators .into_iter() @@ -1254,21 +1231,16 @@ impl Server { let host_connection_id = ConnectionId(host.connection_id as u32); let response_payload = self .peer - .forward_request( - request.sender_connection_id, - host_connection_id, - request.payload.clone(), - ) + .forward_request(session.connection_id, host_connection_id, request.clone()) .await?; let mut collaborators = self .app_state .db - .project_collaborators(project_id, request.sender_connection_id) + .project_collaborators(project_id, session.connection_id) .await?; - collaborators.retain(|collaborator| { - collaborator.connection_id != request.sender_connection_id.0 as i32 - }); + collaborators + .retain(|collaborator| collaborator.connection_id != session.connection_id.0 as i32); let project_connection_ids = collaborators .into_iter() .map(|collaborator| ConnectionId(collaborator.connection_id as u32)); @@ -1282,37 +1254,36 @@ impl Server { async fn create_buffer_for_peer( self: Arc, - request: Message, + request: proto::CreateBufferForPeer, + session: Session, ) -> Result<()> { 
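// Unlike the broadcast-style handlers above, this relays the host's message
// to a single recipient: the peer named in the payload rather than every
// collaborator on the project.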
self.peer.forward_send( - request.sender_connection_id, - ConnectionId(request.payload.peer_id), - request.payload, + session.connection_id, + ConnectionId(request.peer_id), + request, )?; Ok(()) } async fn update_buffer( self: Arc, - request: Message, + request: proto::UpdateBuffer, response: Response, + session: Session, ) -> Result<()> { - let project_id = ProjectId::from_proto(request.payload.project_id); + let project_id = ProjectId::from_proto(request.project_id); let project_connection_ids = self .app_state .db - .project_connection_ids(project_id, request.sender_connection_id) + .project_connection_ids(project_id, session.connection_id) .await?; broadcast( - request.sender_connection_id, + session.connection_id, project_connection_ids, |connection_id| { - self.peer.forward_send( - request.sender_connection_id, - connection_id, - request.payload.clone(), - ) + self.peer + .forward_send(session.connection_id, connection_id, request.clone()) }, ); response.send(proto::Ack {})?; @@ -1321,24 +1292,22 @@ impl Server { async fn update_buffer_file( self: Arc, - request: Message, + request: proto::UpdateBufferFile, + session: Session, ) -> Result<()> { - let project_id = ProjectId::from_proto(request.payload.project_id); + let project_id = ProjectId::from_proto(request.project_id); let project_connection_ids = self .app_state .db - .project_connection_ids(project_id, request.sender_connection_id) + .project_connection_ids(project_id, session.connection_id) .await?; broadcast( - request.sender_connection_id, + session.connection_id, project_connection_ids, |connection_id| { - self.peer.forward_send( - request.sender_connection_id, - connection_id, - request.payload.clone(), - ) + self.peer + .forward_send(session.connection_id, connection_id, request.clone()) }, ); Ok(()) @@ -1346,44 +1315,43 @@ impl Server { async fn buffer_reloaded( self: Arc, - request: Message, + request: proto::BufferReloaded, + session: Session, ) -> Result<()> { - let project_id = ProjectId::from_proto(request.payload.project_id); + let project_id = ProjectId::from_proto(request.project_id); let project_connection_ids = self .app_state .db - .project_connection_ids(project_id, request.sender_connection_id) + .project_connection_ids(project_id, session.connection_id) .await?; broadcast( - request.sender_connection_id, + session.connection_id, project_connection_ids, |connection_id| { - self.peer.forward_send( - request.sender_connection_id, - connection_id, - request.payload.clone(), - ) + self.peer + .forward_send(session.connection_id, connection_id, request.clone()) }, ); Ok(()) } - async fn buffer_saved(self: Arc, request: Message) -> Result<()> { - let project_id = ProjectId::from_proto(request.payload.project_id); + async fn buffer_saved( + self: Arc, + request: proto::BufferSaved, + session: Session, + ) -> Result<()> { + let project_id = ProjectId::from_proto(request.project_id); let project_connection_ids = self .app_state .db - .project_connection_ids(project_id, request.sender_connection_id) + .project_connection_ids(project_id, session.connection_id) .await?; broadcast( - request.sender_connection_id, + session.connection_id, project_connection_ids, |connection_id| { - self.peer.forward_send( - request.sender_connection_id, - connection_id, - request.payload.clone(), - ) + self.peer + .forward_send(session.connection_id, connection_id, request.clone()) }, ); Ok(()) @@ -1391,16 +1359,17 @@ impl Server { async fn follow( self: Arc, - request: Message, + request: proto::Follow, response: Response, + session: 
Session, ) -> Result<()> { - let project_id = ProjectId::from_proto(request.payload.project_id); - let leader_id = ConnectionId(request.payload.leader_id); - let follower_id = request.sender_connection_id; + let project_id = ProjectId::from_proto(request.project_id); + let leader_id = ConnectionId(request.leader_id); + let follower_id = session.connection_id; let project_connection_ids = self .app_state .db - .project_connection_ids(project_id, request.sender_connection_id) + .project_connection_ids(project_id, session.connection_id) .await?; if !project_connection_ids.contains(&leader_id) { @@ -1409,7 +1378,7 @@ impl Server { let mut response_payload = self .peer - .forward_request(request.sender_connection_id, leader_id, request.payload) + .forward_request(session.connection_id, leader_id, request) .await?; response_payload .views @@ -1418,50 +1387,44 @@ impl Server { Ok(()) } - async fn unfollow(self: Arc, request: Message) -> Result<()> { - let project_id = ProjectId::from_proto(request.payload.project_id); - let leader_id = ConnectionId(request.payload.leader_id); + async fn unfollow(self: Arc, request: proto::Unfollow, session: Session) -> Result<()> { + let project_id = ProjectId::from_proto(request.project_id); + let leader_id = ConnectionId(request.leader_id); let project_connection_ids = self .app_state .db - .project_connection_ids(project_id, request.sender_connection_id) + .project_connection_ids(project_id, session.connection_id) .await?; if !project_connection_ids.contains(&leader_id) { Err(anyhow!("no such peer"))?; } self.peer - .forward_send(request.sender_connection_id, leader_id, request.payload)?; + .forward_send(session.connection_id, leader_id, request)?; Ok(()) } async fn update_followers( self: Arc, - request: Message, + request: proto::UpdateFollowers, + session: Session, ) -> Result<()> { - let project_id = ProjectId::from_proto(request.payload.project_id); + let project_id = ProjectId::from_proto(request.project_id); let project_connection_ids = self .app_state .db - .project_connection_ids(project_id, request.sender_connection_id) + .project_connection_ids(project_id, session.connection_id) .await?; - let leader_id = request - .payload - .variant - .as_ref() - .and_then(|variant| match variant { - proto::update_followers::Variant::CreateView(payload) => payload.leader_id, - proto::update_followers::Variant::UpdateView(payload) => payload.leader_id, - proto::update_followers::Variant::UpdateActiveView(payload) => payload.leader_id, - }); - for follower_id in &request.payload.follower_ids { + let leader_id = request.variant.as_ref().and_then(|variant| match variant { + proto::update_followers::Variant::CreateView(payload) => payload.leader_id, + proto::update_followers::Variant::UpdateView(payload) => payload.leader_id, + proto::update_followers::Variant::UpdateActiveView(payload) => payload.leader_id, + }); + for follower_id in &request.follower_ids { let follower_id = ConnectionId(*follower_id); if project_connection_ids.contains(&follower_id) && Some(follower_id.0) != leader_id { - self.peer.forward_send( - request.sender_connection_id, - follower_id, - request.payload.clone(), - )?; + self.peer + .forward_send(session.connection_id, follower_id, request.clone())?; } } Ok(()) @@ -1469,11 +1432,11 @@ impl Server { async fn get_users( self: Arc, - request: Message, + request: proto::GetUsers, response: Response, + _session: Session, ) -> Result<()> { let user_ids = request - .payload .user_ids .into_iter() .map(UserId::from_proto) @@ -1496,10 +1459,11 @@ impl 
Server { async fn fuzzy_search_users( self: Arc, - request: Message, + request: proto::FuzzySearchUsers, response: Response, + session: Session, ) -> Result<()> { - let query = request.payload.query; + let query = request.query; let db = &self.app_state.db; let users = match query.len() { 0 => vec![], @@ -1512,7 +1476,7 @@ impl Server { }; let users = users .into_iter() - .filter(|user| user.id != request.sender_user_id) + .filter(|user| user.id != session.user_id) .map(|user| proto::User { id: user.id.to_proto(), avatar_url: format!("https://github.com/{}.png?size=128", user.github_login), @@ -1525,11 +1489,12 @@ impl Server { async fn request_contact( self: Arc, - request: Message, + request: proto::RequestContact, response: Response, + session: Session, ) -> Result<()> { - let requester_id = request.sender_user_id; - let responder_id = UserId::from_proto(request.payload.responder_id); + let requester_id = session.user_id; + let responder_id = UserId::from_proto(request.responder_id); if requester_id == responder_id { return Err(anyhow!("cannot add yourself as a contact"))?; } @@ -1564,18 +1529,19 @@ impl Server { async fn respond_to_contact_request( self: Arc, - request: Message, + request: proto::RespondToContactRequest, response: Response, + session: Session, ) -> Result<()> { - let responder_id = request.sender_user_id; - let requester_id = UserId::from_proto(request.payload.requester_id); - if request.payload.response == proto::ContactRequestResponse::Dismiss as i32 { + let responder_id = session.user_id; + let requester_id = UserId::from_proto(request.requester_id); + if request.response == proto::ContactRequestResponse::Dismiss as i32 { self.app_state .db .dismiss_contact_notification(responder_id, requester_id) .await?; } else { - let accept = request.payload.response == proto::ContactRequestResponse::Accept as i32; + let accept = request.response == proto::ContactRequestResponse::Accept as i32; self.app_state .db .respond_to_contact_request(responder_id, requester_id, accept) @@ -1618,11 +1584,12 @@ impl Server { async fn remove_contact( self: Arc, - request: Message, + request: proto::RemoveContact, response: Response, + session: Session, ) -> Result<()> { - let requester_id = request.sender_user_id; - let responder_id = UserId::from_proto(request.payload.user_id); + let requester_id = session.user_id; + let responder_id = UserId::from_proto(request.user_id); self.app_state .db .remove_contact(requester_id, responder_id) @@ -1652,23 +1619,21 @@ impl Server { async fn update_diff_base( self: Arc, - request: Message, + request: proto::UpdateDiffBase, + session: Session, ) -> Result<()> { - let project_id = ProjectId::from_proto(request.payload.project_id); + let project_id = ProjectId::from_proto(request.project_id); let project_connection_ids = self .app_state .db - .project_connection_ids(project_id, request.sender_connection_id) + .project_connection_ids(project_id, session.connection_id) .await?; broadcast( - request.sender_connection_id, + session.connection_id, project_connection_ids, |connection_id| { - self.peer.forward_send( - request.sender_connection_id, - connection_id, - request.payload.clone(), - ) + self.peer + .forward_send(session.connection_id, connection_id, request.clone()) }, ); Ok(()) @@ -1676,18 +1641,19 @@ impl Server { async fn get_private_user_info( self: Arc, - request: Message, + _request: proto::GetPrivateUserInfo, response: Response, + session: Session, ) -> Result<()> { let metrics_id = self .app_state .db - 
.get_user_metrics_id(request.sender_user_id) + .get_user_metrics_id(session.user_id) .await?; let user = self .app_state .db - .get_user_by_id(request.sender_user_id) + .get_user_by_id(session.user_id) .await? .ok_or_else(|| anyhow!("user not found"))?; response.send(proto::GetPrivateUserInfoResponse { From 0a4517f97e55ea41d6a27996a2948de669887416 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 17 Nov 2022 17:30:26 +0100 Subject: [PATCH 054/240] WIP: Introduce a `db` field to `Session` Co-Authored-By: Nathan Sobo --- Cargo.lock | 6 +++--- crates/collab/Cargo.toml | 1 - crates/collab/src/rpc.rs | 21 ++++++++++++++++++--- 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1cceb9f99c..b6f86980ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -266,11 +266,12 @@ dependencies = [ [[package]] name = "async-lock" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97a171d191782fba31bb902b14ad94e24a68145032b7eedf871ab0bc0d077b6" +checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" dependencies = [ "event-listener", + "futures-lite", ] [[package]] @@ -1031,7 +1032,6 @@ name = "collab" version = "0.2.2" dependencies = [ "anyhow", - "async-trait", "async-tungstenite", "axum", "axum-extra", diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 7456cb5598..f04918605f 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -19,7 +19,6 @@ rpc = { path = "../rpc" } util = { path = "../util" } anyhow = "1.0.40" -async-trait = "0.1.50" async-tungstenite = "0.16" axum = { version = "0.5", features = ["json", "headers", "ws"] } axum-extra = { version = "0.3", features = ["erased-json"] } diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 19d45e221d..0c559239f5 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -2,7 +2,7 @@ mod store; use crate::{ auth, - db::{self, ProjectId, RoomId, User, UserId}, + db::{self, DefaultDb, ProjectId, RoomId, User, UserId}, AppState, Result, }; use anyhow::anyhow; @@ -80,6 +80,17 @@ struct Response { struct Session { user_id: UserId, connection_id: ConnectionId, + db: Arc>, +} + +struct DbHandle(Arc); + +impl Deref for DbHandle { + type Target = DefaultDb; + + fn deref(&self) -> &Self::Target { + self.0.as_ref() + } } impl Response { @@ -352,6 +363,8 @@ impl Server { let handle_io = handle_io.fuse(); futures::pin_mut!(handle_io); + let db = Arc::new(Mutex::new(DbHandle(this.app_state.db.clone()))); + // Handlers for foreground messages are pushed into the following `FuturesUnordered`. // This prevents deadlocks when e.g., client A performs a request to client B and // client B performs a request to client A. 
If both clients stop processing further @@ -382,6 +395,7 @@ impl Server { let session = Session { user_id, connection_id, + db: db.clone(), }; let handle_message = (handler)(this.clone(), message, session); drop(span_enter); @@ -1409,9 +1423,10 @@ impl Server { session: Session, ) -> Result<()> { let project_id = ProjectId::from_proto(request.project_id); - let project_connection_ids = self - .app_state + let project_connection_ids = session .db + .lock() + .await .project_connection_ids(project_id, session.connection_id) .await?; From 6c83be3f89328f1e89670cec038ff6ff9b16e98c Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 17 Nov 2022 18:46:39 +0100 Subject: [PATCH 055/240] Remove obsolete code from `Store` --- crates/collab/src/db.rs | 15 +++ crates/collab/src/main.rs | 53 --------- crates/collab/src/rpc.rs | 60 +++++----- crates/collab/src/rpc/store.rs | 205 ++------------------------------- 4 files changed, 58 insertions(+), 275 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index c97c82c656..6cb5373881 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1464,6 +1464,21 @@ where // projects + pub async fn project_count_excluding_admins(&self) -> Result { + self.transact(|mut tx| async move { + Ok(sqlx::query_scalar::<_, i32>( + " + SELECT COUNT(*) + FROM projects, users + WHERE projects.host_user_id = users.id AND users.admin IS FALSE + ", + ) + .fetch_one(&mut tx) + .await? as usize) + }) + .await + } + pub async fn share_project( &self, expected_room_id: RoomId, diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index dc98a2ee68..20fae38c16 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -9,7 +9,6 @@ mod db_tests; #[cfg(test)] mod integration_tests; -use crate::rpc::ResultExt as _; use anyhow::anyhow; use axum::{routing::get, Router}; use collab::{Error, Result}; @@ -20,9 +19,7 @@ use std::{ net::{SocketAddr, TcpListener}, path::{Path, PathBuf}, sync::Arc, - time::Duration, }; -use tokio::signal; use tracing_log::LogTracer; use tracing_subscriber::{filter::EnvFilter, fmt::format::JsonFields, Layer}; use util::ResultExt; @@ -129,7 +126,6 @@ async fn main() -> Result<()> { axum::Server::from_tcp(listener)? .serve(app.into_make_service_with_connect_info::()) - .with_graceful_shutdown(graceful_shutdown(rpc_server, state)) .await?; } _ => { @@ -174,52 +170,3 @@ pub fn init_tracing(config: &Config) -> Option<()> { None } - -async fn graceful_shutdown(rpc_server: Arc, state: Arc) { - let ctrl_c = async { - signal::ctrl_c() - .await - .expect("failed to install Ctrl+C handler"); - }; - - #[cfg(unix)] - let terminate = async { - signal::unix::signal(signal::unix::SignalKind::terminate()) - .expect("failed to install signal handler") - .recv() - .await; - }; - - #[cfg(not(unix))] - let terminate = std::future::pending::<()>(); - - tokio::select! 
{ - _ = ctrl_c => {}, - _ = terminate => {}, - } - - if let Some(live_kit) = state.live_kit_client.as_ref() { - let deletions = rpc_server - .store() - .await - .rooms() - .values() - .map(|room| { - let name = room.live_kit_room.clone(); - async { - live_kit.delete_room(name).await.trace_err(); - } - }) - .collect::>(); - - tracing::info!("deleting all live-kit rooms"); - if let Err(_) = tokio::time::timeout( - Duration::from_secs(10), - futures::future::join_all(deletions), - ) - .await - { - tracing::error!("timed out waiting for live-kit room deletion"); - } - } -} diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 0c559239f5..58870163f5 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -49,7 +49,7 @@ use std::{ }, time::Duration, }; -pub use store::{Store, Worktree}; +pub use store::Store; use tokio::{ sync::{Mutex, MutexGuard}, time::Sleep, @@ -437,7 +437,7 @@ impl Server { let decline_calls = { let mut store = self.store().await; store.remove_connection(connection_id)?; - let mut connections = store.connection_ids_for_user(user_id); + let mut connections = store.user_connection_ids(user_id); connections.next().is_none() }; @@ -470,7 +470,7 @@ impl Server { if let Some(code) = &user.invite_code { let store = self.store().await; let invitee_contact = store.contact_for_user(invitee_id, true, false); - for connection_id in store.connection_ids_for_user(inviter_id) { + for connection_id in store.user_connection_ids(inviter_id) { self.peer.send( connection_id, proto::UpdateContacts { @@ -495,7 +495,7 @@ impl Server { if let Some(user) = self.app_state.db.get_user_by_id(user_id).await? { if let Some(invite_code) = &user.invite_code { let store = self.store().await; - for connection_id in store.connection_ids_for_user(user_id) { + for connection_id in store.user_connection_ids(user_id) { self.peer.send( connection_id, proto::UpdateInviteInfo { @@ -582,7 +582,7 @@ impl Server { session.connection_id, ) .await?; - for connection_id in self.store().await.connection_ids_for_user(session.user_id) { + for connection_id in self.store().await.user_connection_ids(session.user_id) { self.peer .send(connection_id, proto::CallCanceled {}) .trace_err(); @@ -674,7 +674,7 @@ impl Server { { let store = self.store().await; for canceled_user_id in left_room.canceled_calls_to_user_ids { - for connection_id in store.connection_ids_for_user(canceled_user_id) { + for connection_id in store.user_connection_ids(canceled_user_id) { self.peer .send(connection_id, proto::CallCanceled {}) .trace_err(); @@ -744,7 +744,7 @@ impl Server { let mut calls = self .store() .await - .connection_ids_for_user(called_user_id) + .user_connection_ids(called_user_id) .map(|connection_id| self.peer.request(connection_id, incoming_call.clone())) .collect::>(); @@ -784,7 +784,7 @@ impl Server { .db .cancel_call(Some(room_id), session.connection_id, called_user_id) .await?; - for connection_id in self.store().await.connection_ids_for_user(called_user_id) { + for connection_id in self.store().await.user_connection_ids(called_user_id) { self.peer .send(connection_id, proto::CallCanceled {}) .trace_err(); @@ -807,7 +807,7 @@ impl Server { .db .decline_call(Some(room_id), session.user_id) .await?; - for connection_id in self.store().await.connection_ids_for_user(session.user_id) { + for connection_id in self.store().await.user_connection_ids(session.user_id) { self.peer .send(connection_id, proto::CallCanceled {}) .trace_err(); @@ -905,7 +905,7 @@ impl Server { .. 
} = contact { - for contact_conn_id in store.connection_ids_for_user(contact_user_id) { + for contact_conn_id in store.user_connection_ids(contact_user_id) { self.peer .send( contact_conn_id, @@ -1522,7 +1522,7 @@ impl Server { // Update outgoing contact requests of requester let mut update = proto::UpdateContacts::default(); update.outgoing_requests.push(responder_id.to_proto()); - for connection_id in self.store().await.connection_ids_for_user(requester_id) { + for connection_id in self.store().await.user_connection_ids(requester_id) { self.peer.send(connection_id, update.clone())?; } @@ -1534,7 +1534,7 @@ impl Server { requester_id: requester_id.to_proto(), should_notify: true, }); - for connection_id in self.store().await.connection_ids_for_user(responder_id) { + for connection_id in self.store().await.user_connection_ids(responder_id) { self.peer.send(connection_id, update.clone())?; } @@ -1574,7 +1574,7 @@ impl Server { update .remove_incoming_requests .push(requester_id.to_proto()); - for connection_id in store.connection_ids_for_user(responder_id) { + for connection_id in store.user_connection_ids(responder_id) { self.peer.send(connection_id, update.clone())?; } @@ -1588,7 +1588,7 @@ impl Server { update .remove_outgoing_requests .push(responder_id.to_proto()); - for connection_id in store.connection_ids_for_user(requester_id) { + for connection_id in store.user_connection_ids(requester_id) { self.peer.send(connection_id, update.clone())?; } } @@ -1615,7 +1615,7 @@ impl Server { update .remove_outgoing_requests .push(responder_id.to_proto()); - for connection_id in self.store().await.connection_ids_for_user(requester_id) { + for connection_id in self.store().await.user_connection_ids(requester_id) { self.peer.send(connection_id, update.clone())?; } @@ -1624,7 +1624,7 @@ impl Server { update .remove_incoming_requests .push(requester_id.to_proto()); - for connection_id in self.store().await.connection_ids_for_user(responder_id) { + for connection_id in self.store().await.user_connection_ids(responder_id) { self.peer.send(connection_id, update.clone())?; } @@ -1819,21 +1819,25 @@ pub async fn handle_websocket_request( }) } -pub async fn handle_metrics(Extension(server): Extension>) -> axum::response::Response { - let metrics = server.store().await.metrics(); - METRIC_CONNECTIONS.set(metrics.connections as _); - METRIC_SHARED_PROJECTS.set(metrics.shared_projects as _); +pub async fn handle_metrics(Extension(server): Extension>) -> Result { + let connections = server + .store() + .await + .connections() + .filter(|connection| !connection.admin) + .count(); + + METRIC_CONNECTIONS.set(connections as _); + + let shared_projects = server.app_state.db.project_count_excluding_admins().await?; + METRIC_SHARED_PROJECTS.set(shared_projects as _); let encoder = prometheus::TextEncoder::new(); let metric_families = prometheus::gather(); - match encoder.encode_to_string(&metric_families) { - Ok(string) => (StatusCode::OK, string).into_response(), - Err(error) => ( - StatusCode::INTERNAL_SERVER_ERROR, - format!("failed to encode metrics {:?}", error), - ) - .into_response(), - } + let encoded_metrics = encoder + .encode_to_string(&metric_families) + .map_err(|err| anyhow!("{}", err))?; + Ok(encoded_metrics) } fn to_axum_message(message: TungsteniteMessage) -> AxumMessage { diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index 1aa9c709b7..2bb6d89f40 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -1,111 +1,32 @@ -use crate::db::{self, 
ProjectId, UserId}; +use crate::db::{self, UserId}; use anyhow::{anyhow, Result}; -use collections::{BTreeMap, BTreeSet, HashMap, HashSet}; +use collections::{BTreeMap, HashSet}; use rpc::{proto, ConnectionId}; use serde::Serialize; -use std::path::PathBuf; use tracing::instrument; -pub type RoomId = u64; - #[derive(Default, Serialize)] pub struct Store { - connections: BTreeMap, + connections: BTreeMap, connected_users: BTreeMap, - next_room_id: RoomId, - rooms: BTreeMap, - projects: BTreeMap, } #[derive(Default, Serialize)] struct ConnectedUser { connection_ids: HashSet, - active_call: Option, } #[derive(Serialize)] -struct ConnectionState { - user_id: UserId, - admin: bool, - projects: BTreeSet, -} - -#[derive(Copy, Clone, Eq, PartialEq, Serialize)] -pub struct Call { - pub calling_user_id: UserId, - pub room_id: RoomId, - pub connection_id: Option, - pub initial_project_id: Option, -} - -#[derive(Serialize)] -pub struct Project { - pub id: ProjectId, - pub room_id: RoomId, - pub host_connection_id: ConnectionId, - pub host: Collaborator, - pub guests: HashMap, - pub active_replica_ids: HashSet, - pub worktrees: BTreeMap, - pub language_servers: Vec, -} - -#[derive(Serialize)] -pub struct Collaborator { - pub replica_id: ReplicaId, +pub struct Connection { pub user_id: UserId, pub admin: bool, } -#[derive(Default, Serialize)] -pub struct Worktree { - pub abs_path: PathBuf, - pub root_name: String, - pub visible: bool, - #[serde(skip)] - pub entries: BTreeMap, - #[serde(skip)] - pub diagnostic_summaries: BTreeMap, - pub scan_id: u64, - pub is_complete: bool, -} - -pub type ReplicaId = u16; - -#[derive(Copy, Clone)] -pub struct Metrics { - pub connections: usize, - pub shared_projects: usize, -} - impl Store { - pub fn metrics(&self) -> Metrics { - let connections = self.connections.values().filter(|c| !c.admin).count(); - let mut shared_projects = 0; - for project in self.projects.values() { - if let Some(connection) = self.connections.get(&project.host_connection_id) { - if !connection.admin { - shared_projects += 1; - } - } - } - - Metrics { - connections, - shared_projects, - } - } - #[instrument(skip(self))] pub fn add_connection(&mut self, connection_id: ConnectionId, user_id: UserId, admin: bool) { - self.connections.insert( - connection_id, - ConnectionState { - user_id, - admin, - projects: Default::default(), - }, - ); + self.connections + .insert(connection_id, Connection { user_id, admin }); let connected_user = self.connected_users.entry(user_id).or_default(); connected_user.connection_ids.insert(connection_id); } @@ -127,10 +48,11 @@ impl Store { Ok(()) } - pub fn connection_ids_for_user( - &self, - user_id: UserId, - ) -> impl Iterator + '_ { + pub fn connections(&self) -> impl Iterator { + self.connections.values() + } + + pub fn user_connection_ids(&self, user_id: UserId) -> impl Iterator + '_ { self.connected_users .get(&user_id) .into_iter() @@ -197,35 +119,9 @@ impl Store { } } - pub fn rooms(&self) -> &BTreeMap { - &self.rooms - } - #[cfg(test)] pub fn check_invariants(&self) { for (connection_id, connection) in &self.connections { - for project_id in &connection.projects { - let project = &self.projects.get(project_id).unwrap(); - if project.host_connection_id != *connection_id { - assert!(project.guests.contains_key(connection_id)); - } - - for (worktree_id, worktree) in project.worktrees.iter() { - let mut paths = HashMap::default(); - for entry in worktree.entries.values() { - let prev_entry = paths.insert(&entry.path, entry); - assert_eq!( - prev_entry, - None, - 
"worktree {:?}, duplicate path for entries {:?} and {:?}", - worktree_id, - prev_entry.unwrap(), - entry - ); - } - } - } - assert!(self .connected_users .get(&connection.user_id) @@ -241,85 +137,6 @@ impl Store { *user_id ); } - - if let Some(active_call) = state.active_call.as_ref() { - if let Some(active_call_connection_id) = active_call.connection_id { - assert!( - state.connection_ids.contains(&active_call_connection_id), - "call is active on a dead connection" - ); - assert!( - state.connection_ids.contains(&active_call_connection_id), - "call is active on a dead connection" - ); - } - } - } - - for (room_id, room) in &self.rooms { - // for pending_user_id in &room.pending_participant_user_ids { - // assert!( - // self.connected_users - // .contains_key(&UserId::from_proto(*pending_user_id)), - // "call is active on a user that has disconnected" - // ); - // } - - for participant in &room.participants { - assert!( - self.connections - .contains_key(&ConnectionId(participant.peer_id)), - "room {} contains participant {:?} that has disconnected", - room_id, - participant - ); - - for participant_project in &participant.projects { - let project = &self.projects[&ProjectId::from_proto(participant_project.id)]; - assert_eq!( - project.room_id, *room_id, - "project was shared on a different room" - ); - } - } - - // assert!( - // !room.pending_participant_user_ids.is_empty() || !room.participants.is_empty(), - // "room can't be empty" - // ); - } - - for (project_id, project) in &self.projects { - let host_connection = self.connections.get(&project.host_connection_id).unwrap(); - assert!(host_connection.projects.contains(project_id)); - - for guest_connection_id in project.guests.keys() { - let guest_connection = self.connections.get(guest_connection_id).unwrap(); - assert!(guest_connection.projects.contains(project_id)); - } - assert_eq!(project.active_replica_ids.len(), project.guests.len()); - assert_eq!( - project.active_replica_ids, - project - .guests - .values() - .map(|guest| guest.replica_id) - .collect::>(), - ); - - let room = &self.rooms[&project.room_id]; - let room_participant = room - .participants - .iter() - .find(|participant| participant.peer_id == project.host_connection_id.0) - .unwrap(); - assert!( - room_participant - .projects - .iter() - .any(|project| project.id == project_id.to_proto()), - "project was not shared in room" - ); } } } From 44bb2ce024a2b9afe747023f6a6a01068eccef67 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 17 Nov 2022 19:03:50 +0100 Subject: [PATCH 056/240] Rename `Store` to `ConnectionPool` --- crates/collab/src/integration_tests.rs | 21 +-- crates/collab/src/rpc.rs | 167 +++++++++++++----- .../src/rpc/{store.rs => connection_pool.rs} | 57 +----- 3 files changed, 133 insertions(+), 112 deletions(-) rename crates/collab/src/rpc/{store.rs => connection_pool.rs} (64%) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 1236af42cb..006598a6b1 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1,5 +1,5 @@ use crate::{ - db::{NewUserParams, SqliteTestDb as TestDb, UserId}, + db::{self, NewUserParams, SqliteTestDb as TestDb, UserId}, rpc::{Executor, Server}, AppState, }; @@ -5469,18 +5469,15 @@ async fn test_random_collaboration( } for user_id in &user_ids { let contacts = server.app_state.db.get_contacts(*user_id).await.unwrap(); - let contacts = server - .store - .lock() - .await - .build_initial_contacts_update(contacts) - .contacts; + let 
pool = server.connection_pool.lock().await; for contact in contacts { - if contact.online { - assert_ne!( - contact.user_id, removed_guest_id.0 as u64, - "removed guest is still a contact of another peer" - ); + if let db::Contact::Accepted { user_id, .. } = contact { + if pool.is_user_online(user_id) { + assert_ne!( + user_id, removed_guest_id, + "removed guest is still a contact of another peer" + ); + } } } } diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 58870163f5..175e3604c0 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1,4 +1,4 @@ -mod store; +mod connection_pool; use crate::{ auth, @@ -23,6 +23,7 @@ use axum::{ Extension, Router, TypedHeader, }; use collections::{HashMap, HashSet}; +pub use connection_pool::ConnectionPool; use futures::{ channel::oneshot, future::{self, BoxFuture}, @@ -49,7 +50,6 @@ use std::{ }, time::Duration, }; -pub use store::Store; use tokio::{ sync::{Mutex, MutexGuard}, time::Sleep, @@ -103,7 +103,7 @@ impl Response { pub struct Server { peer: Arc, - pub(crate) store: Mutex, + pub(crate) connection_pool: Mutex, app_state: Arc, handlers: HashMap, } @@ -117,8 +117,8 @@ pub trait Executor: Send + Clone { #[derive(Clone)] pub struct RealExecutor; -pub(crate) struct StoreGuard<'a> { - guard: MutexGuard<'a, Store>, +pub(crate) struct ConnectionPoolGuard<'a> { + guard: MutexGuard<'a, ConnectionPool>, _not_send: PhantomData>, } @@ -126,7 +126,7 @@ pub(crate) struct StoreGuard<'a> { pub struct ServerSnapshot<'a> { peer: &'a Peer, #[serde(serialize_with = "serialize_deref")] - store: StoreGuard<'a>, + connection_pool: ConnectionPoolGuard<'a>, } pub fn serialize_deref(value: &T, serializer: S) -> Result @@ -143,7 +143,7 @@ impl Server { let mut server = Self { peer: Peer::new(), app_state, - store: Default::default(), + connection_pool: Default::default(), handlers: Default::default(), }; @@ -257,8 +257,6 @@ impl Server { self } - /// Handle a request while holding a lock to the store. This is useful when we're registering - /// a connection but we want to respond on the connection before anybody else can send on it. fn add_request_handler(&mut self, handler: F) -> &mut Self where F: 'static + Send + Sync + Fn(Arc, M, Response, Session) -> Fut, @@ -342,9 +340,9 @@ impl Server { ).await?; { - let mut store = this.store().await; - store.add_connection(connection_id, user_id, user.admin); - this.peer.send(connection_id, store.build_initial_contacts_update(contacts))?; + let mut pool = this.connection_pool().await; + pool.add_connection(connection_id, user_id, user.admin); + this.peer.send(connection_id, build_initial_contacts_update(contacts, &pool))?; if let Some((code, count)) = invite_code { this.peer.send(connection_id, proto::UpdateInviteInfo { @@ -435,9 +433,9 @@ impl Server { ) -> Result<()> { self.peer.disconnect(connection_id); let decline_calls = { - let mut store = self.store().await; - store.remove_connection(connection_id)?; - let mut connections = store.user_connection_ids(user_id); + let mut pool = self.connection_pool().await; + pool.remove_connection(connection_id)?; + let mut connections = pool.user_connection_ids(user_id); connections.next().is_none() }; @@ -468,9 +466,9 @@ impl Server { ) -> Result<()> { if let Some(user) = self.app_state.db.get_user_by_id(inviter_id).await? 
{ if let Some(code) = &user.invite_code { - let store = self.store().await; - let invitee_contact = store.contact_for_user(invitee_id, true, false); - for connection_id in store.user_connection_ids(inviter_id) { + let pool = self.connection_pool().await; + let invitee_contact = contact_for_user(invitee_id, true, false, &pool); + for connection_id in pool.user_connection_ids(inviter_id) { self.peer.send( connection_id, proto::UpdateContacts { @@ -494,8 +492,8 @@ impl Server { pub async fn invite_count_updated(self: &Arc, user_id: UserId) -> Result<()> { if let Some(user) = self.app_state.db.get_user_by_id(user_id).await? { if let Some(invite_code) = &user.invite_code { - let store = self.store().await; - for connection_id in store.user_connection_ids(user_id) { + let pool = self.connection_pool().await; + for connection_id in pool.user_connection_ids(user_id) { self.peer.send( connection_id, proto::UpdateInviteInfo { @@ -582,7 +580,11 @@ impl Server { session.connection_id, ) .await?; - for connection_id in self.store().await.user_connection_ids(session.user_id) { + for connection_id in self + .connection_pool() + .await + .user_connection_ids(session.user_id) + { self.peer .send(connection_id, proto::CallCanceled {}) .trace_err(); @@ -672,9 +674,9 @@ impl Server { self.room_updated(&left_room.room); { - let store = self.store().await; + let pool = self.connection_pool().await; for canceled_user_id in left_room.canceled_calls_to_user_ids { - for connection_id in store.user_connection_ids(canceled_user_id) { + for connection_id in pool.user_connection_ids(canceled_user_id) { self.peer .send(connection_id, proto::CallCanceled {}) .trace_err(); @@ -742,7 +744,7 @@ impl Server { self.update_user_contacts(called_user_id).await?; let mut calls = self - .store() + .connection_pool() .await .user_connection_ids(called_user_id) .map(|connection_id| self.peer.request(connection_id, incoming_call.clone())) @@ -784,7 +786,11 @@ impl Server { .db .cancel_call(Some(room_id), session.connection_id, called_user_id) .await?; - for connection_id in self.store().await.user_connection_ids(called_user_id) { + for connection_id in self + .connection_pool() + .await + .user_connection_ids(called_user_id) + { self.peer .send(connection_id, proto::CallCanceled {}) .trace_err(); @@ -807,7 +813,11 @@ impl Server { .db .decline_call(Some(room_id), session.user_id) .await?; - for connection_id in self.store().await.user_connection_ids(session.user_id) { + for connection_id in self + .connection_pool() + .await + .user_connection_ids(session.user_id) + { self.peer .send(connection_id, proto::CallCanceled {}) .trace_err(); @@ -897,15 +907,15 @@ impl Server { async fn update_user_contacts(self: &Arc, user_id: UserId) -> Result<()> { let contacts = self.app_state.db.get_contacts(user_id).await?; let busy = self.app_state.db.is_user_busy(user_id).await?; - let store = self.store().await; - let updated_contact = store.contact_for_user(user_id, false, busy); + let pool = self.connection_pool().await; + let updated_contact = contact_for_user(user_id, false, busy, &pool); for contact in contacts { if let db::Contact::Accepted { user_id: contact_user_id, .. 
} = contact { - for contact_conn_id in store.user_connection_ids(contact_user_id) { + for contact_conn_id in pool.user_connection_ids(contact_user_id) { self.peer .send( contact_conn_id, @@ -1522,7 +1532,11 @@ impl Server { // Update outgoing contact requests of requester let mut update = proto::UpdateContacts::default(); update.outgoing_requests.push(responder_id.to_proto()); - for connection_id in self.store().await.user_connection_ids(requester_id) { + for connection_id in self + .connection_pool() + .await + .user_connection_ids(requester_id) + { self.peer.send(connection_id, update.clone())?; } @@ -1534,7 +1548,11 @@ impl Server { requester_id: requester_id.to_proto(), should_notify: true, }); - for connection_id in self.store().await.user_connection_ids(responder_id) { + for connection_id in self + .connection_pool() + .await + .user_connection_ids(responder_id) + { self.peer.send(connection_id, update.clone())?; } @@ -1563,18 +1581,18 @@ impl Server { .await?; let busy = self.app_state.db.is_user_busy(requester_id).await?; - let store = self.store().await; + let pool = self.connection_pool().await; // Update responder with new contact let mut update = proto::UpdateContacts::default(); if accept { update .contacts - .push(store.contact_for_user(requester_id, false, busy)); + .push(contact_for_user(requester_id, false, busy, &pool)); } update .remove_incoming_requests .push(requester_id.to_proto()); - for connection_id in store.user_connection_ids(responder_id) { + for connection_id in pool.user_connection_ids(responder_id) { self.peer.send(connection_id, update.clone())?; } @@ -1583,12 +1601,12 @@ impl Server { if accept { update .contacts - .push(store.contact_for_user(responder_id, true, busy)); + .push(contact_for_user(responder_id, true, busy, &pool)); } update .remove_outgoing_requests .push(responder_id.to_proto()); - for connection_id in store.user_connection_ids(requester_id) { + for connection_id in pool.user_connection_ids(requester_id) { self.peer.send(connection_id, update.clone())?; } } @@ -1615,7 +1633,11 @@ impl Server { update .remove_outgoing_requests .push(responder_id.to_proto()); - for connection_id in self.store().await.user_connection_ids(requester_id) { + for connection_id in self + .connection_pool() + .await + .user_connection_ids(requester_id) + { self.peer.send(connection_id, update.clone())?; } @@ -1624,7 +1646,11 @@ impl Server { update .remove_incoming_requests .push(requester_id.to_proto()); - for connection_id in self.store().await.user_connection_ids(responder_id) { + for connection_id in self + .connection_pool() + .await + .user_connection_ids(responder_id) + { self.peer.send(connection_id, update.clone())?; } @@ -1678,13 +1704,13 @@ impl Server { Ok(()) } - pub(crate) async fn store(&self) -> StoreGuard<'_> { + pub(crate) async fn connection_pool(&self) -> ConnectionPoolGuard<'_> { #[cfg(test)] tokio::task::yield_now().await; - let guard = self.store.lock().await; + let guard = self.connection_pool.lock().await; #[cfg(test)] tokio::task::yield_now().await; - StoreGuard { + ConnectionPoolGuard { guard, _not_send: PhantomData, } @@ -1692,27 +1718,27 @@ impl Server { pub async fn snapshot<'a>(self: &'a Arc) -> ServerSnapshot<'a> { ServerSnapshot { - store: self.store().await, + connection_pool: self.connection_pool().await, peer: &self.peer, } } } -impl<'a> Deref for StoreGuard<'a> { - type Target = Store; +impl<'a> Deref for ConnectionPoolGuard<'a> { + type Target = ConnectionPool; fn deref(&self) -> &Self::Target { &*self.guard } } -impl<'a> 
DerefMut for StoreGuard<'a> { +impl<'a> DerefMut for ConnectionPoolGuard<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut *self.guard } } -impl<'a> Drop for StoreGuard<'a> { +impl<'a> Drop for ConnectionPoolGuard<'a> { fn drop(&mut self) { #[cfg(test)] self.check_invariants(); @@ -1821,7 +1847,7 @@ pub async fn handle_websocket_request( pub async fn handle_metrics(Extension(server): Extension>) -> Result { let connections = server - .store() + .connection_pool() .await .connections() .filter(|connection| !connection.admin) @@ -1868,6 +1894,53 @@ fn to_tungstenite_message(message: AxumMessage) -> TungsteniteMessage { } } +fn build_initial_contacts_update( + contacts: Vec, + pool: &ConnectionPool, +) -> proto::UpdateContacts { + let mut update = proto::UpdateContacts::default(); + + for contact in contacts { + match contact { + db::Contact::Accepted { + user_id, + should_notify, + busy, + } => { + update + .contacts + .push(contact_for_user(user_id, should_notify, busy, &pool)); + } + db::Contact::Outgoing { user_id } => update.outgoing_requests.push(user_id.to_proto()), + db::Contact::Incoming { + user_id, + should_notify, + } => update + .incoming_requests + .push(proto::IncomingContactRequest { + requester_id: user_id.to_proto(), + should_notify, + }), + } + } + + update +} + +fn contact_for_user( + user_id: UserId, + should_notify: bool, + busy: bool, + pool: &ConnectionPool, +) -> proto::Contact { + proto::Contact { + user_id: user_id.to_proto(), + online: pool.is_user_online(user_id), + busy, + should_notify, + } +} + pub trait ResultExt { type Ok; diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/connection_pool.rs similarity index 64% rename from crates/collab/src/rpc/store.rs rename to crates/collab/src/rpc/connection_pool.rs index 2bb6d89f40..ac7632f7da 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/connection_pool.rs @@ -1,12 +1,12 @@ -use crate::db::{self, UserId}; +use crate::db::UserId; use anyhow::{anyhow, Result}; use collections::{BTreeMap, HashSet}; -use rpc::{proto, ConnectionId}; +use rpc::ConnectionId; use serde::Serialize; use tracing::instrument; #[derive(Default, Serialize)] -pub struct Store { +pub struct ConnectionPool { connections: BTreeMap, connected_users: BTreeMap, } @@ -22,7 +22,7 @@ pub struct Connection { pub admin: bool, } -impl Store { +impl ConnectionPool { #[instrument(skip(self))] pub fn add_connection(&mut self, connection_id: ConnectionId, user_id: UserId, admin: bool) { self.connections @@ -70,55 +70,6 @@ impl Store { .is_empty() } - pub fn build_initial_contacts_update( - &self, - contacts: Vec, - ) -> proto::UpdateContacts { - let mut update = proto::UpdateContacts::default(); - - for contact in contacts { - match contact { - db::Contact::Accepted { - user_id, - should_notify, - busy, - } => { - update - .contacts - .push(self.contact_for_user(user_id, should_notify, busy)); - } - db::Contact::Outgoing { user_id } => { - update.outgoing_requests.push(user_id.to_proto()) - } - db::Contact::Incoming { - user_id, - should_notify, - } => update - .incoming_requests - .push(proto::IncomingContactRequest { - requester_id: user_id.to_proto(), - should_notify, - }), - } - } - - update - } - - pub fn contact_for_user( - &self, - user_id: UserId, - should_notify: bool, - busy: bool, - ) -> proto::Contact { - proto::Contact { - user_id: user_id.to_proto(), - online: self.is_user_online(user_id), - busy, - should_notify, - } - } - #[cfg(test)] pub fn check_invariants(&self) { for (connection_id, connection) 
in &self.connections { From c3d556d9bdf6a924e07b945c06f882bed93cfbce Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 18 Nov 2022 11:45:42 +0100 Subject: [PATCH 057/240] Don't take an `Arc` in message handlers --- crates/collab/src/rpc.rs | 2641 +++++++++++++++++++------------------- 1 file changed, 1303 insertions(+), 1338 deletions(-) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 175e3604c0..ba97b09acd 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -39,6 +39,7 @@ use rpc::{ use serde::{Serialize, Serializer}; use std::{ any::TypeId, + fmt, future::Future, marker::PhantomData, net::SocketAddr, @@ -67,20 +68,63 @@ lazy_static! { .unwrap(); } -type MessageHandler = Box< - dyn Send + Sync + Fn(Arc, Box, Session) -> BoxFuture<'static, ()>, ->; +type MessageHandler = + Box, Session) -> BoxFuture<'static, ()>>; struct Response { - server: Arc, + peer: Arc, receipt: Receipt, responded: Arc, } +impl Response { + fn send(self, payload: R::Response) -> Result<()> { + self.responded.store(true, SeqCst); + self.peer.respond(self.receipt, payload)?; + Ok(()) + } +} + +#[derive(Clone)] struct Session { user_id: UserId, connection_id: ConnectionId, db: Arc>, + peer: Arc, + connection_pool: Arc>, + live_kit_client: Option>, +} + +impl Session { + async fn db(&self) -> MutexGuard { + #[cfg(test)] + tokio::task::yield_now().await; + let guard = self.db.lock().await; + #[cfg(test)] + tokio::task::yield_now().await; + guard + } + + async fn connection_pool(&self) -> ConnectionPoolGuard<'_> { + #[cfg(test)] + tokio::task::yield_now().await; + let guard = self.connection_pool.lock().await; + #[cfg(test)] + tokio::task::yield_now().await; + ConnectionPoolGuard { + guard, + _not_send: PhantomData, + } + } +} + +impl fmt::Debug for Session { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Session") + .field("user_id", &self.user_id) + .field("connection_id", &self.connection_id) + .finish() + } } struct DbHandle(Arc); @@ -93,17 +137,9 @@ impl Deref for DbHandle { } } -impl Response { - fn send(self, payload: R::Response) -> Result<()> { - self.responded.store(true, SeqCst); - self.server.peer.respond(self.receipt, payload)?; - Ok(()) - } -} - pub struct Server { peer: Arc, - pub(crate) connection_pool: Mutex, + pub(crate) connection_pool: Arc>, app_state: Arc, handlers: HashMap, } @@ -148,76 +184,74 @@ impl Server { }; server - .add_request_handler(Server::ping) - .add_request_handler(Server::create_room) - .add_request_handler(Server::join_room) - .add_message_handler(Server::leave_room) - .add_request_handler(Server::call) - .add_request_handler(Server::cancel_call) - .add_message_handler(Server::decline_call) - .add_request_handler(Server::update_participant_location) - .add_request_handler(Server::share_project) - .add_message_handler(Server::unshare_project) - .add_request_handler(Server::join_project) - .add_message_handler(Server::leave_project) - .add_request_handler(Server::update_project) - .add_request_handler(Server::update_worktree) - .add_message_handler(Server::start_language_server) - .add_message_handler(Server::update_language_server) - .add_request_handler(Server::update_diagnostic_summary) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - 
.add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler( - Server::forward_project_request::, - ) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_message_handler(Server::create_buffer_for_peer) - .add_request_handler(Server::update_buffer) - .add_message_handler(Server::update_buffer_file) - .add_message_handler(Server::buffer_reloaded) - .add_message_handler(Server::buffer_saved) - .add_request_handler(Server::save_buffer) - .add_request_handler(Server::get_users) - .add_request_handler(Server::fuzzy_search_users) - .add_request_handler(Server::request_contact) - .add_request_handler(Server::remove_contact) - .add_request_handler(Server::respond_to_contact_request) - .add_request_handler(Server::follow) - .add_message_handler(Server::unfollow) - .add_message_handler(Server::update_followers) - .add_message_handler(Server::update_diff_base) - .add_request_handler(Server::get_private_user_info); + .add_request_handler(ping) + .add_request_handler(create_room) + .add_request_handler(join_room) + .add_message_handler(leave_room) + .add_request_handler(call) + .add_request_handler(cancel_call) + .add_message_handler(decline_call) + .add_request_handler(update_participant_location) + .add_request_handler(share_project) + .add_message_handler(unshare_project) + .add_request_handler(join_project) + .add_message_handler(leave_project) + .add_request_handler(update_project) + .add_request_handler(update_worktree) + .add_message_handler(start_language_server) + .add_message_handler(update_language_server) + .add_request_handler(update_diagnostic_summary) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) + 
.add_message_handler(create_buffer_for_peer) + .add_request_handler(update_buffer) + .add_message_handler(update_buffer_file) + .add_message_handler(buffer_reloaded) + .add_message_handler(buffer_saved) + .add_request_handler(save_buffer) + .add_request_handler(get_users) + .add_request_handler(fuzzy_search_users) + .add_request_handler(request_contact) + .add_request_handler(remove_contact) + .add_request_handler(respond_to_contact_request) + .add_request_handler(follow) + .add_message_handler(unfollow) + .add_message_handler(update_followers) + .add_message_handler(update_diff_base) + .add_request_handler(get_private_user_info); Arc::new(server) } fn add_handler(&mut self, handler: F) -> &mut Self where - F: 'static + Send + Sync + Fn(Arc, TypedEnvelope, Session) -> Fut, + F: 'static + Send + Sync + Fn(TypedEnvelope, Session) -> Fut, Fut: 'static + Send + Future>, M: EnvelopedMessage, { let prev_handler = self.handlers.insert( TypeId::of::(), - Box::new(move |server, envelope, session| { + Box::new(move |envelope, session| { let envelope = envelope.into_any().downcast::>().unwrap(); let span = info_span!( "handle message", @@ -229,7 +263,7 @@ impl Server { "message received" ); }); - let future = (handler)(server, *envelope, session); + let future = (handler)(*envelope, session); async move { if let Err(error) = future.await { tracing::error!(%error, "error handling message"); @@ -247,34 +281,33 @@ impl Server { fn add_message_handler(&mut self, handler: F) -> &mut Self where - F: 'static + Send + Sync + Fn(Arc, M, Session) -> Fut, + F: 'static + Send + Sync + Fn(M, Session) -> Fut, Fut: 'static + Send + Future>, M: EnvelopedMessage, { - self.add_handler(move |server, envelope, session| { - handler(server, envelope.payload, session) - }); + self.add_handler(move |envelope, session| handler(envelope.payload, session)); self } fn add_request_handler(&mut self, handler: F) -> &mut Self where - F: 'static + Send + Sync + Fn(Arc, M, Response, Session) -> Fut, + F: 'static + Send + Sync + Fn(M, Response, Session) -> Fut, Fut: Send + Future>, M: RequestMessage, { let handler = Arc::new(handler); - self.add_handler(move |server, envelope, session| { + self.add_handler(move |envelope, session| { let receipt = envelope.receipt(); let handler = handler.clone(); async move { + let peer = session.peer.clone(); let responded = Arc::new(AtomicBool::default()); let response = Response { - server: server.clone(), + peer: peer.clone(), responded: responded.clone(), receipt, }; - match (handler)(server.clone(), envelope.payload, response, session).await { + match (handler)(envelope.payload, response, session).await { Ok(()) => { if responded.load(std::sync::atomic::Ordering::SeqCst) { Ok(()) @@ -283,7 +316,7 @@ impl Server { } } Err(error) => { - server.peer.respond_with_error( + peer.respond_with_error( receipt, proto::Error { message: error.to_string(), @@ -304,7 +337,7 @@ impl Server { mut send_connection_id: Option>, executor: E, ) -> impl Future> { - let mut this = self.clone(); + let this = self.clone(); let user_id = user.id; let login = user.github_login; let span = info_span!("handle connection", %user_id, %login, %address); @@ -340,7 +373,7 @@ impl Server { ).await?; { - let mut pool = this.connection_pool().await; + let mut pool = this.connection_pool.lock().await; pool.add_connection(connection_id, user_id, user.admin); this.peer.send(connection_id, build_initial_contacts_update(contacts, &pool))?; @@ -356,13 +389,19 @@ impl Server { this.peer.send(connection_id, incoming_call)?; } - 
this.update_user_contacts(user_id).await?; + let session = Session { + user_id, + connection_id, + db: Arc::new(Mutex::new(DbHandle(this.app_state.db.clone()))), + peer: this.peer.clone(), + connection_pool: this.connection_pool.clone(), + live_kit_client: this.app_state.live_kit_client.clone() + }; + update_user_contacts(user_id, &session).await?; let handle_io = handle_io.fuse(); futures::pin_mut!(handle_io); - let db = Arc::new(Mutex::new(DbHandle(this.app_state.db.clone()))); - // Handlers for foreground messages are pushed into the following `FuturesUnordered`. // This prevents deadlocks when e.g., client A performs a request to client B and // client B performs a request to client A. If both clients stop processing further @@ -390,12 +429,7 @@ impl Server { let span_enter = span.enter(); if let Some(handler) = this.handlers.get(&message.payload_type_id()) { let is_background = message.is_background(); - let session = Session { - user_id, - connection_id, - db: db.clone(), - }; - let handle_message = (handler)(this.clone(), message, session); + let handle_message = (handler)(message, session.clone()); drop(span_enter); let handle_message = handle_message.instrument(span); @@ -417,7 +451,7 @@ impl Server { drop(foreground_message_handlers); tracing::info!(%user_id, %login, %connection_id, %address, "signing out"); - if let Err(error) = this.sign_out(connection_id, user_id).await { + if let Err(error) = sign_out(session).await { tracing::error!(%user_id, %login, %connection_id, %address, ?error, "error signing out"); } @@ -425,40 +459,6 @@ impl Server { }.instrument(span) } - #[instrument(skip(self), err)] - async fn sign_out( - self: &mut Arc, - connection_id: ConnectionId, - user_id: UserId, - ) -> Result<()> { - self.peer.disconnect(connection_id); - let decline_calls = { - let mut pool = self.connection_pool().await; - pool.remove_connection(connection_id)?; - let mut connections = pool.user_connection_ids(user_id); - connections.next().is_none() - }; - - self.leave_room_for_connection(connection_id, user_id) - .await - .trace_err(); - if decline_calls { - if let Some(room) = self - .app_state - .db - .decline_call(None, user_id) - .await - .trace_err() - { - self.room_updated(&room); - } - } - - self.update_user_contacts(user_id).await?; - - Ok(()) - } - pub async fn invite_code_redeemed( self: &Arc, inviter_id: UserId, @@ -466,7 +466,7 @@ impl Server { ) -> Result<()> { if let Some(user) = self.app_state.db.get_user_by_id(inviter_id).await? { if let Some(code) = &user.invite_code { - let pool = self.connection_pool().await; + let pool = self.connection_pool.lock().await; let invitee_contact = contact_for_user(invitee_id, true, false, &pool); for connection_id in pool.user_connection_ids(inviter_id) { self.peer.send( @@ -492,7 +492,7 @@ impl Server { pub async fn invite_count_updated(self: &Arc, user_id: UserId) -> Result<()> { if let Some(user) = self.app_state.db.get_user_by_id(user_id).await? 
{ if let Some(invite_code) = &user.invite_code { - let pool = self.connection_pool().await; + let pool = self.connection_pool.lock().await; for connection_id in pool.user_connection_ids(user_id) { self.peer.send( connection_id, @@ -510,1215 +510,12 @@ impl Server { Ok(()) } - async fn ping( - self: Arc, - _: proto::Ping, - response: Response, - _session: Session, - ) -> Result<()> { - response.send(proto::Ack {})?; - Ok(()) - } - - async fn create_room( - self: Arc, - _request: proto::CreateRoom, - response: Response, - session: Session, - ) -> Result<()> { - let room = self - .app_state - .db - .create_room(session.user_id, session.connection_id) - .await?; - - let live_kit_connection_info = - if let Some(live_kit) = self.app_state.live_kit_client.as_ref() { - if let Some(_) = live_kit - .create_room(room.live_kit_room.clone()) - .await - .trace_err() - { - if let Some(token) = live_kit - .room_token(&room.live_kit_room, &session.connection_id.to_string()) - .trace_err() - { - Some(proto::LiveKitConnectionInfo { - server_url: live_kit.url().into(), - token, - }) - } else { - None - } - } else { - None - } - } else { - None - }; - - response.send(proto::CreateRoomResponse { - room: Some(room), - live_kit_connection_info, - })?; - self.update_user_contacts(session.user_id).await?; - Ok(()) - } - - async fn join_room( - self: Arc, - request: proto::JoinRoom, - response: Response, - session: Session, - ) -> Result<()> { - let room = self - .app_state - .db - .join_room( - RoomId::from_proto(request.id), - session.user_id, - session.connection_id, - ) - .await?; - for connection_id in self - .connection_pool() - .await - .user_connection_ids(session.user_id) - { - self.peer - .send(connection_id, proto::CallCanceled {}) - .trace_err(); - } - - let live_kit_connection_info = - if let Some(live_kit) = self.app_state.live_kit_client.as_ref() { - if let Some(token) = live_kit - .room_token(&room.live_kit_room, &session.connection_id.to_string()) - .trace_err() - { - Some(proto::LiveKitConnectionInfo { - server_url: live_kit.url().into(), - token, - }) - } else { - None - } - } else { - None - }; - - self.room_updated(&room); - response.send(proto::JoinRoomResponse { - room: Some(room), - live_kit_connection_info, - })?; - - self.update_user_contacts(session.user_id).await?; - Ok(()) - } - - async fn leave_room( - self: Arc, - _message: proto::LeaveRoom, - session: Session, - ) -> Result<()> { - self.leave_room_for_connection(session.connection_id, session.user_id) - .await - } - - async fn leave_room_for_connection( - self: &Arc, - leaving_connection_id: ConnectionId, - leaving_user_id: UserId, - ) -> Result<()> { - let mut contacts_to_update = HashSet::default(); - - let Some(left_room) = self.app_state.db.leave_room(leaving_connection_id).await? 
else { - return Err(anyhow!("no room to leave"))?; - }; - contacts_to_update.insert(leaving_user_id); - - for project in left_room.left_projects.into_values() { - for connection_id in project.connection_ids { - if project.host_user_id == leaving_user_id { - self.peer - .send( - connection_id, - proto::UnshareProject { - project_id: project.id.to_proto(), - }, - ) - .trace_err(); - } else { - self.peer - .send( - connection_id, - proto::RemoveProjectCollaborator { - project_id: project.id.to_proto(), - peer_id: leaving_connection_id.0, - }, - ) - .trace_err(); - } - } - - self.peer - .send( - leaving_connection_id, - proto::UnshareProject { - project_id: project.id.to_proto(), - }, - ) - .trace_err(); - } - - self.room_updated(&left_room.room); - { - let pool = self.connection_pool().await; - for canceled_user_id in left_room.canceled_calls_to_user_ids { - for connection_id in pool.user_connection_ids(canceled_user_id) { - self.peer - .send(connection_id, proto::CallCanceled {}) - .trace_err(); - } - contacts_to_update.insert(canceled_user_id); - } - } - - for contact_user_id in contacts_to_update { - self.update_user_contacts(contact_user_id).await?; - } - - if let Some(live_kit) = self.app_state.live_kit_client.as_ref() { - live_kit - .remove_participant( - left_room.room.live_kit_room.clone(), - leaving_connection_id.to_string(), - ) - .await - .trace_err(); - - if left_room.room.participants.is_empty() { - live_kit - .delete_room(left_room.room.live_kit_room) - .await - .trace_err(); - } - } - - Ok(()) - } - - async fn call( - self: Arc, - request: proto::Call, - response: Response, - session: Session, - ) -> Result<()> { - let room_id = RoomId::from_proto(request.room_id); - let calling_user_id = session.user_id; - let calling_connection_id = session.connection_id; - let called_user_id = UserId::from_proto(request.called_user_id); - let initial_project_id = request.initial_project_id.map(ProjectId::from_proto); - if !self - .app_state - .db - .has_contact(calling_user_id, called_user_id) - .await? - { - return Err(anyhow!("cannot call a user who isn't a contact"))?; - } - - let (room, incoming_call) = self - .app_state - .db - .call( - room_id, - calling_user_id, - calling_connection_id, - called_user_id, - initial_project_id, - ) - .await?; - self.room_updated(&room); - self.update_user_contacts(called_user_id).await?; - - let mut calls = self - .connection_pool() - .await - .user_connection_ids(called_user_id) - .map(|connection_id| self.peer.request(connection_id, incoming_call.clone())) - .collect::>(); - - while let Some(call_response) = calls.next().await { - match call_response.as_ref() { - Ok(_) => { - response.send(proto::Ack {})?; - return Ok(()); - } - Err(_) => { - call_response.trace_err(); - } - } - } - - let room = self - .app_state - .db - .call_failed(room_id, called_user_id) - .await?; - self.room_updated(&room); - self.update_user_contacts(called_user_id).await?; - - Err(anyhow!("failed to ring user"))? 
- } - - async fn cancel_call( - self: Arc, - request: proto::CancelCall, - response: Response, - session: Session, - ) -> Result<()> { - let called_user_id = UserId::from_proto(request.called_user_id); - let room_id = RoomId::from_proto(request.room_id); - let room = self - .app_state - .db - .cancel_call(Some(room_id), session.connection_id, called_user_id) - .await?; - for connection_id in self - .connection_pool() - .await - .user_connection_ids(called_user_id) - { - self.peer - .send(connection_id, proto::CallCanceled {}) - .trace_err(); - } - self.room_updated(&room); - response.send(proto::Ack {})?; - - self.update_user_contacts(called_user_id).await?; - Ok(()) - } - - async fn decline_call( - self: Arc, - message: proto::DeclineCall, - session: Session, - ) -> Result<()> { - let room_id = RoomId::from_proto(message.room_id); - let room = self - .app_state - .db - .decline_call(Some(room_id), session.user_id) - .await?; - for connection_id in self - .connection_pool() - .await - .user_connection_ids(session.user_id) - { - self.peer - .send(connection_id, proto::CallCanceled {}) - .trace_err(); - } - self.room_updated(&room); - self.update_user_contacts(session.user_id).await?; - Ok(()) - } - - async fn update_participant_location( - self: Arc, - request: proto::UpdateParticipantLocation, - response: Response, - session: Session, - ) -> Result<()> { - let room_id = RoomId::from_proto(request.room_id); - let location = request - .location - .ok_or_else(|| anyhow!("invalid location"))?; - let room = self - .app_state - .db - .update_room_participant_location(room_id, session.connection_id, location) - .await?; - self.room_updated(&room); - response.send(proto::Ack {})?; - Ok(()) - } - - fn room_updated(&self, room: &proto::Room) { - for participant in &room.participants { - self.peer - .send( - ConnectionId(participant.peer_id), - proto::RoomUpdated { - room: Some(room.clone()), - }, - ) - .trace_err(); - } - } - - async fn share_project( - self: Arc, - request: proto::ShareProject, - response: Response, - session: Session, - ) -> Result<()> { - let (project_id, room) = self - .app_state - .db - .share_project( - RoomId::from_proto(request.room_id), - session.connection_id, - &request.worktrees, - ) - .await?; - response.send(proto::ShareProjectResponse { - project_id: project_id.to_proto(), - })?; - self.room_updated(&room); - - Ok(()) - } - - async fn unshare_project( - self: Arc, - message: proto::UnshareProject, - session: Session, - ) -> Result<()> { - let project_id = ProjectId::from_proto(message.project_id); - - let (room, guest_connection_ids) = self - .app_state - .db - .unshare_project(project_id, session.connection_id) - .await?; - - broadcast(session.connection_id, guest_connection_ids, |conn_id| { - self.peer.send(conn_id, message.clone()) - }); - self.room_updated(&room); - - Ok(()) - } - - async fn update_user_contacts(self: &Arc, user_id: UserId) -> Result<()> { - let contacts = self.app_state.db.get_contacts(user_id).await?; - let busy = self.app_state.db.is_user_busy(user_id).await?; - let pool = self.connection_pool().await; - let updated_contact = contact_for_user(user_id, false, busy, &pool); - for contact in contacts { - if let db::Contact::Accepted { - user_id: contact_user_id, - .. 
- } = contact - { - for contact_conn_id in pool.user_connection_ids(contact_user_id) { - self.peer - .send( - contact_conn_id, - proto::UpdateContacts { - contacts: vec![updated_contact.clone()], - remove_contacts: Default::default(), - incoming_requests: Default::default(), - remove_incoming_requests: Default::default(), - outgoing_requests: Default::default(), - remove_outgoing_requests: Default::default(), - }, - ) - .trace_err(); - } - } - } - Ok(()) - } - - async fn join_project( - self: Arc, - request: proto::JoinProject, - response: Response, - session: Session, - ) -> Result<()> { - let project_id = ProjectId::from_proto(request.project_id); - let guest_user_id = session.user_id; - - tracing::info!(%project_id, "join project"); - - let (project, replica_id) = self - .app_state - .db - .join_project(project_id, session.connection_id) - .await?; - - let collaborators = project - .collaborators - .iter() - .filter(|collaborator| collaborator.connection_id != session.connection_id.0 as i32) - .map(|collaborator| proto::Collaborator { - peer_id: collaborator.connection_id as u32, - replica_id: collaborator.replica_id.0 as u32, - user_id: collaborator.user_id.to_proto(), - }) - .collect::>(); - let worktrees = project - .worktrees - .iter() - .map(|(id, worktree)| proto::WorktreeMetadata { - id: id.to_proto(), - root_name: worktree.root_name.clone(), - visible: worktree.visible, - abs_path: worktree.abs_path.clone(), - }) - .collect::>(); - - for collaborator in &collaborators { - self.peer - .send( - ConnectionId(collaborator.peer_id), - proto::AddProjectCollaborator { - project_id: project_id.to_proto(), - collaborator: Some(proto::Collaborator { - peer_id: session.connection_id.0, - replica_id: replica_id.0 as u32, - user_id: guest_user_id.to_proto(), - }), - }, - ) - .trace_err(); - } - - // First, we send the metadata associated with each worktree. - response.send(proto::JoinProjectResponse { - worktrees: worktrees.clone(), - replica_id: replica_id.0 as u32, - collaborators: collaborators.clone(), - language_servers: project.language_servers.clone(), - })?; - - for (worktree_id, worktree) in project.worktrees { - #[cfg(any(test, feature = "test-support"))] - const MAX_CHUNK_SIZE: usize = 2; - #[cfg(not(any(test, feature = "test-support")))] - const MAX_CHUNK_SIZE: usize = 256; - - // Stream this worktree's entries. - let message = proto::UpdateWorktree { - project_id: project_id.to_proto(), - worktree_id: worktree_id.to_proto(), - abs_path: worktree.abs_path.clone(), - root_name: worktree.root_name, - updated_entries: worktree.entries, - removed_entries: Default::default(), - scan_id: worktree.scan_id, - is_last_update: worktree.is_complete, - }; - for update in proto::split_worktree_update(message, MAX_CHUNK_SIZE) { - self.peer.send(session.connection_id, update.clone())?; - } - - // Stream this worktree's diagnostics. 
- for summary in worktree.diagnostic_summaries { - self.peer.send( - session.connection_id, - proto::UpdateDiagnosticSummary { - project_id: project_id.to_proto(), - worktree_id: worktree.id.to_proto(), - summary: Some(summary), - }, - )?; - } - } - - for language_server in &project.language_servers { - self.peer.send( - session.connection_id, - proto::UpdateLanguageServer { - project_id: project_id.to_proto(), - language_server_id: language_server.id, - variant: Some( - proto::update_language_server::Variant::DiskBasedDiagnosticsUpdated( - proto::LspDiskBasedDiagnosticsUpdated {}, - ), - ), - }, - )?; - } - - Ok(()) - } - - async fn leave_project( - self: Arc, - request: proto::LeaveProject, - session: Session, - ) -> Result<()> { - let sender_id = session.connection_id; - let project_id = ProjectId::from_proto(request.project_id); - let project; - { - project = self - .app_state - .db - .leave_project(project_id, sender_id) - .await?; - tracing::info!( - %project_id, - host_user_id = %project.host_user_id, - host_connection_id = %project.host_connection_id, - "leave project" - ); - - broadcast(sender_id, project.connection_ids, |conn_id| { - self.peer.send( - conn_id, - proto::RemoveProjectCollaborator { - project_id: project_id.to_proto(), - peer_id: sender_id.0, - }, - ) - }); - } - - Ok(()) - } - - async fn update_project( - self: Arc, - request: proto::UpdateProject, - response: Response, - session: Session, - ) -> Result<()> { - let project_id = ProjectId::from_proto(request.project_id); - let (room, guest_connection_ids) = self - .app_state - .db - .update_project(project_id, session.connection_id, &request.worktrees) - .await?; - broadcast( - session.connection_id, - guest_connection_ids, - |connection_id| { - self.peer - .forward_send(session.connection_id, connection_id, request.clone()) - }, - ); - self.room_updated(&room); - response.send(proto::Ack {})?; - - Ok(()) - } - - async fn update_worktree( - self: Arc, - request: proto::UpdateWorktree, - response: Response, - session: Session, - ) -> Result<()> { - let guest_connection_ids = self - .app_state - .db - .update_worktree(&request, session.connection_id) - .await?; - - broadcast( - session.connection_id, - guest_connection_ids, - |connection_id| { - self.peer - .forward_send(session.connection_id, connection_id, request.clone()) - }, - ); - response.send(proto::Ack {})?; - Ok(()) - } - - async fn update_diagnostic_summary( - self: Arc, - request: proto::UpdateDiagnosticSummary, - response: Response, - session: Session, - ) -> Result<()> { - let guest_connection_ids = self - .app_state - .db - .update_diagnostic_summary(&request, session.connection_id) - .await?; - - broadcast( - session.connection_id, - guest_connection_ids, - |connection_id| { - self.peer - .forward_send(session.connection_id, connection_id, request.clone()) - }, - ); - - response.send(proto::Ack {})?; - Ok(()) - } - - async fn start_language_server( - self: Arc, - request: proto::StartLanguageServer, - session: Session, - ) -> Result<()> { - let guest_connection_ids = self - .app_state - .db - .start_language_server(&request, session.connection_id) - .await?; - - broadcast( - session.connection_id, - guest_connection_ids, - |connection_id| { - self.peer - .forward_send(session.connection_id, connection_id, request.clone()) - }, - ); - Ok(()) - } - - async fn update_language_server( - self: Arc, - request: proto::UpdateLanguageServer, - session: Session, - ) -> Result<()> { - let project_id = ProjectId::from_proto(request.project_id); - let 
project_connection_ids = self - .app_state - .db - .project_connection_ids(project_id, session.connection_id) - .await?; - broadcast( - session.connection_id, - project_connection_ids, - |connection_id| { - self.peer - .forward_send(session.connection_id, connection_id, request.clone()) - }, - ); - Ok(()) - } - - async fn forward_project_request( - self: Arc, - request: T, - response: Response, - session: Session, - ) -> Result<()> - where - T: EntityMessage + RequestMessage, - { - let project_id = ProjectId::from_proto(request.remote_entity_id()); - let collaborators = self - .app_state - .db - .project_collaborators(project_id, session.connection_id) - .await?; - let host = collaborators - .iter() - .find(|collaborator| collaborator.is_host) - .ok_or_else(|| anyhow!("host not found"))?; - - let payload = self - .peer - .forward_request( - session.connection_id, - ConnectionId(host.connection_id as u32), - request, - ) - .await?; - - response.send(payload)?; - Ok(()) - } - - async fn save_buffer( - self: Arc, - request: proto::SaveBuffer, - response: Response, - session: Session, - ) -> Result<()> { - let project_id = ProjectId::from_proto(request.project_id); - let collaborators = self - .app_state - .db - .project_collaborators(project_id, session.connection_id) - .await?; - let host = collaborators - .into_iter() - .find(|collaborator| collaborator.is_host) - .ok_or_else(|| anyhow!("host not found"))?; - let host_connection_id = ConnectionId(host.connection_id as u32); - let response_payload = self - .peer - .forward_request(session.connection_id, host_connection_id, request.clone()) - .await?; - - let mut collaborators = self - .app_state - .db - .project_collaborators(project_id, session.connection_id) - .await?; - collaborators - .retain(|collaborator| collaborator.connection_id != session.connection_id.0 as i32); - let project_connection_ids = collaborators - .into_iter() - .map(|collaborator| ConnectionId(collaborator.connection_id as u32)); - broadcast(host_connection_id, project_connection_ids, |conn_id| { - self.peer - .forward_send(host_connection_id, conn_id, response_payload.clone()) - }); - response.send(response_payload)?; - Ok(()) - } - - async fn create_buffer_for_peer( - self: Arc, - request: proto::CreateBufferForPeer, - session: Session, - ) -> Result<()> { - self.peer.forward_send( - session.connection_id, - ConnectionId(request.peer_id), - request, - )?; - Ok(()) - } - - async fn update_buffer( - self: Arc, - request: proto::UpdateBuffer, - response: Response, - session: Session, - ) -> Result<()> { - let project_id = ProjectId::from_proto(request.project_id); - let project_connection_ids = self - .app_state - .db - .project_connection_ids(project_id, session.connection_id) - .await?; - - broadcast( - session.connection_id, - project_connection_ids, - |connection_id| { - self.peer - .forward_send(session.connection_id, connection_id, request.clone()) - }, - ); - response.send(proto::Ack {})?; - Ok(()) - } - - async fn update_buffer_file( - self: Arc, - request: proto::UpdateBufferFile, - session: Session, - ) -> Result<()> { - let project_id = ProjectId::from_proto(request.project_id); - let project_connection_ids = self - .app_state - .db - .project_connection_ids(project_id, session.connection_id) - .await?; - - broadcast( - session.connection_id, - project_connection_ids, - |connection_id| { - self.peer - .forward_send(session.connection_id, connection_id, request.clone()) - }, - ); - Ok(()) - } - - async fn buffer_reloaded( - self: Arc, - request: 
proto::BufferReloaded, - session: Session, - ) -> Result<()> { - let project_id = ProjectId::from_proto(request.project_id); - let project_connection_ids = self - .app_state - .db - .project_connection_ids(project_id, session.connection_id) - .await?; - broadcast( - session.connection_id, - project_connection_ids, - |connection_id| { - self.peer - .forward_send(session.connection_id, connection_id, request.clone()) - }, - ); - Ok(()) - } - - async fn buffer_saved( - self: Arc, - request: proto::BufferSaved, - session: Session, - ) -> Result<()> { - let project_id = ProjectId::from_proto(request.project_id); - let project_connection_ids = self - .app_state - .db - .project_connection_ids(project_id, session.connection_id) - .await?; - broadcast( - session.connection_id, - project_connection_ids, - |connection_id| { - self.peer - .forward_send(session.connection_id, connection_id, request.clone()) - }, - ); - Ok(()) - } - - async fn follow( - self: Arc, - request: proto::Follow, - response: Response, - session: Session, - ) -> Result<()> { - let project_id = ProjectId::from_proto(request.project_id); - let leader_id = ConnectionId(request.leader_id); - let follower_id = session.connection_id; - let project_connection_ids = self - .app_state - .db - .project_connection_ids(project_id, session.connection_id) - .await?; - - if !project_connection_ids.contains(&leader_id) { - Err(anyhow!("no such peer"))?; - } - - let mut response_payload = self - .peer - .forward_request(session.connection_id, leader_id, request) - .await?; - response_payload - .views - .retain(|view| view.leader_id != Some(follower_id.0)); - response.send(response_payload)?; - Ok(()) - } - - async fn unfollow(self: Arc, request: proto::Unfollow, session: Session) -> Result<()> { - let project_id = ProjectId::from_proto(request.project_id); - let leader_id = ConnectionId(request.leader_id); - let project_connection_ids = self - .app_state - .db - .project_connection_ids(project_id, session.connection_id) - .await?; - if !project_connection_ids.contains(&leader_id) { - Err(anyhow!("no such peer"))?; - } - self.peer - .forward_send(session.connection_id, leader_id, request)?; - Ok(()) - } - - async fn update_followers( - self: Arc, - request: proto::UpdateFollowers, - session: Session, - ) -> Result<()> { - let project_id = ProjectId::from_proto(request.project_id); - let project_connection_ids = session - .db - .lock() - .await - .project_connection_ids(project_id, session.connection_id) - .await?; - - let leader_id = request.variant.as_ref().and_then(|variant| match variant { - proto::update_followers::Variant::CreateView(payload) => payload.leader_id, - proto::update_followers::Variant::UpdateView(payload) => payload.leader_id, - proto::update_followers::Variant::UpdateActiveView(payload) => payload.leader_id, - }); - for follower_id in &request.follower_ids { - let follower_id = ConnectionId(*follower_id); - if project_connection_ids.contains(&follower_id) && Some(follower_id.0) != leader_id { - self.peer - .forward_send(session.connection_id, follower_id, request.clone())?; - } - } - Ok(()) - } - - async fn get_users( - self: Arc, - request: proto::GetUsers, - response: Response, - _session: Session, - ) -> Result<()> { - let user_ids = request - .user_ids - .into_iter() - .map(UserId::from_proto) - .collect(); - let users = self - .app_state - .db - .get_users_by_ids(user_ids) - .await? 
- .into_iter() - .map(|user| proto::User { - id: user.id.to_proto(), - avatar_url: format!("https://github.com/{}.png?size=128", user.github_login), - github_login: user.github_login, - }) - .collect(); - response.send(proto::UsersResponse { users })?; - Ok(()) - } - - async fn fuzzy_search_users( - self: Arc, - request: proto::FuzzySearchUsers, - response: Response, - session: Session, - ) -> Result<()> { - let query = request.query; - let db = &self.app_state.db; - let users = match query.len() { - 0 => vec![], - 1 | 2 => db - .get_user_by_github_account(&query, None) - .await? - .into_iter() - .collect(), - _ => db.fuzzy_search_users(&query, 10).await?, - }; - let users = users - .into_iter() - .filter(|user| user.id != session.user_id) - .map(|user| proto::User { - id: user.id.to_proto(), - avatar_url: format!("https://github.com/{}.png?size=128", user.github_login), - github_login: user.github_login, - }) - .collect(); - response.send(proto::UsersResponse { users })?; - Ok(()) - } - - async fn request_contact( - self: Arc, - request: proto::RequestContact, - response: Response, - session: Session, - ) -> Result<()> { - let requester_id = session.user_id; - let responder_id = UserId::from_proto(request.responder_id); - if requester_id == responder_id { - return Err(anyhow!("cannot add yourself as a contact"))?; - } - - self.app_state - .db - .send_contact_request(requester_id, responder_id) - .await?; - - // Update outgoing contact requests of requester - let mut update = proto::UpdateContacts::default(); - update.outgoing_requests.push(responder_id.to_proto()); - for connection_id in self - .connection_pool() - .await - .user_connection_ids(requester_id) - { - self.peer.send(connection_id, update.clone())?; - } - - // Update incoming contact requests of responder - let mut update = proto::UpdateContacts::default(); - update - .incoming_requests - .push(proto::IncomingContactRequest { - requester_id: requester_id.to_proto(), - should_notify: true, - }); - for connection_id in self - .connection_pool() - .await - .user_connection_ids(responder_id) - { - self.peer.send(connection_id, update.clone())?; - } - - response.send(proto::Ack {})?; - Ok(()) - } - - async fn respond_to_contact_request( - self: Arc, - request: proto::RespondToContactRequest, - response: Response, - session: Session, - ) -> Result<()> { - let responder_id = session.user_id; - let requester_id = UserId::from_proto(request.requester_id); - if request.response == proto::ContactRequestResponse::Dismiss as i32 { - self.app_state - .db - .dismiss_contact_notification(responder_id, requester_id) - .await?; - } else { - let accept = request.response == proto::ContactRequestResponse::Accept as i32; - self.app_state - .db - .respond_to_contact_request(responder_id, requester_id, accept) - .await?; - let busy = self.app_state.db.is_user_busy(requester_id).await?; - - let pool = self.connection_pool().await; - // Update responder with new contact - let mut update = proto::UpdateContacts::default(); - if accept { - update - .contacts - .push(contact_for_user(requester_id, false, busy, &pool)); - } - update - .remove_incoming_requests - .push(requester_id.to_proto()); - for connection_id in pool.user_connection_ids(responder_id) { - self.peer.send(connection_id, update.clone())?; - } - - // Update requester with new contact - let mut update = proto::UpdateContacts::default(); - if accept { - update - .contacts - .push(contact_for_user(responder_id, true, busy, &pool)); - } - update - .remove_outgoing_requests - 
.push(responder_id.to_proto()); - for connection_id in pool.user_connection_ids(requester_id) { - self.peer.send(connection_id, update.clone())?; - } - } - - response.send(proto::Ack {})?; - Ok(()) - } - - async fn remove_contact( - self: Arc, - request: proto::RemoveContact, - response: Response, - session: Session, - ) -> Result<()> { - let requester_id = session.user_id; - let responder_id = UserId::from_proto(request.user_id); - self.app_state - .db - .remove_contact(requester_id, responder_id) - .await?; - - // Update outgoing contact requests of requester - let mut update = proto::UpdateContacts::default(); - update - .remove_outgoing_requests - .push(responder_id.to_proto()); - for connection_id in self - .connection_pool() - .await - .user_connection_ids(requester_id) - { - self.peer.send(connection_id, update.clone())?; - } - - // Update incoming contact requests of responder - let mut update = proto::UpdateContacts::default(); - update - .remove_incoming_requests - .push(requester_id.to_proto()); - for connection_id in self - .connection_pool() - .await - .user_connection_ids(responder_id) - { - self.peer.send(connection_id, update.clone())?; - } - - response.send(proto::Ack {})?; - Ok(()) - } - - async fn update_diff_base( - self: Arc, - request: proto::UpdateDiffBase, - session: Session, - ) -> Result<()> { - let project_id = ProjectId::from_proto(request.project_id); - let project_connection_ids = self - .app_state - .db - .project_connection_ids(project_id, session.connection_id) - .await?; - broadcast( - session.connection_id, - project_connection_ids, - |connection_id| { - self.peer - .forward_send(session.connection_id, connection_id, request.clone()) - }, - ); - Ok(()) - } - - async fn get_private_user_info( - self: Arc, - _request: proto::GetPrivateUserInfo, - response: Response, - session: Session, - ) -> Result<()> { - let metrics_id = self - .app_state - .db - .get_user_metrics_id(session.user_id) - .await?; - let user = self - .app_state - .db - .get_user_by_id(session.user_id) - .await? 
- .ok_or_else(|| anyhow!("user not found"))?; - response.send(proto::GetPrivateUserInfoResponse { - metrics_id, - staff: user.admin, - })?; - Ok(()) - } - - pub(crate) async fn connection_pool(&self) -> ConnectionPoolGuard<'_> { - #[cfg(test)] - tokio::task::yield_now().await; - let guard = self.connection_pool.lock().await; - #[cfg(test)] - tokio::task::yield_now().await; - ConnectionPoolGuard { - guard, - _not_send: PhantomData, - } - } - pub async fn snapshot<'a>(self: &'a Arc) -> ServerSnapshot<'a> { ServerSnapshot { - connection_pool: self.connection_pool().await, + connection_pool: ConnectionPoolGuard { + guard: self.connection_pool.lock().await, + _not_send: PhantomData, + }, peer: &self.peer, } } @@ -1847,7 +644,8 @@ pub async fn handle_websocket_request( pub async fn handle_metrics(Extension(server): Extension>) -> Result { let connections = server - .connection_pool() + .connection_pool + .lock() .await .connections() .filter(|connection| !connection.admin) @@ -1866,6 +664,1042 @@ pub async fn handle_metrics(Extension(server): Extension>) -> Result Ok(encoded_metrics) } +#[instrument(err)] +async fn sign_out(session: Session) -> Result<()> { + session.peer.disconnect(session.connection_id); + let decline_calls = { + let mut pool = session.connection_pool().await; + pool.remove_connection(session.connection_id)?; + let mut connections = pool.user_connection_ids(session.user_id); + connections.next().is_none() + }; + + leave_room_for_session(&session).await.trace_err(); + if decline_calls { + if let Some(room) = session + .db() + .await + .decline_call(None, session.user_id) + .await + .trace_err() + { + room_updated(&room, &session); + } + } + + update_user_contacts(session.user_id, &session).await?; + + Ok(()) +} + +async fn ping(_: proto::Ping, response: Response, _session: Session) -> Result<()> { + response.send(proto::Ack {})?; + Ok(()) +} + +async fn create_room( + _request: proto::CreateRoom, + response: Response, + session: Session, +) -> Result<()> { + let room = session + .db() + .await + .create_room(session.user_id, session.connection_id) + .await?; + + let live_kit_connection_info = if let Some(live_kit) = session.live_kit_client.as_ref() { + if let Some(_) = live_kit + .create_room(room.live_kit_room.clone()) + .await + .trace_err() + { + if let Some(token) = live_kit + .room_token(&room.live_kit_room, &session.connection_id.to_string()) + .trace_err() + { + Some(proto::LiveKitConnectionInfo { + server_url: live_kit.url().into(), + token, + }) + } else { + None + } + } else { + None + } + } else { + None + }; + + response.send(proto::CreateRoomResponse { + room: Some(room), + live_kit_connection_info, + })?; + update_user_contacts(session.user_id, &session).await?; + Ok(()) +} + +async fn join_room( + request: proto::JoinRoom, + response: Response, + session: Session, +) -> Result<()> { + let room = session + .db() + .await + .join_room( + RoomId::from_proto(request.id), + session.user_id, + session.connection_id, + ) + .await?; + for connection_id in session + .connection_pool() + .await + .user_connection_ids(session.user_id) + { + session + .peer + .send(connection_id, proto::CallCanceled {}) + .trace_err(); + } + + let live_kit_connection_info = if let Some(live_kit) = session.live_kit_client.as_ref() { + if let Some(token) = live_kit + .room_token(&room.live_kit_room, &session.connection_id.to_string()) + .trace_err() + { + Some(proto::LiveKitConnectionInfo { + server_url: live_kit.url().into(), + token, + }) + } else { + None + } + } else { + None + }; + + 
room_updated(&room, &session); + response.send(proto::JoinRoomResponse { + room: Some(room), + live_kit_connection_info, + })?; + + update_user_contacts(session.user_id, &session).await?; + Ok(()) +} + +async fn leave_room(_message: proto::LeaveRoom, session: Session) -> Result<()> { + leave_room_for_session(&session).await +} + +async fn call( + request: proto::Call, + response: Response, + session: Session, +) -> Result<()> { + let room_id = RoomId::from_proto(request.room_id); + let calling_user_id = session.user_id; + let calling_connection_id = session.connection_id; + let called_user_id = UserId::from_proto(request.called_user_id); + let initial_project_id = request.initial_project_id.map(ProjectId::from_proto); + if !session + .db() + .await + .has_contact(calling_user_id, called_user_id) + .await? + { + return Err(anyhow!("cannot call a user who isn't a contact"))?; + } + + let (room, incoming_call) = session + .db() + .await + .call( + room_id, + calling_user_id, + calling_connection_id, + called_user_id, + initial_project_id, + ) + .await?; + room_updated(&room, &session); + update_user_contacts(called_user_id, &session).await?; + + let mut calls = session + .connection_pool() + .await + .user_connection_ids(called_user_id) + .map(|connection_id| session.peer.request(connection_id, incoming_call.clone())) + .collect::>(); + + while let Some(call_response) = calls.next().await { + match call_response.as_ref() { + Ok(_) => { + response.send(proto::Ack {})?; + return Ok(()); + } + Err(_) => { + call_response.trace_err(); + } + } + } + + let room = session + .db() + .await + .call_failed(room_id, called_user_id) + .await?; + room_updated(&room, &session); + update_user_contacts(called_user_id, &session).await?; + + Err(anyhow!("failed to ring user"))? 
+}
+
+async fn cancel_call(
+    request: proto::CancelCall,
+    response: Response<proto::CancelCall>,
+    session: Session,
+) -> Result<()> {
+    let called_user_id = UserId::from_proto(request.called_user_id);
+    let room_id = RoomId::from_proto(request.room_id);
+    let room = session
+        .db()
+        .await
+        .cancel_call(Some(room_id), session.connection_id, called_user_id)
+        .await?;
+    for connection_id in session
+        .connection_pool()
+        .await
+        .user_connection_ids(called_user_id)
+    {
+        session
+            .peer
+            .send(connection_id, proto::CallCanceled {})
+            .trace_err();
+    }
+    room_updated(&room, &session);
+    response.send(proto::Ack {})?;
+
+    update_user_contacts(called_user_id, &session).await?;
+    Ok(())
+}
+
+async fn decline_call(message: proto::DeclineCall, session: Session) -> Result<()> {
+    let room_id = RoomId::from_proto(message.room_id);
+    let room = session
+        .db()
+        .await
+        .decline_call(Some(room_id), session.user_id)
+        .await?;
+    for connection_id in session
+        .connection_pool()
+        .await
+        .user_connection_ids(session.user_id)
+    {
+        session
+            .peer
+            .send(connection_id, proto::CallCanceled {})
+            .trace_err();
+    }
+    room_updated(&room, &session);
+    update_user_contacts(session.user_id, &session).await?;
+    Ok(())
+}
+
+async fn update_participant_location(
+    request: proto::UpdateParticipantLocation,
+    response: Response<proto::UpdateParticipantLocation>,
+    session: Session,
+) -> Result<()> {
+    let room_id = RoomId::from_proto(request.room_id);
+    let location = request
+        .location
+        .ok_or_else(|| anyhow!("invalid location"))?;
+    let room = session
+        .db()
+        .await
+        .update_room_participant_location(room_id, session.connection_id, location)
+        .await?;
+    room_updated(&room, &session);
+    response.send(proto::Ack {})?;
+    Ok(())
+}
+
+async fn share_project(
+    request: proto::ShareProject,
+    response: Response<proto::ShareProject>,
+    session: Session,
+) -> Result<()> {
+    let (project_id, room) = session
+        .db()
+        .await
+        .share_project(
+            RoomId::from_proto(request.room_id),
+            session.connection_id,
+            &request.worktrees,
+        )
+        .await?;
+    response.send(proto::ShareProjectResponse {
+        project_id: project_id.to_proto(),
+    })?;
+    room_updated(&room, &session);
+
+    Ok(())
+}
+
+async fn unshare_project(message: proto::UnshareProject, session: Session) -> Result<()> {
+    let project_id = ProjectId::from_proto(message.project_id);
+
+    let (room, guest_connection_ids) = session
+        .db()
+        .await
+        .unshare_project(project_id, session.connection_id)
+        .await?;
+
+    broadcast(session.connection_id, guest_connection_ids, |conn_id| {
+        session.peer.send(conn_id, message.clone())
+    });
+    room_updated(&room, &session);
+
+    Ok(())
+}
+
+async fn join_project(
+    request: proto::JoinProject,
+    response: Response<proto::JoinProject>,
+    session: Session,
+) -> Result<()> {
+    let project_id = ProjectId::from_proto(request.project_id);
+    let guest_user_id = session.user_id;
+
+    tracing::info!(%project_id, "join project");
+
+    let (project, replica_id) = session
+        .db()
+        .await
+        .join_project(project_id, session.connection_id)
+        .await?;
+
+    let collaborators = project
+        .collaborators
+        .iter()
+        .filter(|collaborator| collaborator.connection_id != session.connection_id.0 as i32)
+        .map(|collaborator| proto::Collaborator {
+            peer_id: collaborator.connection_id as u32,
+            replica_id: collaborator.replica_id.0 as u32,
+            user_id: collaborator.user_id.to_proto(),
+        })
+        .collect::<Vec<_>>();
+    let worktrees = project
+        .worktrees
+        .iter()
+        .map(|(id, worktree)| proto::WorktreeMetadata {
+            id: id.to_proto(),
+            root_name: worktree.root_name.clone(),
+            visible: worktree.visible,
+            abs_path: worktree.abs_path.clone(),
+        })
+        .collect::<Vec<_>>();
+
+    for collaborator in &collaborators {
+        session
+            .peer
+            .send(
+                ConnectionId(collaborator.peer_id),
+                proto::AddProjectCollaborator {
+                    project_id: project_id.to_proto(),
+                    collaborator: Some(proto::Collaborator {
+                        peer_id: session.connection_id.0,
+                        replica_id: replica_id.0 as u32,
+                        user_id: guest_user_id.to_proto(),
+                    }),
+                },
+            )
+            .trace_err();
+    }
+
+    // First, we send the metadata associated with each worktree.
+    response.send(proto::JoinProjectResponse {
+        worktrees: worktrees.clone(),
+        replica_id: replica_id.0 as u32,
+        collaborators: collaborators.clone(),
+        language_servers: project.language_servers.clone(),
+    })?;
+
+    for (worktree_id, worktree) in project.worktrees {
+        #[cfg(any(test, feature = "test-support"))]
+        const MAX_CHUNK_SIZE: usize = 2;
+        #[cfg(not(any(test, feature = "test-support")))]
+        const MAX_CHUNK_SIZE: usize = 256;
+
+        // Stream this worktree's entries.
+        let message = proto::UpdateWorktree {
+            project_id: project_id.to_proto(),
+            worktree_id: worktree_id.to_proto(),
+            abs_path: worktree.abs_path.clone(),
+            root_name: worktree.root_name,
+            updated_entries: worktree.entries,
+            removed_entries: Default::default(),
+            scan_id: worktree.scan_id,
+            is_last_update: worktree.is_complete,
+        };
+        for update in proto::split_worktree_update(message, MAX_CHUNK_SIZE) {
+            session.peer.send(session.connection_id, update.clone())?;
+        }
+
+        // Stream this worktree's diagnostics.
+        for summary in worktree.diagnostic_summaries {
+            session.peer.send(
+                session.connection_id,
+                proto::UpdateDiagnosticSummary {
+                    project_id: project_id.to_proto(),
+                    worktree_id: worktree.id.to_proto(),
+                    summary: Some(summary),
+                },
+            )?;
+        }
+    }
+
+    for language_server in &project.language_servers {
+        session.peer.send(
+            session.connection_id,
+            proto::UpdateLanguageServer {
+                project_id: project_id.to_proto(),
+                language_server_id: language_server.id,
+                variant: Some(
+                    proto::update_language_server::Variant::DiskBasedDiagnosticsUpdated(
+                        proto::LspDiskBasedDiagnosticsUpdated {},
+                    ),
+                ),
+            },
+        )?;
+    }
+
+    Ok(())
+}
+
+async fn leave_project(request: proto::LeaveProject, session: Session) -> Result<()> {
+    let sender_id = session.connection_id;
+    let project_id = ProjectId::from_proto(request.project_id);
+    let project;
+    {
+        project = session
+            .db()
+            .await
+            .leave_project(project_id, sender_id)
+            .await?;
+        tracing::info!(
+            %project_id,
+            host_user_id = %project.host_user_id,
+            host_connection_id = %project.host_connection_id,
+            "leave project"
+        );
+
+        broadcast(sender_id, project.connection_ids, |conn_id| {
+            session.peer.send(
+                conn_id,
+                proto::RemoveProjectCollaborator {
+                    project_id: project_id.to_proto(),
+                    peer_id: sender_id.0,
+                },
+            )
+        });
+    }
+
+    Ok(())
+}
+
+async fn update_project(
+    request: proto::UpdateProject,
+    response: Response<proto::UpdateProject>,
+    session: Session,
+) -> Result<()> {
+    let project_id = ProjectId::from_proto(request.project_id);
+    let (room, guest_connection_ids) = session
+        .db()
+        .await
+        .update_project(project_id, session.connection_id, &request.worktrees)
+        .await?;
+    broadcast(
+        session.connection_id,
+        guest_connection_ids,
+        |connection_id| {
+            session
+                .peer
+                .forward_send(session.connection_id, connection_id, request.clone())
+        },
+    );
+    room_updated(&room, &session);
+    response.send(proto::Ack {})?;
+
+    Ok(())
+}
+
+async fn update_worktree(
+    request: proto::UpdateWorktree,
+    response: Response<proto::UpdateWorktree>,
+    session: Session,
+) -> Result<()> {
+    let guest_connection_ids = session
+        .db()
+        .await
+        .update_worktree(&request,
session.connection_id) + .await?; + + broadcast( + session.connection_id, + guest_connection_ids, + |connection_id| { + session + .peer + .forward_send(session.connection_id, connection_id, request.clone()) + }, + ); + response.send(proto::Ack {})?; + Ok(()) +} + +async fn update_diagnostic_summary( + request: proto::UpdateDiagnosticSummary, + response: Response, + session: Session, +) -> Result<()> { + let guest_connection_ids = session + .db() + .await + .update_diagnostic_summary(&request, session.connection_id) + .await?; + + broadcast( + session.connection_id, + guest_connection_ids, + |connection_id| { + session + .peer + .forward_send(session.connection_id, connection_id, request.clone()) + }, + ); + + response.send(proto::Ack {})?; + Ok(()) +} + +async fn start_language_server( + request: proto::StartLanguageServer, + session: Session, +) -> Result<()> { + let guest_connection_ids = session + .db() + .await + .start_language_server(&request, session.connection_id) + .await?; + + broadcast( + session.connection_id, + guest_connection_ids, + |connection_id| { + session + .peer + .forward_send(session.connection_id, connection_id, request.clone()) + }, + ); + Ok(()) +} + +async fn update_language_server( + request: proto::UpdateLanguageServer, + session: Session, +) -> Result<()> { + let project_id = ProjectId::from_proto(request.project_id); + let project_connection_ids = session + .db() + .await + .project_connection_ids(project_id, session.connection_id) + .await?; + broadcast( + session.connection_id, + project_connection_ids, + |connection_id| { + session + .peer + .forward_send(session.connection_id, connection_id, request.clone()) + }, + ); + Ok(()) +} + +async fn forward_project_request( + request: T, + response: Response, + session: Session, +) -> Result<()> +where + T: EntityMessage + RequestMessage, +{ + let project_id = ProjectId::from_proto(request.remote_entity_id()); + let collaborators = session + .db() + .await + .project_collaborators(project_id, session.connection_id) + .await?; + let host = collaborators + .iter() + .find(|collaborator| collaborator.is_host) + .ok_or_else(|| anyhow!("host not found"))?; + + let payload = session + .peer + .forward_request( + session.connection_id, + ConnectionId(host.connection_id as u32), + request, + ) + .await?; + + response.send(payload)?; + Ok(()) +} + +async fn save_buffer( + request: proto::SaveBuffer, + response: Response, + session: Session, +) -> Result<()> { + let project_id = ProjectId::from_proto(request.project_id); + let collaborators = session + .db() + .await + .project_collaborators(project_id, session.connection_id) + .await?; + let host = collaborators + .into_iter() + .find(|collaborator| collaborator.is_host) + .ok_or_else(|| anyhow!("host not found"))?; + let host_connection_id = ConnectionId(host.connection_id as u32); + let response_payload = session + .peer + .forward_request(session.connection_id, host_connection_id, request.clone()) + .await?; + + let mut collaborators = session + .db() + .await + .project_collaborators(project_id, session.connection_id) + .await?; + collaborators + .retain(|collaborator| collaborator.connection_id != session.connection_id.0 as i32); + let project_connection_ids = collaborators + .into_iter() + .map(|collaborator| ConnectionId(collaborator.connection_id as u32)); + broadcast(host_connection_id, project_connection_ids, |conn_id| { + session + .peer + .forward_send(host_connection_id, conn_id, response_payload.clone()) + }); + response.send(response_payload)?; + Ok(()) +} 
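+
+// The buffer handlers below are plain relays: the server does not interpret
+// buffer contents, it only forwards each payload to the relevant peers on
+// the project, acknowledging where the protocol expects a response.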
+ +async fn create_buffer_for_peer( + request: proto::CreateBufferForPeer, + session: Session, +) -> Result<()> { + session.peer.forward_send( + session.connection_id, + ConnectionId(request.peer_id), + request, + )?; + Ok(()) +} + +async fn update_buffer( + request: proto::UpdateBuffer, + response: Response, + session: Session, +) -> Result<()> { + let project_id = ProjectId::from_proto(request.project_id); + let project_connection_ids = session + .db() + .await + .project_connection_ids(project_id, session.connection_id) + .await?; + + broadcast( + session.connection_id, + project_connection_ids, + |connection_id| { + session + .peer + .forward_send(session.connection_id, connection_id, request.clone()) + }, + ); + response.send(proto::Ack {})?; + Ok(()) +} + +async fn update_buffer_file(request: proto::UpdateBufferFile, session: Session) -> Result<()> { + let project_id = ProjectId::from_proto(request.project_id); + let project_connection_ids = session + .db() + .await + .project_connection_ids(project_id, session.connection_id) + .await?; + + broadcast( + session.connection_id, + project_connection_ids, + |connection_id| { + session + .peer + .forward_send(session.connection_id, connection_id, request.clone()) + }, + ); + Ok(()) +} + +async fn buffer_reloaded(request: proto::BufferReloaded, session: Session) -> Result<()> { + let project_id = ProjectId::from_proto(request.project_id); + let project_connection_ids = session + .db() + .await + .project_connection_ids(project_id, session.connection_id) + .await?; + broadcast( + session.connection_id, + project_connection_ids, + |connection_id| { + session + .peer + .forward_send(session.connection_id, connection_id, request.clone()) + }, + ); + Ok(()) +} + +async fn buffer_saved(request: proto::BufferSaved, session: Session) -> Result<()> { + let project_id = ProjectId::from_proto(request.project_id); + let project_connection_ids = session + .db() + .await + .project_connection_ids(project_id, session.connection_id) + .await?; + broadcast( + session.connection_id, + project_connection_ids, + |connection_id| { + session + .peer + .forward_send(session.connection_id, connection_id, request.clone()) + }, + ); + Ok(()) +} + +async fn follow( + request: proto::Follow, + response: Response, + session: Session, +) -> Result<()> { + let project_id = ProjectId::from_proto(request.project_id); + let leader_id = ConnectionId(request.leader_id); + let follower_id = session.connection_id; + let project_connection_ids = session + .db() + .await + .project_connection_ids(project_id, session.connection_id) + .await?; + + if !project_connection_ids.contains(&leader_id) { + Err(anyhow!("no such peer"))?; + } + + let mut response_payload = session + .peer + .forward_request(session.connection_id, leader_id, request) + .await?; + response_payload + .views + .retain(|view| view.leader_id != Some(follower_id.0)); + response.send(response_payload)?; + Ok(()) +} + +async fn unfollow(request: proto::Unfollow, session: Session) -> Result<()> { + let project_id = ProjectId::from_proto(request.project_id); + let leader_id = ConnectionId(request.leader_id); + let project_connection_ids = session + .db() + .await + .project_connection_ids(project_id, session.connection_id) + .await?; + if !project_connection_ids.contains(&leader_id) { + Err(anyhow!("no such peer"))?; + } + session + .peer + .forward_send(session.connection_id, leader_id, request)?; + Ok(()) +} + +async fn update_followers(request: proto::UpdateFollowers, session: Session) -> Result<()> { + let 
project_id = ProjectId::from_proto(request.project_id); + let project_connection_ids = session + .db + .lock() + .await + .project_connection_ids(project_id, session.connection_id) + .await?; + + let leader_id = request.variant.as_ref().and_then(|variant| match variant { + proto::update_followers::Variant::CreateView(payload) => payload.leader_id, + proto::update_followers::Variant::UpdateView(payload) => payload.leader_id, + proto::update_followers::Variant::UpdateActiveView(payload) => payload.leader_id, + }); + for follower_id in &request.follower_ids { + let follower_id = ConnectionId(*follower_id); + if project_connection_ids.contains(&follower_id) && Some(follower_id.0) != leader_id { + session + .peer + .forward_send(session.connection_id, follower_id, request.clone())?; + } + } + Ok(()) +} + +async fn get_users( + request: proto::GetUsers, + response: Response, + session: Session, +) -> Result<()> { + let user_ids = request + .user_ids + .into_iter() + .map(UserId::from_proto) + .collect(); + let users = session + .db() + .await + .get_users_by_ids(user_ids) + .await? + .into_iter() + .map(|user| proto::User { + id: user.id.to_proto(), + avatar_url: format!("https://github.com/{}.png?size=128", user.github_login), + github_login: user.github_login, + }) + .collect(); + response.send(proto::UsersResponse { users })?; + Ok(()) +} + +async fn fuzzy_search_users( + request: proto::FuzzySearchUsers, + response: Response, + session: Session, +) -> Result<()> { + let query = request.query; + let users = match query.len() { + 0 => vec![], + 1 | 2 => session + .db() + .await + .get_user_by_github_account(&query, None) + .await? + .into_iter() + .collect(), + _ => session.db().await.fuzzy_search_users(&query, 10).await?, + }; + let users = users + .into_iter() + .filter(|user| user.id != session.user_id) + .map(|user| proto::User { + id: user.id.to_proto(), + avatar_url: format!("https://github.com/{}.png?size=128", user.github_login), + github_login: user.github_login, + }) + .collect(); + response.send(proto::UsersResponse { users })?; + Ok(()) +} + +async fn request_contact( + request: proto::RequestContact, + response: Response, + session: Session, +) -> Result<()> { + let requester_id = session.user_id; + let responder_id = UserId::from_proto(request.responder_id); + if requester_id == responder_id { + return Err(anyhow!("cannot add yourself as a contact"))?; + } + + session + .db() + .await + .send_contact_request(requester_id, responder_id) + .await?; + + // Update outgoing contact requests of requester + let mut update = proto::UpdateContacts::default(); + update.outgoing_requests.push(responder_id.to_proto()); + for connection_id in session + .connection_pool() + .await + .user_connection_ids(requester_id) + { + session.peer.send(connection_id, update.clone())?; + } + + // Update incoming contact requests of responder + let mut update = proto::UpdateContacts::default(); + update + .incoming_requests + .push(proto::IncomingContactRequest { + requester_id: requester_id.to_proto(), + should_notify: true, + }); + for connection_id in session + .connection_pool() + .await + .user_connection_ids(responder_id) + { + session.peer.send(connection_id, update.clone())?; + } + + response.send(proto::Ack {})?; + Ok(()) +} + +async fn respond_to_contact_request( + request: proto::RespondToContactRequest, + response: Response, + session: Session, +) -> Result<()> { + let responder_id = session.user_id; + let requester_id = UserId::from_proto(request.requester_id); + let db = session.db().await; + 
if request.response == proto::ContactRequestResponse::Dismiss as i32 { + db.dismiss_contact_notification(responder_id, requester_id) + .await?; + } else { + let accept = request.response == proto::ContactRequestResponse::Accept as i32; + + db.respond_to_contact_request(responder_id, requester_id, accept) + .await?; + let busy = db.is_user_busy(requester_id).await?; + + let pool = session.connection_pool().await; + // Update responder with new contact + let mut update = proto::UpdateContacts::default(); + if accept { + update + .contacts + .push(contact_for_user(requester_id, false, busy, &pool)); + } + update + .remove_incoming_requests + .push(requester_id.to_proto()); + for connection_id in pool.user_connection_ids(responder_id) { + session.peer.send(connection_id, update.clone())?; + } + + // Update requester with new contact + let mut update = proto::UpdateContacts::default(); + if accept { + update + .contacts + .push(contact_for_user(responder_id, true, busy, &pool)); + } + update + .remove_outgoing_requests + .push(responder_id.to_proto()); + for connection_id in pool.user_connection_ids(requester_id) { + session.peer.send(connection_id, update.clone())?; + } + } + + response.send(proto::Ack {})?; + Ok(()) +} + +async fn remove_contact( + request: proto::RemoveContact, + response: Response, + session: Session, +) -> Result<()> { + let requester_id = session.user_id; + let responder_id = UserId::from_proto(request.user_id); + let db = session.db().await; + db.remove_contact(requester_id, responder_id).await?; + + let pool = session.connection_pool().await; + // Update outgoing contact requests of requester + let mut update = proto::UpdateContacts::default(); + update + .remove_outgoing_requests + .push(responder_id.to_proto()); + for connection_id in pool.user_connection_ids(requester_id) { + session.peer.send(connection_id, update.clone())?; + } + + // Update incoming contact requests of responder + let mut update = proto::UpdateContacts::default(); + update + .remove_incoming_requests + .push(requester_id.to_proto()); + for connection_id in pool.user_connection_ids(responder_id) { + session.peer.send(connection_id, update.clone())?; + } + + response.send(proto::Ack {})?; + Ok(()) +} + +async fn update_diff_base(request: proto::UpdateDiffBase, session: Session) -> Result<()> { + let project_id = ProjectId::from_proto(request.project_id); + let project_connection_ids = session + .db() + .await + .project_connection_ids(project_id, session.connection_id) + .await?; + broadcast( + session.connection_id, + project_connection_ids, + |connection_id| { + session + .peer + .forward_send(session.connection_id, connection_id, request.clone()) + }, + ); + Ok(()) +} + +async fn get_private_user_info( + _request: proto::GetPrivateUserInfo, + response: Response, + session: Session, +) -> Result<()> { + let metrics_id = session + .db() + .await + .get_user_metrics_id(session.user_id) + .await?; + let user = session + .db() + .await + .get_user_by_id(session.user_id) + .await? 
+ .ok_or_else(|| anyhow!("user not found"))?; + response.send(proto::GetPrivateUserInfoResponse { + metrics_id, + staff: user.admin, + })?; + Ok(()) +} + fn to_axum_message(message: TungsteniteMessage) -> AxumMessage { match message { TungsteniteMessage::Text(payload) => AxumMessage::Text(payload), @@ -1941,6 +1775,137 @@ fn contact_for_user( } } +fn room_updated(room: &proto::Room, session: &Session) { + for participant in &room.participants { + session + .peer + .send( + ConnectionId(participant.peer_id), + proto::RoomUpdated { + room: Some(room.clone()), + }, + ) + .trace_err(); + } +} + +async fn update_user_contacts(user_id: UserId, session: &Session) -> Result<()> { + let db = session.db().await; + let contacts = db.get_contacts(user_id).await?; + let busy = db.is_user_busy(user_id).await?; + + let pool = session.connection_pool().await; + let updated_contact = contact_for_user(user_id, false, busy, &pool); + for contact in contacts { + if let db::Contact::Accepted { + user_id: contact_user_id, + .. + } = contact + { + for contact_conn_id in pool.user_connection_ids(contact_user_id) { + session + .peer + .send( + contact_conn_id, + proto::UpdateContacts { + contacts: vec![updated_contact.clone()], + remove_contacts: Default::default(), + incoming_requests: Default::default(), + remove_incoming_requests: Default::default(), + outgoing_requests: Default::default(), + remove_outgoing_requests: Default::default(), + }, + ) + .trace_err(); + } + } + } + Ok(()) +} + +async fn leave_room_for_session(session: &Session) -> Result<()> { + let mut contacts_to_update = HashSet::default(); + + let Some(left_room) = session.db().await.leave_room(session.connection_id).await? else { + return Err(anyhow!("no room to leave"))?; + }; + contacts_to_update.insert(session.user_id); + + for project in left_room.left_projects.into_values() { + for connection_id in project.connection_ids { + if project.host_user_id == session.user_id { + session + .peer + .send( + connection_id, + proto::UnshareProject { + project_id: project.id.to_proto(), + }, + ) + .trace_err(); + } else { + session + .peer + .send( + connection_id, + proto::RemoveProjectCollaborator { + project_id: project.id.to_proto(), + peer_id: session.connection_id.0, + }, + ) + .trace_err(); + } + } + + session + .peer + .send( + session.connection_id, + proto::UnshareProject { + project_id: project.id.to_proto(), + }, + ) + .trace_err(); + } + + room_updated(&left_room.room, &session); + { + let pool = session.connection_pool().await; + for canceled_user_id in left_room.canceled_calls_to_user_ids { + for connection_id in pool.user_connection_ids(canceled_user_id) { + session + .peer + .send(connection_id, proto::CallCanceled {}) + .trace_err(); + } + contacts_to_update.insert(canceled_user_id); + } + } + + for contact_user_id in contacts_to_update { + update_user_contacts(contact_user_id, &session).await?; + } + + if let Some(live_kit) = session.live_kit_client.as_ref() { + live_kit + .remove_participant( + left_room.room.live_kit_room.clone(), + session.connection_id.to_string(), + ) + .await + .trace_err(); + + if left_room.room.participants.is_empty() { + live_kit + .delete_room(left_room.room.live_kit_room) + .await + .trace_err(); + } + } + + Ok(()) +} + pub trait ResultExt { type Ok; From 4c1b4953c17b48c19b57d6e9eb0247059f5de85f Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 18 Nov 2022 20:18:48 +0100 Subject: [PATCH 058/240] Remove version from `Room` We won't need it once we add the per-room lock. 
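
For context, a minimal sketch of what replaces the version counter
(illustrative only; it assumes the shape of the "per-room lock" commit
later in this series, with names abbreviated): the server serializes
writes to a room behind a per-room async mutex, so `RoomUpdated`
payloads can no longer race each other and clients no longer need to
discard stale ones by comparing versions.

    use std::sync::Arc;
    use dashmap::DashMap;
    use tokio::sync::{Mutex, OwnedMutexGuard};

    #[derive(Copy, Clone, PartialEq, Eq, Hash)]
    pub struct RoomId(i32);

    // Carries the result of a room write together with the room's lock,
    // so the caller finishes broadcasting before the next writer runs.
    pub struct RoomGuard<T> {
        pub data: T,
        _guard: OwnedMutexGuard<()>,
    }

    pub struct Db {
        // One async mutex per room; writers take it before committing.
        rooms: DashMap<RoomId, Arc<Mutex<()>>>,
    }

    impl Db {
        pub async fn lock_room(&self, room_id: RoomId) -> OwnedMutexGuard<()> {
            let lock = self.rooms.entry(room_id).or_default().clone();
            lock.lock_owned().await
        }
    }

In the actual commit the guard is acquired before `tx.commit()` and
returned in a `RoomGuard` alongside the query results.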
--- crates/call/src/room.rs | 10 --- .../20221109000000_test_schema.sql | 1 - .../20221111092550_reconnection_support.sql | 1 - crates/collab/src/db.rs | 71 +++++++++---------- crates/rpc/proto/zed.proto | 7 +- 5 files changed, 37 insertions(+), 53 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index 8c1b0d9de0..f8a55a3a93 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -34,7 +34,6 @@ pub enum Event { pub struct Room { id: u64, - version: u64, live_kit: Option, status: RoomStatus, local_participant: LocalParticipant, @@ -62,7 +61,6 @@ impl Entity for Room { impl Room { fn new( id: u64, - version: u64, live_kit_connection_info: Option, client: Arc, user_store: ModelHandle, @@ -135,7 +133,6 @@ impl Room { Self { id, - version, live_kit: live_kit_room, status: RoomStatus::Online, participant_user_ids: Default::default(), @@ -164,7 +161,6 @@ impl Room { let room = cx.add_model(|cx| { Self::new( room_proto.id, - room_proto.version, response.live_kit_connection_info, client, user_store, @@ -209,7 +205,6 @@ impl Room { let room = cx.add_model(|cx| { Self::new( room_id, - 0, response.live_kit_connection_info, client, user_store, @@ -321,10 +316,6 @@ impl Room { futures::join!(remote_participants, pending_participants); this.update(&mut cx, |this, cx| { - if this.version >= room.version { - return; - } - this.participant_user_ids.clear(); if let Some(participant) = local_participant { @@ -429,7 +420,6 @@ impl Room { let _ = this.leave(cx); } - this.version = room.version; this.check_invariants(); cx.notify(); }); diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 66925fddd5..02ca0c75a9 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -36,7 +36,6 @@ CREATE INDEX "index_contacts_user_id_b" ON "contacts" ("user_id_b"); CREATE TABLE "rooms" ( "id" INTEGER PRIMARY KEY, - "version" INTEGER NOT NULL, "live_kit_room" VARCHAR NOT NULL ); diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index 2b8f7824cb..b742f8e0cd 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -1,6 +1,5 @@ CREATE TABLE IF NOT EXISTS "rooms" ( "id" SERIAL PRIMARY KEY, - "version" INTEGER NOT NULL, "live_kit_room" VARCHAR NOT NULL ); diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 6cb5373881..54d4497833 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -931,13 +931,12 @@ where let live_kit_room = nanoid::nanoid!(30); let room_id = sqlx::query_scalar( " - INSERT INTO rooms (live_kit_room, version) - VALUES ($1, $2) + INSERT INTO rooms (live_kit_room) + VALUES ($1) RETURNING id ", ) .bind(&live_kit_room) - .bind(0) .fetch_one(&mut tx) .await .map(RoomId)?; @@ -956,7 +955,9 @@ where .execute(&mut tx) .await?; - self.commit_room_transaction(room_id, tx).await + let room = self.get_room(room_id, &mut tx).await?; + tx.commit().await?; + Ok(room) }).await } @@ -983,7 +984,9 @@ where .execute(&mut tx) .await?; - let room = self.commit_room_transaction(room_id, tx).await?; + let room = self.get_room(room_id, &mut tx).await?; + tx.commit().await?; + let incoming_call = Self::build_incoming_call(&room, called_user_id) .ok_or_else(|| anyhow!("failed to build incoming 
call"))?; Ok((room, incoming_call)) @@ -1061,7 +1064,9 @@ where .execute(&mut tx) .await?; - self.commit_room_transaction(room_id, tx).await + let room = self.get_room(room_id, &mut tx).await?; + tx.commit().await?; + Ok(room) }) .await } @@ -1086,7 +1091,9 @@ where return Err(anyhow!("declining call on unexpected room"))?; } - self.commit_room_transaction(room_id, tx).await + let room = self.get_room(room_id, &mut tx).await?; + tx.commit().await?; + Ok(room) }) .await } @@ -1113,7 +1120,9 @@ where return Err(anyhow!("canceling call on unexpected room"))?; } - self.commit_room_transaction(room_id, tx).await + let room = self.get_room(room_id, &mut tx).await?; + tx.commit().await?; + Ok(room) }).await } @@ -1137,7 +1146,10 @@ where .bind(user_id) .fetch_one(&mut tx) .await?; - self.commit_room_transaction(room_id, tx).await + + let room = self.get_room(room_id, &mut tx).await?; + tx.commit().await?; + Ok(room) }) .await } @@ -1245,7 +1257,9 @@ where .execute(&mut tx) .await?; - let room = self.commit_room_transaction(room_id, tx).await?; + let room = self.get_room(room_id, &mut tx).await?; + tx.commit().await?; + Ok(Some(LeftRoom { room, left_projects, @@ -1302,32 +1316,13 @@ where .fetch_one(&mut tx) .await?; - self.commit_room_transaction(room_id, tx).await + let room = self.get_room(room_id, &mut tx).await?; + tx.commit().await?; + Ok(room) }) .await } - async fn commit_room_transaction( - &self, - room_id: RoomId, - mut tx: sqlx::Transaction<'_, D>, - ) -> Result { - sqlx::query( - " - UPDATE rooms - SET version = version + 1 - WHERE id = $1 - ", - ) - .bind(room_id) - .execute(&mut tx) - .await?; - let room = self.get_room(room_id, &mut tx).await?; - tx.commit().await?; - - Ok(room) - } - async fn get_guest_connection_ids( &self, project_id: ProjectId, @@ -1455,7 +1450,6 @@ where Ok(proto::Room { id: room.id.to_proto(), - version: room.version as u64, live_kit_room: room.live_kit_room, participants: participants.into_values().collect(), pending_participants, @@ -1565,7 +1559,9 @@ where .execute(&mut tx) .await?; - let room = self.commit_room_transaction(room_id, tx).await?; + let room = self.get_room(room_id, &mut tx).await?; + tx.commit().await?; + Ok((project_id, room)) }) .await @@ -1589,7 +1585,8 @@ where .bind(connection_id.0 as i32) .fetch_one(&mut tx) .await?; - let room = self.commit_room_transaction(room_id, tx).await?; + let room = self.get_room(room_id, &mut tx).await?; + tx.commit().await?; Ok((room, guest_connection_ids)) }) @@ -1666,7 +1663,8 @@ where query.execute(&mut tx).await?; let guest_connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; - let room = self.commit_room_transaction(room_id, tx).await?; + let room = self.get_room(room_id, &mut tx).await?; + tx.commit().await?; Ok((room, guest_connection_ids)) }) @@ -2614,7 +2612,6 @@ id_type!(RoomId); #[derive(Clone, Debug, Default, FromRow, Serialize, PartialEq)] pub struct Room { pub id: RoomId, - pub version: i32, pub live_kit_room: String, } diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index 30c1c89e8f..6f26e0dfa1 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -160,10 +160,9 @@ message LeaveRoom {} message Room { uint64 id = 1; - uint64 version = 2; - repeated Participant participants = 3; - repeated PendingParticipant pending_participants = 4; - string live_kit_room = 5; + repeated Participant participants = 2; + repeated PendingParticipant pending_participants = 3; + string live_kit_room = 4; } message Participant { From 
ae11e4f798f8a0af13f4bd46bf32ddd33602cd3a Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 28 Nov 2022 13:56:03 +0100 Subject: [PATCH 059/240] Check the correct serialization failure code when retrying transaction --- crates/collab/src/db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 54d4497833..295234af61 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -2520,7 +2520,7 @@ where .as_database_error() .and_then(|error| error.code()) .as_deref() - == Some("hey") => + == Some("40001") => { // Retry (don't break the loop) } From b0e1d6bc7f5cd6986ab1666639d866207d72ee44 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 28 Nov 2022 13:57:15 +0100 Subject: [PATCH 060/240] Fix integration test incorrectly assuming a certain ordering --- crates/collab/src/integration_tests.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 006598a6b1..cf6bb8af3a 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -2422,7 +2422,10 @@ async fn test_collaborating_with_diagnostics( // Join project as client C and observe the diagnostics. let project_c = client_c.build_remote_project(project_id, cx_c).await; - let project_c_diagnostic_summaries = Rc::new(RefCell::new(Vec::new())); + let project_c_diagnostic_summaries = + Rc::new(RefCell::new(project_c.read_with(cx_c, |project, cx| { + project.diagnostic_summaries(cx).collect::>() + }))); project_c.update(cx_c, |_, cx| { let summaries = project_c_diagnostic_summaries.clone(); cx.subscribe(&project_c, { From 5581674f8f4a8b256d986f20e0ddb4c1d84bc0af Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 28 Nov 2022 14:39:27 +0100 Subject: [PATCH 061/240] After completing LSP request, return an error if guest is disconnected --- crates/project/src/project.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 503ae8d4b2..30b0ac2506 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -4109,9 +4109,13 @@ impl Project { let message = request.to_proto(project_id, buffer); return cx.spawn(|this, cx| async move { let response = rpc.request(message).await?; - request - .response_from_proto(response, this, buffer_handle, cx) - .await + if this.read_with(&cx, |this, _| this.is_read_only()) { + Err(anyhow!("disconnected before completing request")) + } else { + request + .response_from_proto(response, this, buffer_handle, cx) + .await + } }); } Task::ready(Ok(Default::default())) From 2a0ddd99d28ab53d0e5df72145584f9a8949a48f Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 28 Nov 2022 15:05:34 +0100 Subject: [PATCH 062/240] Error if project is disconnected after getting code actions response --- crates/project/src/project.rs | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 30b0ac2506..fb77da9347 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -3579,7 +3579,7 @@ impl Project { } else if let Some(project_id) = self.remote_id() { let rpc = self.client.clone(); let version = buffer.version(); - cx.spawn_weak(|_, mut cx| async move { + cx.spawn_weak(|this, mut cx| async move { let response = rpc .request(proto::GetCodeActions { 
project_id, @@ -3590,17 +3590,27 @@ impl Project { }) .await?; - buffer_handle - .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(response.version)) - }) - .await; + if this + .upgrade(&cx) + .ok_or_else(|| anyhow!("project was dropped"))? + .read_with(&cx, |this, _| this.is_read_only()) + { + return Err(anyhow!( + "failed to get code actions: project was disconnected" + )); + } else { + buffer_handle + .update(&mut cx, |buffer, _| { + buffer.wait_for_version(deserialize_version(response.version)) + }) + .await; - response - .actions - .into_iter() - .map(language::proto::deserialize_code_action) - .collect() + response + .actions + .into_iter() + .map(language::proto::deserialize_code_action) + .collect() + } }) } else { Task::ready(Ok(Default::default())) From cd0b663f6285f24f74a6445bc870b2e94ab610cd Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 28 Nov 2022 17:00:47 +0100 Subject: [PATCH 063/240] Introduce per-room lock acquired before committing a transaction --- Cargo.lock | 14 ++ crates/collab/Cargo.toml | 1 + crates/collab/src/db.rs | 254 ++++++++++++++++++++++------------- crates/collab/src/rpc.rs | 281 +++++++++++++++++++++------------------ 4 files changed, 327 insertions(+), 223 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b6f86980ae..8cd5e7d6d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1041,6 +1041,7 @@ dependencies = [ "client", "collections", "ctor", + "dashmap", "editor", "env_logger", "envy", @@ -1536,6 +1537,19 @@ dependencies = [ "syn", ] +[[package]] +name = "dashmap" +version = "5.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" +dependencies = [ + "cfg-if 1.0.0", + "hashbrown 0.12.3", + "lock_api", + "once_cell", + "parking_lot_core 0.9.4", +] + [[package]] name = "data-url" version = "0.1.1" diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index f04918605f..e5a97b9764 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -24,6 +24,7 @@ axum = { version = "0.5", features = ["json", "headers", "ws"] } axum-extra = { version = "0.3", features = ["erased-json"] } base64 = "0.13" clap = { version = "3.1", features = ["derive"], optional = true } +dashmap = "5.4" envy = "0.4.2" futures = "0.3" hyper = "0.14" diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 295234af61..84ad5082d0 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -2,6 +2,7 @@ use crate::{Error, Result}; use anyhow::anyhow; use axum::http::StatusCode; use collections::{BTreeMap, HashMap, HashSet}; +use dashmap::DashMap; use futures::{future::BoxFuture, FutureExt, StreamExt}; use rpc::{proto, ConnectionId}; use serde::{Deserialize, Serialize}; @@ -10,8 +11,17 @@ use sqlx::{ types::Uuid, FromRow, }; -use std::{future::Future, path::Path, time::Duration}; +use std::{ + future::Future, + marker::PhantomData, + ops::{Deref, DerefMut}, + path::Path, + rc::Rc, + sync::Arc, + time::Duration, +}; use time::{OffsetDateTime, PrimitiveDateTime}; +use tokio::sync::{Mutex, OwnedMutexGuard}; #[cfg(test)] pub type DefaultDb = Db; @@ -21,12 +31,33 @@ pub type DefaultDb = Db; pub struct Db { pool: sqlx::Pool, + rooms: DashMap>>, #[cfg(test)] background: Option>, #[cfg(test)] runtime: Option, } +pub struct RoomGuard { + data: T, + _guard: OwnedMutexGuard<()>, + _not_send: PhantomData>, +} + +impl Deref for RoomGuard { + type Target = T; + + fn deref(&self) -> &T { + &self.data + } +} + +impl DerefMut 
for RoomGuard { + fn deref_mut(&mut self) -> &mut T { + &mut self.data + } +} + pub trait BeginTransaction: Send + Sync { type Database: sqlx::Database; @@ -90,6 +121,7 @@ impl Db { .await?; Ok(Self { pool, + rooms: Default::default(), background: None, runtime: None, }) @@ -197,6 +229,7 @@ impl Db { .await?; Ok(Self { pool, + rooms: DashMap::with_capacity(16384), #[cfg(test)] background: None, #[cfg(test)] @@ -922,13 +955,29 @@ where .await } + async fn commit_room_transaction<'a, T>( + &'a self, + room_id: RoomId, + tx: sqlx::Transaction<'static, D>, + data: T, + ) -> Result> { + let lock = self.rooms.entry(room_id).or_default().clone(); + let _guard = lock.lock_owned().await; + tx.commit().await?; + Ok(RoomGuard { + data, + _guard, + _not_send: PhantomData, + }) + } + pub async fn create_room( &self, user_id: UserId, connection_id: ConnectionId, - ) -> Result { + live_kit_room: &str, + ) -> Result> { self.transact(|mut tx| async move { - let live_kit_room = nanoid::nanoid!(30); let room_id = sqlx::query_scalar( " INSERT INTO rooms (live_kit_room) @@ -956,8 +1005,7 @@ where .await?; let room = self.get_room(room_id, &mut tx).await?; - tx.commit().await?; - Ok(room) + self.commit_room_transaction(room_id, tx, room).await }).await } @@ -968,11 +1016,17 @@ where calling_connection_id: ConnectionId, called_user_id: UserId, initial_project_id: Option, - ) -> Result<(proto::Room, proto::IncomingCall)> { + ) -> Result> { self.transact(|mut tx| async move { sqlx::query( " - INSERT INTO room_participants (room_id, user_id, calling_user_id, calling_connection_id, initial_project_id) + INSERT INTO room_participants ( + room_id, + user_id, + calling_user_id, + calling_connection_id, + initial_project_id + ) VALUES ($1, $2, $3, $4, $5) ", ) @@ -985,12 +1039,12 @@ where .await?; let room = self.get_room(room_id, &mut tx).await?; - tx.commit().await?; - let incoming_call = Self::build_incoming_call(&room, called_user_id) .ok_or_else(|| anyhow!("failed to build incoming call"))?; - Ok((room, incoming_call)) - }).await + self.commit_room_transaction(room_id, tx, (room, incoming_call)) + .await + }) + .await } pub async fn incoming_call_for_user( @@ -1051,7 +1105,7 @@ where &self, room_id: RoomId, called_user_id: UserId, - ) -> Result { + ) -> Result> { self.transact(|mut tx| async move { sqlx::query( " @@ -1065,8 +1119,7 @@ where .await?; let room = self.get_room(room_id, &mut tx).await?; - tx.commit().await?; - Ok(room) + self.commit_room_transaction(room_id, tx, room).await }) .await } @@ -1075,7 +1128,7 @@ where &self, expected_room_id: Option, user_id: UserId, - ) -> Result { + ) -> Result> { self.transact(|mut tx| async move { let room_id = sqlx::query_scalar( " @@ -1092,8 +1145,7 @@ where } let room = self.get_room(room_id, &mut tx).await?; - tx.commit().await?; - Ok(room) + self.commit_room_transaction(room_id, tx, room).await }) .await } @@ -1103,7 +1155,7 @@ where expected_room_id: Option, calling_connection_id: ConnectionId, called_user_id: UserId, - ) -> Result { + ) -> Result> { self.transact(|mut tx| async move { let room_id = sqlx::query_scalar( " @@ -1121,8 +1173,7 @@ where } let room = self.get_room(room_id, &mut tx).await?; - tx.commit().await?; - Ok(room) + self.commit_room_transaction(room_id, tx, room).await }).await } @@ -1131,7 +1182,7 @@ where room_id: RoomId, user_id: UserId, connection_id: ConnectionId, - ) -> Result { + ) -> Result> { self.transact(|mut tx| async move { sqlx::query( " @@ -1148,13 +1199,15 @@ where .await?; let room = self.get_room(room_id, &mut tx).await?; - 
tx.commit().await?; - Ok(room) + self.commit_room_transaction(room_id, tx, room).await }) .await } - pub async fn leave_room(&self, connection_id: ConnectionId) -> Result> { + pub async fn leave_room( + &self, + connection_id: ConnectionId, + ) -> Result>> { self.transact(|mut tx| async move { // Leave room. let room_id = sqlx::query_scalar::<_, RoomId>( @@ -1258,13 +1311,18 @@ where .await?; let room = self.get_room(room_id, &mut tx).await?; - tx.commit().await?; - - Ok(Some(LeftRoom { - room, - left_projects, - canceled_calls_to_user_ids, - })) + Ok(Some( + self.commit_room_transaction( + room_id, + tx, + LeftRoom { + room, + left_projects, + canceled_calls_to_user_ids, + }, + ) + .await?, + )) } else { Ok(None) } @@ -1277,7 +1335,7 @@ where room_id: RoomId, connection_id: ConnectionId, location: proto::ParticipantLocation, - ) -> Result { + ) -> Result> { self.transact(|tx| async { let mut tx = tx; let location_kind; @@ -1317,8 +1375,7 @@ where .await?; let room = self.get_room(room_id, &mut tx).await?; - tx.commit().await?; - Ok(room) + self.commit_room_transaction(room_id, tx, room).await }) .await } @@ -1478,7 +1535,7 @@ where expected_room_id: RoomId, connection_id: ConnectionId, worktrees: &[proto::WorktreeMetadata], - ) -> Result<(ProjectId, proto::Room)> { + ) -> Result> { self.transact(|mut tx| async move { let (room_id, user_id) = sqlx::query_as::<_, (RoomId, UserId)>( " @@ -1560,9 +1617,8 @@ where .await?; let room = self.get_room(room_id, &mut tx).await?; - tx.commit().await?; - - Ok((project_id, room)) + self.commit_room_transaction(room_id, tx, (project_id, room)) + .await }) .await } @@ -1571,7 +1627,7 @@ where &self, project_id: ProjectId, connection_id: ConnectionId, - ) -> Result<(proto::Room, Vec)> { + ) -> Result)>> { self.transact(|mut tx| async move { let guest_connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; let room_id: RoomId = sqlx::query_scalar( @@ -1586,9 +1642,8 @@ where .fetch_one(&mut tx) .await?; let room = self.get_room(room_id, &mut tx).await?; - tx.commit().await?; - - Ok((room, guest_connection_ids)) + self.commit_room_transaction(room_id, tx, (room, guest_connection_ids)) + .await }) .await } @@ -1598,7 +1653,7 @@ where project_id: ProjectId, connection_id: ConnectionId, worktrees: &[proto::WorktreeMetadata], - ) -> Result<(proto::Room, Vec)> { + ) -> Result)>> { self.transact(|mut tx| async move { let room_id: RoomId = sqlx::query_scalar( " @@ -1664,9 +1719,8 @@ where let guest_connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; let room = self.get_room(room_id, &mut tx).await?; - tx.commit().await?; - - Ok((room, guest_connection_ids)) + self.commit_room_transaction(room_id, tx, (room, guest_connection_ids)) + .await }) .await } @@ -1675,15 +1729,15 @@ where &self, update: &proto::UpdateWorktree, connection_id: ConnectionId, - ) -> Result> { + ) -> Result>> { self.transact(|mut tx| async move { let project_id = ProjectId::from_proto(update.project_id); let worktree_id = WorktreeId::from_proto(update.worktree_id); // Ensure the update comes from the host. 
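+        // This query also yields the project's room id, which the per-room
+        // lock taken in `commit_room_transaction` below requires.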
- sqlx::query( + let room_id: RoomId = sqlx::query_scalar( " - SELECT 1 + SELECT room_id FROM projects WHERE id = $1 AND host_connection_id = $2 ", @@ -1781,8 +1835,8 @@ where } let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; - tx.commit().await?; - Ok(connection_ids) + self.commit_room_transaction(room_id, tx, connection_ids) + .await }) .await } @@ -1791,7 +1845,7 @@ where &self, update: &proto::UpdateDiagnosticSummary, connection_id: ConnectionId, - ) -> Result> { + ) -> Result>> { self.transact(|mut tx| async { let project_id = ProjectId::from_proto(update.project_id); let worktree_id = WorktreeId::from_proto(update.worktree_id); @@ -1801,9 +1855,9 @@ where .ok_or_else(|| anyhow!("invalid summary"))?; // Ensure the update comes from the host. - sqlx::query( + let room_id: RoomId = sqlx::query_scalar( " - SELECT 1 + SELECT room_id FROM projects WHERE id = $1 AND host_connection_id = $2 ", @@ -1841,8 +1895,8 @@ where .await?; let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; - tx.commit().await?; - Ok(connection_ids) + self.commit_room_transaction(room_id, tx, connection_ids) + .await }) .await } @@ -1851,7 +1905,7 @@ where &self, update: &proto::StartLanguageServer, connection_id: ConnectionId, - ) -> Result> { + ) -> Result>> { self.transact(|mut tx| async { let project_id = ProjectId::from_proto(update.project_id); let server = update @@ -1860,9 +1914,9 @@ where .ok_or_else(|| anyhow!("invalid language server"))?; // Ensure the update comes from the host. - sqlx::query( + let room_id: RoomId = sqlx::query_scalar( " - SELECT 1 + SELECT room_id FROM projects WHERE id = $1 AND host_connection_id = $2 ", @@ -1888,8 +1942,8 @@ where .await?; let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; - tx.commit().await?; - Ok(connection_ids) + self.commit_room_transaction(room_id, tx, connection_ids) + .await }) .await } @@ -1898,7 +1952,7 @@ where &self, project_id: ProjectId, connection_id: ConnectionId, - ) -> Result<(Project, ReplicaId)> { + ) -> Result> { self.transact(|mut tx| async move { let (room_id, user_id) = sqlx::query_as::<_, (RoomId, UserId)>( " @@ -2068,21 +2122,25 @@ where .fetch_all(&mut tx) .await?; - tx.commit().await?; - Ok(( - Project { - collaborators, - worktrees, - language_servers: language_servers - .into_iter() - .map(|language_server| proto::LanguageServer { - id: language_server.id.to_proto(), - name: language_server.name, - }) - .collect(), - }, - replica_id as ReplicaId, - )) + self.commit_room_transaction( + room_id, + tx, + ( + Project { + collaborators, + worktrees, + language_servers: language_servers + .into_iter() + .map(|language_server| proto::LanguageServer { + id: language_server.id.to_proto(), + name: language_server.name, + }) + .collect(), + }, + replica_id as ReplicaId, + ), + ) + .await }) .await } @@ -2091,7 +2149,7 @@ where &self, project_id: ProjectId, connection_id: ConnectionId, - ) -> Result { + ) -> Result> { self.transact(|mut tx| async move { let result = sqlx::query( " @@ -2122,25 +2180,29 @@ where .map(|id| ConnectionId(id as u32)) .collect(); - let (host_user_id, host_connection_id) = sqlx::query_as::<_, (i32, i32)>( - " - SELECT host_user_id, host_connection_id + let (room_id, host_user_id, host_connection_id) = + sqlx::query_as::<_, (RoomId, i32, i32)>( + " + SELECT room_id, host_user_id, host_connection_id FROM projects WHERE id = $1 ", + ) + .bind(project_id) + .fetch_one(&mut tx) + .await?; + + self.commit_room_transaction( + room_id, + tx, + 
LeftProject { + id: project_id, + host_user_id: UserId(host_user_id), + host_connection_id: ConnectionId(host_connection_id as u32), + connection_ids, + }, ) - .bind(project_id) - .fetch_one(&mut tx) - .await?; - - tx.commit().await?; - - Ok(LeftProject { - id: project_id, - host_user_id: UserId(host_user_id), - host_connection_id: ConnectionId(host_connection_id as u32), - connection_ids, - }) + .await }) .await } @@ -2538,9 +2600,9 @@ where let result = self.runtime.as_ref().unwrap().block_on(body); - if let Some(background) = self.background.as_ref() { - background.simulate_random_delay().await; - } + // if let Some(background) = self.background.as_ref() { + // background.simulate_random_delay().await; + // } result } diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index ba97b09acd..07b9891480 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -42,6 +42,7 @@ use std::{ fmt, future::Future, marker::PhantomData, + mem, net::SocketAddr, ops::{Deref, DerefMut}, rc::Rc, @@ -702,20 +703,15 @@ async fn create_room( response: Response, session: Session, ) -> Result<()> { - let room = session - .db() - .await - .create_room(session.user_id, session.connection_id) - .await?; - + let live_kit_room = nanoid::nanoid!(30); let live_kit_connection_info = if let Some(live_kit) = session.live_kit_client.as_ref() { if let Some(_) = live_kit - .create_room(room.live_kit_room.clone()) + .create_room(live_kit_room.clone()) .await .trace_err() { if let Some(token) = live_kit - .room_token(&room.live_kit_room, &session.connection_id.to_string()) + .room_token(&live_kit_room, &session.connection_id.to_string()) .trace_err() { Some(proto::LiveKitConnectionInfo { @@ -732,10 +728,19 @@ async fn create_room( None }; - response.send(proto::CreateRoomResponse { - room: Some(room), - live_kit_connection_info, - })?; + { + let room = session + .db() + .await + .create_room(session.user_id, session.connection_id, &live_kit_room) + .await?; + + response.send(proto::CreateRoomResponse { + room: Some(room.clone()), + live_kit_connection_info, + })?; + } + update_user_contacts(session.user_id, &session).await?; Ok(()) } @@ -745,15 +750,20 @@ async fn join_room( response: Response, session: Session, ) -> Result<()> { - let room = session - .db() - .await - .join_room( - RoomId::from_proto(request.id), - session.user_id, - session.connection_id, - ) - .await?; + let room = { + let room = session + .db() + .await + .join_room( + RoomId::from_proto(request.id), + session.user_id, + session.connection_id, + ) + .await?; + room_updated(&room, &session); + room.clone() + }; + for connection_id in session .connection_pool() .await @@ -781,7 +791,6 @@ async fn join_room( None }; - room_updated(&room, &session); response.send(proto::JoinRoomResponse { room: Some(room), live_kit_connection_info, @@ -814,18 +823,21 @@ async fn call( return Err(anyhow!("cannot call a user who isn't a contact"))?; } - let (room, incoming_call) = session - .db() - .await - .call( - room_id, - calling_user_id, - calling_connection_id, - called_user_id, - initial_project_id, - ) - .await?; - room_updated(&room, &session); + let incoming_call = { + let (room, incoming_call) = &mut *session + .db() + .await + .call( + room_id, + calling_user_id, + calling_connection_id, + called_user_id, + initial_project_id, + ) + .await?; + room_updated(&room, &session); + mem::take(incoming_call) + }; update_user_contacts(called_user_id, &session).await?; let mut calls = session @@ -847,12 +859,14 @@ async fn call( } } - let room = 
session - .db() - .await - .call_failed(room_id, called_user_id) - .await?; - room_updated(&room, &session); + { + let room = session + .db() + .await + .call_failed(room_id, called_user_id) + .await?; + room_updated(&room, &session); + } update_user_contacts(called_user_id, &session).await?; Err(anyhow!("failed to ring user"))? @@ -865,11 +879,15 @@ async fn cancel_call( ) -> Result<()> { let called_user_id = UserId::from_proto(request.called_user_id); let room_id = RoomId::from_proto(request.room_id); - let room = session - .db() - .await - .cancel_call(Some(room_id), session.connection_id, called_user_id) - .await?; + { + let room = session + .db() + .await + .cancel_call(Some(room_id), session.connection_id, called_user_id) + .await?; + room_updated(&room, &session); + } + for connection_id in session .connection_pool() .await @@ -880,7 +898,6 @@ async fn cancel_call( .send(connection_id, proto::CallCanceled {}) .trace_err(); } - room_updated(&room, &session); response.send(proto::Ack {})?; update_user_contacts(called_user_id, &session).await?; @@ -889,11 +906,15 @@ async fn cancel_call( async fn decline_call(message: proto::DeclineCall, session: Session) -> Result<()> { let room_id = RoomId::from_proto(message.room_id); - let room = session - .db() - .await - .decline_call(Some(room_id), session.user_id) - .await?; + { + let room = session + .db() + .await + .decline_call(Some(room_id), session.user_id) + .await?; + room_updated(&room, &session); + } + for connection_id in session .connection_pool() .await @@ -904,7 +925,6 @@ async fn decline_call(message: proto::DeclineCall, session: Session) -> Result<( .send(connection_id, proto::CallCanceled {}) .trace_err(); } - room_updated(&room, &session); update_user_contacts(session.user_id, &session).await?; Ok(()) } @@ -933,7 +953,7 @@ async fn share_project( response: Response, session: Session, ) -> Result<()> { - let (project_id, room) = session + let (project_id, room) = &*session .db() .await .share_project( @@ -953,15 +973,17 @@ async fn share_project( async fn unshare_project(message: proto::UnshareProject, session: Session) -> Result<()> { let project_id = ProjectId::from_proto(message.project_id); - let (room, guest_connection_ids) = session + let (room, guest_connection_ids) = &*session .db() .await .unshare_project(project_id, session.connection_id) .await?; - broadcast(session.connection_id, guest_connection_ids, |conn_id| { - session.peer.send(conn_id, message.clone()) - }); + broadcast( + session.connection_id, + guest_connection_ids.iter().copied(), + |conn_id| session.peer.send(conn_id, message.clone()), + ); room_updated(&room, &session); Ok(()) @@ -977,7 +999,7 @@ async fn join_project( tracing::info!(%project_id, "join project"); - let (project, replica_id) = session + let (project, replica_id) = &mut *session .db() .await .join_project(project_id, session.connection_id) @@ -1029,7 +1051,7 @@ async fn join_project( language_servers: project.language_servers.clone(), })?; - for (worktree_id, worktree) in project.worktrees { + for (worktree_id, worktree) in mem::take(&mut project.worktrees) { #[cfg(any(test, feature = "test-support"))] const MAX_CHUNK_SIZE: usize = 2; #[cfg(not(any(test, feature = "test-support")))] @@ -1084,21 +1106,23 @@ async fn join_project( async fn leave_project(request: proto::LeaveProject, session: Session) -> Result<()> { let sender_id = session.connection_id; let project_id = ProjectId::from_proto(request.project_id); - let project; - { - project = session - .db() - .await - 
.leave_project(project_id, sender_id) - .await?; - tracing::info!( - %project_id, - host_user_id = %project.host_user_id, - host_connection_id = %project.host_connection_id, - "leave project" - ); - broadcast(sender_id, project.connection_ids, |conn_id| { + let project = session + .db() + .await + .leave_project(project_id, sender_id) + .await?; + tracing::info!( + %project_id, + host_user_id = %project.host_user_id, + host_connection_id = %project.host_connection_id, + "leave project" + ); + + broadcast( + sender_id, + project.connection_ids.iter().copied(), + |conn_id| { session.peer.send( conn_id, proto::RemoveProjectCollaborator { @@ -1106,8 +1130,8 @@ async fn leave_project(request: proto::LeaveProject, session: Session) -> Result peer_id: sender_id.0, }, ) - }); - } + }, + ); Ok(()) } @@ -1118,14 +1142,14 @@ async fn update_project( session: Session, ) -> Result<()> { let project_id = ProjectId::from_proto(request.project_id); - let (room, guest_connection_ids) = session + let (room, guest_connection_ids) = &*session .db() .await .update_project(project_id, session.connection_id, &request.worktrees) .await?; broadcast( session.connection_id, - guest_connection_ids, + guest_connection_ids.iter().copied(), |connection_id| { session .peer @@ -1151,7 +1175,7 @@ async fn update_worktree( broadcast( session.connection_id, - guest_connection_ids, + guest_connection_ids.iter().copied(), |connection_id| { session .peer @@ -1175,7 +1199,7 @@ async fn update_diagnostic_summary( broadcast( session.connection_id, - guest_connection_ids, + guest_connection_ids.iter().copied(), |connection_id| { session .peer @@ -1199,7 +1223,7 @@ async fn start_language_server( broadcast( session.connection_id, - guest_connection_ids, + guest_connection_ids.iter().copied(), |connection_id| { session .peer @@ -1826,52 +1850,61 @@ async fn update_user_contacts(user_id: UserId, session: &Session) -> Result<()> async fn leave_room_for_session(session: &Session) -> Result<()> { let mut contacts_to_update = HashSet::default(); - let Some(left_room) = session.db().await.leave_room(session.connection_id).await? else { - return Err(anyhow!("no room to leave"))?; - }; - contacts_to_update.insert(session.user_id); + let canceled_calls_to_user_ids; + let live_kit_room; + let delete_live_kit_room; + { + let Some(mut left_room) = session.db().await.leave_room(session.connection_id).await? 
else { + return Err(anyhow!("no room to leave"))?; + }; + contacts_to_update.insert(session.user_id); - for project in left_room.left_projects.into_values() { - for connection_id in project.connection_ids { - if project.host_user_id == session.user_id { - session - .peer - .send( - connection_id, - proto::UnshareProject { - project_id: project.id.to_proto(), - }, - ) - .trace_err(); - } else { - session - .peer - .send( - connection_id, - proto::RemoveProjectCollaborator { - project_id: project.id.to_proto(), - peer_id: session.connection_id.0, - }, - ) - .trace_err(); + for project in left_room.left_projects.values() { + for connection_id in &project.connection_ids { + if project.host_user_id == session.user_id { + session + .peer + .send( + *connection_id, + proto::UnshareProject { + project_id: project.id.to_proto(), + }, + ) + .trace_err(); + } else { + session + .peer + .send( + *connection_id, + proto::RemoveProjectCollaborator { + project_id: project.id.to_proto(), + peer_id: session.connection_id.0, + }, + ) + .trace_err(); + } } + + session + .peer + .send( + session.connection_id, + proto::UnshareProject { + project_id: project.id.to_proto(), + }, + ) + .trace_err(); } - session - .peer - .send( - session.connection_id, - proto::UnshareProject { - project_id: project.id.to_proto(), - }, - ) - .trace_err(); + room_updated(&left_room.room, &session); + canceled_calls_to_user_ids = mem::take(&mut left_room.canceled_calls_to_user_ids); + live_kit_room = mem::take(&mut left_room.room.live_kit_room); + delete_live_kit_room = left_room.room.participants.is_empty(); } - room_updated(&left_room.room, &session); { let pool = session.connection_pool().await; - for canceled_user_id in left_room.canceled_calls_to_user_ids { + for canceled_user_id in canceled_calls_to_user_ids { for connection_id in pool.user_connection_ids(canceled_user_id) { session .peer @@ -1888,18 +1921,12 @@ async fn leave_room_for_session(session: &Session) -> Result<()> { if let Some(live_kit) = session.live_kit_client.as_ref() { live_kit - .remove_participant( - left_room.room.live_kit_room.clone(), - session.connection_id.to_string(), - ) + .remove_participant(live_kit_room.clone(), session.connection_id.to_string()) .await .trace_err(); - if left_room.room.participants.is_empty() { - live_kit - .delete_room(left_room.room.live_kit_room) - .await - .trace_err(); + if delete_live_kit_room { + live_kit.delete_room(live_kit_room).await.trace_err(); } } From af2a2d2494e2f72194aed7d4d2b012f4694e2dec Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 28 Nov 2022 17:43:40 +0100 Subject: [PATCH 064/240] Return error when waiting on a worktree snapshot after disconnecting --- crates/project/src/worktree.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 836ac55b66..791cd1d622 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -81,6 +81,7 @@ pub struct RemoteWorktree { replica_id: ReplicaId, diagnostic_summaries: TreeMap, visible: bool, + disconnected: bool, } #[derive(Clone)] @@ -248,6 +249,7 @@ impl Worktree { client: client.clone(), diagnostic_summaries: Default::default(), visible, + disconnected: false, }) }); @@ -1069,6 +1071,7 @@ impl RemoteWorktree { pub fn disconnected_from_host(&mut self) { self.updates_tx.take(); self.snapshot_subscriptions.clear(); + self.disconnected = true; } pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) { @@ -1083,10 +1086,12 @@ 
impl RemoteWorktree { self.scan_id > scan_id || (self.scan_id == scan_id && self.is_complete) } - fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future { + fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future> { let (tx, rx) = oneshot::channel(); if self.observed_snapshot(scan_id) { let _ = tx.send(()); + } else if self.disconnected { + drop(tx); } else { match self .snapshot_subscriptions @@ -1097,7 +1102,8 @@ impl RemoteWorktree { } async move { - let _ = rx.await; + rx.await?; + Ok(()) } } @@ -1126,7 +1132,7 @@ impl RemoteWorktree { ) -> Task> { let wait_for_snapshot = self.wait_for_snapshot(scan_id); cx.spawn(|this, mut cx| async move { - wait_for_snapshot.await; + wait_for_snapshot.await?; this.update(&mut cx, |worktree, _| { let worktree = worktree.as_remote_mut().unwrap(); let mut snapshot = worktree.background_snapshot.lock(); @@ -1145,7 +1151,7 @@ impl RemoteWorktree { ) -> Task> { let wait_for_snapshot = self.wait_for_snapshot(scan_id); cx.spawn(|this, mut cx| async move { - wait_for_snapshot.await; + wait_for_snapshot.await?; this.update(&mut cx, |worktree, _| { let worktree = worktree.as_remote_mut().unwrap(); let mut snapshot = worktree.background_snapshot.lock(); From 0a565c6bae9e8ce1377c44bb608b2e305120ac75 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 28 Nov 2022 17:44:18 +0100 Subject: [PATCH 065/240] :lipstick: --- crates/collab/src/db.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 84ad5082d0..eff97855c6 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -2598,13 +2598,7 @@ where background.simulate_random_delay().await; } - let result = self.runtime.as_ref().unwrap().block_on(body); - - // if let Some(background) = self.background.as_ref() { - // background.simulate_random_delay().await; - // } - - result + self.runtime.as_ref().unwrap().block_on(body) } #[cfg(not(test))] From f0a721032d70f58469a61c399a64d24ce748752e Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 28 Nov 2022 18:56:11 +0100 Subject: [PATCH 066/240] Remove non-determinism caused by random entropy when reconnecting --- crates/client/src/client.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index bad85384be..c75aef3a1a 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -398,7 +398,11 @@ impl Client { let this = self.clone(); let reconnect_interval = state.reconnect_interval; state._reconnect_task = Some(cx.spawn(|cx| async move { + #[cfg(any(test, feature = "test-support"))] + let mut rng = StdRng::seed_from_u64(0); + #[cfg(not(any(test, feature = "test-support")))] let mut rng = StdRng::from_entropy(); + let mut delay = INITIAL_RECONNECTION_DELAY; while let Err(error) = this.authenticate_and_connect(true, &cx).await { log::error!("failed to connect {}", error); From fa3f100effebd136ad9e2a4a53908aa979465dd3 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 28 Nov 2022 19:01:28 +0100 Subject: [PATCH 067/240] Introduce a new `detect_nondeterminism = true` attribute to `gpui::test` --- crates/gpui/src/executor.rs | 72 ++++++++++++++++++++--- crates/gpui/src/test.rs | 84 ++++++++++++++++++++++----- crates/gpui_macros/src/gpui_macros.rs | 27 ++++++--- 3 files changed, 150 insertions(+), 33 deletions(-) diff --git a/crates/gpui/src/executor.rs b/crates/gpui/src/executor.rs index 0639445b0d..5231f8a51a 100644 --- a/crates/gpui/src/executor.rs +++ 
b/crates/gpui/src/executor.rs @@ -66,21 +66,31 @@ struct DeterministicState { rng: rand::prelude::StdRng, seed: u64, scheduled_from_foreground: collections::HashMap>, - scheduled_from_background: Vec, + scheduled_from_background: Vec, forbid_parking: bool, block_on_ticks: std::ops::RangeInclusive, now: std::time::Instant, next_timer_id: usize, pending_timers: Vec<(usize, std::time::Instant, postage::barrier::Sender)>, waiting_backtrace: Option, + next_runnable_id: usize, + poll_history: Vec, + runnable_backtraces: collections::HashMap, } #[cfg(any(test, feature = "test-support"))] struct ForegroundRunnable { + id: usize, runnable: Runnable, main: bool, } +#[cfg(any(test, feature = "test-support"))] +struct BackgroundRunnable { + id: usize, + runnable: Runnable, +} + #[cfg(any(test, feature = "test-support"))] pub struct Deterministic { state: Arc>, @@ -117,11 +127,24 @@ impl Deterministic { next_timer_id: Default::default(), pending_timers: Default::default(), waiting_backtrace: None, + next_runnable_id: 0, + poll_history: Default::default(), + runnable_backtraces: Default::default(), })), parker: Default::default(), }) } + pub fn runnable_history(&self) -> Vec { + self.state.lock().poll_history.clone() + } + + pub fn runnable_backtrace(&self, runnable_id: usize) -> backtrace::Backtrace { + let mut backtrace = self.state.lock().runnable_backtraces[&runnable_id].clone(); + backtrace.resolve(); + backtrace + } + pub fn build_background(self: &Arc) -> Arc { Arc::new(Background::Deterministic { executor: self.clone(), @@ -142,6 +165,15 @@ impl Deterministic { main: bool, ) -> AnyLocalTask { let state = self.state.clone(); + let id; + { + let mut state = state.lock(); + id = util::post_inc(&mut state.next_runnable_id); + state + .runnable_backtraces + .insert(id, backtrace::Backtrace::new_unresolved()); + } + let unparker = self.parker.lock().unparker(); let (runnable, task) = async_task::spawn_local(future, move |runnable| { let mut state = state.lock(); @@ -149,7 +181,7 @@ impl Deterministic { .scheduled_from_foreground .entry(cx_id) .or_default() - .push(ForegroundRunnable { runnable, main }); + .push(ForegroundRunnable { id, runnable, main }); unparker.unpark(); }); runnable.schedule(); @@ -158,10 +190,21 @@ impl Deterministic { fn spawn(&self, future: AnyFuture) -> AnyTask { let state = self.state.clone(); + let id; + { + let mut state = state.lock(); + id = util::post_inc(&mut state.next_runnable_id); + state + .runnable_backtraces + .insert(id, backtrace::Backtrace::new_unresolved()); + } + let unparker = self.parker.lock().unparker(); let (runnable, task) = async_task::spawn(future, move |runnable| { let mut state = state.lock(); - state.scheduled_from_background.push(runnable); + state + .scheduled_from_background + .push(BackgroundRunnable { id, runnable }); unparker.unpark(); }); runnable.schedule(); @@ -178,15 +221,25 @@ impl Deterministic { let woken = Arc::new(AtomicBool::new(false)); let state = self.state.clone(); + let id; + { + let mut state = state.lock(); + id = util::post_inc(&mut state.next_runnable_id); + state + .runnable_backtraces + .insert(id, backtrace::Backtrace::new()); + } + let unparker = self.parker.lock().unparker(); let (runnable, mut main_task) = unsafe { async_task::spawn_unchecked(main_future, move |runnable| { - let mut state = state.lock(); + let state = &mut *state.lock(); state .scheduled_from_foreground .entry(cx_id) .or_default() .push(ForegroundRunnable { + id: util::post_inc(&mut state.next_runnable_id), runnable, main: true, }); @@ -248,9 +301,10 
@@ impl Deterministic { if !state.scheduled_from_background.is_empty() && state.rng.gen() { let background_len = state.scheduled_from_background.len(); let ix = state.rng.gen_range(0..background_len); - let runnable = state.scheduled_from_background.remove(ix); + let background_runnable = state.scheduled_from_background.remove(ix); + state.poll_history.push(background_runnable.id); drop(state); - runnable.run(); + background_runnable.runnable.run(); } else if !state.scheduled_from_foreground.is_empty() { let available_cx_ids = state .scheduled_from_foreground @@ -266,6 +320,7 @@ impl Deterministic { if scheduled_from_cx.is_empty() { state.scheduled_from_foreground.remove(&cx_id_to_run); } + state.poll_history.push(foreground_runnable.id); drop(state); @@ -298,9 +353,10 @@ impl Deterministic { let runnable_count = state.scheduled_from_background.len(); let ix = state.rng.gen_range(0..=runnable_count); if ix < state.scheduled_from_background.len() { - let runnable = state.scheduled_from_background.remove(ix); + let background_runnable = state.scheduled_from_background.remove(ix); + state.poll_history.push(background_runnable.id); drop(state); - runnable.run(); + background_runnable.runnable.run(); } else { drop(state); if let Poll::Ready(result) = future.poll(&mut cx) { diff --git a/crates/gpui/src/test.rs b/crates/gpui/src/test.rs index e76b094c9a..665033a71c 100644 --- a/crates/gpui/src/test.rs +++ b/crates/gpui/src/test.rs @@ -1,11 +1,13 @@ use crate::{ - elements::Empty, executor, platform, Element, ElementBox, Entity, FontCache, Handle, - LeakDetector, MutableAppContext, Platform, RenderContext, Subscription, TestAppContext, View, + elements::Empty, executor, platform, util::CwdBacktrace, Element, ElementBox, Entity, + FontCache, Handle, LeakDetector, MutableAppContext, Platform, RenderContext, Subscription, + TestAppContext, View, }; use futures::StreamExt; use parking_lot::Mutex; use smol::channel; use std::{ + fmt::Write, panic::{self, RefUnwindSafe}, rc::Rc, sync::{ @@ -29,13 +31,13 @@ pub fn run_test( mut num_iterations: u64, mut starting_seed: u64, max_retries: usize, + detect_nondeterminism: bool, test_fn: &mut (dyn RefUnwindSafe + Fn( &mut MutableAppContext, Rc, Arc, u64, - bool, )), fn_name: String, ) { @@ -60,10 +62,10 @@ pub fn run_test( let platform = Arc::new(platform::test::platform()); let font_system = platform.fonts(); let font_cache = Arc::new(FontCache::new(font_system)); + let mut prev_runnable_history: Option> = None; - loop { - let seed = atomic_seed.fetch_add(1, SeqCst); - let is_last_iteration = seed + 1 >= starting_seed + num_iterations; + for _ in 0..num_iterations { + let seed = atomic_seed.load(SeqCst); if is_randomized { dbg!(seed); @@ -82,13 +84,7 @@ pub fn run_test( fn_name.clone(), ); cx.update(|cx| { - test_fn( - cx, - foreground_platform.clone(), - deterministic.clone(), - seed, - is_last_iteration, - ); + test_fn(cx, foreground_platform.clone(), deterministic.clone(), seed); }); cx.update(|cx| cx.remove_all_windows()); @@ -96,8 +92,64 @@ pub fn run_test( cx.update(|cx| cx.clear_globals()); leak_detector.lock().detect(); - if is_last_iteration { - break; + + if detect_nondeterminism { + let curr_runnable_history = deterministic.runnable_history(); + if let Some(prev_runnable_history) = prev_runnable_history { + let mut prev_entries = prev_runnable_history.iter().fuse(); + let mut curr_entries = curr_runnable_history.iter().fuse(); + + let mut nondeterministic = false; + let mut common_history_prefix = Vec::new(); + let mut prev_history_suffix = 
Vec::new(); + let mut curr_history_suffix = Vec::new(); + loop { + match (prev_entries.next(), curr_entries.next()) { + (None, None) => break, + (None, Some(curr_id)) => curr_history_suffix.push(*curr_id), + (Some(prev_id), None) => prev_history_suffix.push(*prev_id), + (Some(prev_id), Some(curr_id)) => { + if nondeterministic { + prev_history_suffix.push(*prev_id); + curr_history_suffix.push(*curr_id); + } else if prev_id == curr_id { + common_history_prefix.push(*curr_id); + } else { + nondeterministic = true; + prev_history_suffix.push(*prev_id); + curr_history_suffix.push(*curr_id); + } + } + } + } + + if nondeterministic { + let mut error = String::new(); + writeln!(&mut error, "Common prefix: {:?}", common_history_prefix) + .unwrap(); + writeln!(&mut error, "Previous suffix: {:?}", prev_history_suffix) + .unwrap(); + writeln!(&mut error, "Current suffix: {:?}", curr_history_suffix) + .unwrap(); + + let last_common_backtrace = common_history_prefix + .last() + .map(|runnable_id| deterministic.runnable_backtrace(*runnable_id)); + + writeln!( + &mut error, + "Last future that ran on both executions: {:?}", + last_common_backtrace.as_ref().map(CwdBacktrace) + ) + .unwrap(); + panic!("Detected non-determinism.\n{}", error); + } + } + prev_runnable_history = Some(curr_runnable_history); + } + + if !detect_nondeterminism { + atomic_seed.fetch_add(1, SeqCst); } } }); @@ -112,7 +164,7 @@ pub fn run_test( println!("retrying: attempt {}", retries); } else { if is_randomized { - eprintln!("failing seed: {}", atomic_seed.load(SeqCst) - 1); + eprintln!("failing seed: {}", atomic_seed.load(SeqCst)); } panic::resume_unwind(error); } diff --git a/crates/gpui_macros/src/gpui_macros.rs b/crates/gpui_macros/src/gpui_macros.rs index b43bedc643..e28d1711d2 100644 --- a/crates/gpui_macros/src/gpui_macros.rs +++ b/crates/gpui_macros/src/gpui_macros.rs @@ -14,6 +14,7 @@ pub fn test(args: TokenStream, function: TokenStream) -> TokenStream { let mut max_retries = 0; let mut num_iterations = 1; let mut starting_seed = 0; + let mut detect_nondeterminism = false; for arg in args { match arg { @@ -26,6 +27,9 @@ pub fn test(args: TokenStream, function: TokenStream) -> TokenStream { let key_name = meta.path.get_ident().map(|i| i.to_string()); let result = (|| { match key_name.as_deref() { + Some("detect_nondeterminism") => { + detect_nondeterminism = parse_bool(&meta.lit)? 
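+                            // Grounded in run_test above: opting in via
+                            // `detect_nondeterminism = true` replays every iteration
+                            // with the same seed and compares the recorded runnable
+                            // poll histories, panicking on the first divergence.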
+ } Some("retries") => max_retries = parse_int(&meta.lit)?, Some("iterations") => num_iterations = parse_int(&meta.lit)?, Some("seed") => starting_seed = parse_int(&meta.lit)?, @@ -77,10 +81,6 @@ pub fn test(args: TokenStream, function: TokenStream) -> TokenStream { inner_fn_args.extend(quote!(rand::SeedableRng::seed_from_u64(seed),)); continue; } - Some("bool") => { - inner_fn_args.extend(quote!(is_last_iteration,)); - continue; - } Some("Arc") => { if let syn::PathArguments::AngleBracketed(args) = &last_segment.unwrap().arguments @@ -146,7 +146,8 @@ pub fn test(args: TokenStream, function: TokenStream) -> TokenStream { #num_iterations as u64, #starting_seed as u64, #max_retries, - &mut |cx, foreground_platform, deterministic, seed, is_last_iteration| { + #detect_nondeterminism, + &mut |cx, foreground_platform, deterministic, seed| { #cx_vars cx.foreground().run(#inner_fn_name(#inner_fn_args)); #cx_teardowns @@ -165,9 +166,6 @@ pub fn test(args: TokenStream, function: TokenStream) -> TokenStream { Some("StdRng") => { inner_fn_args.extend(quote!(rand::SeedableRng::seed_from_u64(seed),)); } - Some("bool") => { - inner_fn_args.extend(quote!(is_last_iteration,)); - } _ => {} } } else { @@ -189,7 +187,8 @@ pub fn test(args: TokenStream, function: TokenStream) -> TokenStream { #num_iterations as u64, #starting_seed as u64, #max_retries, - &mut |cx, _, _, seed, is_last_iteration| #inner_fn_name(#inner_fn_args), + #detect_nondeterminism, + &mut |cx, _, _, seed| #inner_fn_name(#inner_fn_args), stringify!(#outer_fn_name).to_string(), ); } @@ -209,3 +208,13 @@ fn parse_int(literal: &Lit) -> Result { result.map_err(|err| TokenStream::from(err.into_compile_error())) } + +fn parse_bool(literal: &Lit) -> Result { + let result = if let Lit::Bool(result) = &literal { + Ok(result.value) + } else { + Err(syn::Error::new(literal.span(), "must be a boolean")) + }; + + result.map_err(|err| TokenStream::from(err.into_compile_error())) +} From d0709e7bfa53d128aaeb3b7dab49d28dd735f7ce Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 28 Nov 2022 19:18:31 +0100 Subject: [PATCH 068/240] Error if project is disconnected after getting completions response --- crates/project/src/project.rs | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index fb77da9347..a3439430fd 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -3408,19 +3408,29 @@ impl Project { position: Some(language::proto::serialize_anchor(&anchor)), version: serialize_version(&source_buffer.version()), }; - cx.spawn_weak(|_, mut cx| async move { + cx.spawn_weak(|this, mut cx| async move { let response = rpc.request(message).await?; - source_buffer_handle - .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(response.version)) - }) - .await; + if this + .upgrade(&cx) + .ok_or_else(|| anyhow!("project was dropped"))? 
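+                // If the project was dropped or went read-only (host disconnected)
+                // while the request was in flight, bail out here: waiting on the
+                // buffer version below could otherwise never complete.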
+ .read_with(&cx, |this, _| this.is_read_only()) + { + return Err(anyhow!( + "failed to get completions: project was disconnected" + )); + } else { + source_buffer_handle + .update(&mut cx, |buffer, _| { + buffer.wait_for_version(deserialize_version(response.version)) + }) + .await; - let completions = response.completions.into_iter().map(|completion| { - language::proto::deserialize_completion(completion, language.clone()) - }); - futures::future::try_join_all(completions).await + let completions = response.completions.into_iter().map(|completion| { + language::proto::deserialize_completion(completion, language.clone()) + }); + futures::future::try_join_all(completions).await + } }) } else { Task::ready(Ok(Default::default())) From cd2a8579b9dbd2ed2023d4da2d24b6219861c25e Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 28 Nov 2022 19:35:33 +0100 Subject: [PATCH 069/240] Capture runnable backtraces only when detecting nondeterminism --- crates/gpui/src/executor.rs | 30 +++++++++++++++++++++--------- crates/gpui/src/test.rs | 4 ++++ 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/crates/gpui/src/executor.rs b/crates/gpui/src/executor.rs index 5231f8a51a..876e48351d 100644 --- a/crates/gpui/src/executor.rs +++ b/crates/gpui/src/executor.rs @@ -75,6 +75,7 @@ struct DeterministicState { waiting_backtrace: Option, next_runnable_id: usize, poll_history: Vec, + enable_runnable_backtraces: bool, runnable_backtraces: collections::HashMap, } @@ -129,6 +130,7 @@ impl Deterministic { waiting_backtrace: None, next_runnable_id: 0, poll_history: Default::default(), + enable_runnable_backtraces: false, runnable_backtraces: Default::default(), })), parker: Default::default(), @@ -139,6 +141,10 @@ impl Deterministic { self.state.lock().poll_history.clone() } + pub fn enable_runnable_backtrace(&self) { + self.state.lock().enable_runnable_backtraces = true; + } + pub fn runnable_backtrace(&self, runnable_id: usize) -> backtrace::Backtrace { let mut backtrace = self.state.lock().runnable_backtraces[&runnable_id].clone(); backtrace.resolve(); @@ -169,9 +175,11 @@ impl Deterministic { { let mut state = state.lock(); id = util::post_inc(&mut state.next_runnable_id); - state - .runnable_backtraces - .insert(id, backtrace::Backtrace::new_unresolved()); + if state.enable_runnable_backtraces { + state + .runnable_backtraces + .insert(id, backtrace::Backtrace::new_unresolved()); + } } let unparker = self.parker.lock().unparker(); @@ -194,9 +202,11 @@ impl Deterministic { { let mut state = state.lock(); id = util::post_inc(&mut state.next_runnable_id); - state - .runnable_backtraces - .insert(id, backtrace::Backtrace::new_unresolved()); + if state.enable_runnable_backtraces { + state + .runnable_backtraces + .insert(id, backtrace::Backtrace::new_unresolved()); + } } let unparker = self.parker.lock().unparker(); @@ -225,9 +235,11 @@ impl Deterministic { { let mut state = state.lock(); id = util::post_inc(&mut state.next_runnable_id); - state - .runnable_backtraces - .insert(id, backtrace::Backtrace::new()); + if state.enable_runnable_backtraces { + state + .runnable_backtraces + .insert(id, backtrace::Backtrace::new_unresolved()); + } } let unparker = self.parker.lock().unparker(); diff --git a/crates/gpui/src/test.rs b/crates/gpui/src/test.rs index 665033a71c..aade1054a8 100644 --- a/crates/gpui/src/test.rs +++ b/crates/gpui/src/test.rs @@ -72,6 +72,10 @@ pub fn run_test( } let deterministic = executor::Deterministic::new(seed); + if detect_nondeterminism { + 
deterministic.enable_runnable_backtrace(); + } + let leak_detector = Arc::new(Mutex::new(LeakDetector::default())); let mut cx = TestAppContext::new( foreground_platform.clone(), From d2cd9c94f7fd9d69bfe1156cce8676e44ffb3935 Mon Sep 17 00:00:00 2001 From: Joseph Lyons Date: Mon, 28 Nov 2022 18:56:27 -0500 Subject: [PATCH 070/240] Remove sign in telemetry event --- crates/client/src/user.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/client/src/user.rs b/crates/client/src/user.rs index 11b9ef6117..4d29669c2f 100644 --- a/crates/client/src/user.rs +++ b/crates/client/src/user.rs @@ -150,7 +150,6 @@ impl UserStore { client.telemetry.set_authenticated_user_info(None, false); } - client.telemetry.report_event("sign in", Default::default()); current_user_tx.send(user).await.ok(); } } From 4436ec48ebab64e08b4360250b2d21d4cb33b04b Mon Sep 17 00:00:00 2001 From: Joseph Lyons Date: Fri, 25 Nov 2022 15:04:15 -0500 Subject: [PATCH 071/240] Add "added_to_mailing_list" column on signups table --- ...20221125192125_add_added_to_mailing_list_to_signups.sql | 2 ++ crates/collab/src/db.rs | 7 +++++-- crates/collab/src/db_tests.rs | 1 + 3 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 crates/collab/migrations/20221125192125_add_added_to_mailing_list_to_signups.sql diff --git a/crates/collab/migrations/20221125192125_add_added_to_mailing_list_to_signups.sql b/crates/collab/migrations/20221125192125_add_added_to_mailing_list_to_signups.sql new file mode 100644 index 0000000000..b154396df1 --- /dev/null +++ b/crates/collab/migrations/20221125192125_add_added_to_mailing_list_to_signups.sql @@ -0,0 +1,2 @@ +ALTER TABLE "signups" + ADD "added_to_mailing_list" BOOLEAN NOT NULL DEFAULT FALSE; \ No newline at end of file diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 1609764f6e..85ace9a5f2 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -390,10 +390,11 @@ impl Db { platform_unknown, editor_features, programming_languages, - device_id + device_id, + added_to_mailing_list ) VALUES - ($1, $2, FALSE, $3, $4, $5, FALSE, $6, $7, $8) + ($1, $2, FALSE, $3, $4, $5, FALSE, $6, $7, $8, $9) ON CONFLICT (email_address) DO UPDATE SET email_address = excluded.email_address RETURNING id @@ -407,6 +408,7 @@ impl Db { .bind(&signup.editor_features) .bind(&signup.programming_languages) .bind(&signup.device_id) + .bind(&signup.added_to_mailing_list) .execute(&self.pool) .await?; Ok(()) @@ -1270,6 +1272,7 @@ pub struct Signup { pub editor_features: Vec, pub programming_languages: Vec, pub device_id: Option, + pub added_to_mailing_list: bool, } #[derive(Clone, Debug, PartialEq, Deserialize, Serialize, FromRow)] diff --git a/crates/collab/src/db_tests.rs b/crates/collab/src/db_tests.rs index b3f964b8a7..6260eadc4a 100644 --- a/crates/collab/src/db_tests.rs +++ b/crates/collab/src/db_tests.rs @@ -657,6 +657,7 @@ async fn test_signups() { editor_features: vec!["speed".into()], programming_languages: vec!["rust".into(), "c".into()], device_id: Some(format!("device_id_{i}")), + added_to_mailing_list: i != 0, // One user failed to subscribe }) .collect::>(); From d525cfd697efae7a06e605c51f2da4703fdc484e Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 29 Nov 2022 11:02:14 +0100 Subject: [PATCH 072/240] Increase probability of creating new files in randomized test --- crates/collab/src/integration_tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 
cf6bb8af3a..93ff73fc83 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -6391,7 +6391,7 @@ impl TestClient { buffers.extend(search.await?.into_keys()); } } - 60..=69 => { + 60..=79 => { let worktree = project .read_with(cx, |project, cx| { project From ac24600a4022716bc1aa4c305572b4e7141d5ec2 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 29 Nov 2022 13:55:08 +0100 Subject: [PATCH 073/240] Start moving towards using sea-query to construct queries --- Cargo.lock | 34 +++++ Cargo.toml | 1 + crates/collab/Cargo.toml | 15 +- crates/collab/src/db.rs | 134 +++++++++++------- crates/collab/src/db/schema.rs | 43 ++++++ .../collab/src/{db_tests.rs => db/tests.rs} | 2 +- crates/collab/src/main.rs | 2 - 7 files changed, 168 insertions(+), 63 deletions(-) create mode 100644 crates/collab/src/db/schema.rs rename crates/collab/src/{db_tests.rs => db/tests.rs} (99%) diff --git a/Cargo.lock b/Cargo.lock index 8cd5e7d6d7..5083b91312 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1065,6 +1065,8 @@ dependencies = [ "reqwest", "rpc", "scrypt", + "sea-query", + "sea-query-binder", "serde", "serde_json", "settings", @@ -5121,6 +5123,38 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sea-query" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4f0fc4d8e44e1d51c739a68d336252a18bc59553778075d5e32649be6ec92ed" +dependencies = [ + "sea-query-derive", +] + +[[package]] +name = "sea-query-binder" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c2585b89c985cfacfe0ec9fc9e7bb055b776c1a2581c4e3c6185af2b8bf8865" +dependencies = [ + "sea-query", + "sqlx", +] + +[[package]] +name = "sea-query-derive" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34cdc022b4f606353fe5dc85b09713a04e433323b70163e81513b141c6ae6eb5" +dependencies = [ + "heck 0.3.3", + "proc-macro2", + "quote", + "syn", + "thiserror", +] + [[package]] name = "seahash" version = "4.1.0" diff --git a/Cargo.toml b/Cargo.toml index 205017da1f..03fcb4cfd9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,6 +67,7 @@ rand = { version = "0.8" } [patch.crates-io] tree-sitter = { git = "https://github.com/tree-sitter/tree-sitter", rev = "366210ae925d7ea0891bc7a0c738f60c77c04d7b" } async-task = { git = "https://github.com/zed-industries/async-task", rev = "341b57d6de98cdfd7b418567b8de2022ca993a6e" } +sqlx = { git = "https://github.com/launchbadge/sqlx", rev = "4b7053807c705df312bcb9b6281e184bf7534eb3" } # TODO - Remove when a version is released with this PR: https://github.com/servo/core-foundation-rs/pull/457 cocoa = { git = "https://github.com/servo/core-foundation-rs", rev = "079665882507dd5e2ff77db3de5070c1f6c0fb85" } diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index e5a97b9764..e854b003c8 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -36,9 +36,12 @@ prometheus = "0.13" rand = "0.8" reqwest = { version = "0.11", features = ["json"], optional = true } scrypt = "0.7" +sea-query = { version = "0.27", features = ["derive"] } +sea-query-binder = { version = "0.2", features = ["sqlx-postgres"] } serde = { version = "1.0", features = ["derive", "rc"] } serde_json = "1.0" sha-1 = "0.9" +sqlx = { version = "0.6", features = ["runtime-tokio-rustls", "postgres", "json", "time", "uuid"] } time = { version = "0.3", features = ["serde", "serde-well-known"] } tokio = { version = "1", features = ["full"] } 
tokio-tungstenite = "0.17" @@ -49,11 +52,6 @@ tracing = "0.1.34" tracing-log = "0.1.3" tracing-subscriber = { version = "0.3.11", features = ["env-filter", "json"] } -[dependencies.sqlx] -git = "https://github.com/launchbadge/sqlx" -rev = "4b7053807c705df312bcb9b6281e184bf7534eb3" -features = ["runtime-tokio-rustls", "postgres", "json", "time", "uuid"] - [dev-dependencies] collections = { path = "../collections", features = ["test-support"] } gpui = { path = "../gpui", features = ["test-support"] } @@ -76,13 +74,10 @@ env_logger = "0.9" log = { version = "0.4.16", features = ["kv_unstable_serde"] } util = { path = "../util" } lazy_static = "1.4" +sea-query-binder = { version = "0.2", features = ["sqlx-sqlite"] } serde_json = { version = "1.0", features = ["preserve_order"] } +sqlx = { version = "0.6", features = ["sqlite"] } unindent = "0.1" -[dev-dependencies.sqlx] -git = "https://github.com/launchbadge/sqlx" -rev = "4b7053807c705df312bcb9b6281e184bf7534eb3" -features = ["sqlite"] - [features] seed-support = ["clap", "lipsum", "reqwest"] diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index eff97855c6..044d4ef8d7 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1,3 +1,7 @@ +mod schema; +#[cfg(test)] +mod tests; + use crate::{Error, Result}; use anyhow::anyhow; use axum::http::StatusCode; @@ -5,6 +9,8 @@ use collections::{BTreeMap, HashMap, HashSet}; use dashmap::DashMap; use futures::{future::BoxFuture, FutureExt, StreamExt}; use rpc::{proto, ConnectionId}; +use sea_query::{Expr, Query}; +use sea_query_binder::SqlxBinder; use serde::{Deserialize, Serialize}; use sqlx::{ migrate::{Migrate as _, Migration, MigrationSource}, @@ -89,6 +95,23 @@ impl BeginTransaction for Db { } } +pub trait BuildQuery { + fn build_query(&self, query: &T) -> (String, sea_query_binder::SqlxValues); +} + +impl BuildQuery for Db { + fn build_query(&self, query: &T) -> (String, sea_query_binder::SqlxValues) { + query.build_sqlx(sea_query::PostgresQueryBuilder) + } +} + +#[cfg(test)] +impl BuildQuery for Db { + fn build_query(&self, query: &T) -> (String, sea_query_binder::SqlxValues) { + query.build_sqlx(sea_query::SqliteQueryBuilder) + } +} + pub trait RowsAffected { fn rows_affected(&self) -> u64; } @@ -595,10 +618,11 @@ impl Db { impl Db where - Self: BeginTransaction, + Self: BeginTransaction + BuildQuery, D: sqlx::Database + sqlx::migrate::MigrateDatabase, D::Connection: sqlx::migrate::Migrate, for<'a> >::Arguments: sqlx::IntoArguments<'a, D>, + for<'a> sea_query_binder::SqlxValues: sqlx::IntoArguments<'a, D>, for<'a> &'a mut D::Connection: sqlx::Executor<'a, Database = D>, for<'a, 'b> &'b mut sqlx::Transaction<'a, D>: sqlx::Executor<'b, Database = D>, D::QueryResult: RowsAffected, @@ -1537,63 +1561,66 @@ where worktrees: &[proto::WorktreeMetadata], ) -> Result> { self.transact(|mut tx| async move { - let (room_id, user_id) = sqlx::query_as::<_, (RoomId, UserId)>( - " - SELECT room_id, user_id - FROM room_participants - WHERE answering_connection_id = $1 - ", - ) - .bind(connection_id.0 as i32) - .fetch_one(&mut tx) - .await?; + let (sql, values) = self.build_query( + Query::select() + .columns([ + schema::room_participant::Definition::RoomId, + schema::room_participant::Definition::UserId, + ]) + .from(schema::room_participant::Definition::Table) + .and_where( + Expr::col(schema::room_participant::Definition::AnsweringConnectionId) + .eq(connection_id.0), + ), + ); + let (room_id, user_id) = sqlx::query_as_with::<_, (RoomId, UserId), _>(&sql, values) + .fetch_one(&mut tx) 
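+                // `sql` and `values` come from build_query above, which renders the
+                // sea-query AST for the active backend (Postgres or SQLite) and
+                // hands the values back to sqlx for binding via SqlxValues.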
+ .await?; if room_id != expected_room_id { return Err(anyhow!("shared project on unexpected room"))?; } - let project_id: ProjectId = sqlx::query_scalar( - " - INSERT INTO projects (room_id, host_user_id, host_connection_id) - VALUES ($1, $2, $3) - RETURNING id - ", - ) - .bind(room_id) - .bind(user_id) - .bind(connection_id.0 as i32) - .fetch_one(&mut tx) - .await?; + let (sql, values) = self.build_query( + Query::insert() + .into_table(schema::project::Definition::Table) + .columns([ + schema::project::Definition::RoomId, + schema::project::Definition::HostUserId, + schema::project::Definition::HostConnectionId, + ]) + .values_panic([room_id.into(), user_id.into(), connection_id.0.into()]) + .returning_col(schema::project::Definition::Id), + ); + let project_id: ProjectId = sqlx::query_scalar_with(&sql, values) + .fetch_one(&mut tx) + .await?; if !worktrees.is_empty() { - let mut params = "(?, ?, ?, ?, ?, ?, ?),".repeat(worktrees.len()); - params.pop(); - let query = format!( - " - INSERT INTO worktrees ( - project_id, - id, - root_name, - abs_path, - visible, - scan_id, - is_complete - ) - VALUES {params} - " - ); - - let mut query = sqlx::query(&query); + let mut query = Query::insert() + .into_table(schema::worktree::Definition::Table) + .columns([ + schema::worktree::Definition::ProjectId, + schema::worktree::Definition::Id, + schema::worktree::Definition::RootName, + schema::worktree::Definition::AbsPath, + schema::worktree::Definition::Visible, + schema::worktree::Definition::ScanId, + schema::worktree::Definition::IsComplete, + ]) + .to_owned(); for worktree in worktrees { - query = query - .bind(project_id) - .bind(worktree.id as i32) - .bind(&worktree.root_name) - .bind(&worktree.abs_path) - .bind(worktree.visible) - .bind(0) - .bind(false); + query.values_panic([ + project_id.into(), + worktree.id.into(), + worktree.root_name.clone().into(), + worktree.abs_path.clone().into(), + worktree.visible.into(), + 0.into(), + false.into(), + ]); } - query.execute(&mut tx).await?; + let (sql, values) = self.build_query(&query); + sqlx::query_with(&sql, values).execute(&mut tx).await?; } sqlx::query( @@ -2648,6 +2675,12 @@ macro_rules! 
id_type { self.0.fmt(f) } } + + impl From<$name> for sea_query::Value { + fn from(value: $name) -> Self { + sea_query::Value::Int(Some(value.0)) + } + } }; } @@ -2692,6 +2725,7 @@ id_type!(WorktreeId); #[derive(Clone, Debug, Default, FromRow, PartialEq)] struct WorktreeRow { pub id: WorktreeId, + pub project_id: ProjectId, pub abs_path: String, pub root_name: String, pub visible: bool, diff --git a/crates/collab/src/db/schema.rs b/crates/collab/src/db/schema.rs new file mode 100644 index 0000000000..40a3e334d1 --- /dev/null +++ b/crates/collab/src/db/schema.rs @@ -0,0 +1,43 @@ +pub mod project { + use sea_query::Iden; + + #[derive(Iden)] + pub enum Definition { + #[iden = "projects"] + Table, + Id, + RoomId, + HostUserId, + HostConnectionId, + } +} + +pub mod worktree { + use sea_query::Iden; + + #[derive(Iden)] + pub enum Definition { + #[iden = "worktrees"] + Table, + Id, + ProjectId, + AbsPath, + RootName, + Visible, + ScanId, + IsComplete, + } +} + +pub mod room_participant { + use sea_query::Iden; + + #[derive(Iden)] + pub enum Definition { + #[iden = "room_participants"] + Table, + RoomId, + UserId, + AnsweringConnectionId, + } +} diff --git a/crates/collab/src/db_tests.rs b/crates/collab/src/db/tests.rs similarity index 99% rename from crates/collab/src/db_tests.rs rename to crates/collab/src/db/tests.rs index 444e60ddeb..88488b10d2 100644 --- a/crates/collab/src/db_tests.rs +++ b/crates/collab/src/db/tests.rs @@ -1,4 +1,4 @@ -use super::db::*; +use super::*; use gpui::executor::{Background, Deterministic}; use std::sync::Arc; diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index 20fae38c16..019197fc46 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -4,8 +4,6 @@ mod db; mod env; mod rpc; -#[cfg(test)] -mod db_tests; #[cfg(test)] mod integration_tests; From 11a39226e8491a0774c19cd83b84918d2906fa86 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 29 Nov 2022 16:49:04 +0100 Subject: [PATCH 074/240] Start on a new `db2` module that uses SeaORM --- Cargo.lock | 280 ++++++++++++++++ crates/collab/Cargo.toml | 2 + .../20221109000000_test_schema.sql | 2 +- crates/collab/src/db2.rs | 316 ++++++++++++++++++ crates/collab/src/db2/project.rs | 37 ++ crates/collab/src/db2/project_collaborator.rs | 18 + crates/collab/src/db2/room.rs | 31 ++ crates/collab/src/db2/room_participant.rs | 34 ++ crates/collab/src/db2/worktree.rs | 33 ++ crates/collab/src/lib.rs | 12 + crates/collab/src/main.rs | 1 + 11 files changed, 765 insertions(+), 1 deletion(-) create mode 100644 crates/collab/src/db2.rs create mode 100644 crates/collab/src/db2/project.rs create mode 100644 crates/collab/src/db2/project_collaborator.rs create mode 100644 crates/collab/src/db2/room.rs create mode 100644 crates/collab/src/db2/room_participant.rs create mode 100644 crates/collab/src/db2/worktree.rs diff --git a/Cargo.lock b/Cargo.lock index 5083b91312..7b09775f2a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,12 @@ # It is not intended for manual editing. 
version = 3 +[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" + [[package]] name = "activity_indicator" version = "0.1.0" @@ -107,6 +113,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "aliasable" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd" + [[package]] name = "ambient-authority" version = "0.0.1" @@ -547,6 +559,19 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "bae" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b8de67cc41132507eeece2584804efcb15f85ba516e34c944b7667f480397a" +dependencies = [ + "heck 0.3.3", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "base64" version = "0.13.0" @@ -635,6 +660,51 @@ dependencies = [ "once_cell", ] +[[package]] +name = "borsh" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" +dependencies = [ + "borsh-derive", + "hashbrown 0.11.2", +] + +[[package]] +name = "borsh-derive" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" +dependencies = [ + "borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate", + "proc-macro2", + "syn", +] + +[[package]] +name = "borsh-derive-internal" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "borsh-schema-derive-internal" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "breadcrumbs" version = "0.1.0" @@ -678,6 +748,27 @@ version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d" +[[package]] +name = "bytecheck" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d11cac2c12b5adc6570dad2ee1b87eff4955dac476fe12d81e5fdd352e52406f" +dependencies = [ + "bytecheck_derive", + "ptr_meta", +] + +[[package]] +name = "bytecheck_derive" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "bytemuck" version = "1.12.1" @@ -841,6 +932,7 @@ dependencies = [ "js-sys", "num-integer", "num-traits", + "serde", "time 0.1.44", "wasm-bindgen", "winapi 0.3.9", @@ -1065,6 +1157,7 @@ dependencies = [ "reqwest", "rpc", "scrypt", + "sea-orm", "sea-query", "sea-query-binder", "serde", @@ -3843,6 +3936,29 @@ version = "6.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff" +[[package]] +name = "ouroboros" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dfbb50b356159620db6ac971c6d5c9ab788c9cc38a6f49619fca2a27acb062ca" +dependencies = [ + "aliasable", + "ouroboros_macro", +] + +[[package]] +name = "ouroboros_macro" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a0d9d1a6191c4f391f87219d1ea42b23f09ee84d64763cd05ee6ea88d9f384d" +dependencies = [ + "Inflector", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "outline" version = "0.1.0" @@ -4201,6 +4317,15 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +[[package]] +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -4446,6 +4571,26 @@ dependencies = [ "cc", ] +[[package]] +name = "ptr_meta" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" +dependencies = [ + "ptr_meta_derive", +] + +[[package]] +name = "ptr_meta_derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "pulldown-cmark" version = "0.9.2" @@ -4683,6 +4828,15 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "rend" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79af64b4b6362ffba04eef3a4e10829718a4896dac19daa741851c86781edf95" +dependencies = [ + "bytecheck", +] + [[package]] name = "reqwest" version = "0.11.12" @@ -4760,6 +4914,31 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "rkyv" +version = "0.7.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15" +dependencies = [ + "bytecheck", + "hashbrown 0.12.3", + "ptr_meta", + "rend", + "rkyv_derive", + "seahash", +] + +[[package]] +name = "rkyv_derive" +version = "0.7.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eaedadc88b53e36dd32d940ed21ae4d850d5916f2581526921f553a72ac34c4" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "rmp" version = "0.8.11" @@ -4911,6 +5090,24 @@ dependencies = [ "walkdir", ] +[[package]] +name = "rust_decimal" +version = "1.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33c321ee4e17d2b7abe12b5d20c1231db708dd36185c8a21e9de5fed6da4dbe9" +dependencies = [ + "arrayvec 0.7.2", + "borsh", + "bytecheck", + "byteorder", + "bytes 1.2.1", + "num-traits", + "rand 0.8.5", + "rkyv", + "serde", + "serde_json", +] + [[package]] name = "rustc-demangle" version = "0.1.21" @@ -4982,6 +5179,12 @@ dependencies = [ "base64", ] +[[package]] +name = "rustversion" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8" + [[package]] name = "rustybuzz" version = "0.3.0" @@ -5123,13 +5326,59 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sea-orm" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3120bc435b8640963ffda698f877610e07e077157e216eb99408d819c344034d" +dependencies = [ + "async-stream", + "async-trait", + "chrono", + "futures 0.3.24", + "futures-util", + "log", + "ouroboros", + "rust_decimal", + "sea-orm-macros", + "sea-query", + "sea-query-binder", + "sea-strum", + "serde", + "serde_json", + "sqlx", + "thiserror", + "time 0.3.15", + "tracing", + "url", + "uuid 1.2.1", +] + +[[package]] +name = "sea-orm-macros" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c54bacfeb842813c16821e21f9456c358861a448294075184ea1d6307e386d08" +dependencies = [ + "bae", + "heck 0.3.3", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "sea-query" version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4f0fc4d8e44e1d51c739a68d336252a18bc59553778075d5e32649be6ec92ed" dependencies = [ + "chrono", + "rust_decimal", "sea-query-derive", + "serde_json", + "time 0.3.15", + "uuid 1.2.1", ] [[package]] @@ -5138,8 +5387,13 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c2585b89c985cfacfe0ec9fc9e7bb055b776c1a2581c4e3c6185af2b8bf8865" dependencies = [ + "chrono", + "rust_decimal", "sea-query", + "serde_json", "sqlx", + "time 0.3.15", + "uuid 1.2.1", ] [[package]] @@ -5155,6 +5409,28 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sea-strum" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "391d06a6007842cfe79ac6f7f53911b76dfd69fc9a6769f1cf6569d12ce20e1b" +dependencies = [ + "sea-strum_macros", +] + +[[package]] +name = "sea-strum_macros" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69b4397b825df6ccf1e98bcdabef3bbcfc47ff5853983467850eeab878384f21" +dependencies = [ + "heck 0.3.3", + "proc-macro2", + "quote", + "rustversion", + "syn", +] + [[package]] name = "seahash" version = "4.1.0" @@ -5670,6 +5946,7 @@ dependencies = [ "bitflags", "byteorder", "bytes 1.2.1", + "chrono", "crc", "crossbeam-queue", "dirs 4.0.0", @@ -5693,10 +5970,12 @@ dependencies = [ "log", "md-5", "memchr", + "num-bigint", "once_cell", "paste", "percent-encoding", "rand 0.8.5", + "rust_decimal", "rustls 0.20.7", "rustls-pemfile", "serde", @@ -6847,6 +7126,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" dependencies = [ "getrandom 0.2.7", + "serde", ] [[package]] diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index e854b003c8..e10f9fe8dc 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -36,6 +36,7 @@ prometheus = "0.13" rand = "0.8" reqwest = { version = "0.11", features = ["json"], optional = true } scrypt = "0.7" +sea-orm = { version = "0.10", features = ["sqlx-postgres", "runtime-tokio-rustls"] } sea-query = { version = "0.27", features = ["derive"] } sea-query-binder = { version = "0.2", features = ["sqlx-postgres"] } serde = { version = "1.0", features = ["derive", "rc"] } @@ -74,6 +75,7 @@ env_logger = "0.9" log = { version = "0.4.16", features = ["kv_unstable_serde"] } util = { path = "../util" } lazy_static = "1.4" +sea-orm = { version = "0.10", features = ["sqlx-sqlite"] } sea-query-binder = { version = "0.2", features = ["sqlx-sqlite"] } serde_json = { version = "1.0", features = ["preserve_order"] } sqlx = { version = "0.6", features = ["sqlite"] } diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql 
b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql
index 02ca0c75a9..65bf00e74c 100644
--- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql
+++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql
@@ -41,7 +41,7 @@ CREATE TABLE "rooms" (
 
 CREATE TABLE "projects" (
     "id" INTEGER PRIMARY KEY,
-    "room_id" INTEGER REFERENCES rooms (id),
+    "room_id" INTEGER REFERENCES rooms (id) NOT NULL,
     "host_user_id" INTEGER REFERENCES users (id) NOT NULL,
     "host_connection_id" INTEGER NOT NULL
 );
diff --git a/crates/collab/src/db2.rs b/crates/collab/src/db2.rs
new file mode 100644
index 0000000000..687e93daae
--- /dev/null
+++ b/crates/collab/src/db2.rs
@@ -0,0 +1,316 @@
+mod project;
+mod project_collaborator;
+mod room;
+mod room_participant;
+mod worktree;
+
+use crate::{Error, Result};
+use anyhow::anyhow;
+use collections::HashMap;
+use dashmap::DashMap;
+use futures::StreamExt;
+use rpc::{proto, ConnectionId};
+use sea_orm::ActiveValue;
+use sea_orm::{
+    entity::prelude::*, ConnectOptions, DatabaseConnection, DatabaseTransaction, DbErr,
+    TransactionTrait,
+};
+use serde::{Deserialize, Serialize};
+use std::ops::{Deref, DerefMut};
+use std::{future::Future, marker::PhantomData, rc::Rc, sync::Arc};
+use tokio::sync::{Mutex, OwnedMutexGuard};
+
+pub struct Database {
+    pool: DatabaseConnection,
+    rooms: DashMap<RoomId, Arc<Mutex<()>>>,
+    #[cfg(test)]
+    background: Option<std::sync::Arc<gpui::executor::Background>>,
+    #[cfg(test)]
+    runtime: Option<tokio::runtime::Runtime>,
+}
+
+impl Database {
+    pub async fn new(url: &str, max_connections: u32) -> Result<Self> {
+        let mut options = ConnectOptions::new(url.into());
+        options.max_connections(max_connections);
+        Ok(Self {
+            pool: sea_orm::Database::connect(options).await?,
+            rooms: DashMap::with_capacity(16384),
+            #[cfg(test)]
+            background: None,
+            #[cfg(test)]
+            runtime: None,
+        })
+    }
+
+    pub async fn share_project(
+        &self,
+        room_id: RoomId,
+        connection_id: ConnectionId,
+        worktrees: &[proto::WorktreeMetadata],
+    ) -> Result<RoomGuard<(ProjectId, proto::Room)>> {
+        self.transact(|tx| async move {
+            let participant = room_participant::Entity::find()
+                .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0))
+                .one(&tx)
+                .await?
+                .ok_or_else(|| anyhow!("could not find participant"))?;
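+            // The caller's room membership is resolved from their answering
+            // connection; a project may only be shared into the room that
+            // connection actually joined, which the next check enforces.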
+            if participant.room_id != room_id.0 {
+                return Err(anyhow!("shared project on unexpected room"))?;
+            }
+
+            let project = project::ActiveModel {
+                room_id: ActiveValue::set(participant.room_id),
+                host_user_id: ActiveValue::set(participant.user_id),
+                host_connection_id: ActiveValue::set(connection_id.0 as i32),
+                ..Default::default()
+            }
+            .insert(&tx)
+            .await?;
+
+            worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel {
+                id: ActiveValue::set(worktree.id as i32),
+                project_id: ActiveValue::set(project.id),
+                abs_path: ActiveValue::set(worktree.abs_path.clone()),
+                root_name: ActiveValue::set(worktree.root_name.clone()),
+                visible: ActiveValue::set(worktree.visible),
+                scan_id: ActiveValue::set(0),
+                is_complete: ActiveValue::set(false),
+            }))
+            .exec(&tx)
+            .await?;
+
+            project_collaborator::ActiveModel {
+                project_id: ActiveValue::set(project.id),
+                connection_id: ActiveValue::set(connection_id.0 as i32),
+                user_id: ActiveValue::set(participant.user_id),
+                replica_id: ActiveValue::set(0),
+                is_host: ActiveValue::set(true),
+                ..Default::default()
+            }
+            .insert(&tx)
+            .await?;
+
+            let room = self.get_room(room_id, &tx).await?;
+            self.commit_room_transaction(room_id, tx, (ProjectId(project.id), room))
+                .await
+        })
+        .await
+    }
+
+    async fn get_room(&self, room_id: RoomId, tx: &DatabaseTransaction) -> Result<proto::Room> {
+        let db_room = room::Entity::find_by_id(room_id.0)
+            .one(tx)
+            .await?
+            .ok_or_else(|| anyhow!("could not find room"))?;
+
+        let mut db_participants = db_room
+            .find_related(room_participant::Entity)
+            .stream(tx)
+            .await?;
+        let mut participants = HashMap::default();
+        let mut pending_participants = Vec::new();
+        while let Some(db_participant) = db_participants.next().await {
+            let db_participant = db_participant?;
+            if let Some(answering_connection_id) = db_participant.answering_connection_id {
+                let location = match (
+                    db_participant.location_kind,
+                    db_participant.location_project_id,
+                ) {
+                    (Some(0), Some(project_id)) => {
+                        Some(proto::participant_location::Variant::SharedProject(
+                            proto::participant_location::SharedProject {
+                                id: project_id as u64,
+                            },
+                        ))
+                    }
+                    (Some(1), _) => Some(proto::participant_location::Variant::UnsharedProject(
+                        Default::default(),
+                    )),
+                    _ => Some(proto::participant_location::Variant::External(
+                        Default::default(),
+                    )),
+                };
+                participants.insert(
+                    answering_connection_id,
+                    proto::Participant {
+                        user_id: db_participant.user_id as u64,
+                        peer_id: answering_connection_id as u32,
+                        projects: Default::default(),
+                        location: Some(proto::ParticipantLocation { variant: location }),
+                    },
+                );
+            } else {
+                pending_participants.push(proto::PendingParticipant {
+                    user_id: db_participant.user_id as u64,
+                    calling_user_id: db_participant.calling_user_id as u64,
+                    initial_project_id: db_participant.initial_project_id.map(|id| id as u64),
+                });
+            }
+        }
+
+        let mut db_projects = db_room
+            .find_related(project::Entity)
+            .find_with_related(worktree::Entity)
+            .stream(tx)
+            .await?;
+
+        while let Some(row) = db_projects.next().await {
+            let (db_project, db_worktree) = row?;
+            if let Some(participant) = participants.get_mut(&db_project.host_connection_id) {
+                let project = if let Some(project) = participant
+                    .projects
+                    .iter_mut()
+                    .find(|project| project.id as i32 == db_project.id)
+                {
+                    project
+                } else {
+                    participant.projects.push(proto::ParticipantProject {
+                        id: db_project.id as u64,
+                        worktree_root_names: Default::default(),
+                    });
+                    participant.projects.last_mut().unwrap()
+                };
+
+                if let Some(db_worktree) = db_worktree {
+                    project.worktree_root_names.push(db_worktree.root_name);
+                }
+            }
+        }
+
+        Ok(proto::Room {
+            id: db_room.id as u64,
+            live_kit_room: db_room.live_kit_room,
+            participants: participants.into_values().collect(),
+            pending_participants,
+        })
+    }
+
+    async fn commit_room_transaction<T>(
+        &self,
+        room_id: RoomId,
+        tx: DatabaseTransaction,
+        data: T,
+    ) -> Result<RoomGuard<T>> {
+        let lock = self.rooms.entry(room_id).or_default().clone();
+        let _guard = lock.lock_owned().await;
+        tx.commit().await?;
+        Ok(RoomGuard {
+            data,
+            _guard,
+            _not_send: PhantomData,
+        })
+    }
+
+    async fn transact<F, Fut, T>(&self, f: F) -> Result<T>
+    where
+        F: Send + Fn(DatabaseTransaction) -> Fut,
+        Fut: Send + Future<Output = Result<T>>,
+    {
+        let body = async {
+            loop {
+                let tx = self.pool.begin().await?;
+                match f(tx).await {
+                    Ok(result) => return Ok(result),
+                    Err(error) => match error {
+                        Error::Database2(
+                            DbErr::Exec(sea_orm::RuntimeErr::SqlxError(error))
+                            | DbErr::Query(sea_orm::RuntimeErr::SqlxError(error)),
+                        ) if error
+                            .as_database_error()
+                            .and_then(|error| error.code())
+                            .as_deref()
+                            == Some("40001") =>
+                        {
+                            // Retry (don't break the loop)
+                        }
+                        error @ _ => return Err(error),
+                    },
+                }
+            }
+        };
+
+        #[cfg(test)]
+        {
+            if let Some(background) = self.background.as_ref() {
+                background.simulate_random_delay().await;
+            }
+
+            self.runtime.as_ref().unwrap().block_on(body)
+        }
+
+        #[cfg(not(test))]
+        {
+            body.await
+        }
+    }
+}
+
+pub struct RoomGuard<T> {
+    data: T,
+    _guard: OwnedMutexGuard<()>,
+    _not_send: PhantomData<Rc<()>>,
+}
+
+impl<T> Deref for RoomGuard<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.data
+    }
+}
+
+impl<T> DerefMut for RoomGuard<T> {
+    fn deref_mut(&mut self) -> &mut T {
+        &mut self.data
+    }
+}
+
+macro_rules! id_type {
id_type { + ($name:ident) => { + #[derive( + Clone, + Copy, + Debug, + Default, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + sqlx::Type, + Serialize, + Deserialize, + )] + #[sqlx(transparent)] + #[serde(transparent)] + pub struct $name(pub i32); + + impl $name { + #[allow(unused)] + pub const MAX: Self = Self(i32::MAX); + + #[allow(unused)] + pub fn from_proto(value: u64) -> Self { + Self(value as i32) + } + + #[allow(unused)] + pub fn to_proto(self) -> u64 { + self.0 as u64 + } + } + + impl std::fmt::Display for $name { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + self.0.fmt(f) + } + } + }; +} + +id_type!(UserId); +id_type!(RoomId); +id_type!(RoomParticipantId); +id_type!(ProjectId); +id_type!(WorktreeId); diff --git a/crates/collab/src/db2/project.rs b/crates/collab/src/db2/project.rs new file mode 100644 index 0000000000..4ae0616835 --- /dev/null +++ b/crates/collab/src/db2/project.rs @@ -0,0 +1,37 @@ +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] +#[sea_orm(table_name = "projects")] +pub struct Model { + #[sea_orm(primary_key)] + pub id: i32, + pub room_id: i32, + pub host_user_id: i32, + pub host_connection_id: i32, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::room::Entity", + from = "Column::RoomId", + to = "super::room::Column::Id" + )] + Room, + #[sea_orm(has_many = "super::worktree::Entity")] + Worktree, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Room.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Worktree.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/crates/collab/src/db2/project_collaborator.rs b/crates/collab/src/db2/project_collaborator.rs new file mode 100644 index 0000000000..da567eb2c2 --- /dev/null +++ b/crates/collab/src/db2/project_collaborator.rs @@ -0,0 +1,18 @@ +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] +#[sea_orm(table_name = "project_collaborators")] +pub struct Model { + #[sea_orm(primary_key)] + pub id: i32, + pub project_id: i32, + pub connection_id: i32, + pub user_id: i32, + pub replica_id: i32, + pub is_host: bool, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/crates/collab/src/db2/room.rs b/crates/collab/src/db2/room.rs new file mode 100644 index 0000000000..18f1d234e5 --- /dev/null +++ b/crates/collab/src/db2/room.rs @@ -0,0 +1,31 @@ +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] +#[sea_orm(table_name = "room_participants")] +pub struct Model { + #[sea_orm(primary_key)] + pub id: i32, + pub live_kit_room: String, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm(has_many = "super::room_participant::Entity")] + RoomParticipant, + #[sea_orm(has_many = "super::project::Entity")] + Project, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::RoomParticipant.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Project.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/crates/collab/src/db2/room_participant.rs b/crates/collab/src/db2/room_participant.rs new file mode 100644 index 0000000000..c9b7a13e07 --- /dev/null +++ b/crates/collab/src/db2/room_participant.rs @@ -0,0 +1,34 @@ +use sea_orm::entity::prelude::*; + 
+#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
+#[sea_orm(table_name = "room_participants")]
+pub struct Model {
+    #[sea_orm(primary_key)]
+    pub id: i32,
+    pub room_id: i32,
+    pub user_id: i32,
+    pub answering_connection_id: Option<i32>,
+    pub location_kind: Option<i32>,
+    pub location_project_id: Option<i32>,
+    pub initial_project_id: Option<i32>,
+    pub calling_user_id: i32,
+    pub calling_connection_id: i32,
+}
+
+#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
+pub enum Relation {
+    #[sea_orm(
+        belongs_to = "super::room::Entity",
+        from = "Column::RoomId",
+        to = "super::room::Column::Id"
+    )]
+    Room,
+}
+
+impl Related<super::room::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::Room.def()
+    }
+}
+
+impl ActiveModelBehavior for ActiveModel {}
diff --git a/crates/collab/src/db2/worktree.rs b/crates/collab/src/db2/worktree.rs
new file mode 100644
index 0000000000..3a630fcfc9
--- /dev/null
+++ b/crates/collab/src/db2/worktree.rs
@@ -0,0 +1,33 @@
+use sea_orm::entity::prelude::*;
+
+#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
+#[sea_orm(table_name = "worktrees")]
+pub struct Model {
+    #[sea_orm(primary_key)]
+    pub id: i32,
+    #[sea_orm(primary_key)]
+    pub project_id: i32,
+    pub abs_path: String,
+    pub root_name: String,
+    pub visible: bool,
+    pub scan_id: i64,
+    pub is_complete: bool,
+}
+
+#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
+pub enum Relation {
+    #[sea_orm(
+        belongs_to = "super::project::Entity",
+        from = "Column::ProjectId",
+        to = "super::project::Column::Id"
+    )]
+    Project,
+}
+
+impl Related<super::project::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::Project.def()
+    }
+}
+
+impl ActiveModelBehavior for ActiveModel {}
diff --git a/crates/collab/src/lib.rs b/crates/collab/src/lib.rs
index be21999a45..23af3344b5 100644
--- a/crates/collab/src/lib.rs
+++ b/crates/collab/src/lib.rs
@@ -5,6 +5,7 @@ pub type Result<T, E = Error> = std::result::Result<T, E>;
 pub enum Error {
     Http(StatusCode, String),
     Database(sqlx::Error),
+    Database2(sea_orm::error::DbErr),
     Internal(anyhow::Error),
 }
 
@@ -20,6 +21,12 @@ impl From<sqlx::Error> for Error {
     }
 }
 
+impl From<sea_orm::error::DbErr> for Error {
+    fn from(error: sea_orm::error::DbErr) -> Self {
+        Self::Database2(error)
+    }
+}
+
 impl From<axum::Error> for Error {
     fn from(error: axum::Error) -> Self {
         Self::Internal(error.into())
@@ -45,6 +52,9 @@ impl IntoResponse for Error {
             Error::Database(error) => {
                 (StatusCode::INTERNAL_SERVER_ERROR, format!("{}", &error)).into_response()
             }
+            Error::Database2(error) => {
+                (StatusCode::INTERNAL_SERVER_ERROR, format!("{}", &error)).into_response()
+            }
             Error::Internal(error) => {
                 (StatusCode::INTERNAL_SERVER_ERROR, format!("{}", &error)).into_response()
             }
@@ -57,6 +67,7 @@ impl std::fmt::Debug for Error {
         match self {
             Error::Http(code, message) => (code, message).fmt(f),
             Error::Database(error) => error.fmt(f),
+            Error::Database2(error) => error.fmt(f),
             Error::Internal(error) => error.fmt(f),
         }
     }
@@ -67,6 +78,7 @@ impl std::fmt::Display for Error {
         match self {
             Error::Http(code, message) => write!(f, "{code}: {message}"),
             Error::Database(error) => error.fmt(f),
+            Error::Database2(error) => error.fmt(f),
             Error::Internal(error) => error.fmt(f),
         }
     }
diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs
index 019197fc46..8a2cdc980f 100644
--- a/crates/collab/src/main.rs
+++ b/crates/collab/src/main.rs
@@ -1,6 +1,7 @@
 mod api;
 mod auth;
 mod db;
+mod db2;
 mod env;
 mod rpc;
 
From 049c0f8ba4d743c2cae09b8595b035c456436642 Mon Sep 17 00:00:00 2001
From: Joseph Lyons
Date: Tue, 29 Nov 2022 12:57:51 -0500
Subject: [PATCH 075/240] Order invites by creation time

---
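Without an explicit ORDER BY, Postgres returns the waitlist rows in an
unspecified order, so invite emails could go out in arbitrary order; ordering
by created_at makes the batch FIFO. A minimal sketch of the query this patch
produces (the table and column names here are assumptions inferred from the
hunk below and from the signup tests later in this series):

    SELECT
        email_address, email_confirmation_code
    FROM signups
    WHERE
        NOT email_confirmation_sent AND
        (platform_mac OR platform_unknown)
    ORDER BY
        created_at
    LIMIT $1
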
 crates/collab/src/db.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs
index 1609764f6e..6aeb70a6da 100644
--- a/crates/collab/src/db.rs
+++ b/crates/collab/src/db.rs
@@ -773,6 +773,8 @@ where
             WHERE
                 NOT email_confirmation_sent AND
                 (platform_mac OR platform_unknown)
+            ORDER BY
+                created_at
             LIMIT $1
             ",
         )

From b7294887c7c2f02c8730c8b662720d02a590cbb0 Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Tue, 29 Nov 2022 19:20:11 +0100
Subject: [PATCH 076/240] WIP: move to a non-generic test database struct

Co-Authored-By: Mikayla Maki
Co-Authored-By: Julia Risley
---
 crates/collab/Cargo.toml       |   2 +-
 crates/collab/src/db2.rs       | 180 +++++++-
 crates/collab/src/db2/tests.rs | 808 +++++++++++++++++++++++++++++++++
 crates/collab/src/db2/user.rs  |  21 +
 4 files changed, 1009 insertions(+), 2 deletions(-)
 create mode 100644 crates/collab/src/db2/tests.rs
 create mode 100644 crates/collab/src/db2/user.rs

diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml
index e10f9fe8dc..a268bdd7b0 100644
--- a/crates/collab/Cargo.toml
+++ b/crates/collab/Cargo.toml
@@ -42,7 +42,7 @@ sea-query-binder = { version = "0.2", features = ["sqlx-postgres"] }
 serde = { version = "1.0", features = ["derive", "rc"] }
 serde_json = "1.0"
 sha-1 = "0.9"
-sqlx = { version = "0.6", features = ["runtime-tokio-rustls", "postgres", "json", "time", "uuid"] }
+sqlx = { version = "0.6", features = ["runtime-tokio-rustls", "postgres", "json", "time", "uuid", "any"] }
 time = { version = "0.3", features = ["serde", "serde-well-known"] }
 tokio = { version = "1", features = ["full"] }
 tokio-tungstenite = "0.17"
diff --git a/crates/collab/src/db2.rs b/crates/collab/src/db2.rs
index 687e93daae..765fea315d 100644
--- a/crates/collab/src/db2.rs
+++ b/crates/collab/src/db2.rs
@@ -2,6 +2,9 @@ mod project;
 mod project_collaborator;
 mod room;
 mod room_participant;
+#[cfg(test)]
+mod tests;
+mod user;
 mod worktree;
 
 use crate::{Error, Result};
@@ -16,11 +19,18 @@ use sea_orm::{
     TransactionTrait,
 };
 use serde::{Deserialize, Serialize};
+use sqlx::migrate::{Migrate, Migration, MigrationSource};
+use sqlx::Connection;
 use std::ops::{Deref, DerefMut};
+use std::path::Path;
+use std::time::Duration;
 use std::{future::Future, marker::PhantomData, rc::Rc, sync::Arc};
 use tokio::sync::{Mutex, OwnedMutexGuard};
 
+pub use user::Model as User;
+
 pub struct Database {
+    url: String,
     pool: DatabaseConnection,
     rooms: DashMap<RoomId, Arc<Mutex<()>>>,
     #[cfg(test)]
@@ -32,8 +42,9 @@ impl Database {
     pub async fn new(url: &str, max_connections: u32) -> Result<Self> {
         let mut options = ConnectOptions::new(url.into());
-        options.max_connections(max_connections);
+        options.min_connections(1).max_connections(max_connections);
         Ok(Self {
+            url: url.into(),
             pool: sea_orm::Database::connect(options).await?,
             rooms: DashMap::with_capacity(16384),
             #[cfg(test)]
@@ -43,6 +54,59 @@ impl Database {
         })
     }
 
+    pub async fn migrate(
+        &self,
+        migrations_path: &Path,
+        ignore_checksum_mismatch: bool,
+    ) -> anyhow::Result<Vec<(Migration, Duration)>> {
+        let migrations = MigrationSource::resolve(migrations_path)
+            .await
+            .map_err(|err| anyhow!("failed to load migrations: {err:?}"))?;
+
+        let mut connection = sqlx::AnyConnection::connect(&self.url).await?;
+
+        connection.ensure_migrations_table().await?;
+        let applied_migrations: HashMap<_, _> = connection
+            .list_applied_migrations()
+            .await?
+            .into_iter()
+            .map(|m| (m.version, m))
+            .collect();
+
+        let mut new_migrations = Vec::new();
+        for migration in migrations {
+            match applied_migrations.get(&migration.version) {
+                Some(applied_migration) => {
+                    if migration.checksum != applied_migration.checksum && !ignore_checksum_mismatch
+                    {
+                        Err(anyhow!(
+                            "checksum mismatch for applied migration {}",
+                            migration.description
+                        ))?;
+                    }
+                }
+                None => {
+                    let elapsed = connection.apply(&migration).await?;
+                    new_migrations.push((migration, elapsed));
+                }
+            }
+        }
+
+        Ok(new_migrations)
+    }
+
+    pub async fn get_users_by_ids(&self, ids: Vec<UserId>) -> Result<Vec<User>> {
+        let ids = ids.iter().map(|id| id.0).collect::<Vec<_>>();
+        self.transact(|tx| async {
+            let tx = tx;
+            Ok(user::Entity::find()
+                .filter(user::Column::Id.is_in(ids.iter().copied()))
+                .all(&tx)
+                .await?)
+        })
+        .await
+    }
+
     pub async fn share_project(
         &self,
         room_id: RoomId,
@@ -266,6 +330,29 @@ impl<T> DerefMut for RoomGuard<T> {
     }
 }
 
+#[derive(Debug, Serialize, Deserialize)]
+pub struct NewUserParams {
+    pub github_login: String,
+    pub github_user_id: i32,
+    pub invite_count: i32,
+}
+
+#[derive(Debug)]
+pub struct NewUserResult {
+    pub user_id: UserId,
+    pub metrics_id: String,
+    pub inviting_user_id: Option<UserId>,
+    pub signup_device_id: Option<String>,
+}
+
+fn random_invite_code() -> String {
+    nanoid::nanoid!(16)
+}
+
+fn random_email_confirmation_code() -> String {
+    nanoid::nanoid!(64)
+}
+
 macro_rules! id_type {
     ($name:ident) => {
         #[derive(
@@ -314,3 +401,94 @@ id_type!(UserId);
 id_type!(RoomId);
 id_type!(RoomParticipantId);
 id_type!(ProjectId);
+id_type!(ProjectCollaboratorId);
 id_type!(WorktreeId);
+
+#[cfg(test)]
+pub use test::*;
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use gpui::executor::Background;
+    use lazy_static::lazy_static;
+    use parking_lot::Mutex;
+    use rand::prelude::*;
+    use sqlx::migrate::MigrateDatabase;
+    use std::sync::Arc;
+
+    pub struct TestDb {
+        pub db: Option<Arc<Database>>,
+    }
+
+    impl TestDb {
+        pub fn sqlite(background: Arc<Background>) -> Self {
+            let mut rng = StdRng::from_entropy();
+            let url = format!("sqlite://file:zed-test-{}?mode=memory", rng.gen::<u128>());
+            let runtime = tokio::runtime::Builder::new_current_thread()
+                .enable_io()
+                .enable_time()
+                .build()
+                .unwrap();
+
+            let mut db = runtime.block_on(async {
+                let db = Database::new(&url, 5).await.unwrap();
+                let migrations_path = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations.sqlite");
+                db.migrate(migrations_path.as_ref(), false).await.unwrap();
+                db
+            });
+
+            db.background = Some(background);
+            db.runtime = Some(runtime);
+
+            Self {
+                db: Some(Arc::new(db)),
+            }
+        }
+
+        pub fn postgres(background: Arc<Background>) -> Self {
+            lazy_static! {
+                static ref LOCK: Mutex<()> = Mutex::new(());
+            }
+
+            let _guard = LOCK.lock();
+            let mut rng = StdRng::from_entropy();
+            let url = format!(
+                "postgres://postgres@localhost/zed-test-{}",
+                rng.gen::<u128>()
+            );
+            let runtime = tokio::runtime::Builder::new_current_thread()
+                .enable_io()
+                .enable_time()
+                .build()
+                .unwrap();
+
+            let mut db = runtime.block_on(async {
+                sqlx::Postgres::create_database(&url)
+                    .await
+                    .expect("failed to create test db");
+                let db = Database::new(&url, 5).await.unwrap();
+                let migrations_path = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations");
+                db.migrate(Path::new(migrations_path), false).await.unwrap();
+                db
+            });
+
+            db.background = Some(background);
+            db.runtime = Some(runtime);
+
+            Self {
+                db: Some(Arc::new(db)),
+            }
+        }
+
+        pub fn db(&self) -> &Arc<Database> {
+            self.db.as_ref().unwrap()
+        }
+    }
+
+    // TODO: Implement drop
+    // impl Drop for PostgresTestDb {
+    //     fn drop(&mut self) {
+    //         let db = self.db.take().unwrap();
+    //         db.teardown(&self.url);
+    //     }
+    // }
+}
diff --git a/crates/collab/src/db2/tests.rs b/crates/collab/src/db2/tests.rs
new file mode 100644
index 0000000000..6d88785938
--- /dev/null
+++ b/crates/collab/src/db2/tests.rs
@@ -0,0 +1,808 @@
+use super::*;
+use gpui::executor::{Background, Deterministic};
+use std::sync::Arc;
+
+macro_rules! test_both_dbs {
+    ($postgres_test_name:ident, $sqlite_test_name:ident, $db:ident, $body:block) => {
+        #[gpui::test]
+        async fn $postgres_test_name() {
+            let test_db = TestDb::postgres(Deterministic::new(0).build_background());
+            let $db = test_db.db();
+            $body
+        }
+
+        #[gpui::test]
+        async fn $sqlite_test_name() {
+            let test_db = TestDb::sqlite(Deterministic::new(0).build_background());
+            let $db = test_db.db();
+            $body
+        }
+    };
+}
+
+test_both_dbs!(
+    test_get_users_by_ids_postgres,
+    test_get_users_by_ids_sqlite,
+    db,
+    {
+        let mut user_ids = Vec::new();
+        for i in 1..=4 {
+            user_ids.push(
+                db.create_user(
+                    &format!("user{i}@example.com"),
+                    false,
+                    NewUserParams {
+                        github_login: format!("user{i}"),
+                        github_user_id: i,
+                        invite_count: 0,
+                    },
+                )
+                .await
+                .unwrap()
+                .user_id,
+            );
+        }
+
+        assert_eq!(
+            db.get_users_by_ids(user_ids.clone()).await.unwrap(),
+            vec![
+                User {
+                    id: user_ids[0],
+                    github_login: "user1".to_string(),
+                    github_user_id: Some(1),
+                    email_address: Some("user1@example.com".to_string()),
+                    admin: false,
+                    ..Default::default()
+                },
+                User {
+                    id: user_ids[1],
+                    github_login: "user2".to_string(),
+                    github_user_id: Some(2),
+                    email_address: Some("user2@example.com".to_string()),
+                    admin: false,
+                    ..Default::default()
+                },
+                User {
+                    id: user_ids[2],
+                    github_login: "user3".to_string(),
+                    github_user_id: Some(3),
+                    email_address: Some("user3@example.com".to_string()),
+                    admin: false,
+                    ..Default::default()
+                },
+                User {
+                    id: user_ids[3],
+                    github_login: "user4".to_string(),
+                    github_user_id: Some(4),
+                    email_address: Some("user4@example.com".to_string()),
+                    admin: false,
+                    ..Default::default()
+                }
+            ]
+        );
+    }
+);
+
+test_both_dbs!(
+    test_get_user_by_github_account_postgres,
+    test_get_user_by_github_account_sqlite,
+    db,
+    {
+        let user_id1 = db
+            .create_user(
+                "user1@example.com",
+                false,
+                NewUserParams {
+                    github_login: "login1".into(),
+                    github_user_id: 101,
+                    invite_count: 0,
+                },
+            )
+            .await
+            .unwrap()
+            .user_id;
+        let user_id2 = db
+            .create_user(
+                "user2@example.com",
+                false,
+                NewUserParams {
+                    github_login: "login2".into(),
+                    github_user_id: 102,
+                    invite_count: 0,
+                },
+            )
+            .await
+            .unwrap()
+            .user_id;
+
+        let user = db
+
.get_user_by_github_account("login1", None) + .await + .unwrap() + .unwrap(); + assert_eq!(user.id, user_id1); + assert_eq!(&user.github_login, "login1"); + assert_eq!(user.github_user_id, Some(101)); + + assert!(db + .get_user_by_github_account("non-existent-login", None) + .await + .unwrap() + .is_none()); + + let user = db + .get_user_by_github_account("the-new-login2", Some(102)) + .await + .unwrap() + .unwrap(); + assert_eq!(user.id, user_id2); + assert_eq!(&user.github_login, "the-new-login2"); + assert_eq!(user.github_user_id, Some(102)); + } +); + +test_both_dbs!( + test_create_access_tokens_postgres, + test_create_access_tokens_sqlite, + db, + { + let user = db + .create_user( + "u1@example.com", + false, + NewUserParams { + github_login: "u1".into(), + github_user_id: 1, + invite_count: 0, + }, + ) + .await + .unwrap() + .user_id; + + db.create_access_token_hash(user, "h1", 3).await.unwrap(); + db.create_access_token_hash(user, "h2", 3).await.unwrap(); + assert_eq!( + db.get_access_token_hashes(user).await.unwrap(), + &["h2".to_string(), "h1".to_string()] + ); + + db.create_access_token_hash(user, "h3", 3).await.unwrap(); + assert_eq!( + db.get_access_token_hashes(user).await.unwrap(), + &["h3".to_string(), "h2".to_string(), "h1".to_string(),] + ); + + db.create_access_token_hash(user, "h4", 3).await.unwrap(); + assert_eq!( + db.get_access_token_hashes(user).await.unwrap(), + &["h4".to_string(), "h3".to_string(), "h2".to_string(),] + ); + + db.create_access_token_hash(user, "h5", 3).await.unwrap(); + assert_eq!( + db.get_access_token_hashes(user).await.unwrap(), + &["h5".to_string(), "h4".to_string(), "h3".to_string()] + ); + } +); + +test_both_dbs!(test_add_contacts_postgres, test_add_contacts_sqlite, db, { + let mut user_ids = Vec::new(); + for i in 0..3 { + user_ids.push( + db.create_user( + &format!("user{i}@example.com"), + false, + NewUserParams { + github_login: format!("user{i}"), + github_user_id: i, + invite_count: 0, + }, + ) + .await + .unwrap() + .user_id, + ); + } + + let user_1 = user_ids[0]; + let user_2 = user_ids[1]; + let user_3 = user_ids[2]; + + // User starts with no contacts + assert_eq!(db.get_contacts(user_1).await.unwrap(), &[]); + + // User requests a contact. Both users see the pending request. + db.send_contact_request(user_1, user_2).await.unwrap(); + assert!(!db.has_contact(user_1, user_2).await.unwrap()); + assert!(!db.has_contact(user_2, user_1).await.unwrap()); + assert_eq!( + db.get_contacts(user_1).await.unwrap(), + &[Contact::Outgoing { user_id: user_2 }], + ); + assert_eq!( + db.get_contacts(user_2).await.unwrap(), + &[Contact::Incoming { + user_id: user_1, + should_notify: true + }] + ); + + // User 2 dismisses the contact request notification without accepting or rejecting. + // We shouldn't notify them again. + db.dismiss_contact_notification(user_1, user_2) + .await + .unwrap_err(); + db.dismiss_contact_notification(user_2, user_1) + .await + .unwrap(); + assert_eq!( + db.get_contacts(user_2).await.unwrap(), + &[Contact::Incoming { + user_id: user_1, + should_notify: false + }] + ); + + // User can't accept their own contact request + db.respond_to_contact_request(user_1, user_2, true) + .await + .unwrap_err(); + + // User accepts a contact request. Both users see the contact. 
+ db.respond_to_contact_request(user_2, user_1, true) + .await + .unwrap(); + assert_eq!( + db.get_contacts(user_1).await.unwrap(), + &[Contact::Accepted { + user_id: user_2, + should_notify: true, + busy: false, + }], + ); + assert!(db.has_contact(user_1, user_2).await.unwrap()); + assert!(db.has_contact(user_2, user_1).await.unwrap()); + assert_eq!( + db.get_contacts(user_2).await.unwrap(), + &[Contact::Accepted { + user_id: user_1, + should_notify: false, + busy: false, + }] + ); + + // Users cannot re-request existing contacts. + db.send_contact_request(user_1, user_2).await.unwrap_err(); + db.send_contact_request(user_2, user_1).await.unwrap_err(); + + // Users can't dismiss notifications of them accepting other users' requests. + db.dismiss_contact_notification(user_2, user_1) + .await + .unwrap_err(); + assert_eq!( + db.get_contacts(user_1).await.unwrap(), + &[Contact::Accepted { + user_id: user_2, + should_notify: true, + busy: false, + }] + ); + + // Users can dismiss notifications of other users accepting their requests. + db.dismiss_contact_notification(user_1, user_2) + .await + .unwrap(); + assert_eq!( + db.get_contacts(user_1).await.unwrap(), + &[Contact::Accepted { + user_id: user_2, + should_notify: false, + busy: false, + }] + ); + + // Users send each other concurrent contact requests and + // see that they are immediately accepted. + db.send_contact_request(user_1, user_3).await.unwrap(); + db.send_contact_request(user_3, user_1).await.unwrap(); + assert_eq!( + db.get_contacts(user_1).await.unwrap(), + &[ + Contact::Accepted { + user_id: user_2, + should_notify: false, + busy: false, + }, + Contact::Accepted { + user_id: user_3, + should_notify: false, + busy: false, + } + ] + ); + assert_eq!( + db.get_contacts(user_3).await.unwrap(), + &[Contact::Accepted { + user_id: user_1, + should_notify: false, + busy: false, + }], + ); + + // User declines a contact request. Both users see that it is gone. + db.send_contact_request(user_2, user_3).await.unwrap(); + db.respond_to_contact_request(user_3, user_2, false) + .await + .unwrap(); + assert!(!db.has_contact(user_2, user_3).await.unwrap()); + assert!(!db.has_contact(user_3, user_2).await.unwrap()); + assert_eq!( + db.get_contacts(user_2).await.unwrap(), + &[Contact::Accepted { + user_id: user_1, + should_notify: false, + busy: false, + }] + ); + assert_eq!( + db.get_contacts(user_3).await.unwrap(), + &[Contact::Accepted { + user_id: user_1, + should_notify: false, + busy: false, + }], + ); +}); + +test_both_dbs!(test_metrics_id_postgres, test_metrics_id_sqlite, db, { + let NewUserResult { + user_id: user1, + metrics_id: metrics_id1, + .. + } = db + .create_user( + "person1@example.com", + false, + NewUserParams { + github_login: "person1".into(), + github_user_id: 101, + invite_count: 5, + }, + ) + .await + .unwrap(); + let NewUserResult { + user_id: user2, + metrics_id: metrics_id2, + .. 
+    } = db
+        .create_user(
+            "person2@example.com",
+            false,
+            NewUserParams {
+                github_login: "person2".into(),
+                github_user_id: 102,
+                invite_count: 5,
+            },
+        )
+        .await
+        .unwrap();
+
+    assert_eq!(db.get_user_metrics_id(user1).await.unwrap(), metrics_id1);
+    assert_eq!(db.get_user_metrics_id(user2).await.unwrap(), metrics_id2);
+    assert_eq!(metrics_id1.len(), 36);
+    assert_eq!(metrics_id2.len(), 36);
+    assert_ne!(metrics_id1, metrics_id2);
+});
+
+#[test]
+fn test_fuzzy_like_string() {
+    assert_eq!(DefaultDb::fuzzy_like_string("abcd"), "%a%b%c%d%");
+    assert_eq!(DefaultDb::fuzzy_like_string("x y"), "%x%y%");
+    assert_eq!(DefaultDb::fuzzy_like_string(" z "), "%z%");
+}
+
+#[gpui::test]
+async fn test_fuzzy_search_users() {
+    let test_db = PostgresTestDb::new(build_background_executor());
+    let db = test_db.db();
+    for (i, github_login) in [
+        "California",
+        "colorado",
+        "oregon",
+        "washington",
+        "florida",
+        "delaware",
+        "rhode-island",
+    ]
+    .into_iter()
+    .enumerate()
+    {
+        db.create_user(
+            &format!("{github_login}@example.com"),
+            false,
+            NewUserParams {
+                github_login: github_login.into(),
+                github_user_id: i as i32,
+                invite_count: 0,
+            },
+        )
+        .await
+        .unwrap();
+    }
+
+    assert_eq!(
+        fuzzy_search_user_names(db, "clr").await,
+        &["colorado", "California"]
+    );
+    assert_eq!(
+        fuzzy_search_user_names(db, "ro").await,
+        &["rhode-island", "colorado", "oregon"],
+    );
+
+    async fn fuzzy_search_user_names(db: &Db, query: &str) -> Vec<String> {
+        db.fuzzy_search_users(query, 10)
+            .await
+            .unwrap()
+            .into_iter()
+            .map(|user| user.github_login)
+            .collect::<Vec<_>>()
+    }
+}
+
+#[gpui::test]
+async fn test_invite_codes() {
+    let test_db = PostgresTestDb::new(build_background_executor());
+    let db = test_db.db();
+
+    let NewUserResult { user_id: user1, .. } = db
+        .create_user(
+            "user1@example.com",
+            false,
+            NewUserParams {
+                github_login: "user1".into(),
+                github_user_id: 0,
+                invite_count: 0,
+            },
+        )
+        .await
+        .unwrap();
+
+    // Initially, user 1 has no invite code
+    assert_eq!(db.get_invite_code_for_user(user1).await.unwrap(), None);
+
+    // Setting invite count to 0 when no code is assigned does not assign a new code
+    db.set_invite_count_for_user(user1, 0).await.unwrap();
+    assert!(db.get_invite_code_for_user(user1).await.unwrap().is_none());
+
+    // User 1 creates an invite code that can be used twice.
+    db.set_invite_count_for_user(user1, 2).await.unwrap();
+    let (invite_code, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
+    assert_eq!(invite_count, 2);
+
+    // User 2 redeems the invite code and becomes a contact of user 1.
+    let user2_invite = db
+        .create_invite_from_code(&invite_code, "user2@example.com", Some("user-2-device-id"))
+        .await
+        .unwrap();
+    let NewUserResult {
+        user_id: user2,
+        inviting_user_id,
+        signup_device_id,
+        metrics_id,
+    } = db
+        .create_user_from_invite(
+            &user2_invite,
+            NewUserParams {
+                github_login: "user2".into(),
+                github_user_id: 2,
+                invite_count: 7,
+            },
+        )
+        .await
+        .unwrap()
+        .unwrap();
+    let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
+    assert_eq!(invite_count, 1);
+    assert_eq!(inviting_user_id, Some(user1));
+    assert_eq!(signup_device_id.unwrap(), "user-2-device-id");
+    assert_eq!(db.get_user_metrics_id(user2).await.unwrap(), metrics_id);
+    assert_eq!(
+        db.get_contacts(user1).await.unwrap(),
+        [Contact::Accepted {
+            user_id: user2,
+            should_notify: true,
+            busy: false,
+        }]
+    );
+    assert_eq!(
+        db.get_contacts(user2).await.unwrap(),
+        [Contact::Accepted {
+            user_id: user1,
+            should_notify: false,
+            busy: false,
+        }]
+    );
+    assert_eq!(
+        db.get_invite_code_for_user(user2).await.unwrap().unwrap().1,
+        7
+    );
+
+    // User 3 redeems the invite code and becomes a contact of user 1.
+    let user3_invite = db
+        .create_invite_from_code(&invite_code, "user3@example.com", None)
+        .await
+        .unwrap();
+    let NewUserResult {
+        user_id: user3,
+        inviting_user_id,
+        signup_device_id,
+        ..
+    } = db
+        .create_user_from_invite(
+            &user3_invite,
+            NewUserParams {
+                github_login: "user-3".into(),
+                github_user_id: 3,
+                invite_count: 3,
+            },
+        )
+        .await
+        .unwrap()
+        .unwrap();
+    let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
+    assert_eq!(invite_count, 0);
+    assert_eq!(inviting_user_id, Some(user1));
+    assert!(signup_device_id.is_none());
+    assert_eq!(
+        db.get_contacts(user1).await.unwrap(),
+        [
+            Contact::Accepted {
+                user_id: user2,
+                should_notify: true,
+                busy: false,
+            },
+            Contact::Accepted {
+                user_id: user3,
+                should_notify: true,
+                busy: false,
+            }
+        ]
+    );
+    assert_eq!(
+        db.get_contacts(user3).await.unwrap(),
+        [Contact::Accepted {
+            user_id: user1,
+            should_notify: false,
+            busy: false,
+        }]
+    );
+    assert_eq!(
+        db.get_invite_code_for_user(user3).await.unwrap().unwrap().1,
+        3
+    );
+
+    // Trying to redeem the code for the third time results in an error.
+    db.create_invite_from_code(&invite_code, "user4@example.com", Some("user-4-device-id"))
+        .await
+        .unwrap_err();
+
+    // Invite count can be updated after the code has been created.
+    db.set_invite_count_for_user(user1, 2).await.unwrap();
+    let (latest_code, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
+    assert_eq!(latest_code, invite_code); // Invite code doesn't change when we increment above 0
+    assert_eq!(invite_count, 2);
+
+    // User 4 can now redeem the invite code and becomes a contact of user 1.
+    let user4_invite = db
+        .create_invite_from_code(&invite_code, "user4@example.com", Some("user-4-device-id"))
+        .await
+        .unwrap();
+    let user4 = db
+        .create_user_from_invite(
+            &user4_invite,
+            NewUserParams {
+                github_login: "user-4".into(),
+                github_user_id: 4,
+                invite_count: 5,
+            },
+        )
+        .await
+        .unwrap()
+        .unwrap()
+        .user_id;
+
+    let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
+    assert_eq!(invite_count, 1);
+    assert_eq!(
+        db.get_contacts(user1).await.unwrap(),
+        [
+            Contact::Accepted {
+                user_id: user2,
+                should_notify: true,
+                busy: false,
+            },
+            Contact::Accepted {
+                user_id: user3,
+                should_notify: true,
+                busy: false,
+            },
+            Contact::Accepted {
+                user_id: user4,
+                should_notify: true,
+                busy: false,
+            }
+        ]
+    );
+    assert_eq!(
+        db.get_contacts(user4).await.unwrap(),
+        [Contact::Accepted {
+            user_id: user1,
+            should_notify: false,
+            busy: false,
+        }]
+    );
+    assert_eq!(
+        db.get_invite_code_for_user(user4).await.unwrap().unwrap().1,
+        5
+    );
+
+    // An existing user cannot redeem invite codes.
+    db.create_invite_from_code(&invite_code, "user2@example.com", Some("user-2-device-id"))
+        .await
+        .unwrap_err();
+    let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
+    assert_eq!(invite_count, 1);
+}
+
+#[gpui::test]
+async fn test_signups() {
+    let test_db = PostgresTestDb::new(build_background_executor());
+    let db = test_db.db();
+
+    // people sign up on the waitlist
+    for i in 0..8 {
+        db.create_signup(Signup {
+            email_address: format!("person-{i}@example.com"),
+            platform_mac: true,
+            platform_linux: i % 2 == 0,
+            platform_windows: i % 4 == 0,
+            editor_features: vec!["speed".into()],
+            programming_languages: vec!["rust".into(), "c".into()],
+            device_id: Some(format!("device_id_{i}")),
+        })
+        .await
+        .unwrap();
+    }
+
+    assert_eq!(
+        db.get_waitlist_summary().await.unwrap(),
+        WaitlistSummary {
+            count: 8,
+            mac_count: 8,
+            linux_count: 4,
+            windows_count: 2,
+            unknown_count: 0,
+        }
+    );
+
+    // retrieve the next batch of signup emails to send
+    let signups_batch1 = db.get_unsent_invites(3).await.unwrap();
+    let addresses = signups_batch1
+        .iter()
+        .map(|s| &s.email_address)
+        .collect::<Vec<_>>();
+    assert_eq!(
+        addresses,
+        &[
+            "person-0@example.com",
+            "person-1@example.com",
+            "person-2@example.com"
+        ]
+    );
+    assert_ne!(
+        signups_batch1[0].email_confirmation_code,
+        signups_batch1[1].email_confirmation_code
+    );
+
+    // the waitlist isn't updated until we record that the emails
+    // were successfully sent.
+    let signups_batch = db.get_unsent_invites(3).await.unwrap();
+    assert_eq!(signups_batch, signups_batch1);
+
+    // once the emails go out, we can retrieve the next batch
+    // of signups.
+    db.record_sent_invites(&signups_batch1).await.unwrap();
+    let signups_batch2 = db.get_unsent_invites(3).await.unwrap();
+    let addresses = signups_batch2
+        .iter()
+        .map(|s| &s.email_address)
+        .collect::<Vec<_>>();
+    assert_eq!(
+        addresses,
+        &[
+            "person-3@example.com",
+            "person-4@example.com",
+            "person-5@example.com"
+        ]
+    );
+
+    // the sent invites are excluded from the summary.
+    assert_eq!(
+        db.get_waitlist_summary().await.unwrap(),
+        WaitlistSummary {
+            count: 5,
+            mac_count: 5,
+            linux_count: 2,
+            windows_count: 1,
+            unknown_count: 0,
+        }
+    );
+
+    // user completes the signup process by providing their
+    // github account.
+    let NewUserResult {
+        user_id,
+        inviting_user_id,
+        signup_device_id,
+        ..
+    } = db
+        .create_user_from_invite(
+            &Invite {
+                email_address: signups_batch1[0].email_address.clone(),
+                email_confirmation_code: signups_batch1[0].email_confirmation_code.clone(),
+            },
+            NewUserParams {
+                github_login: "person-0".into(),
+                github_user_id: 0,
+                invite_count: 5,
+            },
+        )
+        .await
+        .unwrap()
+        .unwrap();
+    let user = db.get_user_by_id(user_id).await.unwrap().unwrap();
+    assert!(inviting_user_id.is_none());
+    assert_eq!(user.github_login, "person-0");
+    assert_eq!(user.email_address.as_deref(), Some("person-0@example.com"));
+    assert_eq!(user.invite_count, 5);
+    assert_eq!(signup_device_id.unwrap(), "device_id_0");
+
+    // cannot redeem the same signup again.
+    assert!(db
+        .create_user_from_invite(
+            &Invite {
+                email_address: signups_batch1[0].email_address.clone(),
+                email_confirmation_code: signups_batch1[0].email_confirmation_code.clone(),
+            },
+            NewUserParams {
+                github_login: "some-other-github_account".into(),
+                github_user_id: 1,
+                invite_count: 5,
+            },
+        )
+        .await
+        .unwrap()
+        .is_none());
+
+    // cannot redeem a signup with the wrong confirmation code.
+    db.create_user_from_invite(
+        &Invite {
+            email_address: signups_batch1[1].email_address.clone(),
+            email_confirmation_code: "the-wrong-code".to_string(),
+        },
+        NewUserParams {
+            github_login: "person-1".into(),
+            github_user_id: 2,
+            invite_count: 5,
+        },
+    )
+    .await
+    .unwrap_err();
+}
+
+fn build_background_executor() -> Arc<Background> {
+    Deterministic::new(0).build_background()
+}
diff --git a/crates/collab/src/db2/user.rs b/crates/collab/src/db2/user.rs
new file mode 100644
index 0000000000..de865db679
--- /dev/null
+++ b/crates/collab/src/db2/user.rs
@@ -0,0 +1,21 @@
+use super::UserId;
+use sea_orm::entity::prelude::*;
+
+#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
+#[sea_orm(table_name = "users")]
+pub struct Model {
+    #[sea_orm(primary_key)]
+    pub id: UserId,
+    pub github_login: String,
+    pub github_user_id: Option<i32>,
+    pub email_address: Option<String>,
+    pub admin: bool,
+    pub invite_code: Option<String>,
+    pub invite_count: i32,
+    pub connected_once: bool,
+}
+
+#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
+pub enum Relation {}
+
+impl ActiveModelBehavior for ActiveModel {}

From d9a892a423362c8f85157c94255e4b552b25a0e2 Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Wed, 30 Nov 2022 12:06:25 +0100
Subject: [PATCH 077/240] Make some db tests pass against the new sea-orm
 implementation

---
 .../20221109000000_test_schema.sql            |    2 +-
 crates/collab/src/db2.rs                      |  164 +-
 crates/collab/src/db2/project.rs              |    7 +-
 crates/collab/src/db2/project_collaborator.rs |    7 +-
 crates/collab/src/db2/room.rs                 |    3 +-
 crates/collab/src/db2/room_participant.rs     |   13 +-
 crates/collab/src/db2/tests.rs                | 1357 +++++++++--------
 crates/collab/src/db2/user.rs                 |    3 +-
 crates/collab/src/db2/worktree.rs             |    4 +-
 9 files changed, 849 insertions(+), 711 deletions(-)

diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql
index 65bf00e74c..aeb6b7f720 100644
--- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql
+++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql
@@ -8,7 +8,7 @@ CREATE TABLE "users" (
     "inviter_id" INTEGER REFERENCES users (id),
     "connected_once" BOOLEAN NOT NULL DEFAULT false,
     "created_at" TIMESTAMP NOT NULL DEFAULT now,
-    "metrics_id" VARCHAR(255),
+    "metrics_id" TEXT,
     "github_user_id" INTEGER
 );
 CREATE UNIQUE INDEX "index_users_github_login" ON "users" ("github_login");
diff --git a/crates/collab/src/db2.rs b/crates/collab/src/db2.rs
index 765fea315d..47ddf8cd22 100644
--- a/crates/collab/src/db2.rs
+++ b/crates/collab/src/db2.rs
@@ -18,6 +18,7 @@ use sea_orm::{
     entity::prelude::*, ConnectOptions, DatabaseConnection, DatabaseTransaction, DbErr,
     TransactionTrait,
 };
+use sea_query::OnConflict;
 use serde::{Deserialize, Serialize};
 use sqlx::migrate::{Migrate, Migration, MigrationSource};
 use sqlx::Connection;
@@ -42,7 +43,7 @@ impl Database {
     pub async fn new(url: &str, max_connections: u32) -> Result<Self> {
         let mut options = ConnectOptions::new(url.into());
-        options.min_connections(1).max_connections(max_connections);
+        options.max_connections(max_connections);
         Ok(Self {
             url: url.into(),
             pool: sea_orm::Database::connect(options).await?,
             rooms: DashMap::with_capacity(16384),
             #[cfg(test)]
@@ -58,7 +59,7 @@ impl Database {
         &self,
         migrations_path: &Path,
         ignore_checksum_mismatch: bool,
-    ) -> anyhow::Result<Vec<(Migration, Duration)>> {
+    ) -> anyhow::Result<(sqlx::AnyConnection, Vec<(Migration, Duration)>)> {
         let migrations = MigrationSource::resolve(migrations_path)
             .await
             .map_err(|err| anyhow!("failed to load migrations: {err:?}"))?;
@@ -92,11 +93,45 @@ impl Database {
             }
         }
 
-        Ok(new_migrations)
+        Ok((connection, new_migrations))
+    }
+
+    pub async fn create_user(
+        &self,
+        email_address: &str,
+        admin: bool,
+        params: NewUserParams,
+    ) -> Result<NewUserResult> {
+        self.transact(|tx| async {
+            let user = user::Entity::insert(user::ActiveModel {
+                email_address: ActiveValue::set(Some(email_address.into())),
+                github_login: ActiveValue::set(params.github_login.clone()),
+                github_user_id: ActiveValue::set(Some(params.github_user_id)),
+                admin: ActiveValue::set(admin),
+                metrics_id: ActiveValue::set(Uuid::new_v4()),
+                ..Default::default()
+            })
+            .on_conflict(
+                OnConflict::column(user::Column::GithubLogin)
+                    .update_column(user::Column::GithubLogin)
+                    .to_owned(),
+            )
+            .exec_with_returning(&tx)
+            .await?;
+
+            tx.commit().await?;
+
+            Ok(NewUserResult {
+                user_id: user.id,
+                metrics_id: user.metrics_id.to_string(),
+                signup_device_id: None,
+                inviting_user_id: None,
+            })
+        })
+        .await
     }
 
     pub async fn get_users_by_ids(&self, ids: Vec<UserId>) -> Result<Vec<User>> {
-        let ids = ids.iter().map(|id| id.0).collect::<Vec<_>>();
         self.transact(|tx| async {
             let tx = tx;
             Ok(user::Entity::find()
                 .filter(user::Column::Id.is_in(ids.iter().copied()))
                 .all(&tx)
                 .await?)
         })
         .await
@@ -119,7 +154,7 @@ impl Database {
             .one(&tx)
             .await?
             .ok_or_else(|| anyhow!("could not find participant"))?;
-        if participant.room_id != room_id.0 {
+        if participant.room_id != room_id {
             return Err(anyhow!("shared project on unexpected room"))?;
         }
 
@@ -156,14 +191,14 @@ impl Database {
             .await?;
 
         let room = self.get_room(room_id, &tx).await?;
-        self.commit_room_transaction(room_id, tx, (ProjectId(project.id), room))
+        self.commit_room_transaction(room_id, tx, (project.id, room))
             .await
     }
 
     async fn get_room(&self, room_id: RoomId, tx: &DatabaseTransaction) -> Result<proto::Room> {
-        let db_room = room::Entity::find_by_id(room_id.0)
+        let db_room = room::Entity::find_by_id(room_id)
             .one(tx)
             .await?
             .ok_or_else(|| anyhow!("could not find room"))?;
@@ -184,7 +219,7 @@
                 (Some(0), Some(project_id)) => {
                     Some(proto::participant_location::Variant::SharedProject(
                         proto::participant_location::SharedProject {
-                            id: project_id as u64,
+                            id: project_id.to_proto(),
                        },
                    ))
                }
@@ -198,7 +233,7 @@
             participants.insert(
                 answering_connection_id,
                 proto::Participant {
-                    user_id: db_participant.user_id as u64,
+                    user_id: db_participant.user_id.to_proto(),
                     peer_id: answering_connection_id as u32,
                     projects: Default::default(),
                     location: Some(proto::ParticipantLocation { variant: location }),
@@ -206,9 +241,9 @@
             );
         } else {
             pending_participants.push(proto::PendingParticipant {
-                user_id: db_participant.user_id as u64,
-                calling_user_id: db_participant.calling_user_id as u64,
-                initial_project_id: db_participant.initial_project_id.map(|id| id as u64),
+                user_id: db_participant.user_id.to_proto(),
+                calling_user_id: db_participant.calling_user_id.to_proto(),
+                initial_project_id: db_participant.initial_project_id.map(|id| id.to_proto()),
            });
        }
    }
@@ -225,12 +260,12 @@
                 let project = if let Some(project) = participant
                     .projects
                     .iter_mut()
-                    .find(|project| project.id as i32 == db_project.id)
+                    .find(|project| project.id == db_project.id.to_proto())
                 {
                     project
                 } else {
                     participant.projects.push(proto::ParticipantProject {
-                        id: db_project.id as u64,
+                        id: db_project.id.to_proto(),
                         worktree_root_names: Default::default(),
                     });
                     participant.projects.last_mut().unwrap()
@@ -243,7 +278,7 @@
        }
 
        Ok(proto::Room {
-            id: db_room.id as u64,
+            id: db_room.id.to_proto(),
             live_kit_room: db_room.live_kit_room,
             participants: participants.into_values().collect(),
             pending_participants,
@@ -393,6 +428,84 @@ macro_rules! id_type {
                 self.0.fmt(f)
             }
         }
+
+        impl From<$name> for sea_query::Value {
+            fn from(value: $name) -> Self {
+                sea_query::Value::Int(Some(value.0))
+            }
+        }
+
+        impl sea_orm::TryGetable for $name {
+            fn try_get(
+                res: &sea_orm::QueryResult,
+                pre: &str,
+                col: &str,
+            ) -> Result<Self, sea_orm::TryGetError> {
+                Ok(Self(i32::try_get(res, pre, col)?))
+            }
+        }
+
+        impl sea_query::ValueType for $name {
+            fn try_from(v: Value) -> Result<Self, sea_query::ValueTypeErr> {
+                match v {
+                    Value::TinyInt(Some(int)) => {
+                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
+                    }
+                    Value::SmallInt(Some(int)) => {
+                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
+                    }
+                    Value::Int(Some(int)) => {
+                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
+                    }
+                    Value::BigInt(Some(int)) => {
+                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
+                    }
+                    Value::TinyUnsigned(Some(int)) => {
+                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
+                    }
+                    Value::SmallUnsigned(Some(int)) => {
+                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
+                    }
+                    Value::Unsigned(Some(int)) => {
+                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
+                    }
+                    Value::BigUnsigned(Some(int)) => {
+                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
+                    }
+                    _ => Err(sea_query::ValueTypeErr),
+                }
+            }
+
+            fn type_name() -> String {
+                stringify!($name).into()
+            }
+
+            fn array_type() -> sea_query::ArrayType {
+                sea_query::ArrayType::Int
+            }
+
+            fn column_type() -> sea_query::ColumnType {
+                sea_query::ColumnType::Integer(None)
+            }
+        }
+
+        impl sea_orm::TryFromU64 for $name {
+            fn try_from_u64(n: u64) -> Result<Self, DbErr> {
+                Ok(Self(n.try_into().map_err(|_| {
+                    DbErr::ConvertFromU64(concat!(
+                        "error converting ",
+                        stringify!($name),
+                        " to u64"
+                    ))
+                })?))
+            }
+        }
+
+        impl sea_query::Nullable for $name {
+            fn null() -> Value {
+                Value::Int(None)
+            }
+        }
     };
 }
 
@@ -400,6 +513,7 @@ id_type!(UserId);
 id_type!(RoomId);
 id_type!(RoomParticipantId);
 id_type!(ProjectId);
+id_type!(ProjectCollaboratorId);
 id_type!(WorktreeId);
 
 #[cfg(test)]
@@ -412,17 +526,18 @@ mod test {
     use lazy_static::lazy_static;
     use parking_lot::Mutex;
     use rand::prelude::*;
+    use sea_orm::ConnectionTrait;
     use sqlx::migrate::MigrateDatabase;
     use std::sync::Arc;
 
     pub struct TestDb {
         pub db: Option<Arc<Database>>,
+        pub connection: Option<sqlx::AnyConnection>,
     }
 
     impl TestDb {
         pub fn sqlite(background: Arc<Background>) -> Self {
-            let mut rng = StdRng::from_entropy();
-            let url = format!("sqlite://file:zed-test-{}?mode=memory", rng.gen::<u128>());
+            let url = format!("sqlite::memory:");
             let runtime = tokio::runtime::Builder::new_current_thread()
                 .enable_io()
                 .enable_time()
                 .build()
                 .unwrap();
 
             let mut db = runtime.block_on(async {
                 let db = Database::new(&url, 5).await.unwrap();
-                let migrations_path = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations.sqlite");
-                db.migrate(migrations_path.as_ref(), false).await.unwrap();
+                let sql = include_str!(concat!(
+                    env!("CARGO_MANIFEST_DIR"),
+                    "/migrations.sqlite/20221109000000_test_schema.sql"
+                ));
+                db.pool
+                    .execute(sea_orm::Statement::from_string(
+                        db.pool.get_database_backend(),
+                        sql.into(),
+                    ))
+                    .await
+                    .unwrap();
                 db
             });
 
             db.background = Some(background);
             db.runtime = Some(runtime);
 
             Self {
                 db: Some(Arc::new(db)),
+                connection: None,
             }
         }
 
@@ -476,6 +601,7 @@ mod test {
 
             Self {
                 db: Some(Arc::new(db)),
+                connection: None,
             }
         }
 
diff --git a/crates/collab/src/db2/project.rs b/crates/collab/src/db2/project.rs
index 4ae0616835..21ee0b27d1 100644
--- a/crates/collab/src/db2/project.rs
+++ b/crates/collab/src/db2/project.rs
@@ -1,12 +1,13 @@
+use super::{ProjectId, RoomId, UserId};
 use sea_orm::entity::prelude::*;
 
 #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
 #[sea_orm(table_name = "projects")]
 pub struct Model {
     #[sea_orm(primary_key)]
-    pub id: i32,
-    pub room_id: i32,
-    pub host_user_id: i32,
+    pub id: ProjectId,
+    pub room_id: RoomId,
+    pub host_user_id: UserId,
     pub host_connection_id: i32,
 }
 
diff --git a/crates/collab/src/db2/project_collaborator.rs b/crates/collab/src/db2/project_collaborator.rs
index da567eb2c2..3e572fe5d4 100644
--- a/crates/collab/src/db2/project_collaborator.rs
+++ b/crates/collab/src/db2/project_collaborator.rs
@@ -1,13 +1,14 @@
+use super::{ProjectCollaboratorId, ProjectId, UserId};
 use sea_orm::entity::prelude::*;
 
 #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
 #[sea_orm(table_name = "project_collaborators")]
 pub struct Model {
     #[sea_orm(primary_key)]
-    pub id: i32,
-    pub project_id: i32,
+    pub id: ProjectCollaboratorId,
+    pub project_id: ProjectId,
     pub connection_id: i32,
-    pub user_id: i32,
+    pub user_id: UserId,
     pub replica_id: i32,
     pub is_host: bool,
 }
diff --git a/crates/collab/src/db2/room.rs b/crates/collab/src/db2/room.rs
index 18f1d234e5..b57e612d46 100644
--- a/crates/collab/src/db2/room.rs
+++ b/crates/collab/src/db2/room.rs
@@ -1,10 +1,11 @@
+use super::RoomId;
 use sea_orm::entity::prelude::*;
 
 #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
 #[sea_orm(table_name = "room_participants")]
 pub struct Model {
     #[sea_orm(primary_key)]
-    pub id: i32,
+    pub id: RoomId,
     pub live_kit_room: String,
 }
 
diff --git a/crates/collab/src/db2/room_participant.rs b/crates/collab/src/db2/room_participant.rs
index c9b7a13e07..4fabfc3068 100644
--- a/crates/collab/src/db2/room_participant.rs
+++ b/crates/collab/src/db2/room_participant.rs
@@ -1,17 +1,18 @@
+use super::{ProjectId, RoomId, RoomParticipantId, UserId};
 use sea_orm::entity::prelude::*;
 
 #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
 #[sea_orm(table_name = "room_participants")]
 pub struct Model {
     #[sea_orm(primary_key)]
-    pub id: i32,
-    pub room_id: i32,
-    pub user_id: i32,
+    pub id: RoomParticipantId,
+    pub room_id: RoomId,
+    pub user_id: UserId,
     pub answering_connection_id: Option<i32>,
     pub location_kind: Option<i32>,
-    pub location_project_id: Option<i32>,
-    pub initial_project_id: Option<i32>,
-    pub calling_user_id: i32,
+    pub location_project_id: Option<ProjectId>,
+    pub initial_project_id: Option<ProjectId>,
+    pub calling_user_id: UserId,
     pub calling_connection_id: i32,
 }
 
diff --git a/crates/collab/src/db2/tests.rs b/crates/collab/src/db2/tests.rs
index 6d88785938..a5bac24140 100644
--- a/crates/collab/src/db2/tests.rs
+++ b/crates/collab/src/db2/tests.rs
@@ -26,9 +26,10 @@ test_both_dbs!(
     db,
     {
         let mut user_ids = Vec::new();
+        let mut user_metric_ids = Vec::new();
         for i in 1..=4 {
-            user_ids.push(
-                db.create_user(
+            let user = db
+                .create_user(
                     &format!("user{i}@example.com"),
                     false,
                     NewUserParams {
                         github_login: format!("user{i}"),
                         github_user_id: i,
                         invite_count: 0,
                     },
                 )
                 .await
-                .unwrap()
-                .user_id,
-            );
+                .unwrap();
+            user_ids.push(user.user_id);
+            user_metric_ids.push(user.metrics_id);
         }
 
         assert_eq!(
             db.get_users_by_ids(user_ids.clone()).await.unwrap(),
             vec![
                 User {
                     id: user_ids[0],
                     github_login: "user1".to_string(),
                     github_user_id: Some(1),
                     email_address: Some("user1@example.com".to_string()),
                     admin: false,
+                    metrics_id: user_metric_ids[0].parse().unwrap(),
                     ..Default::default()
                 },
                 User {
                     id: user_ids[1],
                     github_login: "user2".to_string(),
                     github_user_id: Some(2),
                     email_address: Some("user2@example.com".to_string()),
                     admin: false,
+                    metrics_id: user_metric_ids[1].parse().unwrap(),
                     ..Default::default()
                 },
                 User {
                     id: user_ids[2],
                     github_login: "user3".to_string(),
                     github_user_id: Some(3),
                     email_address:
Some("user3@example.com".to_string()), admin: false, + metrics_id: user_metric_ids[2].parse().unwrap(), ..Default::default() }, User { @@ -76,6 +80,7 @@ test_both_dbs!( github_user_id: Some(4), email_address: Some("user4@example.com".to_string()), admin: false, + metrics_id: user_metric_ids[3].parse().unwrap(), ..Default::default() } ] @@ -83,725 +88,725 @@ test_both_dbs!( } ); -test_both_dbs!( - test_get_user_by_github_account_postgres, - test_get_user_by_github_account_sqlite, - db, - { - let user_id1 = db - .create_user( - "user1@example.com", - false, - NewUserParams { - github_login: "login1".into(), - github_user_id: 101, - invite_count: 0, - }, - ) - .await - .unwrap() - .user_id; - let user_id2 = db - .create_user( - "user2@example.com", - false, - NewUserParams { - github_login: "login2".into(), - github_user_id: 102, - invite_count: 0, - }, - ) - .await - .unwrap() - .user_id; +// test_both_dbs!( +// test_get_user_by_github_account_postgres, +// test_get_user_by_github_account_sqlite, +// db, +// { +// let user_id1 = db +// .create_user( +// "user1@example.com", +// false, +// NewUserParams { +// github_login: "login1".into(), +// github_user_id: 101, +// invite_count: 0, +// }, +// ) +// .await +// .unwrap() +// .user_id; +// let user_id2 = db +// .create_user( +// "user2@example.com", +// false, +// NewUserParams { +// github_login: "login2".into(), +// github_user_id: 102, +// invite_count: 0, +// }, +// ) +// .await +// .unwrap() +// .user_id; - let user = db - .get_user_by_github_account("login1", None) - .await - .unwrap() - .unwrap(); - assert_eq!(user.id, user_id1); - assert_eq!(&user.github_login, "login1"); - assert_eq!(user.github_user_id, Some(101)); +// let user = db +// .get_user_by_github_account("login1", None) +// .await +// .unwrap() +// .unwrap(); +// assert_eq!(user.id, user_id1); +// assert_eq!(&user.github_login, "login1"); +// assert_eq!(user.github_user_id, Some(101)); - assert!(db - .get_user_by_github_account("non-existent-login", None) - .await - .unwrap() - .is_none()); +// assert!(db +// .get_user_by_github_account("non-existent-login", None) +// .await +// .unwrap() +// .is_none()); - let user = db - .get_user_by_github_account("the-new-login2", Some(102)) - .await - .unwrap() - .unwrap(); - assert_eq!(user.id, user_id2); - assert_eq!(&user.github_login, "the-new-login2"); - assert_eq!(user.github_user_id, Some(102)); - } -); +// let user = db +// .get_user_by_github_account("the-new-login2", Some(102)) +// .await +// .unwrap() +// .unwrap(); +// assert_eq!(user.id, user_id2); +// assert_eq!(&user.github_login, "the-new-login2"); +// assert_eq!(user.github_user_id, Some(102)); +// } +// ); -test_both_dbs!( - test_create_access_tokens_postgres, - test_create_access_tokens_sqlite, - db, - { - let user = db - .create_user( - "u1@example.com", - false, - NewUserParams { - github_login: "u1".into(), - github_user_id: 1, - invite_count: 0, - }, - ) - .await - .unwrap() - .user_id; +// test_both_dbs!( +// test_create_access_tokens_postgres, +// test_create_access_tokens_sqlite, +// db, +// { +// let user = db +// .create_user( +// "u1@example.com", +// false, +// NewUserParams { +// github_login: "u1".into(), +// github_user_id: 1, +// invite_count: 0, +// }, +// ) +// .await +// .unwrap() +// .user_id; - db.create_access_token_hash(user, "h1", 3).await.unwrap(); - db.create_access_token_hash(user, "h2", 3).await.unwrap(); - assert_eq!( - db.get_access_token_hashes(user).await.unwrap(), - &["h2".to_string(), "h1".to_string()] - ); +// 
db.create_access_token_hash(user, "h1", 3).await.unwrap(); +// db.create_access_token_hash(user, "h2", 3).await.unwrap(); +// assert_eq!( +// db.get_access_token_hashes(user).await.unwrap(), +// &["h2".to_string(), "h1".to_string()] +// ); - db.create_access_token_hash(user, "h3", 3).await.unwrap(); - assert_eq!( - db.get_access_token_hashes(user).await.unwrap(), - &["h3".to_string(), "h2".to_string(), "h1".to_string(),] - ); +// db.create_access_token_hash(user, "h3", 3).await.unwrap(); +// assert_eq!( +// db.get_access_token_hashes(user).await.unwrap(), +// &["h3".to_string(), "h2".to_string(), "h1".to_string(),] +// ); - db.create_access_token_hash(user, "h4", 3).await.unwrap(); - assert_eq!( - db.get_access_token_hashes(user).await.unwrap(), - &["h4".to_string(), "h3".to_string(), "h2".to_string(),] - ); +// db.create_access_token_hash(user, "h4", 3).await.unwrap(); +// assert_eq!( +// db.get_access_token_hashes(user).await.unwrap(), +// &["h4".to_string(), "h3".to_string(), "h2".to_string(),] +// ); - db.create_access_token_hash(user, "h5", 3).await.unwrap(); - assert_eq!( - db.get_access_token_hashes(user).await.unwrap(), - &["h5".to_string(), "h4".to_string(), "h3".to_string()] - ); - } -); +// db.create_access_token_hash(user, "h5", 3).await.unwrap(); +// assert_eq!( +// db.get_access_token_hashes(user).await.unwrap(), +// &["h5".to_string(), "h4".to_string(), "h3".to_string()] +// ); +// } +// ); -test_both_dbs!(test_add_contacts_postgres, test_add_contacts_sqlite, db, { - let mut user_ids = Vec::new(); - for i in 0..3 { - user_ids.push( - db.create_user( - &format!("user{i}@example.com"), - false, - NewUserParams { - github_login: format!("user{i}"), - github_user_id: i, - invite_count: 0, - }, - ) - .await - .unwrap() - .user_id, - ); - } +// test_both_dbs!(test_add_contacts_postgres, test_add_contacts_sqlite, db, { +// let mut user_ids = Vec::new(); +// for i in 0..3 { +// user_ids.push( +// db.create_user( +// &format!("user{i}@example.com"), +// false, +// NewUserParams { +// github_login: format!("user{i}"), +// github_user_id: i, +// invite_count: 0, +// }, +// ) +// .await +// .unwrap() +// .user_id, +// ); +// } - let user_1 = user_ids[0]; - let user_2 = user_ids[1]; - let user_3 = user_ids[2]; +// let user_1 = user_ids[0]; +// let user_2 = user_ids[1]; +// let user_3 = user_ids[2]; - // User starts with no contacts - assert_eq!(db.get_contacts(user_1).await.unwrap(), &[]); +// // User starts with no contacts +// assert_eq!(db.get_contacts(user_1).await.unwrap(), &[]); - // User requests a contact. Both users see the pending request. - db.send_contact_request(user_1, user_2).await.unwrap(); - assert!(!db.has_contact(user_1, user_2).await.unwrap()); - assert!(!db.has_contact(user_2, user_1).await.unwrap()); - assert_eq!( - db.get_contacts(user_1).await.unwrap(), - &[Contact::Outgoing { user_id: user_2 }], - ); - assert_eq!( - db.get_contacts(user_2).await.unwrap(), - &[Contact::Incoming { - user_id: user_1, - should_notify: true - }] - ); +// // User requests a contact. Both users see the pending request. 
+// db.send_contact_request(user_1, user_2).await.unwrap(); +// assert!(!db.has_contact(user_1, user_2).await.unwrap()); +// assert!(!db.has_contact(user_2, user_1).await.unwrap()); +// assert_eq!( +// db.get_contacts(user_1).await.unwrap(), +// &[Contact::Outgoing { user_id: user_2 }], +// ); +// assert_eq!( +// db.get_contacts(user_2).await.unwrap(), +// &[Contact::Incoming { +// user_id: user_1, +// should_notify: true +// }] +// ); - // User 2 dismisses the contact request notification without accepting or rejecting. - // We shouldn't notify them again. - db.dismiss_contact_notification(user_1, user_2) - .await - .unwrap_err(); - db.dismiss_contact_notification(user_2, user_1) - .await - .unwrap(); - assert_eq!( - db.get_contacts(user_2).await.unwrap(), - &[Contact::Incoming { - user_id: user_1, - should_notify: false - }] - ); +// // User 2 dismisses the contact request notification without accepting or rejecting. +// // We shouldn't notify them again. +// db.dismiss_contact_notification(user_1, user_2) +// .await +// .unwrap_err(); +// db.dismiss_contact_notification(user_2, user_1) +// .await +// .unwrap(); +// assert_eq!( +// db.get_contacts(user_2).await.unwrap(), +// &[Contact::Incoming { +// user_id: user_1, +// should_notify: false +// }] +// ); - // User can't accept their own contact request - db.respond_to_contact_request(user_1, user_2, true) - .await - .unwrap_err(); +// // User can't accept their own contact request +// db.respond_to_contact_request(user_1, user_2, true) +// .await +// .unwrap_err(); - // User accepts a contact request. Both users see the contact. - db.respond_to_contact_request(user_2, user_1, true) - .await - .unwrap(); - assert_eq!( - db.get_contacts(user_1).await.unwrap(), - &[Contact::Accepted { - user_id: user_2, - should_notify: true, - busy: false, - }], - ); - assert!(db.has_contact(user_1, user_2).await.unwrap()); - assert!(db.has_contact(user_2, user_1).await.unwrap()); - assert_eq!( - db.get_contacts(user_2).await.unwrap(), - &[Contact::Accepted { - user_id: user_1, - should_notify: false, - busy: false, - }] - ); +// // User accepts a contact request. Both users see the contact. +// db.respond_to_contact_request(user_2, user_1, true) +// .await +// .unwrap(); +// assert_eq!( +// db.get_contacts(user_1).await.unwrap(), +// &[Contact::Accepted { +// user_id: user_2, +// should_notify: true, +// busy: false, +// }], +// ); +// assert!(db.has_contact(user_1, user_2).await.unwrap()); +// assert!(db.has_contact(user_2, user_1).await.unwrap()); +// assert_eq!( +// db.get_contacts(user_2).await.unwrap(), +// &[Contact::Accepted { +// user_id: user_1, +// should_notify: false, +// busy: false, +// }] +// ); - // Users cannot re-request existing contacts. - db.send_contact_request(user_1, user_2).await.unwrap_err(); - db.send_contact_request(user_2, user_1).await.unwrap_err(); +// // Users cannot re-request existing contacts. +// db.send_contact_request(user_1, user_2).await.unwrap_err(); +// db.send_contact_request(user_2, user_1).await.unwrap_err(); - // Users can't dismiss notifications of them accepting other users' requests. - db.dismiss_contact_notification(user_2, user_1) - .await - .unwrap_err(); - assert_eq!( - db.get_contacts(user_1).await.unwrap(), - &[Contact::Accepted { - user_id: user_2, - should_notify: true, - busy: false, - }] - ); +// // Users can't dismiss notifications of them accepting other users' requests. 
+// db.dismiss_contact_notification(user_2, user_1) +// .await +// .unwrap_err(); +// assert_eq!( +// db.get_contacts(user_1).await.unwrap(), +// &[Contact::Accepted { +// user_id: user_2, +// should_notify: true, +// busy: false, +// }] +// ); - // Users can dismiss notifications of other users accepting their requests. - db.dismiss_contact_notification(user_1, user_2) - .await - .unwrap(); - assert_eq!( - db.get_contacts(user_1).await.unwrap(), - &[Contact::Accepted { - user_id: user_2, - should_notify: false, - busy: false, - }] - ); +// // Users can dismiss notifications of other users accepting their requests. +// db.dismiss_contact_notification(user_1, user_2) +// .await +// .unwrap(); +// assert_eq!( +// db.get_contacts(user_1).await.unwrap(), +// &[Contact::Accepted { +// user_id: user_2, +// should_notify: false, +// busy: false, +// }] +// ); - // Users send each other concurrent contact requests and - // see that they are immediately accepted. - db.send_contact_request(user_1, user_3).await.unwrap(); - db.send_contact_request(user_3, user_1).await.unwrap(); - assert_eq!( - db.get_contacts(user_1).await.unwrap(), - &[ - Contact::Accepted { - user_id: user_2, - should_notify: false, - busy: false, - }, - Contact::Accepted { - user_id: user_3, - should_notify: false, - busy: false, - } - ] - ); - assert_eq!( - db.get_contacts(user_3).await.unwrap(), - &[Contact::Accepted { - user_id: user_1, - should_notify: false, - busy: false, - }], - ); +// // Users send each other concurrent contact requests and +// // see that they are immediately accepted. +// db.send_contact_request(user_1, user_3).await.unwrap(); +// db.send_contact_request(user_3, user_1).await.unwrap(); +// assert_eq!( +// db.get_contacts(user_1).await.unwrap(), +// &[ +// Contact::Accepted { +// user_id: user_2, +// should_notify: false, +// busy: false, +// }, +// Contact::Accepted { +// user_id: user_3, +// should_notify: false, +// busy: false, +// } +// ] +// ); +// assert_eq!( +// db.get_contacts(user_3).await.unwrap(), +// &[Contact::Accepted { +// user_id: user_1, +// should_notify: false, +// busy: false, +// }], +// ); - // User declines a contact request. Both users see that it is gone. - db.send_contact_request(user_2, user_3).await.unwrap(); - db.respond_to_contact_request(user_3, user_2, false) - .await - .unwrap(); - assert!(!db.has_contact(user_2, user_3).await.unwrap()); - assert!(!db.has_contact(user_3, user_2).await.unwrap()); - assert_eq!( - db.get_contacts(user_2).await.unwrap(), - &[Contact::Accepted { - user_id: user_1, - should_notify: false, - busy: false, - }] - ); - assert_eq!( - db.get_contacts(user_3).await.unwrap(), - &[Contact::Accepted { - user_id: user_1, - should_notify: false, - busy: false, - }], - ); -}); +// // User declines a contact request. Both users see that it is gone. 
+// db.send_contact_request(user_2, user_3).await.unwrap(); +// db.respond_to_contact_request(user_3, user_2, false) +// .await +// .unwrap(); +// assert!(!db.has_contact(user_2, user_3).await.unwrap()); +// assert!(!db.has_contact(user_3, user_2).await.unwrap()); +// assert_eq!( +// db.get_contacts(user_2).await.unwrap(), +// &[Contact::Accepted { +// user_id: user_1, +// should_notify: false, +// busy: false, +// }] +// ); +// assert_eq!( +// db.get_contacts(user_3).await.unwrap(), +// &[Contact::Accepted { +// user_id: user_1, +// should_notify: false, +// busy: false, +// }], +// ); +// }); -test_both_dbs!(test_metrics_id_postgres, test_metrics_id_sqlite, db, { - let NewUserResult { - user_id: user1, - metrics_id: metrics_id1, - .. - } = db - .create_user( - "person1@example.com", - false, - NewUserParams { - github_login: "person1".into(), - github_user_id: 101, - invite_count: 5, - }, - ) - .await - .unwrap(); - let NewUserResult { - user_id: user2, - metrics_id: metrics_id2, - .. - } = db - .create_user( - "person2@example.com", - false, - NewUserParams { - github_login: "person2".into(), - github_user_id: 102, - invite_count: 5, - }, - ) - .await - .unwrap(); +// test_both_dbs!(test_metrics_id_postgres, test_metrics_id_sqlite, db, { +// let NewUserResult { +// user_id: user1, +// metrics_id: metrics_id1, +// .. +// } = db +// .create_user( +// "person1@example.com", +// false, +// NewUserParams { +// github_login: "person1".into(), +// github_user_id: 101, +// invite_count: 5, +// }, +// ) +// .await +// .unwrap(); +// let NewUserResult { +// user_id: user2, +// metrics_id: metrics_id2, +// .. +// } = db +// .create_user( +// "person2@example.com", +// false, +// NewUserParams { +// github_login: "person2".into(), +// github_user_id: 102, +// invite_count: 5, +// }, +// ) +// .await +// .unwrap(); - assert_eq!(db.get_user_metrics_id(user1).await.unwrap(), metrics_id1); - assert_eq!(db.get_user_metrics_id(user2).await.unwrap(), metrics_id2); - assert_eq!(metrics_id1.len(), 36); - assert_eq!(metrics_id2.len(), 36); - assert_ne!(metrics_id1, metrics_id2); -}); +// assert_eq!(db.get_user_metrics_id(user1).await.unwrap(), metrics_id1); +// assert_eq!(db.get_user_metrics_id(user2).await.unwrap(), metrics_id2); +// assert_eq!(metrics_id1.len(), 36); +// assert_eq!(metrics_id2.len(), 36); +// assert_ne!(metrics_id1, metrics_id2); +// }); -#[test] -fn test_fuzzy_like_string() { - assert_eq!(DefaultDb::fuzzy_like_string("abcd"), "%a%b%c%d%"); - assert_eq!(DefaultDb::fuzzy_like_string("x y"), "%x%y%"); - assert_eq!(DefaultDb::fuzzy_like_string(" z "), "%z%"); -} +// #[test] +// fn test_fuzzy_like_string() { +// assert_eq!(DefaultDb::fuzzy_like_string("abcd"), "%a%b%c%d%"); +// assert_eq!(DefaultDb::fuzzy_like_string("x y"), "%x%y%"); +// assert_eq!(DefaultDb::fuzzy_like_string(" z "), "%z%"); +// } -#[gpui::test] -async fn test_fuzzy_search_users() { - let test_db = PostgresTestDb::new(build_background_executor()); - let db = test_db.db(); - for (i, github_login) in [ - "California", - "colorado", - "oregon", - "washington", - "florida", - "delaware", - "rhode-island", - ] - .into_iter() - .enumerate() - { - db.create_user( - &format!("{github_login}@example.com"), - false, - NewUserParams { - github_login: github_login.into(), - github_user_id: i as i32, - invite_count: 0, - }, - ) - .await - .unwrap(); - } +// #[gpui::test] +// async fn test_fuzzy_search_users() { +// let test_db = PostgresTestDb::new(build_background_executor()); +// let db = test_db.db(); +// for (i, github_login) in [ 
+//         "California",
+//         "colorado",
+//         "oregon",
+//         "washington",
+//         "florida",
+//         "delaware",
+//         "rhode-island",
+//     ]
+//     .into_iter()
+//     .enumerate()
+//     {
+//         db.create_user(
+//             &format!("{github_login}@example.com"),
+//             false,
+//             NewUserParams {
+//                 github_login: github_login.into(),
+//                 github_user_id: i as i32,
+//                 invite_count: 0,
+//             },
+//         )
+//         .await
+//         .unwrap();
+//     }
 
-    assert_eq!(
-        fuzzy_search_user_names(db, "clr").await,
-        &["colorado", "California"]
-    );
-    assert_eq!(
-        fuzzy_search_user_names(db, "ro").await,
-        &["rhode-island", "colorado", "oregon"],
-    );
+//     assert_eq!(
+//         fuzzy_search_user_names(db, "clr").await,
+//         &["colorado", "California"]
+//     );
+//     assert_eq!(
+//         fuzzy_search_user_names(db, "ro").await,
+//         &["rhode-island", "colorado", "oregon"],
+//     );
 
-    async fn fuzzy_search_user_names(db: &Db, query: &str) -> Vec<String> {
-        db.fuzzy_search_users(query, 10)
-            .await
-            .unwrap()
-            .into_iter()
-            .map(|user| user.github_login)
-            .collect::<Vec<_>>()
-    }
-}
+//     async fn fuzzy_search_user_names(db: &Db, query: &str) -> Vec<String> {
+//         db.fuzzy_search_users(query, 10)
+//             .await
+//             .unwrap()
+//             .into_iter()
+//             .map(|user| user.github_login)
+//             .collect::<Vec<_>>()
+//     }
+// }
 
-#[gpui::test]
-async fn test_invite_codes() {
-    let test_db = PostgresTestDb::new(build_background_executor());
-    let db = test_db.db();
+// #[gpui::test]
+// async fn test_invite_codes() {
+//     let test_db = PostgresTestDb::new(build_background_executor());
+//     let db = test_db.db();
 
-    let NewUserResult { user_id: user1, .. } = db
-        .create_user(
-            "user1@example.com",
-            false,
-            NewUserParams {
-                github_login: "user1".into(),
-                github_user_id: 0,
-                invite_count: 0,
-            },
-        )
-        .await
-        .unwrap();
+//     let NewUserResult { user_id: user1, .. } = db
+//         .create_user(
+//             "user1@example.com",
+//             false,
+//             NewUserParams {
+//                 github_login: "user1".into(),
+//                 github_user_id: 0,
+//                 invite_count: 0,
+//             },
+//         )
+//         .await
+//         .unwrap();
 
-    // Initially, user 1 has no invite code
-    assert_eq!(db.get_invite_code_for_user(user1).await.unwrap(), None);
+//     // Initially, user 1 has no invite code
+//     assert_eq!(db.get_invite_code_for_user(user1).await.unwrap(), None);
 
-    // Setting invite count to 0 when no code is assigned does not assign a new code
-    db.set_invite_count_for_user(user1, 0).await.unwrap();
-    assert!(db.get_invite_code_for_user(user1).await.unwrap().is_none());
+//     // Setting invite count to 0 when no code is assigned does not assign a new code
+//     db.set_invite_count_for_user(user1, 0).await.unwrap();
+//     assert!(db.get_invite_code_for_user(user1).await.unwrap().is_none());
 
-    // User 1 creates an invite code that can be used twice.
-    db.set_invite_count_for_user(user1, 2).await.unwrap();
-    let (invite_code, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
-    assert_eq!(invite_count, 2);
+//     // User 1 creates an invite code that can be used twice.
+//     db.set_invite_count_for_user(user1, 2).await.unwrap();
+//     let (invite_code, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
+//     assert_eq!(invite_count, 2);
 
-    // User 2 redeems the invite code and becomes a contact of user 1.
- let user2_invite = db - .create_invite_from_code(&invite_code, "user2@example.com", Some("user-2-device-id")) - .await - .unwrap(); - let NewUserResult { - user_id: user2, - inviting_user_id, - signup_device_id, - metrics_id, - } = db - .create_user_from_invite( - &user2_invite, - NewUserParams { - github_login: "user2".into(), - github_user_id: 2, - invite_count: 7, - }, - ) - .await - .unwrap() - .unwrap(); - let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); - assert_eq!(invite_count, 1); - assert_eq!(inviting_user_id, Some(user1)); - assert_eq!(signup_device_id.unwrap(), "user-2-device-id"); - assert_eq!(db.get_user_metrics_id(user2).await.unwrap(), metrics_id); - assert_eq!( - db.get_contacts(user1).await.unwrap(), - [Contact::Accepted { - user_id: user2, - should_notify: true, - busy: false, - }] - ); - assert_eq!( - db.get_contacts(user2).await.unwrap(), - [Contact::Accepted { - user_id: user1, - should_notify: false, - busy: false, - }] - ); - assert_eq!( - db.get_invite_code_for_user(user2).await.unwrap().unwrap().1, - 7 - ); +// // User 2 redeems the invite code and becomes a contact of user 1. +// let user2_invite = db +// .create_invite_from_code(&invite_code, "user2@example.com", Some("user-2-device-id")) +// .await +// .unwrap(); +// let NewUserResult { +// user_id: user2, +// inviting_user_id, +// signup_device_id, +// metrics_id, +// } = db +// .create_user_from_invite( +// &user2_invite, +// NewUserParams { +// github_login: "user2".into(), +// github_user_id: 2, +// invite_count: 7, +// }, +// ) +// .await +// .unwrap() +// .unwrap(); +// let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); +// assert_eq!(invite_count, 1); +// assert_eq!(inviting_user_id, Some(user1)); +// assert_eq!(signup_device_id.unwrap(), "user-2-device-id"); +// assert_eq!(db.get_user_metrics_id(user2).await.unwrap(), metrics_id); +// assert_eq!( +// db.get_contacts(user1).await.unwrap(), +// [Contact::Accepted { +// user_id: user2, +// should_notify: true, +// busy: false, +// }] +// ); +// assert_eq!( +// db.get_contacts(user2).await.unwrap(), +// [Contact::Accepted { +// user_id: user1, +// should_notify: false, +// busy: false, +// }] +// ); +// assert_eq!( +// db.get_invite_code_for_user(user2).await.unwrap().unwrap().1, +// 7 +// ); - // User 3 redeems the invite code and becomes a contact of user 1. - let user3_invite = db - .create_invite_from_code(&invite_code, "user3@example.com", None) - .await - .unwrap(); - let NewUserResult { - user_id: user3, - inviting_user_id, - signup_device_id, - .. - } = db - .create_user_from_invite( - &user3_invite, - NewUserParams { - github_login: "user-3".into(), - github_user_id: 3, - invite_count: 3, - }, - ) - .await - .unwrap() - .unwrap(); - let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); - assert_eq!(invite_count, 0); - assert_eq!(inviting_user_id, Some(user1)); - assert!(signup_device_id.is_none()); - assert_eq!( - db.get_contacts(user1).await.unwrap(), - [ - Contact::Accepted { - user_id: user2, - should_notify: true, - busy: false, - }, - Contact::Accepted { - user_id: user3, - should_notify: true, - busy: false, - } - ] - ); - assert_eq!( - db.get_contacts(user3).await.unwrap(), - [Contact::Accepted { - user_id: user1, - should_notify: false, - busy: false, - }] - ); - assert_eq!( - db.get_invite_code_for_user(user3).await.unwrap().unwrap().1, - 3 - ); +// // User 3 redeems the invite code and becomes a contact of user 1. 
+//     let user3_invite = db
+//         .create_invite_from_code(&invite_code, "user3@example.com", None)
+//         .await
+//         .unwrap();
+//     let NewUserResult {
+//         user_id: user3,
+//         inviting_user_id,
+//         signup_device_id,
+//         ..
+//     } = db
+//         .create_user_from_invite(
+//             &user3_invite,
+//             NewUserParams {
+//                 github_login: "user-3".into(),
+//                 github_user_id: 3,
+//                 invite_count: 3,
+//             },
+//         )
+//         .await
+//         .unwrap()
+//         .unwrap();
+//     let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
+//     assert_eq!(invite_count, 0);
+//     assert_eq!(inviting_user_id, Some(user1));
+//     assert!(signup_device_id.is_none());
+//     assert_eq!(
+//         db.get_contacts(user1).await.unwrap(),
+//         [
+//             Contact::Accepted {
+//                 user_id: user2,
+//                 should_notify: true,
+//                 busy: false,
+//             },
+//             Contact::Accepted {
+//                 user_id: user3,
+//                 should_notify: true,
+//                 busy: false,
+//             }
+//         ]
+//     );
+//     assert_eq!(
+//         db.get_contacts(user3).await.unwrap(),
+//         [Contact::Accepted {
+//             user_id: user1,
+//             should_notify: false,
+//             busy: false,
+//         }]
+//     );
+//     assert_eq!(
+//         db.get_invite_code_for_user(user3).await.unwrap().unwrap().1,
+//         3
+//     );
 
-    // Trying to redeem the code for the third time results in an error.
-    db.create_invite_from_code(&invite_code, "user4@example.com", Some("user-4-device-id"))
-        .await
-        .unwrap_err();
+//     // Trying to redeem the code for the third time results in an error.
+//     db.create_invite_from_code(&invite_code, "user4@example.com", Some("user-4-device-id"))
+//         .await
+//         .unwrap_err();
 
-    // Invite count can be updated after the code has been created.
-    db.set_invite_count_for_user(user1, 2).await.unwrap();
-    let (latest_code, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
-    assert_eq!(latest_code, invite_code); // Invite code doesn't change when we increment above 0
-    assert_eq!(invite_count, 2);
+//     // Invite count can be updated after the code has been created.
+//     db.set_invite_count_for_user(user1, 2).await.unwrap();
+//     let (latest_code, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
+//     assert_eq!(latest_code, invite_code); // Invite code doesn't change when we increment above 0
+//     assert_eq!(invite_count, 2);
 
-    // User 4 can now redeem the invite code and becomes a contact of user 1.
-    let user4_invite = db
-        .create_invite_from_code(&invite_code, "user4@example.com", Some("user-4-device-id"))
-        .await
-        .unwrap();
-    let user4 = db
-        .create_user_from_invite(
-            &user4_invite,
-            NewUserParams {
-                github_login: "user-4".into(),
-                github_user_id: 4,
-                invite_count: 5,
-            },
-        )
-        .await
-        .unwrap()
-        .unwrap()
-        .user_id;
+//     // User 4 can now redeem the invite code and becomes a contact of user 1.
+// let user4_invite = db +// .create_invite_from_code(&invite_code, "user4@example.com", Some("user-4-device-id")) +// .await +// .unwrap(); +// let user4 = db +// .create_user_from_invite( +// &user4_invite, +// NewUserParams { +// github_login: "user-4".into(), +// github_user_id: 4, +// invite_count: 5, +// }, +// ) +// .await +// .unwrap() +// .unwrap() +// .user_id; - let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); - assert_eq!(invite_count, 1); - assert_eq!( - db.get_contacts(user1).await.unwrap(), - [ - Contact::Accepted { - user_id: user2, - should_notify: true, - busy: false, - }, - Contact::Accepted { - user_id: user3, - should_notify: true, - busy: false, - }, - Contact::Accepted { - user_id: user4, - should_notify: true, - busy: false, - } - ] - ); - assert_eq!( - db.get_contacts(user4).await.unwrap(), - [Contact::Accepted { - user_id: user1, - should_notify: false, - busy: false, - }] - ); - assert_eq!( - db.get_invite_code_for_user(user4).await.unwrap().unwrap().1, - 5 - ); +// let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); +// assert_eq!(invite_count, 1); +// assert_eq!( +// db.get_contacts(user1).await.unwrap(), +// [ +// Contact::Accepted { +// user_id: user2, +// should_notify: true, +// busy: false, +// }, +// Contact::Accepted { +// user_id: user3, +// should_notify: true, +// busy: false, +// }, +// Contact::Accepted { +// user_id: user4, +// should_notify: true, +// busy: false, +// } +// ] +// ); +// assert_eq!( +// db.get_contacts(user4).await.unwrap(), +// [Contact::Accepted { +// user_id: user1, +// should_notify: false, +// busy: false, +// }] +// ); +// assert_eq!( +// db.get_invite_code_for_user(user4).await.unwrap().unwrap().1, +// 5 +// ); - // An existing user cannot redeem invite codes. - db.create_invite_from_code(&invite_code, "user2@example.com", Some("user-2-device-id")) - .await - .unwrap_err(); - let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); - assert_eq!(invite_count, 1); -} +// // An existing user cannot redeem invite codes. 
+//     db.create_invite_from_code(&invite_code, "user2@example.com", Some("user-2-device-id"))
+//         .await
+//         .unwrap_err();
+//     let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
+//     assert_eq!(invite_count, 1);
+// }
 
-#[gpui::test]
-async fn test_signups() {
-    let test_db = PostgresTestDb::new(build_background_executor());
-    let db = test_db.db();
+// #[gpui::test]
+// async fn test_signups() {
+//     let test_db = PostgresTestDb::new(build_background_executor());
+//     let db = test_db.db();
 
-    // people sign up on the waitlist
-    for i in 0..8 {
-        db.create_signup(Signup {
-            email_address: format!("person-{i}@example.com"),
-            platform_mac: true,
-            platform_linux: i % 2 == 0,
-            platform_windows: i % 4 == 0,
-            editor_features: vec!["speed".into()],
-            programming_languages: vec!["rust".into(), "c".into()],
-            device_id: Some(format!("device_id_{i}")),
-        })
-        .await
-        .unwrap();
-    }
+//     // people sign up on the waitlist
+//     for i in 0..8 {
+//         db.create_signup(Signup {
+//             email_address: format!("person-{i}@example.com"),
+//             platform_mac: true,
+//             platform_linux: i % 2 == 0,
+//             platform_windows: i % 4 == 0,
+//             editor_features: vec!["speed".into()],
+//             programming_languages: vec!["rust".into(), "c".into()],
+//             device_id: Some(format!("device_id_{i}")),
+//         })
+//         .await
+//         .unwrap();
+//     }
 
-    assert_eq!(
-        db.get_waitlist_summary().await.unwrap(),
-        WaitlistSummary {
-            count: 8,
-            mac_count: 8,
-            linux_count: 4,
-            windows_count: 2,
-            unknown_count: 0,
-        }
-    );
+//     assert_eq!(
+//         db.get_waitlist_summary().await.unwrap(),
+//         WaitlistSummary {
+//             count: 8,
+//             mac_count: 8,
+//             linux_count: 4,
+//             windows_count: 2,
+//             unknown_count: 0,
+//         }
+//     );
 
-    // retrieve the next batch of signup emails to send
-    let signups_batch1 = db.get_unsent_invites(3).await.unwrap();
-    let addresses = signups_batch1
-        .iter()
-        .map(|s| &s.email_address)
-        .collect::<Vec<_>>();
-    assert_eq!(
-        addresses,
-        &[
-            "person-0@example.com",
-            "person-1@example.com",
-            "person-2@example.com"
-        ]
-    );
-    assert_ne!(
-        signups_batch1[0].email_confirmation_code,
-        signups_batch1[1].email_confirmation_code
-    );
+//     // retrieve the next batch of signup emails to send
+//     let signups_batch1 = db.get_unsent_invites(3).await.unwrap();
+//     let addresses = signups_batch1
+//         .iter()
+//         .map(|s| &s.email_address)
+//         .collect::<Vec<_>>();
+//     assert_eq!(
+//         addresses,
+//         &[
+//             "person-0@example.com",
+//             "person-1@example.com",
+//             "person-2@example.com"
+//         ]
+//     );
+//     assert_ne!(
+//         signups_batch1[0].email_confirmation_code,
+//         signups_batch1[1].email_confirmation_code
+//     );
 
-    // the waitlist isn't updated until we record that the emails
-    // were successfully sent.
-    let signups_batch = db.get_unsent_invites(3).await.unwrap();
-    assert_eq!(signups_batch, signups_batch1);
+//     // the waitlist isn't updated until we record that the emails
+//     // were successfully sent.
+//     let signups_batch = db.get_unsent_invites(3).await.unwrap();
+//     assert_eq!(signups_batch, signups_batch1);
 
-    // once the emails go out, we can retrieve the next batch
-    // of signups.
-    db.record_sent_invites(&signups_batch1).await.unwrap();
-    let signups_batch2 = db.get_unsent_invites(3).await.unwrap();
-    let addresses = signups_batch2
-        .iter()
-        .map(|s| &s.email_address)
-        .collect::<Vec<_>>();
-    assert_eq!(
-        addresses,
-        &[
-            "person-3@example.com",
-            "person-4@example.com",
-            "person-5@example.com"
-        ]
-    );
+//     // once the emails go out, we can retrieve the next batch
+//     // of signups.
+//     db.record_sent_invites(&signups_batch1).await.unwrap();
+//     let signups_batch2 = db.get_unsent_invites(3).await.unwrap();
+//     let addresses = signups_batch2
+//         .iter()
+//         .map(|s| &s.email_address)
+//         .collect::<Vec<_>>();
+//     assert_eq!(
+//         addresses,
+//         &[
+//             "person-3@example.com",
+//             "person-4@example.com",
+//             "person-5@example.com"
+//         ]
+//     );
 
-    // the sent invites are excluded from the summary.
-    assert_eq!(
-        db.get_waitlist_summary().await.unwrap(),
-        WaitlistSummary {
-            count: 5,
-            mac_count: 5,
-            linux_count: 2,
-            windows_count: 1,
-            unknown_count: 0,
-        }
-    );
+//     // the sent invites are excluded from the summary.
+//     assert_eq!(
+//         db.get_waitlist_summary().await.unwrap(),
+//         WaitlistSummary {
+//             count: 5,
+//             mac_count: 5,
+//             linux_count: 2,
+//             windows_count: 1,
+//             unknown_count: 0,
+//         }
+//     );
 
-    // user completes the signup process by providing their
-    // github account.
-    let NewUserResult {
-        user_id,
-        inviting_user_id,
-        signup_device_id,
-        ..
-    } = db
-        .create_user_from_invite(
-            &Invite {
-                email_address: signups_batch1[0].email_address.clone(),
-                email_confirmation_code: signups_batch1[0].email_confirmation_code.clone(),
-            },
-            NewUserParams {
-                github_login: "person-0".into(),
-                github_user_id: 0,
-                invite_count: 5,
-            },
-        )
-        .await
-        .unwrap()
-        .unwrap();
-    let user = db.get_user_by_id(user_id).await.unwrap().unwrap();
-    assert!(inviting_user_id.is_none());
-    assert_eq!(user.github_login, "person-0");
-    assert_eq!(user.email_address.as_deref(), Some("person-0@example.com"));
-    assert_eq!(user.invite_count, 5);
-    assert_eq!(signup_device_id.unwrap(), "device_id_0");
+//     // user completes the signup process by providing their
+//     // github account.
+//     let NewUserResult {
+//         user_id,
+//         inviting_user_id,
+//         signup_device_id,
+//         ..
+//     } = db
+//         .create_user_from_invite(
+//             &Invite {
+//                 email_address: signups_batch1[0].email_address.clone(),
+//                 email_confirmation_code: signups_batch1[0].email_confirmation_code.clone(),
+//             },
+//             NewUserParams {
+//                 github_login: "person-0".into(),
+//                 github_user_id: 0,
+//                 invite_count: 5,
+//             },
+//         )
+//         .await
+//         .unwrap()
+//         .unwrap();
+//     let user = db.get_user_by_id(user_id).await.unwrap().unwrap();
+//     assert!(inviting_user_id.is_none());
+//     assert_eq!(user.github_login, "person-0");
+//     assert_eq!(user.email_address.as_deref(), Some("person-0@example.com"));
+//     assert_eq!(user.invite_count, 5);
+//     assert_eq!(signup_device_id.unwrap(), "device_id_0");
 
-    // cannot redeem the same signup again.
-    assert!(db
-        .create_user_from_invite(
-            &Invite {
-                email_address: signups_batch1[0].email_address.clone(),
-                email_confirmation_code: signups_batch1[0].email_confirmation_code.clone(),
-            },
-            NewUserParams {
-                github_login: "some-other-github_account".into(),
-                github_user_id: 1,
-                invite_count: 5,
-            },
-        )
-        .await
-        .unwrap()
-        .is_none());
+//     // cannot redeem the same signup again.
+//     assert!(db
+//         .create_user_from_invite(
+//             &Invite {
+//                 email_address: signups_batch1[0].email_address.clone(),
+//                 email_confirmation_code: signups_batch1[0].email_confirmation_code.clone(),
+//             },
+//             NewUserParams {
+//                 github_login: "some-other-github_account".into(),
+//                 github_user_id: 1,
+//                 invite_count: 5,
+//             },
+//         )
+//         .await
+//         .unwrap()
+//         .is_none());
 
-    // cannot redeem a signup with the wrong confirmation code.
-    db.create_user_from_invite(
-        &Invite {
-            email_address: signups_batch1[1].email_address.clone(),
-            email_confirmation_code: "the-wrong-code".to_string(),
-        },
-        NewUserParams {
-            github_login: "person-1".into(),
-            github_user_id: 2,
-            invite_count: 5,
-        },
-    )
-    .await
-    .unwrap_err();
-}
+//     db.create_user_from_invite(
+//         &Invite {
+//             email_address: signups_batch1[1].email_address.clone(),
+//             email_confirmation_code: "the-wrong-code".to_string(),
+//         },
+//         NewUserParams {
+//             github_login: "person-1".into(),
+//             github_user_id: 2,
+//             invite_count: 5,
+//         },
+//     )
+//     .await
+//     .unwrap_err();
+// }
 
 fn build_background_executor() -> Arc<Background> {
     Deterministic::new(0).build_background()
diff --git a/crates/collab/src/db2/user.rs b/crates/collab/src/db2/user.rs
index de865db679..a0e21f9811 100644
--- a/crates/collab/src/db2/user.rs
+++ b/crates/collab/src/db2/user.rs
@@ -1,7 +1,7 @@
 use super::UserId;
 use sea_orm::entity::prelude::*;
 
-#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
+#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel)]
 #[sea_orm(table_name = "users")]
 pub struct Model {
     #[sea_orm(primary_key)]
@@ -13,6 +13,7 @@ pub struct Model {
     pub invite_code: Option<String>,
     pub invite_count: i32,
     pub connected_once: bool,
+    pub metrics_id: Uuid,
 }
 
 #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
diff --git a/crates/collab/src/db2/worktree.rs b/crates/collab/src/db2/worktree.rs
index 3a630fcfc9..3c6f7c0c1d 100644
--- a/crates/collab/src/db2/worktree.rs
+++ b/crates/collab/src/db2/worktree.rs
@@ -1,12 +1,14 @@
 use sea_orm::entity::prelude::*;
 
+use super::ProjectId;
+
 #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
 #[sea_orm(table_name = "worktrees")]
 pub struct Model {
     #[sea_orm(primary_key)]
     pub id: i32,
     #[sea_orm(primary_key)]
-    pub project_id: i32,
+    pub project_id: ProjectId,
     pub abs_path: String,
     pub root_name: String,
     pub visible: bool,

From 9e59056e7fdf7886ba31461543b5942089cca3fa Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Wed, 30 Nov 2022 14:18:46 +0100
Subject: [PATCH 078/240] Implement `db2::Database::get_user_by_github_account`

---
 crates/collab/src/db2.rs       |  97 +++++++++++++++++++++++------
 crates/collab/src/db2/tests.rs | 108 ++++++++++++++++-----------------
 2 files changed, 133 insertions(+), 72 deletions(-)

diff --git a/crates/collab/src/db2.rs b/crates/collab/src/db2.rs
index 47ddf8cd22..1d50437a9c 100644
--- a/crates/collab/src/db2.rs
+++ b/crates/collab/src/db2.rs
@@ -13,11 +13,11 @@ use collections::HashMap;
 use dashmap::DashMap;
 use futures::StreamExt;
 use rpc::{proto, ConnectionId};
-use sea_orm::ActiveValue;
 use sea_orm::{
     entity::prelude::*, ConnectOptions, DatabaseConnection, DatabaseTransaction, DbErr,
     TransactionTrait,
 };
+use sea_orm::{ActiveValue, IntoActiveModel};
 use sea_query::OnConflict;
 use serde::{Deserialize, Serialize};
 use sqlx::migrate::{Migrate, Migration, MigrationSource};
@@ -31,7 +31,7 @@ use tokio::sync::{Mutex, OwnedMutexGuard};
 pub use user::Model as User;
 
 pub struct Database {
-    url: String,
+    options: ConnectOptions,
     pool: DatabaseConnection,
     rooms: DashMap>>,
     #[cfg(test)]
@@ -41,11 +41,9 @@ pub struct Database {
 }
 
 impl Database {
-    pub async fn new(url: &str, max_connections: u32) -> Result<Self> {
-        let mut options = ConnectOptions::new(url.into());
-        options.max_connections(max_connections);
+    pub async fn new(options: ConnectOptions) -> Result<Self> {
         Ok(Self {
-            url: url.into(),
+            options: options.clone(),
             pool: sea_orm::Database::connect(options).await?,
             rooms: DashMap::with_capacity(16384),
             #[cfg(test)]
@@ -59,12 +57,12 @@ impl Database {
         &self,
         migrations_path: &Path,
         ignore_checksum_mismatch: bool,
-    ) -> anyhow::Result<(sqlx::AnyConnection, Vec<(Migration, Duration)>)> {
+    ) -> anyhow::Result<Vec<(Migration, Duration)>> {
         let migrations = MigrationSource::resolve(migrations_path)
             .await
             .map_err(|err| anyhow!("failed to load migrations: {err:?}"))?;
 
-        let mut connection = sqlx::AnyConnection::connect(&self.url).await?;
+        let mut connection = sqlx::AnyConnection::connect(self.options.get_url()).await?;
 
         connection.ensure_migrations_table().await?;
         let applied_migrations: HashMap<_, _> = connection
@@ -93,7 +91,7 @@ impl Database {
             }
         }
 
-        Ok((connection, new_migrations))
+        Ok(new_migrations)
     }
 
     pub async fn create_user(
@@ -142,6 +140,43 @@ impl Database {
         .await
     }
 
+    pub async fn get_user_by_github_account(
+        &self,
+        github_login: &str,
+        github_user_id: Option<i32>,
+    ) -> Result<Option<User>> {
+        self.transact(|tx| async {
+            let tx = tx;
+            if let Some(github_user_id) = github_user_id {
+                if let Some(user_by_github_user_id) = user::Entity::find()
+                    .filter(user::Column::GithubUserId.eq(github_user_id))
+                    .one(&tx)
+                    .await?
+                {
+                    let mut user_by_github_user_id = user_by_github_user_id.into_active_model();
+                    user_by_github_user_id.github_login = ActiveValue::set(github_login.into());
+                    Ok(Some(user_by_github_user_id.update(&tx).await?))
+                } else if let Some(user_by_github_login) = user::Entity::find()
+                    .filter(user::Column::GithubLogin.eq(github_login))
+                    .one(&tx)
+                    .await?
+                {
+                    let mut user_by_github_login = user_by_github_login.into_active_model();
+                    user_by_github_login.github_user_id = ActiveValue::set(Some(github_user_id));
+                    Ok(Some(user_by_github_login.update(&tx).await?))
+                } else {
+                    Ok(None)
+                }
+            } else {
+                Ok(user::Entity::find()
+                    .filter(user::Column::GithubLogin.eq(github_login))
+                    .one(&tx)
+                    .await?)
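+                // Lookup precedence above: a stable numeric `github_user_id`
+                // wins and refreshes a potentially stale `github_login`; a
+                // login-only match backfills the missing id; with no id
+                // supplied we fall back to this plain login lookup.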
+ } + }) + .await + } + pub async fn share_project( &self, room_id: RoomId, @@ -545,7 +580,9 @@ mod test { .unwrap(); let mut db = runtime.block_on(async { - let db = Database::new(&url, 5).await.unwrap(); + let mut options = ConnectOptions::new(url); + options.max_connections(5); + let db = Database::new(options).await.unwrap(); let sql = include_str!(concat!( env!("CARGO_MANIFEST_DIR"), "/migrations.sqlite/20221109000000_test_schema.sql" @@ -590,7 +627,11 @@ mod test { sqlx::Postgres::create_database(&url) .await .expect("failed to create test db"); - let db = Database::new(&url, 5).await.unwrap(); + let mut options = ConnectOptions::new(url); + options + .max_connections(5) + .idle_timeout(Duration::from_secs(0)); + let db = Database::new(options).await.unwrap(); let migrations_path = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations"); db.migrate(Path::new(migrations_path), false).await.unwrap(); db @@ -610,11 +651,31 @@ mod test { } } - // TODO: Implement drop - // impl Drop for PostgresTestDb { - // fn drop(&mut self) { - // let db = self.db.take().unwrap(); - // db.teardown(&self.url); - // } - // } + impl Drop for TestDb { + fn drop(&mut self) { + let db = self.db.take().unwrap(); + if let sea_orm::DatabaseBackend::Postgres = db.pool.get_database_backend() { + db.runtime.as_ref().unwrap().block_on(async { + use util::ResultExt; + let query = " + SELECT pg_terminate_backend(pg_stat_activity.pid) + FROM pg_stat_activity + WHERE + pg_stat_activity.datname = current_database() AND + pid <> pg_backend_pid(); + "; + db.pool + .execute(sea_orm::Statement::from_string( + db.pool.get_database_backend(), + query.into(), + )) + .await + .log_err(); + sqlx::Postgres::drop_database(db.options.get_url()) + .await + .log_err(); + }) + } + } + } } diff --git a/crates/collab/src/db2/tests.rs b/crates/collab/src/db2/tests.rs index a5bac24140..60d3fa64b0 100644 --- a/crates/collab/src/db2/tests.rs +++ b/crates/collab/src/db2/tests.rs @@ -88,63 +88,63 @@ test_both_dbs!( } ); -// test_both_dbs!( -// test_get_user_by_github_account_postgres, -// test_get_user_by_github_account_sqlite, -// db, -// { -// let user_id1 = db -// .create_user( -// "user1@example.com", -// false, -// NewUserParams { -// github_login: "login1".into(), -// github_user_id: 101, -// invite_count: 0, -// }, -// ) -// .await -// .unwrap() -// .user_id; -// let user_id2 = db -// .create_user( -// "user2@example.com", -// false, -// NewUserParams { -// github_login: "login2".into(), -// github_user_id: 102, -// invite_count: 0, -// }, -// ) -// .await -// .unwrap() -// .user_id; +test_both_dbs!( + test_get_user_by_github_account_postgres, + test_get_user_by_github_account_sqlite, + db, + { + let user_id1 = db + .create_user( + "user1@example.com", + false, + NewUserParams { + github_login: "login1".into(), + github_user_id: 101, + invite_count: 0, + }, + ) + .await + .unwrap() + .user_id; + let user_id2 = db + .create_user( + "user2@example.com", + false, + NewUserParams { + github_login: "login2".into(), + github_user_id: 102, + invite_count: 0, + }, + ) + .await + .unwrap() + .user_id; -// let user = db -// .get_user_by_github_account("login1", None) -// .await -// .unwrap() -// .unwrap(); -// assert_eq!(user.id, user_id1); -// assert_eq!(&user.github_login, "login1"); -// assert_eq!(user.github_user_id, Some(101)); + let user = db + .get_user_by_github_account("login1", None) + .await + .unwrap() + .unwrap(); + assert_eq!(user.id, user_id1); + assert_eq!(&user.github_login, "login1"); + assert_eq!(user.github_user_id, Some(101)); 
 
-//         assert!(db
-//             .get_user_by_github_account("non-existent-login", None)
-//             .await
-//             .unwrap()
-//             .is_none());
+        assert!(db
+            .get_user_by_github_account("non-existent-login", None)
+            .await
+            .unwrap()
+            .is_none());
 
-//         let user = db
-//             .get_user_by_github_account("the-new-login2", Some(102))
-//             .await
-//             .unwrap()
-//             .unwrap();
-//         assert_eq!(user.id, user_id2);
-//         assert_eq!(&user.github_login, "the-new-login2");
-//         assert_eq!(user.github_user_id, Some(102));
-//     }
-// );
+        let user = db
+            .get_user_by_github_account("the-new-login2", Some(102))
+            .await
+            .unwrap()
+            .unwrap();
+        assert_eq!(user.id, user_id2);
+        assert_eq!(&user.github_login, "the-new-login2");
+        assert_eq!(user.github_user_id, Some(102));
+    }
+);
 
 // test_both_dbs!(
 //     test_create_access_tokens_postgres,

From 2e24d128dba01f05055725fb43d2c51d89ce7138 Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Wed, 30 Nov 2022 14:47:03 +0100
Subject: [PATCH 079/240] Implement access tokens using sea-orm

---
 crates/collab/src/db2.rs              | 73 +++++++++++++++++++++++-
 crates/collab/src/db2/access_token.rs | 29 ++++++++++
 crates/collab/src/db2/tests.rs        | 82 +++++++++++++--------------
 crates/collab/src/db2/user.rs         | 11 +++-
 4 files changed, 151 insertions(+), 44 deletions(-)
 create mode 100644 crates/collab/src/db2/access_token.rs

diff --git a/crates/collab/src/db2.rs b/crates/collab/src/db2.rs
index 1d50437a9c..e2a03931d8 100644
--- a/crates/collab/src/db2.rs
+++ b/crates/collab/src/db2.rs
@@ -1,3 +1,4 @@
+mod access_token;
 mod project;
 mod project_collaborator;
 mod room;
@@ -17,8 +18,8 @@ use sea_orm::{
     entity::prelude::*, ConnectOptions, DatabaseConnection, DatabaseTransaction, DbErr,
     TransactionTrait,
 };
-use sea_orm::{ActiveValue, IntoActiveModel};
-use sea_query::OnConflict;
+use sea_orm::{ActiveValue, ConnectionTrait, IntoActiveModel, QueryOrder, QuerySelect};
+use sea_query::{OnConflict, Query};
 use serde::{Deserialize, Serialize};
 use sqlx::migrate::{Migrate, Migration, MigrationSource};
 use sqlx::Connection;
@@ -336,6 +337,63 @@ impl Database {
         })
     }
 
+    pub async fn create_access_token_hash(
+        &self,
+        user_id: UserId,
+        access_token_hash: &str,
+        max_access_token_count: usize,
+    ) -> Result<()> {
+        self.transact(|tx| async {
+            let tx = tx;
+
+            access_token::ActiveModel {
+                user_id: ActiveValue::set(user_id),
+                hash: ActiveValue::set(access_token_hash.into()),
+                ..Default::default()
+            }
+            .insert(&tx)
+            .await?;
+
+            access_token::Entity::delete_many()
+                .filter(
+                    access_token::Column::Id.in_subquery(
+                        Query::select()
+                            .column(access_token::Column::Id)
+                            .from(access_token::Entity)
+                            .and_where(access_token::Column::UserId.eq(user_id))
+                            .order_by(access_token::Column::Id, sea_orm::Order::Desc)
+                            .limit(10000)
+                            .offset(max_access_token_count as u64)
+                            .to_owned(),
+                    ),
+                )
+                .exec(&tx)
+                .await?;
+            tx.commit().await?;
+            Ok(())
+        })
+        .await
+    }
+
+    pub async fn get_access_token_hashes(&self, user_id: UserId) -> Result<Vec<String>> {
+        #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
+        enum QueryAs {
+            Hash,
+        }
+
+        self.transact(|tx| async move {
+            Ok(access_token::Entity::find()
+                .select_only()
+                .column(access_token::Column::Hash)
+                .filter(access_token::Column::UserId.eq(user_id))
+                .order_by_desc(access_token::Column::Id)
+                .into_values::<_, QueryAs>()
+                .all(&tx)
+                .await?)
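+            // `QueryAs` is a single-variant `DeriveColumn` enum: passing it to
+            // `into_values` fetches just the `hash` column instead of
+            // materializing whole `access_token::Model` rows.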
+        })
+        .await
+    }
+
     async fn transact<F, Fut, T>(&self, f: F) -> Result<T>
     where
         F: Send + Fn(DatabaseTransaction) -> Fut,
@@ -344,6 +402,16 @@ impl Database {
         let body = async {
             loop {
                 let tx = self.pool.begin().await?;
+
+                // In Postgres, serializable transactions are opt-in
+                if let sea_orm::DatabaseBackend::Postgres = self.pool.get_database_backend() {
+                    tx.execute(sea_orm::Statement::from_string(
+                        sea_orm::DatabaseBackend::Postgres,
+                        "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;".into(),
+                    ))
+                    .await?;
+                }
+
                 match f(tx).await {
                     Ok(result) => return Ok(result),
                     Err(error) => match error {
@@ -544,6 +612,7 @@ macro_rules! id_type {
     };
 }
 
+id_type!(AccessTokenId);
 id_type!(UserId);
 id_type!(RoomId);
 id_type!(RoomParticipantId);
diff --git a/crates/collab/src/db2/access_token.rs b/crates/collab/src/db2/access_token.rs
new file mode 100644
index 0000000000..f5caa4843d
--- /dev/null
+++ b/crates/collab/src/db2/access_token.rs
@@ -0,0 +1,29 @@
+use super::{AccessTokenId, UserId};
+use sea_orm::entity::prelude::*;
+
+#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
+#[sea_orm(table_name = "access_tokens")]
+pub struct Model {
+    #[sea_orm(primary_key)]
+    pub id: AccessTokenId,
+    pub user_id: UserId,
+    pub hash: String,
+}
+
+#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
+pub enum Relation {
+    #[sea_orm(
+        belongs_to = "super::user::Entity",
+        from = "Column::UserId",
+        to = "super::user::Column::Id"
+    )]
+    User,
+}
+
+impl Related<super::user::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::User.def()
+    }
+}
+
+impl ActiveModelBehavior for ActiveModel {}
diff --git a/crates/collab/src/db2/tests.rs b/crates/collab/src/db2/tests.rs
index 60d3fa64b0..e26ffee7a8 100644
--- a/crates/collab/src/db2/tests.rs
+++ b/crates/collab/src/db2/tests.rs
@@ -146,51 +146,51 @@ test_both_dbs!(
     }
 );
 
-// test_both_dbs!(
-//     test_create_access_tokens_postgres,
-//     test_create_access_tokens_sqlite,
-//     db,
-//     {
-//         let user = db
-//             .create_user(
-//                 "u1@example.com",
-//                 false,
-//                 NewUserParams {
-//                     github_login: "u1".into(),
-//                     github_user_id: 1,
-//                     invite_count: 0,
-//                 },
-//             )
-//             .await
-//             .unwrap()
-//             .user_id;
+test_both_dbs!(
+    test_create_access_tokens_postgres,
+    test_create_access_tokens_sqlite,
+    db,
+    {
+        let user = db
+            .create_user(
+                "u1@example.com",
+                false,
+                NewUserParams {
+                    github_login: "u1".into(),
+                    github_user_id: 1,
+                    invite_count: 0,
+                },
+            )
+            .await
+            .unwrap()
+            .user_id;
 
-//         db.create_access_token_hash(user, "h1", 3).await.unwrap();
-//         db.create_access_token_hash(user, "h2", 3).await.unwrap();
-//         assert_eq!(
-//             db.get_access_token_hashes(user).await.unwrap(),
-//             &["h2".to_string(), "h1".to_string()]
-//         );
+        db.create_access_token_hash(user, "h1", 3).await.unwrap();
+        db.create_access_token_hash(user, "h2", 3).await.unwrap();
+        assert_eq!(
+            db.get_access_token_hashes(user).await.unwrap(),
+            &["h2".to_string(), "h1".to_string()]
+        );
 
-//         db.create_access_token_hash(user, "h3", 3).await.unwrap();
-//         assert_eq!(
-//             db.get_access_token_hashes(user).await.unwrap(),
-//             &["h3".to_string(), "h2".to_string(), "h1".to_string(),]
-//         );
+        db.create_access_token_hash(user, "h3", 3).await.unwrap();
+        assert_eq!(
+            db.get_access_token_hashes(user).await.unwrap(),
+            &["h3".to_string(), "h2".to_string(), "h1".to_string(),]
+        );
 
-//         db.create_access_token_hash(user, "h4", 3).await.unwrap();
-//         assert_eq!(
-//             db.get_access_token_hashes(user).await.unwrap(),
-//             &["h4".to_string(), "h3".to_string(), "h2".to_string(),]
-//         );
+        db.create_access_token_hash(user, "h4", 3).await.unwrap();
+        assert_eq!(
+            db.get_access_token_hashes(user).await.unwrap(),
+            &["h4".to_string(), "h3".to_string(), "h2".to_string(),]
+        );
 
-//         db.create_access_token_hash(user, "h5", 3).await.unwrap();
-//         assert_eq!(
-//             db.get_access_token_hashes(user).await.unwrap(),
-//             &["h5".to_string(), "h4".to_string(), "h3".to_string()]
-//         );
-//     }
-// );
+        db.create_access_token_hash(user, "h5", 3).await.unwrap();
+        assert_eq!(
+            db.get_access_token_hashes(user).await.unwrap(),
+            &["h5".to_string(), "h4".to_string(), "h3".to_string()]
+        );
+    }
+);
 
 // test_both_dbs!(test_add_contacts_postgres, test_add_contacts_sqlite, db, {
 //     let mut user_ids = Vec::new();
diff --git a/crates/collab/src/db2/user.rs b/crates/collab/src/db2/user.rs
index a0e21f9811..5e8a484571 100644
--- a/crates/collab/src/db2/user.rs
+++ b/crates/collab/src/db2/user.rs
@@ -17,6 +17,15 @@ pub struct Model {
 }
 
 #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
-pub enum Relation {}
+pub enum Relation {
+    #[sea_orm(has_many = "super::access_token::Entity")]
+    AccessToken,
+}
+
+impl Related<super::access_token::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::AccessToken.def()
+    }
+}
 
 impl ActiveModelBehavior for ActiveModel {}

From 04d553d4d32e3c4dea2c608607ca015b230a535b Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Wed, 30 Nov 2022 15:06:04 +0100
Subject: [PATCH 080/240] Implement `db2::Database::get_user_metrics_id`

---
 crates/collab/src/db2.rs       | 19 +++++++++
 crates/collab/src/db2/tests.rs | 78 +++++++++++++++++-----------------
 2 files changed, 58 insertions(+), 39 deletions(-)

diff --git a/crates/collab/src/db2.rs b/crates/collab/src/db2.rs
index e2a03931d8..5c5157d2aa 100644
--- a/crates/collab/src/db2.rs
+++ b/crates/collab/src/db2.rs
@@ -178,6 +178,25 @@ impl Database {
         .await
     }
 
+    pub async fn get_user_metrics_id(&self, id: UserId) -> Result<String> {
+        #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
+        enum QueryAs {
+            MetricsId,
+        }
+
+        self.transact(|tx| async move {
+            let metrics_id: Uuid = user::Entity::find_by_id(id)
+                .select_only()
+                .column(user::Column::MetricsId)
+                .into_values::<_, QueryAs>()
+                .one(&tx)
+                .await?
+                .ok_or_else(|| anyhow!("could not find user"))?;
+            Ok(metrics_id.to_string())
+        })
+        .await
+    }
+
     pub async fn share_project(
         &self,
         room_id: RoomId,
diff --git a/crates/collab/src/db2/tests.rs b/crates/collab/src/db2/tests.rs
index e26ffee7a8..c66e2fa406 100644
--- a/crates/collab/src/db2/tests.rs
+++ b/crates/collab/src/db2/tests.rs
@@ -361,46 +361,46 @@
 
-// test_both_dbs!(test_metrics_id_postgres, test_metrics_id_sqlite, db, {
-//     let NewUserResult {
-//         user_id: user1,
-//         metrics_id: metrics_id1,
-//         ..
-//     } = db
-//         .create_user(
-//             "person1@example.com",
-//             false,
-//             NewUserParams {
-//                 github_login: "person1".into(),
-//                 github_user_id: 101,
-//                 invite_count: 5,
-//             },
-//         )
-//         .await
-//         .unwrap();
-//     let NewUserResult {
-//         user_id: user2,
-//         metrics_id: metrics_id2,
-//         ..
-//     } = db
-//         .create_user(
-//             "person2@example.com",
-//             false,
-//             NewUserParams {
-//                 github_login: "person2".into(),
-//                 github_user_id: 102,
-//                 invite_count: 5,
-//             },
-//         )
-//         .await
-//         .unwrap();
+test_both_dbs!(test_metrics_id_postgres, test_metrics_id_sqlite, db, {
+    let NewUserResult {
+        user_id: user1,
+        metrics_id: metrics_id1,
+        ..
+    } = db
+        .create_user(
+            "person1@example.com",
+            false,
+            NewUserParams {
+                github_login: "person1".into(),
+                github_user_id: 101,
+                invite_count: 5,
+            },
+        )
+        .await
+        .unwrap();
+    let NewUserResult {
+        user_id: user2,
+        metrics_id: metrics_id2,
+        ..
+    } = db
+        .create_user(
+            "person2@example.com",
+            false,
+            NewUserParams {
+                github_login: "person2".into(),
+                github_user_id: 102,
+                invite_count: 5,
+            },
+        )
+        .await
+        .unwrap();
 
-//     assert_eq!(db.get_user_metrics_id(user1).await.unwrap(), metrics_id1);
-//     assert_eq!(db.get_user_metrics_id(user2).await.unwrap(), metrics_id2);
-//     assert_eq!(metrics_id1.len(), 36);
-//     assert_eq!(metrics_id2.len(), 36);
-//     assert_ne!(metrics_id1, metrics_id2);
-// });
+    assert_eq!(db.get_user_metrics_id(user1).await.unwrap(), metrics_id1);
+    assert_eq!(db.get_user_metrics_id(user2).await.unwrap(), metrics_id2);
+    assert_eq!(metrics_id1.len(), 36);
+    assert_eq!(metrics_id2.len(), 36);
+    assert_ne!(metrics_id1, metrics_id2);
+});

From 5965113fc8ac84b07b2c9cac4b4003efd7e6728a Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Wed, 30 Nov 2022 17:36:25 +0100
Subject: [PATCH 081/240] Implement contacts using sea-orm

Co-Authored-By: Nathan Sobo

---
 crates/collab/src/db2.rs                  | 298 +++++++++++++++++++-
 crates/collab/src/db2/contact.rs          |  58 ++++
 crates/collab/src/db2/room_participant.rs |  12 +
 crates/collab/src/db2/tests.rs            | 314 +++++++++++-----------
 crates/collab/src/db2/user.rs             |   8 +
 5 files changed, 531 insertions(+), 159 deletions(-)
 create mode 100644 crates/collab/src/db2/contact.rs

diff --git a/crates/collab/src/db2.rs b/crates/collab/src/db2.rs
index 5c5157d2aa..35a45acedf 100644
--- a/crates/collab/src/db2.rs
+++ b/crates/collab/src/db2.rs
@@ -1,4 +1,5 @@
 mod access_token;
+mod contact;
 mod project;
 mod project_collaborator;
 mod room;
@@ -18,8 +19,11 @@ use sea_orm::{
     entity::prelude::*, ConnectOptions, DatabaseConnection, DatabaseTransaction, DbErr,
     TransactionTrait,
 };
-use sea_orm::{ActiveValue, ConnectionTrait, IntoActiveModel, QueryOrder, QuerySelect};
-use sea_query::{OnConflict, Query};
+use sea_orm::{
+    ActiveValue, ConnectionTrait, FromQueryResult, IntoActiveModel, JoinType, QueryOrder,
+    QuerySelect,
+};
+use sea_query::{Alias, Expr, OnConflict, Query};
 use serde::{Deserialize, Serialize};
 use sqlx::migrate::{Migrate, Migration, MigrationSource};
 use sqlx::Connection;
@@ -29,6 +33,7 @@ use std::time::Duration;
 use std::{future::Future, marker::PhantomData, rc::Rc, sync::Arc};
 use tokio::sync::{Mutex, OwnedMutexGuard};
 
+pub use contact::Contact;
 pub use user::Model as User;
 
 pub struct Database {
@@ -95,6 +100,8 @@ impl Database {
         Ok(new_migrations)
     }
 
+    // users
+
     pub async fn create_user(
         &self,
         email_address: &str,
@@ -197,6 +204,292 @@ impl Database {
         .await
     }
 
+    // contacts
+
+    pub async fn get_contacts(&self, user_id: UserId) -> Result<Vec<Contact>> {
+        #[derive(Debug, FromQueryResult)]
+        struct ContactWithUserBusyStatuses {
+            user_id_a: UserId,
+            user_id_b: UserId,
+            a_to_b: bool,
+            accepted: bool,
+            should_notify: bool,
+            user_a_busy: bool,
+            user_b_busy: bool,
+        }
+
+        self.transact(|tx| async move {
+            let user_a_participant = Alias::new("user_a_participant");
+            let user_b_participant = Alias::new("user_b_participant");
+            let mut db_contacts = contact::Entity::find()
+                .column_as(
+                    Expr::tbl(user_a_participant.clone(), room_participant::Column::Id)
+                        .is_not_null(),
+                    "user_a_busy",
+                )
+                .column_as(
+                    Expr::tbl(user_b_participant.clone(), room_participant::Column::Id)
+                        .is_not_null(),
+                    "user_b_busy",
+                )
+                .filter(
+                    contact::Column::UserIdA
+                        .eq(user_id)
+                        .or(contact::Column::UserIdB.eq(user_id)),
+                )
+                .join_as(
+                    JoinType::LeftJoin,
+                    contact::Relation::UserARoomParticipant.def(),
+                    user_a_participant,
+                )
+                .join_as(
+                    JoinType::LeftJoin,
+                    contact::Relation::UserBRoomParticipant.def(),
+                    user_b_participant,
+                )
+                .into_model::<ContactWithUserBusyStatuses>()
+                .stream(&tx)
+                .await?;
+
+            let mut contacts = Vec::new();
+            while let Some(db_contact) = db_contacts.next().await {
+                let db_contact = db_contact?;
+                if db_contact.user_id_a == user_id {
+                    if db_contact.accepted {
+                        contacts.push(Contact::Accepted {
+                            user_id: db_contact.user_id_b,
+                            should_notify: db_contact.should_notify && db_contact.a_to_b,
+                            busy: db_contact.user_b_busy,
+                        });
+                    } else if db_contact.a_to_b {
+                        contacts.push(Contact::Outgoing {
+                            user_id: db_contact.user_id_b,
+                        })
+                    } else {
+                        contacts.push(Contact::Incoming {
+                            user_id: db_contact.user_id_b,
+                            should_notify: db_contact.should_notify,
+                        });
+                    }
+                } else if db_contact.accepted {
+                    contacts.push(Contact::Accepted {
+                        user_id: db_contact.user_id_a,
+                        should_notify: db_contact.should_notify && !db_contact.a_to_b,
+                        busy: db_contact.user_a_busy,
+                    });
+                } else if db_contact.a_to_b {
+                    contacts.push(Contact::Incoming {
+                        user_id: db_contact.user_id_a,
+                        should_notify: db_contact.should_notify,
+                    });
+                } else {
+                    contacts.push(Contact::Outgoing {
+                        user_id: db_contact.user_id_a,
+                    });
+                }
+            }
+
+            contacts.sort_unstable_by_key(|contact| contact.user_id());
+
+            Ok(contacts)
+        })
+        .await
+    }
+
+    pub async fn has_contact(&self, user_id_1: UserId, user_id_2: UserId) -> Result<bool> {
+        self.transact(|tx| async move {
+            let (id_a, id_b) = if user_id_1 < user_id_2 {
+                (user_id_1, user_id_2)
+            } else {
+                (user_id_2, user_id_1)
+            };
+
+            Ok(contact::Entity::find()
+                .filter(
+                    contact::Column::UserIdA
+                        .eq(id_a)
+                        .and(contact::Column::UserIdB.eq(id_b))
+                        .and(contact::Column::Accepted.eq(true)),
+                )
+                .one(&tx)
+                .await?
+                .is_some())
+        })
+        .await
+    }
+
+    pub async fn send_contact_request(&self, sender_id: UserId, receiver_id: UserId) -> Result<()> {
+        self.transact(|mut tx| async move {
+            let (id_a, id_b, a_to_b) = if sender_id < receiver_id {
+                (sender_id, receiver_id, true)
+            } else {
+                (receiver_id, sender_id, false)
+            };
+
+            let rows_affected = contact::Entity::insert(contact::ActiveModel {
+                user_id_a: ActiveValue::set(id_a),
+                user_id_b: ActiveValue::set(id_b),
+                a_to_b: ActiveValue::set(a_to_b),
+                accepted: ActiveValue::set(false),
+                should_notify: ActiveValue::set(true),
+                ..Default::default()
+            })
+            .on_conflict(
+                OnConflict::columns([contact::Column::UserIdA, contact::Column::UserIdB])
+                    .values([
+                        (contact::Column::Accepted, true.into()),
+                        (contact::Column::ShouldNotify, false.into()),
+                    ])
+                    .action_and_where(
+                        contact::Column::Accepted.eq(false).and(
+                            contact::Column::AToB
+                                .eq(a_to_b)
+                                .and(contact::Column::UserIdA.eq(id_b))
+                                .or(contact::Column::AToB
+                                    .ne(a_to_b)
+                                    .and(contact::Column::UserIdA.eq(id_a))),
+                        ),
+                    )
+                    .to_owned(),
+            )
+            .exec_without_returning(&tx)
+            .await?;
+
+            if rows_affected == 1 {
+                tx.commit().await?;
+                Ok(())
+            } else {
+                Err(anyhow!("contact already requested"))?
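+                // Zero rows affected means the ON CONFLICT guard above
+                // declined to fire: a contact row for this user pair already
+                // exists and is not a pending request from the other side, so
+                // this is a duplicate request rather than one half of a
+                // concurrent mutual pair.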
+ } + }) + .await + } + + pub async fn remove_contact(&self, requester_id: UserId, responder_id: UserId) -> Result<()> { + self.transact(|mut tx| async move { + // let (id_a, id_b) = if responder_id < requester_id { + // (responder_id, requester_id) + // } else { + // (requester_id, responder_id) + // }; + // let query = " + // DELETE FROM contacts + // WHERE user_id_a = $1 AND user_id_b = $2; + // "; + // let result = sqlx::query(query) + // .bind(id_a.0) + // .bind(id_b.0) + // .execute(&mut tx) + // .await?; + + // if result.rows_affected() == 1 { + // tx.commit().await?; + // Ok(()) + // } else { + // Err(anyhow!("no such contact"))? + // } + todo!() + }) + .await + } + + pub async fn dismiss_contact_notification( + &self, + user_id: UserId, + contact_user_id: UserId, + ) -> Result<()> { + self.transact(|tx| async move { + let (id_a, id_b, a_to_b) = if user_id < contact_user_id { + (user_id, contact_user_id, true) + } else { + (contact_user_id, user_id, false) + }; + + let result = contact::Entity::update_many() + .set(contact::ActiveModel { + should_notify: ActiveValue::set(false), + ..Default::default() + }) + .filter( + contact::Column::UserIdA + .eq(id_a) + .and(contact::Column::UserIdB.eq(id_b)) + .and( + contact::Column::AToB + .eq(a_to_b) + .and(contact::Column::Accepted.eq(true)) + .or(contact::Column::AToB + .ne(a_to_b) + .and(contact::Column::Accepted.eq(false))), + ), + ) + .exec(&tx) + .await?; + if result.rows_affected == 0 { + Err(anyhow!("no such contact request"))? + } else { + tx.commit().await?; + Ok(()) + } + }) + .await + } + + pub async fn respond_to_contact_request( + &self, + responder_id: UserId, + requester_id: UserId, + accept: bool, + ) -> Result<()> { + self.transact(|tx| async move { + let (id_a, id_b, a_to_b) = if responder_id < requester_id { + (responder_id, requester_id, false) + } else { + (requester_id, responder_id, true) + }; + let rows_affected = if accept { + let result = contact::Entity::update_many() + .set(contact::ActiveModel { + accepted: ActiveValue::set(true), + should_notify: ActiveValue::set(true), + ..Default::default() + }) + .filter( + contact::Column::UserIdA + .eq(id_a) + .and(contact::Column::UserIdB.eq(id_b)) + .and(contact::Column::AToB.eq(a_to_b)), + ) + .exec(&tx) + .await?; + result.rows_affected + } else { + let result = contact::Entity::delete_many() + .filter( + contact::Column::UserIdA + .eq(id_a) + .and(contact::Column::UserIdB.eq(id_b)) + .and(contact::Column::AToB.eq(a_to_b)) + .and(contact::Column::Accepted.eq(false)), + ) + .exec(&tx) + .await?; + + result.rows_affected + }; + + if rows_affected == 1 { + tx.commit().await?; + Ok(()) + } else { + Err(anyhow!("no such contact request"))? + } + }) + .await + } + + // projects + pub async fn share_project( &self, room_id: RoomId, @@ -632,6 +925,7 @@ macro_rules! 
id_type {
     };
 }
 
 id_type!(AccessTokenId);
+id_type!(ContactId);
 id_type!(UserId);
 id_type!(RoomId);
 id_type!(RoomParticipantId);
diff --git a/crates/collab/src/db2/contact.rs b/crates/collab/src/db2/contact.rs
new file mode 100644
index 0000000000..c39d6643b3
--- /dev/null
+++ b/crates/collab/src/db2/contact.rs
@@ -0,0 +1,58 @@
+use super::{ContactId, UserId};
+use sea_orm::entity::prelude::*;
+
+#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel)]
+#[sea_orm(table_name = "contacts")]
+pub struct Model {
+    #[sea_orm(primary_key)]
+    pub id: ContactId,
+    pub user_id_a: UserId,
+    pub user_id_b: UserId,
+    pub a_to_b: bool,
+    pub should_notify: bool,
+    pub accepted: bool,
+}
+
+#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
+pub enum Relation {
+    #[sea_orm(
+        belongs_to = "super::room_participant::Entity",
+        from = "Column::UserIdA",
+        to = "super::room_participant::Column::UserId"
+    )]
+    UserARoomParticipant,
+    #[sea_orm(
+        belongs_to = "super::room_participant::Entity",
+        from = "Column::UserIdB",
+        to = "super::room_participant::Column::UserId"
+    )]
+    UserBRoomParticipant,
+}
+
+impl ActiveModelBehavior for ActiveModel {}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum Contact {
+    Accepted {
+        user_id: UserId,
+        should_notify: bool,
+        busy: bool,
+    },
+    Outgoing {
+        user_id: UserId,
+    },
+    Incoming {
+        user_id: UserId,
+        should_notify: bool,
+    },
+}
+
+impl Contact {
+    pub fn user_id(&self) -> UserId {
+        match self {
+            Contact::Accepted { user_id, .. } => *user_id,
+            Contact::Outgoing { user_id } => *user_id,
+            Contact::Incoming { user_id, .. } => *user_id,
+        }
+    }
+}
diff --git a/crates/collab/src/db2/room_participant.rs b/crates/collab/src/db2/room_participant.rs
index 4fabfc3068..c7c804581b 100644
--- a/crates/collab/src/db2/room_participant.rs
+++ b/crates/collab/src/db2/room_participant.rs
@@ -18,6 +18,12 @@ pub struct Model {
 
 #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
 pub enum Relation {
+    #[sea_orm(
+        belongs_to = "super::user::Entity",
+        from = "Column::UserId",
+        to = "super::user::Column::Id"
+    )]
+    User,
     #[sea_orm(
         belongs_to = "super::room::Entity",
         from = "Column::RoomId",
@@ -26,6 +32,12 @@ pub enum Relation {
     Room,
 }
 
+impl Related<super::user::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::User.def()
+    }
+}
+
 impl Related<super::room::Entity> for Entity {
     fn to() -> RelationDef {
         Relation::Room.def()
diff --git a/crates/collab/src/db2/tests.rs b/crates/collab/src/db2/tests.rs
index c66e2fa406..1aeb802025 100644
--- a/crates/collab/src/db2/tests.rs
+++ b/crates/collab/src/db2/tests.rs
@@ -192,174 +192,174 @@ test_both_dbs!(
     }
 );
 
-// test_both_dbs!(test_add_contacts_postgres, test_add_contacts_sqlite, db, {
-//     let mut user_ids = Vec::new();
-//     for i in 0..3 {
-//         user_ids.push(
-//             db.create_user(
-//                 &format!("user{i}@example.com"),
-//                 false,
-//                 NewUserParams {
-//                     github_login: format!("user{i}"),
-//                     github_user_id: i,
-//                     invite_count: 0,
-//                 },
-//             )
-//             .await
-//             .unwrap()
-//             .user_id,
-//         );
-//     }
+test_both_dbs!(test_add_contacts_postgres, test_add_contacts_sqlite, db, {
+    let mut user_ids = Vec::new();
+    for i in 0..3 {
+        user_ids.push(
+            db.create_user(
+                &format!("user{i}@example.com"),
+                false,
+                NewUserParams {
+                    github_login: format!("user{i}"),
+                    github_user_id: i,
+                    invite_count: 0,
+                },
+            )
+            .await
+            .unwrap()
+            .user_id,
+        );
+    }
 
-//     let user_1 = user_ids[0];
-//     let user_2 = user_ids[1];
-//     let user_3 = user_ids[2];
+    let user_1 = user_ids[0];
+    let user_2 = user_ids[1];
+    let user_3 = user_ids[2];
 
-//     // User starts with no contacts
-//
assert_eq!(db.get_contacts(user_1).await.unwrap(), &[]); + // User starts with no contacts + assert_eq!(db.get_contacts(user_1).await.unwrap(), &[]); -// // User requests a contact. Both users see the pending request. -// db.send_contact_request(user_1, user_2).await.unwrap(); -// assert!(!db.has_contact(user_1, user_2).await.unwrap()); -// assert!(!db.has_contact(user_2, user_1).await.unwrap()); -// assert_eq!( -// db.get_contacts(user_1).await.unwrap(), -// &[Contact::Outgoing { user_id: user_2 }], -// ); -// assert_eq!( -// db.get_contacts(user_2).await.unwrap(), -// &[Contact::Incoming { -// user_id: user_1, -// should_notify: true -// }] -// ); + // User requests a contact. Both users see the pending request. + db.send_contact_request(user_1, user_2).await.unwrap(); + assert!(!db.has_contact(user_1, user_2).await.unwrap()); + assert!(!db.has_contact(user_2, user_1).await.unwrap()); + assert_eq!( + db.get_contacts(user_1).await.unwrap(), + &[Contact::Outgoing { user_id: user_2 }], + ); + assert_eq!( + db.get_contacts(user_2).await.unwrap(), + &[Contact::Incoming { + user_id: user_1, + should_notify: true + }] + ); -// // User 2 dismisses the contact request notification without accepting or rejecting. -// // We shouldn't notify them again. -// db.dismiss_contact_notification(user_1, user_2) -// .await -// .unwrap_err(); -// db.dismiss_contact_notification(user_2, user_1) -// .await -// .unwrap(); -// assert_eq!( -// db.get_contacts(user_2).await.unwrap(), -// &[Contact::Incoming { -// user_id: user_1, -// should_notify: false -// }] -// ); + // User 2 dismisses the contact request notification without accepting or rejecting. + // We shouldn't notify them again. + db.dismiss_contact_notification(user_1, user_2) + .await + .unwrap_err(); + db.dismiss_contact_notification(user_2, user_1) + .await + .unwrap(); + assert_eq!( + db.get_contacts(user_2).await.unwrap(), + &[Contact::Incoming { + user_id: user_1, + should_notify: false + }] + ); -// // User can't accept their own contact request -// db.respond_to_contact_request(user_1, user_2, true) -// .await -// .unwrap_err(); + // User can't accept their own contact request + db.respond_to_contact_request(user_1, user_2, true) + .await + .unwrap_err(); -// // User accepts a contact request. Both users see the contact. -// db.respond_to_contact_request(user_2, user_1, true) -// .await -// .unwrap(); -// assert_eq!( -// db.get_contacts(user_1).await.unwrap(), -// &[Contact::Accepted { -// user_id: user_2, -// should_notify: true, -// busy: false, -// }], -// ); -// assert!(db.has_contact(user_1, user_2).await.unwrap()); -// assert!(db.has_contact(user_2, user_1).await.unwrap()); -// assert_eq!( -// db.get_contacts(user_2).await.unwrap(), -// &[Contact::Accepted { -// user_id: user_1, -// should_notify: false, -// busy: false, -// }] -// ); + // User accepts a contact request. Both users see the contact. + db.respond_to_contact_request(user_2, user_1, true) + .await + .unwrap(); + assert_eq!( + db.get_contacts(user_1).await.unwrap(), + &[Contact::Accepted { + user_id: user_2, + should_notify: true, + busy: false, + }], + ); + assert!(db.has_contact(user_1, user_2).await.unwrap()); + assert!(db.has_contact(user_2, user_1).await.unwrap()); + assert_eq!( + db.get_contacts(user_2).await.unwrap(), + &[Contact::Accepted { + user_id: user_1, + should_notify: false, + busy: false, + }] + ); -// // Users cannot re-request existing contacts. 
-// db.send_contact_request(user_1, user_2).await.unwrap_err(); -// db.send_contact_request(user_2, user_1).await.unwrap_err(); + // Users cannot re-request existing contacts. + db.send_contact_request(user_1, user_2).await.unwrap_err(); + db.send_contact_request(user_2, user_1).await.unwrap_err(); -// // Users can't dismiss notifications of them accepting other users' requests. -// db.dismiss_contact_notification(user_2, user_1) -// .await -// .unwrap_err(); -// assert_eq!( -// db.get_contacts(user_1).await.unwrap(), -// &[Contact::Accepted { -// user_id: user_2, -// should_notify: true, -// busy: false, -// }] -// ); + // Users can't dismiss notifications of them accepting other users' requests. + db.dismiss_contact_notification(user_2, user_1) + .await + .unwrap_err(); + assert_eq!( + db.get_contacts(user_1).await.unwrap(), + &[Contact::Accepted { + user_id: user_2, + should_notify: true, + busy: false, + }] + ); -// // Users can dismiss notifications of other users accepting their requests. -// db.dismiss_contact_notification(user_1, user_2) -// .await -// .unwrap(); -// assert_eq!( -// db.get_contacts(user_1).await.unwrap(), -// &[Contact::Accepted { -// user_id: user_2, -// should_notify: false, -// busy: false, -// }] -// ); + // Users can dismiss notifications of other users accepting their requests. + db.dismiss_contact_notification(user_1, user_2) + .await + .unwrap(); + assert_eq!( + db.get_contacts(user_1).await.unwrap(), + &[Contact::Accepted { + user_id: user_2, + should_notify: false, + busy: false, + }] + ); -// // Users send each other concurrent contact requests and -// // see that they are immediately accepted. -// db.send_contact_request(user_1, user_3).await.unwrap(); -// db.send_contact_request(user_3, user_1).await.unwrap(); -// assert_eq!( -// db.get_contacts(user_1).await.unwrap(), -// &[ -// Contact::Accepted { -// user_id: user_2, -// should_notify: false, -// busy: false, -// }, -// Contact::Accepted { -// user_id: user_3, -// should_notify: false, -// busy: false, -// } -// ] -// ); -// assert_eq!( -// db.get_contacts(user_3).await.unwrap(), -// &[Contact::Accepted { -// user_id: user_1, -// should_notify: false, -// busy: false, -// }], -// ); + // Users send each other concurrent contact requests and + // see that they are immediately accepted. + db.send_contact_request(user_1, user_3).await.unwrap(); + db.send_contact_request(user_3, user_1).await.unwrap(); + assert_eq!( + db.get_contacts(user_1).await.unwrap(), + &[ + Contact::Accepted { + user_id: user_2, + should_notify: false, + busy: false, + }, + Contact::Accepted { + user_id: user_3, + should_notify: false, + busy: false, + } + ] + ); + assert_eq!( + db.get_contacts(user_3).await.unwrap(), + &[Contact::Accepted { + user_id: user_1, + should_notify: false, + busy: false, + }], + ); -// // User declines a contact request. Both users see that it is gone. -// db.send_contact_request(user_2, user_3).await.unwrap(); -// db.respond_to_contact_request(user_3, user_2, false) -// .await -// .unwrap(); -// assert!(!db.has_contact(user_2, user_3).await.unwrap()); -// assert!(!db.has_contact(user_3, user_2).await.unwrap()); -// assert_eq!( -// db.get_contacts(user_2).await.unwrap(), -// &[Contact::Accepted { -// user_id: user_1, -// should_notify: false, -// busy: false, -// }] -// ); -// assert_eq!( -// db.get_contacts(user_3).await.unwrap(), -// &[Contact::Accepted { -// user_id: user_1, -// should_notify: false, -// busy: false, -// }], -// ); -// }); + // User declines a contact request. 
Both users see that it is gone. + db.send_contact_request(user_2, user_3).await.unwrap(); + db.respond_to_contact_request(user_3, user_2, false) + .await + .unwrap(); + assert!(!db.has_contact(user_2, user_3).await.unwrap()); + assert!(!db.has_contact(user_3, user_2).await.unwrap()); + assert_eq!( + db.get_contacts(user_2).await.unwrap(), + &[Contact::Accepted { + user_id: user_1, + should_notify: false, + busy: false, + }] + ); + assert_eq!( + db.get_contacts(user_3).await.unwrap(), + &[Contact::Accepted { + user_id: user_1, + should_notify: false, + busy: false, + }], + ); +}); test_both_dbs!(test_metrics_id_postgres, test_metrics_id_sqlite, db, { let NewUserResult { diff --git a/crates/collab/src/db2/user.rs b/crates/collab/src/db2/user.rs index 5e8a484571..f6bac9dc77 100644 --- a/crates/collab/src/db2/user.rs +++ b/crates/collab/src/db2/user.rs @@ -20,6 +20,8 @@ pub struct Model { pub enum Relation { #[sea_orm(has_many = "super::access_token::Entity")] AccessToken, + #[sea_orm(has_one = "super::room_participant::Entity")] + RoomParticipant, } impl Related<super::access_token::Entity> for Entity { @@ -28,4 +30,10 @@ } } +impl Related<super::room_participant::Entity> for Entity { + fn to() -> RelationDef { + Relation::RoomParticipant.def() + } +} + impl ActiveModelBehavior for ActiveModel {} From 4c04d512dbedd1abaa1e60cc1e4d86f2ed6fb87b Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 30 Nov 2022 17:39:17 +0100 Subject: [PATCH 082/240] Implement `db2::Database::remove_contact` --- crates/collab/src/db2.rs | 55 ++++++++++++++++++++-------------- crates/collab/src/db2/tests.rs | 12 ++++---- 2 files changed, 39 insertions(+), 28 deletions(-) diff --git a/crates/collab/src/db2.rs b/crates/collab/src/db2.rs index 35a45acedf..2e6b349497 100644 --- a/crates/collab/src/db2.rs +++ b/crates/collab/src/db2.rs @@ -366,29 +366,28 @@ impl Database { } pub async fn remove_contact(&self, requester_id: UserId, responder_id: UserId) -> Result<()> { - self.transact(|mut tx| async move { - // let (id_a, id_b) = if responder_id < requester_id { - // (responder_id, requester_id) - // } else { - // (requester_id, responder_id) - // }; - // let query = " - // DELETE FROM contacts - // WHERE user_id_a = $1 AND user_id_b = $2; - // "; - // let result = sqlx::query(query) - // .bind(id_a.0) - // .bind(id_b.0) - // .execute(&mut tx) - // .await?; - // if result.rows_affected() == 1 { - // tx.commit().await?; - // Ok(()) - // } else { - // Err(anyhow!("no such contact"))? - // } - todo!() + self.transact(|tx| async move { + let (id_a, id_b) = if responder_id < requester_id { + (responder_id, requester_id) + } else { + (requester_id, responder_id) + }; + let result = contact::Entity::delete_many() + .filter( + contact::Column::UserIdA + .eq(id_a) + .and(contact::Column::UserIdB.eq(id_b)), + ) + .exec(&tx) + .await?; + + if result.rows_affected == 1 { + tx.commit().await?; + Ok(()) + } else { + Err(anyhow!("no such contact"))?
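+ // Contacts are stored once per pair, with the smaller user id in user_id_a (mirroring how send_contact_request writes them), so the ids are normalized above before deleting; rows_affected == 1 is what proves the contact actually existed, and only that path commits the transaction.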
+ } }) .await } @@ -488,6 +487,18 @@ impl Database { .await } + pub fn fuzzy_like_string(string: &str) -> String { + let mut result = String::with_capacity(string.len() * 2 + 1); + for c in string.chars() { + if c.is_alphanumeric() { + result.push('%'); + result.push(c); + } + } + result.push('%'); + result + } + // projects pub async fn share_project( diff --git a/crates/collab/src/db2/tests.rs b/crates/collab/src/db2/tests.rs index 1aeb802025..45715a925e 100644 --- a/crates/collab/src/db2/tests.rs +++ b/crates/collab/src/db2/tests.rs @@ -402,12 +402,12 @@ test_both_dbs!(test_metrics_id_postgres, test_metrics_id_sqlite, db, { assert_ne!(metrics_id1, metrics_id2); }); -// #[test] -// fn test_fuzzy_like_string() { -// assert_eq!(DefaultDb::fuzzy_like_string("abcd"), "%a%b%c%d%"); -// assert_eq!(DefaultDb::fuzzy_like_string("x y"), "%x%y%"); -// assert_eq!(DefaultDb::fuzzy_like_string(" z "), "%z%"); -// } +#[test] +fn test_fuzzy_like_string() { + assert_eq!(Database::fuzzy_like_string("abcd"), "%a%b%c%d%"); + assert_eq!(Database::fuzzy_like_string("x y"), "%x%y%"); + assert_eq!(Database::fuzzy_like_string(" z "), "%z%"); +} // #[gpui::test] // async fn test_fuzzy_search_users() { From 5965113fc8ac84b07b2c9cac4b4003efd7e6728a Mon Sep 17 00:00:00 2001 From: Julia Date: Mon, 28 Nov 2022 23:34:38 -0500 Subject: [PATCH 083/240] Add verify macros & use in one location for point conversion --- Cargo.lock | 9 +++++++++ Cargo.toml | 1 + crates/rope/Cargo.toml | 2 +- crates/rope/src/rope.rs | 10 ++++++---- crates/verify/Cargo.toml | 11 +++++++++++ crates/verify/src/verify.rs | 33 +++++++++++++++++++++++++++++++++ 6 files changed, 61 insertions(+), 5 deletions(-) create mode 100644 crates/verify/Cargo.toml create mode 100644 crates/verify/src/verify.rs diff --git a/Cargo.lock b/Cargo.lock index 93631697c1..550b240b65 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4778,6 +4778,7 @@ dependencies = [ "smallvec", "sum_tree", "util", + "verify", ] [[package]] @@ -6844,6 +6845,14 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" +[[package]] +name = "verify" +version = "0.1.0" +dependencies = [ + "backtrace", + "log", +] + [[package]] name = "version_check" version = "0.9.4" diff --git a/Cargo.toml b/Cargo.toml index 8e9814c448..1461855e22 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,6 +52,7 @@ members = [ "crates/theme_selector", "crates/theme_testbench", "crates/util", + "crates/verify", "crates/vim", "crates/workspace", "crates/zed", diff --git a/crates/rope/Cargo.toml b/crates/rope/Cargo.toml index 0f754c1fb3..fb7836fab8 100644 --- a/crates/rope/Cargo.toml +++ b/crates/rope/Cargo.toml @@ -12,7 +12,7 @@ smallvec = { version = "1.6", features = ["union"] } sum_tree = { path = "../sum_tree" } arrayvec = "0.7.1" log = { version = "0.4.16", features = ["kv_unstable_serde"] } - +verify = { path = "../verify" } [dev-dependencies] rand = "0.8.3" diff --git a/crates/rope/src/rope.rs b/crates/rope/src/rope.rs index d4ee894310..03810be0b9 100644 --- a/crates/rope/src/rope.rs +++ b/crates/rope/src/rope.rs @@ -12,6 +12,7 @@ use std::{ str, }; use sum_tree::{Bias, Dimension, SumTree}; +use verify::{verify, verify_not}; pub use offset_utf16::OffsetUtf16; pub use point::Point; @@ -680,10 +681,11 @@ impl Chunk { let mut offset = 0; let mut point = Point::new(0, 0); for ch in self.0.chars() { - if point >= target { - if point > target { - panic!("point {:?} is inside of character {:?}", target, ch); - } + 
verify_not!(point > target, ("point {:?} is inside of character {:?}", target, ch), else { + point = target; + }); + + if point == target { break; } diff --git a/crates/verify/Cargo.toml b/crates/verify/Cargo.toml new file mode 100644 index 0000000000..72d64511f3 --- /dev/null +++ b/crates/verify/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "verify" +version = "0.1.0" +edition = "2021" + +[lib] +path = "src/verify.rs" + +[dependencies] +backtrace = "0.3" +log = "0.4" diff --git a/crates/verify/src/verify.rs b/crates/verify/src/verify.rs new file mode 100644 index 0000000000..9e1a4a5c89 --- /dev/null +++ b/crates/verify/src/verify.rs @@ -0,0 +1,33 @@ +pub use backtrace::Backtrace; + +#[macro_export] +macro_rules! verify { + ( $expression:expr, else $block:expr ) => { + verify!($expression, (""), else $block) + }; + + ( $expression:expr, ( $($fmt_arg:tt)* ), else $block:expr ) => {{ + let verify_str = stringify!($expression); + + if !$expression { + if cfg!(debug_assertions) { + panic!("Claim failed {:?}: {}", verify_str, format_args!($($fmt_arg)*)); + } else { + let backtrace = $crate::Backtrace::new(); + log::error!("Claim failed {:?}\n{:?}", verify_str, backtrace); + $block + } + } + }}; +} + +#[macro_export] +macro_rules! verify_not { + ( $expression:expr, else $block:expr ) => { + verify_not!($expression, (""), else $block) + }; + + ( $expression:expr, ( $($fmt_arg:tt)* ), else $block:expr ) => { + verify!(!$expression, ( $($fmt_arg)* ), else $block) + }; +} From 2b979d3b88f7025407c0ee0a65a9d90a96f02685 Mon Sep 17 00:00:00 2001 From: Julia Date: Wed, 30 Nov 2022 00:01:40 -0500 Subject: [PATCH 084/240] Don't panic rope point conversions --- crates/rope/src/rope.rs | 47 ++++++++++++++++++++++++++--------------- 1 file changed, 30 insertions(+), 17 deletions(-) diff --git a/crates/rope/src/rope.rs b/crates/rope/src/rope.rs index 03810be0b9..569d48dcbd 100644 --- a/crates/rope/src/rope.rs +++ b/crates/rope/src/rope.rs @@ -681,8 +681,8 @@ impl Chunk { let mut offset = 0; let mut point = Point::new(0, 0); for ch in self.0.chars() { - verify_not!(point > target, ("point {:?} is inside of character {:?}", target, ch), else { - point = target; + verify_not!(point > target, ("point {target:?} is inside of character {ch:?}"), else { + return offset; }); if point == target { @@ -691,16 +691,19 @@ impl Chunk { if ch == '\n' { point.row += 1; - if point.row > target.row { - panic!( - "point {:?} is beyond the end of a line with length {}", - target, point.column - ); - } point.column = 0; + + verify_not!( + point.row > target.row, + ("point {target:?} is beyond the end of a line with length {}", point.column), + else { + return offset; + } + ); } else { point.column += ch.len_utf8() as u32; } + offset += ch.len_utf8(); } offset @@ -739,26 +742,36 @@ impl Chunk { if ch == '\n' { point.row += 1; point.column = 0; - if point.row > target.row { - if clip { + + if clip { + if point.row > target.row { // Return the offset of the newline return offset; } - panic!( - "point {:?} is beyond the end of a line with length {}", - target, point.column - ); + } else { + verify_not!( + point.row > target.row, + ("point {target:?} is beyond the end of a line with length {}", point.column), + else { + // Return the offset of the newline + return offset; + } + ) } } else { point.column += ch.len_utf16() as u32; } - if point > target { - if clip { + if clip { + if point > target { // Return the offset of the codepoint which we have landed within, bias left return offset; } - panic!("point {:?} is inside of codepoint 
{:?}", target, ch); + } else { + verify_not!(point > target, ("point {target:?} is inside of codepoint {ch:?}"), else { + // Return the offset of the codepoint which we have landed within, bias left + return offset; + }); } offset += ch.len_utf8(); From 023ecd595b7248c1a7f8b13a2307ed54692e1a5d Mon Sep 17 00:00:00 2001 From: Julia Date: Wed, 30 Nov 2022 13:03:15 -0500 Subject: [PATCH 085/240] Change verify macro to debug panic Co-Authored-By: Max Brunsfeld --- Cargo.lock | 10 +------ Cargo.toml | 1 - crates/rope/Cargo.toml | 2 +- crates/rope/src/rope.rs | 56 ++++++++++++++++--------------------- crates/util/Cargo.toml | 1 + crates/util/src/lib.rs | 13 +++++++++ crates/verify/Cargo.toml | 11 -------- crates/verify/src/verify.rs | 33 ---------------------- 8 files changed, 40 insertions(+), 87 deletions(-) delete mode 100644 crates/verify/Cargo.toml delete mode 100644 crates/verify/src/verify.rs diff --git a/Cargo.lock b/Cargo.lock index 550b240b65..24cd7a7748 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4778,7 +4778,6 @@ dependencies = [ "smallvec", "sum_tree", "util", - "verify", ] [[package]] @@ -6786,6 +6785,7 @@ name = "util" version = "0.1.0" dependencies = [ "anyhow", + "backtrace", "futures 0.3.24", "git2", "lazy_static", @@ -6845,14 +6845,6 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" -[[package]] -name = "verify" -version = "0.1.0" -dependencies = [ - "backtrace", - "log", -] - [[package]] name = "version_check" version = "0.9.4" diff --git a/Cargo.toml b/Cargo.toml index 1461855e22..8e9814c448 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,7 +52,6 @@ members = [ "crates/theme_selector", "crates/theme_testbench", "crates/util", - "crates/verify", "crates/vim", "crates/workspace", "crates/zed", diff --git a/crates/rope/Cargo.toml b/crates/rope/Cargo.toml index fb7836fab8..bd1dc690db 100644 --- a/crates/rope/Cargo.toml +++ b/crates/rope/Cargo.toml @@ -12,7 +12,7 @@ smallvec = { version = "1.6", features = ["union"] } sum_tree = { path = "../sum_tree" } arrayvec = "0.7.1" log = { version = "0.4.16", features = ["kv_unstable_serde"] } -verify = { path = "../verify" } +util = { path = "../util" } [dev-dependencies] rand = "0.8.3" diff --git a/crates/rope/src/rope.rs b/crates/rope/src/rope.rs index 569d48dcbd..e4f2bf5011 100644 --- a/crates/rope/src/rope.rs +++ b/crates/rope/src/rope.rs @@ -12,7 +12,7 @@ use std::{ str, }; use sum_tree::{Bias, Dimension, SumTree}; -use verify::{verify, verify_not}; +use util::debug_panic; pub use offset_utf16::OffsetUtf16; pub use point::Point; @@ -681,9 +681,10 @@ impl Chunk { let mut offset = 0; let mut point = Point::new(0, 0); for ch in self.0.chars() { - verify_not!(point > target, ("point {target:?} is inside of character {ch:?}"), else { + if point > target { + debug_panic!("point {target:?} is inside of character {ch:?}"); return offset; - }); + } if point == target { break; @@ -693,13 +694,13 @@ impl Chunk { point.row += 1; point.column = 0; - verify_not!( - point.row > target.row, - ("point {target:?} is beyond the end of a line with length {}", point.column), - else { - return offset; - } - ); + if point.row > target.row { + debug_panic!( + "point {target:?} is beyond the end of a line with length {}", + point.column + ); + return offset; + } } else { point.column += ch.len_utf8() as u32; } @@ -743,35 +744,26 @@ impl Chunk { point.row += 1; point.column = 0; - if clip { - if point.row > target.row { - // Return the offset of 
the newline - return offset; + if point.row > target.row { + if !clip { + debug_panic!( + "point {target:?} is beyond the end of a line with length {}", + point.column + ); } - } else { - verify_not!( - point.row > target.row, - ("point {target:?} is beyond the end of a line with length {}", point.column), - else { - // Return the offset of the newline - return offset; - } - ) + // Return the offset of the newline + return offset; } } else { point.column += ch.len_utf16() as u32; } - if clip { - if point > target { - // Return the offset of the codepoint which we have landed within, bias left - return offset; + if point > target { + if !clip { + debug_panic!("point {target:?} is inside of codepoint {ch:?}"); } - } else { - verify_not!(point > target, ("point {target:?} is inside of codepoint {ch:?}"), else { - // Return the offset of the codepoint which we have landed within, bias left - return offset; - }); + // Return the offset of the codepoint which we have landed within, bias left + return offset; } offset += ch.len_utf8(); diff --git a/crates/util/Cargo.toml b/crates/util/Cargo.toml index c083137156..fc16eeb53c 100644 --- a/crates/util/Cargo.toml +++ b/crates/util/Cargo.toml @@ -11,6 +11,7 @@ test-support = ["serde_json", "tempdir", "git2"] [dependencies] anyhow = "1.0.38" +backtrace = "0.3" futures = "0.3" log = { version = "0.4.16", features = ["kv_unstable_serde"] } lazy_static = "1.4.0" diff --git a/crates/util/src/lib.rs b/crates/util/src/lib.rs index e35f2df7d4..22d63a0996 100644 --- a/crates/util/src/lib.rs +++ b/crates/util/src/lib.rs @@ -1,6 +1,7 @@ #[cfg(any(test, feature = "test-support"))] pub mod test; +pub use backtrace::Backtrace; use futures::Future; use rand::{seq::SliceRandom, Rng}; use std::{ @@ -10,6 +11,18 @@ use std::{ task::{Context, Poll}, }; +#[macro_export] +macro_rules! debug_panic { + ( $($fmt_arg:tt)* ) => { + if cfg!(debug_assertions) { + panic!( $($fmt_arg)* ); + } else { + let backtrace = $crate::Backtrace::new(); + log::error!("{}\n{:?}", format_args!($($fmt_arg)*), backtrace); + } + }; +} + pub fn truncate(s: &str, max_chars: usize) -> &str { match s.char_indices().nth(max_chars) { None => s, diff --git a/crates/verify/Cargo.toml b/crates/verify/Cargo.toml deleted file mode 100644 index 72d64511f3..0000000000 --- a/crates/verify/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -name = "verify" -version = "0.1.0" -edition = "2021" - -[lib] -path = "src/verify.rs" - -[dependencies] -backtrace = "0.3" -log = "0.4" diff --git a/crates/verify/src/verify.rs b/crates/verify/src/verify.rs deleted file mode 100644 index 9e1a4a5c89..0000000000 --- a/crates/verify/src/verify.rs +++ /dev/null @@ -1,33 +0,0 @@ -pub use backtrace::Backtrace; - -#[macro_export] -macro_rules! verify { - ( $expression:expr, else $block:expr ) => { - verify!($expression, (""), else $block) - }; - - ( $expression:expr, ( $($fmt_arg:tt)* ), else $block:expr ) => {{ - let verify_str = stringify!($expression); - - if !$expression { - if cfg!(debug_assertions) { - panic!("Claim failed {:?}: {}", verify_str, format_args!($($fmt_arg)*)); - } else { - let backtrace = $crate::Backtrace::new(); - log::error!("Claim failed {:?}\n{:?}", verify_str, backtrace); - $block - } - } - }}; -} - -#[macro_export] -macro_rules! 
verify_not { - ( $expression:expr, else $block:expr ) => { - verify_not!($expression, (""), else $block) - }; - - ( $expression:expr, ( $($fmt_arg:tt)* ), else $block:expr ) => { - verify!(!$expression, ( $($fmt_arg)* ), else $block) - }; -} From 41b2fde10d8285d4e77c246fdcb330d3850d4f23 Mon Sep 17 00:00:00 2001 From: Julia Date: Wed, 30 Nov 2022 13:11:08 -0500 Subject: [PATCH 086/240] Style Co-Authored-By: Max Brunsfeld --- crates/rope/src/rope.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/crates/rope/src/rope.rs b/crates/rope/src/rope.rs index e4f2bf5011..53713e3f7a 100644 --- a/crates/rope/src/rope.rs +++ b/crates/rope/src/rope.rs @@ -680,13 +680,12 @@ impl Chunk { fn point_to_offset(&self, target: Point) -> usize { let mut offset = 0; let mut point = Point::new(0, 0); - for ch in self.0.chars() { - if point > target { - debug_panic!("point {target:?} is inside of character {ch:?}"); - return offset; - } - if point == target { + for ch in self.0.chars() { + if point >= target { + if point > target { + debug_panic!("point {target:?} is inside of character {ch:?}"); + } break; } @@ -699,7 +698,7 @@ impl Chunk { "point {target:?} is beyond the end of a line with length {}", point.column ); - return offset; + break; } } else { point.column += ch.len_utf8() as u32; @@ -707,6 +706,7 @@ impl Chunk { offset += ch.len_utf8(); } + offset } From d70996bb9923a06fa1e7334a372fd3d32677fe19 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 30 Nov 2022 14:10:10 -0800 Subject: [PATCH 087/240] collab 0.2.5 --- Cargo.lock | 2 +- crates/collab/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 24cd7a7748..e04624d686 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1028,7 +1028,7 @@ dependencies = [ [[package]] name = "collab" -version = "0.2.4" +version = "0.2.5" dependencies = [ "anyhow", "async-trait", diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 57a57a00c1..09f379526e 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] default-run = "collab" edition = "2021" name = "collab" -version = "0.2.4" +version = "0.2.5" [[bin]] name = "collab" From 46f1d5f5c24448abb2dc149689f389b4378858f8 Mon Sep 17 00:00:00 2001 From: Julia Date: Thu, 1 Dec 2022 00:29:58 -0500 Subject: [PATCH 088/240] Avoid moving tab when leader item updates --- crates/workspace/src/workspace.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 7082b61949..25fa3654d7 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -2676,7 +2676,12 @@ impl Workspace { } for (pane, item) in items_to_add { - Pane::add_item(self, &pane, item.boxed_clone(), false, false, None, cx); + if let Some(index) = pane.update(cx, |pane, _| pane.index_for_item(item.as_ref())) { + pane.update(cx, |pane, cx| pane.activate_item(index, false, false, cx)); + } else { + Pane::add_item(self, &pane, item.boxed_clone(), false, false, None, cx); + } + if pane == self.active_pane { pane.update(cx, |pane, cx| pane.focus_active_item(cx)); } From 2375741bdf0c289ddcbd8b906344db03efa93937 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 1 Dec 2022 10:09:53 +0100 Subject: [PATCH 089/240] Implement `db2::Database::fuzzy_search_users` --- crates/collab/src/db2.rs | 36 +++++++++++--- crates/collab/src/db2/tests.rs | 90 +++++++++++++++++----------------- 2 files 
changed, 75 insertions(+), 51 deletions(-) diff --git a/crates/collab/src/db2.rs b/crates/collab/src/db2.rs index 2e6b349497..b69f7f32a4 100644 --- a/crates/collab/src/db2.rs +++ b/crates/collab/src/db2.rs @@ -20,8 +20,8 @@ use sea_orm::{ TransactionTrait, }; use sea_orm::{ - ActiveValue, ConnectionTrait, FromQueryResult, IntoActiveModel, JoinType, QueryOrder, - QuerySelect, + ActiveValue, ConnectionTrait, DatabaseBackend, FromQueryResult, IntoActiveModel, JoinType, + QueryOrder, QuerySelect, Statement, }; use sea_query::{Alias, Expr, OnConflict, Query}; use serde::{Deserialize, Serialize}; @@ -499,6 +499,30 @@ impl Database { result } + pub async fn fuzzy_search_users(&self, name_query: &str, limit: u32) -> Result<Vec<User>> { + self.transact(|tx| async { + let tx = tx; + let like_string = Self::fuzzy_like_string(name_query); + let query = " + SELECT users.* + FROM users + WHERE github_login ILIKE $1 + ORDER BY github_login <-> $2 + LIMIT $3 + "; + + Ok(user::Entity::find() + .from_raw_sql(Statement::from_sql_and_values( + self.pool.get_database_backend(), + query.into(), + vec![like_string.into(), name_query.into(), limit.into()], + )) + .all(&tx) + .await?) + }) + .await + } + // projects pub async fn share_project( @@ -727,9 +751,9 @@ impl Database { let tx = self.pool.begin().await?; // In Postgres, serializable transactions are opt-in - if let sea_orm::DatabaseBackend::Postgres = self.pool.get_database_backend() { - tx.execute(sea_orm::Statement::from_string( - sea_orm::DatabaseBackend::Postgres, + if let DatabaseBackend::Postgres = self.pool.get_database_backend() { + tx.execute(Statement::from_string( + DatabaseBackend::Postgres, "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;".into(), )) .await?; @@ -1047,7 +1071,7 @@ mod test { impl Drop for TestDb { fn drop(&mut self) { let db = self.db.take().unwrap(); - if let sea_orm::DatabaseBackend::Postgres = db.pool.get_database_backend() { + if let DatabaseBackend::Postgres = db.pool.get_database_backend() { db.runtime.as_ref().unwrap().block_on(async { use util::ResultExt; let query = " diff --git a/crates/collab/src/db2/tests.rs b/crates/collab/src/db2/tests.rs index 45715a925e..527f70adb8 100644 --- a/crates/collab/src/db2/tests.rs +++ b/crates/collab/src/db2/tests.rs @@ -409,53 +409,53 @@ fn test_fuzzy_like_string() { assert_eq!(Database::fuzzy_like_string(" z "), "%z%"); } -// #[gpui::test] -// async fn test_fuzzy_search_users() { -// let test_db = PostgresTestDb::new(build_background_executor()); -// let db = test_db.db(); -// for (i, github_login) in [ -// "California", -// "colorado", -// "oregon", -// "washington", -// "florida", -// "delaware", -// "rhode-island", -// ] -// .into_iter() -// .enumerate() -// { -// db.create_user( -// &format!("{github_login}@example.com"), -// false, -// NewUserParams { -// github_login: github_login.into(), -// github_user_id: i as i32, -// invite_count: 0, -// }, -// ) -// .await -// .unwrap(); -// } +#[gpui::test] +async fn test_fuzzy_search_users() { + let test_db = TestDb::postgres(build_background_executor()); + let db = test_db.db(); + for (i, github_login) in [ + "California", + "colorado", + "oregon", + "washington", + "florida", + "delaware", + "rhode-island", + ] + .into_iter() + .enumerate() + { + db.create_user( + &format!("{github_login}@example.com"), + false, + NewUserParams { + github_login: github_login.into(), + github_user_id: i as i32, + invite_count: 0, + }, + ) + .await + .unwrap(); + } -// assert_eq!( -// fuzzy_search_user_names(db, "clr").await, -// &["colorado", "California"] -//
); -// assert_eq!( -// fuzzy_search_user_names(db, "ro").await, -// &["rhode-island", "colorado", "oregon"], -// ); + assert_eq!( + fuzzy_search_user_names(db, "clr").await, + &["colorado", "California"] + ); + assert_eq!( + fuzzy_search_user_names(db, "ro").await, + &["rhode-island", "colorado", "oregon"], + ); -// async fn fuzzy_search_user_names(db: &Db, query: &str) -> Vec<String> { -// db.fuzzy_search_users(query, 10) -// .await -// .unwrap() -// .into_iter() -// .map(|user| user.github_login) -// .collect::<Vec<_>>() -// } -// } + async fn fuzzy_search_user_names(db: &Database, query: &str) -> Vec<String> { + db.fuzzy_search_users(query, 10) + .await + .unwrap() + .into_iter() + .map(|user| user.github_login) + .collect::<Vec<_>>() + } +} From 4f864a20a7cfede662091f3f71c8ba2aba71d295 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 1 Dec 2022 11:10:51 +0100 Subject: [PATCH 090/240] Implement invite codes using sea-orm --- crates/collab/src/db2.rs | 220 ++++++++++++++++++ crates/collab/src/db2/signup.rs | 33 +++ crates/collab/src/db2/tests.rs | 386 ++++++++++++++++---------------- 3 files changed, 446 insertions(+), 193 deletions(-) create mode 100644 crates/collab/src/db2/signup.rs diff --git a/crates/collab/src/db2.rs b/crates/collab/src/db2.rs index b69f7f32a4..75329f9268 100644 --- a/crates/collab/src/db2.rs +++ b/crates/collab/src/db2.rs @@ -4,6 +4,7 @@ mod project; mod project_collaborator; mod room; mod room_participant; +mod signup; #[cfg(test)] mod tests; mod user; @@ -14,6 +15,7 @@ use anyhow::anyhow; use collections::HashMap; use dashmap::DashMap; use futures::StreamExt; +use hyper::StatusCode; use rpc::{proto, ConnectionId}; use sea_orm::{ entity::prelude::*, ConnectOptions, DatabaseConnection, DatabaseTransaction, DbErr, @@ -34,6 +36,7 @@ use std::{future::Future, marker::PhantomData, rc::Rc, sync::Arc}; use tokio::sync::{Mutex, OwnedMutexGuard}; pub use contact::Contact; +pub use signup::Invite; pub use user::Model as User; pub struct Database { @@ -523,6 +526,222 @@ impl Database { .await } + // invite codes + + pub async fn create_invite_from_code( + &self, + code: &str, + email_address: &str, + device_id: Option<&str>, + ) -> Result<Invite> { + self.transact(|tx| async move { + let existing_user = user::Entity::find() + .filter(user::Column::EmailAddress.eq(email_address)) + .one(&tx) + .await?; + + if existing_user.is_some() { + Err(anyhow!("email address is already in use"))?; + } + + let inviter = match user::Entity::find() + .filter(user::Column::InviteCode.eq(code)) + .one(&tx) + .await? + { + Some(inviter) => inviter, + None => { + return Err(Error::Http( + StatusCode::NOT_FOUND, + "invite code not found".to_string(), + ))?
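+ // Note: the trailing `?` on `Err(...)` above applies the `From` conversion into this function's error type; the same `Err(...)?` idiom appears throughout this file.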
+ } + }; + + if inviter.invite_count == 0 { + Err(Error::Http( + StatusCode::UNAUTHORIZED, + "no invites remaining".to_string(), + ))?; + } + + let signup = signup::Entity::insert(signup::ActiveModel { + email_address: ActiveValue::set(email_address.into()), + email_confirmation_code: ActiveValue::set(random_email_confirmation_code()), + email_confirmation_sent: ActiveValue::set(false), + inviting_user_id: ActiveValue::set(Some(inviter.id)), + platform_linux: ActiveValue::set(false), + platform_mac: ActiveValue::set(false), + platform_windows: ActiveValue::set(false), + platform_unknown: ActiveValue::set(true), + device_id: ActiveValue::set(device_id.map(|device_id| device_id.into())), + ..Default::default() + }) + .on_conflict( + OnConflict::column(signup::Column::EmailAddress) + .update_column(signup::Column::InvitingUserId) + .to_owned(), + ) + .exec_with_returning(&tx) + .await?; + tx.commit().await?; + + Ok(Invite { + email_address: signup.email_address, + email_confirmation_code: signup.email_confirmation_code, + }) + }) + .await + } + + pub async fn create_user_from_invite( + &self, + invite: &Invite, + user: NewUserParams, + ) -> Result<Option<NewUserResult>> { + self.transact(|tx| async { + let tx = tx; + let signup = signup::Entity::find() + .filter( + signup::Column::EmailAddress + .eq(invite.email_address.as_str()) + .and( + signup::Column::EmailConfirmationCode + .eq(invite.email_confirmation_code.as_str()), + ), + ) + .one(&tx) + .await? + .ok_or_else(|| Error::Http(StatusCode::NOT_FOUND, "no such invite".to_string()))?; + + if signup.user_id.is_some() { + return Ok(None); + } + + let user = user::Entity::insert(user::ActiveModel { + email_address: ActiveValue::set(Some(invite.email_address.clone())), + github_login: ActiveValue::set(user.github_login.clone()), + github_user_id: ActiveValue::set(Some(user.github_user_id)), + admin: ActiveValue::set(false), + invite_count: ActiveValue::set(user.invite_count), + invite_code: ActiveValue::set(Some(random_invite_code())), + metrics_id: ActiveValue::set(Uuid::new_v4()), + ..Default::default() + }) + .on_conflict( + OnConflict::column(user::Column::GithubLogin) + .update_columns([ + user::Column::EmailAddress, + user::Column::GithubUserId, + user::Column::Admin, + ]) + .to_owned(), + ) + .exec_with_returning(&tx) + .await?; + + let mut signup = signup.into_active_model(); + signup.user_id = ActiveValue::set(Some(user.id)); + let signup = signup.update(&tx).await?; + + if let Some(inviting_user_id) = signup.inviting_user_id { + let result = user::Entity::update_many() + .filter( + user::Column::Id + .eq(inviting_user_id) + .and(user::Column::InviteCount.gt(0)), + ) + .col_expr( + user::Column::InviteCount, + Expr::col(user::Column::InviteCount).sub(1), + ) + .exec(&tx) + .await?; + + if result.rows_affected == 0 { + Err(Error::Http( + StatusCode::UNAUTHORIZED, + "no invites remaining".to_string(), + ))?; + } + + contact::Entity::insert(contact::ActiveModel { + user_id_a: ActiveValue::set(inviting_user_id), + user_id_b: ActiveValue::set(user.id), + a_to_b: ActiveValue::set(true), + should_notify: ActiveValue::set(true), + accepted: ActiveValue::set(true), + ..Default::default() + }) + .on_conflict(OnConflict::new().do_nothing().to_owned()) + .exec_without_returning(&tx) + .await?; + } + + tx.commit().await?; + Ok(Some(NewUserResult { + user_id: user.id, + metrics_id: user.metrics_id.to_string(), + inviting_user_id: signup.inviting_user_id, + signup_device_id: signup.device_id, + })) + }) + .await + } + + pub async fn set_invite_count_for_user(&self, id:
UserId, count: u32) -> Result<()> { + self.transact(|tx| async move { + if count > 0 { + user::Entity::update_many() + .filter( + user::Column::Id + .eq(id) + .and(user::Column::InviteCode.is_null()), + ) + .col_expr(user::Column::InviteCode, random_invite_code().into()) + .exec(&tx) + .await?; + } + + user::Entity::update_many() + .filter(user::Column::Id.eq(id)) + .col_expr(user::Column::InviteCount, count.into()) + .exec(&tx) + .await?; + tx.commit().await?; + Ok(()) + }) + .await + } + + pub async fn get_invite_code_for_user(&self, id: UserId) -> Result<Option<(String, u32)>> { + self.transact(|tx| async move { + match user::Entity::find_by_id(id).one(&tx).await? { + Some(user) if user.invite_code.is_some() => { + Ok(Some((user.invite_code.unwrap(), user.invite_count as u32))) + } + _ => Ok(None), + } + }) + .await + } + + pub async fn get_user_for_invite_code(&self, code: &str) -> Result<User> { + self.transact(|tx| async move { + user::Entity::find() + .filter(user::Column::InviteCode.eq(code)) + .one(&tx) + .await? + .ok_or_else(|| { + Error::Http( + StatusCode::NOT_FOUND, + "that invite code does not exist".to_string(), + ) + }) + }) + .await + } + // projects pub async fn share_project( @@ -966,6 +1185,7 @@ id_type!(RoomId); id_type!(RoomParticipantId); id_type!(ProjectId); id_type!(ProjectCollaboratorId); +id_type!(SignupId); id_type!(WorktreeId); #[cfg(test)] diff --git a/crates/collab/src/db2/signup.rs b/crates/collab/src/db2/signup.rs new file mode 100644 index 0000000000..ad0aa5eb82 --- /dev/null +++ b/crates/collab/src/db2/signup.rs @@ -0,0 +1,33 @@ +use super::{SignupId, UserId}; +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] +#[sea_orm(table_name = "signups")] +pub struct Model { + #[sea_orm(primary_key)] + pub id: SignupId, + pub email_address: String, + pub email_confirmation_code: String, + pub email_confirmation_sent: bool, + pub created_at: DateTime, + pub device_id: Option<String>, + pub user_id: Option<UserId>, + pub inviting_user_id: Option<UserId>, + pub platform_mac: bool, + pub platform_linux: bool, + pub platform_windows: bool, + pub platform_unknown: bool, + pub editor_features: Option<String>, + pub programming_languages: Option<String>, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} + +#[derive(Debug)] +pub struct Invite { + pub email_address: String, + pub email_confirmation_code: String, +} diff --git a/crates/collab/src/db2/tests.rs b/crates/collab/src/db2/tests.rs index 527f70adb8..468d0074d4 100644 --- a/crates/collab/src/db2/tests.rs +++ b/crates/collab/src/db2/tests.rs @@ -457,210 +457,210 @@ async fn test_fuzzy_search_users() { } } -#[gpui::test] -async fn test_invite_codes() { - let test_db = PostgresTestDb::new(build_background_executor()); - let db = test_db.db(); +#[gpui::test] +async fn test_invite_codes() { + let test_db = TestDb::postgres(build_background_executor()); + let db = test_db.db(); + let NewUserResult { user_id: user1, ..
} = db + .create_user( + "user1@example.com", + false, + NewUserParams { + github_login: "user1".into(), + github_user_id: 0, + invite_count: 0, + }, + ) + .await + .unwrap(); -// // Initially, user 1 has no invite code -// assert_eq!(db.get_invite_code_for_user(user1).await.unwrap(), None); + // Initially, user 1 has no invite code + assert_eq!(db.get_invite_code_for_user(user1).await.unwrap(), None); -// // Setting invite count to 0 when no code is assigned does not assign a new code -// db.set_invite_count_for_user(user1, 0).await.unwrap(); -// assert!(db.get_invite_code_for_user(user1).await.unwrap().is_none()); + // Setting invite count to 0 when no code is assigned does not assign a new code + db.set_invite_count_for_user(user1, 0).await.unwrap(); + assert!(db.get_invite_code_for_user(user1).await.unwrap().is_none()); -// // User 1 creates an invite code that can be used twice. -// db.set_invite_count_for_user(user1, 2).await.unwrap(); -// let (invite_code, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); -// assert_eq!(invite_count, 2); + // User 1 creates an invite code that can be used twice. + db.set_invite_count_for_user(user1, 2).await.unwrap(); + let (invite_code, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); + assert_eq!(invite_count, 2); -// // User 2 redeems the invite code and becomes a contact of user 1. -// let user2_invite = db -// .create_invite_from_code(&invite_code, "user2@example.com", Some("user-2-device-id")) -// .await -// .unwrap(); -// let NewUserResult { -// user_id: user2, -// inviting_user_id, -// signup_device_id, -// metrics_id, -// } = db -// .create_user_from_invite( -// &user2_invite, -// NewUserParams { -// github_login: "user2".into(), -// github_user_id: 2, -// invite_count: 7, -// }, -// ) -// .await -// .unwrap() -// .unwrap(); -// let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); -// assert_eq!(invite_count, 1); -// assert_eq!(inviting_user_id, Some(user1)); -// assert_eq!(signup_device_id.unwrap(), "user-2-device-id"); -// assert_eq!(db.get_user_metrics_id(user2).await.unwrap(), metrics_id); -// assert_eq!( -// db.get_contacts(user1).await.unwrap(), -// [Contact::Accepted { -// user_id: user2, -// should_notify: true, -// busy: false, -// }] -// ); -// assert_eq!( -// db.get_contacts(user2).await.unwrap(), -// [Contact::Accepted { -// user_id: user1, -// should_notify: false, -// busy: false, -// }] -// ); -// assert_eq!( -// db.get_invite_code_for_user(user2).await.unwrap().unwrap().1, -// 7 -// ); + // User 2 redeems the invite code and becomes a contact of user 1. 
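+ // (Redemption is two steps: create_invite_from_code only records a signup row; the inviter's invite_count is not decremented until create_user_from_invite succeeds.)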
+ let user2_invite = db + .create_invite_from_code(&invite_code, "user2@example.com", Some("user-2-device-id")) + .await + .unwrap(); + let NewUserResult { + user_id: user2, + inviting_user_id, + signup_device_id, + metrics_id, + } = db + .create_user_from_invite( + &user2_invite, + NewUserParams { + github_login: "user2".into(), + github_user_id: 2, + invite_count: 7, + }, + ) + .await + .unwrap() + .unwrap(); + let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); + assert_eq!(invite_count, 1); + assert_eq!(inviting_user_id, Some(user1)); + assert_eq!(signup_device_id.unwrap(), "user-2-device-id"); + assert_eq!(db.get_user_metrics_id(user2).await.unwrap(), metrics_id); + assert_eq!( + db.get_contacts(user1).await.unwrap(), + [Contact::Accepted { + user_id: user2, + should_notify: true, + busy: false, + }] + ); + assert_eq!( + db.get_contacts(user2).await.unwrap(), + [Contact::Accepted { + user_id: user1, + should_notify: false, + busy: false, + }] + ); + assert_eq!( + db.get_invite_code_for_user(user2).await.unwrap().unwrap().1, + 7 + ); -// // User 3 redeems the invite code and becomes a contact of user 1. -// let user3_invite = db -// .create_invite_from_code(&invite_code, "user3@example.com", None) -// .await -// .unwrap(); -// let NewUserResult { -// user_id: user3, -// inviting_user_id, -// signup_device_id, -// .. -// } = db -// .create_user_from_invite( -// &user3_invite, -// NewUserParams { -// github_login: "user-3".into(), -// github_user_id: 3, -// invite_count: 3, -// }, -// ) -// .await -// .unwrap() -// .unwrap(); -// let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); -// assert_eq!(invite_count, 0); -// assert_eq!(inviting_user_id, Some(user1)); -// assert!(signup_device_id.is_none()); -// assert_eq!( -// db.get_contacts(user1).await.unwrap(), -// [ -// Contact::Accepted { -// user_id: user2, -// should_notify: true, -// busy: false, -// }, -// Contact::Accepted { -// user_id: user3, -// should_notify: true, -// busy: false, -// } -// ] -// ); -// assert_eq!( -// db.get_contacts(user3).await.unwrap(), -// [Contact::Accepted { -// user_id: user1, -// should_notify: false, -// busy: false, -// }] -// ); -// assert_eq!( -// db.get_invite_code_for_user(user3).await.unwrap().unwrap().1, -// 3 -// ); + // User 3 redeems the invite code and becomes a contact of user 1. + let user3_invite = db + .create_invite_from_code(&invite_code, "user3@example.com", None) + .await + .unwrap(); + let NewUserResult { + user_id: user3, + inviting_user_id, + signup_device_id, + .. + } = db + .create_user_from_invite( + &user3_invite, + NewUserParams { + github_login: "user-3".into(), + github_user_id: 3, + invite_count: 3, + }, + ) + .await + .unwrap() + .unwrap(); + let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); + assert_eq!(invite_count, 0); + assert_eq!(inviting_user_id, Some(user1)); + assert!(signup_device_id.is_none()); + assert_eq!( + db.get_contacts(user1).await.unwrap(), + [ + Contact::Accepted { + user_id: user2, + should_notify: true, + busy: false, + }, + Contact::Accepted { + user_id: user3, + should_notify: true, + busy: false, + } + ] + ); + assert_eq!( + db.get_contacts(user3).await.unwrap(), + [Contact::Accepted { + user_id: user1, + should_notify: false, + busy: false, + }] + ); + assert_eq!( + db.get_invite_code_for_user(user3).await.unwrap().unwrap().1, + 3 + ); -// // Trying to reedem the code for the third time results in an error. 
-// db.create_invite_from_code(&invite_code, "user4@example.com", Some("user-4-device-id")) -// .await -// .unwrap_err(); + // Trying to reedem the code for the third time results in an error. + db.create_invite_from_code(&invite_code, "user4@example.com", Some("user-4-device-id")) + .await + .unwrap_err(); -// // Invite count can be updated after the code has been created. -// db.set_invite_count_for_user(user1, 2).await.unwrap(); -// let (latest_code, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); -// assert_eq!(latest_code, invite_code); // Invite code doesn't change when we increment above 0 -// assert_eq!(invite_count, 2); + // Invite count can be updated after the code has been created. + db.set_invite_count_for_user(user1, 2).await.unwrap(); + let (latest_code, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); + assert_eq!(latest_code, invite_code); // Invite code doesn't change when we increment above 0 + assert_eq!(invite_count, 2); -// // User 4 can now redeem the invite code and becomes a contact of user 1. -// let user4_invite = db -// .create_invite_from_code(&invite_code, "user4@example.com", Some("user-4-device-id")) -// .await -// .unwrap(); -// let user4 = db -// .create_user_from_invite( -// &user4_invite, -// NewUserParams { -// github_login: "user-4".into(), -// github_user_id: 4, -// invite_count: 5, -// }, -// ) -// .await -// .unwrap() -// .unwrap() -// .user_id; + // User 4 can now redeem the invite code and becomes a contact of user 1. + let user4_invite = db + .create_invite_from_code(&invite_code, "user4@example.com", Some("user-4-device-id")) + .await + .unwrap(); + let user4 = db + .create_user_from_invite( + &user4_invite, + NewUserParams { + github_login: "user-4".into(), + github_user_id: 4, + invite_count: 5, + }, + ) + .await + .unwrap() + .unwrap() + .user_id; -// let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); -// assert_eq!(invite_count, 1); -// assert_eq!( -// db.get_contacts(user1).await.unwrap(), -// [ -// Contact::Accepted { -// user_id: user2, -// should_notify: true, -// busy: false, -// }, -// Contact::Accepted { -// user_id: user3, -// should_notify: true, -// busy: false, -// }, -// Contact::Accepted { -// user_id: user4, -// should_notify: true, -// busy: false, -// } -// ] -// ); -// assert_eq!( -// db.get_contacts(user4).await.unwrap(), -// [Contact::Accepted { -// user_id: user1, -// should_notify: false, -// busy: false, -// }] -// ); -// assert_eq!( -// db.get_invite_code_for_user(user4).await.unwrap().unwrap().1, -// 5 -// ); + let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); + assert_eq!(invite_count, 1); + assert_eq!( + db.get_contacts(user1).await.unwrap(), + [ + Contact::Accepted { + user_id: user2, + should_notify: true, + busy: false, + }, + Contact::Accepted { + user_id: user3, + should_notify: true, + busy: false, + }, + Contact::Accepted { + user_id: user4, + should_notify: true, + busy: false, + } + ] + ); + assert_eq!( + db.get_contacts(user4).await.unwrap(), + [Contact::Accepted { + user_id: user1, + should_notify: false, + busy: false, + }] + ); + assert_eq!( + db.get_invite_code_for_user(user4).await.unwrap().unwrap().1, + 5 + ); -// // An existing user cannot redeem invite codes. 
-// db.create_invite_from_code(&invite_code, "user2@example.com", Some("user-2-device-id")) -// .await -// .unwrap_err(); -// let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); -// assert_eq!(invite_count, 1); -// } + // An existing user cannot redeem invite codes. + db.create_invite_from_code(&invite_code, "user2@example.com", Some("user-2-device-id")) + .await + .unwrap_err(); + let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); + assert_eq!(invite_count, 1); +} // #[gpui::test] // async fn test_signups() { From 19d14737bfe5b6a249236586e5e81f82ac6188d8 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 1 Dec 2022 11:58:07 +0100 Subject: [PATCH 091/240] Implement signups using sea-orm --- crates/collab/Cargo.toml | 2 +- crates/collab/src/db2.rs | 102 +++++++++++- crates/collab/src/db2/signup.rs | 29 +++- crates/collab/src/db2/tests.rs | 272 ++++++++++++++++---------------- 4 files changed, 262 insertions(+), 143 deletions(-) diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index a268bdd7b0..4cb91ad12d 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -36,7 +36,7 @@ prometheus = "0.13" rand = "0.8" reqwest = { version = "0.11", features = ["json"], optional = true } scrypt = "0.7" -sea-orm = { version = "0.10", features = ["sqlx-postgres", "runtime-tokio-rustls"] } +sea-orm = { version = "0.10", features = ["sqlx-postgres", "postgres-array", "runtime-tokio-rustls"] } sea-query = { version = "0.27", features = ["derive"] } sea-query-binder = { version = "0.2", features = ["sqlx-postgres"] } serde = { version = "1.0", features = ["derive", "rc"] } diff --git a/crates/collab/src/db2.rs b/crates/collab/src/db2.rs index 75329f9268..3aa21c6059 100644 --- a/crates/collab/src/db2.rs +++ b/crates/collab/src/db2.rs @@ -36,7 +36,7 @@ use std::{future::Future, marker::PhantomData, rc::Rc, sync::Arc}; use tokio::sync::{Mutex, OwnedMutexGuard}; pub use contact::Contact; -pub use signup::Invite; +pub use signup::{Invite, NewSignup, WaitlistSummary}; pub use user::Model as User; pub struct Database { @@ -140,6 +140,11 @@ impl Database { .await } + pub async fn get_user_by_id(&self, id: UserId) -> Result> { + self.transact(|tx| async move { Ok(user::Entity::find_by_id(id).one(&tx).await?) 
}) + .await + } + pub async fn get_users_by_ids(&self, ids: Vec<UserId>) -> Result<Vec<User>> { self.transact(|tx| async { let tx = tx; @@ -322,7 +327,7 @@ impl Database { } pub async fn send_contact_request(&self, sender_id: UserId, receiver_id: UserId) -> Result<()> { - self.transact(|mut tx| async move { + self.transact(|tx| async move { let (id_a, id_b, a_to_b) = if sender_id < receiver_id { (sender_id, receiver_id, true) } else { @@ -526,6 +531,99 @@ impl Database { .await } + // signups + + pub async fn create_signup(&self, signup: NewSignup) -> Result<()> { + self.transact(|tx| async { + signup::ActiveModel { + email_address: ActiveValue::set(signup.email_address.clone()), + email_confirmation_code: ActiveValue::set(random_email_confirmation_code()), + email_confirmation_sent: ActiveValue::set(false), + platform_mac: ActiveValue::set(signup.platform_mac), + platform_windows: ActiveValue::set(signup.platform_windows), + platform_linux: ActiveValue::set(signup.platform_linux), + platform_unknown: ActiveValue::set(false), + editor_features: ActiveValue::set(Some(signup.editor_features.clone())), + programming_languages: ActiveValue::set(Some(signup.programming_languages.clone())), + device_id: ActiveValue::set(signup.device_id.clone()), + ..Default::default() + } + .insert(&tx) + .await?; + tx.commit().await?; + Ok(()) + }) + .await + } + + pub async fn get_waitlist_summary(&self) -> Result<WaitlistSummary> { + self.transact(|tx| async move { + let query = " + SELECT + COUNT(*) as count, + COALESCE(SUM(CASE WHEN platform_linux THEN 1 ELSE 0 END), 0) as linux_count, + COALESCE(SUM(CASE WHEN platform_mac THEN 1 ELSE 0 END), 0) as mac_count, + COALESCE(SUM(CASE WHEN platform_windows THEN 1 ELSE 0 END), 0) as windows_count, + COALESCE(SUM(CASE WHEN platform_unknown THEN 1 ELSE 0 END), 0) as unknown_count + FROM ( + SELECT * + FROM signups + WHERE + NOT email_confirmation_sent + ) AS unsent + "; + Ok( + WaitlistSummary::find_by_statement(Statement::from_sql_and_values( + self.pool.get_database_backend(), + query.into(), + vec![], + )) + .one(&tx) + .await? + .ok_or_else(|| anyhow!("invalid result"))?, + ) + }) + .await + } + + pub async fn record_sent_invites(&self, invites: &[Invite]) -> Result<()> { + let emails = invites + .iter() + .map(|s| s.email_address.as_str()) + .collect::<Vec<_>>(); + self.transact(|tx| async { + signup::Entity::update_many() + .filter(signup::Column::EmailAddress.is_in(emails.iter().copied())) + .col_expr(signup::Column::EmailConfirmationSent, true.into()) + .exec(&tx) + .await?; + tx.commit().await?; + Ok(()) + }) + .await + } + + pub async fn get_unsent_invites(&self, count: usize) -> Result<Vec<Invite>> { + self.transact(|tx| async move { + Ok(signup::Entity::find() + .select_only() + .column(signup::Column::EmailAddress) + .column(signup::Column::EmailConfirmationCode) + .filter( + signup::Column::EmailConfirmationSent.eq(false).and( + signup::Column::PlatformMac + .eq(true) + .or(signup::Column::PlatformUnknown.eq(true)), + ), + ) + .limit(count as u64) + .into_model() + .all(&tx) + .await?)
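+ // The filter above only selects signups that reported macOS or an unknown platform; Linux and Windows entries stay on the waitlist, presumably because the client only supported macOS at this point.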
+ }) + .await + } + // invite codes pub async fn create_invite_from_code( diff --git a/crates/collab/src/db2/signup.rs b/crates/collab/src/db2/signup.rs index ad0aa5eb82..8fab8daa36 100644 --- a/crates/collab/src/db2/signup.rs +++ b/crates/collab/src/db2/signup.rs @@ -1,5 +1,6 @@ use super::{SignupId, UserId}; -use sea_orm::entity::prelude::*; +use sea_orm::{entity::prelude::*, FromQueryResult}; +use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] #[sea_orm(table_name = "signups")] pub struct Model { @@ -17,8 +18,8 @@ pub struct Model { pub platform_linux: bool, pub platform_windows: bool, pub platform_unknown: bool, - pub editor_features: Option<String>, - pub programming_languages: Option<String>, + pub editor_features: Option<Vec<String>>, + pub programming_languages: Option<Vec<String>>, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] pub enum Relation {} impl ActiveModelBehavior for ActiveModel {} -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq, FromQueryResult)] pub struct Invite { pub email_address: String, pub email_confirmation_code: String, } + +#[derive(Clone, Deserialize)] +pub struct NewSignup { + pub email_address: String, + pub platform_mac: bool, + pub platform_windows: bool, + pub platform_linux: bool, + pub editor_features: Vec<String>, + pub programming_languages: Vec<String>, + pub device_id: Option<String>, +} + +#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, FromQueryResult)] +pub struct WaitlistSummary { + pub count: i64, + pub linux_count: i64, + pub mac_count: i64, + pub windows_count: i64, + pub unknown_count: i64, +} diff --git a/crates/collab/src/db2/tests.rs b/crates/collab/src/db2/tests.rs index 468d0074d4..b276bd5057 100644 --- a/crates/collab/src/db2/tests.rs +++ b/crates/collab/src/db2/tests.rs @@ -662,151 +662,151 @@ async fn test_invite_codes() { assert_eq!(invite_count, 1); } -// #[gpui::test] -// async fn test_signups() { -// let test_db = PostgresTestDb::new(build_background_executor()); -// let db = test_db.db(); + #[gpui::test] +async fn test_signups() { + let test_db = TestDb::postgres(build_background_executor()); + let db = test_db.db(); -// // people sign up on the waitlist -// for i in 0..8 { -// db.create_signup(Signup { -// email_address: format!("person-{i}@example.com"), -// platform_mac: true, -// platform_linux: i % 2 == 0, -// platform_windows: i % 4 == 0, -// editor_features: vec!["speed".into()], -// programming_languages: vec!["rust".into(), "c".into()], -// device_id: Some(format!("device_id_{i}")), -// }) -// .await -// .unwrap(); -// } + // people sign up on the waitlist + for i in 0..8 { + db.create_signup(NewSignup { + email_address: format!("person-{i}@example.com"), + platform_mac: true, + platform_linux: i % 2 == 0, + platform_windows: i % 4 == 0, + editor_features: vec!["speed".into()], + programming_languages: vec!["rust".into(), "c".into()], + device_id: Some(format!("device_id_{i}")), + }) + .await + .unwrap(); + } -// assert_eq!( -// db.get_waitlist_summary().await.unwrap(), -// WaitlistSummary { -// count: 8, -// mac_count: 8, -// linux_count: 4, -// windows_count: 2, -// unknown_count: 0, -// } -// ); + assert_eq!( + db.get_waitlist_summary().await.unwrap(), + WaitlistSummary { + count: 8, + mac_count: 8, + linux_count: 4, + windows_count: 2, + unknown_count: 0, + } + ); -// // retrieve the next batch of signup emails to send -// let signups_batch1 = db.get_unsent_invites(3).await.unwrap(); -// let addresses = signups_batch1 -// .iter() -// .map(|s| &s.email_address) -// .collect::<Vec<_>>(); -// assert_eq!( -// addresses, -//
&[ -// "person-0@example.com", -// "person-1@example.com", -// "person-2@example.com" -// ] -// ); -// assert_ne!( -// signups_batch1[0].email_confirmation_code, -// signups_batch1[1].email_confirmation_code -// ); + // retrieve the next batch of signup emails to send + let signups_batch1 = db.get_unsent_invites(3).await.unwrap(); + let addresses = signups_batch1 + .iter() + .map(|s| &s.email_address) + .collect::<Vec<_>>(); + assert_eq!( + addresses, + &[ + "person-0@example.com", + "person-1@example.com", + "person-2@example.com" + ] + ); + assert_ne!( + signups_batch1[0].email_confirmation_code, + signups_batch1[1].email_confirmation_code + ); -// // the waitlist isn't updated until we record that the emails -// // were successfully sent. -// let signups_batch = db.get_unsent_invites(3).await.unwrap(); -// assert_eq!(signups_batch, signups_batch1); + // the waitlist isn't updated until we record that the emails + // were successfully sent. + let signups_batch = db.get_unsent_invites(3).await.unwrap(); + assert_eq!(signups_batch, signups_batch1); -// // once the emails go out, we can retrieve the next batch -// // of signups. -// db.record_sent_invites(&signups_batch1).await.unwrap(); -// let signups_batch2 = db.get_unsent_invites(3).await.unwrap(); -// let addresses = signups_batch2 -// .iter() -// .map(|s| &s.email_address) -// .collect::<Vec<_>>(); -// assert_eq!( -// addresses, -// &[ -// "person-3@example.com", -// "person-4@example.com", -// "person-5@example.com" -// ] -// ); + // once the emails go out, we can retrieve the next batch + // of signups. + db.record_sent_invites(&signups_batch1).await.unwrap(); + let signups_batch2 = db.get_unsent_invites(3).await.unwrap(); + let addresses = signups_batch2 + .iter() + .map(|s| &s.email_address) + .collect::<Vec<_>>(); + assert_eq!( + addresses, + &[ + "person-3@example.com", + "person-4@example.com", + "person-5@example.com" + ] + ); -// // the sent invites are excluded from the summary. -// assert_eq!( -// db.get_waitlist_summary().await.unwrap(), -// WaitlistSummary { -// count: 5, -// mac_count: 5, -// linux_count: 2, -// windows_count: 1, -// unknown_count: 0, -// } -// ); + // the sent invites are excluded from the summary. + assert_eq!( + db.get_waitlist_summary().await.unwrap(), + WaitlistSummary { + count: 5, + mac_count: 5, + linux_count: 2, + windows_count: 1, + unknown_count: 0, + } + ); -// // user completes the signup process by providing their -// // github account. -// let NewUserResult { -// user_id, -// inviting_user_id, -// signup_device_id, -// .. + // user completes the signup process by providing their + // github account. + let NewUserResult { + user_id, + inviting_user_id, + signup_device_id, + ..
+ } = db + .create_user_from_invite( + &Invite { + email_address: signups_batch1[0].email_address.clone(), + email_confirmation_code: signups_batch1[0].email_confirmation_code.clone(), + }, + NewUserParams { + github_login: "person-0".into(), + github_user_id: 0, + invite_count: 5, + }, + ) + .await + .unwrap() + .unwrap(); + let user = db.get_user_by_id(user_id).await.unwrap().unwrap(); + assert!(inviting_user_id.is_none()); + assert_eq!(user.github_login, "person-0"); + assert_eq!(user.email_address.as_deref(), Some("person-0@example.com")); + assert_eq!(user.invite_count, 5); + assert_eq!(signup_device_id.unwrap(), "device_id_0"); -// // cannot redeem the same signup again. -// assert!(db -// .create_user_from_invite( -// &Invite { -// email_address: signups_batch1[0].email_address.clone(), -// email_confirmation_code: signups_batch1[0].email_confirmation_code.clone(), -// }, -// NewUserParams { -// github_login: "some-other-github_account".into(), -// github_user_id: 1, -// invite_count: 5, -// }, -// ) -// .await -// .unwrap() -// .is_none()); + // cannot redeem the same signup again. + assert!(db + .create_user_from_invite( + &Invite { + email_address: signups_batch1[0].email_address.clone(), + email_confirmation_code: signups_batch1[0].email_confirmation_code.clone(), + }, + NewUserParams { + github_login: "some-other-github_account".into(), + github_user_id: 1, + invite_count: 5, + }, + ) + .await + .unwrap() + .is_none()); -// // cannot redeem a signup with the wrong confirmation code. -// db.create_user_from_invite( -// &Invite { -// email_address: signups_batch1[1].email_address.clone(), -// email_confirmation_code: "the-wrong-code".to_string(), -// }, -// NewUserParams { -// github_login: "person-1".into(), -// github_user_id: 2, -// invite_count: 5, -// }, -// ) -// .await -// .unwrap_err(); -// } + // cannot redeem a signup with the wrong confirmation code. 
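+ // (A mismatched confirmation code is a hard error, while redeeming an already-consumed signup above returned Ok(None): create_user_from_invite filters on both email_address and email_confirmation_code.)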
+    db.create_user_from_invite(
+        &Invite {
+            email_address: signups_batch1[1].email_address.clone(),
+            email_confirmation_code: "the-wrong-code".to_string(),
+        },
+        NewUserParams {
+            github_login: "person-1".into(),
+            github_user_id: 2,
+            invite_count: 5,
+        },
+    )
+    .await
+    .unwrap_err();
+}

 fn build_background_executor() -> Arc<Background> {
     Deterministic::new(0).build_background()

From d2385bd6a0d90771cec772267916b3a7f566ea35 Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Thu, 1 Dec 2022 14:40:37 +0100
Subject: [PATCH 092/240] Start using the new sea-orm backed database

---
 Cargo.lock                                    |    1 -
 crates/collab/Cargo.toml                      |    4 +-
 crates/collab/src/api.rs                      |    4 +-
 crates/collab/src/auth.rs                     |    2 +-
 crates/collab/src/db.rs                       | 3686 +++++------------
 crates/collab/src/{db2 => db}/access_token.rs |    0
 crates/collab/src/{db2 => db}/contact.rs      |    0
 crates/collab/src/{db2 => db}/project.rs      |    0
 .../src/{db2 => db}/project_collaborator.rs   |    0
 crates/collab/src/{db2 => db}/room.rs         |    0
 .../src/{db2 => db}/room_participant.rs       |    0
 crates/collab/src/db/schema.rs                |   43 -
 crates/collab/src/{db2 => db}/signup.rs       |    2 +-
 crates/collab/src/db/tests.rs                 |   35 +-
 crates/collab/src/{db2 => db}/user.rs         |    4 +-
 crates/collab/src/{db2 => db}/worktree.rs     |    0
 crates/collab/src/db2.rs                      | 1416 -------
 crates/collab/src/db2/tests.rs                |  813 ----
 crates/collab/src/integration_tests.rs        |    4 +-
 crates/collab/src/main.rs                     |   13 +-
 crates/collab/src/rpc.rs                      |    6 +-
 21 files changed, 1131 insertions(+), 4902 deletions(-)
 rename crates/collab/src/{db2 => db}/access_token.rs (100%)
 rename crates/collab/src/{db2 => db}/contact.rs (100%)
 rename crates/collab/src/{db2 => db}/project.rs (100%)
 rename crates/collab/src/{db2 => db}/project_collaborator.rs (100%)
 rename crates/collab/src/{db2 => db}/room.rs (100%)
 rename crates/collab/src/{db2 => db}/room_participant.rs (100%)
 delete mode 100644 crates/collab/src/db/schema.rs
 rename crates/collab/src/{db2 => db}/signup.rs (95%)
 rename crates/collab/src/{db2 => db}/user.rs (93%)
 rename crates/collab/src/{db2 => db}/worktree.rs (100%)
 delete mode 100644 crates/collab/src/db2.rs
 delete mode 100644 crates/collab/src/db2/tests.rs

diff --git a/Cargo.lock b/Cargo.lock
index 7b09775f2a..590835a49b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1159,7 +1159,6 @@ dependencies = [
  "scrypt",
  "sea-orm",
  "sea-query",
- "sea-query-binder",
  "serde",
  "serde_json",
  "settings",
diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml
index 4cb91ad12d..66f426839c 100644
--- a/crates/collab/Cargo.toml
+++ b/crates/collab/Cargo.toml
@@ -37,8 +37,7 @@ rand = "0.8"
 reqwest = { version = "0.11", features = ["json"], optional = true }
 scrypt = "0.7"
 sea-orm = { version = "0.10", features = ["sqlx-postgres", "postgres-array", "runtime-tokio-rustls"] }
-sea-query = { version = "0.27", features = ["derive"] }
-sea-query-binder = { version = "0.2", features = ["sqlx-postgres"] }
+sea-query = "0.27"
 serde = { version = "1.0", features = ["derive", "rc"] }
 serde_json = "1.0"
 sha-1 = "0.9"
@@ -76,7 +75,6 @@ log = { version = "0.4.16", features = ["kv_unstable_serde"] }
 util = { path = "../util" }
 lazy_static = "1.4"
 sea-orm = { version = "0.10", features = ["sqlx-sqlite"] }
-sea-query-binder = { version = "0.2", features = ["sqlx-sqlite"] }
 serde_json = { version = "1.0", features = ["preserve_order"] }
 sqlx = { version = "0.6", features = ["sqlite"] }
 unindent = "0.1"
diff --git a/crates/collab/src/api.rs b/crates/collab/src/api.rs
index 5fcdc5fcfd..bf183edf54 100644
--- a/crates/collab/src/api.rs
+++ b/crates/collab/src/api.rs
@@ -1,6
+1,6 @@
 use crate::{
     auth,
-    db::{Invite, NewUserParams, Signup, User, UserId, WaitlistSummary},
+    db::{Invite, NewSignup, NewUserParams, User, UserId, WaitlistSummary},
     rpc::{self, ResultExt},
     AppState, Error, Result,
 };
@@ -335,7 +335,7 @@ async fn get_user_for_invite_code(
 }

 async fn create_signup(
-    Json(params): Json<Signup>,
+    Json(params): Json<NewSignup>,
     Extension(app): Extension<Arc<AppState>>,
 ) -> Result<()> {
     app.db.create_signup(params).await?;
diff --git a/crates/collab/src/auth.rs b/crates/collab/src/auth.rs
index 63f032f7e6..0c9cf33a6b 100644
--- a/crates/collab/src/auth.rs
+++ b/crates/collab/src/auth.rs
@@ -75,7 +75,7 @@ pub async fn validate_header<B>(mut req: Request<B>, next: Next<B>) -> impl IntoResponse {

 const MAX_ACCESS_TOKENS_TO_STORE: usize = 8;

-pub async fn create_access_token(db: &db::DefaultDb, user_id: UserId) -> Result<String> {
+pub async fn create_access_token(db: &db::Database, user_id: UserId) -> Result<String> {
     let access_token = rpc::auth::random_token();
     let access_token_hash =
         hash_access_token(&access_token).context("failed to hash access token")?;
diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs
index 044d4ef8d7..d89d041f2a 100644
--- a/crates/collab/src/db.rs
+++ b/crates/collab/src/db.rs
@@ -1,42 +1,44 @@
-mod schema;
+mod access_token;
+mod contact;
+mod project;
+mod project_collaborator;
+mod room;
+mod room_participant;
+mod signup;
 #[cfg(test)]
 mod tests;
+mod user;
+mod worktree;

 use crate::{Error, Result};
 use anyhow::anyhow;
-use axum::http::StatusCode;
-use collections::{BTreeMap, HashMap, HashSet};
+use collections::HashMap;
+pub use contact::Contact;
 use dashmap::DashMap;
-use futures::{future::BoxFuture, FutureExt, StreamExt};
+use futures::StreamExt;
+use hyper::StatusCode;
 use rpc::{proto, ConnectionId};
-use sea_query::{Expr, Query};
-use sea_query_binder::SqlxBinder;
+pub use sea_orm::ConnectOptions;
+use sea_orm::{
+    entity::prelude::*, ActiveValue, ConnectionTrait, DatabaseBackend, DatabaseConnection,
+    DatabaseTransaction, DbErr, FromQueryResult, IntoActiveModel, JoinType, QueryOrder,
+    QuerySelect, Statement, TransactionTrait,
+};
+use sea_query::{Alias, Expr, OnConflict, Query};
 use serde::{Deserialize, Serialize};
-use sqlx::{
-    migrate::{Migrate as _, Migration, MigrationSource},
-    types::Uuid,
-    FromRow,
-};
-use std::{
-    future::Future,
-    marker::PhantomData,
-    ops::{Deref, DerefMut},
-    path::Path,
-    rc::Rc,
-    sync::Arc,
-    time::Duration,
-};
-use time::{OffsetDateTime, PrimitiveDateTime};
+pub use signup::{Invite, NewSignup, WaitlistSummary};
+use sqlx::migrate::{Migrate, Migration, MigrationSource};
+use sqlx::Connection;
+use std::ops::{Deref, DerefMut};
+use std::path::Path;
+use std::time::Duration;
+use std::{future::Future, marker::PhantomData, rc::Rc, sync::Arc};
 use tokio::sync::{Mutex, OwnedMutexGuard};
+pub use user::Model as User;

-#[cfg(test)]
-pub type DefaultDb = Db<sqlx::Sqlite>;
-
-#[cfg(not(test))]
-pub type DefaultDb = Db<sqlx::Postgres>;
-
-pub struct Db<D: sqlx::Database> {
-    pool: sqlx::Pool<D>,
+pub struct Database {
+    options: ConnectOptions,
+    pool: DatabaseConnection,
     rooms: DashMap<RoomId, Arc<Mutex<()>>>,
     #[cfg(test)]
     background: Option<std::sync::Arc<gpui::executor::Background>>,
@@ -44,214 +46,11 @@ pub struct Db<D: sqlx::Database> {
     runtime: Option<tokio::runtime::Runtime>,
 }

-pub struct RoomGuard<T> {
-    data: T,
-    _guard: OwnedMutexGuard<()>,
-    _not_send: PhantomData<Rc<()>>,
-}
-
-impl<T> Deref for RoomGuard<T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        &self.data
-    }
-}
-
-impl<T> DerefMut for RoomGuard<T> {
-    fn deref_mut(&mut self) -> &mut T {
-        &mut self.data
-    }
-}
-
-pub trait BeginTransaction: Send + Sync {
-    type Database: sqlx::Database;
-
-    fn begin_transaction(&self) -> BoxFuture<Result<sqlx::Transaction<'static, Self::Database>>>;
-}
-
-// In
Postgres, serializable transactions are opt-in -impl BeginTransaction for Db { - type Database = sqlx::Postgres; - - fn begin_transaction(&self) -> BoxFuture>> { - async move { - let mut tx = self.pool.begin().await?; - sqlx::Executor::execute(&mut tx, "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;") - .await?; - Ok(tx) - } - .boxed() - } -} - -// In Sqlite, transactions are inherently serializable. -#[cfg(test)] -impl BeginTransaction for Db { - type Database = sqlx::Sqlite; - - fn begin_transaction(&self) -> BoxFuture>> { - async move { Ok(self.pool.begin().await?) }.boxed() - } -} - -pub trait BuildQuery { - fn build_query(&self, query: &T) -> (String, sea_query_binder::SqlxValues); -} - -impl BuildQuery for Db { - fn build_query(&self, query: &T) -> (String, sea_query_binder::SqlxValues) { - query.build_sqlx(sea_query::PostgresQueryBuilder) - } -} - -#[cfg(test)] -impl BuildQuery for Db { - fn build_query(&self, query: &T) -> (String, sea_query_binder::SqlxValues) { - query.build_sqlx(sea_query::SqliteQueryBuilder) - } -} - -pub trait RowsAffected { - fn rows_affected(&self) -> u64; -} - -#[cfg(test)] -impl RowsAffected for sqlx::sqlite::SqliteQueryResult { - fn rows_affected(&self) -> u64 { - self.rows_affected() - } -} - -impl RowsAffected for sqlx::postgres::PgQueryResult { - fn rows_affected(&self) -> u64 { - self.rows_affected() - } -} - -#[cfg(test)] -impl Db { - pub async fn new(url: &str, max_connections: u32) -> Result { - use std::str::FromStr as _; - let options = sqlx::sqlite::SqliteConnectOptions::from_str(url) - .unwrap() - .create_if_missing(true) - .shared_cache(true); - let pool = sqlx::sqlite::SqlitePoolOptions::new() - .min_connections(2) - .max_connections(max_connections) - .connect_with(options) - .await?; +impl Database { + pub async fn new(options: ConnectOptions) -> Result { Ok(Self { - pool, - rooms: Default::default(), - background: None, - runtime: None, - }) - } - - pub async fn get_users_by_ids(&self, ids: Vec) -> Result> { - self.transact(|tx| async { - let mut tx = tx; - let query = " - SELECT users.* - FROM users - WHERE users.id IN (SELECT value from json_each($1)) - "; - Ok(sqlx::query_as(query) - .bind(&serde_json::json!(ids)) - .fetch_all(&mut tx) - .await?) - }) - .await - } - - pub async fn get_user_metrics_id(&self, id: UserId) -> Result { - self.transact(|mut tx| async move { - let query = " - SELECT metrics_id - FROM users - WHERE id = $1 - "; - Ok(sqlx::query_scalar(query) - .bind(id) - .fetch_one(&mut tx) - .await?) 
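// The two BeginTransaction impls above encode a real backend difference:
// Postgres reaches SERIALIZABLE isolation only via the explicit
// `SET TRANSACTION` statement, and serializable transactions can abort at
// commit with SQLSTATE 40001, which callers are expected to retry. A minimal
// caller-side sketch; the helper and its retry-forever policy are
// assumptions, not part of this patch:

async fn retry_serialization_failures<T, F, Fut>(mut op: F) -> Result<T, sqlx::Error>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, sqlx::Error>>,
{
    loop {
        match op().await {
            // 40001 is Postgres's serialization_failure SQLSTATE.
            Err(sqlx::Error::Database(err)) if err.code().as_deref() == Some("40001") => continue,
            result => return result,
        }
    }
}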
- }) - .await - } - - pub async fn create_user( - &self, - email_address: &str, - admin: bool, - params: NewUserParams, - ) -> Result { - self.transact(|mut tx| async { - let query = " - INSERT INTO users (email_address, github_login, github_user_id, admin, metrics_id) - VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (github_login) DO UPDATE SET github_login = excluded.github_login - RETURNING id, metrics_id - "; - - let (user_id, metrics_id): (UserId, String) = sqlx::query_as(query) - .bind(email_address) - .bind(¶ms.github_login) - .bind(¶ms.github_user_id) - .bind(admin) - .bind(Uuid::new_v4().to_string()) - .fetch_one(&mut tx) - .await?; - tx.commit().await?; - Ok(NewUserResult { - user_id, - metrics_id, - signup_device_id: None, - inviting_user_id: None, - }) - }) - .await - } - - pub async fn fuzzy_search_users(&self, _name_query: &str, _limit: u32) -> Result> { - unimplemented!() - } - - pub async fn create_user_from_invite( - &self, - _invite: &Invite, - _user: NewUserParams, - ) -> Result> { - unimplemented!() - } - - pub async fn create_signup(&self, _signup: Signup) -> Result<()> { - unimplemented!() - } - - pub async fn create_invite_from_code( - &self, - _code: &str, - _email_address: &str, - _device_id: Option<&str>, - ) -> Result { - unimplemented!() - } - - pub async fn record_sent_invites(&self, _invites: &[Invite]) -> Result<()> { - unimplemented!() - } -} - -impl Db { - pub async fn new(url: &str, max_connections: u32) -> Result { - let pool = sqlx::postgres::PgPoolOptions::new() - .max_connections(max_connections) - .connect(url) - .await?; - Ok(Self { - pool, + options: options.clone(), + pool: sea_orm::Database::connect(options).await?, rooms: DashMap::with_capacity(16384), #[cfg(test)] background: None, @@ -260,396 +59,6 @@ impl Db { }) } - #[cfg(test)] - pub fn teardown(&self, url: &str) { - self.runtime.as_ref().unwrap().block_on(async { - use util::ResultExt; - let query = " - SELECT pg_terminate_backend(pg_stat_activity.pid) - FROM pg_stat_activity - WHERE pg_stat_activity.datname = current_database() AND pid <> pg_backend_pid(); - "; - sqlx::query(query).execute(&self.pool).await.log_err(); - self.pool.close().await; - ::drop_database(url) - .await - .log_err(); - }) - } - - pub async fn fuzzy_search_users(&self, name_query: &str, limit: u32) -> Result> { - self.transact(|tx| async { - let mut tx = tx; - let like_string = Self::fuzzy_like_string(name_query); - let query = " - SELECT users.* - FROM users - WHERE github_login ILIKE $1 - ORDER BY github_login <-> $2 - LIMIT $3 - "; - Ok(sqlx::query_as(query) - .bind(like_string) - .bind(name_query) - .bind(limit as i32) - .fetch_all(&mut tx) - .await?) - }) - .await - } - - pub async fn get_users_by_ids(&self, ids: Vec) -> Result> { - let ids = ids.iter().map(|id| id.0).collect::>(); - self.transact(|tx| async { - let mut tx = tx; - let query = " - SELECT users.* - FROM users - WHERE users.id = ANY ($1) - "; - Ok(sqlx::query_as(query).bind(&ids).fetch_all(&mut tx).await?) - }) - .await - } - - pub async fn get_user_metrics_id(&self, id: UserId) -> Result { - self.transact(|mut tx| async move { - let query = " - SELECT metrics_id::text - FROM users - WHERE id = $1 - "; - Ok(sqlx::query_scalar(query) - .bind(id) - .fetch_one(&mut tx) - .await?) 
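// The create_user variants on both backends (the SQLite one above, the
// Postgres one just below) rely on the same upsert trick: the self-assignment
// in `ON CONFLICT (github_login) DO UPDATE SET github_login =
// excluded.github_login` turns a conflicting INSERT into a no-op UPDATE so
// that RETURNING still yields the existing row, whereas `DO NOTHING` would
// return zero rows and make `fetch_one` fail. The sea-orm port later in this
// diff keeps the same shape via
// `OnConflict::column(user::Column::GithubLogin).update_column(user::Column::GithubLogin)`
// followed by `exec_with_returning`.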
- }) - .await - } - - pub async fn create_user( - &self, - email_address: &str, - admin: bool, - params: NewUserParams, - ) -> Result { - self.transact(|mut tx| async { - let query = " - INSERT INTO users (email_address, github_login, github_user_id, admin) - VALUES ($1, $2, $3, $4) - ON CONFLICT (github_login) DO UPDATE SET github_login = excluded.github_login - RETURNING id, metrics_id::text - "; - - let (user_id, metrics_id): (UserId, String) = sqlx::query_as(query) - .bind(email_address) - .bind(¶ms.github_login) - .bind(params.github_user_id) - .bind(admin) - .fetch_one(&mut tx) - .await?; - tx.commit().await?; - - Ok(NewUserResult { - user_id, - metrics_id, - signup_device_id: None, - inviting_user_id: None, - }) - }) - .await - } - - pub async fn create_user_from_invite( - &self, - invite: &Invite, - user: NewUserParams, - ) -> Result> { - self.transact(|mut tx| async { - let (signup_id, existing_user_id, inviting_user_id, signup_device_id): ( - i32, - Option, - Option, - Option, - ) = sqlx::query_as( - " - SELECT id, user_id, inviting_user_id, device_id - FROM signups - WHERE - email_address = $1 AND - email_confirmation_code = $2 - ", - ) - .bind(&invite.email_address) - .bind(&invite.email_confirmation_code) - .fetch_optional(&mut tx) - .await? - .ok_or_else(|| Error::Http(StatusCode::NOT_FOUND, "no such invite".to_string()))?; - - if existing_user_id.is_some() { - return Ok(None); - } - - let (user_id, metrics_id): (UserId, String) = sqlx::query_as( - " - INSERT INTO users - (email_address, github_login, github_user_id, admin, invite_count, invite_code) - VALUES - ($1, $2, $3, FALSE, $4, $5) - ON CONFLICT (github_login) DO UPDATE SET - email_address = excluded.email_address, - github_user_id = excluded.github_user_id, - admin = excluded.admin - RETURNING id, metrics_id::text - ", - ) - .bind(&invite.email_address) - .bind(&user.github_login) - .bind(&user.github_user_id) - .bind(&user.invite_count) - .bind(random_invite_code()) - .fetch_one(&mut tx) - .await?; - - sqlx::query( - " - UPDATE signups - SET user_id = $1 - WHERE id = $2 - ", - ) - .bind(&user_id) - .bind(&signup_id) - .execute(&mut tx) - .await?; - - if let Some(inviting_user_id) = inviting_user_id { - let id: Option = sqlx::query_scalar( - " - UPDATE users - SET invite_count = invite_count - 1 - WHERE id = $1 AND invite_count > 0 - RETURNING id - ", - ) - .bind(&inviting_user_id) - .fetch_optional(&mut tx) - .await?; - - if id.is_none() { - Err(Error::Http( - StatusCode::UNAUTHORIZED, - "no invites remaining".to_string(), - ))?; - } - - sqlx::query( - " - INSERT INTO contacts - (user_id_a, user_id_b, a_to_b, should_notify, accepted) - VALUES - ($1, $2, TRUE, TRUE, TRUE) - ON CONFLICT DO NOTHING - ", - ) - .bind(inviting_user_id) - .bind(user_id) - .execute(&mut tx) - .await?; - } - - tx.commit().await?; - Ok(Some(NewUserResult { - user_id, - metrics_id, - inviting_user_id, - signup_device_id, - })) - }) - .await - } - - pub async fn create_signup(&self, signup: Signup) -> Result<()> { - self.transact(|mut tx| async { - sqlx::query( - " - INSERT INTO signups - ( - email_address, - email_confirmation_code, - email_confirmation_sent, - platform_linux, - platform_mac, - platform_windows, - platform_unknown, - editor_features, - programming_languages, - device_id - ) - VALUES - ($1, $2, FALSE, $3, $4, $5, FALSE, $6, $7, $8) - RETURNING id - ", - ) - .bind(&signup.email_address) - .bind(&random_email_confirmation_code()) - .bind(&signup.platform_linux) - .bind(&signup.platform_mac) - .bind(&signup.platform_windows) - 
.bind(&signup.editor_features) - .bind(&signup.programming_languages) - .bind(&signup.device_id) - .execute(&mut tx) - .await?; - tx.commit().await?; - Ok(()) - }) - .await - } - - pub async fn create_invite_from_code( - &self, - code: &str, - email_address: &str, - device_id: Option<&str>, - ) -> Result { - self.transact(|mut tx| async { - let existing_user: Option = sqlx::query_scalar( - " - SELECT id - FROM users - WHERE email_address = $1 - ", - ) - .bind(email_address) - .fetch_optional(&mut tx) - .await?; - if existing_user.is_some() { - Err(anyhow!("email address is already in use"))?; - } - - let row: Option<(UserId, i32)> = sqlx::query_as( - " - SELECT id, invite_count - FROM users - WHERE invite_code = $1 - ", - ) - .bind(code) - .fetch_optional(&mut tx) - .await?; - - let (inviter_id, invite_count) = match row { - Some(row) => row, - None => Err(Error::Http( - StatusCode::NOT_FOUND, - "invite code not found".to_string(), - ))?, - }; - - if invite_count == 0 { - Err(Error::Http( - StatusCode::UNAUTHORIZED, - "no invites remaining".to_string(), - ))?; - } - - let email_confirmation_code: String = sqlx::query_scalar( - " - INSERT INTO signups - ( - email_address, - email_confirmation_code, - email_confirmation_sent, - inviting_user_id, - platform_linux, - platform_mac, - platform_windows, - platform_unknown, - device_id - ) - VALUES - ($1, $2, FALSE, $3, FALSE, FALSE, FALSE, TRUE, $4) - ON CONFLICT (email_address) - DO UPDATE SET - inviting_user_id = excluded.inviting_user_id - RETURNING email_confirmation_code - ", - ) - .bind(&email_address) - .bind(&random_email_confirmation_code()) - .bind(&inviter_id) - .bind(&device_id) - .fetch_one(&mut tx) - .await?; - - tx.commit().await?; - - Ok(Invite { - email_address: email_address.into(), - email_confirmation_code, - }) - }) - .await - } - - pub async fn record_sent_invites(&self, invites: &[Invite]) -> Result<()> { - self.transact(|mut tx| async { - let emails = invites - .iter() - .map(|s| s.email_address.as_str()) - .collect::>(); - sqlx::query( - " - UPDATE signups - SET email_confirmation_sent = TRUE - WHERE email_address = ANY ($1) - ", - ) - .bind(&emails) - .execute(&mut tx) - .await?; - tx.commit().await?; - Ok(()) - }) - .await - } -} - -impl Db -where - Self: BeginTransaction + BuildQuery, - D: sqlx::Database + sqlx::migrate::MigrateDatabase, - D::Connection: sqlx::migrate::Migrate, - for<'a> >::Arguments: sqlx::IntoArguments<'a, D>, - for<'a> sea_query_binder::SqlxValues: sqlx::IntoArguments<'a, D>, - for<'a> &'a mut D::Connection: sqlx::Executor<'a, Database = D>, - for<'a, 'b> &'b mut sqlx::Transaction<'a, D>: sqlx::Executor<'b, Database = D>, - D::QueryResult: RowsAffected, - String: sqlx::Type, - i32: sqlx::Type, - i64: sqlx::Type, - bool: sqlx::Type, - str: sqlx::Type, - Uuid: sqlx::Type, - sqlx::types::Json: sqlx::Type, - OffsetDateTime: sqlx::Type, - PrimitiveDateTime: sqlx::Type, - usize: sqlx::ColumnIndex, - for<'a> &'a str: sqlx::ColumnIndex, - for<'a> &'a str: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, - for<'a> String: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, - for<'a> Option: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, - for<'a> Option<&'a str>: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, - for<'a> i32: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, - for<'a> i64: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, - for<'a> bool: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, - for<'a> Uuid: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, - for<'a> Option: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, - for<'a> 
sqlx::types::JsonValue: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, - for<'a> OffsetDateTime: sqlx::Encode<'a, D> + sqlx::Decode<'a, D>, - for<'a> PrimitiveDateTime: sqlx::Decode<'a, D> + sqlx::Decode<'a, D>, -{ pub async fn migrate( &self, migrations_path: &Path, @@ -659,10 +68,10 @@ where .await .map_err(|err| anyhow!("failed to load migrations: {err:?}"))?; - let mut conn = self.pool.acquire().await?; + let mut connection = sqlx::AnyConnection::connect(self.options.get_url()).await?; - conn.ensure_migrations_table().await?; - let applied_migrations: HashMap<_, _> = conn + connection.ensure_migrations_table().await?; + let applied_migrations: HashMap<_, _> = connection .list_applied_migrations() .await? .into_iter() @@ -682,7 +91,7 @@ where } } None => { - let elapsed = conn.apply(&migration).await?; + let elapsed = connection.apply(&migration).await?; new_migrations.push((migration, elapsed)); } } @@ -691,6 +100,457 @@ where Ok(new_migrations) } + // users + + pub async fn create_user( + &self, + email_address: &str, + admin: bool, + params: NewUserParams, + ) -> Result { + self.transact(|tx| async { + let user = user::Entity::insert(user::ActiveModel { + email_address: ActiveValue::set(Some(email_address.into())), + github_login: ActiveValue::set(params.github_login.clone()), + github_user_id: ActiveValue::set(Some(params.github_user_id)), + admin: ActiveValue::set(admin), + metrics_id: ActiveValue::set(Uuid::new_v4()), + ..Default::default() + }) + .on_conflict( + OnConflict::column(user::Column::GithubLogin) + .update_column(user::Column::GithubLogin) + .to_owned(), + ) + .exec_with_returning(&tx) + .await?; + + tx.commit().await?; + + Ok(NewUserResult { + user_id: user.id, + metrics_id: user.metrics_id.to_string(), + signup_device_id: None, + inviting_user_id: None, + }) + }) + .await + } + + pub async fn get_user_by_id(&self, id: UserId) -> Result> { + self.transact(|tx| async move { Ok(user::Entity::find_by_id(id).one(&tx).await?) }) + .await + } + + pub async fn get_users_by_ids(&self, ids: Vec) -> Result> { + self.transact(|tx| async { + let tx = tx; + Ok(user::Entity::find() + .filter(user::Column::Id.is_in(ids.iter().copied())) + .all(&tx) + .await?) + }) + .await + } + + pub async fn get_user_by_github_account( + &self, + github_login: &str, + github_user_id: Option, + ) -> Result> { + self.transact(|tx| async { + let tx = tx; + if let Some(github_user_id) = github_user_id { + if let Some(user_by_github_user_id) = user::Entity::find() + .filter(user::Column::GithubUserId.eq(github_user_id)) + .one(&tx) + .await? + { + let mut user_by_github_user_id = user_by_github_user_id.into_active_model(); + user_by_github_user_id.github_login = ActiveValue::set(github_login.into()); + Ok(Some(user_by_github_user_id.update(&tx).await?)) + } else if let Some(user_by_github_login) = user::Entity::find() + .filter(user::Column::GithubLogin.eq(github_login)) + .one(&tx) + .await? + { + let mut user_by_github_login = user_by_github_login.into_active_model(); + user_by_github_login.github_user_id = ActiveValue::set(Some(github_user_id)); + Ok(Some(user_by_github_login.update(&tx).await?)) + } else { + Ok(None) + } + } else { + Ok(user::Entity::find() + .filter(user::Column::GithubLogin.eq(github_login)) + .one(&tx) + .await?) 
+ } + }) + .await + } + + pub async fn get_all_users(&self, page: u32, limit: u32) -> Result> { + self.transact(|tx| async move { + Ok(user::Entity::find() + .order_by_asc(user::Column::GithubLogin) + .limit(limit as u64) + .offset(page as u64 * limit as u64) + .all(&tx) + .await?) + }) + .await + } + + pub async fn get_users_with_no_invites( + &self, + invited_by_another_user: bool, + ) -> Result> { + self.transact(|tx| async move { + Ok(user::Entity::find() + .filter( + user::Column::InviteCount + .eq(0) + .and(if invited_by_another_user { + user::Column::InviterId.is_not_null() + } else { + user::Column::InviterId.is_null() + }), + ) + .all(&tx) + .await?) + }) + .await + } + + pub async fn get_user_metrics_id(&self, id: UserId) -> Result { + #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] + enum QueryAs { + MetricsId, + } + + self.transact(|tx| async move { + let metrics_id: Uuid = user::Entity::find_by_id(id) + .select_only() + .column(user::Column::MetricsId) + .into_values::<_, QueryAs>() + .one(&tx) + .await? + .ok_or_else(|| anyhow!("could not find user"))?; + Ok(metrics_id.to_string()) + }) + .await + } + + pub async fn set_user_is_admin(&self, id: UserId, is_admin: bool) -> Result<()> { + self.transact(|tx| async move { + user::Entity::update_many() + .filter(user::Column::Id.eq(id)) + .col_expr(user::Column::Admin, is_admin.into()) + .exec(&tx) + .await?; + tx.commit().await?; + Ok(()) + }) + .await + } + + pub async fn destroy_user(&self, id: UserId) -> Result<()> { + self.transact(|tx| async move { + access_token::Entity::delete_many() + .filter(access_token::Column::UserId.eq(id)) + .exec(&tx) + .await?; + user::Entity::delete_by_id(id).exec(&tx).await?; + tx.commit().await?; + Ok(()) + }) + .await + } + + // contacts + + pub async fn get_contacts(&self, user_id: UserId) -> Result> { + #[derive(Debug, FromQueryResult)] + struct ContactWithUserBusyStatuses { + user_id_a: UserId, + user_id_b: UserId, + a_to_b: bool, + accepted: bool, + should_notify: bool, + user_a_busy: bool, + user_b_busy: bool, + } + + self.transact(|tx| async move { + let user_a_participant = Alias::new("user_a_participant"); + let user_b_participant = Alias::new("user_b_participant"); + let mut db_contacts = contact::Entity::find() + .column_as( + Expr::tbl(user_a_participant.clone(), room_participant::Column::Id) + .is_not_null(), + "user_a_busy", + ) + .column_as( + Expr::tbl(user_b_participant.clone(), room_participant::Column::Id) + .is_not_null(), + "user_b_busy", + ) + .filter( + contact::Column::UserIdA + .eq(user_id) + .or(contact::Column::UserIdB.eq(user_id)), + ) + .join_as( + JoinType::LeftJoin, + contact::Relation::UserARoomParticipant.def(), + user_a_participant, + ) + .join_as( + JoinType::LeftJoin, + contact::Relation::UserBRoomParticipant.def(), + user_b_participant, + ) + .into_model::() + .stream(&tx) + .await?; + + let mut contacts = Vec::new(); + while let Some(db_contact) = db_contacts.next().await { + let db_contact = db_contact?; + if db_contact.user_id_a == user_id { + if db_contact.accepted { + contacts.push(Contact::Accepted { + user_id: db_contact.user_id_b, + should_notify: db_contact.should_notify && db_contact.a_to_b, + busy: db_contact.user_b_busy, + }); + } else if db_contact.a_to_b { + contacts.push(Contact::Outgoing { + user_id: db_contact.user_id_b, + }) + } else { + contacts.push(Contact::Incoming { + user_id: db_contact.user_id_b, + should_notify: db_contact.should_notify, + }); + } + } else if db_contact.accepted { + contacts.push(Contact::Accepted { + user_id: 
db_contact.user_id_a, + should_notify: db_contact.should_notify && !db_contact.a_to_b, + busy: db_contact.user_a_busy, + }); + } else if db_contact.a_to_b { + contacts.push(Contact::Incoming { + user_id: db_contact.user_id_a, + should_notify: db_contact.should_notify, + }); + } else { + contacts.push(Contact::Outgoing { + user_id: db_contact.user_id_a, + }); + } + } + + contacts.sort_unstable_by_key(|contact| contact.user_id()); + + Ok(contacts) + }) + .await + } + + pub async fn has_contact(&self, user_id_1: UserId, user_id_2: UserId) -> Result { + self.transact(|tx| async move { + let (id_a, id_b) = if user_id_1 < user_id_2 { + (user_id_1, user_id_2) + } else { + (user_id_2, user_id_1) + }; + + Ok(contact::Entity::find() + .filter( + contact::Column::UserIdA + .eq(id_a) + .and(contact::Column::UserIdB.eq(id_b)) + .and(contact::Column::Accepted.eq(true)), + ) + .one(&tx) + .await? + .is_some()) + }) + .await + } + + pub async fn send_contact_request(&self, sender_id: UserId, receiver_id: UserId) -> Result<()> { + self.transact(|tx| async move { + let (id_a, id_b, a_to_b) = if sender_id < receiver_id { + (sender_id, receiver_id, true) + } else { + (receiver_id, sender_id, false) + }; + + let rows_affected = contact::Entity::insert(contact::ActiveModel { + user_id_a: ActiveValue::set(id_a), + user_id_b: ActiveValue::set(id_b), + a_to_b: ActiveValue::set(a_to_b), + accepted: ActiveValue::set(false), + should_notify: ActiveValue::set(true), + ..Default::default() + }) + .on_conflict( + OnConflict::columns([contact::Column::UserIdA, contact::Column::UserIdB]) + .values([ + (contact::Column::Accepted, true.into()), + (contact::Column::ShouldNotify, false.into()), + ]) + .action_and_where( + contact::Column::Accepted.eq(false).and( + contact::Column::AToB + .eq(a_to_b) + .and(contact::Column::UserIdA.eq(id_b)) + .or(contact::Column::AToB + .ne(a_to_b) + .and(contact::Column::UserIdA.eq(id_a))), + ), + ) + .to_owned(), + ) + .exec_without_returning(&tx) + .await?; + + if rows_affected == 1 { + tx.commit().await?; + Ok(()) + } else { + Err(anyhow!("contact already requested"))? + } + }) + .await + } + + pub async fn remove_contact(&self, requester_id: UserId, responder_id: UserId) -> Result<()> { + self.transact(|tx| async move { + let (id_a, id_b) = if responder_id < requester_id { + (responder_id, requester_id) + } else { + (requester_id, responder_id) + }; + + let result = contact::Entity::delete_many() + .filter( + contact::Column::UserIdA + .eq(id_a) + .and(contact::Column::UserIdB.eq(id_b)), + ) + .exec(&tx) + .await?; + + if result.rows_affected == 1 { + tx.commit().await?; + Ok(()) + } else { + Err(anyhow!("no such contact"))? + } + }) + .await + } + + pub async fn dismiss_contact_notification( + &self, + user_id: UserId, + contact_user_id: UserId, + ) -> Result<()> { + self.transact(|tx| async move { + let (id_a, id_b, a_to_b) = if user_id < contact_user_id { + (user_id, contact_user_id, true) + } else { + (contact_user_id, user_id, false) + }; + + let result = contact::Entity::update_many() + .set(contact::ActiveModel { + should_notify: ActiveValue::set(false), + ..Default::default() + }) + .filter( + contact::Column::UserIdA + .eq(id_a) + .and(contact::Column::UserIdB.eq(id_b)) + .and( + contact::Column::AToB + .eq(a_to_b) + .and(contact::Column::Accepted.eq(true)) + .or(contact::Column::AToB + .ne(a_to_b) + .and(contact::Column::Accepted.eq(false))), + ), + ) + .exec(&tx) + .await?; + if result.rows_affected == 0 { + Err(anyhow!("no such contact request"))? 
+ } else { + tx.commit().await?; + Ok(()) + } + }) + .await + } + + pub async fn respond_to_contact_request( + &self, + responder_id: UserId, + requester_id: UserId, + accept: bool, + ) -> Result<()> { + self.transact(|tx| async move { + let (id_a, id_b, a_to_b) = if responder_id < requester_id { + (responder_id, requester_id, false) + } else { + (requester_id, responder_id, true) + }; + let rows_affected = if accept { + let result = contact::Entity::update_many() + .set(contact::ActiveModel { + accepted: ActiveValue::set(true), + should_notify: ActiveValue::set(true), + ..Default::default() + }) + .filter( + contact::Column::UserIdA + .eq(id_a) + .and(contact::Column::UserIdB.eq(id_b)) + .and(contact::Column::AToB.eq(a_to_b)), + ) + .exec(&tx) + .await?; + result.rows_affected + } else { + let result = contact::Entity::delete_many() + .filter( + contact::Column::UserIdA + .eq(id_a) + .and(contact::Column::UserIdB.eq(id_b)) + .and(contact::Column::AToB.eq(a_to_b)) + .and(contact::Column::Accepted.eq(false)), + ) + .exec(&tx) + .await?; + + result.rows_affected + }; + + if rows_affected == 1 { + tx.commit().await?; + Ok(()) + } else { + Err(anyhow!("no such contact request"))? + } + }) + .await + } + pub fn fuzzy_like_string(string: &str) -> String { let mut result = String::with_capacity(string.len() * 2 + 1); for c in string.chars() { @@ -703,163 +563,58 @@ where result } - // users - - pub async fn get_all_users(&self, page: u32, limit: u32) -> Result> { + pub async fn fuzzy_search_users(&self, name_query: &str, limit: u32) -> Result> { self.transact(|tx| async { - let mut tx = tx; - let query = "SELECT * FROM users ORDER BY github_login ASC LIMIT $1 OFFSET $2"; - Ok(sqlx::query_as(query) - .bind(limit as i32) - .bind((page * limit) as i32) - .fetch_all(&mut tx) - .await?) - }) - .await - } - - pub async fn get_user_by_id(&self, id: UserId) -> Result> { - self.transact(|tx| async { - let mut tx = tx; + let tx = tx; + let like_string = Self::fuzzy_like_string(name_query); let query = " SELECT users.* FROM users - WHERE id = $1 - LIMIT 1 + WHERE github_login ILIKE $1 + ORDER BY github_login <-> $2 + LIMIT $3 "; - Ok(sqlx::query_as(query) - .bind(&id) - .fetch_optional(&mut tx) + + Ok(user::Entity::find() + .from_raw_sql(Statement::from_sql_and_values( + self.pool.get_database_backend(), + query.into(), + vec![like_string.into(), name_query.into(), limit.into()], + )) + .all(&tx) .await?) }) .await } - pub async fn get_users_with_no_invites( - &self, - invited_by_another_user: bool, - ) -> Result> { - self.transact(|tx| async { - let mut tx = tx; - let query = format!( - " - SELECT users.* - FROM users - WHERE invite_count = 0 - AND inviter_id IS{} NULL - ", - if invited_by_another_user { " NOT" } else { "" } - ); - - Ok(sqlx::query_as(&query).fetch_all(&mut tx).await?) 
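// fuzzy_search_users stays raw SQL in the sea-orm port because of the `<->`
// ordering, assumed here to be the pg_trgm distance operator (which requires
// that Postgres extension). fuzzy_like_string feeds the ILIKE side: judging
// by its `string.len() * 2 + 1` capacity hint, each query character
// contributes "%c" plus one trailing "%", so a query like "cat" presumably
// becomes the pattern "%c%a%t%", admitting any login that contains c, a, t
// in order while `<->` ranks the candidates by trigram similarity:
//
//     assert_eq!(Database::fuzzy_like_string("cat"), "%c%a%t%");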
- }) - .await - } - - pub async fn get_user_by_github_account( - &self, - github_login: &str, - github_user_id: Option, - ) -> Result> { - self.transact(|tx| async { - let mut tx = tx; - if let Some(github_user_id) = github_user_id { - let mut user = sqlx::query_as::<_, User>( - " - UPDATE users - SET github_login = $1 - WHERE github_user_id = $2 - RETURNING * - ", - ) - .bind(github_login) - .bind(github_user_id) - .fetch_optional(&mut tx) - .await?; - - if user.is_none() { - user = sqlx::query_as::<_, User>( - " - UPDATE users - SET github_user_id = $1 - WHERE github_login = $2 - RETURNING * - ", - ) - .bind(github_user_id) - .bind(github_login) - .fetch_optional(&mut tx) - .await?; - } - - Ok(user) - } else { - let user = sqlx::query_as( - " - SELECT * FROM users - WHERE github_login = $1 - LIMIT 1 - ", - ) - .bind(github_login) - .fetch_optional(&mut tx) - .await?; - Ok(user) - } - }) - .await - } - - pub async fn set_user_is_admin(&self, id: UserId, is_admin: bool) -> Result<()> { - self.transact(|mut tx| async { - let query = "UPDATE users SET admin = $1 WHERE id = $2"; - sqlx::query(query) - .bind(is_admin) - .bind(id.0) - .execute(&mut tx) - .await?; - tx.commit().await?; - Ok(()) - }) - .await - } - - pub async fn set_user_connected_once(&self, id: UserId, connected_once: bool) -> Result<()> { - self.transact(|mut tx| async move { - let query = "UPDATE users SET connected_once = $1 WHERE id = $2"; - sqlx::query(query) - .bind(connected_once) - .bind(id.0) - .execute(&mut tx) - .await?; - tx.commit().await?; - Ok(()) - }) - .await - } - - pub async fn destroy_user(&self, id: UserId) -> Result<()> { - self.transact(|mut tx| async move { - let query = "DELETE FROM access_tokens WHERE user_id = $1;"; - sqlx::query(query) - .bind(id.0) - .execute(&mut tx) - .await - .map(drop)?; - let query = "DELETE FROM users WHERE id = $1;"; - sqlx::query(query).bind(id.0).execute(&mut tx).await?; - tx.commit().await?; - Ok(()) - }) - .await - } - // signups + pub async fn create_signup(&self, signup: NewSignup) -> Result<()> { + self.transact(|tx| async { + signup::ActiveModel { + email_address: ActiveValue::set(signup.email_address.clone()), + email_confirmation_code: ActiveValue::set(random_email_confirmation_code()), + email_confirmation_sent: ActiveValue::set(false), + platform_mac: ActiveValue::set(signup.platform_mac), + platform_windows: ActiveValue::set(signup.platform_windows), + platform_linux: ActiveValue::set(signup.platform_linux), + platform_unknown: ActiveValue::set(false), + editor_features: ActiveValue::set(Some(signup.editor_features.clone())), + programming_languages: ActiveValue::set(Some(signup.programming_languages.clone())), + device_id: ActiveValue::set(signup.device_id.clone()), + ..Default::default() + } + .insert(&tx) + .await?; + tx.commit().await?; + Ok(()) + }) + .await + } + pub async fn get_waitlist_summary(&self) -> Result { - self.transact(|mut tx| async move { - Ok(sqlx::query_as( - " + self.transact(|tx| async move { + let query = " SELECT COUNT(*) as count, COALESCE(SUM(CASE WHEN platform_linux THEN 1 ELSE 0 END), 0) as linux_count, @@ -872,63 +627,241 @@ where WHERE NOT email_confirmation_sent ) AS unsent - ", + "; + Ok( + WaitlistSummary::find_by_statement(Statement::from_sql_and_values( + self.pool.get_database_backend(), + query.into(), + vec![], + )) + .one(&tx) + .await? + .ok_or_else(|| anyhow!("invalid result"))?, ) - .fetch_one(&mut tx) - .await?) 
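// get_waitlist_summary keeps raw SQL on both sides of this rewrite: the
// conditional SUM(CASE ...) aggregates have no convenient query-builder
// form, so the sea-orm version below binds the same statement through
// `Statement::from_sql_and_values` and maps the single row back with
// `WaitlistSummary::find_by_statement`, presumably via a `FromQueryResult`
// derive on `WaitlistSummary` in signup.rs.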
+ }) + .await + } + + pub async fn record_sent_invites(&self, invites: &[Invite]) -> Result<()> { + let emails = invites + .iter() + .map(|s| s.email_address.as_str()) + .collect::>(); + self.transact(|tx| async { + signup::Entity::update_many() + .filter(signup::Column::EmailAddress.is_in(emails.iter().copied())) + .col_expr(signup::Column::EmailConfirmationSent, true.into()) + .exec(&tx) + .await?; + tx.commit().await?; + Ok(()) }) .await } pub async fn get_unsent_invites(&self, count: usize) -> Result> { - self.transact(|mut tx| async move { - Ok(sqlx::query_as( - " - SELECT - email_address, email_confirmation_code - FROM signups - WHERE - NOT email_confirmation_sent AND - (platform_mac OR platform_unknown) - LIMIT $1 - ", - ) - .bind(count as i32) - .fetch_all(&mut tx) - .await?) + self.transact(|tx| async move { + Ok(signup::Entity::find() + .select_only() + .column(signup::Column::EmailAddress) + .column(signup::Column::EmailConfirmationCode) + .filter( + signup::Column::EmailConfirmationSent.eq(false).and( + signup::Column::PlatformMac + .eq(true) + .or(signup::Column::PlatformUnknown.eq(true)), + ), + ) + .limit(count as u64) + .into_model() + .all(&tx) + .await?) }) .await } // invite codes - pub async fn set_invite_count_for_user(&self, id: UserId, count: u32) -> Result<()> { - self.transact(|mut tx| async move { - if count > 0 { - sqlx::query( - " - UPDATE users - SET invite_code = $1 - WHERE id = $2 AND invite_code IS NULL - ", + pub async fn create_invite_from_code( + &self, + code: &str, + email_address: &str, + device_id: Option<&str>, + ) -> Result { + self.transact(|tx| async move { + let existing_user = user::Entity::find() + .filter(user::Column::EmailAddress.eq(email_address)) + .one(&tx) + .await?; + + if existing_user.is_some() { + Err(anyhow!("email address is already in use"))?; + } + + let inviter = match user::Entity::find() + .filter(user::Column::InviteCode.eq(code)) + .one(&tx) + .await? + { + Some(inviter) => inviter, + None => { + return Err(Error::Http( + StatusCode::NOT_FOUND, + "invite code not found".to_string(), + ))? 
+ } + }; + + if inviter.invite_count == 0 { + Err(Error::Http( + StatusCode::UNAUTHORIZED, + "no invites remaining".to_string(), + ))?; + } + + let signup = signup::Entity::insert(signup::ActiveModel { + email_address: ActiveValue::set(email_address.into()), + email_confirmation_code: ActiveValue::set(random_email_confirmation_code()), + email_confirmation_sent: ActiveValue::set(false), + inviting_user_id: ActiveValue::set(Some(inviter.id)), + platform_linux: ActiveValue::set(false), + platform_mac: ActiveValue::set(false), + platform_windows: ActiveValue::set(false), + platform_unknown: ActiveValue::set(true), + device_id: ActiveValue::set(device_id.map(|device_id| device_id.into())), + ..Default::default() + }) + .on_conflict( + OnConflict::column(signup::Column::EmailAddress) + .update_column(signup::Column::InvitingUserId) + .to_owned(), + ) + .exec_with_returning(&tx) + .await?; + tx.commit().await?; + + Ok(Invite { + email_address: signup.email_address, + email_confirmation_code: signup.email_confirmation_code, + }) + }) + .await + } + + pub async fn create_user_from_invite( + &self, + invite: &Invite, + user: NewUserParams, + ) -> Result> { + self.transact(|tx| async { + let tx = tx; + let signup = signup::Entity::find() + .filter( + signup::Column::EmailAddress + .eq(invite.email_address.as_str()) + .and( + signup::Column::EmailConfirmationCode + .eq(invite.email_confirmation_code.as_str()), + ), ) - .bind(random_invite_code()) - .bind(id) - .execute(&mut tx) + .one(&tx) + .await? + .ok_or_else(|| Error::Http(StatusCode::NOT_FOUND, "no such invite".to_string()))?; + + if signup.user_id.is_some() { + return Ok(None); + } + + let user = user::Entity::insert(user::ActiveModel { + email_address: ActiveValue::set(Some(invite.email_address.clone())), + github_login: ActiveValue::set(user.github_login.clone()), + github_user_id: ActiveValue::set(Some(user.github_user_id)), + admin: ActiveValue::set(false), + invite_count: ActiveValue::set(user.invite_count), + invite_code: ActiveValue::set(Some(random_invite_code())), + metrics_id: ActiveValue::set(Uuid::new_v4()), + ..Default::default() + }) + .on_conflict( + OnConflict::column(user::Column::GithubLogin) + .update_columns([ + user::Column::EmailAddress, + user::Column::GithubUserId, + user::Column::Admin, + ]) + .to_owned(), + ) + .exec_with_returning(&tx) + .await?; + + let mut signup = signup.into_active_model(); + signup.user_id = ActiveValue::set(Some(user.id)); + let signup = signup.update(&tx).await?; + + if let Some(inviting_user_id) = signup.inviting_user_id { + let result = user::Entity::update_many() + .filter( + user::Column::Id + .eq(inviting_user_id) + .and(user::Column::InviteCount.gt(0)), + ) + .col_expr( + user::Column::InviteCount, + Expr::col(user::Column::InviteCount).sub(1), + ) + .exec(&tx) + .await?; + + if result.rows_affected == 0 { + Err(Error::Http( + StatusCode::UNAUTHORIZED, + "no invites remaining".to_string(), + ))?; + } + + contact::Entity::insert(contact::ActiveModel { + user_id_a: ActiveValue::set(inviting_user_id), + user_id_b: ActiveValue::set(user.id), + a_to_b: ActiveValue::set(true), + should_notify: ActiveValue::set(true), + accepted: ActiveValue::set(true), + ..Default::default() + }) + .on_conflict(OnConflict::new().do_nothing().to_owned()) + .exec_without_returning(&tx) .await?; } - sqlx::query( - " - UPDATE users - SET invite_count = $1 - WHERE id = $2 - ", - ) - .bind(count as i32) - .bind(id) - .execute(&mut tx) - .await?; + tx.commit().await?; + Ok(Some(NewUserResult { + user_id: user.id, + 
metrics_id: user.metrics_id.to_string(), + inviting_user_id: signup.inviting_user_id, + signup_device_id: signup.device_id, + })) + }) + .await + } + + pub async fn set_invite_count_for_user(&self, id: UserId, count: u32) -> Result<()> { + self.transact(|tx| async move { + if count > 0 { + user::Entity::update_many() + .filter( + user::Column::Id + .eq(id) + .and(user::Column::InviteCode.is_null()), + ) + .col_expr(user::Column::InviteCode, random_invite_code().into()) + .exec(&tx) + .await?; + } + + user::Entity::update_many() + .filter(user::Column::Id.eq(id)) + .col_expr(user::Column::InviteCount, count.into()) + .exec(&tx) + .await?; tx.commit().await?; Ok(()) }) @@ -936,535 +869,109 @@ where } pub async fn get_invite_code_for_user(&self, id: UserId) -> Result> { - self.transact(|mut tx| async move { - let result: Option<(String, i32)> = sqlx::query_as( - " - SELECT invite_code, invite_count - FROM users - WHERE id = $1 AND invite_code IS NOT NULL - ", - ) - .bind(id) - .fetch_optional(&mut tx) - .await?; - if let Some((code, count)) = result { - Ok(Some((code, count.try_into().map_err(anyhow::Error::new)?))) - } else { - Ok(None) + self.transact(|tx| async move { + match user::Entity::find_by_id(id).one(&tx).await? { + Some(user) if user.invite_code.is_some() => { + Ok(Some((user.invite_code.unwrap(), user.invite_count as u32))) + } + _ => Ok(None), } }) .await } pub async fn get_user_for_invite_code(&self, code: &str) -> Result { - self.transact(|tx| async { - let mut tx = tx; - sqlx::query_as( - " - SELECT * - FROM users - WHERE invite_code = $1 - ", - ) - .bind(code) - .fetch_optional(&mut tx) - .await? - .ok_or_else(|| { - Error::Http( - StatusCode::NOT_FOUND, - "that invite code does not exist".to_string(), - ) - }) + self.transact(|tx| async move { + user::Entity::find() + .filter(user::Column::InviteCode.eq(code)) + .one(&tx) + .await? + .ok_or_else(|| { + Error::Http( + StatusCode::NOT_FOUND, + "that invite code does not exist".to_string(), + ) + }) }) .await } - async fn commit_room_transaction<'a, T>( - &'a self, - room_id: RoomId, - tx: sqlx::Transaction<'static, D>, - data: T, - ) -> Result> { - let lock = self.rooms.entry(room_id).or_default().clone(); - let _guard = lock.lock_owned().await; - tx.commit().await?; - Ok(RoomGuard { - data, - _guard, - _not_send: PhantomData, - }) - } + // projects - pub async fn create_room( + pub async fn share_project( &self, - user_id: UserId, + room_id: RoomId, connection_id: ConnectionId, - live_kit_room: &str, - ) -> Result> { - self.transact(|mut tx| async move { - let room_id = sqlx::query_scalar( - " - INSERT INTO rooms (live_kit_room) - VALUES ($1) - RETURNING id - ", - ) - .bind(&live_kit_room) - .fetch_one(&mut tx) - .await - .map(RoomId)?; + worktrees: &[proto::WorktreeMetadata], + ) -> Result> { + self.transact(|tx| async move { + let participant = room_participant::Entity::find() + .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0)) + .one(&tx) + .await? 
+ .ok_or_else(|| anyhow!("could not find participant"))?; + if participant.room_id != room_id { + return Err(anyhow!("shared project on unexpected room"))?; + } - sqlx::query( - " - INSERT INTO room_participants (room_id, user_id, answering_connection_id, calling_user_id, calling_connection_id) - VALUES ($1, $2, $3, $4, $5) - ", - ) - .bind(room_id) - .bind(user_id) - .bind(connection_id.0 as i32) - .bind(user_id) - .bind(connection_id.0 as i32) - .execute(&mut tx) + let project = project::ActiveModel { + room_id: ActiveValue::set(participant.room_id), + host_user_id: ActiveValue::set(participant.user_id), + host_connection_id: ActiveValue::set(connection_id.0 as i32), + ..Default::default() + } + .insert(&tx) .await?; - let room = self.get_room(room_id, &mut tx).await?; - self.commit_room_transaction(room_id, tx, room).await - }).await - } - - pub async fn call( - &self, - room_id: RoomId, - calling_user_id: UserId, - calling_connection_id: ConnectionId, - called_user_id: UserId, - initial_project_id: Option, - ) -> Result> { - self.transact(|mut tx| async move { - sqlx::query( - " - INSERT INTO room_participants ( - room_id, - user_id, - calling_user_id, - calling_connection_id, - initial_project_id - ) - VALUES ($1, $2, $3, $4, $5) - ", - ) - .bind(room_id) - .bind(called_user_id) - .bind(calling_user_id) - .bind(calling_connection_id.0 as i32) - .bind(initial_project_id) - .execute(&mut tx) + worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel { + id: ActiveValue::set(worktree.id as i32), + project_id: ActiveValue::set(project.id), + abs_path: ActiveValue::set(worktree.abs_path.clone()), + root_name: ActiveValue::set(worktree.root_name.clone()), + visible: ActiveValue::set(worktree.visible), + scan_id: ActiveValue::set(0), + is_complete: ActiveValue::set(false), + })) + .exec(&tx) .await?; - let room = self.get_room(room_id, &mut tx).await?; - let incoming_call = Self::build_incoming_call(&room, called_user_id) - .ok_or_else(|| anyhow!("failed to build incoming call"))?; - self.commit_room_transaction(room_id, tx, (room, incoming_call)) + project_collaborator::ActiveModel { + project_id: ActiveValue::set(project.id), + connection_id: ActiveValue::set(connection_id.0 as i32), + user_id: ActiveValue::set(participant.user_id), + replica_id: ActiveValue::set(0), + is_host: ActiveValue::set(true), + ..Default::default() + } + .insert(&tx) + .await?; + + let room = self.get_room(room_id, &tx).await?; + self.commit_room_transaction(room_id, tx, (project.id, room)) .await }) .await } - pub async fn incoming_call_for_user( - &self, - user_id: UserId, - ) -> Result> { - self.transact(|mut tx| async move { - let room_id = sqlx::query_scalar::<_, RoomId>( - " - SELECT room_id - FROM room_participants - WHERE user_id = $1 AND answering_connection_id IS NULL - ", - ) - .bind(user_id) - .fetch_optional(&mut tx) + async fn get_room(&self, room_id: RoomId, tx: &DatabaseTransaction) -> Result { + let db_room = room::Entity::find_by_id(room_id) + .one(tx) + .await? 
+ .ok_or_else(|| anyhow!("could not find room"))?; + + let mut db_participants = db_room + .find_related(room_participant::Entity) + .stream(tx) .await?; - - if let Some(room_id) = room_id { - let room = self.get_room(room_id, &mut tx).await?; - Ok(Self::build_incoming_call(&room, user_id)) - } else { - Ok(None) - } - }) - .await - } - - fn build_incoming_call( - room: &proto::Room, - called_user_id: UserId, - ) -> Option { - let pending_participant = room - .pending_participants - .iter() - .find(|participant| participant.user_id == called_user_id.to_proto())?; - - Some(proto::IncomingCall { - room_id: room.id, - calling_user_id: pending_participant.calling_user_id, - participant_user_ids: room - .participants - .iter() - .map(|participant| participant.user_id) - .collect(), - initial_project: room.participants.iter().find_map(|participant| { - let initial_project_id = pending_participant.initial_project_id?; - participant - .projects - .iter() - .find(|project| project.id == initial_project_id) - .cloned() - }), - }) - } - - pub async fn call_failed( - &self, - room_id: RoomId, - called_user_id: UserId, - ) -> Result> { - self.transact(|mut tx| async move { - sqlx::query( - " - DELETE FROM room_participants - WHERE room_id = $1 AND user_id = $2 - ", - ) - .bind(room_id) - .bind(called_user_id) - .execute(&mut tx) - .await?; - - let room = self.get_room(room_id, &mut tx).await?; - self.commit_room_transaction(room_id, tx, room).await - }) - .await - } - - pub async fn decline_call( - &self, - expected_room_id: Option, - user_id: UserId, - ) -> Result> { - self.transact(|mut tx| async move { - let room_id = sqlx::query_scalar( - " - DELETE FROM room_participants - WHERE user_id = $1 AND answering_connection_id IS NULL - RETURNING room_id - ", - ) - .bind(user_id) - .fetch_one(&mut tx) - .await?; - if expected_room_id.map_or(false, |expected_room_id| expected_room_id != room_id) { - return Err(anyhow!("declining call on unexpected room"))?; - } - - let room = self.get_room(room_id, &mut tx).await?; - self.commit_room_transaction(room_id, tx, room).await - }) - .await - } - - pub async fn cancel_call( - &self, - expected_room_id: Option, - calling_connection_id: ConnectionId, - called_user_id: UserId, - ) -> Result> { - self.transact(|mut tx| async move { - let room_id = sqlx::query_scalar( - " - DELETE FROM room_participants - WHERE user_id = $1 AND calling_connection_id = $2 AND answering_connection_id IS NULL - RETURNING room_id - ", - ) - .bind(called_user_id) - .bind(calling_connection_id.0 as i32) - .fetch_one(&mut tx) - .await?; - if expected_room_id.map_or(false, |expected_room_id| expected_room_id != room_id) { - return Err(anyhow!("canceling call on unexpected room"))?; - } - - let room = self.get_room(room_id, &mut tx).await?; - self.commit_room_transaction(room_id, tx, room).await - }).await - } - - pub async fn join_room( - &self, - room_id: RoomId, - user_id: UserId, - connection_id: ConnectionId, - ) -> Result> { - self.transact(|mut tx| async move { - sqlx::query( - " - UPDATE room_participants - SET answering_connection_id = $1 - WHERE room_id = $2 AND user_id = $3 - RETURNING 1 - ", - ) - .bind(connection_id.0 as i32) - .bind(room_id) - .bind(user_id) - .fetch_one(&mut tx) - .await?; - - let room = self.get_room(room_id, &mut tx).await?; - self.commit_room_transaction(room_id, tx, room).await - }) - .await - } - - pub async fn leave_room( - &self, - connection_id: ConnectionId, - ) -> Result>> { - self.transact(|mut tx| async move { - // Leave room. 
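// The removed leave_room body that follows shows the full cascade one
// transaction has to cover: delete the participant row, cancel calls the
// leaver initiated, drop their project_collaborators rows, and unshare
// projects they hosted. Like every room mutation, it commits through
// commit_room_transaction, which takes the per-room mutex *before* the
// commit and returns a RoomGuard that keeps the lock held while the caller
// broadcasts the new room snapshot (the guard's PhantomData<Rc<()>> keeps it
// !Send, so it cannot migrate across threads mid-broadcast). A caller-side
// sketch; `broadcast_room_update` is hypothetical:
//
//     if let Some(left) = db.leave_room(connection_id).await? {
//         broadcast_room_update(&left.room).await; // room lock still held
//     } // the guard drops here, releasing the per-room lock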
- let room_id = sqlx::query_scalar::<_, RoomId>( - " - DELETE FROM room_participants - WHERE answering_connection_id = $1 - RETURNING room_id - ", - ) - .bind(connection_id.0 as i32) - .fetch_optional(&mut tx) - .await?; - - if let Some(room_id) = room_id { - // Cancel pending calls initiated by the leaving user. - let canceled_calls_to_user_ids: Vec = sqlx::query_scalar( - " - DELETE FROM room_participants - WHERE calling_connection_id = $1 AND answering_connection_id IS NULL - RETURNING user_id - ", - ) - .bind(connection_id.0 as i32) - .fetch_all(&mut tx) - .await?; - - let project_ids = sqlx::query_scalar::<_, ProjectId>( - " - SELECT project_id - FROM project_collaborators - WHERE connection_id = $1 - ", - ) - .bind(connection_id.0 as i32) - .fetch_all(&mut tx) - .await?; - - // Leave projects. - let mut left_projects = HashMap::default(); - if !project_ids.is_empty() { - let mut params = "?,".repeat(project_ids.len()); - params.pop(); - let query = format!( - " - SELECT * - FROM project_collaborators - WHERE project_id IN ({params}) - " - ); - let mut query = sqlx::query_as::<_, ProjectCollaborator>(&query); - for project_id in project_ids { - query = query.bind(project_id); - } - - let mut project_collaborators = query.fetch(&mut tx); - while let Some(collaborator) = project_collaborators.next().await { - let collaborator = collaborator?; - let left_project = - left_projects - .entry(collaborator.project_id) - .or_insert(LeftProject { - id: collaborator.project_id, - host_user_id: Default::default(), - connection_ids: Default::default(), - host_connection_id: Default::default(), - }); - - let collaborator_connection_id = - ConnectionId(collaborator.connection_id as u32); - if collaborator_connection_id != connection_id { - left_project.connection_ids.push(collaborator_connection_id); - } - - if collaborator.is_host { - left_project.host_user_id = collaborator.user_id; - left_project.host_connection_id = - ConnectionId(collaborator.connection_id as u32); - } - } - } - sqlx::query( - " - DELETE FROM project_collaborators - WHERE connection_id = $1 - ", - ) - .bind(connection_id.0 as i32) - .execute(&mut tx) - .await?; - - // Unshare projects. - sqlx::query( - " - DELETE FROM projects - WHERE room_id = $1 AND host_connection_id = $2 - ", - ) - .bind(room_id) - .bind(connection_id.0 as i32) - .execute(&mut tx) - .await?; - - let room = self.get_room(room_id, &mut tx).await?; - Ok(Some( - self.commit_room_transaction( - room_id, - tx, - LeftRoom { - room, - left_projects, - canceled_calls_to_user_ids, - }, - ) - .await?, - )) - } else { - Ok(None) - } - }) - .await - } - - pub async fn update_room_participant_location( - &self, - room_id: RoomId, - connection_id: ConnectionId, - location: proto::ParticipantLocation, - ) -> Result> { - self.transact(|tx| async { - let mut tx = tx; - let location_kind; - let location_project_id; - match location - .variant - .as_ref() - .ok_or_else(|| anyhow!("invalid location"))? 
- { - proto::participant_location::Variant::SharedProject(project) => { - location_kind = 0; - location_project_id = Some(ProjectId::from_proto(project.id)); - } - proto::participant_location::Variant::UnsharedProject(_) => { - location_kind = 1; - location_project_id = None; - } - proto::participant_location::Variant::External(_) => { - location_kind = 2; - location_project_id = None; - } - } - - sqlx::query( - " - UPDATE room_participants - SET location_kind = $1, location_project_id = $2 - WHERE room_id = $3 AND answering_connection_id = $4 - RETURNING 1 - ", - ) - .bind(location_kind) - .bind(location_project_id) - .bind(room_id) - .bind(connection_id.0 as i32) - .fetch_one(&mut tx) - .await?; - - let room = self.get_room(room_id, &mut tx).await?; - self.commit_room_transaction(room_id, tx, room).await - }) - .await - } - - async fn get_guest_connection_ids( - &self, - project_id: ProjectId, - tx: &mut sqlx::Transaction<'_, D>, - ) -> Result> { - let mut guest_connection_ids = Vec::new(); - let mut db_guest_connection_ids = sqlx::query_scalar::<_, i32>( - " - SELECT connection_id - FROM project_collaborators - WHERE project_id = $1 AND is_host = FALSE - ", - ) - .bind(project_id) - .fetch(tx); - while let Some(connection_id) = db_guest_connection_ids.next().await { - guest_connection_ids.push(ConnectionId(connection_id? as u32)); - } - Ok(guest_connection_ids) - } - - async fn get_room( - &self, - room_id: RoomId, - tx: &mut sqlx::Transaction<'_, D>, - ) -> Result { - let room: Room = sqlx::query_as( - " - SELECT * - FROM rooms - WHERE id = $1 - ", - ) - .bind(room_id) - .fetch_one(&mut *tx) - .await?; - - let mut db_participants = - sqlx::query_as::<_, (UserId, Option, Option, Option, UserId, Option)>( - " - SELECT user_id, answering_connection_id, location_kind, location_project_id, calling_user_id, initial_project_id - FROM room_participants - WHERE room_id = $1 - ", - ) - .bind(room_id) - .fetch(&mut *tx); - let mut participants = HashMap::default(); let mut pending_participants = Vec::new(); - while let Some(participant) = db_participants.next().await { - let ( - user_id, - answering_connection_id, - location_kind, - location_project_id, - calling_user_id, - initial_project_id, - ) = participant?; - if let Some(answering_connection_id) = answering_connection_id { - let location = match (location_kind, location_project_id) { + while let Some(db_participant) = db_participants.next().await { + let db_participant = db_participant?; + if let Some(answering_connection_id) = db_participant.answering_connection_id { + let location = match ( + db_participant.location_kind, + db_participant.location_project_id, + ) { (Some(0), Some(project_id)) => { Some(proto::participant_location::Variant::SharedProject( proto::participant_location::SharedProject { @@ -1482,7 +989,7 @@ where participants.insert( answering_connection_id, proto::Participant { - user_id: user_id.to_proto(), + user_id: db_participant.user_id.to_proto(), peer_id: answering_connection_id as u32, projects: Default::default(), location: Some(proto::ParticipantLocation { variant: location }), @@ -1490,1054 +997,66 @@ where ); } else { pending_participants.push(proto::PendingParticipant { - user_id: user_id.to_proto(), - calling_user_id: calling_user_id.to_proto(), - initial_project_id: initial_project_id.map(|id| id.to_proto()), + user_id: db_participant.user_id.to_proto(), + calling_user_id: db_participant.calling_user_id.to_proto(), + initial_project_id: db_participant.initial_project_id.map(|id| id.to_proto()), }); } } - 
drop(db_participants);
-        let mut rows = sqlx::query_as::<_, (i32, ProjectId, Option<String>)>(
-            "
-            SELECT host_connection_id, projects.id, worktrees.root_name
-            FROM projects
-            LEFT JOIN worktrees ON projects.id = worktrees.project_id
-            WHERE room_id = $1
-            ",
-        )
-        .bind(room_id)
-        .fetch(&mut *tx);
+        let mut db_projects = db_room
+            .find_related(project::Entity)
+            .find_with_related(worktree::Entity)
+            .stream(tx)
+            .await?;

-        while let Some(row) = rows.next().await {
-            let (connection_id, project_id, worktree_root_name) = row?;
-            if let Some(participant) = participants.get_mut(&connection_id) {
+        while let Some(row) = db_projects.next().await {
+            let (db_project, db_worktree) = row?;
+            if let Some(participant) = participants.get_mut(&db_project.host_connection_id) {
                 let project = if let Some(project) = participant
                     .projects
                     .iter_mut()
-                    .find(|project| project.id == project_id.to_proto())
+                    .find(|project| project.id == db_project.id.to_proto())
                 {
                     project
                 } else {
                     participant.projects.push(proto::ParticipantProject {
-                        id: project_id.to_proto(),
+                        id: db_project.id.to_proto(),
                         worktree_root_names: Default::default(),
                     });
                     participant.projects.last_mut().unwrap()
                 };
-                project.worktree_root_names.extend(worktree_root_name);
+
+                if let Some(db_worktree) = db_worktree {
+                    project.worktree_root_names.push(db_worktree.root_name);
+                }
             }
         }

         Ok(proto::Room {
-            id: room.id.to_proto(),
-            live_kit_room: room.live_kit_room,
+            id: db_room.id.to_proto(),
+            live_kit_room: db_room.live_kit_room,
             participants: participants.into_values().collect(),
             pending_participants,
         })
     }

-    // projects
-
-    pub async fn project_count_excluding_admins(&self) -> Result<usize> {
-        self.transact(|mut tx| async move {
-            Ok(sqlx::query_scalar::<_, i32>(
-                "
-                SELECT COUNT(*)
-                FROM projects, users
-                WHERE projects.host_user_id = users.id AND users.admin IS FALSE
-                ",
-            )
-            .fetch_one(&mut tx)
-            .await?
as usize) - }) - .await - } - - pub async fn share_project( + async fn commit_room_transaction( &self, - expected_room_id: RoomId, - connection_id: ConnectionId, - worktrees: &[proto::WorktreeMetadata], - ) -> Result> { - self.transact(|mut tx| async move { - let (sql, values) = self.build_query( - Query::select() - .columns([ - schema::room_participant::Definition::RoomId, - schema::room_participant::Definition::UserId, - ]) - .from(schema::room_participant::Definition::Table) - .and_where( - Expr::col(schema::room_participant::Definition::AnsweringConnectionId) - .eq(connection_id.0), - ), - ); - let (room_id, user_id) = sqlx::query_as_with::<_, (RoomId, UserId), _>(&sql, values) - .fetch_one(&mut tx) - .await?; - if room_id != expected_room_id { - return Err(anyhow!("shared project on unexpected room"))?; - } - - let (sql, values) = self.build_query( - Query::insert() - .into_table(schema::project::Definition::Table) - .columns([ - schema::project::Definition::RoomId, - schema::project::Definition::HostUserId, - schema::project::Definition::HostConnectionId, - ]) - .values_panic([room_id.into(), user_id.into(), connection_id.0.into()]) - .returning_col(schema::project::Definition::Id), - ); - let project_id: ProjectId = sqlx::query_scalar_with(&sql, values) - .fetch_one(&mut tx) - .await?; - - if !worktrees.is_empty() { - let mut query = Query::insert() - .into_table(schema::worktree::Definition::Table) - .columns([ - schema::worktree::Definition::ProjectId, - schema::worktree::Definition::Id, - schema::worktree::Definition::RootName, - schema::worktree::Definition::AbsPath, - schema::worktree::Definition::Visible, - schema::worktree::Definition::ScanId, - schema::worktree::Definition::IsComplete, - ]) - .to_owned(); - for worktree in worktrees { - query.values_panic([ - project_id.into(), - worktree.id.into(), - worktree.root_name.clone().into(), - worktree.abs_path.clone().into(), - worktree.visible.into(), - 0.into(), - false.into(), - ]); - } - let (sql, values) = self.build_query(&query); - sqlx::query_with(&sql, values).execute(&mut tx).await?; - } - - sqlx::query( - " - INSERT INTO project_collaborators ( - project_id, - connection_id, - user_id, - replica_id, - is_host - ) - VALUES ($1, $2, $3, $4, $5) - ", - ) - .bind(project_id) - .bind(connection_id.0 as i32) - .bind(user_id) - .bind(0) - .bind(true) - .execute(&mut tx) - .await?; - - let room = self.get_room(room_id, &mut tx).await?; - self.commit_room_transaction(room_id, tx, (project_id, room)) - .await + room_id: RoomId, + tx: DatabaseTransaction, + data: T, + ) -> Result> { + let lock = self.rooms.entry(room_id).or_default().clone(); + let _guard = lock.lock_owned().await; + tx.commit().await?; + Ok(RoomGuard { + data, + _guard, + _not_send: PhantomData, }) - .await } - pub async fn unshare_project( - &self, - project_id: ProjectId, - connection_id: ConnectionId, - ) -> Result)>> { - self.transact(|mut tx| async move { - let guest_connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; - let room_id: RoomId = sqlx::query_scalar( - " - DELETE FROM projects - WHERE id = $1 AND host_connection_id = $2 - RETURNING room_id - ", - ) - .bind(project_id) - .bind(connection_id.0 as i32) - .fetch_one(&mut tx) - .await?; - let room = self.get_room(room_id, &mut tx).await?; - self.commit_room_transaction(room_id, tx, (room, guest_connection_ids)) - .await - }) - .await - } - - pub async fn update_project( - &self, - project_id: ProjectId, - connection_id: ConnectionId, - worktrees: &[proto::WorktreeMetadata], - ) 
-> Result)>> { - self.transact(|mut tx| async move { - let room_id: RoomId = sqlx::query_scalar( - " - SELECT room_id - FROM projects - WHERE id = $1 AND host_connection_id = $2 - ", - ) - .bind(project_id) - .bind(connection_id.0 as i32) - .fetch_one(&mut tx) - .await?; - - if !worktrees.is_empty() { - let mut params = "(?, ?, ?, ?, ?, ?, ?),".repeat(worktrees.len()); - params.pop(); - let query = format!( - " - INSERT INTO worktrees ( - project_id, - id, - root_name, - abs_path, - visible, - scan_id, - is_complete - ) - VALUES {params} - ON CONFLICT (project_id, id) DO UPDATE SET root_name = excluded.root_name - " - ); - - let mut query = sqlx::query(&query); - for worktree in worktrees { - query = query - .bind(project_id) - .bind(worktree.id as i32) - .bind(&worktree.root_name) - .bind(&worktree.abs_path) - .bind(worktree.visible) - .bind(0) - .bind(false) - } - query.execute(&mut tx).await?; - } - - let mut params = "?,".repeat(worktrees.len()); - if !worktrees.is_empty() { - params.pop(); - } - let query = format!( - " - DELETE FROM worktrees - WHERE project_id = ? AND id NOT IN ({params}) - ", - ); - - let mut query = sqlx::query(&query).bind(project_id); - for worktree in worktrees { - query = query.bind(WorktreeId(worktree.id as i32)); - } - query.execute(&mut tx).await?; - - let guest_connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; - let room = self.get_room(room_id, &mut tx).await?; - self.commit_room_transaction(room_id, tx, (room, guest_connection_ids)) - .await - }) - .await - } - - pub async fn update_worktree( - &self, - update: &proto::UpdateWorktree, - connection_id: ConnectionId, - ) -> Result>> { - self.transact(|mut tx| async move { - let project_id = ProjectId::from_proto(update.project_id); - let worktree_id = WorktreeId::from_proto(update.worktree_id); - - // Ensure the update comes from the host. - let room_id: RoomId = sqlx::query_scalar( - " - SELECT room_id - FROM projects - WHERE id = $1 AND host_connection_id = $2 - ", - ) - .bind(project_id) - .bind(connection_id.0 as i32) - .fetch_one(&mut tx) - .await?; - - // Update metadata. 
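// [editor's note: not part of the original patch] update_project above builds
// its batched worktree upsert by repeating one row of "?" placeholders and
// popping the trailing comma. A minimal runnable sketch of that string-building
// step (the helper name is invented for illustration):
fn values_placeholders(rows: usize, cols: usize) -> String {
    // One row of placeholders, e.g. "(?, ?, ?),"
    let row = format!("({}),", vec!["?"; cols].join(", "));
    let mut sql = row.repeat(rows);
    sql.pop(); // drop the trailing comma: (2, 3) -> "(?, ?, ?),(?, ?, ?)"
    sql
}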
- sqlx::query( - " - UPDATE worktrees - SET - root_name = $1, - scan_id = $2, - is_complete = $3, - abs_path = $4 - WHERE project_id = $5 AND id = $6 - RETURNING 1 - ", - ) - .bind(&update.root_name) - .bind(update.scan_id as i64) - .bind(update.is_last_update) - .bind(&update.abs_path) - .bind(project_id) - .bind(worktree_id) - .fetch_one(&mut tx) - .await?; - - if !update.updated_entries.is_empty() { - let mut params = - "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?),".repeat(update.updated_entries.len()); - params.pop(); - - let query = format!( - " - INSERT INTO worktree_entries ( - project_id, - worktree_id, - id, - is_dir, - path, - inode, - mtime_seconds, - mtime_nanos, - is_symlink, - is_ignored - ) - VALUES {params} - ON CONFLICT (project_id, worktree_id, id) DO UPDATE SET - is_dir = excluded.is_dir, - path = excluded.path, - inode = excluded.inode, - mtime_seconds = excluded.mtime_seconds, - mtime_nanos = excluded.mtime_nanos, - is_symlink = excluded.is_symlink, - is_ignored = excluded.is_ignored - " - ); - let mut query = sqlx::query(&query); - for entry in &update.updated_entries { - let mtime = entry.mtime.clone().unwrap_or_default(); - query = query - .bind(project_id) - .bind(worktree_id) - .bind(entry.id as i64) - .bind(entry.is_dir) - .bind(&entry.path) - .bind(entry.inode as i64) - .bind(mtime.seconds as i64) - .bind(mtime.nanos as i32) - .bind(entry.is_symlink) - .bind(entry.is_ignored); - } - query.execute(&mut tx).await?; - } - - if !update.removed_entries.is_empty() { - let mut params = "?,".repeat(update.removed_entries.len()); - params.pop(); - let query = format!( - " - DELETE FROM worktree_entries - WHERE project_id = ? AND worktree_id = ? AND id IN ({params}) - " - ); - - let mut query = sqlx::query(&query).bind(project_id).bind(worktree_id); - for entry_id in &update.removed_entries { - query = query.bind(*entry_id as i64); - } - query.execute(&mut tx).await?; - } - - let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; - self.commit_room_transaction(room_id, tx, connection_ids) - .await - }) - .await - } - - pub async fn update_diagnostic_summary( - &self, - update: &proto::UpdateDiagnosticSummary, - connection_id: ConnectionId, - ) -> Result>> { - self.transact(|mut tx| async { - let project_id = ProjectId::from_proto(update.project_id); - let worktree_id = WorktreeId::from_proto(update.worktree_id); - let summary = update - .summary - .as_ref() - .ok_or_else(|| anyhow!("invalid summary"))?; - - // Ensure the update comes from the host. - let room_id: RoomId = sqlx::query_scalar( - " - SELECT room_id - FROM projects - WHERE id = $1 AND host_connection_id = $2 - ", - ) - .bind(project_id) - .bind(connection_id.0 as i32) - .fetch_one(&mut tx) - .await?; - - // Update summary. 
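// [editor's note: not part of the original patch] The "ensure the update comes
// from the host" SELECT above doubles as the authorization check: its WHERE
// clause only matches when host_connection_id equals the sender's connection,
// so fetch_one returns a RowNotFound error for any non-host sender and the
// transaction is dropped before the write below ever runs.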
- sqlx::query( - " - INSERT INTO worktree_diagnostic_summaries ( - project_id, - worktree_id, - path, - language_server_id, - error_count, - warning_count - ) - VALUES ($1, $2, $3, $4, $5, $6) - ON CONFLICT (project_id, worktree_id, path) DO UPDATE SET - language_server_id = excluded.language_server_id, - error_count = excluded.error_count, - warning_count = excluded.warning_count - ", - ) - .bind(project_id) - .bind(worktree_id) - .bind(&summary.path) - .bind(summary.language_server_id as i64) - .bind(summary.error_count as i32) - .bind(summary.warning_count as i32) - .execute(&mut tx) - .await?; - - let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; - self.commit_room_transaction(room_id, tx, connection_ids) - .await - }) - .await - } - - pub async fn start_language_server( - &self, - update: &proto::StartLanguageServer, - connection_id: ConnectionId, - ) -> Result>> { - self.transact(|mut tx| async { - let project_id = ProjectId::from_proto(update.project_id); - let server = update - .server - .as_ref() - .ok_or_else(|| anyhow!("invalid language server"))?; - - // Ensure the update comes from the host. - let room_id: RoomId = sqlx::query_scalar( - " - SELECT room_id - FROM projects - WHERE id = $1 AND host_connection_id = $2 - ", - ) - .bind(project_id) - .bind(connection_id.0 as i32) - .fetch_one(&mut tx) - .await?; - - // Add the newly-started language server. - sqlx::query( - " - INSERT INTO language_servers (project_id, id, name) - VALUES ($1, $2, $3) - ON CONFLICT (project_id, id) DO UPDATE SET - name = excluded.name - ", - ) - .bind(project_id) - .bind(server.id as i64) - .bind(&server.name) - .execute(&mut tx) - .await?; - - let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; - self.commit_room_transaction(room_id, tx, connection_ids) - .await - }) - .await - } - - pub async fn join_project( - &self, - project_id: ProjectId, - connection_id: ConnectionId, - ) -> Result> { - self.transact(|mut tx| async move { - let (room_id, user_id) = sqlx::query_as::<_, (RoomId, UserId)>( - " - SELECT room_id, user_id - FROM room_participants - WHERE answering_connection_id = $1 - ", - ) - .bind(connection_id.0 as i32) - .fetch_one(&mut tx) - .await?; - - // Ensure project id was shared on this room. 
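// [editor's note: not part of the original patch] This guard is another
// existence-as-authorization query: fetch_one on the SELECT 1 below fails with
// RowNotFound unless the project row exists *and* was shared into the caller's
// room, so join_project cannot reach projects belonging to other rooms.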
- sqlx::query( - " - SELECT 1 - FROM projects - WHERE id = $1 AND room_id = $2 - ", - ) - .bind(project_id) - .bind(room_id) - .fetch_one(&mut tx) - .await?; - - let mut collaborators = sqlx::query_as::<_, ProjectCollaborator>( - " - SELECT * - FROM project_collaborators - WHERE project_id = $1 - ", - ) - .bind(project_id) - .fetch_all(&mut tx) - .await?; - let replica_ids = collaborators - .iter() - .map(|c| c.replica_id) - .collect::>(); - let mut replica_id = ReplicaId(1); - while replica_ids.contains(&replica_id) { - replica_id.0 += 1; - } - let new_collaborator = ProjectCollaborator { - project_id, - connection_id: connection_id.0 as i32, - user_id, - replica_id, - is_host: false, - }; - - sqlx::query( - " - INSERT INTO project_collaborators ( - project_id, - connection_id, - user_id, - replica_id, - is_host - ) - VALUES ($1, $2, $3, $4, $5) - ", - ) - .bind(new_collaborator.project_id) - .bind(new_collaborator.connection_id) - .bind(new_collaborator.user_id) - .bind(new_collaborator.replica_id) - .bind(new_collaborator.is_host) - .execute(&mut tx) - .await?; - collaborators.push(new_collaborator); - - let worktree_rows = sqlx::query_as::<_, WorktreeRow>( - " - SELECT * - FROM worktrees - WHERE project_id = $1 - ", - ) - .bind(project_id) - .fetch_all(&mut tx) - .await?; - let mut worktrees = worktree_rows - .into_iter() - .map(|worktree_row| { - ( - worktree_row.id, - Worktree { - id: worktree_row.id, - abs_path: worktree_row.abs_path, - root_name: worktree_row.root_name, - visible: worktree_row.visible, - entries: Default::default(), - diagnostic_summaries: Default::default(), - scan_id: worktree_row.scan_id as u64, - is_complete: worktree_row.is_complete, - }, - ) - }) - .collect::>(); - - // Populate worktree entries. - { - let mut entries = sqlx::query_as::<_, WorktreeEntry>( - " - SELECT * - FROM worktree_entries - WHERE project_id = $1 - ", - ) - .bind(project_id) - .fetch(&mut tx); - while let Some(entry) = entries.next().await { - let entry = entry?; - if let Some(worktree) = worktrees.get_mut(&entry.worktree_id) { - worktree.entries.push(proto::Entry { - id: entry.id as u64, - is_dir: entry.is_dir, - path: entry.path, - inode: entry.inode as u64, - mtime: Some(proto::Timestamp { - seconds: entry.mtime_seconds as u64, - nanos: entry.mtime_nanos as u32, - }), - is_symlink: entry.is_symlink, - is_ignored: entry.is_ignored, - }); - } - } - } - - // Populate worktree diagnostic summaries. - { - let mut summaries = sqlx::query_as::<_, WorktreeDiagnosticSummary>( - " - SELECT * - FROM worktree_diagnostic_summaries - WHERE project_id = $1 - ", - ) - .bind(project_id) - .fetch(&mut tx); - while let Some(summary) = summaries.next().await { - let summary = summary?; - if let Some(worktree) = worktrees.get_mut(&summary.worktree_id) { - worktree - .diagnostic_summaries - .push(proto::DiagnosticSummary { - path: summary.path, - language_server_id: summary.language_server_id as u64, - error_count: summary.error_count as u32, - warning_count: summary.warning_count as u32, - }); - } - } - } - - // Populate language servers. 
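// [editor's note: not part of the original patch] Earlier in join_project the
// new collaborator's replica id is chosen as the smallest id >= 1 not held by
// an existing collaborator (share_project inserts the host with replica 0).
// A minimal runnable sketch of that scan, with an invented helper name:
fn first_free_replica_id(taken: &std::collections::HashSet<i32>) -> i32 {
    let mut id = 1; // replica 0 is reserved for the host
    while taken.contains(&id) {
        id += 1;
    }
    id
}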
- let language_servers = sqlx::query_as::<_, LanguageServer>( - " - SELECT * - FROM language_servers - WHERE project_id = $1 - ", - ) - .bind(project_id) - .fetch_all(&mut tx) - .await?; - - self.commit_room_transaction( - room_id, - tx, - ( - Project { - collaborators, - worktrees, - language_servers: language_servers - .into_iter() - .map(|language_server| proto::LanguageServer { - id: language_server.id.to_proto(), - name: language_server.name, - }) - .collect(), - }, - replica_id as ReplicaId, - ), - ) - .await - }) - .await - } - - pub async fn leave_project( - &self, - project_id: ProjectId, - connection_id: ConnectionId, - ) -> Result> { - self.transact(|mut tx| async move { - let result = sqlx::query( - " - DELETE FROM project_collaborators - WHERE project_id = $1 AND connection_id = $2 - ", - ) - .bind(project_id) - .bind(connection_id.0 as i32) - .execute(&mut tx) - .await?; - - if result.rows_affected() == 0 { - Err(anyhow!("not a collaborator on this project"))?; - } - - let connection_ids = sqlx::query_scalar::<_, i32>( - " - SELECT connection_id - FROM project_collaborators - WHERE project_id = $1 - ", - ) - .bind(project_id) - .fetch_all(&mut tx) - .await? - .into_iter() - .map(|id| ConnectionId(id as u32)) - .collect(); - - let (room_id, host_user_id, host_connection_id) = - sqlx::query_as::<_, (RoomId, i32, i32)>( - " - SELECT room_id, host_user_id, host_connection_id - FROM projects - WHERE id = $1 - ", - ) - .bind(project_id) - .fetch_one(&mut tx) - .await?; - - self.commit_room_transaction( - room_id, - tx, - LeftProject { - id: project_id, - host_user_id: UserId(host_user_id), - host_connection_id: ConnectionId(host_connection_id as u32), - connection_ids, - }, - ) - .await - }) - .await - } - - pub async fn project_collaborators( - &self, - project_id: ProjectId, - connection_id: ConnectionId, - ) -> Result> { - self.transact(|mut tx| async move { - let collaborators = sqlx::query_as::<_, ProjectCollaborator>( - " - SELECT * - FROM project_collaborators - WHERE project_id = $1 - ", - ) - .bind(project_id) - .fetch_all(&mut tx) - .await?; - - if collaborators - .iter() - .any(|collaborator| collaborator.connection_id == connection_id.0 as i32) - { - Ok(collaborators) - } else { - Err(anyhow!("no such project"))? - } - }) - .await - } - - pub async fn project_connection_ids( - &self, - project_id: ProjectId, - connection_id: ConnectionId, - ) -> Result> { - self.transact(|mut tx| async move { - let connection_ids = sqlx::query_scalar::<_, i32>( - " - SELECT connection_id - FROM project_collaborators - WHERE project_id = $1 - ", - ) - .bind(project_id) - .fetch_all(&mut tx) - .await?; - - if connection_ids.contains(&(connection_id.0 as i32)) { - Ok(connection_ids - .into_iter() - .map(|connection_id| ConnectionId(connection_id as u32)) - .collect()) - } else { - Err(anyhow!("no such project"))? 
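// [editor's note: not part of the original patch] project_collaborators and
// project_connection_ids above share one authorization idiom: load the full
// collaborator set, then return it only if the caller's connection id is in
// it. Unauthorized callers receive the same "no such project" error as callers
// naming a nonexistent project, so the reply does not leak whether a given
// project id exists.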
- } - }) - .await - } - - // contacts - - pub async fn get_contacts(&self, user_id: UserId) -> Result> { - self.transact(|mut tx| async move { - let query = " - SELECT user_id_a, user_id_b, a_to_b, accepted, should_notify, (room_participants.id IS NOT NULL) as busy - FROM contacts - LEFT JOIN room_participants ON room_participants.user_id = $1 - WHERE user_id_a = $1 OR user_id_b = $1; - "; - - let mut rows = sqlx::query_as::<_, (UserId, UserId, bool, bool, bool, bool)>(query) - .bind(user_id) - .fetch(&mut tx); - - let mut contacts = Vec::new(); - while let Some(row) = rows.next().await { - let (user_id_a, user_id_b, a_to_b, accepted, should_notify, busy) = row?; - if user_id_a == user_id { - if accepted { - contacts.push(Contact::Accepted { - user_id: user_id_b, - should_notify: should_notify && a_to_b, - busy - }); - } else if a_to_b { - contacts.push(Contact::Outgoing { user_id: user_id_b }) - } else { - contacts.push(Contact::Incoming { - user_id: user_id_b, - should_notify, - }); - } - } else if accepted { - contacts.push(Contact::Accepted { - user_id: user_id_a, - should_notify: should_notify && !a_to_b, - busy - }); - } else if a_to_b { - contacts.push(Contact::Incoming { - user_id: user_id_a, - should_notify, - }); - } else { - contacts.push(Contact::Outgoing { user_id: user_id_a }); - } - } - - contacts.sort_unstable_by_key(|contact| contact.user_id()); - - Ok(contacts) - }) - .await - } - - pub async fn is_user_busy(&self, user_id: UserId) -> Result { - self.transact(|mut tx| async move { - Ok(sqlx::query_scalar::<_, i32>( - " - SELECT 1 - FROM room_participants - WHERE room_participants.user_id = $1 - ", - ) - .bind(user_id) - .fetch_optional(&mut tx) - .await? - .is_some()) - }) - .await - } - - pub async fn has_contact(&self, user_id_1: UserId, user_id_2: UserId) -> Result { - self.transact(|mut tx| async move { - let (id_a, id_b) = if user_id_1 < user_id_2 { - (user_id_1, user_id_2) - } else { - (user_id_2, user_id_1) - }; - - let query = " - SELECT 1 FROM contacts - WHERE user_id_a = $1 AND user_id_b = $2 AND accepted = TRUE - LIMIT 1 - "; - Ok(sqlx::query_scalar::<_, i32>(query) - .bind(id_a.0) - .bind(id_b.0) - .fetch_optional(&mut tx) - .await? - .is_some()) - }) - .await - } - - pub async fn send_contact_request(&self, sender_id: UserId, receiver_id: UserId) -> Result<()> { - self.transact(|mut tx| async move { - let (id_a, id_b, a_to_b) = if sender_id < receiver_id { - (sender_id, receiver_id, true) - } else { - (receiver_id, sender_id, false) - }; - let query = " - INSERT into contacts (user_id_a, user_id_b, a_to_b, accepted, should_notify) - VALUES ($1, $2, $3, FALSE, TRUE) - ON CONFLICT (user_id_a, user_id_b) DO UPDATE - SET - accepted = TRUE, - should_notify = FALSE - WHERE - NOT contacts.accepted AND - ((contacts.a_to_b = excluded.a_to_b AND contacts.user_id_a = excluded.user_id_b) OR - (contacts.a_to_b != excluded.a_to_b AND contacts.user_id_a = excluded.user_id_a)); - "; - let result = sqlx::query(query) - .bind(id_a.0) - .bind(id_b.0) - .bind(a_to_b) - .execute(&mut tx) - .await?; - - if result.rows_affected() == 1 { - tx.commit().await?; - Ok(()) - } else { - Err(anyhow!("contact already requested"))? 
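// [editor's note: not part of the original patch] rows_affected is the success
// signal for send_contact_request: the INSERT counts one row for a brand-new
// request, and the ON CONFLICT UPDATE counts one row only when it auto-accepts
// a pending request coming from the other side. Zero affected rows means the
// request already exists, so the error above is returned and the uncommitted
// transaction rolls back.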
- } - }).await - } - - pub async fn remove_contact(&self, requester_id: UserId, responder_id: UserId) -> Result<()> { - self.transact(|mut tx| async move { - let (id_a, id_b) = if responder_id < requester_id { - (responder_id, requester_id) - } else { - (requester_id, responder_id) - }; - let query = " - DELETE FROM contacts - WHERE user_id_a = $1 AND user_id_b = $2; - "; - let result = sqlx::query(query) - .bind(id_a.0) - .bind(id_b.0) - .execute(&mut tx) - .await?; - - if result.rows_affected() == 1 { - tx.commit().await?; - Ok(()) - } else { - Err(anyhow!("no such contact"))? - } - }) - .await - } - - pub async fn dismiss_contact_notification( - &self, - user_id: UserId, - contact_user_id: UserId, - ) -> Result<()> { - self.transact(|mut tx| async move { - let (id_a, id_b, a_to_b) = if user_id < contact_user_id { - (user_id, contact_user_id, true) - } else { - (contact_user_id, user_id, false) - }; - - let query = " - UPDATE contacts - SET should_notify = FALSE - WHERE - user_id_a = $1 AND user_id_b = $2 AND - ( - (a_to_b = $3 AND accepted) OR - (a_to_b != $3 AND NOT accepted) - ); - "; - - let result = sqlx::query(query) - .bind(id_a.0) - .bind(id_b.0) - .bind(a_to_b) - .execute(&mut tx) - .await?; - - if result.rows_affected() == 0 { - Err(anyhow!("no such contact request"))? - } else { - tx.commit().await?; - Ok(()) - } - }) - .await - } - - pub async fn respond_to_contact_request( - &self, - responder_id: UserId, - requester_id: UserId, - accept: bool, - ) -> Result<()> { - self.transact(|mut tx| async move { - let (id_a, id_b, a_to_b) = if responder_id < requester_id { - (responder_id, requester_id, false) - } else { - (requester_id, responder_id, true) - }; - let result = if accept { - let query = " - UPDATE contacts - SET accepted = TRUE, should_notify = TRUE - WHERE user_id_a = $1 AND user_id_b = $2 AND a_to_b = $3; - "; - sqlx::query(query) - .bind(id_a.0) - .bind(id_b.0) - .bind(a_to_b) - .execute(&mut tx) - .await? - } else { - let query = " - DELETE FROM contacts - WHERE user_id_a = $1 AND user_id_b = $2 AND a_to_b = $3 AND NOT accepted; - "; - sqlx::query(query) - .bind(id_a.0) - .bind(id_b.0) - .bind(a_to_b) - .execute(&mut tx) - .await? - }; - if result.rows_affected() == 1 { - tx.commit().await?; - Ok(()) - } else { - Err(anyhow!("no such contact request"))? 
- } - }) - .await - } - - // access tokens - pub async fn create_access_token_hash( &self, user_id: UserId, @@ -2545,49 +1064,51 @@ where max_access_token_count: usize, ) -> Result<()> { self.transact(|tx| async { - let mut tx = tx; - let insert_query = " - INSERT INTO access_tokens (user_id, hash) - VALUES ($1, $2); - "; - let cleanup_query = " - DELETE FROM access_tokens - WHERE id IN ( - SELECT id from access_tokens - WHERE user_id = $1 - ORDER BY id DESC - LIMIT 10000 - OFFSET $3 - ) - "; + let tx = tx; - sqlx::query(insert_query) - .bind(user_id.0) - .bind(access_token_hash) - .execute(&mut tx) + access_token::ActiveModel { + user_id: ActiveValue::set(user_id), + hash: ActiveValue::set(access_token_hash.into()), + ..Default::default() + } + .insert(&tx) + .await?; + + access_token::Entity::delete_many() + .filter( + access_token::Column::Id.in_subquery( + Query::select() + .column(access_token::Column::Id) + .from(access_token::Entity) + .and_where(access_token::Column::UserId.eq(user_id)) + .order_by(access_token::Column::Id, sea_orm::Order::Desc) + .limit(10000) + .offset(max_access_token_count as u64) + .to_owned(), + ), + ) + .exec(&tx) .await?; - sqlx::query(cleanup_query) - .bind(user_id.0) - .bind(access_token_hash) - .bind(max_access_token_count as i32) - .execute(&mut tx) - .await?; - Ok(tx.commit().await?) + tx.commit().await?; + Ok(()) }) .await } pub async fn get_access_token_hashes(&self, user_id: UserId) -> Result> { - self.transact(|mut tx| async move { - let query = " - SELECT hash - FROM access_tokens - WHERE user_id = $1 - ORDER BY id DESC - "; - Ok(sqlx::query_scalar(query) - .bind(user_id.0) - .fetch_all(&mut tx) + #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] + enum QueryAs { + Hash, + } + + self.transact(|tx| async move { + Ok(access_token::Entity::find() + .select_only() + .column(access_token::Column::Hash) + .filter(access_token::Column::UserId.eq(user_id)) + .order_by_desc(access_token::Column::Id) + .into_values::<_, QueryAs>() + .all(&tx) .await?) 
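// [editor's note: not part of the original patch] The QueryAs enum above is
// sea-orm's idiom for selecting a single column without a dedicated result
// struct: deriving EnumIter + DeriveColumn lets into_values::<_, QueryAs>()
// map the selected `hash` column directly into plain String values.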
        })
        .await
@@ -2595,21 +1116,33 @@ where

     async fn transact<F, Fut, T>(&self, f: F) -> Result<T>
     where
-        F: Send + Fn(sqlx::Transaction<'static, D>) -> Fut,
+        F: Send + Fn(DatabaseTransaction) -> Fut,
         Fut: Send + Future<Output = Result<T>>,
     {
         let body = async {
             loop {
-                let tx = self.begin_transaction().await?;
+                let tx = self.pool.begin().await?;
+
+                // In Postgres, serializable transactions are opt-in
+                if let DatabaseBackend::Postgres = self.pool.get_database_backend() {
+                    tx.execute(Statement::from_string(
+                        DatabaseBackend::Postgres,
+                        "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;".into(),
+                    ))
+                    .await?;
+                }
+
                 match f(tx).await {
                     Ok(result) => return Ok(result),
                     Err(error) => match error {
-                        Error::Database(error)
-                            if error
-                                .as_database_error()
-                                .and_then(|error| error.code())
-                                .as_deref()
-                                == Some("40001") =>
+                        Error::Database2(
+                            DbErr::Exec(sea_orm::RuntimeErr::SqlxError(error))
+                            | DbErr::Query(sea_orm::RuntimeErr::SqlxError(error)),
+                        ) if error
+                            .as_database_error()
+                            .and_then(|error| error.code())
+                            .as_deref()
+                            == Some("40001") =>
                         {
                             // Retry (don't break the loop)
                         }
@@ -2635,6 +1168,49 @@ where
     }
 }

+pub struct RoomGuard<T> {
+    data: T,
+    _guard: OwnedMutexGuard<()>,
+    _not_send: PhantomData<Rc<()>>,
+}
+
+impl<T> Deref for RoomGuard<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.data
+    }
+}
+
+impl<T> DerefMut for RoomGuard<T> {
+    fn deref_mut(&mut self) -> &mut T {
+        &mut self.data
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct NewUserParams {
+    pub github_login: String,
+    pub github_user_id: i32,
+    pub invite_count: i32,
+}
+
+#[derive(Debug)]
+pub struct NewUserResult {
+    pub user_id: UserId,
+    pub metrics_id: String,
+    pub inviting_user_id: Option<UserId>,
+    pub signup_device_id: Option<String>,
+}
+
+fn random_invite_code() -> String {
+    nanoid::nanoid!(16)
+}
+
+fn random_email_confirmation_code() -> String {
+    nanoid::nanoid!(64)
+}
+
 macro_rules! id_type {
     ($name:ident) => {
         #[derive(
@@ -2681,196 +1257,90 @@ macro_rules!
id_type { sea_query::Value::Int(Some(value.0)) } } + + impl sea_orm::TryGetable for $name { + fn try_get( + res: &sea_orm::QueryResult, + pre: &str, + col: &str, + ) -> Result { + Ok(Self(i32::try_get(res, pre, col)?)) + } + } + + impl sea_query::ValueType for $name { + fn try_from(v: Value) -> Result { + match v { + Value::TinyInt(Some(int)) => { + Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?)) + } + Value::SmallInt(Some(int)) => { + Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?)) + } + Value::Int(Some(int)) => { + Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?)) + } + Value::BigInt(Some(int)) => { + Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?)) + } + Value::TinyUnsigned(Some(int)) => { + Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?)) + } + Value::SmallUnsigned(Some(int)) => { + Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?)) + } + Value::Unsigned(Some(int)) => { + Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?)) + } + Value::BigUnsigned(Some(int)) => { + Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?)) + } + _ => Err(sea_query::ValueTypeErr), + } + } + + fn type_name() -> String { + stringify!($name).into() + } + + fn array_type() -> sea_query::ArrayType { + sea_query::ArrayType::Int + } + + fn column_type() -> sea_query::ColumnType { + sea_query::ColumnType::Integer(None) + } + } + + impl sea_orm::TryFromU64 for $name { + fn try_from_u64(n: u64) -> Result { + Ok(Self(n.try_into().map_err(|_| { + DbErr::ConvertFromU64(concat!( + "error converting ", + stringify!($name), + " to u64" + )) + })?)) + } + } + + impl sea_query::Nullable for $name { + fn null() -> Value { + Value::Int(None) + } + } }; } +id_type!(AccessTokenId); +id_type!(ContactId); id_type!(UserId); -#[derive(Clone, Debug, Default, FromRow, Serialize, PartialEq)] -pub struct User { - pub id: UserId, - pub github_login: String, - pub github_user_id: Option, - pub email_address: Option, - pub admin: bool, - pub invite_code: Option, - pub invite_count: i32, - pub connected_once: bool, -} - id_type!(RoomId); -#[derive(Clone, Debug, Default, FromRow, Serialize, PartialEq)] -pub struct Room { - pub id: RoomId, - pub live_kit_room: String, -} - +id_type!(RoomParticipantId); id_type!(ProjectId); -pub struct Project { - pub collaborators: Vec, - pub worktrees: BTreeMap, - pub language_servers: Vec, -} - -id_type!(ReplicaId); -#[derive(Clone, Debug, Default, FromRow, PartialEq)] -pub struct ProjectCollaborator { - pub project_id: ProjectId, - pub connection_id: i32, - pub user_id: UserId, - pub replica_id: ReplicaId, - pub is_host: bool, -} - +id_type!(ProjectCollaboratorId); +id_type!(SignupId); id_type!(WorktreeId); -#[derive(Clone, Debug, Default, FromRow, PartialEq)] -struct WorktreeRow { - pub id: WorktreeId, - pub project_id: ProjectId, - pub abs_path: String, - pub root_name: String, - pub visible: bool, - pub scan_id: i64, - pub is_complete: bool, -} - -pub struct Worktree { - pub id: WorktreeId, - pub abs_path: String, - pub root_name: String, - pub visible: bool, - pub entries: Vec, - pub diagnostic_summaries: Vec, - pub scan_id: u64, - pub is_complete: bool, -} - -#[derive(Clone, Debug, Default, FromRow, PartialEq)] -struct WorktreeEntry { - id: i64, - worktree_id: WorktreeId, - is_dir: bool, - path: String, - inode: i64, - mtime_seconds: i64, - mtime_nanos: i32, - is_symlink: bool, - is_ignored: bool, -} - -#[derive(Clone, Debug, Default, FromRow, PartialEq)] -struct WorktreeDiagnosticSummary { - 
worktree_id: WorktreeId, - path: String, - language_server_id: i64, - error_count: i32, - warning_count: i32, -} - -id_type!(LanguageServerId); -#[derive(Clone, Debug, Default, FromRow, PartialEq)] -struct LanguageServer { - id: LanguageServerId, - name: String, -} - -pub struct LeftProject { - pub id: ProjectId, - pub host_user_id: UserId, - pub host_connection_id: ConnectionId, - pub connection_ids: Vec, -} - -pub struct LeftRoom { - pub room: proto::Room, - pub left_projects: HashMap, - pub canceled_calls_to_user_ids: Vec, -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum Contact { - Accepted { - user_id: UserId, - should_notify: bool, - busy: bool, - }, - Outgoing { - user_id: UserId, - }, - Incoming { - user_id: UserId, - should_notify: bool, - }, -} - -impl Contact { - pub fn user_id(&self) -> UserId { - match self { - Contact::Accepted { user_id, .. } => *user_id, - Contact::Outgoing { user_id } => *user_id, - Contact::Incoming { user_id, .. } => *user_id, - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub struct IncomingContactRequest { - pub requester_id: UserId, - pub should_notify: bool, -} - -#[derive(Clone, Deserialize)] -pub struct Signup { - pub email_address: String, - pub platform_mac: bool, - pub platform_windows: bool, - pub platform_linux: bool, - pub editor_features: Vec, - pub programming_languages: Vec, - pub device_id: Option, -} - -#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, FromRow)] -pub struct WaitlistSummary { - #[sqlx(default)] - pub count: i64, - #[sqlx(default)] - pub linux_count: i64, - #[sqlx(default)] - pub mac_count: i64, - #[sqlx(default)] - pub windows_count: i64, - #[sqlx(default)] - pub unknown_count: i64, -} - -#[derive(FromRow, PartialEq, Debug, Serialize, Deserialize)] -pub struct Invite { - pub email_address: String, - pub email_confirmation_code: String, -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct NewUserParams { - pub github_login: String, - pub github_user_id: i32, - pub invite_count: i32, -} - -#[derive(Debug)] -pub struct NewUserResult { - pub user_id: UserId, - pub metrics_id: String, - pub inviting_user_id: Option, - pub signup_device_id: Option, -} - -fn random_invite_code() -> String { - nanoid::nanoid!(16) -} - -fn random_email_confirmation_code() -> String { - nanoid::nanoid!(64) -} #[cfg(test)] pub use test::*; @@ -2882,35 +1352,40 @@ mod test { use lazy_static::lazy_static; use parking_lot::Mutex; use rand::prelude::*; + use sea_orm::ConnectionTrait; use sqlx::migrate::MigrateDatabase; use std::sync::Arc; - pub struct SqliteTestDb { - pub db: Option>>, - pub conn: sqlx::sqlite::SqliteConnection, + pub struct TestDb { + pub db: Option>, + pub connection: Option, } - pub struct PostgresTestDb { - pub db: Option>>, - pub url: String, - } - - impl SqliteTestDb { - pub fn new(background: Arc) -> Self { - let mut rng = StdRng::from_entropy(); - let url = format!("file:zed-test-{}?mode=memory", rng.gen::()); + impl TestDb { + pub fn sqlite(background: Arc) -> Self { + let url = format!("sqlite::memory:"); let runtime = tokio::runtime::Builder::new_current_thread() .enable_io() .enable_time() .build() .unwrap(); - let (mut db, conn) = runtime.block_on(async { - let db = Db::::new(&url, 5).await.unwrap(); - let migrations_path = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations.sqlite"); - db.migrate(migrations_path.as_ref(), false).await.unwrap(); - let conn = db.pool.acquire().await.unwrap().detach(); - (db, conn) + let mut db = runtime.block_on(async { + let mut options = 
ConnectOptions::new(url); + options.max_connections(5); + let db = Database::new(options).await.unwrap(); + let sql = include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/migrations.sqlite/20221109000000_test_schema.sql" + )); + db.pool + .execute(sea_orm::Statement::from_string( + db.pool.get_database_backend(), + sql.into(), + )) + .await + .unwrap(); + db }); db.background = Some(background); @@ -2918,17 +1393,11 @@ mod test { Self { db: Some(Arc::new(db)), - conn, + connection: None, } } - pub fn db(&self) -> &Arc> { - self.db.as_ref().unwrap() - } - } - - impl PostgresTestDb { - pub fn new(background: Arc) -> Self { + pub fn postgres(background: Arc) -> Self { lazy_static! { static ref LOCK: Mutex<()> = Mutex::new(()); } @@ -2949,7 +1418,11 @@ mod test { sqlx::Postgres::create_database(&url) .await .expect("failed to create test db"); - let db = Db::::new(&url, 5).await.unwrap(); + let mut options = ConnectOptions::new(url); + options + .max_connections(5) + .idle_timeout(Duration::from_secs(0)); + let db = Database::new(options).await.unwrap(); let migrations_path = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations"); db.migrate(Path::new(migrations_path), false).await.unwrap(); db @@ -2960,19 +1433,40 @@ mod test { Self { db: Some(Arc::new(db)), - url, + connection: None, } } - pub fn db(&self) -> &Arc> { + pub fn db(&self) -> &Arc { self.db.as_ref().unwrap() } } - impl Drop for PostgresTestDb { + impl Drop for TestDb { fn drop(&mut self) { let db = self.db.take().unwrap(); - db.teardown(&self.url); + if let DatabaseBackend::Postgres = db.pool.get_database_backend() { + db.runtime.as_ref().unwrap().block_on(async { + use util::ResultExt; + let query = " + SELECT pg_terminate_backend(pg_stat_activity.pid) + FROM pg_stat_activity + WHERE + pg_stat_activity.datname = current_database() AND + pid <> pg_backend_pid(); + "; + db.pool + .execute(sea_orm::Statement::from_string( + db.pool.get_database_backend(), + query.into(), + )) + .await + .log_err(); + sqlx::Postgres::drop_database(db.options.get_url()) + .await + .log_err(); + }) + } } } } diff --git a/crates/collab/src/db2/access_token.rs b/crates/collab/src/db/access_token.rs similarity index 100% rename from crates/collab/src/db2/access_token.rs rename to crates/collab/src/db/access_token.rs diff --git a/crates/collab/src/db2/contact.rs b/crates/collab/src/db/contact.rs similarity index 100% rename from crates/collab/src/db2/contact.rs rename to crates/collab/src/db/contact.rs diff --git a/crates/collab/src/db2/project.rs b/crates/collab/src/db/project.rs similarity index 100% rename from crates/collab/src/db2/project.rs rename to crates/collab/src/db/project.rs diff --git a/crates/collab/src/db2/project_collaborator.rs b/crates/collab/src/db/project_collaborator.rs similarity index 100% rename from crates/collab/src/db2/project_collaborator.rs rename to crates/collab/src/db/project_collaborator.rs diff --git a/crates/collab/src/db2/room.rs b/crates/collab/src/db/room.rs similarity index 100% rename from crates/collab/src/db2/room.rs rename to crates/collab/src/db/room.rs diff --git a/crates/collab/src/db2/room_participant.rs b/crates/collab/src/db/room_participant.rs similarity index 100% rename from crates/collab/src/db2/room_participant.rs rename to crates/collab/src/db/room_participant.rs diff --git a/crates/collab/src/db/schema.rs b/crates/collab/src/db/schema.rs deleted file mode 100644 index 40a3e334d1..0000000000 --- a/crates/collab/src/db/schema.rs +++ /dev/null @@ -1,43 +0,0 @@ -pub mod project { - use sea_query::Iden; 
- - #[derive(Iden)] - pub enum Definition { - #[iden = "projects"] - Table, - Id, - RoomId, - HostUserId, - HostConnectionId, - } -} - -pub mod worktree { - use sea_query::Iden; - - #[derive(Iden)] - pub enum Definition { - #[iden = "worktrees"] - Table, - Id, - ProjectId, - AbsPath, - RootName, - Visible, - ScanId, - IsComplete, - } -} - -pub mod room_participant { - use sea_query::Iden; - - #[derive(Iden)] - pub enum Definition { - #[iden = "room_participants"] - Table, - RoomId, - UserId, - AnsweringConnectionId, - } -} diff --git a/crates/collab/src/db2/signup.rs b/crates/collab/src/db/signup.rs similarity index 95% rename from crates/collab/src/db2/signup.rs rename to crates/collab/src/db/signup.rs index 8fab8daa36..9857018a0c 100644 --- a/crates/collab/src/db2/signup.rs +++ b/crates/collab/src/db/signup.rs @@ -27,7 +27,7 @@ pub enum Relation {} impl ActiveModelBehavior for ActiveModel {} -#[derive(Debug, PartialEq, Eq, FromQueryResult)] +#[derive(Debug, PartialEq, Eq, FromQueryResult, Serialize, Deserialize)] pub struct Invite { pub email_address: String, pub email_confirmation_code: String, diff --git a/crates/collab/src/db/tests.rs b/crates/collab/src/db/tests.rs index 88488b10d2..b276bd5057 100644 --- a/crates/collab/src/db/tests.rs +++ b/crates/collab/src/db/tests.rs @@ -6,14 +6,14 @@ macro_rules! test_both_dbs { ($postgres_test_name:ident, $sqlite_test_name:ident, $db:ident, $body:block) => { #[gpui::test] async fn $postgres_test_name() { - let test_db = PostgresTestDb::new(Deterministic::new(0).build_background()); + let test_db = TestDb::postgres(Deterministic::new(0).build_background()); let $db = test_db.db(); $body } #[gpui::test] async fn $sqlite_test_name() { - let test_db = SqliteTestDb::new(Deterministic::new(0).build_background()); + let test_db = TestDb::sqlite(Deterministic::new(0).build_background()); let $db = test_db.db(); $body } @@ -26,9 +26,10 @@ test_both_dbs!( db, { let mut user_ids = Vec::new(); + let mut user_metric_ids = Vec::new(); for i in 1..=4 { - user_ids.push( - db.create_user( + let user = db + .create_user( &format!("user{i}@example.com"), false, NewUserParams { @@ -38,9 +39,9 @@ test_both_dbs!( }, ) .await - .unwrap() - .user_id, - ); + .unwrap(); + user_ids.push(user.user_id); + user_metric_ids.push(user.metrics_id); } assert_eq!( @@ -52,6 +53,7 @@ test_both_dbs!( github_user_id: Some(1), email_address: Some("user1@example.com".to_string()), admin: false, + metrics_id: user_metric_ids[0].parse().unwrap(), ..Default::default() }, User { @@ -60,6 +62,7 @@ test_both_dbs!( github_user_id: Some(2), email_address: Some("user2@example.com".to_string()), admin: false, + metrics_id: user_metric_ids[1].parse().unwrap(), ..Default::default() }, User { @@ -68,6 +71,7 @@ test_both_dbs!( github_user_id: Some(3), email_address: Some("user3@example.com".to_string()), admin: false, + metrics_id: user_metric_ids[2].parse().unwrap(), ..Default::default() }, User { @@ -76,6 +80,7 @@ test_both_dbs!( github_user_id: Some(4), email_address: Some("user4@example.com".to_string()), admin: false, + metrics_id: user_metric_ids[3].parse().unwrap(), ..Default::default() } ] @@ -399,14 +404,14 @@ test_both_dbs!(test_metrics_id_postgres, test_metrics_id_sqlite, db, { #[test] fn test_fuzzy_like_string() { - assert_eq!(DefaultDb::fuzzy_like_string("abcd"), "%a%b%c%d%"); - assert_eq!(DefaultDb::fuzzy_like_string("x y"), "%x%y%"); - assert_eq!(DefaultDb::fuzzy_like_string(" z "), "%z%"); + assert_eq!(Database::fuzzy_like_string("abcd"), "%a%b%c%d%"); + 
assert_eq!(Database::fuzzy_like_string("x y"), "%x%y%"); + assert_eq!(Database::fuzzy_like_string(" z "), "%z%"); } #[gpui::test] async fn test_fuzzy_search_users() { - let test_db = PostgresTestDb::new(build_background_executor()); + let test_db = TestDb::postgres(build_background_executor()); let db = test_db.db(); for (i, github_login) in [ "California", @@ -442,7 +447,7 @@ async fn test_fuzzy_search_users() { &["rhode-island", "colorado", "oregon"], ); - async fn fuzzy_search_user_names(db: &Db, query: &str) -> Vec { + async fn fuzzy_search_user_names(db: &Database, query: &str) -> Vec { db.fuzzy_search_users(query, 10) .await .unwrap() @@ -454,7 +459,7 @@ async fn test_fuzzy_search_users() { #[gpui::test] async fn test_invite_codes() { - let test_db = PostgresTestDb::new(build_background_executor()); + let test_db = TestDb::postgres(build_background_executor()); let db = test_db.db(); let NewUserResult { user_id: user1, .. } = db @@ -659,12 +664,12 @@ async fn test_invite_codes() { #[gpui::test] async fn test_signups() { - let test_db = PostgresTestDb::new(build_background_executor()); + let test_db = TestDb::postgres(build_background_executor()); let db = test_db.db(); // people sign up on the waitlist for i in 0..8 { - db.create_signup(Signup { + db.create_signup(NewSignup { email_address: format!("person-{i}@example.com"), platform_mac: true, platform_linux: i % 2 == 0, diff --git a/crates/collab/src/db2/user.rs b/crates/collab/src/db/user.rs similarity index 93% rename from crates/collab/src/db2/user.rs rename to crates/collab/src/db/user.rs index f6bac9dc77..b6e096f667 100644 --- a/crates/collab/src/db2/user.rs +++ b/crates/collab/src/db/user.rs @@ -1,7 +1,8 @@ use super::UserId; use sea_orm::entity::prelude::*; +use serde::Serialize; -#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel)] +#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel, Serialize)] #[sea_orm(table_name = "users")] pub struct Model { #[sea_orm(primary_key)] @@ -12,6 +13,7 @@ pub struct Model { pub admin: bool, pub invite_code: Option, pub invite_count: i32, + pub inviter_id: Option, pub connected_once: bool, pub metrics_id: Uuid, } diff --git a/crates/collab/src/db2/worktree.rs b/crates/collab/src/db/worktree.rs similarity index 100% rename from crates/collab/src/db2/worktree.rs rename to crates/collab/src/db/worktree.rs diff --git a/crates/collab/src/db2.rs b/crates/collab/src/db2.rs deleted file mode 100644 index 3aa21c6059..0000000000 --- a/crates/collab/src/db2.rs +++ /dev/null @@ -1,1416 +0,0 @@ -mod access_token; -mod contact; -mod project; -mod project_collaborator; -mod room; -mod room_participant; -mod signup; -#[cfg(test)] -mod tests; -mod user; -mod worktree; - -use crate::{Error, Result}; -use anyhow::anyhow; -use collections::HashMap; -use dashmap::DashMap; -use futures::StreamExt; -use hyper::StatusCode; -use rpc::{proto, ConnectionId}; -use sea_orm::{ - entity::prelude::*, ConnectOptions, DatabaseConnection, DatabaseTransaction, DbErr, - TransactionTrait, -}; -use sea_orm::{ - ActiveValue, ConnectionTrait, DatabaseBackend, FromQueryResult, IntoActiveModel, JoinType, - QueryOrder, QuerySelect, Statement, -}; -use sea_query::{Alias, Expr, OnConflict, Query}; -use serde::{Deserialize, Serialize}; -use sqlx::migrate::{Migrate, Migration, MigrationSource}; -use sqlx::Connection; -use std::ops::{Deref, DerefMut}; -use std::path::Path; -use std::time::Duration; -use std::{future::Future, marker::PhantomData, rc::Rc, sync::Arc}; -use tokio::sync::{Mutex, OwnedMutexGuard}; - 
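// [editor's note: not part of the original patch] The Database struct deleted
// below (its contents move under db/ as part of this db2 -> db rename) keeps a
// DashMap with one async Mutex per room. commit_room_transaction locks the
// room's mutex before committing and hands the still-held lock back inside
// RoomGuard, so a caller can finish broadcasting one room update before the
// next transaction on that room is allowed to commit.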
-pub use contact::Contact; -pub use signup::{Invite, NewSignup, WaitlistSummary}; -pub use user::Model as User; - -pub struct Database { - options: ConnectOptions, - pool: DatabaseConnection, - rooms: DashMap>>, - #[cfg(test)] - background: Option>, - #[cfg(test)] - runtime: Option, -} - -impl Database { - pub async fn new(options: ConnectOptions) -> Result { - Ok(Self { - options: options.clone(), - pool: sea_orm::Database::connect(options).await?, - rooms: DashMap::with_capacity(16384), - #[cfg(test)] - background: None, - #[cfg(test)] - runtime: None, - }) - } - - pub async fn migrate( - &self, - migrations_path: &Path, - ignore_checksum_mismatch: bool, - ) -> anyhow::Result> { - let migrations = MigrationSource::resolve(migrations_path) - .await - .map_err(|err| anyhow!("failed to load migrations: {err:?}"))?; - - let mut connection = sqlx::AnyConnection::connect(self.options.get_url()).await?; - - connection.ensure_migrations_table().await?; - let applied_migrations: HashMap<_, _> = connection - .list_applied_migrations() - .await? - .into_iter() - .map(|m| (m.version, m)) - .collect(); - - let mut new_migrations = Vec::new(); - for migration in migrations { - match applied_migrations.get(&migration.version) { - Some(applied_migration) => { - if migration.checksum != applied_migration.checksum && !ignore_checksum_mismatch - { - Err(anyhow!( - "checksum mismatch for applied migration {}", - migration.description - ))?; - } - } - None => { - let elapsed = connection.apply(&migration).await?; - new_migrations.push((migration, elapsed)); - } - } - } - - Ok(new_migrations) - } - - // users - - pub async fn create_user( - &self, - email_address: &str, - admin: bool, - params: NewUserParams, - ) -> Result { - self.transact(|tx| async { - let user = user::Entity::insert(user::ActiveModel { - email_address: ActiveValue::set(Some(email_address.into())), - github_login: ActiveValue::set(params.github_login.clone()), - github_user_id: ActiveValue::set(Some(params.github_user_id)), - admin: ActiveValue::set(admin), - metrics_id: ActiveValue::set(Uuid::new_v4()), - ..Default::default() - }) - .on_conflict( - OnConflict::column(user::Column::GithubLogin) - .update_column(user::Column::GithubLogin) - .to_owned(), - ) - .exec_with_returning(&tx) - .await?; - - tx.commit().await?; - - Ok(NewUserResult { - user_id: user.id, - metrics_id: user.metrics_id.to_string(), - signup_device_id: None, - inviting_user_id: None, - }) - }) - .await - } - - pub async fn get_user_by_id(&self, id: UserId) -> Result> { - self.transact(|tx| async move { Ok(user::Entity::find_by_id(id).one(&tx).await?) }) - .await - } - - pub async fn get_users_by_ids(&self, ids: Vec) -> Result> { - self.transact(|tx| async { - let tx = tx; - Ok(user::Entity::find() - .filter(user::Column::Id.is_in(ids.iter().copied())) - .all(&tx) - .await?) - }) - .await - } - - pub async fn get_user_by_github_account( - &self, - github_login: &str, - github_user_id: Option, - ) -> Result> { - self.transact(|tx| async { - let tx = tx; - if let Some(github_user_id) = github_user_id { - if let Some(user_by_github_user_id) = user::Entity::find() - .filter(user::Column::GithubUserId.eq(github_user_id)) - .one(&tx) - .await? 
- { - let mut user_by_github_user_id = user_by_github_user_id.into_active_model(); - user_by_github_user_id.github_login = ActiveValue::set(github_login.into()); - Ok(Some(user_by_github_user_id.update(&tx).await?)) - } else if let Some(user_by_github_login) = user::Entity::find() - .filter(user::Column::GithubLogin.eq(github_login)) - .one(&tx) - .await? - { - let mut user_by_github_login = user_by_github_login.into_active_model(); - user_by_github_login.github_user_id = ActiveValue::set(Some(github_user_id)); - Ok(Some(user_by_github_login.update(&tx).await?)) - } else { - Ok(None) - } - } else { - Ok(user::Entity::find() - .filter(user::Column::GithubLogin.eq(github_login)) - .one(&tx) - .await?) - } - }) - .await - } - - pub async fn get_user_metrics_id(&self, id: UserId) -> Result { - #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] - enum QueryAs { - MetricsId, - } - - self.transact(|tx| async move { - let metrics_id: Uuid = user::Entity::find_by_id(id) - .select_only() - .column(user::Column::MetricsId) - .into_values::<_, QueryAs>() - .one(&tx) - .await? - .ok_or_else(|| anyhow!("could not find user"))?; - Ok(metrics_id.to_string()) - }) - .await - } - - // contacts - - pub async fn get_contacts(&self, user_id: UserId) -> Result> { - #[derive(Debug, FromQueryResult)] - struct ContactWithUserBusyStatuses { - user_id_a: UserId, - user_id_b: UserId, - a_to_b: bool, - accepted: bool, - should_notify: bool, - user_a_busy: bool, - user_b_busy: bool, - } - - self.transact(|tx| async move { - let user_a_participant = Alias::new("user_a_participant"); - let user_b_participant = Alias::new("user_b_participant"); - let mut db_contacts = contact::Entity::find() - .column_as( - Expr::tbl(user_a_participant.clone(), room_participant::Column::Id) - .is_not_null(), - "user_a_busy", - ) - .column_as( - Expr::tbl(user_b_participant.clone(), room_participant::Column::Id) - .is_not_null(), - "user_b_busy", - ) - .filter( - contact::Column::UserIdA - .eq(user_id) - .or(contact::Column::UserIdB.eq(user_id)), - ) - .join_as( - JoinType::LeftJoin, - contact::Relation::UserARoomParticipant.def(), - user_a_participant, - ) - .join_as( - JoinType::LeftJoin, - contact::Relation::UserBRoomParticipant.def(), - user_b_participant, - ) - .into_model::() - .stream(&tx) - .await?; - - let mut contacts = Vec::new(); - while let Some(db_contact) = db_contacts.next().await { - let db_contact = db_contact?; - if db_contact.user_id_a == user_id { - if db_contact.accepted { - contacts.push(Contact::Accepted { - user_id: db_contact.user_id_b, - should_notify: db_contact.should_notify && db_contact.a_to_b, - busy: db_contact.user_b_busy, - }); - } else if db_contact.a_to_b { - contacts.push(Contact::Outgoing { - user_id: db_contact.user_id_b, - }) - } else { - contacts.push(Contact::Incoming { - user_id: db_contact.user_id_b, - should_notify: db_contact.should_notify, - }); - } - } else if db_contact.accepted { - contacts.push(Contact::Accepted { - user_id: db_contact.user_id_a, - should_notify: db_contact.should_notify && !db_contact.a_to_b, - busy: db_contact.user_a_busy, - }); - } else if db_contact.a_to_b { - contacts.push(Contact::Incoming { - user_id: db_contact.user_id_a, - should_notify: db_contact.should_notify, - }); - } else { - contacts.push(Contact::Outgoing { - user_id: db_contact.user_id_a, - }); - } - } - - contacts.sort_unstable_by_key(|contact| contact.user_id()); - - Ok(contacts) - }) - .await - } - - pub async fn has_contact(&self, user_id_1: UserId, user_id_2: UserId) -> Result { - 
self.transact(|tx| async move { - let (id_a, id_b) = if user_id_1 < user_id_2 { - (user_id_1, user_id_2) - } else { - (user_id_2, user_id_1) - }; - - Ok(contact::Entity::find() - .filter( - contact::Column::UserIdA - .eq(id_a) - .and(contact::Column::UserIdB.eq(id_b)) - .and(contact::Column::Accepted.eq(true)), - ) - .one(&tx) - .await? - .is_some()) - }) - .await - } - - pub async fn send_contact_request(&self, sender_id: UserId, receiver_id: UserId) -> Result<()> { - self.transact(|tx| async move { - let (id_a, id_b, a_to_b) = if sender_id < receiver_id { - (sender_id, receiver_id, true) - } else { - (receiver_id, sender_id, false) - }; - - let rows_affected = contact::Entity::insert(contact::ActiveModel { - user_id_a: ActiveValue::set(id_a), - user_id_b: ActiveValue::set(id_b), - a_to_b: ActiveValue::set(a_to_b), - accepted: ActiveValue::set(false), - should_notify: ActiveValue::set(true), - ..Default::default() - }) - .on_conflict( - OnConflict::columns([contact::Column::UserIdA, contact::Column::UserIdB]) - .values([ - (contact::Column::Accepted, true.into()), - (contact::Column::ShouldNotify, false.into()), - ]) - .action_and_where( - contact::Column::Accepted.eq(false).and( - contact::Column::AToB - .eq(a_to_b) - .and(contact::Column::UserIdA.eq(id_b)) - .or(contact::Column::AToB - .ne(a_to_b) - .and(contact::Column::UserIdA.eq(id_a))), - ), - ) - .to_owned(), - ) - .exec_without_returning(&tx) - .await?; - - if rows_affected == 1 { - tx.commit().await?; - Ok(()) - } else { - Err(anyhow!("contact already requested"))? - } - }) - .await - } - - pub async fn remove_contact(&self, requester_id: UserId, responder_id: UserId) -> Result<()> { - self.transact(|tx| async move { - let (id_a, id_b) = if responder_id < requester_id { - (responder_id, requester_id) - } else { - (requester_id, responder_id) - }; - - let result = contact::Entity::delete_many() - .filter( - contact::Column::UserIdA - .eq(id_a) - .and(contact::Column::UserIdB.eq(id_b)), - ) - .exec(&tx) - .await?; - - if result.rows_affected == 1 { - tx.commit().await?; - Ok(()) - } else { - Err(anyhow!("no such contact"))? - } - }) - .await - } - - pub async fn dismiss_contact_notification( - &self, - user_id: UserId, - contact_user_id: UserId, - ) -> Result<()> { - self.transact(|tx| async move { - let (id_a, id_b, a_to_b) = if user_id < contact_user_id { - (user_id, contact_user_id, true) - } else { - (contact_user_id, user_id, false) - }; - - let result = contact::Entity::update_many() - .set(contact::ActiveModel { - should_notify: ActiveValue::set(false), - ..Default::default() - }) - .filter( - contact::Column::UserIdA - .eq(id_a) - .and(contact::Column::UserIdB.eq(id_b)) - .and( - contact::Column::AToB - .eq(a_to_b) - .and(contact::Column::Accepted.eq(true)) - .or(contact::Column::AToB - .ne(a_to_b) - .and(contact::Column::Accepted.eq(false))), - ), - ) - .exec(&tx) - .await?; - if result.rows_affected == 0 { - Err(anyhow!("no such contact request"))? 
- } else { - tx.commit().await?; - Ok(()) - } - }) - .await - } - - pub async fn respond_to_contact_request( - &self, - responder_id: UserId, - requester_id: UserId, - accept: bool, - ) -> Result<()> { - self.transact(|tx| async move { - let (id_a, id_b, a_to_b) = if responder_id < requester_id { - (responder_id, requester_id, false) - } else { - (requester_id, responder_id, true) - }; - let rows_affected = if accept { - let result = contact::Entity::update_many() - .set(contact::ActiveModel { - accepted: ActiveValue::set(true), - should_notify: ActiveValue::set(true), - ..Default::default() - }) - .filter( - contact::Column::UserIdA - .eq(id_a) - .and(contact::Column::UserIdB.eq(id_b)) - .and(contact::Column::AToB.eq(a_to_b)), - ) - .exec(&tx) - .await?; - result.rows_affected - } else { - let result = contact::Entity::delete_many() - .filter( - contact::Column::UserIdA - .eq(id_a) - .and(contact::Column::UserIdB.eq(id_b)) - .and(contact::Column::AToB.eq(a_to_b)) - .and(contact::Column::Accepted.eq(false)), - ) - .exec(&tx) - .await?; - - result.rows_affected - }; - - if rows_affected == 1 { - tx.commit().await?; - Ok(()) - } else { - Err(anyhow!("no such contact request"))? - } - }) - .await - } - - pub fn fuzzy_like_string(string: &str) -> String { - let mut result = String::with_capacity(string.len() * 2 + 1); - for c in string.chars() { - if c.is_alphanumeric() { - result.push('%'); - result.push(c); - } - } - result.push('%'); - result - } - - pub async fn fuzzy_search_users(&self, name_query: &str, limit: u32) -> Result> { - self.transact(|tx| async { - let tx = tx; - let like_string = Self::fuzzy_like_string(name_query); - let query = " - SELECT users.* - FROM users - WHERE github_login ILIKE $1 - ORDER BY github_login <-> $2 - LIMIT $3 - "; - - Ok(user::Entity::find() - .from_raw_sql(Statement::from_sql_and_values( - self.pool.get_database_backend(), - query.into(), - vec![like_string.into(), name_query.into(), limit.into()], - )) - .all(&tx) - .await?) 
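// [editor's note: not part of the original patch] fuzzy_like_string above
// interleaves '%' around every alphanumeric character ("abcd" becomes
// "%a%b%c%d%"), so the ILIKE pattern matches any login containing the query
// as a subsequence. The ORDER BY github_login <-> $2 then ranks the survivors;
// <-> is PostgreSQL's pg_trgm distance operator, so this query assumes the
// pg_trgm extension is available.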
-        })
-        .await
-    }
-
-    // signups
-
-    pub async fn create_signup(&self, signup: NewSignup) -> Result<()> {
-        self.transact(|tx| async {
-            signup::ActiveModel {
-                email_address: ActiveValue::set(signup.email_address.clone()),
-                email_confirmation_code: ActiveValue::set(random_email_confirmation_code()),
-                email_confirmation_sent: ActiveValue::set(false),
-                platform_mac: ActiveValue::set(signup.platform_mac),
-                platform_windows: ActiveValue::set(signup.platform_windows),
-                platform_linux: ActiveValue::set(signup.platform_linux),
-                platform_unknown: ActiveValue::set(false),
-                editor_features: ActiveValue::set(Some(signup.editor_features.clone())),
-                programming_languages: ActiveValue::set(Some(signup.programming_languages.clone())),
-                device_id: ActiveValue::set(signup.device_id.clone()),
-                ..Default::default()
-            }
-            .insert(&tx)
-            .await?;
-            tx.commit().await?;
-            Ok(())
-        })
-        .await
-    }
-
-    pub async fn get_waitlist_summary(&self) -> Result<WaitlistSummary> {
-        self.transact(|tx| async move {
-            let query = "
-                SELECT
-                    COUNT(*) as count,
-                    COALESCE(SUM(CASE WHEN platform_linux THEN 1 ELSE 0 END), 0) as linux_count,
-                    COALESCE(SUM(CASE WHEN platform_mac THEN 1 ELSE 0 END), 0) as mac_count,
-                    COALESCE(SUM(CASE WHEN platform_windows THEN 1 ELSE 0 END), 0) as windows_count,
-                    COALESCE(SUM(CASE WHEN platform_unknown THEN 1 ELSE 0 END), 0) as unknown_count
-                FROM (
-                    SELECT *
-                    FROM signups
-                    WHERE
-                        NOT email_confirmation_sent
-                ) AS unsent
-            ";
-            Ok(
-                WaitlistSummary::find_by_statement(Statement::from_sql_and_values(
-                    self.pool.get_database_backend(),
-                    query.into(),
-                    vec![],
-                ))
-                .one(&tx)
-                .await?
-                .ok_or_else(|| anyhow!("invalid result"))?,
-            )
-        })
-        .await
-    }
-
-    pub async fn record_sent_invites(&self, invites: &[Invite]) -> Result<()> {
-        let emails = invites
-            .iter()
-            .map(|s| s.email_address.as_str())
-            .collect::<Vec<_>>();
-        self.transact(|tx| async {
-            signup::Entity::update_many()
-                .filter(signup::Column::EmailAddress.is_in(emails.iter().copied()))
-                .col_expr(signup::Column::EmailConfirmationSent, true.into())
-                .exec(&tx)
-                .await?;
-            tx.commit().await?;
-            Ok(())
-        })
-        .await
-    }
-
-    pub async fn get_unsent_invites(&self, count: usize) -> Result<Vec<Invite>> {
-        self.transact(|tx| async move {
-            Ok(signup::Entity::find()
-                .select_only()
-                .column(signup::Column::EmailAddress)
-                .column(signup::Column::EmailConfirmationCode)
-                .filter(
-                    signup::Column::EmailConfirmationSent.eq(false).and(
-                        signup::Column::PlatformMac
-                            .eq(true)
-                            .or(signup::Column::PlatformUnknown.eq(true)),
-                    ),
-                )
-                .limit(count as u64)
-                .into_model()
-                .all(&tx)
-                .await?)
-        })
-        .await
-    }
-
-    // invite codes
-
-    pub async fn create_invite_from_code(
-        &self,
-        code: &str,
-        email_address: &str,
-        device_id: Option<&str>,
-    ) -> Result<Invite> {
-        self.transact(|tx| async move {
-            let existing_user = user::Entity::find()
-                .filter(user::Column::EmailAddress.eq(email_address))
-                .one(&tx)
-                .await?;
-
-            if existing_user.is_some() {
-                Err(anyhow!("email address is already in use"))?;
-            }
-
-            let inviter = match user::Entity::find()
-                .filter(user::Column::InviteCode.eq(code))
-                .one(&tx)
-                .await?
-            {
-                Some(inviter) => inviter,
-                None => {
-                    return Err(Error::Http(
-                        StatusCode::NOT_FOUND,
-                        "invite code not found".to_string(),
-                    ))?
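-                    // (A missing code surfaces as a typed HTTP 404 rather than
-                    // a bare anyhow error, so the API layer can map it directly
-                    // onto a response status.)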
-                }
-            };
-
-            if inviter.invite_count == 0 {
-                Err(Error::Http(
-                    StatusCode::UNAUTHORIZED,
-                    "no invites remaining".to_string(),
-                ))?;
-            }
-
-            let signup = signup::Entity::insert(signup::ActiveModel {
-                email_address: ActiveValue::set(email_address.into()),
-                email_confirmation_code: ActiveValue::set(random_email_confirmation_code()),
-                email_confirmation_sent: ActiveValue::set(false),
-                inviting_user_id: ActiveValue::set(Some(inviter.id)),
-                platform_linux: ActiveValue::set(false),
-                platform_mac: ActiveValue::set(false),
-                platform_windows: ActiveValue::set(false),
-                platform_unknown: ActiveValue::set(true),
-                device_id: ActiveValue::set(device_id.map(|device_id| device_id.into())),
-                ..Default::default()
-            })
-            .on_conflict(
-                OnConflict::column(signup::Column::EmailAddress)
-                    .update_column(signup::Column::InvitingUserId)
-                    .to_owned(),
-            )
-            .exec_with_returning(&tx)
-            .await?;
-            tx.commit().await?;
-
-            Ok(Invite {
-                email_address: signup.email_address,
-                email_confirmation_code: signup.email_confirmation_code,
-            })
-        })
-        .await
-    }
-
-    pub async fn create_user_from_invite(
-        &self,
-        invite: &Invite,
-        user: NewUserParams,
-    ) -> Result<Option<NewUserResult>> {
-        self.transact(|tx| async {
-            let tx = tx;
-            let signup = signup::Entity::find()
-                .filter(
-                    signup::Column::EmailAddress
-                        .eq(invite.email_address.as_str())
-                        .and(
-                            signup::Column::EmailConfirmationCode
-                                .eq(invite.email_confirmation_code.as_str()),
-                        ),
-                )
-                .one(&tx)
-                .await?
-                .ok_or_else(|| Error::Http(StatusCode::NOT_FOUND, "no such invite".to_string()))?;
-
-            if signup.user_id.is_some() {
-                return Ok(None);
-            }
-
-            let user = user::Entity::insert(user::ActiveModel {
-                email_address: ActiveValue::set(Some(invite.email_address.clone())),
-                github_login: ActiveValue::set(user.github_login.clone()),
-                github_user_id: ActiveValue::set(Some(user.github_user_id)),
-                admin: ActiveValue::set(false),
-                invite_count: ActiveValue::set(user.invite_count),
-                invite_code: ActiveValue::set(Some(random_invite_code())),
-                metrics_id: ActiveValue::set(Uuid::new_v4()),
-                ..Default::default()
-            })
-            .on_conflict(
-                OnConflict::column(user::Column::GithubLogin)
-                    .update_columns([
-                        user::Column::EmailAddress,
-                        user::Column::GithubUserId,
-                        user::Column::Admin,
-                    ])
-                    .to_owned(),
-            )
-            .exec_with_returning(&tx)
-            .await?;
-
-            let mut signup = signup.into_active_model();
-            signup.user_id = ActiveValue::set(Some(user.id));
-            let signup = signup.update(&tx).await?;
-
-            if let Some(inviting_user_id) = signup.inviting_user_id {
-                let result = user::Entity::update_many()
-                    .filter(
-                        user::Column::Id
-                            .eq(inviting_user_id)
-                            .and(user::Column::InviteCount.gt(0)),
-                    )
-                    .col_expr(
-                        user::Column::InviteCount,
-                        Expr::col(user::Column::InviteCount).sub(1),
-                    )
-                    .exec(&tx)
-                    .await?;
-
-                if result.rows_affected == 0 {
-                    Err(Error::Http(
-                        StatusCode::UNAUTHORIZED,
-                        "no invites remaining".to_string(),
-                    ))?;
-                }
-
-                contact::Entity::insert(contact::ActiveModel {
-                    user_id_a: ActiveValue::set(inviting_user_id),
-                    user_id_b: ActiveValue::set(user.id),
-                    a_to_b: ActiveValue::set(true),
-                    should_notify: ActiveValue::set(true),
-                    accepted: ActiveValue::set(true),
-                    ..Default::default()
-                })
-                .on_conflict(OnConflict::new().do_nothing().to_owned())
-                .exec_without_returning(&tx)
-                .await?;
-            }
-
-            tx.commit().await?;
-            Ok(Some(NewUserResult {
-                user_id: user.id,
-                metrics_id: user.metrics_id.to_string(),
-                inviting_user_id: signup.inviting_user_id,
-                signup_device_id: signup.device_id,
-            }))
-        })
-        .await
-    }
-
-    pub async fn set_invite_count_for_user(&self, id: UserId, count: u32) -> Result<()> {
-        self.transact(|tx| async move {
-            if count > 0 {
-                user::Entity::update_many()
-                    .filter(
-                        user::Column::Id
-                            .eq(id)
-                            .and(user::Column::InviteCode.is_null()),
-                    )
-                    .col_expr(user::Column::InviteCode, random_invite_code().into())
-                    .exec(&tx)
-                    .await?;
-            }
-
-            user::Entity::update_many()
-                .filter(user::Column::Id.eq(id))
-                .col_expr(user::Column::InviteCount, count.into())
-                .exec(&tx)
-                .await?;
-            tx.commit().await?;
-            Ok(())
-        })
-        .await
-    }
-
-    pub async fn get_invite_code_for_user(&self, id: UserId) -> Result<Option<(String, u32)>> {
-        self.transact(|tx| async move {
-            match user::Entity::find_by_id(id).one(&tx).await? {
-                Some(user) if user.invite_code.is_some() => {
-                    Ok(Some((user.invite_code.unwrap(), user.invite_count as u32)))
-                }
-                _ => Ok(None),
-            }
-        })
-        .await
-    }
-
-    pub async fn get_user_for_invite_code(&self, code: &str) -> Result<User> {
-        self.transact(|tx| async move {
-            user::Entity::find()
-                .filter(user::Column::InviteCode.eq(code))
-                .one(&tx)
-                .await?
-                .ok_or_else(|| {
-                    Error::Http(
-                        StatusCode::NOT_FOUND,
-                        "that invite code does not exist".to_string(),
-                    )
-                })
-        })
-        .await
-    }
-
-    // projects
-
-    pub async fn share_project(
-        &self,
-        room_id: RoomId,
-        connection_id: ConnectionId,
-        worktrees: &[proto::WorktreeMetadata],
-    ) -> Result<RoomGuard<(ProjectId, proto::Room)>> {
-        self.transact(|tx| async move {
-            let participant = room_participant::Entity::find()
-                .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0))
-                .one(&tx)
-                .await?
-                .ok_or_else(|| anyhow!("could not find participant"))?;
-            if participant.room_id != room_id {
-                return Err(anyhow!("shared project on unexpected room"))?;
-            }
-
-            let project = project::ActiveModel {
-                room_id: ActiveValue::set(participant.room_id),
-                host_user_id: ActiveValue::set(participant.user_id),
-                host_connection_id: ActiveValue::set(connection_id.0 as i32),
-                ..Default::default()
-            }
-            .insert(&tx)
-            .await?;
-
-            worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel {
-                id: ActiveValue::set(worktree.id as i32),
-                project_id: ActiveValue::set(project.id),
-                abs_path: ActiveValue::set(worktree.abs_path.clone()),
-                root_name: ActiveValue::set(worktree.root_name.clone()),
-                visible: ActiveValue::set(worktree.visible),
-                scan_id: ActiveValue::set(0),
-                is_complete: ActiveValue::set(false),
-            }))
-            .exec(&tx)
-            .await?;
-
-            project_collaborator::ActiveModel {
-                project_id: ActiveValue::set(project.id),
-                connection_id: ActiveValue::set(connection_id.0 as i32),
-                user_id: ActiveValue::set(participant.user_id),
-                replica_id: ActiveValue::set(0),
-                is_host: ActiveValue::set(true),
-                ..Default::default()
-            }
-            .insert(&tx)
-            .await?;
-
-            let room = self.get_room(room_id, &tx).await?;
-            self.commit_room_transaction(room_id, tx, (project.id, room))
-                .await
-        })
-        .await
-    }
-
-    async fn get_room(&self, room_id: RoomId, tx: &DatabaseTransaction) -> Result<proto::Room> {
-        let db_room = room::Entity::find_by_id(room_id)
-            .one(tx)
-            .await?
-            .ok_or_else(|| anyhow!("could not find room"))?;
-
-        let mut db_participants = db_room
-            .find_related(room_participant::Entity)
-            .stream(tx)
-            .await?;
-        let mut participants = HashMap::default();
-        let mut pending_participants = Vec::new();
-        while let Some(db_participant) = db_participants.next().await {
-            let db_participant = db_participant?;
-            if let Some(answering_connection_id) = db_participant.answering_connection_id {
-                let location = match (
-                    db_participant.location_kind,
-                    db_participant.location_project_id,
-                ) {
-                    (Some(0), Some(project_id)) => {
-                        Some(proto::participant_location::Variant::SharedProject(
-                            proto::participant_location::SharedProject {
-                                id: project_id.to_proto(),
-                            },
-                        ))
-                    }
-                    (Some(1), _) => Some(proto::participant_location::Variant::UnsharedProject(
-                        Default::default(),
-                    )),
-                    _ => Some(proto::participant_location::Variant::External(
-                        Default::default(),
-                    )),
-                };
-                participants.insert(
-                    answering_connection_id,
-                    proto::Participant {
-                        user_id: db_participant.user_id.to_proto(),
-                        peer_id: answering_connection_id as u32,
-                        projects: Default::default(),
-                        location: Some(proto::ParticipantLocation { variant: location }),
-                    },
-                );
-            } else {
-                pending_participants.push(proto::PendingParticipant {
-                    user_id: db_participant.user_id.to_proto(),
-                    calling_user_id: db_participant.calling_user_id.to_proto(),
-                    initial_project_id: db_participant.initial_project_id.map(|id| id.to_proto()),
-                });
-            }
-        }
-
-        let mut db_projects = db_room
-            .find_related(project::Entity)
-            .find_with_related(worktree::Entity)
-            .stream(tx)
-            .await?;
-
-        while let Some(row) = db_projects.next().await {
-            let (db_project, db_worktree) = row?;
-            if let Some(participant) = participants.get_mut(&db_project.host_connection_id) {
-                let project = if let Some(project) = participant
-                    .projects
-                    .iter_mut()
-                    .find(|project| project.id == db_project.id.to_proto())
-                {
-                    project
-                } else {
-                    participant.projects.push(proto::ParticipantProject {
-                        id: db_project.id.to_proto(),
-                        worktree_root_names: Default::default(),
-                    });
-                    participant.projects.last_mut().unwrap()
-                };
-
-                if let Some(db_worktree) = db_worktree {
-                    project.worktree_root_names.push(db_worktree.root_name);
-                }
-            }
-        }
-
-        Ok(proto::Room {
-            id: db_room.id.to_proto(),
-            live_kit_room: db_room.live_kit_room,
-            participants: participants.into_values().collect(),
-            pending_participants,
-        })
-    }
-
-    async fn commit_room_transaction<T>(
-        &self,
-        room_id: RoomId,
-        tx: DatabaseTransaction,
-        data: T,
-    ) -> Result<RoomGuard<T>> {
-        let lock = self.rooms.entry(room_id).or_default().clone();
-        let _guard = lock.lock_owned().await;
-        tx.commit().await?;
-        Ok(RoomGuard {
-            data,
-            _guard,
-            _not_send: PhantomData,
-        })
-    }
-
-    pub async fn create_access_token_hash(
-        &self,
-        user_id: UserId,
-        access_token_hash: &str,
-        max_access_token_count: usize,
-    ) -> Result<()> {
-        self.transact(|tx| async {
-            let tx = tx;
-
-            access_token::ActiveModel {
-                user_id: ActiveValue::set(user_id),
-                hash: ActiveValue::set(access_token_hash.into()),
-                ..Default::default()
-            }
-            .insert(&tx)
-            .await?;
-
-            access_token::Entity::delete_many()
-                .filter(
-                    access_token::Column::Id.in_subquery(
-                        Query::select()
-                            .column(access_token::Column::Id)
-                            .from(access_token::Entity)
-                            .and_where(access_token::Column::UserId.eq(user_id))
-                            .order_by(access_token::Column::Id, sea_orm::Order::Desc)
-                            .limit(10000)
-                            .offset(max_access_token_count as u64)
-                            .to_owned(),
-                    ),
-                )
-                .exec(&tx)
-                .await?;
-            tx.commit().await?;
-            Ok(())
-        })
-        .await
-    }
-
-    pub async fn get_access_token_hashes(&self, user_id: UserId) -> Result<Vec<String>> {
-        #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
-        enum QueryAs {
-            Hash,
-        }
-
-        self.transact(|tx| async move {
-            Ok(access_token::Entity::find()
-                .select_only()
-                .column(access_token::Column::Hash)
-                .filter(access_token::Column::UserId.eq(user_id))
-                .order_by_desc(access_token::Column::Id)
-                .into_values::<_, QueryAs>()
-                .all(&tx)
-                .await?)
-        })
-        .await
-    }
-
-    async fn transact<F, Fut, T>(&self, f: F) -> Result<T>
-    where
-        F: Send + Fn(DatabaseTransaction) -> Fut,
-        Fut: Send + Future<Output = Result<T>>,
-    {
-        let body = async {
-            loop {
-                let tx = self.pool.begin().await?;
-
-                // In Postgres, serializable transactions are opt-in
-                if let DatabaseBackend::Postgres = self.pool.get_database_backend() {
-                    tx.execute(Statement::from_string(
-                        DatabaseBackend::Postgres,
-                        "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;".into(),
-                    ))
-                    .await?;
-                }
-
-                match f(tx).await {
-                    Ok(result) => return Ok(result),
-                    Err(error) => match error {
-                        Error::Database2(
-                            DbErr::Exec(sea_orm::RuntimeErr::SqlxError(error))
-                            | DbErr::Query(sea_orm::RuntimeErr::SqlxError(error)),
-                        ) if error
-                            .as_database_error()
-                            .and_then(|error| error.code())
-                            .as_deref()
-                            == Some("40001") =>
-                        {
-                            // Retry (don't break the loop)
-                        }
-                        error @ _ => return Err(error),
-                    },
-                }
-            }
-        };
-
-        #[cfg(test)]
-        {
-            if let Some(background) = self.background.as_ref() {
-                background.simulate_random_delay().await;
-            }
-
-            self.runtime.as_ref().unwrap().block_on(body)
-        }
-
-        #[cfg(not(test))]
-        {
-            body.await
-        }
-    }
-}
-
-pub struct RoomGuard<T> {
-    data: T,
-    _guard: OwnedMutexGuard<()>,
-    _not_send: PhantomData<Rc<()>>,
-}
-
-impl<T> Deref for RoomGuard<T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        &self.data
-    }
-}
-
-impl<T> DerefMut for RoomGuard<T> {
-    fn deref_mut(&mut self) -> &mut T {
-        &mut self.data
-    }
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct NewUserParams {
-    pub github_login: String,
-    pub github_user_id: i32,
-    pub invite_count: i32,
-}
-
-#[derive(Debug)]
-pub struct NewUserResult {
-    pub user_id: UserId,
-    pub metrics_id: String,
-    pub inviting_user_id: Option<UserId>,
-    pub signup_device_id: Option<String>,
-}
-
-fn random_invite_code() -> String {
-    nanoid::nanoid!(16)
-}
-
-fn random_email_confirmation_code() -> String {
-    nanoid::nanoid!(64)
-}
-
-macro_rules! id_type {
-    ($name:ident) => {
-        #[derive(
-            Clone,
-            Copy,
-            Debug,
-            Default,
-            PartialEq,
-            Eq,
-            PartialOrd,
-            Ord,
-            Hash,
-            sqlx::Type,
-            Serialize,
-            Deserialize,
-        )]
-        #[sqlx(transparent)]
-        #[serde(transparent)]
-        pub struct $name(pub i32);
-
-        impl $name {
-            #[allow(unused)]
-            pub const MAX: Self = Self(i32::MAX);
-
-            #[allow(unused)]
-            pub fn from_proto(value: u64) -> Self {
-                Self(value as i32)
-            }
-
-            #[allow(unused)]
-            pub fn to_proto(self) -> u64 {
-                self.0 as u64
-            }
-        }
-
-        impl std::fmt::Display for $name {
-            fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
-                self.0.fmt(f)
-            }
-        }
-
-        impl From<$name> for sea_query::Value {
-            fn from(value: $name) -> Self {
-                sea_query::Value::Int(Some(value.0))
-            }
-        }
-
-        impl sea_orm::TryGetable for $name {
-            fn try_get(
-                res: &sea_orm::QueryResult,
-                pre: &str,
-                col: &str,
-            ) -> Result<Self, sea_orm::TryGetError> {
-                Ok(Self(i32::try_get(res, pre, col)?))
-            }
-        }
-
-        impl sea_query::ValueType for $name {
-            fn try_from(v: Value) -> Result<Self, sea_query::ValueTypeErr> {
-                match v {
-                    Value::TinyInt(Some(int)) => {
-                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
-                    }
-                    Value::SmallInt(Some(int)) => {
-                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
-                    }
-                    Value::Int(Some(int)) => {
-                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
-                    }
-                    Value::BigInt(Some(int)) => {
-                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
-                    }
-                    Value::TinyUnsigned(Some(int)) => {
-                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
-                    }
-                    Value::SmallUnsigned(Some(int)) => {
-                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
-                    }
-                    Value::Unsigned(Some(int)) => {
-                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
-                    }
-                    Value::BigUnsigned(Some(int)) => {
-                        Ok(Self(int.try_into().map_err(|_| sea_query::ValueTypeErr)?))
-                    }
-                    _ => Err(sea_query::ValueTypeErr),
-                }
-            }
-
-            fn type_name() -> String {
-                stringify!($name).into()
-            }
-
-            fn array_type() -> sea_query::ArrayType {
-                sea_query::ArrayType::Int
-            }
-
-            fn column_type() -> sea_query::ColumnType {
-                sea_query::ColumnType::Integer(None)
-            }
-        }
-
-        impl sea_orm::TryFromU64 for $name {
-            fn try_from_u64(n: u64) -> Result<Self, DbErr> {
-                Ok(Self(n.try_into().map_err(|_| {
-                    DbErr::ConvertFromU64(concat!(
-                        "error converting ",
-                        stringify!($name),
-                        " to u64"
-                    ))
-                })?))
-            }
-        }
-
-        impl sea_query::Nullable for $name {
-            fn null() -> Value {
-                Value::Int(None)
-            }
-        }
-    };
-}
-
-id_type!(AccessTokenId);
-id_type!(ContactId);
-id_type!(UserId);
-id_type!(RoomId);
-id_type!(RoomParticipantId);
-id_type!(ProjectId);
-id_type!(ProjectCollaboratorId);
-id_type!(SignupId);
-id_type!(WorktreeId);
-
-#[cfg(test)]
-pub use test::*;
-
-#[cfg(test)]
-mod test {
-    use super::*;
-    use gpui::executor::Background;
-    use lazy_static::lazy_static;
-    use parking_lot::Mutex;
-    use rand::prelude::*;
-    use sea_orm::ConnectionTrait;
-    use sqlx::migrate::MigrateDatabase;
-    use std::sync::Arc;
-
-    pub struct TestDb {
-        pub db: Option<Arc<Database>>,
-        pub connection: Option<sqlx::AnyConnection>,
-    }
-
-    impl TestDb {
-        pub fn sqlite(background: Arc<Background>) -> Self {
-            let url = format!("sqlite::memory:");
-            let runtime = tokio::runtime::Builder::new_current_thread()
-                .enable_io()
-                .enable_time()
-                .build()
-                .unwrap();
-
-            let mut db = runtime.block_on(async {
-                let mut options = ConnectOptions::new(url);
-                options.max_connections(5);
-                let db = Database::new(options).await.unwrap();
-                let sql = include_str!(concat!(
-                    env!("CARGO_MANIFEST_DIR"),
-                    "/migrations.sqlite/20221109000000_test_schema.sql"
-                ));
-                db.pool
-                    .execute(sea_orm::Statement::from_string(
-                        db.pool.get_database_backend(),
-                        sql.into(),
-                    ))
-                    .await
-                    .unwrap();
-                db
-            });
-
-            db.background = Some(background);
-            db.runtime = Some(runtime);
-
-            Self {
-                db: Some(Arc::new(db)),
-                connection: None,
-            }
-        }
-
-        pub fn postgres(background: Arc<Background>) -> Self {
-            lazy_static! {
-                static ref LOCK: Mutex<()> = Mutex::new(());
-            }
-
-            let _guard = LOCK.lock();
-            let mut rng = StdRng::from_entropy();
-            let url = format!(
-                "postgres://postgres@localhost/zed-test-{}",
-                rng.gen::<u128>()
-            );
-            let runtime = tokio::runtime::Builder::new_current_thread()
-                .enable_io()
-                .enable_time()
-                .build()
-                .unwrap();
-
-            let mut db = runtime.block_on(async {
-                sqlx::Postgres::create_database(&url)
-                    .await
-                    .expect("failed to create test db");
-                let mut options = ConnectOptions::new(url);
-                options
-                    .max_connections(5)
-                    .idle_timeout(Duration::from_secs(0));
-                let db = Database::new(options).await.unwrap();
-                let migrations_path = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations");
-                db.migrate(Path::new(migrations_path), false).await.unwrap();
-                db
-            });
-
-            db.background = Some(background);
-            db.runtime = Some(runtime);
-
-            Self {
-                db: Some(Arc::new(db)),
-                connection: None,
-            }
-        }
-
-        pub fn db(&self) -> &Arc<Database> {
-            self.db.as_ref().unwrap()
-        }
-    }
-
-    impl Drop for TestDb {
-        fn drop(&mut self) {
-            let db = self.db.take().unwrap();
-            if let DatabaseBackend::Postgres = db.pool.get_database_backend() {
-                db.runtime.as_ref().unwrap().block_on(async {
-                    use util::ResultExt;
-                    let query = "
-                        SELECT pg_terminate_backend(pg_stat_activity.pid)
-                        FROM pg_stat_activity
-                        WHERE
-                            pg_stat_activity.datname = current_database() AND
-                            pid <> pg_backend_pid();
-                    ";
-                    db.pool
-                        .execute(sea_orm::Statement::from_string(
-                            db.pool.get_database_backend(),
-                            query.into(),
-                        ))
-                        .await
-                        .log_err();
-                    sqlx::Postgres::drop_database(db.options.get_url())
-                        .await
-                        .log_err();
-                })
-            }
-        }
-    }
-}
diff --git a/crates/collab/src/db2/tests.rs b/crates/collab/src/db2/tests.rs
deleted file mode 100644
index b276bd5057..0000000000
--- a/crates/collab/src/db2/tests.rs
+++ /dev/null
@@ -1,813 +0,0 @@
-use super::*;
-use gpui::executor::{Background, Deterministic};
-use std::sync::Arc;
-
-macro_rules! test_both_dbs {
-    ($postgres_test_name:ident, $sqlite_test_name:ident, $db:ident, $body:block) => {
-        #[gpui::test]
-        async fn $postgres_test_name() {
-            let test_db = TestDb::postgres(Deterministic::new(0).build_background());
-            let $db = test_db.db();
-            $body
-        }
-
-        #[gpui::test]
-        async fn $sqlite_test_name() {
-            let test_db = TestDb::sqlite(Deterministic::new(0).build_background());
-            let $db = test_db.db();
-            $body
-        }
-    };
-}
-
-test_both_dbs!(
-    test_get_users_by_ids_postgres,
-    test_get_users_by_ids_sqlite,
-    db,
-    {
-        let mut user_ids = Vec::new();
-        let mut user_metric_ids = Vec::new();
-        for i in 1..=4 {
-            let user = db
-                .create_user(
-                    &format!("user{i}@example.com"),
-                    false,
-                    NewUserParams {
-                        github_login: format!("user{i}"),
-                        github_user_id: i,
-                        invite_count: 0,
-                    },
-                )
-                .await
-                .unwrap();
-            user_ids.push(user.user_id);
-            user_metric_ids.push(user.metrics_id);
-        }
-
-        assert_eq!(
-            db.get_users_by_ids(user_ids.clone()).await.unwrap(),
-            vec![
-                User {
-                    id: user_ids[0],
-                    github_login: "user1".to_string(),
-                    github_user_id: Some(1),
-                    email_address: Some("user1@example.com".to_string()),
-                    admin: false,
-                    metrics_id: user_metric_ids[0].parse().unwrap(),
-                    ..Default::default()
-                },
-                User {
-                    id: user_ids[1],
-                    github_login: "user2".to_string(),
-                    github_user_id: Some(2),
-                    email_address: Some("user2@example.com".to_string()),
-                    admin: false,
-                    metrics_id: user_metric_ids[1].parse().unwrap(),
-                    ..Default::default()
-                },
-                User {
-                    id: user_ids[2],
-                    github_login: "user3".to_string(),
-                    github_user_id: Some(3),
-                    email_address: Some("user3@example.com".to_string()),
-                    admin: false,
-                    metrics_id: user_metric_ids[2].parse().unwrap(),
-                    ..Default::default()
-                },
-                User {
-                    id: user_ids[3],
-                    github_login: "user4".to_string(),
-                    github_user_id: Some(4),
-                    email_address: Some("user4@example.com".to_string()),
-                    admin: false,
-                    metrics_id: user_metric_ids[3].parse().unwrap(),
-                    ..Default::default()
-                }
-            ]
-        );
-    }
-);
-
-test_both_dbs!(
-    test_get_user_by_github_account_postgres,
-    test_get_user_by_github_account_sqlite,
-    db,
-    {
-        let user_id1 = db
-            .create_user(
-                "user1@example.com",
-                false,
-                NewUserParams {
-                    github_login: "login1".into(),
-                    github_user_id: 101,
-                    invite_count: 0,
-                },
-            )
-            .await
-            .unwrap()
-            .user_id;
-        let user_id2 = db
-            .create_user(
-                "user2@example.com",
-                false,
-                NewUserParams {
-                    github_login: "login2".into(),
-                    github_user_id: 102,
-                    invite_count: 0,
-                },
-            )
-            .await
-            .unwrap()
-            .user_id;
-
-        let user = db
-            .get_user_by_github_account("login1", None)
-            .await
-            .unwrap()
-            .unwrap();
-        assert_eq!(user.id, user_id1);
-        assert_eq!(&user.github_login, "login1");
-        assert_eq!(user.github_user_id, Some(101));
-
-        assert!(db
-            .get_user_by_github_account("non-existent-login", None)
-            .await
-            .unwrap()
-            .is_none());
-
-        let user = db
-            .get_user_by_github_account("the-new-login2", Some(102))
-            .await
-            .unwrap()
-            .unwrap();
-        assert_eq!(user.id, user_id2);
-        assert_eq!(&user.github_login, "the-new-login2");
-        assert_eq!(user.github_user_id, Some(102));
-    }
-);
-
-test_both_dbs!(
-    test_create_access_tokens_postgres,
-    test_create_access_tokens_sqlite,
-    db,
-    {
-        let user = db
-            .create_user(
-                "u1@example.com",
-                false,
-                NewUserParams {
-                    github_login: "u1".into(),
-                    github_user_id: 1,
-                    invite_count: 0,
-                },
-            )
-            .await
-            .unwrap()
-            .user_id;
-
-        db.create_access_token_hash(user, "h1", 3).await.unwrap();
-        db.create_access_token_hash(user, "h2", 3).await.unwrap();
-        assert_eq!(
-            db.get_access_token_hashes(user).await.unwrap(),
-            &["h2".to_string(), "h1".to_string()]
-        );
-
-        db.create_access_token_hash(user, "h3", 3).await.unwrap();
-        assert_eq!(
-            db.get_access_token_hashes(user).await.unwrap(),
-            &["h3".to_string(), "h2".to_string(), "h1".to_string(),]
-        );
-
-        db.create_access_token_hash(user, "h4", 3).await.unwrap();
-        assert_eq!(
-            db.get_access_token_hashes(user).await.unwrap(),
-            &["h4".to_string(), "h3".to_string(), "h2".to_string(),]
-        );
-
-        db.create_access_token_hash(user, "h5", 3).await.unwrap();
-        assert_eq!(
-            db.get_access_token_hashes(user).await.unwrap(),
-            &["h5".to_string(), "h4".to_string(), "h3".to_string()]
-        );
-    }
-);
-
-test_both_dbs!(test_add_contacts_postgres, test_add_contacts_sqlite, db, {
-    let mut user_ids = Vec::new();
-    for i in 0..3 {
-        user_ids.push(
-            db.create_user(
-                &format!("user{i}@example.com"),
-                false,
-                NewUserParams {
-                    github_login: format!("user{i}"),
-                    github_user_id: i,
-                    invite_count: 0,
-                },
-            )
-            .await
-            .unwrap()
-            .user_id,
-        );
-    }
-
-    let user_1 = user_ids[0];
-    let user_2 = user_ids[1];
-    let user_3 = user_ids[2];
-
-    // User starts with no contacts
-    assert_eq!(db.get_contacts(user_1).await.unwrap(), &[]);
-
-    // User requests a contact. Both users see the pending request.
-    db.send_contact_request(user_1, user_2).await.unwrap();
-    assert!(!db.has_contact(user_1, user_2).await.unwrap());
-    assert!(!db.has_contact(user_2, user_1).await.unwrap());
-    assert_eq!(
-        db.get_contacts(user_1).await.unwrap(),
-        &[Contact::Outgoing { user_id: user_2 }],
-    );
-    assert_eq!(
-        db.get_contacts(user_2).await.unwrap(),
-        &[Contact::Incoming {
-            user_id: user_1,
-            should_notify: true
-        }]
-    );
-
-    // User 2 dismisses the contact request notification without accepting or rejecting.
-    // We shouldn't notify them again.
-    db.dismiss_contact_notification(user_1, user_2)
-        .await
-        .unwrap_err();
-    db.dismiss_contact_notification(user_2, user_1)
-        .await
-        .unwrap();
-    assert_eq!(
-        db.get_contacts(user_2).await.unwrap(),
-        &[Contact::Incoming {
-            user_id: user_1,
-            should_notify: false
-        }]
-    );
-
-    // User can't accept their own contact request
-    db.respond_to_contact_request(user_1, user_2, true)
-        .await
-        .unwrap_err();
-
-    // User accepts a contact request. Both users see the contact.
-    db.respond_to_contact_request(user_2, user_1, true)
-        .await
-        .unwrap();
-    assert_eq!(
-        db.get_contacts(user_1).await.unwrap(),
-        &[Contact::Accepted {
-            user_id: user_2,
-            should_notify: true,
-            busy: false,
-        }],
-    );
-    assert!(db.has_contact(user_1, user_2).await.unwrap());
-    assert!(db.has_contact(user_2, user_1).await.unwrap());
-    assert_eq!(
-        db.get_contacts(user_2).await.unwrap(),
-        &[Contact::Accepted {
-            user_id: user_1,
-            should_notify: false,
-            busy: false,
-        }]
-    );
-
-    // Users cannot re-request existing contacts.
-    db.send_contact_request(user_1, user_2).await.unwrap_err();
-    db.send_contact_request(user_2, user_1).await.unwrap_err();
-
-    // A user who accepted a request can't dismiss the acceptance notification;
-    // it belongs to the original requester.
-    db.dismiss_contact_notification(user_2, user_1)
-        .await
-        .unwrap_err();
-    assert_eq!(
-        db.get_contacts(user_1).await.unwrap(),
-        &[Contact::Accepted {
-            user_id: user_2,
-            should_notify: true,
-            busy: false,
-        }]
-    );
-
-    // Users can dismiss notifications of other users accepting their requests.
-    db.dismiss_contact_notification(user_1, user_2)
-        .await
-        .unwrap();
-    assert_eq!(
-        db.get_contacts(user_1).await.unwrap(),
-        &[Contact::Accepted {
-            user_id: user_2,
-            should_notify: false,
-            busy: false,
-        }]
-    );
-
-    // Users send each other concurrent contact requests and
-    // see that they are immediately accepted.
-    db.send_contact_request(user_1, user_3).await.unwrap();
-    db.send_contact_request(user_3, user_1).await.unwrap();
-    assert_eq!(
-        db.get_contacts(user_1).await.unwrap(),
-        &[
-            Contact::Accepted {
-                user_id: user_2,
-                should_notify: false,
-                busy: false,
-            },
-            Contact::Accepted {
-                user_id: user_3,
-                should_notify: false,
-                busy: false,
-            }
-        ]
-    );
-    assert_eq!(
-        db.get_contacts(user_3).await.unwrap(),
-        &[Contact::Accepted {
-            user_id: user_1,
-            should_notify: false,
-            busy: false,
-        }],
-    );
-
-    // User declines a contact request. Both users see that it is gone.
-    db.send_contact_request(user_2, user_3).await.unwrap();
-    db.respond_to_contact_request(user_3, user_2, false)
-        .await
-        .unwrap();
-    assert!(!db.has_contact(user_2, user_3).await.unwrap());
-    assert!(!db.has_contact(user_3, user_2).await.unwrap());
-    assert_eq!(
-        db.get_contacts(user_2).await.unwrap(),
-        &[Contact::Accepted {
-            user_id: user_1,
-            should_notify: false,
-            busy: false,
-        }]
-    );
-    assert_eq!(
-        db.get_contacts(user_3).await.unwrap(),
-        &[Contact::Accepted {
-            user_id: user_1,
-            should_notify: false,
-            busy: false,
-        }],
-    );
-});
-
-test_both_dbs!(test_metrics_id_postgres, test_metrics_id_sqlite, db, {
-    let NewUserResult {
-        user_id: user1,
-        metrics_id: metrics_id1,
-        ..
-    } = db
-        .create_user(
-            "person1@example.com",
-            false,
-            NewUserParams {
-                github_login: "person1".into(),
-                github_user_id: 101,
-                invite_count: 5,
-            },
-        )
-        .await
-        .unwrap();
-    let NewUserResult {
-        user_id: user2,
-        metrics_id: metrics_id2,
-        ..
-    } = db
-        .create_user(
-            "person2@example.com",
-            false,
-            NewUserParams {
-                github_login: "person2".into(),
-                github_user_id: 102,
-                invite_count: 5,
-            },
-        )
-        .await
-        .unwrap();
-
-    assert_eq!(db.get_user_metrics_id(user1).await.unwrap(), metrics_id1);
-    assert_eq!(db.get_user_metrics_id(user2).await.unwrap(), metrics_id2);
-    assert_eq!(metrics_id1.len(), 36);
-    assert_eq!(metrics_id2.len(), 36);
-    assert_ne!(metrics_id1, metrics_id2);
-});
-
-#[test]
-fn test_fuzzy_like_string() {
-    assert_eq!(Database::fuzzy_like_string("abcd"), "%a%b%c%d%");
-    assert_eq!(Database::fuzzy_like_string("x y"), "%x%y%");
-    assert_eq!(Database::fuzzy_like_string(" z  "), "%z%");
-}
-
-#[gpui::test]
-async fn test_fuzzy_search_users() {
-    let test_db = TestDb::postgres(build_background_executor());
-    let db = test_db.db();
-    for (i, github_login) in [
-        "California",
-        "colorado",
-        "oregon",
-        "washington",
-        "florida",
-        "delaware",
-        "rhode-island",
-    ]
-    .into_iter()
-    .enumerate()
-    {
-        db.create_user(
-            &format!("{github_login}@example.com"),
-            false,
-            NewUserParams {
-                github_login: github_login.into(),
-                github_user_id: i as i32,
-                invite_count: 0,
-            },
-        )
-        .await
-        .unwrap();
-    }
-
-    assert_eq!(
-        fuzzy_search_user_names(db, "clr").await,
-        &["colorado", "California"]
-    );
-    assert_eq!(
-        fuzzy_search_user_names(db, "ro").await,
-        &["rhode-island", "colorado", "oregon"],
-    );
-
-    async fn fuzzy_search_user_names(db: &Database, query: &str) -> Vec<String> {
-        db.fuzzy_search_users(query, 10)
-            .await
-            .unwrap()
-            .into_iter()
-            .map(|user| user.github_login)
-            .collect::<Vec<_>>()
-    }
-}
-
-#[gpui::test]
-async fn test_invite_codes() {
-    let test_db = TestDb::postgres(build_background_executor());
-    let db = test_db.db();
-
-    let NewUserResult { user_id: user1, .. } = db
-        .create_user(
-            "user1@example.com",
-            false,
-            NewUserParams {
-                github_login: "user1".into(),
-                github_user_id: 0,
-                invite_count: 0,
-            },
-        )
-        .await
-        .unwrap();
-
-    // Initially, user 1 has no invite code
-    assert_eq!(db.get_invite_code_for_user(user1).await.unwrap(), None);
-
-    // Setting invite count to 0 when no code is assigned does not assign a new code
-    db.set_invite_count_for_user(user1, 0).await.unwrap();
-    assert!(db.get_invite_code_for_user(user1).await.unwrap().is_none());
-
-    // User 1 creates an invite code that can be used twice.
-    db.set_invite_count_for_user(user1, 2).await.unwrap();
-    let (invite_code, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
-    assert_eq!(invite_count, 2);
-
-    // User 2 redeems the invite code and becomes a contact of user 1.
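-    // (Redemption is a two-step flow: `create_invite_from_code` upserts a
-    // signup row tied to the inviter, then `create_user_from_invite` turns
-    // that signup into a user and decrements the inviter's invite count.)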
-    let user2_invite = db
-        .create_invite_from_code(&invite_code, "user2@example.com", Some("user-2-device-id"))
-        .await
-        .unwrap();
-    let NewUserResult {
-        user_id: user2,
-        inviting_user_id,
-        signup_device_id,
-        metrics_id,
-    } = db
-        .create_user_from_invite(
-            &user2_invite,
-            NewUserParams {
-                github_login: "user2".into(),
-                github_user_id: 2,
-                invite_count: 7,
-            },
-        )
-        .await
-        .unwrap()
-        .unwrap();
-    let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
-    assert_eq!(invite_count, 1);
-    assert_eq!(inviting_user_id, Some(user1));
-    assert_eq!(signup_device_id.unwrap(), "user-2-device-id");
-    assert_eq!(db.get_user_metrics_id(user2).await.unwrap(), metrics_id);
-    assert_eq!(
-        db.get_contacts(user1).await.unwrap(),
-        [Contact::Accepted {
-            user_id: user2,
-            should_notify: true,
-            busy: false,
-        }]
-    );
-    assert_eq!(
-        db.get_contacts(user2).await.unwrap(),
-        [Contact::Accepted {
-            user_id: user1,
-            should_notify: false,
-            busy: false,
-        }]
-    );
-    assert_eq!(
-        db.get_invite_code_for_user(user2).await.unwrap().unwrap().1,
-        7
-    );
-
-    // User 3 redeems the invite code and becomes a contact of user 1.
-    let user3_invite = db
-        .create_invite_from_code(&invite_code, "user3@example.com", None)
-        .await
-        .unwrap();
-    let NewUserResult {
-        user_id: user3,
-        inviting_user_id,
-        signup_device_id,
-        ..
-    } = db
-        .create_user_from_invite(
-            &user3_invite,
-            NewUserParams {
-                github_login: "user-3".into(),
-                github_user_id: 3,
-                invite_count: 3,
-            },
-        )
-        .await
-        .unwrap()
-        .unwrap();
-    let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
-    assert_eq!(invite_count, 0);
-    assert_eq!(inviting_user_id, Some(user1));
-    assert!(signup_device_id.is_none());
-    assert_eq!(
-        db.get_contacts(user1).await.unwrap(),
-        [
-            Contact::Accepted {
-                user_id: user2,
-                should_notify: true,
-                busy: false,
-            },
-            Contact::Accepted {
-                user_id: user3,
-                should_notify: true,
-                busy: false,
-            }
-        ]
-    );
-    assert_eq!(
-        db.get_contacts(user3).await.unwrap(),
-        [Contact::Accepted {
-            user_id: user1,
-            should_notify: false,
-            busy: false,
-        }]
-    );
-    assert_eq!(
-        db.get_invite_code_for_user(user3).await.unwrap().unwrap().1,
-        3
-    );
-
-    // Trying to redeem the code for the third time results in an error.
-    db.create_invite_from_code(&invite_code, "user4@example.com", Some("user-4-device-id"))
-        .await
-        .unwrap_err();
-
-    // Invite count can be updated after the code has been created.
-    db.set_invite_count_for_user(user1, 2).await.unwrap();
-    let (latest_code, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
-    assert_eq!(latest_code, invite_code); // Invite code doesn't change when we increment above 0
-    assert_eq!(invite_count, 2);
-
-    // User 4 can now redeem the invite code and becomes a contact of user 1.
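-    // (The code string itself is stable; only the remaining-use count was
-    // bumped from 0 back to 2 above, so redemption succeeds again.)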
-    let user4_invite = db
-        .create_invite_from_code(&invite_code, "user4@example.com", Some("user-4-device-id"))
-        .await
-        .unwrap();
-    let user4 = db
-        .create_user_from_invite(
-            &user4_invite,
-            NewUserParams {
-                github_login: "user-4".into(),
-                github_user_id: 4,
-                invite_count: 5,
-            },
-        )
-        .await
-        .unwrap()
-        .unwrap()
-        .user_id;
-
-    let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
-    assert_eq!(invite_count, 1);
-    assert_eq!(
-        db.get_contacts(user1).await.unwrap(),
-        [
-            Contact::Accepted {
-                user_id: user2,
-                should_notify: true,
-                busy: false,
-            },
-            Contact::Accepted {
-                user_id: user3,
-                should_notify: true,
-                busy: false,
-            },
-            Contact::Accepted {
-                user_id: user4,
-                should_notify: true,
-                busy: false,
-            }
-        ]
-    );
-    assert_eq!(
-        db.get_contacts(user4).await.unwrap(),
-        [Contact::Accepted {
-            user_id: user1,
-            should_notify: false,
-            busy: false,
-        }]
-    );
-    assert_eq!(
-        db.get_invite_code_for_user(user4).await.unwrap().unwrap().1,
-        5
-    );
-
-    // An existing user cannot redeem invite codes.
-    db.create_invite_from_code(&invite_code, "user2@example.com", Some("user-2-device-id"))
-        .await
-        .unwrap_err();
-    let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap();
-    assert_eq!(invite_count, 1);
-}
-
-#[gpui::test]
-async fn test_signups() {
-    let test_db = TestDb::postgres(build_background_executor());
-    let db = test_db.db();
-
-    // people sign up on the waitlist
-    for i in 0..8 {
-        db.create_signup(NewSignup {
-            email_address: format!("person-{i}@example.com"),
-            platform_mac: true,
-            platform_linux: i % 2 == 0,
-            platform_windows: i % 4 == 0,
-            editor_features: vec!["speed".into()],
-            programming_languages: vec!["rust".into(), "c".into()],
-            device_id: Some(format!("device_id_{i}")),
-        })
-        .await
-        .unwrap();
-    }
-
-    assert_eq!(
-        db.get_waitlist_summary().await.unwrap(),
-        WaitlistSummary {
-            count: 8,
-            mac_count: 8,
-            linux_count: 4,
-            windows_count: 2,
-            unknown_count: 0,
-        }
-    );
-
-    // retrieve the next batch of signup emails to send
-    let signups_batch1 = db.get_unsent_invites(3).await.unwrap();
-    let addresses = signups_batch1
-        .iter()
-        .map(|s| &s.email_address)
-        .collect::<Vec<_>>();
-    assert_eq!(
-        addresses,
-        &[
-            "person-0@example.com",
-            "person-1@example.com",
-            "person-2@example.com"
-        ]
-    );
-    assert_ne!(
-        signups_batch1[0].email_confirmation_code,
-        signups_batch1[1].email_confirmation_code
-    );
-
-    // the waitlist isn't updated until we record that the emails
-    // were successfully sent.
-    let signups_batch = db.get_unsent_invites(3).await.unwrap();
-    assert_eq!(signups_batch, signups_batch1);
-
-    // once the emails go out, we can retrieve the next batch
-    // of signups.
-    db.record_sent_invites(&signups_batch1).await.unwrap();
-    let signups_batch2 = db.get_unsent_invites(3).await.unwrap();
-    let addresses = signups_batch2
-        .iter()
-        .map(|s| &s.email_address)
-        .collect::<Vec<_>>();
-    assert_eq!(
-        addresses,
-        &[
-            "person-3@example.com",
-            "person-4@example.com",
-            "person-5@example.com"
-        ]
-    );
-
-    // the sent invites are excluded from the summary.
-    assert_eq!(
-        db.get_waitlist_summary().await.unwrap(),
-        WaitlistSummary {
-            count: 5,
-            mac_count: 5,
-            linux_count: 2,
-            windows_count: 1,
-            unknown_count: 0,
-        }
-    );
-
-    // user completes the signup process by providing their
-    // github account.
-    let NewUserResult {
-        user_id,
-        inviting_user_id,
-        signup_device_id,
-        ..
-    } = db
-        .create_user_from_invite(
-            &Invite {
-                email_address: signups_batch1[0].email_address.clone(),
-                email_confirmation_code: signups_batch1[0].email_confirmation_code.clone(),
-            },
-            NewUserParams {
-                github_login: "person-0".into(),
-                github_user_id: 0,
-                invite_count: 5,
-            },
-        )
-        .await
-        .unwrap()
-        .unwrap();
-    let user = db.get_user_by_id(user_id).await.unwrap().unwrap();
-    assert!(inviting_user_id.is_none());
-    assert_eq!(user.github_login, "person-0");
-    assert_eq!(user.email_address.as_deref(), Some("person-0@example.com"));
-    assert_eq!(user.invite_count, 5);
-    assert_eq!(signup_device_id.unwrap(), "device_id_0");
-
-    // cannot redeem the same signup again.
-    assert!(db
-        .create_user_from_invite(
-            &Invite {
-                email_address: signups_batch1[0].email_address.clone(),
-                email_confirmation_code: signups_batch1[0].email_confirmation_code.clone(),
-            },
-            NewUserParams {
-                github_login: "some-other-github_account".into(),
-                github_user_id: 1,
-                invite_count: 5,
-            },
-        )
-        .await
-        .unwrap()
-        .is_none());
-
-    // cannot redeem a signup with the wrong confirmation code.
-    db.create_user_from_invite(
-        &Invite {
-            email_address: signups_batch1[1].email_address.clone(),
-            email_confirmation_code: "the-wrong-code".to_string(),
-        },
-        NewUserParams {
-            github_login: "person-1".into(),
-            github_user_id: 2,
-            invite_count: 5,
-        },
-    )
-    .await
-    .unwrap_err();
-}
-
-fn build_background_executor() -> Arc<Background> {
-    Deterministic::new(0).build_background()
-}
diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs
index 93ff73fc83..225501c71d 100644
--- a/crates/collab/src/integration_tests.rs
+++ b/crates/collab/src/integration_tests.rs
@@ -1,5 +1,5 @@
 use crate::{
-    db::{self, NewUserParams, SqliteTestDb as TestDb, UserId},
+    db::{self, NewUserParams, TestDb, UserId},
     rpc::{Executor, Server},
     AppState,
 };
@@ -5665,7 +5665,7 @@ impl TestServer {
     async fn start(background: Arc<Background>) -> Self {
         static NEXT_LIVE_KIT_SERVER_ID: AtomicUsize = AtomicUsize::new(0);
 
-        let test_db = TestDb::new(background.clone());
+        let test_db = TestDb::sqlite(background.clone());
         let live_kit_server_id = NEXT_LIVE_KIT_SERVER_ID.fetch_add(1, SeqCst);
         let live_kit_server = live_kit_client::TestServer::create(
             format!("http://livekit.{}.test", live_kit_server_id),
diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs
index 8a2cdc980f..4802fd82b4 100644
--- a/crates/collab/src/main.rs
+++ b/crates/collab/src/main.rs
@@ -1,7 +1,6 @@
 mod api;
 mod auth;
 mod db;
-mod db2;
 mod env;
 mod rpc;
 
@@ -11,7 +10,7 @@ mod integration_tests;
 use anyhow::anyhow;
 use axum::{routing::get, Router};
 use collab::{Error, Result};
-use db::DefaultDb as Db;
+use db::Database;
 use serde::Deserialize;
 use std::{
     env::args,
@@ -45,14 +44,16 @@ pub struct MigrateConfig {
 }
 
 pub struct AppState {
-    db: Arc<Db>,
+    db: Arc<Database>,
     live_kit_client: Option<Arc<dyn live_kit_server::api::Client>>,
     config: Config,
 }
 
 impl AppState {
     async fn new(config: Config) -> Result<Arc<Self>> {
-        let db = Db::new(&config.database_url, 5).await?;
+        let mut db_options = db::ConnectOptions::new(config.database_url.clone());
+        db_options.max_connections(5);
+        let db = Database::new(db_options).await?;
         let live_kit_client = if let Some(((server, key), secret)) = config
             .live_kit_server
             .as_ref()
@@ -92,7 +93,9 @@ async fn main() -> Result<()> {
         }
         Some("migrate") => {
             let config = envy::from_env::<MigrateConfig>().expect("error loading config");
-            let db = Db::new(&config.database_url, 5).await?;
+            let mut db_options = db::ConnectOptions::new(config.database_url.clone());
+            db_options.max_connections(5);
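+            // ConnectOptions exposes more pool knobs than max_connections; a
+            // sketch of the kind of tuning available (values illustrative,
+            // not part of this change):
+            //
+            //     db_options
+            //         .min_connections(1)
+            //         .connect_timeout(Duration::from_secs(5));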
+            let db = Database::new(db_options).await?;
 
             let migrations_path = config
                 .migrations_path
diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs
index 07b9891480..beefe54a9d 100644
--- a/crates/collab/src/rpc.rs
+++ b/crates/collab/src/rpc.rs
@@ -2,7 +2,7 @@ mod connection_pool;
 
 use crate::{
     auth,
-    db::{self, DefaultDb, ProjectId, RoomId, User, UserId},
+    db::{self, Database, ProjectId, RoomId, User, UserId},
     AppState, Result,
 };
 use anyhow::anyhow;
@@ -128,10 +128,10 @@ impl fmt::Debug for Session {
     }
 }
 
-struct DbHandle(Arc<DefaultDb>);
+struct DbHandle(Arc<Database>);
 
 impl Deref for DbHandle {
-    type Target = DefaultDb;
+    type Target = Database;
 
     fn deref(&self) -> &Self::Target {
         self.0.as_ref()

From db1d93576f8aea0364e52ddf1abdf92f74ea0dc1 Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Thu, 1 Dec 2022 15:13:34 +0100
Subject: [PATCH 093/240] Go back to a compiling state, panicking on
 unimplemented db methods

---
 crates/collab/src/db.rs                      | 1267 +++++++++++++++++-
 crates/collab/src/db/project.rs              |   12 +
 crates/collab/src/db/project_collaborator.rs |    4 +-
 crates/collab/src/db/user.rs                 |    8 +
 4 files changed, 1240 insertions(+), 51 deletions(-)

diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs
index d89d041f2a..c5f2f98d0b 100644
--- a/crates/collab/src/db.rs
+++ b/crates/collab/src/db.rs
@@ -12,7 +12,7 @@ mod worktree;
 
 use crate::{Error, Result};
 use anyhow::anyhow;
-use collections::HashMap;
+use collections::{BTreeMap, HashMap, HashSet};
 pub use contact::Contact;
 use dashmap::DashMap;
 use futures::StreamExt;
@@ -255,6 +255,19 @@ impl Database {
         .await
     }
 
+    pub async fn set_user_connected_once(&self, id: UserId, connected_once: bool) -> Result<()> {
+        self.transact(|tx| async move {
+            user::Entity::update_many()
+                .filter(user::Column::Id.eq(id))
+                .col_expr(user::Column::ConnectedOnce, connected_once.into())
+                .exec(&tx)
+                .await?;
+            tx.commit().await?;
+            Ok(())
+        })
+        .await
+    }
+
     pub async fn destroy_user(&self, id: UserId) -> Result<()> {
         self.transact(|tx| async move {
             access_token::Entity::delete_many()
@@ -360,6 +373,17 @@ impl Database {
         .await
     }
 
+    pub async fn is_user_busy(&self, user_id: UserId) -> Result<bool> {
+        self.transact(|tx| async move {
+            let participant = room_participant::Entity::find()
+                .filter(room_participant::Column::UserId.eq(user_id))
+                .one(&tx)
+                .await?;
+            Ok(participant.is_some())
+        })
+        .await
+    }
+
     pub async fn has_contact(&self, user_id_1: UserId, user_id_2: UserId) -> Result<bool> {
         self.transact(|tx| async move {
             let (id_a, id_b) = if user_id_1 < user_id_2 {
@@ -896,63 +920,447 @@ impl Database {
         .await
     }
 
-    // projects
+    // rooms
 
+    pub async fn incoming_call_for_user(
+        &self,
+        user_id: UserId,
+    ) -> Result<Option<proto::IncomingCall>> {
+        self.transact(|tx| async move {
+            let pending_participant = room_participant::Entity::find()
+                .filter(
+                    room_participant::Column::UserId
+                        .eq(user_id)
+                        .and(room_participant::Column::AnsweringConnectionId.is_null()),
+                )
+                .one(&tx)
+                .await?;
+
+            if let Some(pending_participant) = pending_participant {
+                let room = self.get_room(pending_participant.room_id, &tx).await?;
+                Ok(Self::build_incoming_call(&room, user_id))
+            } else {
+                Ok(None)
+            }
+        })
+        .await
+    }
+
+    pub async fn create_room(
+        &self,
+        user_id: UserId,
+        connection_id: ConnectionId,
+        live_kit_room: &str,
+    ) -> Result<RoomGuard<proto::Room>> {
+        self.transact(|tx| async move {
+            todo!()
+            // let room_id = sqlx::query_scalar(
+            //     "
+            //     INSERT INTO rooms (live_kit_room)
+            //     VALUES ($1)
+            //     RETURNING id
+            //     ",
+            // )
+            // .bind(&live_kit_room)
+            // .fetch_one(&mut tx)
+            // .await
+            // .map(RoomId)?;
+
+            // sqlx::query(
+            //     "
+            //     INSERT INTO room_participants (room_id, user_id, answering_connection_id, calling_user_id, calling_connection_id)
+            //     VALUES ($1, $2, $3, $4, $5)
+            //     ",
+            // )
+            // .bind(room_id)
+            // .bind(user_id)
+            // .bind(connection_id.0 as i32)
+            // .bind(user_id)
+            // .bind(connection_id.0 as i32)
+            // .execute(&mut tx)
+            // .await?;
+
+            // let room = self.get_room(room_id, &mut tx).await?;
+            // self.commit_room_transaction(room_id, tx, room).await
+        })
+        .await
+    }
+
+    pub async fn call(
+        &self,
+        room_id: RoomId,
+        calling_user_id: UserId,
+        calling_connection_id: ConnectionId,
+        called_user_id: UserId,
+        initial_project_id: Option<ProjectId>,
+    ) -> Result<RoomGuard<(proto::Room, proto::IncomingCall)>> {
+        self.transact(|tx| async move {
+            todo!()
+            // sqlx::query(
+            //     "
+            //     INSERT INTO room_participants (
+            //         room_id,
+            //         user_id,
+            //         calling_user_id,
+            //         calling_connection_id,
+            //         initial_project_id
+            //     )
+            //     VALUES ($1, $2, $3, $4, $5)
+            //     ",
+            // )
+            // .bind(room_id)
+            // .bind(called_user_id)
+            // .bind(calling_user_id)
+            // .bind(calling_connection_id.0 as i32)
+            // .bind(initial_project_id)
+            // .execute(&mut tx)
+            // .await?;
+
+            // let room = self.get_room(room_id, &mut tx).await?;
+            // let incoming_call = Self::build_incoming_call(&room, called_user_id)
+            //     .ok_or_else(|| anyhow!("failed to build incoming call"))?;
+            // self.commit_room_transaction(room_id, tx, (room, incoming_call))
+            //     .await
+        })
+        .await
+    }
+
+    pub async fn call_failed(
+        &self,
+        room_id: RoomId,
+        called_user_id: UserId,
+    ) -> Result<RoomGuard<proto::Room>> {
+        self.transact(|tx| async move {
+            todo!()
+            // sqlx::query(
+            //     "
+            //     DELETE FROM room_participants
+            //     WHERE room_id = $1 AND user_id = $2
+            //     ",
+            // )
+            // .bind(room_id)
+            // .bind(called_user_id)
+            // .execute(&mut tx)
+            // .await?;
+
+            // let room = self.get_room(room_id, &mut tx).await?;
+            // self.commit_room_transaction(room_id, tx, room).await
+        })
+        .await
+    }
+
+    pub async fn decline_call(
+        &self,
+        expected_room_id: Option<RoomId>,
+        user_id: UserId,
+    ) -> Result<RoomGuard<proto::Room>> {
+        self.transact(|tx| async move {
+            todo!()
+            // let room_id = sqlx::query_scalar(
+            //     "
+            //     DELETE FROM room_participants
+            //     WHERE user_id = $1 AND answering_connection_id IS NULL
+            //     RETURNING room_id
+            //     ",
+            // )
+            // .bind(user_id)
+            // .fetch_one(&mut tx)
+            // .await?;
+            // if expected_room_id.map_or(false, |expected_room_id| expected_room_id != room_id) {
+            //     return Err(anyhow!("declining call on unexpected room"))?;
+            // }
+
+            // let room = self.get_room(room_id, &mut tx).await?;
+            // self.commit_room_transaction(room_id, tx, room).await
+        })
+        .await
+    }
+
+    pub async fn cancel_call(
+        &self,
+        expected_room_id: Option<RoomId>,
+        calling_connection_id: ConnectionId,
+        called_user_id: UserId,
+    ) -> Result<RoomGuard<proto::Room>> {
+        self.transact(|tx| async move {
+            todo!()
+            // let room_id = sqlx::query_scalar(
+            //     "
+            //     DELETE FROM room_participants
+            //     WHERE user_id = $1 AND calling_connection_id = $2 AND answering_connection_id IS NULL
+            //     RETURNING room_id
+            //     ",
+            // )
+            // .bind(called_user_id)
+            // .bind(calling_connection_id.0 as i32)
+            // .fetch_one(&mut tx)
+            // .await?;
+            // if expected_room_id.map_or(false, |expected_room_id| expected_room_id != room_id) {
+            //     return Err(anyhow!("canceling call on unexpected room"))?;
+            // }
+
+            // let room = self.get_room(room_id, &mut tx).await?;
+            // self.commit_room_transaction(room_id, tx, room).await
+        })
+        .await
+    }
+
+    pub async fn join_room(
+        &self,
+        room_id: RoomId,
+        user_id: UserId,
+        connection_id: ConnectionId,
+    ) -> Result<RoomGuard<proto::Room>> {
+        self.transact(|tx| async move {
+            todo!()
+            // sqlx::query(
+            //     "
+            //     UPDATE room_participants
+            //     SET answering_connection_id = $1
+            //     WHERE room_id = $2 AND user_id = $3
+            //     RETURNING 1
+            //     ",
+            // )
+            // .bind(connection_id.0 as i32)
+            // .bind(room_id)
+            // .bind(user_id)
+            // .fetch_one(&mut tx)
+            // .await?;
+
+            // let room = self.get_room(room_id, &mut tx).await?;
+            // self.commit_room_transaction(room_id, tx, room).await
+        })
+        .await
+    }
+
+    pub async fn leave_room(
+        &self,
+        connection_id: ConnectionId,
+    ) -> Result<Option<RoomGuard<LeftRoom>>> {
+        self.transact(|tx| async move {
+            todo!()
+            // // Leave room.
+            // let room_id = sqlx::query_scalar::<_, RoomId>(
+            //     "
+            //     DELETE FROM room_participants
+            //     WHERE answering_connection_id = $1
+            //     RETURNING room_id
+            //     ",
+            // )
+            // .bind(connection_id.0 as i32)
+            // .fetch_optional(&mut tx)
+            // .await?;
+
+            // if let Some(room_id) = room_id {
+            //     // Cancel pending calls initiated by the leaving user.
+            //     let canceled_calls_to_user_ids: Vec<UserId> = sqlx::query_scalar(
+            //         "
+            //         DELETE FROM room_participants
+            //         WHERE calling_connection_id = $1 AND answering_connection_id IS NULL
+            //         RETURNING user_id
+            //         ",
+            //     )
+            //     .bind(connection_id.0 as i32)
+            //     .fetch_all(&mut tx)
+            //     .await?;
+
+            //     let project_ids = sqlx::query_scalar::<_, ProjectId>(
+            //         "
+            //         SELECT project_id
+            //         FROM project_collaborators
+            //         WHERE connection_id = $1
+            //         ",
+            //     )
+            //     .bind(connection_id.0 as i32)
+            //     .fetch_all(&mut tx)
+            //     .await?;
+
+            //     // Leave projects.
+            //     let mut left_projects = HashMap::default();
+            //     if !project_ids.is_empty() {
+            //         let mut params = "?,".repeat(project_ids.len());
+            //         params.pop();
+            //         let query = format!(
+            //             "
+            //             SELECT *
+            //             FROM project_collaborators
+            //             WHERE project_id IN ({params})
+            //             "
+            //         );
+            //         let mut query = sqlx::query_as::<_, ProjectCollaborator>(&query);
+            //         for project_id in project_ids {
+            //             query = query.bind(project_id);
+            //         }
+
+            //         let mut project_collaborators = query.fetch(&mut tx);
+            //         while let Some(collaborator) = project_collaborators.next().await {
+            //             let collaborator = collaborator?;
+            //             let left_project =
+            //                 left_projects
+            //                     .entry(collaborator.project_id)
+            //                     .or_insert(LeftProject {
+            //                         id: collaborator.project_id,
+            //                         host_user_id: Default::default(),
+            //                         connection_ids: Default::default(),
+            //                         host_connection_id: Default::default(),
+            //                     });
+
+            //             let collaborator_connection_id =
+            //                 ConnectionId(collaborator.connection_id as u32);
+            //             if collaborator_connection_id != connection_id {
+            //                 left_project.connection_ids.push(collaborator_connection_id);
+            //             }
+
+            //             if collaborator.is_host {
+            //                 left_project.host_user_id = collaborator.user_id;
+            //                 left_project.host_connection_id =
+            //                     ConnectionId(collaborator.connection_id as u32);
+            //             }
+            //         }
+            //     }
+            //     sqlx::query(
+            //         "
+            //         DELETE FROM project_collaborators
+            //         WHERE connection_id = $1
+            //         ",
+            //     )
+            //     .bind(connection_id.0 as i32)
+            //     .execute(&mut tx)
+            //     .await?;
+
+            //     // Unshare projects.
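+            //     // A possible sea-orm translation of this step (sketch only;
+            //     // the port in this commit still panics with todo!() above):
+            //     //
+            //     //     project::Entity::delete_many()
+            //     //         .filter(
+            //     //             project::Column::RoomId.eq(room_id).and(
+            //     //                 project::Column::HostConnectionId
+            //     //                     .eq(connection_id.0 as i32),
+            //     //             ),
+            //     //         )
+            //     //         .exec(&tx)
+            //     //         .await?;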
+            //     sqlx::query(
+            //         "
+            //         DELETE FROM projects
+            //         WHERE room_id = $1 AND host_connection_id = $2
+            //         ",
+            //     )
+            //     .bind(room_id)
+            //     .bind(connection_id.0 as i32)
+            //     .execute(&mut tx)
+            //     .await?;
+
+            //     let room = self.get_room(room_id, &mut tx).await?;
+            //     Ok(Some(
+            //         self.commit_room_transaction(
+            //             room_id,
+            //             tx,
+            //             LeftRoom {
+            //                 room,
+            //                 left_projects,
+            //                 canceled_calls_to_user_ids,
+            //             },
+            //         )
+            //         .await?,
+            //     ))
+            // } else {
+            //     Ok(None)
+            // }
+        })
+        .await
+    }
+
+    pub async fn update_room_participant_location(
+        &self,
+        room_id: RoomId,
+        connection_id: ConnectionId,
+        location: proto::ParticipantLocation,
+    ) -> Result<RoomGuard<proto::Room>> {
+        self.transact(|tx| async {
+            todo!()
+            // let mut tx = tx;
+            // let location_kind;
+            // let location_project_id;
+            // match location
+            //     .variant
+            //     .as_ref()
+            //     .ok_or_else(|| anyhow!("invalid location"))?
+            // {
+            //     proto::participant_location::Variant::SharedProject(project) => {
+            //         location_kind = 0;
+            //         location_project_id = Some(ProjectId::from_proto(project.id));
+            //     }
+            //     proto::participant_location::Variant::UnsharedProject(_) => {
+            //         location_kind = 1;
+            //         location_project_id = None;
+            //     }
+            //     proto::participant_location::Variant::External(_) => {
+            //         location_kind = 2;
+            //         location_project_id = None;
+            //     }
+            // }
+
+            // sqlx::query(
+            //     "
+            //     UPDATE room_participants
+            //     SET location_kind = $1, location_project_id = $2
+            //     WHERE room_id = $3 AND answering_connection_id = $4
+            //     RETURNING 1
+            //     ",
+            // )
+            // .bind(location_kind)
+            // .bind(location_project_id)
+            // .bind(room_id)
+            // .bind(connection_id.0 as i32)
+            // .fetch_one(&mut tx)
+            // .await?;
+
+            // let room = self.get_room(room_id, &mut tx).await?;
+            // self.commit_room_transaction(room_id, tx, room).await
+        })
+        .await
+    }
+
+    async fn get_guest_connection_ids(
+        &self,
+        project_id: ProjectId,
+        tx: &DatabaseTransaction,
+    ) -> Result<Vec<ConnectionId>> {
+        todo!()
+        // let mut guest_connection_ids = Vec::new();
+        // let mut db_guest_connection_ids = sqlx::query_scalar::<_, i32>(
+        //     "
+        //     SELECT connection_id
+        //     FROM project_collaborators
+        //     WHERE project_id = $1 AND is_host = FALSE
+        //     ",
+        // )
+        // .bind(project_id)
+        // .fetch(tx);
+        // while let Some(connection_id) = db_guest_connection_ids.next().await {
+        //     guest_connection_ids.push(ConnectionId(connection_id? as u32));
+        // }
+        // Ok(guest_connection_ids)
+    }
+
+    fn build_incoming_call(
+        room: &proto::Room,
+        called_user_id: UserId,
+    ) -> Option<proto::IncomingCall> {
+        let pending_participant = room
+            .pending_participants
+            .iter()
+            .find(|participant| participant.user_id == called_user_id.to_proto())?;
+
+        Some(proto::IncomingCall {
+            room_id: room.id,
+            calling_user_id: pending_participant.calling_user_id,
+            participant_user_ids: room
+                .participants
+                .iter()
+                .map(|participant| participant.user_id)
+                .collect(),
+            initial_project: room.participants.iter().find_map(|participant| {
+                let initial_project_id = pending_participant.initial_project_id?;
+                participant
+                    .projects
+                    .iter()
+                    .find(|project| project.id == initial_project_id)
+                    .cloned()
+            }),
+        })
+    }
+
     async fn get_room(&self, room_id: RoomId, tx: &DatabaseTransaction) -> Result<proto::Room> {
         let db_room = room::Entity::find_by_id(room_id)
             .one(tx)
             .await?
@@ -1057,6 +1465,736 @@ impl Database {
         })
     }
 
+    // projects
+
+    pub async fn project_count_excluding_admins(&self) -> Result<usize> {
+        #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
+        enum QueryAs {
+            Count,
+        }
+
+        self.transact(|tx| async move {
+            Ok(project::Entity::find()
+                .select_only()
+                .column_as(project::Column::Id.count(), QueryAs::Count)
+                .inner_join(user::Entity)
+                .filter(user::Column::Admin.eq(false))
+                .into_values::<_, QueryAs>()
+                .one(&tx)
+                .await?
+                .unwrap_or(0) as usize)
+        })
+        .await
+    }
+
+    pub async fn share_project(
+        &self,
+        room_id: RoomId,
+        connection_id: ConnectionId,
+        worktrees: &[proto::WorktreeMetadata],
+    ) -> Result<RoomGuard<(ProjectId, proto::Room)>> {
+        self.transact(|tx| async move {
+            let participant = room_participant::Entity::find()
+                .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0))
+                .one(&tx)
+                .await?
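+            // Roughly `SELECT * FROM room_participants WHERE
+            // answering_connection_id = $1`: only a connection that has
+            // actually joined the room has a matching row here.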
+ .ok_or_else(|| anyhow!("could not find participant"))?; + if participant.room_id != room_id { + return Err(anyhow!("shared project on unexpected room"))?; + } + + let project = project::ActiveModel { + room_id: ActiveValue::set(participant.room_id), + host_user_id: ActiveValue::set(participant.user_id), + host_connection_id: ActiveValue::set(connection_id.0 as i32), + ..Default::default() + } + .insert(&tx) + .await?; + + worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel { + id: ActiveValue::set(worktree.id as i32), + project_id: ActiveValue::set(project.id), + abs_path: ActiveValue::set(worktree.abs_path.clone()), + root_name: ActiveValue::set(worktree.root_name.clone()), + visible: ActiveValue::set(worktree.visible), + scan_id: ActiveValue::set(0), + is_complete: ActiveValue::set(false), + })) + .exec(&tx) + .await?; + + project_collaborator::ActiveModel { + project_id: ActiveValue::set(project.id), + connection_id: ActiveValue::set(connection_id.0 as i32), + user_id: ActiveValue::set(participant.user_id), + replica_id: ActiveValue::set(ReplicaId(0)), + is_host: ActiveValue::set(true), + ..Default::default() + } + .insert(&tx) + .await?; + + let room = self.get_room(room_id, &tx).await?; + self.commit_room_transaction(room_id, tx, (project.id, room)) + .await + }) + .await + } + + pub async fn unshare_project( + &self, + project_id: ProjectId, + connection_id: ConnectionId, + ) -> Result)>> { + self.transact(|tx| async move { + todo!() + // let guest_connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; + // let room_id: RoomId = sqlx::query_scalar( + // " + // DELETE FROM projects + // WHERE id = $1 AND host_connection_id = $2 + // RETURNING room_id + // ", + // ) + // .bind(project_id) + // .bind(connection_id.0 as i32) + // .fetch_one(&mut tx) + // .await?; + // let room = self.get_room(room_id, &mut tx).await?; + // self.commit_room_transaction(room_id, tx, (room, guest_connection_ids)) + // .await + }) + .await + } + + pub async fn update_project( + &self, + project_id: ProjectId, + connection_id: ConnectionId, + worktrees: &[proto::WorktreeMetadata], + ) -> Result)>> { + self.transact(|tx| async move { + todo!() + // let room_id: RoomId = sqlx::query_scalar( + // " + // SELECT room_id + // FROM projects + // WHERE id = $1 AND host_connection_id = $2 + // ", + // ) + // .bind(project_id) + // .bind(connection_id.0 as i32) + // .fetch_one(&mut tx) + // .await?; + + // if !worktrees.is_empty() { + // let mut params = "(?, ?, ?, ?, ?, ?, ?),".repeat(worktrees.len()); + // params.pop(); + // let query = format!( + // " + // INSERT INTO worktrees ( + // project_id, + // id, + // root_name, + // abs_path, + // visible, + // scan_id, + // is_complete + // ) + // VALUES {params} + // ON CONFLICT (project_id, id) DO UPDATE SET root_name = excluded.root_name + // " + // ); + + // let mut query = sqlx::query(&query); + // for worktree in worktrees { + // query = query + // .bind(project_id) + // .bind(worktree.id as i32) + // .bind(&worktree.root_name) + // .bind(&worktree.abs_path) + // .bind(worktree.visible) + // .bind(0) + // .bind(false) + // } + // query.execute(&mut tx).await?; + // } + + // let mut params = "?,".repeat(worktrees.len()); + // if !worktrees.is_empty() { + // params.pop(); + // } + // let query = format!( + // " + // DELETE FROM worktrees + // WHERE project_id = ? 
AND id NOT IN ({params}) + // ", + // ); + + // let mut query = sqlx::query(&query).bind(project_id); + // for worktree in worktrees { + // query = query.bind(WorktreeId(worktree.id as i32)); + // } + // query.execute(&mut tx).await?; + + // let guest_connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; + // let room = self.get_room(room_id, &mut tx).await?; + // self.commit_room_transaction(room_id, tx, (room, guest_connection_ids)) + // .await + }) + .await + } + + pub async fn update_worktree( + &self, + update: &proto::UpdateWorktree, + connection_id: ConnectionId, + ) -> Result>> { + self.transact(|tx| async move { + todo!() + // let project_id = ProjectId::from_proto(update.project_id); + // let worktree_id = WorktreeId::from_proto(update.worktree_id); + + // // Ensure the update comes from the host. + // let room_id: RoomId = sqlx::query_scalar( + // " + // SELECT room_id + // FROM projects + // WHERE id = $1 AND host_connection_id = $2 + // ", + // ) + // .bind(project_id) + // .bind(connection_id.0 as i32) + // .fetch_one(&mut tx) + // .await?; + + // // Update metadata. + // sqlx::query( + // " + // UPDATE worktrees + // SET + // root_name = $1, + // scan_id = $2, + // is_complete = $3, + // abs_path = $4 + // WHERE project_id = $5 AND id = $6 + // RETURNING 1 + // ", + // ) + // .bind(&update.root_name) + // .bind(update.scan_id as i64) + // .bind(update.is_last_update) + // .bind(&update.abs_path) + // .bind(project_id) + // .bind(worktree_id) + // .fetch_one(&mut tx) + // .await?; + + // if !update.updated_entries.is_empty() { + // let mut params = + // "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?),".repeat(update.updated_entries.len()); + // params.pop(); + + // let query = format!( + // " + // INSERT INTO worktree_entries ( + // project_id, + // worktree_id, + // id, + // is_dir, + // path, + // inode, + // mtime_seconds, + // mtime_nanos, + // is_symlink, + // is_ignored + // ) + // VALUES {params} + // ON CONFLICT (project_id, worktree_id, id) DO UPDATE SET + // is_dir = excluded.is_dir, + // path = excluded.path, + // inode = excluded.inode, + // mtime_seconds = excluded.mtime_seconds, + // mtime_nanos = excluded.mtime_nanos, + // is_symlink = excluded.is_symlink, + // is_ignored = excluded.is_ignored + // " + // ); + // let mut query = sqlx::query(&query); + // for entry in &update.updated_entries { + // let mtime = entry.mtime.clone().unwrap_or_default(); + // query = query + // .bind(project_id) + // .bind(worktree_id) + // .bind(entry.id as i64) + // .bind(entry.is_dir) + // .bind(&entry.path) + // .bind(entry.inode as i64) + // .bind(mtime.seconds as i64) + // .bind(mtime.nanos as i32) + // .bind(entry.is_symlink) + // .bind(entry.is_ignored); + // } + // query.execute(&mut tx).await?; + // } + + // if !update.removed_entries.is_empty() { + // let mut params = "?,".repeat(update.removed_entries.len()); + // params.pop(); + // let query = format!( + // " + // DELETE FROM worktree_entries + // WHERE project_id = ? AND worktree_id = ? 
AND id IN ({params}) + // " + // ); + + // let mut query = sqlx::query(&query).bind(project_id).bind(worktree_id); + // for entry_id in &update.removed_entries { + // query = query.bind(*entry_id as i64); + // } + // query.execute(&mut tx).await?; + // } + + // let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; + // self.commit_room_transaction(room_id, tx, connection_ids) + // .await + }) + .await + } + + pub async fn update_diagnostic_summary( + &self, + update: &proto::UpdateDiagnosticSummary, + connection_id: ConnectionId, + ) -> Result>> { + self.transact(|tx| async { + todo!() + // let project_id = ProjectId::from_proto(update.project_id); + // let worktree_id = WorktreeId::from_proto(update.worktree_id); + // let summary = update + // .summary + // .as_ref() + // .ok_or_else(|| anyhow!("invalid summary"))?; + + // // Ensure the update comes from the host. + // let room_id: RoomId = sqlx::query_scalar( + // " + // SELECT room_id + // FROM projects + // WHERE id = $1 AND host_connection_id = $2 + // ", + // ) + // .bind(project_id) + // .bind(connection_id.0 as i32) + // .fetch_one(&mut tx) + // .await?; + + // // Update summary. + // sqlx::query( + // " + // INSERT INTO worktree_diagnostic_summaries ( + // project_id, + // worktree_id, + // path, + // language_server_id, + // error_count, + // warning_count + // ) + // VALUES ($1, $2, $3, $4, $5, $6) + // ON CONFLICT (project_id, worktree_id, path) DO UPDATE SET + // language_server_id = excluded.language_server_id, + // error_count = excluded.error_count, + // warning_count = excluded.warning_count + // ", + // ) + // .bind(project_id) + // .bind(worktree_id) + // .bind(&summary.path) + // .bind(summary.language_server_id as i64) + // .bind(summary.error_count as i32) + // .bind(summary.warning_count as i32) + // .execute(&mut tx) + // .await?; + + // let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; + // self.commit_room_transaction(room_id, tx, connection_ids) + // .await + }) + .await + } + + pub async fn start_language_server( + &self, + update: &proto::StartLanguageServer, + connection_id: ConnectionId, + ) -> Result>> { + self.transact(|tx| async { + todo!() + // let project_id = ProjectId::from_proto(update.project_id); + // let server = update + // .server + // .as_ref() + // .ok_or_else(|| anyhow!("invalid language server"))?; + + // // Ensure the update comes from the host. + // let room_id: RoomId = sqlx::query_scalar( + // " + // SELECT room_id + // FROM projects + // WHERE id = $1 AND host_connection_id = $2 + // ", + // ) + // .bind(project_id) + // .bind(connection_id.0 as i32) + // .fetch_one(&mut tx) + // .await?; + + // // Add the newly-started language server. 
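+            // // Upsert so that a language server restarting with the same id just refreshes its name.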
+ // sqlx::query( + // " + // INSERT INTO language_servers (project_id, id, name) + // VALUES ($1, $2, $3) + // ON CONFLICT (project_id, id) DO UPDATE SET + // name = excluded.name + // ", + // ) + // .bind(project_id) + // .bind(server.id as i64) + // .bind(&server.name) + // .execute(&mut tx) + // .await?; + + // let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; + // self.commit_room_transaction(room_id, tx, connection_ids) + // .await + }) + .await + } + + pub async fn join_project( + &self, + project_id: ProjectId, + connection_id: ConnectionId, + ) -> Result> { + self.transact(|tx| async move { + todo!() + // let (room_id, user_id) = sqlx::query_as::<_, (RoomId, UserId)>( + // " + // SELECT room_id, user_id + // FROM room_participants + // WHERE answering_connection_id = $1 + // ", + // ) + // .bind(connection_id.0 as i32) + // .fetch_one(&mut tx) + // .await?; + + // // Ensure project id was shared on this room. + // sqlx::query( + // " + // SELECT 1 + // FROM projects + // WHERE id = $1 AND room_id = $2 + // ", + // ) + // .bind(project_id) + // .bind(room_id) + // .fetch_one(&mut tx) + // .await?; + + // let mut collaborators = sqlx::query_as::<_, ProjectCollaborator>( + // " + // SELECT * + // FROM project_collaborators + // WHERE project_id = $1 + // ", + // ) + // .bind(project_id) + // .fetch_all(&mut tx) + // .await?; + // let replica_ids = collaborators + // .iter() + // .map(|c| c.replica_id) + // .collect::>(); + // let mut replica_id = ReplicaId(1); + // while replica_ids.contains(&replica_id) { + // replica_id.0 += 1; + // } + // let new_collaborator = ProjectCollaborator { + // project_id, + // connection_id: connection_id.0 as i32, + // user_id, + // replica_id, + // is_host: false, + // }; + + // sqlx::query( + // " + // INSERT INTO project_collaborators ( + // project_id, + // connection_id, + // user_id, + // replica_id, + // is_host + // ) + // VALUES ($1, $2, $3, $4, $5) + // ", + // ) + // .bind(new_collaborator.project_id) + // .bind(new_collaborator.connection_id) + // .bind(new_collaborator.user_id) + // .bind(new_collaborator.replica_id) + // .bind(new_collaborator.is_host) + // .execute(&mut tx) + // .await?; + // collaborators.push(new_collaborator); + + // let worktree_rows = sqlx::query_as::<_, WorktreeRow>( + // " + // SELECT * + // FROM worktrees + // WHERE project_id = $1 + // ", + // ) + // .bind(project_id) + // .fetch_all(&mut tx) + // .await?; + // let mut worktrees = worktree_rows + // .into_iter() + // .map(|worktree_row| { + // ( + // worktree_row.id, + // Worktree { + // id: worktree_row.id, + // abs_path: worktree_row.abs_path, + // root_name: worktree_row.root_name, + // visible: worktree_row.visible, + // entries: Default::default(), + // diagnostic_summaries: Default::default(), + // scan_id: worktree_row.scan_id as u64, + // is_complete: worktree_row.is_complete, + // }, + // ) + // }) + // .collect::>(); + + // // Populate worktree entries. 
+ // { + // let mut entries = sqlx::query_as::<_, WorktreeEntry>( + // " + // SELECT * + // FROM worktree_entries + // WHERE project_id = $1 + // ", + // ) + // .bind(project_id) + // .fetch(&mut tx); + // while let Some(entry) = entries.next().await { + // let entry = entry?; + // if let Some(worktree) = worktrees.get_mut(&entry.worktree_id) { + // worktree.entries.push(proto::Entry { + // id: entry.id as u64, + // is_dir: entry.is_dir, + // path: entry.path, + // inode: entry.inode as u64, + // mtime: Some(proto::Timestamp { + // seconds: entry.mtime_seconds as u64, + // nanos: entry.mtime_nanos as u32, + // }), + // is_symlink: entry.is_symlink, + // is_ignored: entry.is_ignored, + // }); + // } + // } + // } + + // // Populate worktree diagnostic summaries. + // { + // let mut summaries = sqlx::query_as::<_, WorktreeDiagnosticSummary>( + // " + // SELECT * + // FROM worktree_diagnostic_summaries + // WHERE project_id = $1 + // ", + // ) + // .bind(project_id) + // .fetch(&mut tx); + // while let Some(summary) = summaries.next().await { + // let summary = summary?; + // if let Some(worktree) = worktrees.get_mut(&summary.worktree_id) { + // worktree + // .diagnostic_summaries + // .push(proto::DiagnosticSummary { + // path: summary.path, + // language_server_id: summary.language_server_id as u64, + // error_count: summary.error_count as u32, + // warning_count: summary.warning_count as u32, + // }); + // } + // } + // } + + // // Populate language servers. + // let language_servers = sqlx::query_as::<_, LanguageServer>( + // " + // SELECT * + // FROM language_servers + // WHERE project_id = $1 + // ", + // ) + // .bind(project_id) + // .fetch_all(&mut tx) + // .await?; + + // self.commit_room_transaction( + // room_id, + // tx, + // ( + // Project { + // collaborators, + // worktrees, + // language_servers: language_servers + // .into_iter() + // .map(|language_server| proto::LanguageServer { + // id: language_server.id.to_proto(), + // name: language_server.name, + // }) + // .collect(), + // }, + // replica_id as ReplicaId, + // ), + // ) + // .await + }) + .await + } + + pub async fn leave_project( + &self, + project_id: ProjectId, + connection_id: ConnectionId, + ) -> Result> { + self.transact(|tx| async move { + todo!() + // let result = sqlx::query( + // " + // DELETE FROM project_collaborators + // WHERE project_id = $1 AND connection_id = $2 + // ", + // ) + // .bind(project_id) + // .bind(connection_id.0 as i32) + // .execute(&mut tx) + // .await?; + + // if result.rows_affected() == 0 { + // Err(anyhow!("not a collaborator on this project"))?; + // } + + // let connection_ids = sqlx::query_scalar::<_, i32>( + // " + // SELECT connection_id + // FROM project_collaborators + // WHERE project_id = $1 + // ", + // ) + // .bind(project_id) + // .fetch_all(&mut tx) + // .await? 
+ // .into_iter() + // .map(|id| ConnectionId(id as u32)) + // .collect(); + + // let (room_id, host_user_id, host_connection_id) = + // sqlx::query_as::<_, (RoomId, i32, i32)>( + // " + // SELECT room_id, host_user_id, host_connection_id + // FROM projects + // WHERE id = $1 + // ", + // ) + // .bind(project_id) + // .fetch_one(&mut tx) + // .await?; + + // self.commit_room_transaction( + // room_id, + // tx, + // LeftProject { + // id: project_id, + // host_user_id: UserId(host_user_id), + // host_connection_id: ConnectionId(host_connection_id as u32), + // connection_ids, + // }, + // ) + // .await + }) + .await + } + + pub async fn project_collaborators( + &self, + project_id: ProjectId, + connection_id: ConnectionId, + ) -> Result> { + self.transact(|tx| async move { + todo!() + // let collaborators = sqlx::query_as::<_, ProjectCollaborator>( + // " + // SELECT * + // FROM project_collaborators + // WHERE project_id = $1 + // ", + // ) + // .bind(project_id) + // .fetch_all(&mut tx) + // .await?; + + // if collaborators + // .iter() + // .any(|collaborator| collaborator.connection_id == connection_id.0 as i32) + // { + // Ok(collaborators) + // } else { + // Err(anyhow!("no such project"))? + // } + }) + .await + } + + pub async fn project_connection_ids( + &self, + project_id: ProjectId, + connection_id: ConnectionId, + ) -> Result> { + self.transact(|tx| async move { + todo!() + // let connection_ids = sqlx::query_scalar::<_, i32>( + // " + // SELECT connection_id + // FROM project_collaborators + // WHERE project_id = $1 + // ", + // ) + // .bind(project_id) + // .fetch_all(&mut tx) + // .await?; + + // if connection_ids.contains(&(connection_id.0 as i32)) { + // Ok(connection_ids + // .into_iter() + // .map(|connection_id| ConnectionId(connection_id as u32)) + // .collect()) + // } else { + // Err(anyhow!("no such project"))? + // } + }) + .await + } + + // access tokens + pub async fn create_access_token_hash( &self, user_id: UserId, @@ -1334,14 +2472,45 @@ macro_rules! 
id_type {
 id_type!(AccessTokenId);
 id_type!(ContactId);
-id_type!(UserId);
 id_type!(RoomId);
 id_type!(RoomParticipantId);
 id_type!(ProjectId);
 id_type!(ProjectCollaboratorId);
+id_type!(ReplicaId);
 id_type!(SignupId);
+id_type!(UserId);
 id_type!(WorktreeId);
 
+pub struct LeftRoom {
+    pub room: proto::Room,
+    pub left_projects: HashMap<ProjectId, LeftProject>,
+    pub canceled_calls_to_user_ids: Vec<UserId>,
+}
+
+pub struct Project {
+    pub collaborators: Vec<ProjectCollaborator>,
+    pub worktrees: BTreeMap<WorktreeId, Worktree>,
+    pub language_servers: Vec<proto::LanguageServer>,
+}
+
+pub struct LeftProject {
+    pub id: ProjectId,
+    pub host_user_id: UserId,
+    pub host_connection_id: ConnectionId,
+    pub connection_ids: Vec<ConnectionId>,
+}
+
+pub struct Worktree {
+    pub id: WorktreeId,
+    pub abs_path: String,
+    pub root_name: String,
+    pub visible: bool,
+    pub entries: Vec<proto::Entry>,
+    pub diagnostic_summaries: Vec<proto::DiagnosticSummary>,
+    pub scan_id: u64,
+    pub is_complete: bool,
+}
+
 #[cfg(test)]
 pub use test::*;
diff --git a/crates/collab/src/db/project.rs b/crates/collab/src/db/project.rs
index 21ee0b27d1..a9f0d1cb47 100644
--- a/crates/collab/src/db/project.rs
+++ b/crates/collab/src/db/project.rs
@@ -13,6 +13,12 @@ pub struct Model {
 
 #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
 pub enum Relation {
+    #[sea_orm(
+        belongs_to = "super::user::Entity",
+        from = "Column::HostUserId",
+        to = "super::user::Column::Id"
+    )]
+    HostUser,
     #[sea_orm(
         belongs_to = "super::room::Entity",
         from = "Column::RoomId",
@@ -23,6 +29,12 @@ pub enum Relation {
     Worktree,
 }
 
+impl Related<super::user::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::HostUser.def()
+    }
+}
+
 impl Related<super::room::Entity> for Entity {
     fn to() -> RelationDef {
         Relation::Room.def()
diff --git a/crates/collab/src/db/project_collaborator.rs b/crates/collab/src/db/project_collaborator.rs
index 3e572fe5d4..fb1d565e3a 100644
--- a/crates/collab/src/db/project_collaborator.rs
+++ b/crates/collab/src/db/project_collaborator.rs
@@ -1,4 +1,4 @@
-use super::{ProjectCollaboratorId, ProjectId, UserId};
+use super::{ProjectCollaboratorId, ProjectId, ReplicaId, UserId};
 use sea_orm::entity::prelude::*;
 
 #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
@@ -9,7 +9,7 @@ pub struct Model {
     pub project_id: ProjectId,
     pub connection_id: i32,
     pub user_id: UserId,
-    pub replica_id: i32,
+    pub replica_id: ReplicaId,
     pub is_host: bool,
 }
 
diff --git a/crates/collab/src/db/user.rs b/crates/collab/src/db/user.rs
index b6e096f667..c2b157bd0a 100644
--- a/crates/collab/src/db/user.rs
+++ b/crates/collab/src/db/user.rs
@@ -24,6 +24,8 @@ pub enum Relation {
     AccessToken,
     #[sea_orm(has_one = "super::room_participant::Entity")]
     RoomParticipant,
+    #[sea_orm(has_many = "super::project::Entity")]
+    HostedProjects,
 }
 
 impl Related<super::access_token::Entity> for Entity {
@@ -38,4 +40,10 @@ impl Related<super::room_participant::Entity> for Entity {
     }
 }
 
+impl Related<super::project::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::HostedProjects.def()
+    }
+}
+
 impl ActiveModelBehavior for ActiveModel {}

From aebc6326a9960545fc164c17d1f19ecd0e9cf010 Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Thu, 1 Dec 2022 15:22:12 +0100
Subject: [PATCH 094/240] Implement `Database::create_room`

---
 crates/collab/src/db.rs      | 47 +++++++++++++++---------------------
 crates/collab/src/db/room.rs |  2 +-
 2 files changed, 21 insertions(+), 28 deletions(-)

diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs
index c5f2f98d0b..30049f2d05 100644
--- a/crates/collab/src/db.rs
+++ b/crates/collab/src/db.rs
@@ -953,35 +953,27 @@ impl Database {
         live_kit_room: &str,
     ) -> Result<RoomGuard<proto::Room>> {
         self.transact(|tx| async move {
-            todo!()
-            // let room_id = sqlx::query_scalar(
-            //     "
-            //     INSERT INTO rooms
(live_kit_room) - // VALUES ($1) - // RETURNING id - // ", - // ) - // .bind(&live_kit_room) - // .fetch_one(&mut tx) - // .await - // .map(RoomId)?; + let room = room::ActiveModel { + live_kit_room: ActiveValue::set(live_kit_room.into()), + ..Default::default() + } + .insert(&tx) + .await?; + let room_id = room.id; - // sqlx::query( - // " - // INSERT INTO room_participants (room_id, user_id, answering_connection_id, calling_user_id, calling_connection_id) - // VALUES ($1, $2, $3, $4, $5) - // ", - // ) - // .bind(room_id) - // .bind(user_id) - // .bind(connection_id.0 as i32) - // .bind(user_id) - // .bind(connection_id.0 as i32) - // .execute(&mut tx) - // .await?; + room_participant::ActiveModel { + room_id: ActiveValue::set(room_id), + user_id: ActiveValue::set(user_id), + answering_connection_id: ActiveValue::set(Some(connection_id.0 as i32)), + calling_user_id: ActiveValue::set(user_id), + calling_connection_id: ActiveValue::set(connection_id.0 as i32), + ..Default::default() + } + .insert(&tx) + .await?; - // let room = self.get_room(room_id, &mut tx).await?; - // self.commit_room_transaction(room_id, tx, room).await + let room = self.get_room(room_id, &tx).await?; + self.commit_room_transaction(room_id, tx, room).await }) .await } @@ -1411,6 +1403,7 @@ impl Database { }); } } + drop(db_participants); let mut db_projects = db_room .find_related(project::Entity) diff --git a/crates/collab/src/db/room.rs b/crates/collab/src/db/room.rs index b57e612d46..7dbf03a780 100644 --- a/crates/collab/src/db/room.rs +++ b/crates/collab/src/db/room.rs @@ -2,7 +2,7 @@ use super::RoomId; use sea_orm::entity::prelude::*; #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] -#[sea_orm(table_name = "room_participants")] +#[sea_orm(table_name = "rooms")] pub struct Model { #[sea_orm(primary_key)] pub id: RoomId, From 256e3e8e0fbcd03fcfe9e849d5252eb53318ed54 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 1 Dec 2022 16:06:01 +0100 Subject: [PATCH 095/240] Get basic calls working again with sea-orm --- crates/collab/src/db.rs | 467 ++++++++++++++++++++-------------------- 1 file changed, 230 insertions(+), 237 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 30049f2d05..bb1bff7ff8 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -987,32 +987,22 @@ impl Database { initial_project_id: Option, ) -> Result> { self.transact(|tx| async move { - todo!() - // sqlx::query( - // " - // INSERT INTO room_participants ( - // room_id, - // user_id, - // calling_user_id, - // calling_connection_id, - // initial_project_id - // ) - // VALUES ($1, $2, $3, $4, $5) - // ", - // ) - // .bind(room_id) - // .bind(called_user_id) - // .bind(calling_user_id) - // .bind(calling_connection_id.0 as i32) - // .bind(initial_project_id) - // .execute(&mut tx) - // .await?; + room_participant::ActiveModel { + room_id: ActiveValue::set(room_id), + user_id: ActiveValue::set(called_user_id), + calling_user_id: ActiveValue::set(calling_user_id), + calling_connection_id: ActiveValue::set(calling_connection_id.0 as i32), + initial_project_id: ActiveValue::set(initial_project_id), + ..Default::default() + } + .insert(&tx) + .await?; - // let room = self.get_room(room_id, &mut tx).await?; - // let incoming_call = Self::build_incoming_call(&room, called_user_id) - // .ok_or_else(|| anyhow!("failed to build incoming call"))?; - // self.commit_room_transaction(room_id, tx, (room, incoming_call)) - // .await + let room = self.get_room(room_id, &tx).await?; + let incoming_call 
= Self::build_incoming_call(&room, called_user_id) + .ok_or_else(|| anyhow!("failed to build incoming call"))?; + self.commit_room_transaction(room_id, tx, (room, incoming_call)) + .await }) .await } @@ -1023,20 +1013,16 @@ impl Database { called_user_id: UserId, ) -> Result> { self.transact(|tx| async move { - todo!() - // sqlx::query( - // " - // DELETE FROM room_participants - // WHERE room_id = $1 AND user_id = $2 - // ", - // ) - // .bind(room_id) - // .bind(called_user_id) - // .execute(&mut tx) - // .await?; - - // let room = self.get_room(room_id, &mut tx).await?; - // self.commit_room_transaction(room_id, tx, room).await + room_participant::Entity::delete_many() + .filter( + room_participant::Column::RoomId + .eq(room_id) + .and(room_participant::Column::UserId.eq(called_user_id)), + ) + .exec(&tx) + .await?; + let room = self.get_room(room_id, &tx).await?; + self.commit_room_transaction(room_id, tx, room).await }) .await } @@ -1047,23 +1033,27 @@ impl Database { user_id: UserId, ) -> Result> { self.transact(|tx| async move { - todo!() - // let room_id = sqlx::query_scalar( - // " - // DELETE FROM room_participants - // WHERE user_id = $1 AND answering_connection_id IS NULL - // RETURNING room_id - // ", - // ) - // .bind(user_id) - // .fetch_one(&mut tx) - // .await?; - // if expected_room_id.map_or(false, |expected_room_id| expected_room_id != room_id) { - // return Err(anyhow!("declining call on unexpected room"))?; - // } + let participant = room_participant::Entity::find() + .filter( + room_participant::Column::UserId + .eq(user_id) + .and(room_participant::Column::AnsweringConnectionId.is_null()), + ) + .one(&tx) + .await? + .ok_or_else(|| anyhow!("could not decline call"))?; + let room_id = participant.room_id; - // let room = self.get_room(room_id, &mut tx).await?; - // self.commit_room_transaction(room_id, tx, room).await + if expected_room_id.map_or(false, |expected_room_id| expected_room_id != room_id) { + return Err(anyhow!("declining call on unexpected room"))?; + } + + room_participant::Entity::delete(participant.into_active_model()) + .exec(&tx) + .await?; + + let room = self.get_room(room_id, &tx).await?; + self.commit_room_transaction(room_id, tx, room).await }) .await } @@ -1075,24 +1065,30 @@ impl Database { called_user_id: UserId, ) -> Result> { self.transact(|tx| async move { - todo!() - // let room_id = sqlx::query_scalar( - // " - // DELETE FROM room_participants - // WHERE user_id = $1 AND calling_connection_id = $2 AND answering_connection_id IS NULL - // RETURNING room_id - // ", - // ) - // .bind(called_user_id) - // .bind(calling_connection_id.0 as i32) - // .fetch_one(&mut tx) - // .await?; - // if expected_room_id.map_or(false, |expected_room_id| expected_room_id != room_id) { - // return Err(anyhow!("canceling call on unexpected room"))?; - // } + let participant = room_participant::Entity::find() + .filter( + room_participant::Column::UserId + .eq(called_user_id) + .and( + room_participant::Column::CallingConnectionId + .eq(calling_connection_id.0 as i32), + ) + .and(room_participant::Column::AnsweringConnectionId.is_null()), + ) + .one(&tx) + .await? 
+ .ok_or_else(|| anyhow!("could not cancel call"))?; + let room_id = participant.room_id; + if expected_room_id.map_or(false, |expected_room_id| expected_room_id != room_id) { + return Err(anyhow!("canceling call on unexpected room"))?; + } - // let room = self.get_room(room_id, &mut tx).await?; - // self.commit_room_transaction(room_id, tx, room).await + room_participant::Entity::delete(participant.into_active_model()) + .exec(&tx) + .await?; + + let room = self.get_room(room_id, &tx).await?; + self.commit_room_transaction(room_id, tx, room).await }) .await } @@ -1104,23 +1100,25 @@ impl Database { connection_id: ConnectionId, ) -> Result> { self.transact(|tx| async move { - todo!() - // sqlx::query( - // " - // UPDATE room_participants - // SET answering_connection_id = $1 - // WHERE room_id = $2 AND user_id = $3 - // RETURNING 1 - // ", - // ) - // .bind(connection_id.0 as i32) - // .bind(room_id) - // .bind(user_id) - // .fetch_one(&mut tx) - // .await?; - - // let room = self.get_room(room_id, &mut tx).await?; - // self.commit_room_transaction(room_id, tx, room).await + let result = room_participant::Entity::update_many() + .filter( + room_participant::Column::RoomId + .eq(room_id) + .and(room_participant::Column::UserId.eq(user_id)) + .and(room_participant::Column::AnsweringConnectionId.is_null()), + ) + .col_expr( + room_participant::Column::AnsweringConnectionId, + connection_id.0.into(), + ) + .exec(&tx) + .await?; + if result.rows_affected == 0 { + Err(anyhow!("room does not exist or was already joined"))? + } else { + let room = self.get_room(room_id, &tx).await?; + self.commit_room_transaction(room_id, tx, room).await + } }) .await } @@ -1130,124 +1128,117 @@ impl Database { connection_id: ConnectionId, ) -> Result>> { self.transact(|tx| async move { - todo!() - // // Leave room. - // let room_id = sqlx::query_scalar::<_, RoomId>( - // " - // DELETE FROM room_participants - // WHERE answering_connection_id = $1 - // RETURNING room_id - // ", - // ) - // .bind(connection_id.0 as i32) - // .fetch_optional(&mut tx) - // .await?; + let leaving_participant = room_participant::Entity::find() + .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0)) + .one(&tx) + .await?; - // if let Some(room_id) = room_id { - // // Cancel pending calls initiated by the leaving user. - // let canceled_calls_to_user_ids: Vec = sqlx::query_scalar( - // " - // DELETE FROM room_participants - // WHERE calling_connection_id = $1 AND answering_connection_id IS NULL - // RETURNING user_id - // ", - // ) - // .bind(connection_id.0 as i32) - // .fetch_all(&mut tx) - // .await?; + if let Some(leaving_participant) = leaving_participant { + // Leave room. + let room_id = leaving_participant.room_id; + room_participant::Entity::delete_by_id(leaving_participant.id) + .exec(&tx) + .await?; - // let project_ids = sqlx::query_scalar::<_, ProjectId>( - // " - // SELECT project_id - // FROM project_collaborators - // WHERE connection_id = $1 - // ", - // ) - // .bind(connection_id.0 as i32) - // .fetch_all(&mut tx) - // .await?; + // Cancel pending calls initiated by the leaving user. 
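+                // A pending call is a participant row whose answering connection is still NULL.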
+                let called_participants = room_participant::Entity::find()
+                    .filter(
+                        room_participant::Column::CallingConnectionId
+                            .eq(connection_id.0)
+                            .and(room_participant::Column::AnsweringConnectionId.is_null()),
+                    )
+                    .all(&tx)
+                    .await?;
+                room_participant::Entity::delete_many()
+                    .filter(
+                        room_participant::Column::Id
+                            .is_in(called_participants.iter().map(|participant| participant.id)),
+                    )
+                    .exec(&tx)
+                    .await?;
+                let canceled_calls_to_user_ids = called_participants
+                    .into_iter()
+                    .map(|participant| participant.user_id)
+                    .collect();
+
+                // Detect left projects.
+                #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
+                enum QueryProjectIds {
+                    ProjectId,
+                }
+                let project_ids: Vec<ProjectId> = project_collaborator::Entity::find()
+                    .select_only()
+                    .column_as(
+                        project_collaborator::Column::ProjectId,
+                        QueryProjectIds::ProjectId,
+                    )
+                    .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0))
+                    .into_values::<_, QueryProjectIds>()
+                    .all(&tx)
+                    .await?;
+                let mut left_projects = HashMap::default();
+                let mut collaborators = project_collaborator::Entity::find()
+                    .filter(project_collaborator::Column::ProjectId.is_in(project_ids))
+                    .stream(&tx)
+                    .await?;
+                while let Some(collaborator) = collaborators.next().await {
+                    let collaborator = collaborator?;
+                    let left_project =
+                        left_projects
+                            .entry(collaborator.project_id)
+                            .or_insert(LeftProject {
+                                id: collaborator.project_id,
+                                host_user_id: Default::default(),
+                                connection_ids: Default::default(),
+                                host_connection_id: Default::default(),
+                            });
+
+                    let collaborator_connection_id =
+                        ConnectionId(collaborator.connection_id as u32);
+                    if collaborator_connection_id != connection_id {
+                        left_project.connection_ids.push(collaborator_connection_id);
+                    }
+
+                    if collaborator.is_host {
+                        left_project.host_user_id = collaborator.user_id;
+                        left_project.host_connection_id =
+                            ConnectionId(collaborator.connection_id as u32);
+                    }
+                }
+                drop(collaborators);
+
+                // Leave projects.
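+                // Collaborator rows for this connection span every project it joined, so one filter cleans them all up.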
+ project_collaborator::Entity::delete_many() + .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0)) + .exec(&tx) + .await?; - // // Unshare projects. - // sqlx::query( - // " - // DELETE FROM projects - // WHERE room_id = $1 AND host_connection_id = $2 - // ", - // ) - // .bind(room_id) - // .bind(connection_id.0 as i32) - // .execute(&mut tx) - // .await?; + // Unshare projects. + project::Entity::delete_many() + .filter( + project::Column::RoomId + .eq(room_id) + .and(project::Column::HostConnectionId.eq(connection_id.0)), + ) + .exec(&tx) + .await?; - // let room = self.get_room(room_id, &mut tx).await?; - // Ok(Some( - // self.commit_room_transaction( - // room_id, - // tx, - // LeftRoom { - // room, - // left_projects, - // canceled_calls_to_user_ids, - // }, - // ) - // .await?, - // )) - // } else { - // Ok(None) - // } + let room = self.get_room(room_id, &tx).await?; + Ok(Some( + self.commit_room_transaction( + room_id, + tx, + LeftRoom { + room, + left_projects, + canceled_calls_to_user_ids, + }, + ) + .await?, + )) + } else { + Ok(None) + } }) .await } @@ -1259,46 +1250,48 @@ impl Database { location: proto::ParticipantLocation, ) -> Result> { self.transact(|tx| async { - todo!() - // let mut tx = tx; - // let location_kind; - // let location_project_id; - // match location - // .variant - // .as_ref() - // .ok_or_else(|| anyhow!("invalid location"))? - // { - // proto::participant_location::Variant::SharedProject(project) => { - // location_kind = 0; - // location_project_id = Some(ProjectId::from_proto(project.id)); - // } - // proto::participant_location::Variant::UnsharedProject(_) => { - // location_kind = 1; - // location_project_id = None; - // } - // proto::participant_location::Variant::External(_) => { - // location_kind = 2; - // location_project_id = None; - // } - // } + let mut tx = tx; + let location_kind; + let location_project_id; + match location + .variant + .as_ref() + .ok_or_else(|| anyhow!("invalid location"))? + { + proto::participant_location::Variant::SharedProject(project) => { + location_kind = 0; + location_project_id = Some(ProjectId::from_proto(project.id)); + } + proto::participant_location::Variant::UnsharedProject(_) => { + location_kind = 1; + location_project_id = None; + } + proto::participant_location::Variant::External(_) => { + location_kind = 2; + location_project_id = None; + } + } - // sqlx::query( - // " - // UPDATE room_participants - // SET location_kind = $1, location_project_id = $2 - // WHERE room_id = $3 AND answering_connection_id = $4 - // RETURNING 1 - // ", - // ) - // .bind(location_kind) - // .bind(location_project_id) - // .bind(room_id) - // .bind(connection_id.0 as i32) - // .fetch_one(&mut tx) - // .await?; + let result = room_participant::Entity::update_many() + .filter( + room_participant::Column::RoomId + .eq(room_id) + .and(room_participant::Column::AnsweringConnectionId.eq(connection_id.0)), + ) + .set(room_participant::ActiveModel { + location_kind: ActiveValue::set(Some(location_kind)), + location_project_id: ActiveValue::set(location_project_id), + ..Default::default() + }) + .exec(&tx) + .await?; - // let room = self.get_room(room_id, &mut tx).await?; - // self.commit_room_transaction(room_id, tx, room).await + if result.rows_affected == 1 { + let room = self.get_room(room_id, &mut tx).await?; + self.commit_room_transaction(room_id, tx, room).await + } else { + Err(anyhow!("could not update room participant location"))? 
+ } }) .await } From 62624b81d88ae2661125f912a598d3feccddbb5b Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 1 Dec 2022 16:10:37 +0100 Subject: [PATCH 096/240] Avoid using `col_expr` whenever possible ...and use the more type-safe `::set`. --- crates/collab/src/db.rs | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index bb1bff7ff8..dce217d955 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -246,7 +246,10 @@ impl Database { self.transact(|tx| async move { user::Entity::update_many() .filter(user::Column::Id.eq(id)) - .col_expr(user::Column::Admin, is_admin.into()) + .set(user::ActiveModel { + admin: ActiveValue::set(is_admin), + ..Default::default() + }) .exec(&tx) .await?; tx.commit().await?; @@ -259,7 +262,10 @@ impl Database { self.transact(|tx| async move { user::Entity::update_many() .filter(user::Column::Id.eq(id)) - .col_expr(user::Column::ConnectedOnce, connected_once.into()) + .set(user::ActiveModel { + connected_once: ActiveValue::set(connected_once), + ..Default::default() + }) .exec(&tx) .await?; tx.commit().await?; @@ -674,7 +680,10 @@ impl Database { self.transact(|tx| async { signup::Entity::update_many() .filter(signup::Column::EmailAddress.is_in(emails.iter().copied())) - .col_expr(signup::Column::EmailConfirmationSent, true.into()) + .set(signup::ActiveModel { + email_confirmation_sent: ActiveValue::set(true), + ..Default::default() + }) .exec(&tx) .await?; tx.commit().await?; @@ -876,14 +885,20 @@ impl Database { .eq(id) .and(user::Column::InviteCode.is_null()), ) - .col_expr(user::Column::InviteCode, random_invite_code().into()) + .set(user::ActiveModel { + invite_code: ActiveValue::set(Some(random_invite_code())), + ..Default::default() + }) .exec(&tx) .await?; } user::Entity::update_many() .filter(user::Column::Id.eq(id)) - .col_expr(user::Column::InviteCount, count.into()) + .set(user::ActiveModel { + invite_count: ActiveValue::set(count as i32), + ..Default::default() + }) .exec(&tx) .await?; tx.commit().await?; @@ -1107,10 +1122,10 @@ impl Database { .and(room_participant::Column::UserId.eq(user_id)) .and(room_participant::Column::AnsweringConnectionId.is_null()), ) - .col_expr( - room_participant::Column::AnsweringConnectionId, - connection_id.0.into(), - ) + .set(room_participant::ActiveModel { + answering_connection_id: ActiveValue::set(Some(connection_id.0 as i32)), + ..Default::default() + }) .exec(&tx) .await?; if result.rows_affected == 0 { From e3ac67784a8131f8c56212f201bddd57f4ea0a75 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 1 Dec 2022 16:23:29 +0100 Subject: [PATCH 097/240] Implement `Database::project_guest_connection_ids` --- crates/collab/src/db.rs | 54 ++++++++++++++++++++++++----------------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index dce217d955..96ca4e9530 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1311,28 +1311,6 @@ impl Database { .await } - async fn get_guest_connection_ids( - &self, - project_id: ProjectId, - tx: &DatabaseTransaction, - ) -> Result> { - todo!() - // let mut guest_connection_ids = Vec::new(); - // let mut db_guest_connection_ids = sqlx::query_scalar::<_, i32>( - // " - // SELECT connection_id - // FROM project_collaborators - // WHERE project_id = $1 AND is_host = FALSE - // ", - // ) - // .bind(project_id) - // .fetch(tx); - // while let Some(connection_id) = 
db_guest_connection_ids.next().await { - // guest_connection_ids.push(ConnectionId(connection_id? as u32)); - // } - // Ok(guest_connection_ids) - } - fn build_incoming_call( room: &proto::Room, called_user_id: UserId, @@ -2194,6 +2172,38 @@ impl Database { .await } + async fn project_guest_connection_ids( + &self, + project_id: ProjectId, + tx: &DatabaseTransaction, + ) -> Result> { + #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] + enum QueryAs { + ConnectionId, + } + + let mut db_guest_connection_ids = project_collaborator::Entity::find() + .select_only() + .column_as( + project_collaborator::Column::ConnectionId, + QueryAs::ConnectionId, + ) + .filter( + project_collaborator::Column::ProjectId + .eq(project_id) + .and(project_collaborator::Column::IsHost.eq(false)), + ) + .into_values::() + .stream(tx) + .await?; + + let mut guest_connection_ids = Vec::new(); + while let Some(connection_id) = db_guest_connection_ids.next().await { + guest_connection_ids.push(ConnectionId(connection_id? as u32)); + } + Ok(guest_connection_ids) + } + // access tokens pub async fn create_access_token_hash( From 944d6554deb85dcb8ab14d1a05d4b0f77b707230 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 1 Dec 2022 16:26:13 +0100 Subject: [PATCH 098/240] Implement `Database::unshare_project` --- crates/collab/src/db.rs | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 96ca4e9530..fc377ff7ac 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1527,22 +1527,23 @@ impl Database { connection_id: ConnectionId, ) -> Result)>> { self.transact(|tx| async move { - todo!() - // let guest_connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; - // let room_id: RoomId = sqlx::query_scalar( - // " - // DELETE FROM projects - // WHERE id = $1 AND host_connection_id = $2 - // RETURNING room_id - // ", - // ) - // .bind(project_id) - // .bind(connection_id.0 as i32) - // .fetch_one(&mut tx) - // .await?; - // let room = self.get_room(room_id, &mut tx).await?; - // self.commit_room_transaction(room_id, tx, (room, guest_connection_ids)) - // .await + let guest_connection_ids = self.project_guest_connection_ids(project_id, &tx).await?; + + let project = project::Entity::find_by_id(project_id) + .one(&tx) + .await? + .ok_or_else(|| anyhow!("project not found"))?; + if project.host_connection_id == connection_id.0 as i32 { + let room_id = project.room_id; + project::Entity::delete(project.into_active_model()) + .exec(&tx) + .await?; + let room = self.get_room(room_id, &tx).await?; + self.commit_room_transaction(room_id, tx, (room, guest_connection_ids)) + .await + } else { + Err(anyhow!("cannot unshare a project hosted by another user"))? 
+ } }) .await } From cfdf0a57b8f4915018135a31309c53e1765bd8c3 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 1 Dec 2022 17:36:36 +0100 Subject: [PATCH 099/240] Implement `Database::update_project` --- crates/collab/src/db.rs | 172 ++++++++++++++++------------------------ 1 file changed, 69 insertions(+), 103 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index fc377ff7ac..971a8cd612 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1555,73 +1555,40 @@ impl Database { worktrees: &[proto::WorktreeMetadata], ) -> Result)>> { self.transact(|tx| async move { - todo!() - // let room_id: RoomId = sqlx::query_scalar( - // " - // SELECT room_id - // FROM projects - // WHERE id = $1 AND host_connection_id = $2 - // ", - // ) - // .bind(project_id) - // .bind(connection_id.0 as i32) - // .fetch_one(&mut tx) - // .await?; + let project = project::Entity::find_by_id(project_id) + .filter(project::Column::HostConnectionId.eq(connection_id.0)) + .one(&tx) + .await? + .ok_or_else(|| anyhow!("no such project"))?; - // if !worktrees.is_empty() { - // let mut params = "(?, ?, ?, ?, ?, ?, ?),".repeat(worktrees.len()); - // params.pop(); - // let query = format!( - // " - // INSERT INTO worktrees ( - // project_id, - // id, - // root_name, - // abs_path, - // visible, - // scan_id, - // is_complete - // ) - // VALUES {params} - // ON CONFLICT (project_id, id) DO UPDATE SET root_name = excluded.root_name - // " - // ); + worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel { + id: ActiveValue::set(worktree.id as i32), + project_id: ActiveValue::set(project.id), + abs_path: ActiveValue::set(worktree.abs_path.clone()), + root_name: ActiveValue::set(worktree.root_name.clone()), + visible: ActiveValue::set(worktree.visible), + scan_id: ActiveValue::set(0), + is_complete: ActiveValue::set(false), + })) + .exec(&tx) + .await?; + worktree::Entity::delete_many() + .filter( + worktree::Column::ProjectId.eq(project.id).and( + worktree::Column::Id.is_not_in( + worktrees + .iter() + .map(|worktree| WorktreeId(worktree.id as i32)), + ), + ), + ) + .exec(&tx) + .await?; - // let mut query = sqlx::query(&query); - // for worktree in worktrees { - // query = query - // .bind(project_id) - // .bind(worktree.id as i32) - // .bind(&worktree.root_name) - // .bind(&worktree.abs_path) - // .bind(worktree.visible) - // .bind(0) - // .bind(false) - // } - // query.execute(&mut tx).await?; - // } - - // let mut params = "?,".repeat(worktrees.len()); - // if !worktrees.is_empty() { - // params.pop(); - // } - // let query = format!( - // " - // DELETE FROM worktrees - // WHERE project_id = ? 
AND id NOT IN ({params}) - // ", - // ); - - // let mut query = sqlx::query(&query).bind(project_id); - // for worktree in worktrees { - // query = query.bind(WorktreeId(worktree.id as i32)); - // } - // query.execute(&mut tx).await?; - - // let guest_connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; - // let room = self.get_room(room_id, &mut tx).await?; - // self.commit_room_transaction(room_id, tx, (room, guest_connection_ids)) - // .await + let guest_connection_ids = self.project_guest_connection_ids(project.id, &tx).await?; + let room = self.get_room(project.room_id, &tx).await?; + self.commit_room_transaction(project.room_id, tx, (room, guest_connection_ids)) + .await }) .await } @@ -2119,26 +2086,19 @@ impl Database { connection_id: ConnectionId, ) -> Result> { self.transact(|tx| async move { - todo!() - // let collaborators = sqlx::query_as::<_, ProjectCollaborator>( - // " - // SELECT * - // FROM project_collaborators - // WHERE project_id = $1 - // ", - // ) - // .bind(project_id) - // .fetch_all(&mut tx) - // .await?; + let collaborators = project_collaborator::Entity::find() + .filter(project_collaborator::Column::ProjectId.eq(project_id)) + .all(&tx) + .await?; - // if collaborators - // .iter() - // .any(|collaborator| collaborator.connection_id == connection_id.0 as i32) - // { - // Ok(collaborators) - // } else { - // Err(anyhow!("no such project"))? - // } + if collaborators + .iter() + .any(|collaborator| collaborator.connection_id == connection_id.0 as i32) + { + Ok(collaborators) + } else { + Err(anyhow!("no such project"))? + } }) .await } @@ -2149,26 +2109,32 @@ impl Database { connection_id: ConnectionId, ) -> Result> { self.transact(|tx| async move { - todo!() - // let connection_ids = sqlx::query_scalar::<_, i32>( - // " - // SELECT connection_id - // FROM project_collaborators - // WHERE project_id = $1 - // ", - // ) - // .bind(project_id) - // .fetch_all(&mut tx) - // .await?; + #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] + enum QueryAs { + ConnectionId, + } - // if connection_ids.contains(&(connection_id.0 as i32)) { - // Ok(connection_ids - // .into_iter() - // .map(|connection_id| ConnectionId(connection_id as u32)) - // .collect()) - // } else { - // Err(anyhow!("no such project"))? - // } + let mut db_connection_ids = project_collaborator::Entity::find() + .select_only() + .column_as( + project_collaborator::Column::ConnectionId, + QueryAs::ConnectionId, + ) + .filter(project_collaborator::Column::ProjectId.eq(project_id)) + .into_values::() + .stream(&tx) + .await?; + + let mut connection_ids = HashSet::default(); + while let Some(connection_id) = db_connection_ids.next().await { + connection_ids.insert(ConnectionId(connection_id? as u32)); + } + + if connection_ids.contains(&connection_id) { + Ok(connection_ids) + } else { + Err(anyhow!("no such project"))? + } }) .await } From 29a4baf3469e38e1dd77aaad0f2b07e2a11830c9 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 1 Dec 2022 17:47:51 +0100 Subject: [PATCH 100/240] Replace i32 with u32 for database columns We never expect to return signed integers and so we shouldn't use a signed type. I think this was a limitation of sqlx. 
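
For a sense of the invariant this encodes, here is a hypothetical
round-trip check (illustrative only, not part of this change); it uses
the `from_proto`/`to_proto` helpers and the `From<$name> for
sea_query::Value` impl that `id_type!` generates:

    #[test]
    fn ids_round_trip_unsigned() {
        // Ids come off the wire as unsigned protobuf integers, so with
        // unsigned storage no sign bit can be lost in either direction.
        let id = UserId::from_proto(42);
        assert_eq!(id.to_proto(), 42);
        assert_eq!(
            sea_query::Value::from(id),
            sea_query::Value::Unsigned(Some(42))
        );
    }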
--- crates/collab/src/api.rs | 10 ++--- crates/collab/src/db.rs | 46 ++++++++++---------- crates/collab/src/db/project.rs | 2 +- crates/collab/src/db/project_collaborator.rs | 2 +- crates/collab/src/db/room_participant.rs | 6 +-- crates/collab/src/db/tests.rs | 2 +- crates/collab/src/db/user.rs | 4 +- crates/collab/src/db/worktree.rs | 7 ++- crates/collab/src/integration_tests.rs | 2 +- crates/collab/src/rpc.rs | 5 +-- 10 files changed, 41 insertions(+), 45 deletions(-) diff --git a/crates/collab/src/api.rs b/crates/collab/src/api.rs index bf183edf54..a055494791 100644 --- a/crates/collab/src/api.rs +++ b/crates/collab/src/api.rs @@ -76,7 +76,7 @@ pub async fn validate_api_token(req: Request, next: Next) -> impl IntoR #[derive(Debug, Deserialize)] struct AuthenticatedUserParams { - github_user_id: Option, + github_user_id: Option, github_login: String, } @@ -123,14 +123,14 @@ async fn get_users( #[derive(Deserialize, Debug)] struct CreateUserParams { - github_user_id: i32, + github_user_id: u32, github_login: String, email_address: String, email_confirmation_code: Option, #[serde(default)] admin: bool, #[serde(default)] - invite_count: i32, + invite_count: u32, } #[derive(Serialize, Debug)] @@ -208,7 +208,7 @@ struct UpdateUserParams { } async fn update_user( - Path(user_id): Path, + Path(user_id): Path, Json(params): Json, Extension(app): Extension>, Extension(rpc_server): Extension>, @@ -230,7 +230,7 @@ async fn update_user( } async fn destroy_user( - Path(user_id): Path, + Path(user_id): Path, Extension(app): Extension>, ) -> Result<()> { app.db.destroy_user(UserId(user_id)).await?; diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 971a8cd612..31ee381857 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -156,7 +156,7 @@ impl Database { pub async fn get_user_by_github_account( &self, github_login: &str, - github_user_id: Option, + github_user_id: Option, ) -> Result> { self.transact(|tx| async { let tx = tx; @@ -896,7 +896,7 @@ impl Database { user::Entity::update_many() .filter(user::Column::Id.eq(id)) .set(user::ActiveModel { - invite_count: ActiveValue::set(count as i32), + invite_count: ActiveValue::set(count), ..Default::default() }) .exec(&tx) @@ -979,9 +979,9 @@ impl Database { room_participant::ActiveModel { room_id: ActiveValue::set(room_id), user_id: ActiveValue::set(user_id), - answering_connection_id: ActiveValue::set(Some(connection_id.0 as i32)), + answering_connection_id: ActiveValue::set(Some(connection_id.0)), calling_user_id: ActiveValue::set(user_id), - calling_connection_id: ActiveValue::set(connection_id.0 as i32), + calling_connection_id: ActiveValue::set(connection_id.0), ..Default::default() } .insert(&tx) @@ -1006,7 +1006,7 @@ impl Database { room_id: ActiveValue::set(room_id), user_id: ActiveValue::set(called_user_id), calling_user_id: ActiveValue::set(calling_user_id), - calling_connection_id: ActiveValue::set(calling_connection_id.0 as i32), + calling_connection_id: ActiveValue::set(calling_connection_id.0), initial_project_id: ActiveValue::set(initial_project_id), ..Default::default() } @@ -1123,7 +1123,7 @@ impl Database { .and(room_participant::Column::AnsweringConnectionId.is_null()), ) .set(room_participant::ActiveModel { - answering_connection_id: ActiveValue::set(Some(connection_id.0 as i32)), + answering_connection_id: ActiveValue::set(Some(connection_id.0)), ..Default::default() }) .exec(&tx) @@ -1485,14 +1485,14 @@ impl Database { let project = project::ActiveModel { room_id: 
ActiveValue::set(participant.room_id), host_user_id: ActiveValue::set(participant.user_id), - host_connection_id: ActiveValue::set(connection_id.0 as i32), + host_connection_id: ActiveValue::set(connection_id.0), ..Default::default() } .insert(&tx) .await?; worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel { - id: ActiveValue::set(worktree.id as i32), + id: ActiveValue::set(WorktreeId(worktree.id as u32)), project_id: ActiveValue::set(project.id), abs_path: ActiveValue::set(worktree.abs_path.clone()), root_name: ActiveValue::set(worktree.root_name.clone()), @@ -1505,7 +1505,7 @@ impl Database { project_collaborator::ActiveModel { project_id: ActiveValue::set(project.id), - connection_id: ActiveValue::set(connection_id.0 as i32), + connection_id: ActiveValue::set(connection_id.0), user_id: ActiveValue::set(participant.user_id), replica_id: ActiveValue::set(ReplicaId(0)), is_host: ActiveValue::set(true), @@ -1533,7 +1533,7 @@ impl Database { .one(&tx) .await? .ok_or_else(|| anyhow!("project not found"))?; - if project.host_connection_id == connection_id.0 as i32 { + if project.host_connection_id == connection_id.0 { let room_id = project.room_id; project::Entity::delete(project.into_active_model()) .exec(&tx) @@ -1562,7 +1562,7 @@ impl Database { .ok_or_else(|| anyhow!("no such project"))?; worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel { - id: ActiveValue::set(worktree.id as i32), + id: ActiveValue::set(WorktreeId(worktree.id as u32)), project_id: ActiveValue::set(project.id), abs_path: ActiveValue::set(worktree.abs_path.clone()), root_name: ActiveValue::set(worktree.root_name.clone()), @@ -1578,7 +1578,7 @@ impl Database { worktree::Column::Id.is_not_in( worktrees .iter() - .map(|worktree| WorktreeId(worktree.id as i32)), + .map(|worktree| WorktreeId(worktree.id as u32)), ), ), ) @@ -2093,7 +2093,7 @@ impl Database { if collaborators .iter() - .any(|collaborator| collaborator.connection_id == connection_id.0 as i32) + .any(|collaborator| collaborator.connection_id == connection_id.0) { Ok(collaborators) } else { @@ -2307,8 +2307,8 @@ impl DerefMut for RoomGuard { #[derive(Debug, Serialize, Deserialize)] pub struct NewUserParams { pub github_login: String, - pub github_user_id: i32, - pub invite_count: i32, + pub github_user_id: u32, + pub invite_count: u32, } #[derive(Debug)] @@ -2339,21 +2339,19 @@ macro_rules! id_type { PartialOrd, Ord, Hash, - sqlx::Type, Serialize, Deserialize, )] - #[sqlx(transparent)] #[serde(transparent)] - pub struct $name(pub i32); + pub struct $name(pub u32); impl $name { #[allow(unused)] - pub const MAX: Self = Self(i32::MAX); + pub const MAX: Self = Self(u32::MAX); #[allow(unused)] pub fn from_proto(value: u64) -> Self { - Self(value as i32) + Self(value as u32) } #[allow(unused)] @@ -2370,7 +2368,7 @@ macro_rules! id_type { impl From<$name> for sea_query::Value { fn from(value: $name) -> Self { - sea_query::Value::Int(Some(value.0)) + sea_query::Value::Unsigned(Some(value.0)) } } @@ -2380,7 +2378,7 @@ macro_rules! id_type { pre: &str, col: &str, ) -> Result { - Ok(Self(i32::try_get(res, pre, col)?)) + Ok(Self(u32::try_get(res, pre, col)?)) } } @@ -2420,11 +2418,11 @@ macro_rules! 
id_type { } fn array_type() -> sea_query::ArrayType { - sea_query::ArrayType::Int + sea_query::ArrayType::Unsigned } fn column_type() -> sea_query::ColumnType { - sea_query::ColumnType::Integer(None) + sea_query::ColumnType::Unsigned(None) } } diff --git a/crates/collab/src/db/project.rs b/crates/collab/src/db/project.rs index a9f0d1cb47..c8083402a3 100644 --- a/crates/collab/src/db/project.rs +++ b/crates/collab/src/db/project.rs @@ -8,7 +8,7 @@ pub struct Model { pub id: ProjectId, pub room_id: RoomId, pub host_user_id: UserId, - pub host_connection_id: i32, + pub host_connection_id: u32, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] diff --git a/crates/collab/src/db/project_collaborator.rs b/crates/collab/src/db/project_collaborator.rs index fb1d565e3a..bccf451a63 100644 --- a/crates/collab/src/db/project_collaborator.rs +++ b/crates/collab/src/db/project_collaborator.rs @@ -7,7 +7,7 @@ pub struct Model { #[sea_orm(primary_key)] pub id: ProjectCollaboratorId, pub project_id: ProjectId, - pub connection_id: i32, + pub connection_id: u32, pub user_id: UserId, pub replica_id: ReplicaId, pub is_host: bool, diff --git a/crates/collab/src/db/room_participant.rs b/crates/collab/src/db/room_participant.rs index c7c804581b..e8f38cf693 100644 --- a/crates/collab/src/db/room_participant.rs +++ b/crates/collab/src/db/room_participant.rs @@ -8,12 +8,12 @@ pub struct Model { pub id: RoomParticipantId, pub room_id: RoomId, pub user_id: UserId, - pub answering_connection_id: Option, - pub location_kind: Option, + pub answering_connection_id: Option, + pub location_kind: Option, pub location_project_id: Option, pub initial_project_id: Option, pub calling_user_id: UserId, - pub calling_connection_id: i32, + pub calling_connection_id: u32, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] diff --git a/crates/collab/src/db/tests.rs b/crates/collab/src/db/tests.rs index b276bd5057..6ca287746a 100644 --- a/crates/collab/src/db/tests.rs +++ b/crates/collab/src/db/tests.rs @@ -430,7 +430,7 @@ async fn test_fuzzy_search_users() { false, NewUserParams { github_login: github_login.into(), - github_user_id: i as i32, + github_user_id: i as u32, invite_count: 0, }, ) diff --git a/crates/collab/src/db/user.rs b/crates/collab/src/db/user.rs index c2b157bd0a..9929233044 100644 --- a/crates/collab/src/db/user.rs +++ b/crates/collab/src/db/user.rs @@ -8,11 +8,11 @@ pub struct Model { #[sea_orm(primary_key)] pub id: UserId, pub github_login: String, - pub github_user_id: Option, + pub github_user_id: Option, pub email_address: Option, pub admin: bool, pub invite_code: Option, - pub invite_count: i32, + pub invite_count: u32, pub inviter_id: Option, pub connected_once: bool, pub metrics_id: Uuid, diff --git a/crates/collab/src/db/worktree.rs b/crates/collab/src/db/worktree.rs index 3c6f7c0c1d..8cad41e8a9 100644 --- a/crates/collab/src/db/worktree.rs +++ b/crates/collab/src/db/worktree.rs @@ -1,18 +1,17 @@ +use super::{ProjectId, WorktreeId}; use sea_orm::entity::prelude::*; -use super::ProjectId; - #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] #[sea_orm(table_name = "worktrees")] pub struct Model { #[sea_orm(primary_key)] - pub id: i32, + pub id: WorktreeId, #[sea_orm(primary_key)] pub project_id: ProjectId, pub abs_path: String, pub root_name: String, pub visible: bool, - pub scan_id: i64, + pub scan_id: u32, pub is_complete: bool, } diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 225501c71d..c554028407 100644 --- 
a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -5383,7 +5383,7 @@ async fn test_random_collaboration( false, NewUserParams { github_login: username.clone(), - github_user_id: (ix + 1) as i32, + github_user_id: (ix + 1) as u32, invite_count: 0, }, ) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index beefe54a9d..01866b074d 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1008,7 +1008,7 @@ async fn join_project( let collaborators = project .collaborators .iter() - .filter(|collaborator| collaborator.connection_id != session.connection_id.0 as i32) + .filter(|collaborator| collaborator.connection_id != session.connection_id.0) .map(|collaborator| proto::Collaborator { peer_id: collaborator.connection_id as u32, replica_id: collaborator.replica_id.0 as u32, @@ -1313,8 +1313,7 @@ async fn save_buffer( .await .project_collaborators(project_id, session.connection_id) .await?; - collaborators - .retain(|collaborator| collaborator.connection_id != session.connection_id.0 as i32); + collaborators.retain(|collaborator| collaborator.connection_id != session.connection_id.0); let project_connection_ids = collaborators .into_iter() .map(|collaborator| ConnectionId(collaborator.connection_id as u32)); From 585ac3e1beb6aea75f929e7e80116b4c081acfa0 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 1 Dec 2022 18:39:24 +0100 Subject: [PATCH 101/240] WIP --- crates/collab/src/db.rs | 60 +++++++++++--------------- crates/collab/src/db/worktree_entry.rs | 23 ++++++++++ 2 files changed, 47 insertions(+), 36 deletions(-) create mode 100644 crates/collab/src/db/worktree_entry.rs diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 31ee381857..3d828b2e79 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -9,6 +9,7 @@ mod signup; mod tests; mod user; mod worktree; +mod worktree_entry; use crate::{Error, Result}; use anyhow::anyhow; @@ -1599,44 +1600,28 @@ impl Database { connection_id: ConnectionId, ) -> Result>> { self.transact(|tx| async move { - todo!() - // let project_id = ProjectId::from_proto(update.project_id); - // let worktree_id = WorktreeId::from_proto(update.worktree_id); + let project_id = ProjectId::from_proto(update.project_id); + let worktree_id = WorktreeId::from_proto(update.worktree_id); - // // Ensure the update comes from the host. - // let room_id: RoomId = sqlx::query_scalar( - // " - // SELECT room_id - // FROM projects - // WHERE id = $1 AND host_connection_id = $2 - // ", - // ) - // .bind(project_id) - // .bind(connection_id.0 as i32) - // .fetch_one(&mut tx) - // .await?; + // Ensure the update comes from the host. + let project = project::Entity::find_by_id(project_id) + .filter(project::Column::HostConnectionId.eq(connection_id.0)) + .one(&tx) + .await? + .ok_or_else(|| anyhow!("no such project"))?; - // // Update metadata. - // sqlx::query( - // " - // UPDATE worktrees - // SET - // root_name = $1, - // scan_id = $2, - // is_complete = $3, - // abs_path = $4 - // WHERE project_id = $5 AND id = $6 - // RETURNING 1 - // ", - // ) - // .bind(&update.root_name) - // .bind(update.scan_id as i64) - // .bind(update.is_last_update) - // .bind(&update.abs_path) - // .bind(project_id) - // .bind(worktree_id) - // .fetch_one(&mut tx) - // .await?; + // Update metadata. 
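+            // A hedged note on the sea-orm call below (a semantics sketch, not
+            // authoritative docs): `Entity::update` built from an ActiveModel
+            // issues a single `UPDATE .. SET .. WHERE <primary key>`. Every
+            // `ActiveValue::set` field lands in the SET clause, fields left
+            // NotSet via `..Default::default()` (here `visible`) are skipped,
+            // and the set primary-key pair (project_id, id) becomes the WHERE
+            // clause, so only this worktree's row is touched.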
+ worktree::Entity::update(worktree::ActiveModel { + id: ActiveValue::set(worktree_id), + project_id: ActiveValue::set(project_id), + root_name: ActiveValue::set(update.root_name.clone()), + scan_id: ActiveValue::set(update.scan_id as u32), + is_complete: ActiveValue::set(update.is_last_update), + abs_path: ActiveValue::set(update.abs_path.clone()), + ..Default::default() + }) + .exec(&tx) + .await?; // if !update.updated_entries.is_empty() { // let mut params = @@ -1706,6 +1691,8 @@ impl Database { // let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; // self.commit_room_transaction(room_id, tx, connection_ids) // .await + + todo!() }) .await } @@ -2456,6 +2443,7 @@ id_type!(ReplicaId); id_type!(SignupId); id_type!(UserId); id_type!(WorktreeId); +id_type!(WorktreeEntryId); pub struct LeftRoom { pub room: proto::Room, diff --git a/crates/collab/src/db/worktree_entry.rs b/crates/collab/src/db/worktree_entry.rs new file mode 100644 index 0000000000..8698d844c1 --- /dev/null +++ b/crates/collab/src/db/worktree_entry.rs @@ -0,0 +1,23 @@ +use super::{ProjectId, WorktreeEntryId, WorktreeId}; +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] +#[sea_orm(table_name = "worktree_entries")] +pub struct Model { + #[sea_orm(primary_key)] + project_id: ProjectId, + #[sea_orm(primary_key)] + worktree_id: WorktreeId, + #[sea_orm(primary_key)] + id: WorktreeEntryId, + is_dir: bool, + path: String, + inode: u64, + mtime_seconds: u64, + mtime_nanos: u32, + is_symlink: bool, + is_ignored: bool, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} From 239a04ea5bfc5c318881a7b8c33e7e211af13aa4 Mon Sep 17 00:00:00 2001 From: Julia Date: Fri, 2 Dec 2022 00:31:16 -0500 Subject: [PATCH 102/240] Add test that should have exercised tab reordering while following Except it doesn't, it passes both with and without the prior commit. 
Investigate further
---
 crates/collab/src/integration_tests.rs | 121 +++++++++++++++++++++++++
 1 file changed, 121 insertions(+)

diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs
index 3e0b2171a8..7115ed6c60 100644
--- a/crates/collab/src/integration_tests.rs
+++ b/crates/collab/src/integration_tests.rs
@@ -4953,6 +4953,127 @@ async fn test_following(
     );
 }
 
+#[gpui::test]
+async fn test_following_tab_order(
+    deterministic: Arc<Deterministic>,
+    cx_a: &mut TestAppContext,
+    cx_b: &mut TestAppContext,
+) {
+    cx_a.update(editor::init);
+    cx_b.update(editor::init);
+
+    let mut server = TestServer::start(cx_a.background()).await;
+    let client_a = server.create_client(cx_a, "user_a").await;
+    let client_b = server.create_client(cx_b, "user_b").await;
+    server
+        .create_room(&mut [(&client_a, cx_a), (&client_b, cx_b)])
+        .await;
+    let active_call_a = cx_a.read(ActiveCall::global);
+    let active_call_b = cx_b.read(ActiveCall::global);
+
+    client_a
+        .fs
+        .insert_tree(
+            "/a",
+            json!({
+                "1.txt": "one",
+                "2.txt": "two",
+                "3.txt": "three",
+            }),
+        )
+        .await;
+    let (project_a, worktree_id) = client_a.build_local_project("/a", cx_a).await;
+    active_call_a
+        .update(cx_a, |call, cx| call.set_location(Some(&project_a), cx))
+        .await
+        .unwrap();
+
+    let project_id = active_call_a
+        .update(cx_a, |call, cx| call.share_project(project_a.clone(), cx))
+        .await
+        .unwrap();
+    let project_b = client_b.build_remote_project(project_id, cx_b).await;
+    active_call_b
+        .update(cx_b, |call, cx| call.set_location(Some(&project_b), cx))
+        .await
+        .unwrap();
+
+    let workspace_a = client_a.build_workspace(&project_a, cx_a);
+    let pane_a = workspace_a.read_with(cx_a, |workspace, _| workspace.active_pane().clone());
+
+    let workspace_b = client_b.build_workspace(&project_b, cx_b);
+    let pane_b = workspace_b.read_with(cx_b, |workspace, _| workspace.active_pane().clone());
+
+    let client_b_id = project_a.read_with(cx_a, |project, _| {
+        project.collaborators().values().next().unwrap().peer_id
+    });
+
+    //Open 1, 3 in that order on client A
+    workspace_a
+        .update(cx_a, |workspace, cx| {
+            workspace.open_path((worktree_id, "1.txt"), None, true, cx)
+        })
+        .await
+        .unwrap();
+    workspace_a
+        .update(cx_a, |workspace, cx| {
+            workspace.open_path((worktree_id, "3.txt"), None, true, cx)
+        })
+        .await
+        .unwrap();
+
+    let pane_paths = |pane: &ViewHandle<Pane>, cx: &mut TestAppContext| {
+        pane.update(cx, |pane, cx| {
+            pane.items()
+                .map(|item| {
+                    item.project_path(cx)
+                        .unwrap()
+                        .path
+                        .to_str()
+                        .unwrap()
+                        .to_owned()
+                })
+                .collect::<Vec<_>>()
+        })
+    };
+
+    //Verify that the tabs opened in the order we expect
+    assert_eq!(&pane_paths(&pane_a, cx_a), &["1.txt", "3.txt"]);
+
+    //Open just 2 on client B
+    workspace_b
+        .update(cx_b, |workspace, cx| {
+            workspace.open_path((worktree_id, "2.txt"), None, true, cx)
+        })
+        .await
+        .unwrap();
+
+    //Follow client B as client A
+    workspace_a
+        .update(cx_a, |workspace, cx| {
+            workspace
+                .toggle_follow(&ToggleFollow(client_b_id), cx)
+                .unwrap()
+        })
+        .await
+        .unwrap();
+
+    // Verify that newly opened followed file is at the end
+    assert_eq!(&pane_paths(&pane_a, cx_a), &["1.txt", "3.txt", "2.txt"]);
+
+    //Open just 1 on client B
+    workspace_b
+        .update(cx_b, |workspace, cx| {
+            workspace.open_path((worktree_id, "1.txt"), None, true, cx)
+        })
+        .await
+        .unwrap();
+    assert_eq!(&pane_paths(&pane_b, cx_b), &["2.txt", "1.txt"]);
+
+    // Verify that following into 1 did not reorder
+    assert_eq!(&pane_paths(&pane_a, cx_a), &["1.txt", "3.txt", "2.txt"]);
+}
+
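+// A guess at the "investigate further" note above, flagged as an assumption
+// rather than a diagnosis: `deterministic` is accepted by this test but never
+// used. Similar gpui tests call `deterministic.run_until_parked()` to flush
+// queued follow/update messages before asserting, so without it the test may
+// never observe the intermediate tab order the prior commit was meant to fix.
+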
#[gpui::test(iterations = 10)] async fn test_peers_following_each_other(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { cx_a.foreground().forbid_parking(); From dec5f37e4e4f13abb33cc5717f58390496bcf32c Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 2 Dec 2022 13:58:23 +0100 Subject: [PATCH 103/240] Finish porting remaining db methods to sea-orm --- .../20221109000000_test_schema.sql | 2 + .../20221111092550_reconnection_support.sql | 6 +- crates/collab/src/db.rs | 687 ++++++++---------- crates/collab/src/db/language_server.rs | 30 + crates/collab/src/db/project.rs | 20 +- crates/collab/src/db/project_collaborator.rs | 15 +- crates/collab/src/db/worktree.rs | 6 +- .../src/db/worktree_diagnostic_summary.rs | 21 + crates/collab/src/db/worktree_entry.rs | 24 +- crates/collab/src/rpc.rs | 6 +- 10 files changed, 399 insertions(+), 418 deletions(-) create mode 100644 crates/collab/src/db/language_server.rs create mode 100644 crates/collab/src/db/worktree_diagnostic_summary.rs diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index aeb6b7f720..e62f834fbf 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -72,6 +72,7 @@ CREATE TABLE "worktree_entries" ( PRIMARY KEY(project_id, worktree_id, id), FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE ); +CREATE INDEX "index_worktree_entries_on_project_id" ON "worktree_entries" ("project_id"); CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id"); CREATE TABLE "worktree_diagnostic_summaries" ( @@ -84,6 +85,7 @@ CREATE TABLE "worktree_diagnostic_summaries" ( PRIMARY KEY(project_id, worktree_id, path), FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE ); +CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id" ON "worktree_diagnostic_summaries" ("project_id"); CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id"); CREATE TABLE "language_servers" ( diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index b742f8e0cd..a7d45a9759 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -22,18 +22,19 @@ CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id"); CREATE TABLE "worktree_entries" ( "project_id" INTEGER NOT NULL, - "worktree_id" INTEGER NOT NULL, + "worktree_id" INT8 NOT NULL, "id" INTEGER NOT NULL, "is_dir" BOOL NOT NULL, "path" VARCHAR NOT NULL, "inode" INT8 NOT NULL, - "mtime_seconds" INTEGER NOT NULL, + "mtime_seconds" INT8 NOT NULL, "mtime_nanos" INTEGER NOT NULL, "is_symlink" BOOL NOT NULL, "is_ignored" BOOL NOT NULL, PRIMARY KEY(project_id, worktree_id, id), FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE ); +CREATE INDEX "index_worktree_entries_on_project_id" ON "worktree_entries" ("project_id"); CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id"); CREATE TABLE "worktree_diagnostic_summaries" ( @@ -46,6 +47,7 @@ CREATE TABLE "worktree_diagnostic_summaries" ( PRIMARY 
KEY(project_id, worktree_id, path), FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE ); +CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id" ON "worktree_diagnostic_summaries" ("project_id"); CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id"); CREATE TABLE "language_servers" ( diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 3d828b2e79..b01c6e7504 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1,5 +1,6 @@ mod access_token; mod contact; +mod language_server; mod project; mod project_collaborator; mod room; @@ -9,6 +10,7 @@ mod signup; mod tests; mod user; mod worktree; +mod worktree_diagnostic_summary; mod worktree_entry; use crate::{Error, Result}; @@ -1493,7 +1495,7 @@ impl Database { .await?; worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel { - id: ActiveValue::set(WorktreeId(worktree.id as u32)), + id: ActiveValue::set(worktree.id as i64), project_id: ActiveValue::set(project.id), abs_path: ActiveValue::set(worktree.abs_path.clone()), root_name: ActiveValue::set(worktree.root_name.clone()), @@ -1563,7 +1565,7 @@ impl Database { .ok_or_else(|| anyhow!("no such project"))?; worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel { - id: ActiveValue::set(WorktreeId(worktree.id as u32)), + id: ActiveValue::set(worktree.id as i64), project_id: ActiveValue::set(project.id), abs_path: ActiveValue::set(worktree.abs_path.clone()), root_name: ActiveValue::set(worktree.root_name.clone()), @@ -1576,11 +1578,8 @@ impl Database { worktree::Entity::delete_many() .filter( worktree::Column::ProjectId.eq(project.id).and( - worktree::Column::Id.is_not_in( - worktrees - .iter() - .map(|worktree| WorktreeId(worktree.id as u32)), - ), + worktree::Column::Id + .is_not_in(worktrees.iter().map(|worktree| worktree.id as i64)), ), ) .exec(&tx) @@ -1601,7 +1600,7 @@ impl Database { ) -> Result>> { self.transact(|tx| async move { let project_id = ProjectId::from_proto(update.project_id); - let worktree_id = WorktreeId::from_proto(update.worktree_id); + let worktree_id = update.worktree_id as i64; // Ensure the update comes from the host. let project = project::Entity::find_by_id(project_id) @@ -1609,13 +1608,14 @@ impl Database { .one(&tx) .await? .ok_or_else(|| anyhow!("no such project"))?; + let room_id = project.room_id; // Update metadata. 
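            // Mirrors the raw UPDATE that the commented-out sqlx version in the
            // WIP commit spelled out by hand: (project_id, worktree_id) form the
            // WHERE clause, and root_name/scan_id/is_complete/abs_path form the
            // SET clause. `scan_id` is cast to i64 to match the model's new
            // `scan_id: i64` field.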
worktree::Entity::update(worktree::ActiveModel { id: ActiveValue::set(worktree_id), project_id: ActiveValue::set(project_id), root_name: ActiveValue::set(update.root_name.clone()), - scan_id: ActiveValue::set(update.scan_id as u32), + scan_id: ActiveValue::set(update.scan_id as i64), is_complete: ActiveValue::set(update.is_last_update), abs_path: ActiveValue::set(update.abs_path.clone()), ..Default::default() @@ -1623,76 +1623,57 @@ impl Database { .exec(&tx) .await?; - // if !update.updated_entries.is_empty() { - // let mut params = - // "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?),".repeat(update.updated_entries.len()); - // params.pop(); + worktree_entry::Entity::insert_many(update.updated_entries.iter().map(|entry| { + let mtime = entry.mtime.clone().unwrap_or_default(); + worktree_entry::ActiveModel { + project_id: ActiveValue::set(project_id), + worktree_id: ActiveValue::set(worktree_id), + id: ActiveValue::set(entry.id as i64), + is_dir: ActiveValue::set(entry.is_dir), + path: ActiveValue::set(entry.path.clone()), + inode: ActiveValue::set(entry.inode as i64), + mtime_seconds: ActiveValue::set(mtime.seconds as i64), + mtime_nanos: ActiveValue::set(mtime.nanos), + is_symlink: ActiveValue::set(entry.is_symlink), + is_ignored: ActiveValue::set(entry.is_ignored), + } + })) + .on_conflict( + OnConflict::columns([ + worktree_entry::Column::ProjectId, + worktree_entry::Column::WorktreeId, + worktree_entry::Column::Id, + ]) + .update_columns([ + worktree_entry::Column::IsDir, + worktree_entry::Column::Path, + worktree_entry::Column::Inode, + worktree_entry::Column::MtimeSeconds, + worktree_entry::Column::MtimeNanos, + worktree_entry::Column::IsSymlink, + worktree_entry::Column::IsIgnored, + ]) + .to_owned(), + ) + .exec(&tx) + .await?; - // let query = format!( - // " - // INSERT INTO worktree_entries ( - // project_id, - // worktree_id, - // id, - // is_dir, - // path, - // inode, - // mtime_seconds, - // mtime_nanos, - // is_symlink, - // is_ignored - // ) - // VALUES {params} - // ON CONFLICT (project_id, worktree_id, id) DO UPDATE SET - // is_dir = excluded.is_dir, - // path = excluded.path, - // inode = excluded.inode, - // mtime_seconds = excluded.mtime_seconds, - // mtime_nanos = excluded.mtime_nanos, - // is_symlink = excluded.is_symlink, - // is_ignored = excluded.is_ignored - // " - // ); - // let mut query = sqlx::query(&query); - // for entry in &update.updated_entries { - // let mtime = entry.mtime.clone().unwrap_or_default(); - // query = query - // .bind(project_id) - // .bind(worktree_id) - // .bind(entry.id as i64) - // .bind(entry.is_dir) - // .bind(&entry.path) - // .bind(entry.inode as i64) - // .bind(mtime.seconds as i64) - // .bind(mtime.nanos as i32) - // .bind(entry.is_symlink) - // .bind(entry.is_ignored); - // } - // query.execute(&mut tx).await?; - // } + worktree_entry::Entity::delete_many() + .filter( + worktree_entry::Column::ProjectId + .eq(project_id) + .and(worktree_entry::Column::WorktreeId.eq(worktree_id)) + .and( + worktree_entry::Column::Id + .is_in(update.removed_entries.iter().map(|id| *id as i64)), + ), + ) + .exec(&tx) + .await?; - // if !update.removed_entries.is_empty() { - // let mut params = "?,".repeat(update.removed_entries.len()); - // params.pop(); - // let query = format!( - // " - // DELETE FROM worktree_entries - // WHERE project_id = ? AND worktree_id = ? 
AND id IN ({params}) - // " - // ); - - // let mut query = sqlx::query(&query).bind(project_id).bind(worktree_id); - // for entry_id in &update.removed_entries { - // query = query.bind(*entry_id as i64); - // } - // query.execute(&mut tx).await?; - // } - - // let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; - // self.commit_room_transaction(room_id, tx, connection_ids) - // .await - - todo!() + let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?; + self.commit_room_transaction(room_id, tx, connection_ids) + .await }) .await } @@ -1703,57 +1684,51 @@ impl Database { connection_id: ConnectionId, ) -> Result>> { self.transact(|tx| async { - todo!() - // let project_id = ProjectId::from_proto(update.project_id); - // let worktree_id = WorktreeId::from_proto(update.worktree_id); - // let summary = update - // .summary - // .as_ref() - // .ok_or_else(|| anyhow!("invalid summary"))?; + let project_id = ProjectId::from_proto(update.project_id); + let worktree_id = update.worktree_id as i64; + let summary = update + .summary + .as_ref() + .ok_or_else(|| anyhow!("invalid summary"))?; - // // Ensure the update comes from the host. - // let room_id: RoomId = sqlx::query_scalar( - // " - // SELECT room_id - // FROM projects - // WHERE id = $1 AND host_connection_id = $2 - // ", - // ) - // .bind(project_id) - // .bind(connection_id.0 as i32) - // .fetch_one(&mut tx) - // .await?; + // Ensure the update comes from the host. + let project = project::Entity::find_by_id(project_id) + .one(&tx) + .await? + .ok_or_else(|| anyhow!("no such project"))?; + if project.host_connection_id != connection_id.0 { + return Err(anyhow!("can't update a project hosted by someone else"))?; + } - // // Update summary. - // sqlx::query( - // " - // INSERT INTO worktree_diagnostic_summaries ( - // project_id, - // worktree_id, - // path, - // language_server_id, - // error_count, - // warning_count - // ) - // VALUES ($1, $2, $3, $4, $5, $6) - // ON CONFLICT (project_id, worktree_id, path) DO UPDATE SET - // language_server_id = excluded.language_server_id, - // error_count = excluded.error_count, - // warning_count = excluded.warning_count - // ", - // ) - // .bind(project_id) - // .bind(worktree_id) - // .bind(&summary.path) - // .bind(summary.language_server_id as i64) - // .bind(summary.error_count as i32) - // .bind(summary.warning_count as i32) - // .execute(&mut tx) - // .await?; + // Update summary. 
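+            // Upsert sketch: `insert(..).on_conflict(OnConflict::columns(..)
+            // .update_columns(..).to_owned())` is sea-orm's builder for the
+            // `INSERT .. ON CONFLICT (project_id, worktree_id, path) DO UPDATE
+            // SET language_server_id = excluded.language_server_id, ..` that
+            // the commented-out sqlx query above wrote as a literal string.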
+ worktree_diagnostic_summary::Entity::insert(worktree_diagnostic_summary::ActiveModel { + project_id: ActiveValue::set(project_id), + worktree_id: ActiveValue::set(worktree_id), + path: ActiveValue::set(summary.path.clone()), + language_server_id: ActiveValue::set(summary.language_server_id as i64), + error_count: ActiveValue::set(summary.error_count), + warning_count: ActiveValue::set(summary.warning_count), + ..Default::default() + }) + .on_conflict( + OnConflict::columns([ + worktree_diagnostic_summary::Column::ProjectId, + worktree_diagnostic_summary::Column::WorktreeId, + worktree_diagnostic_summary::Column::Path, + ]) + .update_columns([ + worktree_diagnostic_summary::Column::LanguageServerId, + worktree_diagnostic_summary::Column::ErrorCount, + worktree_diagnostic_summary::Column::WarningCount, + ]) + .to_owned(), + ) + .exec(&tx) + .await?; - // let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; - // self.commit_room_transaction(room_id, tx, connection_ids) - // .await + let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?; + self.commit_room_transaction(project.room_id, tx, connection_ids) + .await }) .await } @@ -1764,44 +1739,42 @@ impl Database { connection_id: ConnectionId, ) -> Result>> { self.transact(|tx| async { - todo!() - // let project_id = ProjectId::from_proto(update.project_id); - // let server = update - // .server - // .as_ref() - // .ok_or_else(|| anyhow!("invalid language server"))?; + let project_id = ProjectId::from_proto(update.project_id); + let server = update + .server + .as_ref() + .ok_or_else(|| anyhow!("invalid language server"))?; - // // Ensure the update comes from the host. - // let room_id: RoomId = sqlx::query_scalar( - // " - // SELECT room_id - // FROM projects - // WHERE id = $1 AND host_connection_id = $2 - // ", - // ) - // .bind(project_id) - // .bind(connection_id.0 as i32) - // .fetch_one(&mut tx) - // .await?; + // Ensure the update comes from the host. + let project = project::Entity::find_by_id(project_id) + .one(&tx) + .await? + .ok_or_else(|| anyhow!("no such project"))?; + if project.host_connection_id != connection_id.0 { + return Err(anyhow!("can't update a project hosted by someone else"))?; + } - // // Add the newly-started language server. - // sqlx::query( - // " - // INSERT INTO language_servers (project_id, id, name) - // VALUES ($1, $2, $3) - // ON CONFLICT (project_id, id) DO UPDATE SET - // name = excluded.name - // ", - // ) - // .bind(project_id) - // .bind(server.id as i64) - // .bind(&server.name) - // .execute(&mut tx) - // .await?; + // Add the newly-started language server. 
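+            // Keyed on the (project_id, id) composite primary key, so a server
+            // that re-registers (e.g. after the host reconnects) only refreshes
+            // its `name` instead of failing the insert; this matches the old
+            // `ON CONFLICT (project_id, id) DO UPDATE SET name = excluded.name`
+            // from the sqlx version above.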
+ language_server::Entity::insert(language_server::ActiveModel { + project_id: ActiveValue::set(project_id), + id: ActiveValue::set(server.id as i64), + name: ActiveValue::set(server.name.clone()), + ..Default::default() + }) + .on_conflict( + OnConflict::columns([ + language_server::Column::ProjectId, + language_server::Column::Id, + ]) + .update_column(language_server::Column::Name) + .to_owned(), + ) + .exec(&tx) + .await?; - // let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; - // self.commit_room_transaction(room_id, tx, connection_ids) - // .await + let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?; + self.commit_room_transaction(project.room_id, tx, connection_ids) + .await }) .await } @@ -1812,194 +1785,135 @@ impl Database { connection_id: ConnectionId, ) -> Result> { self.transact(|tx| async move { - todo!() - // let (room_id, user_id) = sqlx::query_as::<_, (RoomId, UserId)>( - // " - // SELECT room_id, user_id - // FROM room_participants - // WHERE answering_connection_id = $1 - // ", - // ) - // .bind(connection_id.0 as i32) - // .fetch_one(&mut tx) - // .await?; + let participant = room_participant::Entity::find() + .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0)) + .one(&tx) + .await? + .ok_or_else(|| anyhow!("must join a room first"))?; - // // Ensure project id was shared on this room. - // sqlx::query( - // " - // SELECT 1 - // FROM projects - // WHERE id = $1 AND room_id = $2 - // ", - // ) - // .bind(project_id) - // .bind(room_id) - // .fetch_one(&mut tx) - // .await?; + let project = project::Entity::find_by_id(project_id) + .one(&tx) + .await? + .ok_or_else(|| anyhow!("no such project"))?; + if project.room_id != participant.room_id { + return Err(anyhow!("no such project"))?; + } - // let mut collaborators = sqlx::query_as::<_, ProjectCollaborator>( - // " - // SELECT * - // FROM project_collaborators - // WHERE project_id = $1 - // ", - // ) - // .bind(project_id) - // .fetch_all(&mut tx) - // .await?; - // let replica_ids = collaborators - // .iter() - // .map(|c| c.replica_id) - // .collect::>(); - // let mut replica_id = ReplicaId(1); - // while replica_ids.contains(&replica_id) { - // replica_id.0 += 1; - // } - // let new_collaborator = ProjectCollaborator { - // project_id, - // connection_id: connection_id.0 as i32, - // user_id, - // replica_id, - // is_host: false, - // }; + let mut collaborators = project + .find_related(project_collaborator::Entity) + .all(&tx) + .await?; + let replica_ids = collaborators + .iter() + .map(|c| c.replica_id) + .collect::>(); + let mut replica_id = ReplicaId(1); + while replica_ids.contains(&replica_id) { + replica_id.0 += 1; + } + let new_collaborator = project_collaborator::ActiveModel { + project_id: ActiveValue::set(project_id), + connection_id: ActiveValue::set(connection_id.0), + user_id: ActiveValue::set(participant.user_id), + replica_id: ActiveValue::set(replica_id), + is_host: ActiveValue::set(false), + ..Default::default() + } + .insert(&tx) + .await?; + collaborators.push(new_collaborator); - // sqlx::query( - // " - // INSERT INTO project_collaborators ( - // project_id, - // connection_id, - // user_id, - // replica_id, - // is_host - // ) - // VALUES ($1, $2, $3, $4, $5) - // ", - // ) - // .bind(new_collaborator.project_id) - // .bind(new_collaborator.connection_id) - // .bind(new_collaborator.user_id) - // .bind(new_collaborator.replica_id) - // .bind(new_collaborator.is_host) - // .execute(&mut tx) - // .await?; - // 
collaborators.push(new_collaborator); + let db_worktrees = project.find_related(worktree::Entity).all(&tx).await?; + let mut worktrees = db_worktrees + .into_iter() + .map(|db_worktree| { + ( + db_worktree.id as u64, + Worktree { + id: db_worktree.id as u64, + abs_path: db_worktree.abs_path, + root_name: db_worktree.root_name, + visible: db_worktree.visible, + entries: Default::default(), + diagnostic_summaries: Default::default(), + scan_id: db_worktree.scan_id as u64, + is_complete: db_worktree.is_complete, + }, + ) + }) + .collect::>(); - // let worktree_rows = sqlx::query_as::<_, WorktreeRow>( - // " - // SELECT * - // FROM worktrees - // WHERE project_id = $1 - // ", - // ) - // .bind(project_id) - // .fetch_all(&mut tx) - // .await?; - // let mut worktrees = worktree_rows - // .into_iter() - // .map(|worktree_row| { - // ( - // worktree_row.id, - // Worktree { - // id: worktree_row.id, - // abs_path: worktree_row.abs_path, - // root_name: worktree_row.root_name, - // visible: worktree_row.visible, - // entries: Default::default(), - // diagnostic_summaries: Default::default(), - // scan_id: worktree_row.scan_id as u64, - // is_complete: worktree_row.is_complete, - // }, - // ) - // }) - // .collect::>(); + // Populate worktree entries. + { + let mut db_entries = worktree_entry::Entity::find() + .filter(worktree_entry::Column::ProjectId.eq(project_id)) + .stream(&tx) + .await?; + while let Some(db_entry) = db_entries.next().await { + let db_entry = db_entry?; + if let Some(worktree) = worktrees.get_mut(&(db_entry.worktree_id as u64)) { + worktree.entries.push(proto::Entry { + id: db_entry.id as u64, + is_dir: db_entry.is_dir, + path: db_entry.path, + inode: db_entry.inode as u64, + mtime: Some(proto::Timestamp { + seconds: db_entry.mtime_seconds as u64, + nanos: db_entry.mtime_nanos, + }), + is_symlink: db_entry.is_symlink, + is_ignored: db_entry.is_ignored, + }); + } + } + } - // // Populate worktree entries. - // { - // let mut entries = sqlx::query_as::<_, WorktreeEntry>( - // " - // SELECT * - // FROM worktree_entries - // WHERE project_id = $1 - // ", - // ) - // .bind(project_id) - // .fetch(&mut tx); - // while let Some(entry) = entries.next().await { - // let entry = entry?; - // if let Some(worktree) = worktrees.get_mut(&entry.worktree_id) { - // worktree.entries.push(proto::Entry { - // id: entry.id as u64, - // is_dir: entry.is_dir, - // path: entry.path, - // inode: entry.inode as u64, - // mtime: Some(proto::Timestamp { - // seconds: entry.mtime_seconds as u64, - // nanos: entry.mtime_nanos as u32, - // }), - // is_symlink: entry.is_symlink, - // is_ignored: entry.is_ignored, - // }); - // } - // } - // } + // Populate worktree diagnostic summaries. + { + let mut db_summaries = worktree_diagnostic_summary::Entity::find() + .filter(worktree_diagnostic_summary::Column::ProjectId.eq(project_id)) + .stream(&tx) + .await?; + while let Some(db_summary) = db_summaries.next().await { + let db_summary = db_summary?; + if let Some(worktree) = worktrees.get_mut(&(db_summary.worktree_id as u64)) { + worktree + .diagnostic_summaries + .push(proto::DiagnosticSummary { + path: db_summary.path, + language_server_id: db_summary.language_server_id as u64, + error_count: db_summary.error_count as u32, + warning_count: db_summary.warning_count as u32, + }); + } + } + } - // // Populate worktree diagnostic summaries. 
- // { - // let mut summaries = sqlx::query_as::<_, WorktreeDiagnosticSummary>( - // " - // SELECT * - // FROM worktree_diagnostic_summaries - // WHERE project_id = $1 - // ", - // ) - // .bind(project_id) - // .fetch(&mut tx); - // while let Some(summary) = summaries.next().await { - // let summary = summary?; - // if let Some(worktree) = worktrees.get_mut(&summary.worktree_id) { - // worktree - // .diagnostic_summaries - // .push(proto::DiagnosticSummary { - // path: summary.path, - // language_server_id: summary.language_server_id as u64, - // error_count: summary.error_count as u32, - // warning_count: summary.warning_count as u32, - // }); - // } - // } - // } + // Populate language servers. + let language_servers = project + .find_related(language_server::Entity) + .all(&tx) + .await?; - // // Populate language servers. - // let language_servers = sqlx::query_as::<_, LanguageServer>( - // " - // SELECT * - // FROM language_servers - // WHERE project_id = $1 - // ", - // ) - // .bind(project_id) - // .fetch_all(&mut tx) - // .await?; - - // self.commit_room_transaction( - // room_id, - // tx, - // ( - // Project { - // collaborators, - // worktrees, - // language_servers: language_servers - // .into_iter() - // .map(|language_server| proto::LanguageServer { - // id: language_server.id.to_proto(), - // name: language_server.name, - // }) - // .collect(), - // }, - // replica_id as ReplicaId, - // ), - // ) - // .await + self.commit_room_transaction( + project.room_id, + tx, + ( + Project { + collaborators, + worktrees, + language_servers: language_servers + .into_iter() + .map(|language_server| proto::LanguageServer { + id: language_server.id as u64, + name: language_server.name, + }) + .collect(), + }, + replica_id as ReplicaId, + ), + ) + .await }) .await } @@ -2010,59 +1924,42 @@ impl Database { connection_id: ConnectionId, ) -> Result> { self.transact(|tx| async move { - todo!() - // let result = sqlx::query( - // " - // DELETE FROM project_collaborators - // WHERE project_id = $1 AND connection_id = $2 - // ", - // ) - // .bind(project_id) - // .bind(connection_id.0 as i32) - // .execute(&mut tx) - // .await?; + let result = project_collaborator::Entity::delete_many() + .filter( + project_collaborator::Column::ProjectId + .eq(project_id) + .and(project_collaborator::Column::ConnectionId.eq(connection_id.0)), + ) + .exec(&tx) + .await?; + if result.rows_affected == 0 { + Err(anyhow!("not a collaborator on this project"))?; + } - // if result.rows_affected() == 0 { - // Err(anyhow!("not a collaborator on this project"))?; - // } + let project = project::Entity::find_by_id(project_id) + .one(&tx) + .await? + .ok_or_else(|| anyhow!("no such project"))?; + let collaborators = project + .find_related(project_collaborator::Entity) + .all(&tx) + .await?; + let connection_ids = collaborators + .into_iter() + .map(|collaborator| ConnectionId(collaborator.connection_id)) + .collect(); - // let connection_ids = sqlx::query_scalar::<_, i32>( - // " - // SELECT connection_id - // FROM project_collaborators - // WHERE project_id = $1 - // ", - // ) - // .bind(project_id) - // .fetch_all(&mut tx) - // .await? 
- // .into_iter() - // .map(|id| ConnectionId(id as u32)) - // .collect(); - - // let (room_id, host_user_id, host_connection_id) = - // sqlx::query_as::<_, (RoomId, i32, i32)>( - // " - // SELECT room_id, host_user_id, host_connection_id - // FROM projects - // WHERE id = $1 - // ", - // ) - // .bind(project_id) - // .fetch_one(&mut tx) - // .await?; - - // self.commit_room_transaction( - // room_id, - // tx, - // LeftProject { - // id: project_id, - // host_user_id: UserId(host_user_id), - // host_connection_id: ConnectionId(host_connection_id as u32), - // connection_ids, - // }, - // ) - // .await + self.commit_room_transaction( + project.room_id, + tx, + LeftProject { + id: project_id, + host_user_id: project.host_user_id, + host_connection_id: ConnectionId(project.host_connection_id), + connection_ids, + }, + ) + .await }) .await } @@ -2442,8 +2339,6 @@ id_type!(ProjectCollaboratorId); id_type!(ReplicaId); id_type!(SignupId); id_type!(UserId); -id_type!(WorktreeId); -id_type!(WorktreeEntryId); pub struct LeftRoom { pub room: proto::Room, @@ -2453,7 +2348,7 @@ pub struct LeftRoom { pub struct Project { pub collaborators: Vec, - pub worktrees: BTreeMap, + pub worktrees: BTreeMap, pub language_servers: Vec, } @@ -2465,7 +2360,7 @@ pub struct LeftProject { } pub struct Worktree { - pub id: WorktreeId, + pub id: u64, pub abs_path: String, pub root_name: String, pub visible: bool, diff --git a/crates/collab/src/db/language_server.rs b/crates/collab/src/db/language_server.rs new file mode 100644 index 0000000000..d2c045c121 --- /dev/null +++ b/crates/collab/src/db/language_server.rs @@ -0,0 +1,30 @@ +use super::ProjectId; +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] +#[sea_orm(table_name = "language_servers")] +pub struct Model { + #[sea_orm(primary_key)] + pub project_id: ProjectId, + #[sea_orm(primary_key)] + pub id: i64, + pub name: String, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::project::Entity", + from = "Column::ProjectId", + to = "super::project::Column::Id" + )] + Project, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Project.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/crates/collab/src/db/project.rs b/crates/collab/src/db/project.rs index c8083402a3..5bf8addec8 100644 --- a/crates/collab/src/db/project.rs +++ b/crates/collab/src/db/project.rs @@ -26,7 +26,11 @@ pub enum Relation { )] Room, #[sea_orm(has_many = "super::worktree::Entity")] - Worktree, + Worktrees, + #[sea_orm(has_many = "super::project_collaborator::Entity")] + Collaborators, + #[sea_orm(has_many = "super::language_server::Entity")] + LanguageServers, } impl Related for Entity { @@ -43,7 +47,19 @@ impl Related for Entity { impl Related for Entity { fn to() -> RelationDef { - Relation::Worktree.def() + Relation::Worktrees.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Collaborators.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::LanguageServers.def() } } diff --git a/crates/collab/src/db/project_collaborator.rs b/crates/collab/src/db/project_collaborator.rs index bccf451a63..56048c3181 100644 --- a/crates/collab/src/db/project_collaborator.rs +++ b/crates/collab/src/db/project_collaborator.rs @@ -14,6 +14,19 @@ pub struct Model { } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] -pub enum Relation {} +pub enum Relation { + #[sea_orm( + belongs_to = 
"super::project::Entity", + from = "Column::ProjectId", + to = "super::project::Column::Id" + )] + Project, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Project.def() + } +} impl ActiveModelBehavior for ActiveModel {} diff --git a/crates/collab/src/db/worktree.rs b/crates/collab/src/db/worktree.rs index 8cad41e8a9..b9f0f97dee 100644 --- a/crates/collab/src/db/worktree.rs +++ b/crates/collab/src/db/worktree.rs @@ -1,17 +1,17 @@ -use super::{ProjectId, WorktreeId}; +use super::ProjectId; use sea_orm::entity::prelude::*; #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] #[sea_orm(table_name = "worktrees")] pub struct Model { #[sea_orm(primary_key)] - pub id: WorktreeId, + pub id: i64, #[sea_orm(primary_key)] pub project_id: ProjectId, pub abs_path: String, pub root_name: String, pub visible: bool, - pub scan_id: u32, + pub scan_id: i64, pub is_complete: bool, } diff --git a/crates/collab/src/db/worktree_diagnostic_summary.rs b/crates/collab/src/db/worktree_diagnostic_summary.rs new file mode 100644 index 0000000000..49bf4f6e03 --- /dev/null +++ b/crates/collab/src/db/worktree_diagnostic_summary.rs @@ -0,0 +1,21 @@ +use super::ProjectId; +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] +#[sea_orm(table_name = "worktree_diagnostic_summaries")] +pub struct Model { + #[sea_orm(primary_key)] + pub project_id: ProjectId, + #[sea_orm(primary_key)] + pub worktree_id: i64, + #[sea_orm(primary_key)] + pub path: String, + pub language_server_id: i64, + pub error_count: u32, + pub warning_count: u32, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/crates/collab/src/db/worktree_entry.rs b/crates/collab/src/db/worktree_entry.rs index 8698d844c1..f38ef7b3f7 100644 --- a/crates/collab/src/db/worktree_entry.rs +++ b/crates/collab/src/db/worktree_entry.rs @@ -1,23 +1,25 @@ -use super::{ProjectId, WorktreeEntryId, WorktreeId}; +use super::ProjectId; use sea_orm::entity::prelude::*; #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] #[sea_orm(table_name = "worktree_entries")] pub struct Model { #[sea_orm(primary_key)] - project_id: ProjectId, + pub project_id: ProjectId, #[sea_orm(primary_key)] - worktree_id: WorktreeId, + pub worktree_id: i64, #[sea_orm(primary_key)] - id: WorktreeEntryId, - is_dir: bool, - path: String, - inode: u64, - mtime_seconds: u64, - mtime_nanos: u32, - is_symlink: bool, - is_ignored: bool, + pub id: i64, + pub is_dir: bool, + pub path: String, + pub inode: i64, + pub mtime_seconds: i64, + pub mtime_nanos: u32, + pub is_symlink: bool, + pub is_ignored: bool, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 01866b074d..d3b95a82e6 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1019,7 +1019,7 @@ async fn join_project( .worktrees .iter() .map(|(id, worktree)| proto::WorktreeMetadata { - id: id.to_proto(), + id: *id, root_name: worktree.root_name.clone(), visible: worktree.visible, abs_path: worktree.abs_path.clone(), @@ -1060,7 +1060,7 @@ async fn join_project( // Stream this worktree's entries. 
let message = proto::UpdateWorktree { project_id: project_id.to_proto(), - worktree_id: worktree_id.to_proto(), + worktree_id, abs_path: worktree.abs_path.clone(), root_name: worktree.root_name, updated_entries: worktree.entries, @@ -1078,7 +1078,7 @@ async fn join_project( session.connection_id, proto::UpdateDiagnosticSummary { project_id: project_id.to_proto(), - worktree_id: worktree.id.to_proto(), + worktree_id: worktree.id, summary: Some(summary), }, )?; From 48b6ee313f8777856489df4f3ad0e8f2f111ed05 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 2 Dec 2022 13:58:54 +0100 Subject: [PATCH 104/240] Use i32 to represent Postgres `INTEGER` types in Rust --- crates/collab/src/api.rs | 12 ++-- crates/collab/src/db.rs | 60 +++++++++---------- crates/collab/src/db/project.rs | 2 +- crates/collab/src/db/project_collaborator.rs | 2 +- crates/collab/src/db/room_participant.rs | 6 +- crates/collab/src/db/tests.rs | 2 +- crates/collab/src/db/user.rs | 4 +- .../src/db/worktree_diagnostic_summary.rs | 4 +- crates/collab/src/db/worktree_entry.rs | 2 +- crates/collab/src/integration_tests.rs | 2 +- crates/collab/src/rpc.rs | 7 ++- 11 files changed, 52 insertions(+), 51 deletions(-) diff --git a/crates/collab/src/api.rs b/crates/collab/src/api.rs index a055494791..4c1c60a04f 100644 --- a/crates/collab/src/api.rs +++ b/crates/collab/src/api.rs @@ -76,7 +76,7 @@ pub async fn validate_api_token(req: Request, next: Next) -> impl IntoR #[derive(Debug, Deserialize)] struct AuthenticatedUserParams { - github_user_id: Option, + github_user_id: Option, github_login: String, } @@ -123,14 +123,14 @@ async fn get_users( #[derive(Deserialize, Debug)] struct CreateUserParams { - github_user_id: u32, + github_user_id: i32, github_login: String, email_address: String, email_confirmation_code: Option, #[serde(default)] admin: bool, #[serde(default)] - invite_count: u32, + invite_count: i32, } #[derive(Serialize, Debug)] @@ -204,11 +204,11 @@ async fn create_user( #[derive(Deserialize)] struct UpdateUserParams { admin: Option, - invite_count: Option, + invite_count: Option, } async fn update_user( - Path(user_id): Path, + Path(user_id): Path, Json(params): Json, Extension(app): Extension>, Extension(rpc_server): Extension>, @@ -230,7 +230,7 @@ async fn update_user( } async fn destroy_user( - Path(user_id): Path, + Path(user_id): Path, Extension(app): Extension>, ) -> Result<()> { app.db.destroy_user(UserId(user_id)).await?; diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index b01c6e7504..945ac1b577 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -159,7 +159,7 @@ impl Database { pub async fn get_user_by_github_account( &self, github_login: &str, - github_user_id: Option, + github_user_id: Option, ) -> Result> { self.transact(|tx| async { let tx = tx; @@ -879,7 +879,7 @@ impl Database { .await } - pub async fn set_invite_count_for_user(&self, id: UserId, count: u32) -> Result<()> { + pub async fn set_invite_count_for_user(&self, id: UserId, count: i32) -> Result<()> { self.transact(|tx| async move { if count > 0 { user::Entity::update_many() @@ -910,11 +910,11 @@ impl Database { .await } - pub async fn get_invite_code_for_user(&self, id: UserId) -> Result> { + pub async fn get_invite_code_for_user(&self, id: UserId) -> Result> { self.transact(|tx| async move { match user::Entity::find_by_id(id).one(&tx).await? 
{ Some(user) if user.invite_code.is_some() => { - Ok(Some((user.invite_code.unwrap(), user.invite_count as u32))) + Ok(Some((user.invite_code.unwrap(), user.invite_count))) } _ => Ok(None), } @@ -982,9 +982,9 @@ impl Database { room_participant::ActiveModel { room_id: ActiveValue::set(room_id), user_id: ActiveValue::set(user_id), - answering_connection_id: ActiveValue::set(Some(connection_id.0)), + answering_connection_id: ActiveValue::set(Some(connection_id.0 as i32)), calling_user_id: ActiveValue::set(user_id), - calling_connection_id: ActiveValue::set(connection_id.0), + calling_connection_id: ActiveValue::set(connection_id.0 as i32), ..Default::default() } .insert(&tx) @@ -1009,7 +1009,7 @@ impl Database { room_id: ActiveValue::set(room_id), user_id: ActiveValue::set(called_user_id), calling_user_id: ActiveValue::set(calling_user_id), - calling_connection_id: ActiveValue::set(calling_connection_id.0), + calling_connection_id: ActiveValue::set(calling_connection_id.0 as i32), initial_project_id: ActiveValue::set(initial_project_id), ..Default::default() } @@ -1126,7 +1126,7 @@ impl Database { .and(room_participant::Column::AnsweringConnectionId.is_null()), ) .set(room_participant::ActiveModel { - answering_connection_id: ActiveValue::set(Some(connection_id.0)), + answering_connection_id: ActiveValue::set(Some(connection_id.0 as i32)), ..Default::default() }) .exec(&tx) @@ -1488,7 +1488,7 @@ impl Database { let project = project::ActiveModel { room_id: ActiveValue::set(participant.room_id), host_user_id: ActiveValue::set(participant.user_id), - host_connection_id: ActiveValue::set(connection_id.0), + host_connection_id: ActiveValue::set(connection_id.0 as i32), ..Default::default() } .insert(&tx) @@ -1508,7 +1508,7 @@ impl Database { project_collaborator::ActiveModel { project_id: ActiveValue::set(project.id), - connection_id: ActiveValue::set(connection_id.0), + connection_id: ActiveValue::set(connection_id.0 as i32), user_id: ActiveValue::set(participant.user_id), replica_id: ActiveValue::set(ReplicaId(0)), is_host: ActiveValue::set(true), @@ -1536,7 +1536,7 @@ impl Database { .one(&tx) .await? .ok_or_else(|| anyhow!("project not found"))?; - if project.host_connection_id == connection_id.0 { + if project.host_connection_id == connection_id.0 as i32 { let room_id = project.room_id; project::Entity::delete(project.into_active_model()) .exec(&tx) @@ -1633,7 +1633,7 @@ impl Database { path: ActiveValue::set(entry.path.clone()), inode: ActiveValue::set(entry.inode as i64), mtime_seconds: ActiveValue::set(mtime.seconds as i64), - mtime_nanos: ActiveValue::set(mtime.nanos), + mtime_nanos: ActiveValue::set(mtime.nanos as i32), is_symlink: ActiveValue::set(entry.is_symlink), is_ignored: ActiveValue::set(entry.is_ignored), } @@ -1696,7 +1696,7 @@ impl Database { .one(&tx) .await? 
.ok_or_else(|| anyhow!("no such project"))?; - if project.host_connection_id != connection_id.0 { + if project.host_connection_id != connection_id.0 as i32 { return Err(anyhow!("can't update a project hosted by someone else"))?; } @@ -1706,8 +1706,8 @@ impl Database { worktree_id: ActiveValue::set(worktree_id), path: ActiveValue::set(summary.path.clone()), language_server_id: ActiveValue::set(summary.language_server_id as i64), - error_count: ActiveValue::set(summary.error_count), - warning_count: ActiveValue::set(summary.warning_count), + error_count: ActiveValue::set(summary.error_count as i32), + warning_count: ActiveValue::set(summary.warning_count as i32), ..Default::default() }) .on_conflict( @@ -1750,7 +1750,7 @@ impl Database { .one(&tx) .await? .ok_or_else(|| anyhow!("no such project"))?; - if project.host_connection_id != connection_id.0 { + if project.host_connection_id != connection_id.0 as i32 { return Err(anyhow!("can't update a project hosted by someone else"))?; } @@ -1813,7 +1813,7 @@ impl Database { } let new_collaborator = project_collaborator::ActiveModel { project_id: ActiveValue::set(project_id), - connection_id: ActiveValue::set(connection_id.0), + connection_id: ActiveValue::set(connection_id.0 as i32), user_id: ActiveValue::set(participant.user_id), replica_id: ActiveValue::set(replica_id), is_host: ActiveValue::set(false), @@ -1859,7 +1859,7 @@ impl Database { inode: db_entry.inode as u64, mtime: Some(proto::Timestamp { seconds: db_entry.mtime_seconds as u64, - nanos: db_entry.mtime_nanos, + nanos: db_entry.mtime_nanos as u32, }), is_symlink: db_entry.is_symlink, is_ignored: db_entry.is_ignored, @@ -1946,7 +1946,7 @@ impl Database { .await?; let connection_ids = collaborators .into_iter() - .map(|collaborator| ConnectionId(collaborator.connection_id)) + .map(|collaborator| ConnectionId(collaborator.connection_id as u32)) .collect(); self.commit_room_transaction( @@ -1955,7 +1955,7 @@ impl Database { LeftProject { id: project_id, host_user_id: project.host_user_id, - host_connection_id: ConnectionId(project.host_connection_id), + host_connection_id: ConnectionId(project.host_connection_id as u32), connection_ids, }, ) @@ -1977,7 +1977,7 @@ impl Database { if collaborators .iter() - .any(|collaborator| collaborator.connection_id == connection_id.0) + .any(|collaborator| collaborator.connection_id == connection_id.0 as i32) { Ok(collaborators) } else { @@ -2191,8 +2191,8 @@ impl DerefMut for RoomGuard { #[derive(Debug, Serialize, Deserialize)] pub struct NewUserParams { pub github_login: String, - pub github_user_id: u32, - pub invite_count: u32, + pub github_user_id: i32, + pub invite_count: i32, } #[derive(Debug)] @@ -2227,15 +2227,15 @@ macro_rules! id_type { Deserialize, )] #[serde(transparent)] - pub struct $name(pub u32); + pub struct $name(pub i32); impl $name { #[allow(unused)] - pub const MAX: Self = Self(u32::MAX); + pub const MAX: Self = Self(i32::MAX); #[allow(unused)] pub fn from_proto(value: u64) -> Self { - Self(value as u32) + Self(value as i32) } #[allow(unused)] @@ -2252,7 +2252,7 @@ macro_rules! id_type { impl From<$name> for sea_query::Value { fn from(value: $name) -> Self { - sea_query::Value::Unsigned(Some(value.0)) + sea_query::Value::Int(Some(value.0)) } } @@ -2262,7 +2262,7 @@ macro_rules! id_type { pre: &str, col: &str, ) -> Result { - Ok(Self(u32::try_get(res, pre, col)?)) + Ok(Self(i32::try_get(res, pre, col)?)) } } @@ -2302,11 +2302,11 @@ macro_rules! 
id_type { } fn array_type() -> sea_query::ArrayType { - sea_query::ArrayType::Unsigned + sea_query::ArrayType::Int } fn column_type() -> sea_query::ColumnType { - sea_query::ColumnType::Unsigned(None) + sea_query::ColumnType::Integer(None) } } diff --git a/crates/collab/src/db/project.rs b/crates/collab/src/db/project.rs index 5bf8addec8..b109ddc4b8 100644 --- a/crates/collab/src/db/project.rs +++ b/crates/collab/src/db/project.rs @@ -8,7 +8,7 @@ pub struct Model { pub id: ProjectId, pub room_id: RoomId, pub host_user_id: UserId, - pub host_connection_id: u32, + pub host_connection_id: i32, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] diff --git a/crates/collab/src/db/project_collaborator.rs b/crates/collab/src/db/project_collaborator.rs index 56048c3181..097272fcda 100644 --- a/crates/collab/src/db/project_collaborator.rs +++ b/crates/collab/src/db/project_collaborator.rs @@ -7,7 +7,7 @@ pub struct Model { #[sea_orm(primary_key)] pub id: ProjectCollaboratorId, pub project_id: ProjectId, - pub connection_id: u32, + pub connection_id: i32, pub user_id: UserId, pub replica_id: ReplicaId, pub is_host: bool, diff --git a/crates/collab/src/db/room_participant.rs b/crates/collab/src/db/room_participant.rs index e8f38cf693..c7c804581b 100644 --- a/crates/collab/src/db/room_participant.rs +++ b/crates/collab/src/db/room_participant.rs @@ -8,12 +8,12 @@ pub struct Model { pub id: RoomParticipantId, pub room_id: RoomId, pub user_id: UserId, - pub answering_connection_id: Option, - pub location_kind: Option, + pub answering_connection_id: Option, + pub location_kind: Option, pub location_project_id: Option, pub initial_project_id: Option, pub calling_user_id: UserId, - pub calling_connection_id: u32, + pub calling_connection_id: i32, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] diff --git a/crates/collab/src/db/tests.rs b/crates/collab/src/db/tests.rs index 6ca287746a..b276bd5057 100644 --- a/crates/collab/src/db/tests.rs +++ b/crates/collab/src/db/tests.rs @@ -430,7 +430,7 @@ async fn test_fuzzy_search_users() { false, NewUserParams { github_login: github_login.into(), - github_user_id: i as u32, + github_user_id: i as i32, invite_count: 0, }, ) diff --git a/crates/collab/src/db/user.rs b/crates/collab/src/db/user.rs index 9929233044..c2b157bd0a 100644 --- a/crates/collab/src/db/user.rs +++ b/crates/collab/src/db/user.rs @@ -8,11 +8,11 @@ pub struct Model { #[sea_orm(primary_key)] pub id: UserId, pub github_login: String, - pub github_user_id: Option, + pub github_user_id: Option, pub email_address: Option, pub admin: bool, pub invite_code: Option, - pub invite_count: u32, + pub invite_count: i32, pub inviter_id: Option, pub connected_once: bool, pub metrics_id: Uuid, diff --git a/crates/collab/src/db/worktree_diagnostic_summary.rs b/crates/collab/src/db/worktree_diagnostic_summary.rs index 49bf4f6e03..f3dd8083fb 100644 --- a/crates/collab/src/db/worktree_diagnostic_summary.rs +++ b/crates/collab/src/db/worktree_diagnostic_summary.rs @@ -11,8 +11,8 @@ pub struct Model { #[sea_orm(primary_key)] pub path: String, pub language_server_id: i64, - pub error_count: u32, - pub warning_count: u32, + pub error_count: i32, + pub warning_count: i32, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] diff --git a/crates/collab/src/db/worktree_entry.rs b/crates/collab/src/db/worktree_entry.rs index f38ef7b3f7..413821201a 100644 --- a/crates/collab/src/db/worktree_entry.rs +++ b/crates/collab/src/db/worktree_entry.rs @@ -14,7 +14,7 @@ pub struct Model { pub path: String, pub inode: 
i64, pub mtime_seconds: i64, - pub mtime_nanos: u32, + pub mtime_nanos: i32, pub is_symlink: bool, pub is_ignored: bool, } diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index c554028407..225501c71d 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -5383,7 +5383,7 @@ async fn test_random_collaboration( false, NewUserParams { github_login: username.clone(), - github_user_id: (ix + 1) as u32, + github_user_id: (ix + 1) as i32, invite_count: 0, }, ) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index d3b95a82e6..9d3917a417 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -381,7 +381,7 @@ impl Server { if let Some((code, count)) = invite_code { this.peer.send(connection_id, proto::UpdateInviteInfo { url: format!("{}{}", this.app_state.config.invite_link_prefix, code), - count, + count: count as u32, })?; } } @@ -1008,7 +1008,7 @@ async fn join_project( let collaborators = project .collaborators .iter() - .filter(|collaborator| collaborator.connection_id != session.connection_id.0) + .filter(|collaborator| collaborator.connection_id != session.connection_id.0 as i32) .map(|collaborator| proto::Collaborator { peer_id: collaborator.connection_id as u32, replica_id: collaborator.replica_id.0 as u32, @@ -1313,7 +1313,8 @@ async fn save_buffer( .await .project_collaborators(project_id, session.connection_id) .await?; - collaborators.retain(|collaborator| collaborator.connection_id != session.connection_id.0); + collaborators + .retain(|collaborator| collaborator.connection_id != session.connection_id.0 as i32); let project_connection_ids = collaborators .into_iter() .map(|collaborator| ConnectionId(collaborator.connection_id as u32)); From 7502558631e6cb301114b53fcc948da19b38b200 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 2 Dec 2022 14:22:36 +0100 Subject: [PATCH 105/240] Make all tests pass again after migration to sea-orm --- .../20221111092550_reconnection_support.sql | 10 +- crates/collab/src/db.rs | 150 ++++++++++-------- 2 files changed, 89 insertions(+), 71 deletions(-) diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index a7d45a9759..d23dbfa046 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -10,7 +10,7 @@ ALTER TABLE "projects" CREATE TABLE "worktrees" ( "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, - "id" INTEGER NOT NULL, + "id" INT8 NOT NULL, "root_name" VARCHAR NOT NULL, "abs_path" VARCHAR NOT NULL, "visible" BOOL NOT NULL, @@ -23,7 +23,7 @@ CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id"); CREATE TABLE "worktree_entries" ( "project_id" INTEGER NOT NULL, "worktree_id" INT8 NOT NULL, - "id" INTEGER NOT NULL, + "id" INT8 NOT NULL, "is_dir" BOOL NOT NULL, "path" VARCHAR NOT NULL, "inode" INT8 NOT NULL, @@ -39,9 +39,9 @@ CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree CREATE TABLE "worktree_diagnostic_summaries" ( "project_id" INTEGER NOT NULL, - "worktree_id" INTEGER NOT NULL, + "worktree_id" INT8 NOT NULL, "path" VARCHAR NOT NULL, - "language_server_id" INTEGER NOT NULL, + "language_server_id" INT8 NOT NULL, "error_count" INTEGER NOT NULL, "warning_count" INTEGER NOT NULL, PRIMARY KEY(project_id, worktree_id, path), @@ -52,7 +52,7 @@ CREATE INDEX 
"index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" CREATE TABLE "language_servers" ( "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, - "id" INTEGER NOT NULL, + "id" INT8 NOT NULL, "name" VARCHAR NOT NULL, PRIMARY KEY(project_id, id) ); diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 945ac1b577..7395a7cc76 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1494,17 +1494,21 @@ impl Database { .insert(&tx) .await?; - worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel { - id: ActiveValue::set(worktree.id as i64), - project_id: ActiveValue::set(project.id), - abs_path: ActiveValue::set(worktree.abs_path.clone()), - root_name: ActiveValue::set(worktree.root_name.clone()), - visible: ActiveValue::set(worktree.visible), - scan_id: ActiveValue::set(0), - is_complete: ActiveValue::set(false), - })) - .exec(&tx) - .await?; + if !worktrees.is_empty() { + worktree::Entity::insert_many(worktrees.iter().map(|worktree| { + worktree::ActiveModel { + id: ActiveValue::set(worktree.id as i64), + project_id: ActiveValue::set(project.id), + abs_path: ActiveValue::set(worktree.abs_path.clone()), + root_name: ActiveValue::set(worktree.root_name.clone()), + visible: ActiveValue::set(worktree.visible), + scan_id: ActiveValue::set(0), + is_complete: ActiveValue::set(false), + } + })) + .exec(&tx) + .await?; + } project_collaborator::ActiveModel { project_id: ActiveValue::set(project.id), @@ -1564,17 +1568,27 @@ impl Database { .await? .ok_or_else(|| anyhow!("no such project"))?; - worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel { - id: ActiveValue::set(worktree.id as i64), - project_id: ActiveValue::set(project.id), - abs_path: ActiveValue::set(worktree.abs_path.clone()), - root_name: ActiveValue::set(worktree.root_name.clone()), - visible: ActiveValue::set(worktree.visible), - scan_id: ActiveValue::set(0), - is_complete: ActiveValue::set(false), - })) - .exec(&tx) - .await?; + if !worktrees.is_empty() { + worktree::Entity::insert_many(worktrees.iter().map(|worktree| { + worktree::ActiveModel { + id: ActiveValue::set(worktree.id as i64), + project_id: ActiveValue::set(project.id), + abs_path: ActiveValue::set(worktree.abs_path.clone()), + root_name: ActiveValue::set(worktree.root_name.clone()), + visible: ActiveValue::set(worktree.visible), + scan_id: ActiveValue::set(0), + is_complete: ActiveValue::set(false), + } + })) + .on_conflict( + OnConflict::columns([worktree::Column::ProjectId, worktree::Column::Id]) + .update_column(worktree::Column::RootName) + .to_owned(), + ) + .exec(&tx) + .await?; + } + worktree::Entity::delete_many() .filter( worktree::Column::ProjectId.eq(project.id).and( @@ -1623,53 +1637,57 @@ impl Database { .exec(&tx) .await?; - worktree_entry::Entity::insert_many(update.updated_entries.iter().map(|entry| { - let mtime = entry.mtime.clone().unwrap_or_default(); - worktree_entry::ActiveModel { - project_id: ActiveValue::set(project_id), - worktree_id: ActiveValue::set(worktree_id), - id: ActiveValue::set(entry.id as i64), - is_dir: ActiveValue::set(entry.is_dir), - path: ActiveValue::set(entry.path.clone()), - inode: ActiveValue::set(entry.inode as i64), - mtime_seconds: ActiveValue::set(mtime.seconds as i64), - mtime_nanos: ActiveValue::set(mtime.nanos as i32), - is_symlink: ActiveValue::set(entry.is_symlink), - is_ignored: ActiveValue::set(entry.is_ignored), - } - })) - .on_conflict( - OnConflict::columns([ - worktree_entry::Column::ProjectId, 
- worktree_entry::Column::WorktreeId, - worktree_entry::Column::Id, - ]) - .update_columns([ - worktree_entry::Column::IsDir, - worktree_entry::Column::Path, - worktree_entry::Column::Inode, - worktree_entry::Column::MtimeSeconds, - worktree_entry::Column::MtimeNanos, - worktree_entry::Column::IsSymlink, - worktree_entry::Column::IsIgnored, - ]) - .to_owned(), - ) - .exec(&tx) - .await?; - - worktree_entry::Entity::delete_many() - .filter( - worktree_entry::Column::ProjectId - .eq(project_id) - .and(worktree_entry::Column::WorktreeId.eq(worktree_id)) - .and( - worktree_entry::Column::Id - .is_in(update.removed_entries.iter().map(|id| *id as i64)), - ), + if !update.updated_entries.is_empty() { + worktree_entry::Entity::insert_many(update.updated_entries.iter().map(|entry| { + let mtime = entry.mtime.clone().unwrap_or_default(); + worktree_entry::ActiveModel { + project_id: ActiveValue::set(project_id), + worktree_id: ActiveValue::set(worktree_id), + id: ActiveValue::set(entry.id as i64), + is_dir: ActiveValue::set(entry.is_dir), + path: ActiveValue::set(entry.path.clone()), + inode: ActiveValue::set(entry.inode as i64), + mtime_seconds: ActiveValue::set(mtime.seconds as i64), + mtime_nanos: ActiveValue::set(mtime.nanos as i32), + is_symlink: ActiveValue::set(entry.is_symlink), + is_ignored: ActiveValue::set(entry.is_ignored), + } + })) + .on_conflict( + OnConflict::columns([ + worktree_entry::Column::ProjectId, + worktree_entry::Column::WorktreeId, + worktree_entry::Column::Id, + ]) + .update_columns([ + worktree_entry::Column::IsDir, + worktree_entry::Column::Path, + worktree_entry::Column::Inode, + worktree_entry::Column::MtimeSeconds, + worktree_entry::Column::MtimeNanos, + worktree_entry::Column::IsSymlink, + worktree_entry::Column::IsIgnored, + ]) + .to_owned(), ) .exec(&tx) .await?; + } + + if !update.removed_entries.is_empty() { + worktree_entry::Entity::delete_many() + .filter( + worktree_entry::Column::ProjectId + .eq(project_id) + .and(worktree_entry::Column::WorktreeId.eq(worktree_id)) + .and( + worktree_entry::Column::Id + .is_in(update.removed_entries.iter().map(|id| *id as i64)), + ), + ) + .exec(&tx) + .await?; + } let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?; self.commit_room_transaction(room_id, tx, connection_ids) From 1b46b7a7d6d14e24646ba1db46069ff6b63c9942 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 2 Dec 2022 14:37:26 +0100 Subject: [PATCH 106/240] Move modules into `collab` library as opposed to using the binary This ensures that we can use collab's modules from the seed script as well. 
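The change follows the standard lib/bin split for a Rust crate: `src/lib.rs` becomes the crate root that declares the modules, `src/main.rs` shrinks to a plain entry point, and binaries under `src/bin/` import the library by crate name instead of `#[path]`-including individual source files. A minimal sketch of the resulting layout (module names and imports as in this patch; the comments are editorial):

    // crates/collab/src/lib.rs, the crate root now owns the module tree
    pub mod api;
    pub mod auth;
    pub mod db;
    pub mod env;
    pub mod rpc;

    // crates/collab/src/bin/seed.rs previously compiled its own copy of
    // db.rs via `#[path = "../db.rs"] mod db;`; now it links the library:
    use collab::{db, Error, Result};
    use db::{ConnectOptions, Database, UserId};

Before this patch the seed binary compiled the whole `db` module a second time just for itself (hence the `#[allow(unused)]` on the include); routing it through the library removes that duplication.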
--- crates/collab/src/bin/seed.rs | 10 ++--- crates/collab/src/lib.rs | 65 ++++++++++++++++++++++++++++++ crates/collab/src/main.rs | 75 +++-------------------------------- 3 files changed, 73 insertions(+), 77 deletions(-) diff --git a/crates/collab/src/bin/seed.rs b/crates/collab/src/bin/seed.rs index 3b635540b3..2f7c61147c 100644 --- a/crates/collab/src/bin/seed.rs +++ b/crates/collab/src/bin/seed.rs @@ -1,12 +1,8 @@ -use collab::{Error, Result}; -use db::{DefaultDb, UserId}; +use collab::{db, Error, Result}; +use db::{ConnectOptions, Database, UserId}; use serde::{de::DeserializeOwned, Deserialize}; use std::fmt::Write; -#[allow(unused)] -#[path = "../db.rs"] -mod db; - #[derive(Debug, Deserialize)] struct GitHubUser { id: i32, @@ -17,7 +13,7 @@ struct GitHubUser { #[tokio::main] async fn main() { let database_url = std::env::var("DATABASE_URL").expect("missing DATABASE_URL env var"); - let db = DefaultDb::new(&database_url, 5) + let db = Database::new(ConnectOptions::new(database_url)) .await .expect("failed to connect to postgres database"); let github_token = std::env::var("GITHUB_TOKEN").expect("missing GITHUB_TOKEN env var"); diff --git a/crates/collab/src/lib.rs b/crates/collab/src/lib.rs index 23af3344b5..9011d2a1eb 100644 --- a/crates/collab/src/lib.rs +++ b/crates/collab/src/lib.rs @@ -1,4 +1,15 @@ +pub mod api; +pub mod auth; +pub mod db; +pub mod env; +#[cfg(test)] +mod integration_tests; +pub mod rpc; + use axum::{http::StatusCode, response::IntoResponse}; +use db::Database; +use serde::Deserialize; +use std::{path::PathBuf, sync::Arc}; pub type Result = std::result::Result; @@ -85,3 +96,57 @@ impl std::fmt::Display for Error { } impl std::error::Error for Error {} + +#[derive(Default, Deserialize)] +pub struct Config { + pub http_port: u16, + pub database_url: String, + pub api_token: String, + pub invite_link_prefix: String, + pub live_kit_server: Option, + pub live_kit_key: Option, + pub live_kit_secret: Option, + pub rust_log: Option, + pub log_json: Option, +} + +#[derive(Default, Deserialize)] +pub struct MigrateConfig { + pub database_url: String, + pub migrations_path: Option, +} + +pub struct AppState { + pub db: Arc, + pub live_kit_client: Option>, + pub config: Config, +} + +impl AppState { + pub async fn new(config: Config) -> Result> { + let mut db_options = db::ConnectOptions::new(config.database_url.clone()); + db_options.max_connections(5); + let db = Database::new(db_options).await?; + let live_kit_client = if let Some(((server, key), secret)) = config + .live_kit_server + .as_ref() + .zip(config.live_kit_key.as_ref()) + .zip(config.live_kit_secret.as_ref()) + { + Some(Arc::new(live_kit_server::api::LiveKitClient::new( + server.clone(), + key.clone(), + secret.clone(), + )) as Arc) + } else { + None + }; + + let this = Self { + db: Arc::new(db), + live_kit_client, + config, + }; + Ok(Arc::new(this)) + } +} diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index 4802fd82b4..42ffe50ea3 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -1,22 +1,11 @@ -mod api; -mod auth; -mod db; -mod env; -mod rpc; - -#[cfg(test)] -mod integration_tests; - use anyhow::anyhow; use axum::{routing::get, Router}; -use collab::{Error, Result}; +use collab::{db, env, AppState, Config, MigrateConfig, Result}; use db::Database; -use serde::Deserialize; use std::{ env::args, net::{SocketAddr, TcpListener}, - path::{Path, PathBuf}, - sync::Arc, + path::Path, }; use tracing_log::LogTracer; use tracing_subscriber::{filter::EnvFilter, 
fmt::format::JsonFields, Layer}; @@ -24,60 +13,6 @@ use util::ResultExt; const VERSION: &'static str = env!("CARGO_PKG_VERSION"); -#[derive(Default, Deserialize)] -pub struct Config { - pub http_port: u16, - pub database_url: String, - pub api_token: String, - pub invite_link_prefix: String, - pub live_kit_server: Option, - pub live_kit_key: Option, - pub live_kit_secret: Option, - pub rust_log: Option, - pub log_json: Option, -} - -#[derive(Default, Deserialize)] -pub struct MigrateConfig { - pub database_url: String, - pub migrations_path: Option, -} - -pub struct AppState { - db: Arc, - live_kit_client: Option>, - config: Config, -} - -impl AppState { - async fn new(config: Config) -> Result> { - let mut db_options = db::ConnectOptions::new(config.database_url.clone()); - db_options.max_connections(5); - let db = Database::new(db_options).await?; - let live_kit_client = if let Some(((server, key), secret)) = config - .live_kit_server - .as_ref() - .zip(config.live_kit_key.as_ref()) - .zip(config.live_kit_secret.as_ref()) - { - Some(Arc::new(live_kit_server::api::LiveKitClient::new( - server.clone(), - key.clone(), - secret.clone(), - )) as Arc) - } else { - None - }; - - let this = Self { - db: Arc::new(db), - live_kit_client, - config, - }; - Ok(Arc::new(this)) - } -} - #[tokio::main] async fn main() -> Result<()> { if let Err(error) = env::load_dotenv() { @@ -120,10 +55,10 @@ async fn main() -> Result<()> { let listener = TcpListener::bind(&format!("0.0.0.0:{}", state.config.http_port)) .expect("failed to bind TCP listener"); - let rpc_server = rpc::Server::new(state.clone()); + let rpc_server = collab::rpc::Server::new(state.clone()); - let app = api::routes(rpc_server.clone(), state.clone()) - .merge(rpc::routes(rpc_server.clone())) + let app = collab::api::routes(rpc_server.clone(), state.clone()) + .merge(collab::rpc::routes(rpc_server.clone())) .merge(Router::new().route("/", get(handle_root))); axum::Server::from_tcp(listener)? From 27f6ae945d2c53fe367c87672162913a5aef3baa Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 2 Dec 2022 16:30:00 +0100 Subject: [PATCH 107/240] Clear stale data on startup This is a stopgap measure until we introduce reconnection support. 
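The mechanism: `Database::new` generates a random `epoch` UUID once per server process, and every row that is bound to a live connection is stamped with it (`host_connection_epoch` on projects, `connection_epoch` on project collaborators, `answering_connection_epoch` and `calling_connection_epoch` on room participants). A row carrying any other epoch was written by a previous process whose connections no longer exist, so `clear_stale_data` can delete it unconditionally at startup. Restated as plain SQL for clarity (illustrative; the code below issues the equivalent deletes through sea-orm, and `:current_epoch` is a placeholder for this process's UUID):

    DELETE FROM project_collaborators WHERE connection_epoch <> :current_epoch;
    DELETE FROM room_participants
      WHERE answering_connection_epoch <> :current_epoch
         OR calling_connection_epoch <> :current_epoch;
    DELETE FROM projects WHERE host_connection_epoch <> :current_epoch;

Note that `answering_connection_epoch` is nullable and `NULL <> x` is never true in SQL, so a still-unanswered call survives the sweep unless its calling connection's epoch is also stale, which matches the `.ne()` filters in the Rust code.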
--- .../20221109000000_test_schema.sql | 12 +++++-- .../20221111092550_reconnection_support.sql | 10 +++++- crates/collab/src/bin/seed.rs | 2 +- crates/collab/src/db.rs | 33 +++++++++++++++++++ crates/collab/src/db/project.rs | 1 + crates/collab/src/db/project_collaborator.rs | 1 + crates/collab/src/db/room_participant.rs | 2 ++ crates/collab/src/main.rs | 2 ++ 8 files changed, 59 insertions(+), 4 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index e62f834fbf..347db6a71a 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -43,8 +43,10 @@ CREATE TABLE "projects" ( "id" INTEGER PRIMARY KEY, "room_id" INTEGER REFERENCES rooms (id) NOT NULL, "host_user_id" INTEGER REFERENCES users (id) NOT NULL, - "host_connection_id" INTEGER NOT NULL + "host_connection_id" INTEGER NOT NULL, + "host_connection_epoch" TEXT NOT NULL ); +CREATE INDEX "index_projects_on_host_connection_epoch" ON "projects" ("host_connection_epoch"); CREATE TABLE "worktrees" ( "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, @@ -100,22 +102,28 @@ CREATE TABLE "project_collaborators" ( "id" INTEGER PRIMARY KEY, "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "connection_id" INTEGER NOT NULL, + "connection_epoch" TEXT NOT NULL, "user_id" INTEGER NOT NULL, "replica_id" INTEGER NOT NULL, "is_host" BOOLEAN NOT NULL ); CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id"); CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id"); +CREATE INDEX "index_project_collaborators_on_connection_epoch" ON "project_collaborators" ("connection_epoch"); CREATE TABLE "room_participants" ( "id" INTEGER PRIMARY KEY, "room_id" INTEGER NOT NULL REFERENCES rooms (id), "user_id" INTEGER NOT NULL REFERENCES users (id), "answering_connection_id" INTEGER, + "answering_connection_epoch" TEXT, "location_kind" INTEGER, "location_project_id" INTEGER REFERENCES projects (id), "initial_project_id" INTEGER REFERENCES projects (id), "calling_user_id" INTEGER NOT NULL REFERENCES users (id), - "calling_connection_id" INTEGER NOT NULL + "calling_connection_id" INTEGER NOT NULL, + "calling_connection_epoch" TEXT NOT NULL ); CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id"); +CREATE INDEX "index_room_participants_on_answering_connection_epoch" ON "room_participants" ("answering_connection_epoch"); +CREATE INDEX "index_room_participants_on_calling_connection_epoch" ON "room_participants" ("calling_connection_epoch"); diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index d23dbfa046..6278fa7a59 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -6,7 +6,9 @@ CREATE TABLE IF NOT EXISTS "rooms" ( ALTER TABLE "projects" ADD "room_id" INTEGER REFERENCES rooms (id), ADD "host_connection_id" INTEGER, + ADD "host_connection_epoch" UUID, DROP COLUMN "unregistered"; +CREATE INDEX "index_projects_on_host_connection_epoch" ON "projects" ("host_connection_epoch"); CREATE TABLE "worktrees" ( "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, @@ -62,22 +64,28 @@ 
CREATE TABLE "project_collaborators" ( "id" SERIAL PRIMARY KEY, "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "connection_id" INTEGER NOT NULL, + "connection_epoch" UUID NOT NULL, "user_id" INTEGER NOT NULL, "replica_id" INTEGER NOT NULL, "is_host" BOOLEAN NOT NULL ); CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id"); CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id"); +CREATE INDEX "index_project_collaborators_on_connection_epoch" ON "project_collaborators" ("connection_epoch"); CREATE TABLE "room_participants" ( "id" SERIAL PRIMARY KEY, "room_id" INTEGER NOT NULL REFERENCES rooms (id), "user_id" INTEGER NOT NULL REFERENCES users (id), "answering_connection_id" INTEGER, + "answering_connection_epoch" UUID, "location_kind" INTEGER, "location_project_id" INTEGER REFERENCES projects (id), "initial_project_id" INTEGER REFERENCES projects (id), "calling_user_id" INTEGER NOT NULL REFERENCES users (id), - "calling_connection_id" INTEGER NOT NULL + "calling_connection_id" INTEGER NOT NULL, + "calling_connection_epoch" UUID NOT NULL ); CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id"); +CREATE INDEX "index_room_participants_on_answering_connection_epoch" ON "room_participants" ("answering_connection_epoch"); +CREATE INDEX "index_room_participants_on_calling_connection_epoch" ON "room_participants" ("calling_connection_epoch"); diff --git a/crates/collab/src/bin/seed.rs b/crates/collab/src/bin/seed.rs index 2f7c61147c..9860b8be84 100644 --- a/crates/collab/src/bin/seed.rs +++ b/crates/collab/src/bin/seed.rs @@ -1,4 +1,4 @@ -use collab::{db, Error, Result}; +use collab::db; use db::{ConnectOptions, Database, UserId}; use serde::{de::DeserializeOwned, Deserialize}; use std::fmt::Write; diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 7395a7cc76..05d6274108 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -47,6 +47,7 @@ pub struct Database { background: Option>, #[cfg(test)] runtime: Option, + epoch: Uuid, } impl Database { @@ -59,6 +60,7 @@ impl Database { background: None, #[cfg(test)] runtime: None, + epoch: Uuid::new_v4(), }) } @@ -103,6 +105,30 @@ impl Database { Ok(new_migrations) } + pub async fn clear_stale_data(&self) -> Result<()> { + self.transact(|tx| async { + project_collaborator::Entity::delete_many() + .filter(project_collaborator::Column::ConnectionEpoch.ne(self.epoch)) + .exec(&tx) + .await?; + room_participant::Entity::delete_many() + .filter( + room_participant::Column::AnsweringConnectionEpoch + .ne(self.epoch) + .or(room_participant::Column::CallingConnectionEpoch.ne(self.epoch)), + ) + .exec(&tx) + .await?; + project::Entity::delete_many() + .filter(project::Column::HostConnectionEpoch.ne(self.epoch)) + .exec(&tx) + .await?; + tx.commit().await?; + Ok(()) + }) + .await + } + // users pub async fn create_user( @@ -983,8 +1009,10 @@ impl Database { room_id: ActiveValue::set(room_id), user_id: ActiveValue::set(user_id), answering_connection_id: ActiveValue::set(Some(connection_id.0 as i32)), + answering_connection_epoch: ActiveValue::set(Some(self.epoch)), calling_user_id: ActiveValue::set(user_id), calling_connection_id: ActiveValue::set(connection_id.0 as i32), + calling_connection_epoch: ActiveValue::set(self.epoch), ..Default::default() } .insert(&tx) @@ -1010,6 +1038,7 @@ impl Database { user_id: ActiveValue::set(called_user_id), 
calling_user_id: ActiveValue::set(calling_user_id), calling_connection_id: ActiveValue::set(calling_connection_id.0 as i32), + calling_connection_epoch: ActiveValue::set(self.epoch), initial_project_id: ActiveValue::set(initial_project_id), ..Default::default() } @@ -1127,6 +1156,7 @@ impl Database { ) .set(room_participant::ActiveModel { answering_connection_id: ActiveValue::set(Some(connection_id.0 as i32)), + answering_connection_epoch: ActiveValue::set(Some(self.epoch)), ..Default::default() }) .exec(&tx) @@ -1489,6 +1519,7 @@ impl Database { room_id: ActiveValue::set(participant.room_id), host_user_id: ActiveValue::set(participant.user_id), host_connection_id: ActiveValue::set(connection_id.0 as i32), + host_connection_epoch: ActiveValue::set(self.epoch), ..Default::default() } .insert(&tx) @@ -1513,6 +1544,7 @@ impl Database { project_collaborator::ActiveModel { project_id: ActiveValue::set(project.id), connection_id: ActiveValue::set(connection_id.0 as i32), + connection_epoch: ActiveValue::set(self.epoch), user_id: ActiveValue::set(participant.user_id), replica_id: ActiveValue::set(ReplicaId(0)), is_host: ActiveValue::set(true), @@ -1832,6 +1864,7 @@ impl Database { let new_collaborator = project_collaborator::ActiveModel { project_id: ActiveValue::set(project_id), connection_id: ActiveValue::set(connection_id.0 as i32), + connection_epoch: ActiveValue::set(self.epoch), user_id: ActiveValue::set(participant.user_id), replica_id: ActiveValue::set(replica_id), is_host: ActiveValue::set(false), diff --git a/crates/collab/src/db/project.rs b/crates/collab/src/db/project.rs index b109ddc4b8..971a8fcefb 100644 --- a/crates/collab/src/db/project.rs +++ b/crates/collab/src/db/project.rs @@ -9,6 +9,7 @@ pub struct Model { pub room_id: RoomId, pub host_user_id: UserId, pub host_connection_id: i32, + pub host_connection_epoch: Uuid, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] diff --git a/crates/collab/src/db/project_collaborator.rs b/crates/collab/src/db/project_collaborator.rs index 097272fcda..5db307f5df 100644 --- a/crates/collab/src/db/project_collaborator.rs +++ b/crates/collab/src/db/project_collaborator.rs @@ -8,6 +8,7 @@ pub struct Model { pub id: ProjectCollaboratorId, pub project_id: ProjectId, pub connection_id: i32, + pub connection_epoch: Uuid, pub user_id: UserId, pub replica_id: ReplicaId, pub is_host: bool, diff --git a/crates/collab/src/db/room_participant.rs b/crates/collab/src/db/room_participant.rs index c7c804581b..783f45aa93 100644 --- a/crates/collab/src/db/room_participant.rs +++ b/crates/collab/src/db/room_participant.rs @@ -9,11 +9,13 @@ pub struct Model { pub room_id: RoomId, pub user_id: UserId, pub answering_connection_id: Option, + pub answering_connection_epoch: Option, pub location_kind: Option, pub location_project_id: Option, pub initial_project_id: Option, pub calling_user_id: UserId, pub calling_connection_id: i32, + pub calling_connection_epoch: Uuid, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index 42ffe50ea3..a288e0f3ce 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -52,6 +52,8 @@ async fn main() -> Result<()> { init_tracing(&config); let state = AppState::new(config).await?; + state.db.clear_stale_data().await?; + let listener = TcpListener::bind(&format!("0.0.0.0:{}", state.config.http_port)) .expect("failed to bind TCP listener"); From 568de814aad478ccca1e792e83cb24ca7fea3172 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: 
Fri, 2 Dec 2022 16:52:48 +0100 Subject: [PATCH 108/240] Delete empty rooms --- crates/collab/src/db.rs | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 05d6274108..ea9757a973 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -123,6 +123,18 @@ impl Database { .filter(project::Column::HostConnectionEpoch.ne(self.epoch)) .exec(&tx) .await?; + room::Entity::delete_many() + .filter( + room::Column::Id.not_in_subquery( + Query::select() + .column(room_participant::Column::RoomId) + .from(room_participant::Entity) + .distinct() + .to_owned(), + ), + ) + .exec(&tx) + .await?; tx.commit().await?; Ok(()) }) @@ -1272,8 +1284,12 @@ impl Database { .await?; let room = self.get_room(room_id, &tx).await?; - Ok(Some( - self.commit_room_transaction( + if room.participants.is_empty() { + room::Entity::delete_by_id(room_id).exec(&tx).await?; + } + + let left_room = self + .commit_room_transaction( room_id, tx, LeftRoom { @@ -1282,8 +1298,13 @@ impl Database { canceled_calls_to_user_ids, }, ) - .await?, - )) + .await?; + + if left_room.room.participants.is_empty() { + self.rooms.remove(&room_id); + } + + Ok(Some(left_room)) } else { Ok(None) } From 1c30767592b2f204c70189f0a80580f7cbee8016 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 2 Dec 2022 19:20:51 +0100 Subject: [PATCH 109/240] Remove stale `Error` variant Co-Authored-By: Max Brunsfeld --- crates/collab/src/db.rs | 2 +- crates/collab/src/lib.rs | 16 ++-------------- 2 files changed, 3 insertions(+), 15 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 2a8163b9c8..fd1ed7d50f 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -2203,7 +2203,7 @@ impl Database { match f(tx).await { Ok(result) => return Ok(result), Err(error) => match error { - Error::Database2( + Error::Database( DbErr::Exec(sea_orm::RuntimeErr::SqlxError(error)) | DbErr::Query(sea_orm::RuntimeErr::SqlxError(error)), ) if error diff --git a/crates/collab/src/lib.rs b/crates/collab/src/lib.rs index 9011d2a1eb..24a9fc6117 100644 --- a/crates/collab/src/lib.rs +++ b/crates/collab/src/lib.rs @@ -15,8 +15,7 @@ pub type Result = std::result::Result; pub enum Error { Http(StatusCode, String), - Database(sqlx::Error), - Database2(sea_orm::error::DbErr), + Database(sea_orm::error::DbErr), Internal(anyhow::Error), } @@ -26,15 +25,9 @@ impl From for Error { } } -impl From for Error { - fn from(error: sqlx::Error) -> Self { - Self::Database(error) - } -} - impl From for Error { fn from(error: sea_orm::error::DbErr) -> Self { - Self::Database2(error) + Self::Database(error) } } @@ -63,9 +56,6 @@ impl IntoResponse for Error { Error::Database(error) => { (StatusCode::INTERNAL_SERVER_ERROR, format!("{}", &error)).into_response() } - Error::Database2(error) => { - (StatusCode::INTERNAL_SERVER_ERROR, format!("{}", &error)).into_response() - } Error::Internal(error) => { (StatusCode::INTERNAL_SERVER_ERROR, format!("{}", &error)).into_response() } @@ -78,7 +68,6 @@ impl std::fmt::Debug for Error { match self { Error::Http(code, message) => (code, message).fmt(f), Error::Database(error) => error.fmt(f), - Error::Database2(error) => error.fmt(f), Error::Internal(error) => error.fmt(f), } } @@ -89,7 +78,6 @@ impl std::fmt::Display for Error { match self { Error::Http(code, message) => write!(f, "{code}: {message}"), Error::Database(error) => error.fmt(f), - Error::Database2(error) => error.fmt(f), Error::Internal(error) => 
error.fmt(f), } } From d96f524fb6bb8873d8baaf96fcca7f690372fc53 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 2 Dec 2022 20:36:50 +0100 Subject: [PATCH 110/240] WIP: Manually rollback transactions to avoid spurious savepoint failure TODO: - Avoid unwrapping transaction after f(tx) - Remove duplication between `transaction` and `room_transaction` - Introduce random delay before and after committing a transaction - Run lots of randomized tests - Investigate diverging diagnostic summaries Co-Authored-By: Max Brunsfeld --- crates/collab/src/db.rs | 565 +++++++++++++++++++++------------------ crates/collab/src/rpc.rs | 4 +- 2 files changed, 300 insertions(+), 269 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index fd1ed7d50f..e667930cad 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -106,10 +106,10 @@ impl Database { } pub async fn clear_stale_data(&self) -> Result<()> { - self.transact(|tx| async { + self.transaction(|tx| async move { project_collaborator::Entity::delete_many() .filter(project_collaborator::Column::ConnectionEpoch.ne(self.epoch)) - .exec(&tx) + .exec(&*tx) .await?; room_participant::Entity::delete_many() .filter( @@ -117,11 +117,11 @@ impl Database { .ne(self.epoch) .or(room_participant::Column::CallingConnectionEpoch.ne(self.epoch)), ) - .exec(&tx) + .exec(&*tx) .await?; project::Entity::delete_many() .filter(project::Column::HostConnectionEpoch.ne(self.epoch)) - .exec(&tx) + .exec(&*tx) .await?; room::Entity::delete_many() .filter( @@ -133,9 +133,8 @@ impl Database { .to_owned(), ), ) - .exec(&tx) + .exec(&*tx) .await?; - tx.commit().await?; Ok(()) }) .await @@ -149,7 +148,8 @@ impl Database { admin: bool, params: NewUserParams, ) -> Result { - self.transact(|tx| async { + self.transaction(|tx| async { + let tx = tx; let user = user::Entity::insert(user::ActiveModel { email_address: ActiveValue::set(Some(email_address.into())), github_login: ActiveValue::set(params.github_login.clone()), @@ -163,11 +163,9 @@ impl Database { .update_column(user::Column::GithubLogin) .to_owned(), ) - .exec_with_returning(&tx) + .exec_with_returning(&*tx) .await?; - tx.commit().await?; - Ok(NewUserResult { user_id: user.id, metrics_id: user.metrics_id.to_string(), @@ -179,16 +177,16 @@ impl Database { } pub async fn get_user_by_id(&self, id: UserId) -> Result> { - self.transact(|tx| async move { Ok(user::Entity::find_by_id(id).one(&tx).await?) }) + self.transaction(|tx| async move { Ok(user::Entity::find_by_id(id).one(&*tx).await?) }) .await } pub async fn get_users_by_ids(&self, ids: Vec) -> Result> { - self.transact(|tx| async { + self.transaction(|tx| async { let tx = tx; Ok(user::Entity::find() .filter(user::Column::Id.is_in(ids.iter().copied())) - .all(&tx) + .all(&*tx) .await?) }) .await @@ -199,32 +197,32 @@ impl Database { github_login: &str, github_user_id: Option, ) -> Result> { - self.transact(|tx| async { - let tx = tx; + self.transaction(|tx| async move { + let tx = &*tx; if let Some(github_user_id) = github_user_id { if let Some(user_by_github_user_id) = user::Entity::find() .filter(user::Column::GithubUserId.eq(github_user_id)) - .one(&tx) + .one(tx) .await? 
{ let mut user_by_github_user_id = user_by_github_user_id.into_active_model(); user_by_github_user_id.github_login = ActiveValue::set(github_login.into()); - Ok(Some(user_by_github_user_id.update(&tx).await?)) + Ok(Some(user_by_github_user_id.update(tx).await?)) } else if let Some(user_by_github_login) = user::Entity::find() .filter(user::Column::GithubLogin.eq(github_login)) - .one(&tx) + .one(tx) .await? { let mut user_by_github_login = user_by_github_login.into_active_model(); user_by_github_login.github_user_id = ActiveValue::set(Some(github_user_id)); - Ok(Some(user_by_github_login.update(&tx).await?)) + Ok(Some(user_by_github_login.update(tx).await?)) } else { Ok(None) } } else { Ok(user::Entity::find() .filter(user::Column::GithubLogin.eq(github_login)) - .one(&tx) + .one(tx) .await?) } }) @@ -232,12 +230,12 @@ impl Database { } pub async fn get_all_users(&self, page: u32, limit: u32) -> Result> { - self.transact(|tx| async move { + self.transaction(|tx| async move { Ok(user::Entity::find() .order_by_asc(user::Column::GithubLogin) .limit(limit as u64) .offset(page as u64 * limit as u64) - .all(&tx) + .all(&*tx) .await?) }) .await @@ -247,7 +245,7 @@ impl Database { &self, invited_by_another_user: bool, ) -> Result> { - self.transact(|tx| async move { + self.transaction(|tx| async move { Ok(user::Entity::find() .filter( user::Column::InviteCount @@ -258,7 +256,7 @@ impl Database { user::Column::InviterId.is_null() }), ) - .all(&tx) + .all(&*tx) .await?) }) .await @@ -270,12 +268,12 @@ impl Database { MetricsId, } - self.transact(|tx| async move { + self.transaction(|tx| async move { let metrics_id: Uuid = user::Entity::find_by_id(id) .select_only() .column(user::Column::MetricsId) .into_values::<_, QueryAs>() - .one(&tx) + .one(&*tx) .await? .ok_or_else(|| anyhow!("could not find user"))?; Ok(metrics_id.to_string()) @@ -284,45 +282,42 @@ impl Database { } pub async fn set_user_is_admin(&self, id: UserId, is_admin: bool) -> Result<()> { - self.transact(|tx| async move { + self.transaction(|tx| async move { user::Entity::update_many() .filter(user::Column::Id.eq(id)) .set(user::ActiveModel { admin: ActiveValue::set(is_admin), ..Default::default() }) - .exec(&tx) + .exec(&*tx) .await?; - tx.commit().await?; Ok(()) }) .await } pub async fn set_user_connected_once(&self, id: UserId, connected_once: bool) -> Result<()> { - self.transact(|tx| async move { + self.transaction(|tx| async move { user::Entity::update_many() .filter(user::Column::Id.eq(id)) .set(user::ActiveModel { connected_once: ActiveValue::set(connected_once), ..Default::default() }) - .exec(&tx) + .exec(&*tx) .await?; - tx.commit().await?; Ok(()) }) .await } pub async fn destroy_user(&self, id: UserId) -> Result<()> { - self.transact(|tx| async move { + self.transaction(|tx| async move { access_token::Entity::delete_many() .filter(access_token::Column::UserId.eq(id)) - .exec(&tx) + .exec(&*tx) .await?; - user::Entity::delete_by_id(id).exec(&tx).await?; - tx.commit().await?; + user::Entity::delete_by_id(id).exec(&*tx).await?; Ok(()) }) .await @@ -342,7 +337,7 @@ impl Database { user_b_busy: bool, } - self.transact(|tx| async move { + self.transaction(|tx| async move { let user_a_participant = Alias::new("user_a_participant"); let user_b_participant = Alias::new("user_b_participant"); let mut db_contacts = contact::Entity::find() @@ -372,7 +367,7 @@ impl Database { user_b_participant, ) .into_model::() - .stream(&tx) + .stream(&*tx) .await?; let mut contacts = Vec::new(); @@ -421,10 +416,10 @@ impl Database { } pub async fn 
is_user_busy(&self, user_id: UserId) -> Result { - self.transact(|tx| async move { + self.transaction(|tx| async move { let participant = room_participant::Entity::find() .filter(room_participant::Column::UserId.eq(user_id)) - .one(&tx) + .one(&*tx) .await?; Ok(participant.is_some()) }) @@ -432,7 +427,7 @@ impl Database { } pub async fn has_contact(&self, user_id_1: UserId, user_id_2: UserId) -> Result { - self.transact(|tx| async move { + self.transaction(|tx| async move { let (id_a, id_b) = if user_id_1 < user_id_2 { (user_id_1, user_id_2) } else { @@ -446,7 +441,7 @@ impl Database { .and(contact::Column::UserIdB.eq(id_b)) .and(contact::Column::Accepted.eq(true)), ) - .one(&tx) + .one(&*tx) .await? .is_some()) }) @@ -454,7 +449,7 @@ impl Database { } pub async fn send_contact_request(&self, sender_id: UserId, receiver_id: UserId) -> Result<()> { - self.transact(|tx| async move { + self.transaction(|tx| async move { let (id_a, id_b, a_to_b) = if sender_id < receiver_id { (sender_id, receiver_id, true) } else { @@ -487,11 +482,10 @@ impl Database { ) .to_owned(), ) - .exec_without_returning(&tx) + .exec_without_returning(&*tx) .await?; if rows_affected == 1 { - tx.commit().await?; Ok(()) } else { Err(anyhow!("contact already requested"))? @@ -501,7 +495,7 @@ impl Database { } pub async fn remove_contact(&self, requester_id: UserId, responder_id: UserId) -> Result<()> { - self.transact(|tx| async move { + self.transaction(|tx| async move { let (id_a, id_b) = if responder_id < requester_id { (responder_id, requester_id) } else { @@ -514,11 +508,10 @@ impl Database { .eq(id_a) .and(contact::Column::UserIdB.eq(id_b)), ) - .exec(&tx) + .exec(&*tx) .await?; if result.rows_affected == 1 { - tx.commit().await?; Ok(()) } else { Err(anyhow!("no such contact"))? @@ -532,7 +525,7 @@ impl Database { user_id: UserId, contact_user_id: UserId, ) -> Result<()> { - self.transact(|tx| async move { + self.transaction(|tx| async move { let (id_a, id_b, a_to_b) = if user_id < contact_user_id { (user_id, contact_user_id, true) } else { @@ -557,12 +550,11 @@ impl Database { .and(contact::Column::Accepted.eq(false))), ), ) - .exec(&tx) + .exec(&*tx) .await?; if result.rows_affected == 0 { Err(anyhow!("no such contact request"))? } else { - tx.commit().await?; Ok(()) } }) @@ -575,7 +567,7 @@ impl Database { requester_id: UserId, accept: bool, ) -> Result<()> { - self.transact(|tx| async move { + self.transaction(|tx| async move { let (id_a, id_b, a_to_b) = if responder_id < requester_id { (responder_id, requester_id, false) } else { @@ -594,7 +586,7 @@ impl Database { .and(contact::Column::UserIdB.eq(id_b)) .and(contact::Column::AToB.eq(a_to_b)), ) - .exec(&tx) + .exec(&*tx) .await?; result.rows_affected } else { @@ -606,14 +598,13 @@ impl Database { .and(contact::Column::AToB.eq(a_to_b)) .and(contact::Column::Accepted.eq(false)), ) - .exec(&tx) + .exec(&*tx) .await?; result.rows_affected }; if rows_affected == 1 { - tx.commit().await?; Ok(()) } else { Err(anyhow!("no such contact request"))? @@ -635,7 +626,7 @@ impl Database { } pub async fn fuzzy_search_users(&self, name_query: &str, limit: u32) -> Result> { - self.transact(|tx| async { + self.transaction(|tx| async { let tx = tx; let like_string = Self::fuzzy_like_string(name_query); let query = " @@ -652,7 +643,7 @@ impl Database { query.into(), vec![like_string.into(), name_query.into(), limit.into()], )) - .all(&tx) + .all(&*tx) .await?) 
}) .await @@ -661,7 +652,7 @@ impl Database { // signups pub async fn create_signup(&self, signup: &NewSignup) -> Result<()> { - self.transact(|tx| async { + self.transaction(|tx| async move { signup::Entity::insert(signup::ActiveModel { email_address: ActiveValue::set(signup.email_address.clone()), email_confirmation_code: ActiveValue::set(random_email_confirmation_code()), @@ -681,16 +672,15 @@ impl Database { .update_column(signup::Column::EmailAddress) .to_owned(), ) - .exec(&tx) + .exec(&*tx) .await?; - tx.commit().await?; Ok(()) }) .await } pub async fn get_waitlist_summary(&self) -> Result { - self.transact(|tx| async move { + self.transaction(|tx| async move { let query = " SELECT COUNT(*) as count, @@ -711,7 +701,7 @@ impl Database { query.into(), vec![], )) - .one(&tx) + .one(&*tx) .await? .ok_or_else(|| anyhow!("invalid result"))?, ) @@ -724,23 +714,23 @@ impl Database { .iter() .map(|s| s.email_address.as_str()) .collect::>(); - self.transact(|tx| async { + self.transaction(|tx| async { + let tx = tx; signup::Entity::update_many() .filter(signup::Column::EmailAddress.is_in(emails.iter().copied())) .set(signup::ActiveModel { email_confirmation_sent: ActiveValue::set(true), ..Default::default() }) - .exec(&tx) + .exec(&*tx) .await?; - tx.commit().await?; Ok(()) }) .await } pub async fn get_unsent_invites(&self, count: usize) -> Result> { - self.transact(|tx| async move { + self.transaction(|tx| async move { Ok(signup::Entity::find() .select_only() .column(signup::Column::EmailAddress) @@ -755,7 +745,7 @@ impl Database { .order_by_asc(signup::Column::CreatedAt) .limit(count as u64) .into_model() - .all(&tx) + .all(&*tx) .await?) }) .await @@ -769,10 +759,10 @@ impl Database { email_address: &str, device_id: Option<&str>, ) -> Result { - self.transact(|tx| async move { + self.transaction(|tx| async move { let existing_user = user::Entity::find() .filter(user::Column::EmailAddress.eq(email_address)) - .one(&tx) + .one(&*tx) .await?; if existing_user.is_some() { @@ -785,7 +775,7 @@ impl Database { .eq(code) .and(user::Column::InviteCount.gt(0)), ) - .one(&tx) + .one(&*tx) .await? { Some(inviting_user) => inviting_user, @@ -806,7 +796,7 @@ impl Database { user::Column::InviteCount, Expr::col(user::Column::InviteCount).sub(1), ) - .exec(&tx) + .exec(&*tx) .await?; let signup = signup::Entity::insert(signup::ActiveModel { @@ -826,9 +816,8 @@ impl Database { .update_column(signup::Column::InvitingUserId) .to_owned(), ) - .exec_with_returning(&tx) + .exec_with_returning(&*tx) .await?; - tx.commit().await?; Ok(Invite { email_address: signup.email_address, @@ -843,7 +832,7 @@ impl Database { invite: &Invite, user: NewUserParams, ) -> Result> { - self.transact(|tx| async { + self.transaction(|tx| async { let tx = tx; let signup = signup::Entity::find() .filter( @@ -854,7 +843,7 @@ impl Database { .eq(invite.email_confirmation_code.as_str()), ), ) - .one(&tx) + .one(&*tx) .await? 
.ok_or_else(|| Error::Http(StatusCode::NOT_FOUND, "no such invite".to_string()))?; @@ -881,12 +870,12 @@ impl Database { ]) .to_owned(), ) - .exec_with_returning(&tx) + .exec_with_returning(&*tx) .await?; let mut signup = signup.into_active_model(); signup.user_id = ActiveValue::set(Some(user.id)); - let signup = signup.update(&tx).await?; + let signup = signup.update(&*tx).await?; if let Some(inviting_user_id) = signup.inviting_user_id { contact::Entity::insert(contact::ActiveModel { @@ -898,11 +887,10 @@ impl Database { ..Default::default() }) .on_conflict(OnConflict::new().do_nothing().to_owned()) - .exec_without_returning(&tx) + .exec_without_returning(&*tx) .await?; } - tx.commit().await?; Ok(Some(NewUserResult { user_id: user.id, metrics_id: user.metrics_id.to_string(), @@ -914,7 +902,7 @@ impl Database { } pub async fn set_invite_count_for_user(&self, id: UserId, count: i32) -> Result<()> { - self.transact(|tx| async move { + self.transaction(|tx| async move { if count > 0 { user::Entity::update_many() .filter( @@ -926,7 +914,7 @@ impl Database { invite_code: ActiveValue::set(Some(random_invite_code())), ..Default::default() }) - .exec(&tx) + .exec(&*tx) .await?; } @@ -936,17 +924,16 @@ impl Database { invite_count: ActiveValue::set(count), ..Default::default() }) - .exec(&tx) + .exec(&*tx) .await?; - tx.commit().await?; Ok(()) }) .await } pub async fn get_invite_code_for_user(&self, id: UserId) -> Result> { - self.transact(|tx| async move { - match user::Entity::find_by_id(id).one(&tx).await? { + self.transaction(|tx| async move { + match user::Entity::find_by_id(id).one(&*tx).await? { Some(user) if user.invite_code.is_some() => { Ok(Some((user.invite_code.unwrap(), user.invite_count))) } @@ -957,10 +944,10 @@ impl Database { } pub async fn get_user_for_invite_code(&self, code: &str) -> Result { - self.transact(|tx| async move { + self.transaction(|tx| async move { user::Entity::find() .filter(user::Column::InviteCode.eq(code)) - .one(&tx) + .one(&*tx) .await? 
.ok_or_else(|| { Error::Http( @@ -978,14 +965,14 @@ impl Database { &self, user_id: UserId, ) -> Result> { - self.transact(|tx| async move { + self.transaction(|tx| async move { let pending_participant = room_participant::Entity::find() .filter( room_participant::Column::UserId .eq(user_id) .and(room_participant::Column::AnsweringConnectionId.is_null()), ) - .one(&tx) + .one(&*tx) .await?; if let Some(pending_participant) = pending_participant { @@ -1004,12 +991,12 @@ impl Database { connection_id: ConnectionId, live_kit_room: &str, ) -> Result> { - self.transact(|tx| async move { + self.room_transaction(|tx| async move { let room = room::ActiveModel { live_kit_room: ActiveValue::set(live_kit_room.into()), ..Default::default() } - .insert(&tx) + .insert(&*tx) .await?; let room_id = room.id; @@ -1023,11 +1010,11 @@ impl Database { calling_connection_epoch: ActiveValue::set(self.epoch), ..Default::default() } - .insert(&tx) + .insert(&*tx) .await?; let room = self.get_room(room_id, &tx).await?; - self.commit_room_transaction(room_id, tx, room).await + Ok((room_id, room)) }) .await } @@ -1040,7 +1027,7 @@ impl Database { called_user_id: UserId, initial_project_id: Option, ) -> Result> { - self.transact(|tx| async move { + self.room_transaction(|tx| async move { room_participant::ActiveModel { room_id: ActiveValue::set(room_id), user_id: ActiveValue::set(called_user_id), @@ -1050,14 +1037,13 @@ impl Database { initial_project_id: ActiveValue::set(initial_project_id), ..Default::default() } - .insert(&tx) + .insert(&*tx) .await?; let room = self.get_room(room_id, &tx).await?; let incoming_call = Self::build_incoming_call(&room, called_user_id) .ok_or_else(|| anyhow!("failed to build incoming call"))?; - self.commit_room_transaction(room_id, tx, (room, incoming_call)) - .await + Ok((room_id, (room, incoming_call))) }) .await } @@ -1067,17 +1053,17 @@ impl Database { room_id: RoomId, called_user_id: UserId, ) -> Result> { - self.transact(|tx| async move { + self.room_transaction(|tx| async move { room_participant::Entity::delete_many() .filter( room_participant::Column::RoomId .eq(room_id) .and(room_participant::Column::UserId.eq(called_user_id)), ) - .exec(&tx) + .exec(&*tx) .await?; let room = self.get_room(room_id, &tx).await?; - self.commit_room_transaction(room_id, tx, room).await + Ok((room_id, room)) }) .await } @@ -1087,14 +1073,14 @@ impl Database { expected_room_id: Option, user_id: UserId, ) -> Result> { - self.transact(|tx| async move { + self.room_transaction(|tx| async move { let participant = room_participant::Entity::find() .filter( room_participant::Column::UserId .eq(user_id) .and(room_participant::Column::AnsweringConnectionId.is_null()), ) - .one(&tx) + .one(&*tx) .await? .ok_or_else(|| anyhow!("could not decline call"))?; let room_id = participant.room_id; @@ -1104,11 +1090,11 @@ impl Database { } room_participant::Entity::delete(participant.into_active_model()) - .exec(&tx) + .exec(&*tx) .await?; let room = self.get_room(room_id, &tx).await?; - self.commit_room_transaction(room_id, tx, room).await + Ok((room_id, room)) }) .await } @@ -1119,7 +1105,7 @@ impl Database { calling_connection_id: ConnectionId, called_user_id: UserId, ) -> Result> { - self.transact(|tx| async move { + self.room_transaction(|tx| async move { let participant = room_participant::Entity::find() .filter( room_participant::Column::UserId @@ -1130,7 +1116,7 @@ impl Database { ) .and(room_participant::Column::AnsweringConnectionId.is_null()), ) - .one(&tx) + .one(&*tx) .await? 
.ok_or_else(|| anyhow!("could not cancel call"))?; let room_id = participant.room_id; @@ -1139,11 +1125,11 @@ impl Database { } room_participant::Entity::delete(participant.into_active_model()) - .exec(&tx) + .exec(&*tx) .await?; let room = self.get_room(room_id, &tx).await?; - self.commit_room_transaction(room_id, tx, room).await + Ok((room_id, room)) }) .await } @@ -1154,7 +1140,7 @@ impl Database { user_id: UserId, connection_id: ConnectionId, ) -> Result> { - self.transact(|tx| async move { + self.room_transaction(|tx| async move { let result = room_participant::Entity::update_many() .filter( room_participant::Column::RoomId @@ -1167,33 +1153,30 @@ impl Database { answering_connection_epoch: ActiveValue::set(Some(self.epoch)), ..Default::default() }) - .exec(&tx) + .exec(&*tx) .await?; if result.rows_affected == 0 { Err(anyhow!("room does not exist or was already joined"))? } else { let room = self.get_room(room_id, &tx).await?; - self.commit_room_transaction(room_id, tx, room).await + Ok((room_id, room)) } }) .await } - pub async fn leave_room( - &self, - connection_id: ConnectionId, - ) -> Result>> { - self.transact(|tx| async move { + pub async fn leave_room(&self, connection_id: ConnectionId) -> Result> { + self.room_transaction(|tx| async move { let leaving_participant = room_participant::Entity::find() .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0)) - .one(&tx) + .one(&*tx) .await?; if let Some(leaving_participant) = leaving_participant { // Leave room. let room_id = leaving_participant.room_id; room_participant::Entity::delete_by_id(leaving_participant.id) - .exec(&tx) + .exec(&*tx) .await?; // Cancel pending calls initiated by the leaving user. @@ -1203,14 +1186,14 @@ impl Database { .eq(connection_id.0) .and(room_participant::Column::AnsweringConnectionId.is_null()), ) - .all(&tx) + .all(&*tx) .await?; room_participant::Entity::delete_many() .filter( room_participant::Column::Id .is_in(called_participants.iter().map(|participant| participant.id)), ) - .exec(&tx) + .exec(&*tx) .await?; let canceled_calls_to_user_ids = called_participants .into_iter() @@ -1230,12 +1213,12 @@ impl Database { ) .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0)) .into_values::<_, QueryProjectIds>() - .all(&tx) + .all(&*tx) .await?; let mut left_projects = HashMap::default(); let mut collaborators = project_collaborator::Entity::find() .filter(project_collaborator::Column::ProjectId.is_in(project_ids)) - .stream(&tx) + .stream(&*tx) .await?; while let Some(collaborator) = collaborators.next().await { let collaborator = collaborator?; @@ -1266,7 +1249,7 @@ impl Database { // Leave projects. project_collaborator::Entity::delete_many() .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0)) - .exec(&tx) + .exec(&*tx) .await?; // Unshare projects. 
@@ -1276,33 +1259,27 @@ impl Database { .eq(room_id) .and(project::Column::HostConnectionId.eq(connection_id.0)), ) - .exec(&tx) + .exec(&*tx) .await?; let room = self.get_room(room_id, &tx).await?; if room.participants.is_empty() { - room::Entity::delete_by_id(room_id).exec(&tx).await?; + room::Entity::delete_by_id(room_id).exec(&*tx).await?; } - let left_room = self - .commit_room_transaction( - room_id, - tx, - LeftRoom { - room, - left_projects, - canceled_calls_to_user_ids, - }, - ) - .await?; + let left_room = LeftRoom { + room, + left_projects, + canceled_calls_to_user_ids, + }; if left_room.room.participants.is_empty() { self.rooms.remove(&room_id); } - Ok(Some(left_room)) + Ok((room_id, left_room)) } else { - Ok(None) + Err(anyhow!("could not leave room"))? } }) .await @@ -1314,8 +1291,8 @@ impl Database { connection_id: ConnectionId, location: proto::ParticipantLocation, ) -> Result> { - self.transact(|tx| async { - let mut tx = tx; + self.room_transaction(|tx| async { + let tx = tx; let location_kind; let location_project_id; match location @@ -1348,12 +1325,12 @@ impl Database { location_project_id: ActiveValue::set(location_project_id), ..Default::default() }) - .exec(&tx) + .exec(&*tx) .await?; if result.rows_affected == 1 { - let room = self.get_room(room_id, &mut tx).await?; - self.commit_room_transaction(room_id, tx, room).await + let room = self.get_room(room_id, &tx).await?; + Ok((room_id, room)) } else { Err(anyhow!("could not update room participant location"))? } @@ -1478,22 +1455,6 @@ impl Database { }) } - async fn commit_room_transaction( - &self, - room_id: RoomId, - tx: DatabaseTransaction, - data: T, - ) -> Result> { - let lock = self.rooms.entry(room_id).or_default().clone(); - let _guard = lock.lock_owned().await; - tx.commit().await?; - Ok(RoomGuard { - data, - _guard, - _not_send: PhantomData, - }) - } - // projects pub async fn project_count_excluding_admins(&self) -> Result { @@ -1502,14 +1463,14 @@ impl Database { Count, } - self.transact(|tx| async move { + self.transaction(|tx| async move { Ok(project::Entity::find() .select_only() .column_as(project::Column::Id.count(), QueryAs::Count) .inner_join(user::Entity) .filter(user::Column::Admin.eq(false)) .into_values::<_, QueryAs>() - .one(&tx) + .one(&*tx) .await? .unwrap_or(0) as usize) }) @@ -1522,10 +1483,10 @@ impl Database { connection_id: ConnectionId, worktrees: &[proto::WorktreeMetadata], ) -> Result> { - self.transact(|tx| async move { + self.room_transaction(|tx| async move { let participant = room_participant::Entity::find() .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0)) - .one(&tx) + .one(&*tx) .await? 
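// Only a connection that has answered a call may share a project, and only
// into the room it actually joined (enforced by the check just below).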
.ok_or_else(|| anyhow!("could not find participant"))?; if participant.room_id != room_id { @@ -1539,7 +1500,7 @@ impl Database { host_connection_epoch: ActiveValue::set(self.epoch), ..Default::default() } - .insert(&tx) + .insert(&*tx) .await?; if !worktrees.is_empty() { @@ -1554,7 +1515,7 @@ impl Database { is_complete: ActiveValue::set(false), } })) - .exec(&tx) + .exec(&*tx) .await?; } @@ -1567,12 +1528,11 @@ impl Database { is_host: ActiveValue::set(true), ..Default::default() } - .insert(&tx) + .insert(&*tx) .await?; let room = self.get_room(room_id, &tx).await?; - self.commit_room_transaction(room_id, tx, (project.id, room)) - .await + Ok((room_id, (project.id, room))) }) .await } @@ -1582,21 +1542,20 @@ impl Database { project_id: ProjectId, connection_id: ConnectionId, ) -> Result)>> { - self.transact(|tx| async move { + self.room_transaction(|tx| async move { let guest_connection_ids = self.project_guest_connection_ids(project_id, &tx).await?; let project = project::Entity::find_by_id(project_id) - .one(&tx) + .one(&*tx) .await? .ok_or_else(|| anyhow!("project not found"))?; if project.host_connection_id == connection_id.0 as i32 { let room_id = project.room_id; project::Entity::delete(project.into_active_model()) - .exec(&tx) + .exec(&*tx) .await?; let room = self.get_room(room_id, &tx).await?; - self.commit_room_transaction(room_id, tx, (room, guest_connection_ids)) - .await + Ok((room_id, (room, guest_connection_ids))) } else { Err(anyhow!("cannot unshare a project hosted by another user"))? } @@ -1610,10 +1569,10 @@ impl Database { connection_id: ConnectionId, worktrees: &[proto::WorktreeMetadata], ) -> Result)>> { - self.transact(|tx| async move { + self.room_transaction(|tx| async move { let project = project::Entity::find_by_id(project_id) .filter(project::Column::HostConnectionId.eq(connection_id.0)) - .one(&tx) + .one(&*tx) .await? .ok_or_else(|| anyhow!("no such project"))?; @@ -1634,7 +1593,7 @@ impl Database { .update_column(worktree::Column::RootName) .to_owned(), ) - .exec(&tx) + .exec(&*tx) .await?; } @@ -1645,13 +1604,12 @@ impl Database { .is_not_in(worktrees.iter().map(|worktree| worktree.id as i64)), ), ) - .exec(&tx) + .exec(&*tx) .await?; let guest_connection_ids = self.project_guest_connection_ids(project.id, &tx).await?; let room = self.get_room(project.room_id, &tx).await?; - self.commit_room_transaction(project.room_id, tx, (room, guest_connection_ids)) - .await + Ok((project.room_id, (room, guest_connection_ids))) }) .await } @@ -1661,14 +1619,14 @@ impl Database { update: &proto::UpdateWorktree, connection_id: ConnectionId, ) -> Result>> { - self.transact(|tx| async move { + self.room_transaction(|tx| async move { let project_id = ProjectId::from_proto(update.project_id); let worktree_id = update.worktree_id as i64; // Ensure the update comes from the host. let project = project::Entity::find_by_id(project_id) .filter(project::Column::HostConnectionId.eq(connection_id.0)) - .one(&tx) + .one(&*tx) .await? 
.ok_or_else(|| anyhow!("no such project"))?; let room_id = project.room_id; @@ -1683,7 +1641,7 @@ impl Database { abs_path: ActiveValue::set(update.abs_path.clone()), ..Default::default() }) - .exec(&tx) + .exec(&*tx) .await?; if !update.updated_entries.is_empty() { @@ -1719,7 +1677,7 @@ impl Database { ]) .to_owned(), ) - .exec(&tx) + .exec(&*tx) .await?; } @@ -1734,13 +1692,12 @@ impl Database { .is_in(update.removed_entries.iter().map(|id| *id as i64)), ), ) - .exec(&tx) + .exec(&*tx) .await?; } let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?; - self.commit_room_transaction(room_id, tx, connection_ids) - .await + Ok((room_id, connection_ids)) }) .await } @@ -1750,7 +1707,7 @@ impl Database { update: &proto::UpdateDiagnosticSummary, connection_id: ConnectionId, ) -> Result>> { - self.transact(|tx| async { + self.room_transaction(|tx| async move { let project_id = ProjectId::from_proto(update.project_id); let worktree_id = update.worktree_id as i64; let summary = update @@ -1760,7 +1717,7 @@ impl Database { // Ensure the update comes from the host. let project = project::Entity::find_by_id(project_id) - .one(&tx) + .one(&*tx) .await? .ok_or_else(|| anyhow!("no such project"))?; if project.host_connection_id != connection_id.0 as i32 { @@ -1790,12 +1747,11 @@ impl Database { ]) .to_owned(), ) - .exec(&tx) + .exec(&*tx) .await?; let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?; - self.commit_room_transaction(project.room_id, tx, connection_ids) - .await + Ok((project.room_id, connection_ids)) }) .await } @@ -1805,7 +1761,7 @@ impl Database { update: &proto::StartLanguageServer, connection_id: ConnectionId, ) -> Result>> { - self.transact(|tx| async { + self.room_transaction(|tx| async move { let project_id = ProjectId::from_proto(update.project_id); let server = update .server @@ -1814,7 +1770,7 @@ impl Database { // Ensure the update comes from the host. let project = project::Entity::find_by_id(project_id) - .one(&tx) + .one(&*tx) .await? .ok_or_else(|| anyhow!("no such project"))?; if project.host_connection_id != connection_id.0 as i32 { @@ -1836,12 +1792,11 @@ impl Database { .update_column(language_server::Column::Name) .to_owned(), ) - .exec(&tx) + .exec(&*tx) .await?; let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?; - self.commit_room_transaction(project.room_id, tx, connection_ids) - .await + Ok((project.room_id, connection_ids)) }) .await } @@ -1851,15 +1806,15 @@ impl Database { project_id: ProjectId, connection_id: ConnectionId, ) -> Result> { - self.transact(|tx| async move { + self.room_transaction(|tx| async move { let participant = room_participant::Entity::find() .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0)) - .one(&tx) + .one(&*tx) .await? .ok_or_else(|| anyhow!("must join a room first"))?; let project = project::Entity::find_by_id(project_id) - .one(&tx) + .one(&*tx) .await? 
.ok_or_else(|| anyhow!("no such project"))?; if project.room_id != participant.room_id { @@ -1868,7 +1823,7 @@ impl Database { let mut collaborators = project .find_related(project_collaborator::Entity) - .all(&tx) + .all(&*tx) .await?; let replica_ids = collaborators .iter() @@ -1887,11 +1842,11 @@ impl Database { is_host: ActiveValue::set(false), ..Default::default() } - .insert(&tx) + .insert(&*tx) .await?; collaborators.push(new_collaborator); - let db_worktrees = project.find_related(worktree::Entity).all(&tx).await?; + let db_worktrees = project.find_related(worktree::Entity).all(&*tx).await?; let mut worktrees = db_worktrees .into_iter() .map(|db_worktree| { @@ -1915,7 +1870,7 @@ impl Database { { let mut db_entries = worktree_entry::Entity::find() .filter(worktree_entry::Column::ProjectId.eq(project_id)) - .stream(&tx) + .stream(&*tx) .await?; while let Some(db_entry) = db_entries.next().await { let db_entry = db_entry?; @@ -1940,7 +1895,7 @@ impl Database { { let mut db_summaries = worktree_diagnostic_summary::Entity::find() .filter(worktree_diagnostic_summary::Column::ProjectId.eq(project_id)) - .stream(&tx) + .stream(&*tx) .await?; while let Some(db_summary) = db_summaries.next().await { let db_summary = db_summary?; @@ -1960,28 +1915,22 @@ impl Database { // Populate language servers. let language_servers = project .find_related(language_server::Entity) - .all(&tx) + .all(&*tx) .await?; - self.commit_room_transaction( - project.room_id, - tx, - ( - Project { - collaborators, - worktrees, - language_servers: language_servers - .into_iter() - .map(|language_server| proto::LanguageServer { - id: language_server.id as u64, - name: language_server.name, - }) - .collect(), - }, - replica_id as ReplicaId, - ), - ) - .await + let room_id = project.room_id; + let project = Project { + collaborators, + worktrees, + language_servers: language_servers + .into_iter() + .map(|language_server| proto::LanguageServer { + id: language_server.id as u64, + name: language_server.name, + }) + .collect(), + }; + Ok((room_id, (project, replica_id as ReplicaId))) }) .await } @@ -1991,43 +1940,39 @@ impl Database { project_id: ProjectId, connection_id: ConnectionId, ) -> Result> { - self.transact(|tx| async move { + self.room_transaction(|tx| async move { let result = project_collaborator::Entity::delete_many() .filter( project_collaborator::Column::ProjectId .eq(project_id) .and(project_collaborator::Column::ConnectionId.eq(connection_id.0)), ) - .exec(&tx) + .exec(&*tx) .await?; if result.rows_affected == 0 { Err(anyhow!("not a collaborator on this project"))?; } let project = project::Entity::find_by_id(project_id) - .one(&tx) + .one(&*tx) .await? 
.ok_or_else(|| anyhow!("no such project"))?; let collaborators = project .find_related(project_collaborator::Entity) - .all(&tx) + .all(&*tx) .await?; let connection_ids = collaborators .into_iter() .map(|collaborator| ConnectionId(collaborator.connection_id as u32)) .collect(); - self.commit_room_transaction( - project.room_id, - tx, - LeftProject { - id: project_id, - host_user_id: project.host_user_id, - host_connection_id: ConnectionId(project.host_connection_id as u32), - connection_ids, - }, - ) - .await + let left_project = LeftProject { + id: project_id, + host_user_id: project.host_user_id, + host_connection_id: ConnectionId(project.host_connection_id as u32), + connection_ids, + }; + Ok((project.room_id, left_project)) }) .await } @@ -2037,10 +1982,10 @@ impl Database { project_id: ProjectId, connection_id: ConnectionId, ) -> Result> { - self.transact(|tx| async move { + self.transaction(|tx| async move { let collaborators = project_collaborator::Entity::find() .filter(project_collaborator::Column::ProjectId.eq(project_id)) - .all(&tx) + .all(&*tx) .await?; if collaborators @@ -2060,7 +2005,7 @@ impl Database { project_id: ProjectId, connection_id: ConnectionId, ) -> Result> { - self.transact(|tx| async move { + self.transaction(|tx| async move { #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] enum QueryAs { ConnectionId, @@ -2074,7 +2019,7 @@ impl Database { ) .filter(project_collaborator::Column::ProjectId.eq(project_id)) .into_values::() - .stream(&tx) + .stream(&*tx) .await?; let mut connection_ids = HashSet::default(); @@ -2131,7 +2076,7 @@ impl Database { access_token_hash: &str, max_access_token_count: usize, ) -> Result<()> { - self.transact(|tx| async { + self.transaction(|tx| async { let tx = tx; access_token::ActiveModel { @@ -2139,7 +2084,7 @@ impl Database { hash: ActiveValue::set(access_token_hash.into()), ..Default::default() } - .insert(&tx) + .insert(&*tx) .await?; access_token::Entity::delete_many() @@ -2155,9 +2100,8 @@ impl Database { .to_owned(), ), ) - .exec(&tx) + .exec(&*tx) .await?; - tx.commit().await?; Ok(()) }) .await @@ -2169,22 +2113,22 @@ impl Database { Hash, } - self.transact(|tx| async move { + self.transaction(|tx| async move { Ok(access_token::Entity::find() .select_only() .column(access_token::Column::Hash) .filter(access_token::Column::UserId.eq(user_id)) .order_by_desc(access_token::Column::Id) .into_values::<_, QueryAs>() - .all(&tx) + .all(&*tx) .await?) 
}) .await } - async fn transact(&self, f: F) -> Result + async fn transaction(&self, f: F) -> Result where - F: Send + Fn(DatabaseTransaction) -> Fut, + F: Send + Fn(TransactionHandle) -> Fut, Fut: Send + Future>, { let body = async { @@ -2200,22 +2144,32 @@ impl Database { .await?; } - match f(tx).await { - Ok(result) => return Ok(result), - Err(error) => match error { - Error::Database( - DbErr::Exec(sea_orm::RuntimeErr::SqlxError(error)) - | DbErr::Query(sea_orm::RuntimeErr::SqlxError(error)), - ) if error - .as_database_error() - .and_then(|error| error.code()) - .as_deref() - == Some("40001") => - { - // Retry (don't break the loop) + let mut tx = Arc::new(Some(tx)); + let result = f(TransactionHandle(tx.clone())).await; + let tx = Arc::get_mut(&mut tx).unwrap().take().unwrap(); + + match result { + Ok(result) => { + tx.commit().await?; + return Ok(result); + } + Err(error) => { + tx.rollback().await?; + match error { + Error::Database( + DbErr::Exec(sea_orm::RuntimeErr::SqlxError(error)) + | DbErr::Query(sea_orm::RuntimeErr::SqlxError(error)), + ) if error + .as_database_error() + .and_then(|error| error.code()) + .as_deref() + == Some("40001") => + { + // Retry (don't break the loop) + } + error @ _ => return Err(error), } - error @ _ => return Err(error), - }, + } } } }; @@ -2234,6 +2188,85 @@ impl Database { body.await } } + + async fn room_transaction(&self, f: F) -> Result> + where + F: Send + Fn(TransactionHandle) -> Fut, + Fut: Send + Future>, + { + let body = async { + loop { + let tx = self.pool.begin().await?; + + // In Postgres, serializable transactions are opt-in + if let DatabaseBackend::Postgres = self.pool.get_database_backend() { + tx.execute(Statement::from_string( + DatabaseBackend::Postgres, + "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;".into(), + )) + .await?; + } + + let mut tx = Arc::new(Some(tx)); + let result = f(TransactionHandle(tx.clone())).await; + let tx = Arc::get_mut(&mut tx).unwrap().take().unwrap(); + + match result { + Ok((room_id, data)) => { + let lock = self.rooms.entry(room_id).or_default().clone(); + let _guard = lock.lock_owned().await; + tx.commit().await?; + return Ok(RoomGuard { + data, + _guard, + _not_send: PhantomData, + }); + } + Err(error) => { + tx.rollback().await?; + match error { + Error::Database( + DbErr::Exec(sea_orm::RuntimeErr::SqlxError(error)) + | DbErr::Query(sea_orm::RuntimeErr::SqlxError(error)), + ) if error + .as_database_error() + .and_then(|error| error.code()) + .as_deref() + == Some("40001") => + { + // Retry (don't break the loop) + } + error @ _ => return Err(error), + } + } + } + } + }; + + #[cfg(test)] + { + if let Some(background) = self.background.as_ref() { + background.simulate_random_delay().await; + } + + self.runtime.as_ref().unwrap().block_on(body) + } + + #[cfg(not(test))] + { + body.await + } + } +} + +struct TransactionHandle(Arc>); + +impl Deref for TransactionHandle { + type Target = DatabaseTransaction; + + fn deref(&self) -> &Self::Target { + self.0.as_ref().as_ref().unwrap() + } } pub struct RoomGuard { diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 9d3917a417..7f404feffe 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1854,9 +1854,7 @@ async fn leave_room_for_session(session: &Session) -> Result<()> { let live_kit_room; let delete_live_kit_room; { - let Some(mut left_room) = session.db().await.leave_room(session.connection_id).await? 
else { - return Err(anyhow!("no room to leave"))?; - }; + let mut left_room = session.db().await.leave_room(session.connection_id).await?; contacts_to_update.insert(session.user_id); for project in left_room.left_projects.values() { From 4bc1d775358f8af756740c4ef5d250ceee560cb2 Mon Sep 17 00:00:00 2001 From: Julia Date: Fri, 2 Dec 2022 16:09:37 -0500 Subject: [PATCH 111/240] Fix tab following order test to wait for file open to propagate Now it can actually repro the original bug Co-Authored-By: Max Brunsfeld --- crates/collab/src/integration_tests.rs | 18 ++++++++++-------- crates/editor/src/items.rs | 6 ++++-- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 7115ed6c60..0daa3b69f6 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -5040,14 +5040,6 @@ async fn test_following_tab_order( //Verify that the tabs opened in the order we expect assert_eq!(&pane_paths(&pane_a, cx_a), &["1.txt", "3.txt"]); - //Open just 2 on client B - workspace_b - .update(cx_b, |workspace, cx| { - workspace.open_path((worktree_id, "2.txt"), None, true, cx) - }) - .await - .unwrap(); - //Follow client B as client A workspace_a .update(cx_a, |workspace, cx| { @@ -5058,6 +5050,15 @@ async fn test_following_tab_order( .await .unwrap(); + //Open just 2 on client B + workspace_b + .update(cx_b, |workspace, cx| { + workspace.open_path((worktree_id, "2.txt"), None, true, cx) + }) + .await + .unwrap(); + deterministic.run_until_parked(); + // Verify that newly opened followed file is at the end assert_eq!(&pane_paths(&pane_a, cx_a), &["1.txt", "3.txt", "2.txt"]); @@ -5069,6 +5070,7 @@ async fn test_following_tab_order( .await .unwrap(); assert_eq!(&pane_paths(&pane_b, cx_b), &["2.txt", "1.txt"]); + deterministic.run_until_parked(); // Verify that following into 1 did not reorder assert_eq!(&pane_paths(&pane_a, cx_a), &["1.txt", "3.txt", "2.txt"]); diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 0cc8575e99..ccabe81de6 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -55,9 +55,11 @@ impl FollowableItem for Editor { let buffer = buffer.await?; let editor = pane .read_with(&cx, |pane, cx| { - pane.items_of_type::().find(|editor| { + let existing = pane.items_of_type::().find(|editor| { editor.read(cx).buffer.read(cx).as_singleton().as_ref() == Some(&buffer) - }) + }); + dbg!(&existing); + existing }) .unwrap_or_else(|| { pane.update(&mut cx, |_, cx| { From 57e10b7dd56c18caaf91a176a86481e3ee4c4571 Mon Sep 17 00:00:00 2001 From: Julia Date: Fri, 2 Dec 2022 16:42:49 -0500 Subject: [PATCH 112/240] Cleanup dbg --- crates/editor/src/items.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index ccabe81de6..0cc8575e99 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -55,11 +55,9 @@ impl FollowableItem for Editor { let buffer = buffer.await?; let editor = pane .read_with(&cx, |pane, cx| { - let existing = pane.items_of_type::().find(|editor| { + pane.items_of_type::().find(|editor| { editor.read(cx).buffer.read(cx).as_singleton().as_ref() == Some(&buffer) - }); - dbg!(&existing); - existing + }) }) .unwrap_or_else(|| { pane.update(&mut cx, |_, cx| { From 72c1ee904b7335ede76b421a33edb70c07342b2a Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 19 Oct 2022 09:33:16 -0700 Subject: [PATCH 113/240] Fix rebase - Broken tab --- 
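Note on the transaction scoping in this patch: rusqlite prepared statements
borrow the transaction they were created from, and `Transaction::commit`
takes the transaction by value, so every statement has to be dropped before
the commit. That is what the extra `{ ... }` scope around the cached
statements is for (the "rust is wild" comment below). A minimal sketch of
the pattern, assuming rusqlite's `Connection::transaction` and
`prepare_cached` APIs (the query itself is illustrative only):

    let tx = connection.transaction()?;
    let result = {
        // `stmt` borrows `tx`, so it must fall out of scope before commit.
        let mut stmt = tx.prepare_cached("SELECT id FROM items")?;
        stmt.query_map([], |row| row.get::<_, i64>(0))?
            .collect::<Result<Vec<_>, _>>()?
    }; // borrow of `tx` ends here, freeing it to be committed
    tx.commit()?;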
crates/db/src/items.rs | 38 +++++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/crates/db/src/items.rs b/crates/db/src/items.rs index ed4a4f85e3..87edbd2c00 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/items.rs @@ -148,18 +148,38 @@ impl Db { let tx = lock.transaction()?; - // When working with transactions in rusqlite, need to make this kind of scope - // To make the borrow stuff work correctly. Don't know why, rust is wild. - let result = { - let mut editors_stmt = tx.prepare_cached( - r#" + // When working with transactions in rusqlite, need to make this kind of scope + // To make the borrow stuff work correctly. Don't know why, rust is wild. + let result = { + let mut read_editors = tx + .prepare_cached( + r#" SELECT items.id, item_path.path FROM items LEFT JOIN item_path - ON items.id = item_path.item_id - WHERE items.kind = ?; - "#, - )?; + ON items.id = item_path.item_id + WHERE items.kind = "Editor"; + "#r, + )? + .query_map([], |row| { + let buf: Vec = row.get(2)?; + let path: PathBuf = OsStr::from_bytes(&buf).into(); + + Ok(SerializedItem::Editor(id, path)) + })?; + + let mut read_stmt = tx.prepare_cached( + " + SELECT items.id, items.kind, item_path.path, item_query.query + FROM items + LEFT JOIN item_path + ON items.id = item_path.item_id + LEFT JOIN item_query + ON items.id = item_query.item_id + WHERE + ORDER BY items.id; + ", + )?; let editors_iter = editors_stmt.query_map( [SerializedItemKind::Editor.to_string()], From 60ebe33518df5540f98af8e4019c1a72056e0c03 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 19 Oct 2022 11:36:01 -0700 Subject: [PATCH 114/240] Rebase fix - Reworking approach to sql for take --- crates/db/src/db.rs | 1 + crates/db/src/items.rs | 38 +++++++++---------------------------- crates/db/src/migrations.rs | 4 ++-- 3 files changed, 12 insertions(+), 31 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 2949acdb83..6f1ac7f59f 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,3 +1,4 @@ +mod items; mod kvp; mod migrations; diff --git a/crates/db/src/items.rs b/crates/db/src/items.rs index 87edbd2c00..ed4a4f85e3 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/items.rs @@ -148,38 +148,18 @@ impl Db { let tx = lock.transaction()?; - // When working with transactions in rusqlite, need to make this kind of scope - // To make the borrow stuff work correctly. Don't know why, rust is wild. - let result = { - let mut read_editors = tx - .prepare_cached( - r#" + // When working with transactions in rusqlite, need to make this kind of scope + // To make the borrow stuff work correctly. Don't know why, rust is wild. + let result = { + let mut editors_stmt = tx.prepare_cached( + r#" SELECT items.id, item_path.path FROM items LEFT JOIN item_path - ON items.id = item_path.item_id - WHERE items.kind = "Editor"; - "#r, - )? 
- .query_map([], |row| { - let buf: Vec = row.get(2)?; - let path: PathBuf = OsStr::from_bytes(&buf).into(); - - Ok(SerializedItem::Editor(id, path)) - })?; - - let mut read_stmt = tx.prepare_cached( - " - SELECT items.id, items.kind, item_path.path, item_query.query - FROM items - LEFT JOIN item_path - ON items.id = item_path.item_id - LEFT JOIN item_query - ON items.id = item_query.item_id - WHERE - ORDER BY items.id; - ", - )?; + ON items.id = item_path.item_id + WHERE items.kind = ?; + "#, + )?; let editors_iter = editors_stmt.query_map( [SerializedItemKind::Editor.to_string()], diff --git a/crates/db/src/migrations.rs b/crates/db/src/migrations.rs index 1000543d8d..40e5d28b80 100644 --- a/crates/db/src/migrations.rs +++ b/crates/db/src/migrations.rs @@ -1,7 +1,7 @@ use rusqlite_migration::{Migrations, M}; // use crate::items::ITEMS_M_1; -use crate::kvp::KVP_M_1_UP; +use crate::{items::ITEMS_M_1, kvp::KVP_M_1_UP}; // This must be ordered by development time! Only ever add new migrations to the end!! // Bad things will probably happen if you don't monotonically edit this vec!!!! @@ -10,6 +10,6 @@ use crate::kvp::KVP_M_1_UP; lazy_static::lazy_static! { pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![ M::up(KVP_M_1_UP), - // M::up(ITEMS_M_1), + M::up(ITEMS_M_1), ]); } From b48e28b55512f57b0d045aa5de0292d13ad1f2b2 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 19 Oct 2022 17:10:49 -0700 Subject: [PATCH 115/240] Built first draft of workspace serialization schemas, started writing DB tests Co-Authored-By: kay@zed.dev --- crates/db/src/db.rs | 1 + crates/db/src/items.rs | 347 ++++++------------------------------ crates/db/src/kvp.rs | 2 +- crates/db/src/migrations.rs | 5 +- crates/db/src/workspace.rs | 180 +++++++++++++++++++ 5 files changed, 234 insertions(+), 301 deletions(-) create mode 100644 crates/db/src/workspace.rs diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 6f1ac7f59f..2b4b7cf9c3 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,6 +1,7 @@ mod items; mod kvp; mod migrations; +mod workspace; use std::fs; use std::path::{Path, PathBuf}; diff --git a/crates/db/src/items.rs b/crates/db/src/items.rs index ed4a4f85e3..7454f24331 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/items.rs @@ -6,306 +6,59 @@ use rusqlite::{named_params, params}; use super::Db; +/// Current design makes the cut at the item level, +/// - Maybe A little more bottom up, serialize 'Terminals' and 'Editors' directly, and then make a seperate +/// - items table, with a kind, and an integer that acts as a key to one of these other tables +/// This column is a foreign key to ONE OF: editors, terminals, searches +/// - + +// (workspace_id, item_id) +// kind -> ::Editor:: + +// -> +// At the workspace level +// -> (Workspace_ID, item_id) +// -> One shot, big query, load everything up: + +// -> SerializedWorkspace::deserialize(tx, itemKey) +// -> SerializedEditor::deserialize(tx, itemKey) + +// -> +// -> Workspace::new(SerializedWorkspace) +// -> Editor::new(serialized_workspace[???]serializedEditor) + +// //Pros: Keeps sql out of every body elese, makes changing it easier (e.g. 
for loading from a network or RocksDB) +// //Cons: DB has to know the internals of the entire rest of the app + +// Workspace +// Worktree roots +// Pane groups +// Dock +// Items +// Sidebars + pub(crate) const ITEMS_M_1: &str = " CREATE TABLE items( - id INTEGER PRIMARY KEY, - kind TEXT + workspace_id INTEGER, + item_id INTEGER, + kind TEXT NOT NULL, + PRIMARY KEY (workspace_id, item_id) + FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) ) STRICT; -CREATE TABLE item_path( - item_id INTEGER PRIMARY KEY, - path BLOB + +CREATE TABLE project_searches( + workspace_id INTEGER, + item_id INTEGER, + query TEXT, + PRIMARY KEY (workspace_id, item_id) + FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) ) STRICT; -CREATE TABLE item_query( - item_id INTEGER PRIMARY KEY, - query TEXT + +CREATE TABLE editors( + workspace_id INTEGER, + item_id INTEGER, + path BLOB NOT NULL, + PRIMARY KEY (workspace_id, item_id) + FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) ) STRICT; "; - -#[derive(PartialEq, Eq, Hash, Debug)] -pub enum SerializedItemKind { - Editor, - Terminal, - ProjectSearch, - Diagnostics, -} - -impl Display for SerializedItemKind { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str(&format!("{:?}", self)) - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum SerializedItem { - Editor(usize, PathBuf), - Terminal(usize), - ProjectSearch(usize, String), - Diagnostics(usize), -} - -impl SerializedItem { - fn kind(&self) -> SerializedItemKind { - match self { - SerializedItem::Editor(_, _) => SerializedItemKind::Editor, - SerializedItem::Terminal(_) => SerializedItemKind::Terminal, - SerializedItem::ProjectSearch(_, _) => SerializedItemKind::ProjectSearch, - SerializedItem::Diagnostics(_) => SerializedItemKind::Diagnostics, - } - } - - fn id(&self) -> usize { - match self { - SerializedItem::Editor(id, _) - | SerializedItem::Terminal(id) - | SerializedItem::ProjectSearch(id, _) - | SerializedItem::Diagnostics(id) => *id, - } - } -} - -impl Db { - fn write_item(&self, serialized_item: SerializedItem) -> Result<()> { - self.real() - .map(|db| { - let mut lock = db.connection.lock(); - let tx = lock.transaction()?; - - // Serialize the item - let id = serialized_item.id(); - { - let mut stmt = tx.prepare_cached( - "INSERT OR REPLACE INTO items(id, kind) VALUES ((?), (?))", - )?; - - dbg!("inserting item"); - stmt.execute(params![id, serialized_item.kind().to_string()])?; - } - - // Serialize item data - match &serialized_item { - SerializedItem::Editor(_, path) => { - dbg!("inserting path"); - let mut stmt = tx.prepare_cached( - "INSERT OR REPLACE INTO item_path(item_id, path) VALUES ((?), (?))", - )?; - - let path_bytes = path.as_os_str().as_bytes(); - stmt.execute(params![id, path_bytes])?; - } - SerializedItem::ProjectSearch(_, query) => { - dbg!("inserting query"); - let mut stmt = tx.prepare_cached( - "INSERT OR REPLACE INTO item_query(item_id, query) VALUES ((?), (?))", - )?; - - stmt.execute(params![id, query])?; - } - _ => {} - } - - tx.commit()?; - - let mut stmt = lock.prepare_cached("SELECT id, kind FROM items")?; - let _ = stmt - .query_map([], |row| { - let zero: usize = row.get(0)?; - let one: String = row.get(1)?; - - dbg!(zero, one); - Ok(()) - })? 
- .collect::>>(); - - Ok(()) - }) - .unwrap_or(Ok(())) - } - - fn delete_item(&self, item_id: usize) -> Result<()> { - self.real() - .map(|db| { - let lock = db.connection.lock(); - - let mut stmt = lock.prepare_cached( - r#" - DELETE FROM items WHERE id = (:id); - DELETE FROM item_path WHERE id = (:id); - DELETE FROM item_query WHERE id = (:id); - "#, - )?; - - stmt.execute(named_params! {":id": item_id})?; - - Ok(()) - }) - .unwrap_or(Ok(())) - } - - fn take_items(&self) -> Result> { - self.real() - .map(|db| { - let mut lock = db.connection.lock(); - - let tx = lock.transaction()?; - - // When working with transactions in rusqlite, need to make this kind of scope - // To make the borrow stuff work correctly. Don't know why, rust is wild. - let result = { - let mut editors_stmt = tx.prepare_cached( - r#" - SELECT items.id, item_path.path - FROM items - LEFT JOIN item_path - ON items.id = item_path.item_id - WHERE items.kind = ?; - "#, - )?; - - let editors_iter = editors_stmt.query_map( - [SerializedItemKind::Editor.to_string()], - |row| { - let id: usize = row.get(0)?; - - let buf: Vec = row.get(1)?; - let path: PathBuf = OsStr::from_bytes(&buf).into(); - - Ok(SerializedItem::Editor(id, path)) - }, - )?; - - let mut terminals_stmt = tx.prepare_cached( - r#" - SELECT items.id - FROM items - WHERE items.kind = ?; - "#, - )?; - let terminals_iter = terminals_stmt.query_map( - [SerializedItemKind::Terminal.to_string()], - |row| { - let id: usize = row.get(0)?; - - Ok(SerializedItem::Terminal(id)) - }, - )?; - - let mut search_stmt = tx.prepare_cached( - r#" - SELECT items.id, item_query.query - FROM items - LEFT JOIN item_query - ON items.id = item_query.item_id - WHERE items.kind = ?; - "#, - )?; - let searches_iter = search_stmt.query_map( - [SerializedItemKind::ProjectSearch.to_string()], - |row| { - let id: usize = row.get(0)?; - let query = row.get(1)?; - - Ok(SerializedItem::ProjectSearch(id, query)) - }, - )?; - - #[cfg(debug_assertions)] - let tmp = - searches_iter.collect::>>(); - #[cfg(debug_assertions)] - debug_assert!(tmp.len() == 0 || tmp.len() == 1); - #[cfg(debug_assertions)] - let searches_iter = tmp.into_iter(); - - let mut diagnostic_stmt = tx.prepare_cached( - r#" - SELECT items.id - FROM items - WHERE items.kind = ?; - "#, - )?; - - let diagnostics_iter = diagnostic_stmt.query_map( - [SerializedItemKind::Diagnostics.to_string()], - |row| { - let id: usize = row.get(0)?; - - Ok(SerializedItem::Diagnostics(id)) - }, - )?; - - #[cfg(debug_assertions)] - let tmp = - diagnostics_iter.collect::>>(); - #[cfg(debug_assertions)] - debug_assert!(tmp.len() == 0 || tmp.len() == 1); - #[cfg(debug_assertions)] - let diagnostics_iter = tmp.into_iter(); - - let res = editors_iter - .chain(terminals_iter) - .chain(diagnostics_iter) - .chain(searches_iter) - .collect::, rusqlite::Error>>()?; - - let mut delete_stmt = tx.prepare_cached( - r#" - DELETE FROM items; - DELETE FROM item_path; - DELETE FROM item_query; - "#, - )?; - - delete_stmt.execute([])?; - - res - }; - - tx.commit()?; - - Ok(result) - }) - .unwrap_or(Ok(HashSet::default())) - } -} - -#[cfg(test)] -mod test { - use anyhow::Result; - - use super::*; - - #[test] - fn test_items_round_trip() -> Result<()> { - let db = Db::open_in_memory(); - - let mut items = vec![ - SerializedItem::Editor(0, PathBuf::from("/tmp/test.txt")), - SerializedItem::Terminal(1), - SerializedItem::ProjectSearch(2, "Test query!".to_string()), - SerializedItem::Diagnostics(3), - ] - .into_iter() - .collect::>(); - - for item in items.iter() { - 
dbg!("Inserting... "); - db.write_item(item.clone())?; - } - - assert_eq!(items, db.take_items()?); - - // Check that it's empty, as expected - assert_eq!(HashSet::default(), db.take_items()?); - - for item in items.iter() { - db.write_item(item.clone())?; - } - - items.remove(&SerializedItem::ProjectSearch(2, "Test query!".to_string())); - db.delete_item(2)?; - - assert_eq!(items, db.take_items()?); - - Ok(()) - } -} diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 534577bc79..96f13d8040 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -3,7 +3,7 @@ use rusqlite::OptionalExtension; use super::Db; -pub(crate) const KVP_M_1_UP: &str = " +pub(crate) const KVP_M_1: &str = " CREATE TABLE kv_store( key TEXT PRIMARY KEY, value TEXT NOT NULL diff --git a/crates/db/src/migrations.rs b/crates/db/src/migrations.rs index 40e5d28b80..3a21c7fa6f 100644 --- a/crates/db/src/migrations.rs +++ b/crates/db/src/migrations.rs @@ -1,7 +1,7 @@ use rusqlite_migration::{Migrations, M}; // use crate::items::ITEMS_M_1; -use crate::{items::ITEMS_M_1, kvp::KVP_M_1_UP}; +use crate::kvp::KVP_M_1; // This must be ordered by development time! Only ever add new migrations to the end!! // Bad things will probably happen if you don't monotonically edit this vec!!!! @@ -9,7 +9,6 @@ use crate::{items::ITEMS_M_1, kvp::KVP_M_1_UP}; // file system and so everything we do here is locked in _f_o_r_e_v_e_r_. lazy_static::lazy_static! { pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![ - M::up(KVP_M_1_UP), - M::up(ITEMS_M_1), + M::up(KVP_M_1), ]); } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs new file mode 100644 index 0000000000..8ece0d5b78 --- /dev/null +++ b/crates/db/src/workspace.rs @@ -0,0 +1,180 @@ +use std::{path::Path, sync::Arc}; + +use super::Db; + +pub(crate) const WORKSPACE_M_1: &str = " +CREATE TABLE workspaces( + workspace_id INTEGER PRIMARY KEY, + center_group INTEGER NOT NULL, + dock_pane INTEGER NOT NULL, + timestamp INTEGER, + FOREIGN KEY(center_group) REFERENCES pane_groups(group_id) + FOREIGN KEY(dock_pane) REFERENCES pane_items(pane_id) +) STRICT; + +CREATE TABLE worktree_roots( + worktree_root BLOB NOT NULL, + workspace_id INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) +) STRICT; + +CREATE TABLE pane_groups( + workspace_id INTEGER, + group_id INTEGER, + split_direction STRING, -- 'Vertical' / 'Horizontal' / + PRIMARY KEY (workspace_id, group_id) +) STRICT; + +CREATE TABLE pane_group_children( + workspace_id INTEGER, + group_id INTEGER, + child_pane_id INTEGER, -- Nullable + child_group_id INTEGER, -- Nullable + PRIMARY KEY (workspace_id, group_id) +) STRICT; + +CREATE TABLE pane_items( + workspace_id INTEGER, + pane_id INTEGER, + item_id INTEGER, -- Array + PRIMARY KEY (workspace_id, pane_id) +) STRICT; +"; + +// Zed stores items with ids which are a combination of a view id during a given run and a workspace id. This + +// Case 1: Starting Zed Contextless +// > Zed -> Reopen the last +// Case 2: Starting Zed with a project folder +// > Zed ~/projects/Zed +// Case 3: Starting Zed with a file +// > Zed ~/projects/Zed/cargo.toml +// Case 4: Starting Zed with multiple project folders +// > Zed ~/projects/Zed ~/projects/Zed.dev + +#[derive(Debug, PartialEq, Eq)] +pub struct WorkspaceId(usize); + +impl Db { + /// Finds or creates a workspace id for the given set of worktree roots. 
If the passed worktree roots is empty, return the
+    /// the last workspace id
+    pub fn workspace_id(&self, worktree_roots: &[Arc]) -> WorkspaceId {
+        // Find the workspace id which is uniquely identified by this set of paths return it if found
+        // Otherwise:
+        // Find the max workspace_id and increment it as our new workspace id
+        // Store in the worktrees table the mapping from this new id to the set of worktree roots
+        unimplemented!();
+    }
+
+    /// Updates the open paths for the given workspace id. Will garbage collect items from
+    /// any workspace ids which are now replaced by the new workspace id. Updates the timestamps
+    /// in the workspace id table
+    pub fn update_worktree_roots(&self, workspace_id: &WorkspaceId, worktree_roots: &[Arc]) {
+        // Lookup any WorkspaceIds which have the same set of roots, and delete them. (NOTE: this should garbage collect other tables)
+        // Remove the old rows which contain workspace_id
+        // Add rows for the new worktree_roots
+
+        // zed /tree
+        // -> add tree2
+        // -> update_worktree_roots() -> adds entries for /tree and /tree2, leaving behind the initial entry for /tree
+        unimplemented!();
+    }
+
+    /// Returns the previous workspace ids sorted by last modified
+    pub fn recent_workspaces(&self) -> Vec<(WorkspaceId, Vec>)> {
+        // Return all the workspace ids and their associated paths ordered by the access timestamp
+        //ORDER BY timestamps
+        unimplemented!();
+    }
+
+    pub fn center_pane(&self, workspace: WorkspaceId) -> SerializedPaneGroup {}
+
+    pub fn dock_pane(&self, workspace: WorkspaceId) -> SerializedPane {}
+}
+
+#[cfg(test)]
+mod tests {
+
+    use std::{
+        path::{Path, PathBuf},
+        sync::Arc,
+    };
+
+    use crate::Db;
+
+    use super::WorkspaceId;
+
+    fn test_tricky_overlapping_updates() {
+        // DB state:
+        // (/tree) -> ID: 1
+        // (/tree, /tree2) -> ID: 2
+        // (/tree2, /tree3) -> ID: 3
+
+        // -> User updates 2 to: (/tree2, /tree3)
+
+        // DB state:
+        // (/tree) -> ID: 1
+        // (/tree2, /tree3) -> ID: 2
+        // Get rid of 3 for garbage collection
+
+        fn arc_path(path: &'static str) -> Arc {
+            PathBuf::from(path).into()
+        }
+
+        let data = &[
+            (WorkspaceId(1), vec![arc_path("/tmp")]),
+            (WorkspaceId(2), vec![arc_path("/tmp"), arc_path("/tmp2")]),
+            (WorkspaceId(3), vec![arc_path("/tmp2"), arc_path("/tmp3")]),
+        ];
+
+        let db = Db::open_in_memory();
+
+        for (workspace_id, entries) in data {
+            db.update_worktree_roots(workspace_id, entries); //??
+            assert_eq!(&db.workspace_id(&[]), workspace_id)
+        }
+
+        for (workspace_id, entries) in data {
+            assert_eq!(&db.workspace_id(entries.as_slice()), workspace_id);
+        }
+
+        db.update_worktree_roots(&WorkspaceId(2), &[arc_path("/tmp2")]);
+        // todo!(); // make sure that 3 got garbage collected
+
+        assert_eq!(db.workspace_id(&[arc_path("/tmp2")]), WorkspaceId(2));
+        assert_eq!(db.workspace_id(&[arc_path("/tmp")]), WorkspaceId(1));
+
+        let recent_workspaces = db.recent_workspaces();
+        assert_eq!(recent_workspaces.get(0).unwrap().0, WorkspaceId(2));
+        assert_eq!(recent_workspaces.get(1).unwrap().0, WorkspaceId(3));
+        assert_eq!(recent_workspaces.get(2).unwrap().0, WorkspaceId(1));
+    }
+}
+
+// [/tmp, /tmp2] -> ID1?
+// [/tmp] -> ID2?
+
+/*
+path | id
+/tmp ID1
+/tmp ID2
+/tmp2 ID1
+
+
+SELECT id
+FROM workspace_ids
+WHERE path IN (path1, path2)
+INTERSECT
+SELECT id
+FROM workspace_ids
+WHERE path = path_2
+... and so on for each element in the path array
+
+If this returns a row, yay! 
If not, +SELECT max(id) FROm workspace_ids + +Select id WHERE path IN paths + +SELECT MAX(id) + +*/ From 0c466f806c50c1d0fd742fbf3bf8f1709bf15eb7 Mon Sep 17 00:00:00 2001 From: K Simmons Date: Thu, 20 Oct 2022 15:07:58 -0700 Subject: [PATCH 116/240] WIP --- Cargo.lock | 1 + crates/db/Cargo.toml | 1 + crates/db/src/db.rs | 1 + crates/db/src/items.rs | 62 +++++++++++++- crates/db/src/pane.rs | 134 +++++++++++++++++++++++++++++ crates/db/src/workspace.rs | 137 +++++++++++++++++++++--------- crates/gpui/src/presenter.rs | 3 +- crates/workspace/Cargo.toml | 1 + crates/workspace/src/dock.rs | 6 +- crates/workspace/src/workspace.rs | 1 + 10 files changed, 302 insertions(+), 45 deletions(-) create mode 100644 crates/db/src/pane.rs diff --git a/Cargo.lock b/Cargo.lock index e04624d686..b381331ef1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7617,6 +7617,7 @@ dependencies = [ "client", "collections", "context_menu", + "db", "drag_and_drop", "fs", "futures 0.3.24", diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index e0b932003e..10f0858a52 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -12,6 +12,7 @@ test-support = [] [dependencies] collections = { path = "../collections" } +gpui = { path = "../gpui" } anyhow = "1.0.57" async-trait = "0.1" lazy_static = "1.4.0" diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 2b4b7cf9c3..bf3cd64508 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,6 +1,7 @@ mod items; mod kvp; mod migrations; +mod pane; mod workspace; use std::fs; diff --git a/crates/db/src/items.rs b/crates/db/src/items.rs index 7454f24331..1b633fdc47 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/items.rs @@ -1,8 +1,17 @@ -use std::{ffi::OsStr, fmt::Display, hash::Hash, os::unix::prelude::OsStrExt, path::PathBuf}; +use std::{ + ffi::OsStr, + fmt::Display, + hash::Hash, + os::unix::prelude::OsStrExt, + path::{Path, PathBuf}, + sync::Arc, +}; use anyhow::Result; use collections::HashSet; -use rusqlite::{named_params, params}; +use rusqlite::{named_params, params, types::FromSql}; + +use crate::workspace::WorkspaceId; use super::Db; @@ -62,3 +71,52 @@ CREATE TABLE editors( FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) ) STRICT; "; + +#[derive(Debug, PartialEq, Eq)] +pub struct ItemId { + workspace_id: usize, + item_id: usize, +} + +enum SerializedItemKind { + Editor, + Diagnostics, + ProjectSearch, + Terminal, +} + +struct SerializedItemRow { + kind: SerializedItemKind, + item_id: usize, + path: Option>, + query: Option, +} + +#[derive(Debug, PartialEq, Eq)] +pub enum SerializedItem { + Editor { item_id: usize, path: Arc }, + Diagnostics { item_id: usize }, + ProjectSearch { item_id: usize, query: String }, + Terminal { item_id: usize }, +} + +impl SerializedItem { + pub fn item_id(&self) -> usize { + match self { + SerializedItem::Editor { item_id, .. } => *item_id, + SerializedItem::Diagnostics { item_id } => *item_id, + SerializedItem::ProjectSearch { item_id, .. 
} => *item_id, + SerializedItem::Terminal { item_id } => *item_id, + } + } +} + +impl Db { + pub fn get_item(&self, item_id: ItemId) -> SerializedItem { + unimplemented!() + } + + pub fn save_item(&self, workspace_id: WorkspaceId, item: &SerializedItem) {} + + pub fn close_item(&self, item_id: ItemId) {} +} diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs new file mode 100644 index 0000000000..98feb36abf --- /dev/null +++ b/crates/db/src/pane.rs @@ -0,0 +1,134 @@ +use gpui::Axis; + +use crate::{items::ItemId, workspace::WorkspaceId}; + +use super::Db; + +pub(crate) const PANE_M_1: &str = " +CREATE TABLE pane_groups( + workspace_id INTEGER, + group_id INTEGER, + axis STRING NOT NULL, -- 'Vertical' / 'Horizontal' + PRIMARY KEY (workspace_id, group_id) +) STRICT; + +CREATE TABLE pane_group_children( + workspace_id INTEGER, + group_id INTEGER, + child_pane_id INTEGER, -- Nullable + child_group_id INTEGER, -- Nullable + index INTEGER, + PRIMARY KEY (workspace_id, group_id) +) STRICT; + +CREATE TABLE pane_items( + workspace_id INTEGER, + pane_id INTEGER, + item_id INTEGER, -- Array + index INTEGER, + KEY (workspace_id, pane_id) +) STRICT; +"; + +#[derive(Debug, PartialEq, Eq)] +pub struct PaneId { + workspace_id: WorkspaceId, + pane_id: usize, +} + +#[derive(Debug, PartialEq, Eq)] +pub struct PaneGroupId { + workspace_id: WorkspaceId, + group_id: usize, +} + +impl PaneGroupId { + pub(crate) fn root(workspace_id: WorkspaceId) -> Self { + Self { + workspace_id, + group_id: 0, + } + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct SerializedPaneGroup { + group_id: PaneGroupId, + axis: Axis, + children: Vec, +} + +struct PaneGroupChildRow { + child_pane_id: Option, + child_group_id: Option, + index: usize, +} + +#[derive(Debug, PartialEq, Eq)] +pub enum PaneGroupChild { + Pane(SerializedPane), + Group(SerializedPaneGroup), +} + +#[derive(Debug, PartialEq, Eq)] +pub struct SerializedPane { + pane_id: PaneId, + children: Vec, +} + +impl Db { + pub(crate) fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { + let axis = self.get_pane_group_axis(pane_group_id); + let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); + for child_row in self.get_pane_group_children(pane_group_id) { + if let Some(child_pane_id) = child_row.child_pane_id { + children.push(( + child_row.index, + PaneGroupChild::Pane(self.get_pane(PaneId { + workspace_id: pane_group_id.workspace_id, + pane_id: child_pane_id, + })), + )); + } else if let Some(child_group_id) = child_row.child_group_id { + children.push(( + child_row.index, + PaneGroupChild::Group(self.get_pane_group(PaneGroupId { + workspace_id: pane_group_id.workspace_id, + group_id: child_group_id, + })), + )); + } + } + children.sort_by_key(|(index, _)| index); + + SerializedPaneGroup { + group_id: pane_group_id, + axis, + children: children.into_iter().map(|(_, child)| child).collect(), + } + } + + pub fn get_pane_group_children( + &self, + pane_group_id: PaneGroupId, + ) -> impl Iterator { + unimplemented!() + } + + pub fn get_pane_group_axis(&self, pane_group_id: PaneGroupId) -> Axis { + unimplemented!(); + } + + pub fn save_center_pane_group(&self, center_pane_group: SerializedPaneGroup) { + // Delete the center pane group for this workspace and any of its children + // Generate new pane group IDs as we go through + // insert them + // Items garbage collect themselves when dropped + } + + pub(crate) fn get_pane(&self, pane_id: PaneId) -> SerializedPane { + unimplemented!(); + } + + pub fn save_pane(&self, pane: 
SerializedPane) {} +} diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 8ece0d5b78..e342391b71 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,5 +1,7 @@ use std::{path::Path, sync::Arc}; +use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup}; + use super::Db; pub(crate) const WORKSPACE_M_1: &str = " @@ -17,28 +19,6 @@ CREATE TABLE worktree_roots( workspace_id INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) ) STRICT; - -CREATE TABLE pane_groups( - workspace_id INTEGER, - group_id INTEGER, - split_direction STRING, -- 'Vertical' / 'Horizontal' / - PRIMARY KEY (workspace_id, group_id) -) STRICT; - -CREATE TABLE pane_group_children( - workspace_id INTEGER, - group_id INTEGER, - child_pane_id INTEGER, -- Nullable - child_group_id INTEGER, -- Nullable - PRIMARY KEY (workspace_id, group_id) -) STRICT; - -CREATE TABLE pane_items( - workspace_id INTEGER, - pane_id INTEGER, - item_id INTEGER, -- Array - PRIMARY KEY (workspace_id, pane_id) -) STRICT; "; // Zed stores items with ids which are a combination of a view id during a given run and a workspace id. This @@ -52,18 +32,65 @@ CREATE TABLE pane_items( // Case 4: Starting Zed with multiple project folders // > Zed ~/projects/Zed ~/projects/Zed.dev -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Copy, Clone)] pub struct WorkspaceId(usize); +struct WorkspaceRow { + pub workspace_id: WorkspaceId, + pub center_group_id: PaneGroupId, + pub dock_pane_id: PaneId, +} + +pub struct SerializedWorkspace { + pub workspace_id: WorkspaceId, + pub center_group: SerializedPaneGroup, + pub dock_pane: Option, +} + impl Db { /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, return the /// the last workspace id - pub fn workspace_id(&self, worktree_roots: &[Arc]) -> WorkspaceId { + pub fn workspace_for_worktree_roots( + &self, + worktree_roots: &[Arc], + ) -> SerializedWorkspace { // Find the workspace id which is uniquely identified by this set of paths return it if found - // Otherwise: - // Find the max workspace_id and increment it as our new workspace id - // Store in the worktrees table the mapping from this new id to the set of worktree roots - unimplemented!(); + if let Some(workspace_id) = self.workspace_id(worktree_roots) { + let workspace_row = self.get_workspace_row(workspace_id); + let center_group = self.get_pane_group(workspace_row.center_group_id); + let dock_pane = self.get_pane(workspace_row.dock_pane_id); + + SerializedWorkspace { + workspace_id, + center_group, + dock_pane: Some(dock_pane), + } + } else { + let workspace_id = self.get_next_workspace_id(); + let center_group = SerializedPaneGroup { + group_id: PaneGroupId::root(workspace_id), + axis: Default::default(), + children: Default::default(), + }; + + SerializedWorkspace { + workspace_id, + center_group, + dock_pane: None, + } + } + } + + fn get_next_workspace_id(&self) -> WorkspaceId { + unimplemented!() + } + + fn workspace_id(&self, worktree_roots: &[Arc]) -> Option { + unimplemented!() + } + + fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow { + unimplemented!() } /// Updates the open paths for the given workspace id. 
Will garbage collect items from @@ -80,16 +107,12 @@ impl Db { unimplemented!(); } - /// Returns the previous workspace ids sorted by last modified + /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self) -> Vec<(WorkspaceId, Vec>)> { // Return all the workspace ids and their associated paths ordered by the access timestamp //ORDER BY timestamps unimplemented!(); } - - pub fn center_pane(&self, workspace: WorkspaceId) -> SerializedPaneGroup {} - - pub fn dock_pane(&self, workspace: WorkspaceId) -> SerializedPane {} } #[cfg(test)] @@ -104,6 +127,42 @@ mod tests { use super::WorkspaceId; + fn arc_path(path: &'static str) -> Arc { + PathBuf::from(path).into() + } + + fn test_detect_workspace_id() { + let data = &[ + (WorkspaceId(1), vec![arc_path("/tmp")]), + (WorkspaceId(2), vec![arc_path("/tmp"), arc_path("/tmp2")]), + ( + WorkspaceId(3), + vec![arc_path("/tmp"), arc_path("/tmp2"), arc_path("/tmp3")], + ), + ]; + + let db = Db::open_in_memory(); + + for (workspace_id, entries) in data { + db.update_worktree_roots(workspace_id, entries); //?? + } + + assert_eq!(None, db.workspace_id(&[arc_path("/tmp2")])); + assert_eq!( + None, + db.workspace_id(&[arc_path("/tmp2"), arc_path("/tmp3")]) + ); + assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&[arc_path("/tmp")])); + assert_eq!( + Some(WorkspaceId(2)), + db.workspace_id(&[arc_path("/tmp"), arc_path("/tmp2")]) + ); + assert_eq!( + Some(WorkspaceId(3)), + db.workspace_id(&[arc_path("/tmp"), arc_path("/tmp2"), arc_path("/tmp3")]) + ); + } + fn test_tricky_overlapping_updates() { // DB state: // (/tree) -> ID: 1 @@ -117,10 +176,6 @@ mod tests { // (/tree2, /tree3) -> ID: 2 // Get rid of 3 for garbage collection - fn arc_path(path: &'static str) -> Arc { - PathBuf::from(path).into() - } - let data = &[ (WorkspaceId(1), vec![arc_path("/tmp")]), (WorkspaceId(2), vec![arc_path("/tmp"), arc_path("/tmp2")]), @@ -131,18 +186,18 @@ mod tests { for (workspace_id, entries) in data { db.update_worktree_roots(workspace_id, entries); //?? 
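        // A sketch of the roots-to-workspace lookup these assertions depend on
        // (hypothetical SQL; `workspace_id()` is still unimplemented!() above,
        // only the `worktree_roots` table and its columns come from the schema):
        //
        //     SELECT workspace_id FROM worktree_roots
        //     WHERE worktree_root IN (?1, ?2)
        //     GROUP BY workspace_id
        //     HAVING COUNT(worktree_root) = 2
        //
        // finds every workspace containing all of the queried roots; an exact
        // match must additionally reject candidates that have rows for roots
        // outside the queried set.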
- assert_eq!(&db.workspace_id(&[]), workspace_id) + assert_eq!(&db.workspace_id(&[]), &Some(*workspace_id)) } for (workspace_id, entries) in data { - assert_eq!(&db.workspace_id(entries.as_slice()), workspace_id); + assert_eq!(&db.workspace_id(entries.as_slice()), &Some(*workspace_id)); } db.update_worktree_roots(&WorkspaceId(2), &[arc_path("/tmp2")]); // todo!(); // make sure that 3 got garbage collected - assert_eq!(db.workspace_id(&[arc_path("/tmp2")]), WorkspaceId(2)); - assert_eq!(db.workspace_id(&[arc_path("/tmp")]), WorkspaceId(1)); + assert_eq!(db.workspace_id(&[arc_path("/tmp2")]), Some(WorkspaceId(2))); + assert_eq!(db.workspace_id(&[arc_path("/tmp")]), Some(WorkspaceId(1))); let recent_workspaces = db.recent_workspaces(); assert_eq!(recent_workspaces.get(0).unwrap().0, WorkspaceId(2)); diff --git a/crates/gpui/src/presenter.rs b/crates/gpui/src/presenter.rs index d15051ef12..27cd2a1347 100644 --- a/crates/gpui/src/presenter.rs +++ b/crates/gpui/src/presenter.rs @@ -863,8 +863,9 @@ pub struct DebugContext<'a> { pub app: &'a AppContext, } -#[derive(Clone, Copy, Debug, Eq, PartialEq)] +#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] pub enum Axis { + #[default] Horizontal, Vertical, } diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index 2db4ef2d3d..c481792f7c 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -18,6 +18,7 @@ test-support = [ ] [dependencies] +db = { path = "../db" } call = { path = "../call" } client = { path = "../client" } collections = { path = "../collections" } diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index b17a7ea22e..fa8f182a31 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -137,7 +137,11 @@ pub struct Dock { } impl Dock { - pub fn new(cx: &mut ViewContext, default_item_factory: DefaultItemFactory) -> Self { + pub fn new( + serialized_pane: SerializedPane, + default_item_factory: DefaultItemFactory, + cx: &mut ViewContext, + ) -> Self { let anchor = cx.global::().default_dock_anchor; let pane = cx.add_view(|cx| Pane::new(Some(anchor), cx)); pane.update(cx, |pane, cx| { diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 7082b61949..86eff8fb79 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -1110,6 +1110,7 @@ enum FollowerItem { impl Workspace { pub fn new( + serialized_workspace: SerializedWorkspace, project: ModelHandle, dock_default_factory: DefaultItemFactory, cx: &mut ViewContext, From 73f0459a0fadfeebd82472729e6cf5f29b0c41d1 Mon Sep 17 00:00:00 2001 From: K Simmons Date: Thu, 20 Oct 2022 16:24:33 -0700 Subject: [PATCH 117/240] wip --- crates/db/src/db.rs | 1 + crates/db/src/pane.rs | 24 +++++++++++++++++------- crates/db/src/workspace.rs | 8 +------- crates/workspace/src/dock.rs | 6 +----- crates/workspace/src/workspace.rs | 7 +++++-- 5 files changed, 25 insertions(+), 21 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index bf3cd64508..9a64986987 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -14,6 +14,7 @@ use parking_lot::Mutex; use rusqlite::Connection; use migrations::MIGRATIONS; +pub use workspace::*; #[derive(Clone)] pub enum Db { diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 98feb36abf..8ca1fd5de2 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -30,13 +30,13 @@ CREATE TABLE pane_items( ) STRICT; "; -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Copy, Clone)] 
pub struct PaneId { workspace_id: WorkspaceId, pane_id: usize, } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Copy, Clone)] pub struct PaneGroupId { workspace_id: WorkspaceId, group_id: usize, @@ -58,6 +58,16 @@ pub struct SerializedPaneGroup { children: Vec, } +impl SerializedPaneGroup { + pub(crate) fn empty_root(workspace_id: WorkspaceId) -> Self { + Self { + group_id: PaneGroupId::root(workspace_id), + axis: Default::default(), + children: Default::default(), + } + } +} + struct PaneGroupChildRow { child_pane_id: Option, child_group_id: Option, @@ -99,7 +109,7 @@ impl Db { )); } } - children.sort_by_key(|(index, _)| index); + children.sort_by_key(|(index, _)| *index); SerializedPaneGroup { group_id: pane_group_id, @@ -108,18 +118,18 @@ impl Db { } } - pub fn get_pane_group_children( + fn get_pane_group_children( &self, pane_group_id: PaneGroupId, ) -> impl Iterator { - unimplemented!() + Vec::new().into_iter() } - pub fn get_pane_group_axis(&self, pane_group_id: PaneGroupId) -> Axis { + fn get_pane_group_axis(&self, pane_group_id: PaneGroupId) -> Axis { unimplemented!(); } - pub fn save_center_pane_group(&self, center_pane_group: SerializedPaneGroup) { + pub fn save_pane_splits(&self, center_pane_group: SerializedPaneGroup) { // Delete the center pane group for this workspace and any of its children // Generate new pane group IDs as we go through // insert them diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index e342391b71..e60cb19e3b 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -36,7 +36,6 @@ CREATE TABLE worktree_roots( pub struct WorkspaceId(usize); struct WorkspaceRow { - pub workspace_id: WorkspaceId, pub center_group_id: PaneGroupId, pub dock_pane_id: PaneId, } @@ -67,15 +66,10 @@ impl Db { } } else { let workspace_id = self.get_next_workspace_id(); - let center_group = SerializedPaneGroup { - group_id: PaneGroupId::root(workspace_id), - axis: Default::default(), - children: Default::default(), - }; SerializedWorkspace { workspace_id, - center_group, + center_group: SerializedPaneGroup::empty_root(workspace_id), dock_pane: None, } } diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index fa8f182a31..699b9b1d60 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -137,11 +137,7 @@ pub struct Dock { } impl Dock { - pub fn new( - serialized_pane: SerializedPane, - default_item_factory: DefaultItemFactory, - cx: &mut ViewContext, - ) -> Self { + pub fn new(default_item_factory: DefaultItemFactory, cx: &mut ViewContext) -> Self { let anchor = cx.global::().default_dock_anchor; let pane = cx.add_view(|cx| Pane::new(Some(anchor), cx)); pane.update(cx, |pane, cx| { diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 86eff8fb79..154cf10912 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -15,6 +15,7 @@ use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; +use db::{SerializedWorkspace, WorkspaceId}; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -1064,6 +1065,7 @@ pub enum Event { pub struct Workspace { weak_self: WeakViewHandle, + db_id: WorkspaceId, client: Arc, user_store: ModelHandle, remote_entity_subscription: Option, @@ -1110,8 +1112,8 @@ enum FollowerItem { impl Workspace { pub fn new( - 
serialized_workspace: SerializedWorkspace, project: ModelHandle, + serialized_workspace: SerializedWorkspace, dock_default_factory: DefaultItemFactory, cx: &mut ViewContext, ) -> Self { @@ -1175,7 +1177,7 @@ impl Workspace { cx.emit_global(WorkspaceCreated(weak_handle.clone())); - let dock = Dock::new(cx, dock_default_factory); + let dock = Dock::new(dock_default_factory, cx); let dock_pane = dock.pane().clone(); let left_sidebar = cx.add_view(|_| Sidebar::new(SidebarSide::Left)); @@ -1207,6 +1209,7 @@ impl Workspace { let mut this = Workspace { modal: None, weak_self: weak_handle, + db_id: serialized_workspace.workspace_id, center: PaneGroup::new(center_pane.clone()), dock, // When removing an item, the last element remaining in this array From e5c6393f85b9853ca1512bf2818b155e8e866986 Mon Sep 17 00:00:00 2001 From: K Simmons Date: Fri, 21 Oct 2022 00:09:09 -0700 Subject: [PATCH 118/240] rebase fix - almost have serialize_workspace piped to the workspace constructor. Just a few compile errors left --- crates/db/src/workspace.rs | 5 +- crates/workspace/src/workspace.rs | 164 ++++++++++++++++++------------ crates/zed/src/zed.rs | 6 +- 3 files changed, 106 insertions(+), 69 deletions(-) diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index e60cb19e3b..aa1ca6efb5 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,4 +1,7 @@ -use std::{path::Path, sync::Arc}; +use std::{ + path::{Path, PathBuf}, + sync::Arc, +}; use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup}; diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 154cf10912..7f82a46edf 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -15,7 +15,7 @@ use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; -use db::{SerializedWorkspace, WorkspaceId}; +use db::{Db, SerializedWorkspace, WorkspaceId}; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -180,7 +180,11 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { let app_state = Arc::downgrade(&app_state); move |_: &NewFile, cx: &mut MutableAppContext| { if let Some(app_state) = app_state.upgrade() { - open_new(&app_state, cx) + let task = open_new(&app_state, cx); + cx.spawn(|_| async { + task.await; + }) + .detach(); } } }); @@ -188,7 +192,11 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { let app_state = Arc::downgrade(&app_state); move |_: &NewWindow, cx: &mut MutableAppContext| { if let Some(app_state) = app_state.upgrade() { - open_new(&app_state, cx) + let task = open_new(&app_state, cx); + cx.spawn(|_| async { + task.await; + }) + .detach(); } } }); @@ -1112,8 +1120,8 @@ enum FollowerItem { impl Workspace { pub fn new( - project: ModelHandle, serialized_workspace: SerializedWorkspace, + project: ModelHandle, dock_default_factory: DefaultItemFactory, cx: &mut ViewContext, ) -> Self { @@ -1242,6 +1250,74 @@ impl Workspace { this } + fn new_local( + abs_paths: &[PathBuf], + app_state: &Arc, + cx: &mut MutableAppContext, + callback: F, + ) -> Task + where + T: 'static, + F: 'static + FnOnce(&mut Workspace, &mut ViewContext) -> T, + { + let project_handle = Project::local( + app_state.client.clone(), + app_state.user_store.clone(), + app_state.project_store.clone(), + app_state.languages.clone(), + app_state.fs.clone(), + cx, + ); + + cx.spawn(|mut 
cx| async move { + // Get project paths for all of the abs_paths + let mut worktree_roots: HashSet> = Default::default(); + let mut project_paths = Vec::new(); + for path in abs_paths { + if let Some((worktree, project_entry)) = cx + .update(|cx| Workspace::project_path_for_path(project_handle, path, true, cx)) + .await + .log_err() + { + worktree_roots.insert(worktree.read_with(&mut cx, |tree, _| tree.abs_path())); + project_paths.push(project_entry); + } + } + + // Use the resolved worktree roots to get the serialized_db from the database + let serialized_workspace = cx.read(|cx| { + cx.global::() + .workspace_for_worktree_roots(&Vec::from_iter(worktree_roots.into_iter())[..]) + }); + + // Use the serialized workspace to construct the new window + let (_, workspace) = cx.add_window((app_state.build_window_options)(), |cx| { + let mut workspace = Workspace::new( + serialized_workspace, + project_handle, + app_state.default_item_factory, + cx, + ); + (app_state.initialize_workspace)(&mut workspace, &app_state, cx); + workspace + }); + + // Call open path for each of the project paths + // (this will bring them to the front if they were in kthe serialized workspace) + let tasks = workspace.update(&mut cx, |workspace, cx| { + let tasks = Vec::new(); + for path in project_paths { + tasks.push(workspace.open_path(path, true, cx)); + } + tasks + }); + futures::future::join_all(tasks.into_iter()).await; + + // Finally call callback on the workspace + workspace.update(&mut cx, |workspace, cx| callback(workspace, cx)) + }) + } + pub fn weak_handle(&self) -> WeakViewHandle { self.weak_self.clone() } @@ -1289,34 +1365,18 @@ impl Workspace { /// to the callback. Otherwise, a new empty window will be created. pub fn with_local_workspace( &mut self, + app_state: &Arc, cx: &mut ViewContext, - app_state: Arc, callback: F, - ) -> T + ) -> Task where T: 'static, F: FnOnce(&mut Workspace, &mut ViewContext) -> T, { if self.project.read(cx).is_local() { - callback(self, cx) + Task::Ready(Some(callback(self, cx))) } else { - let (_, workspace) = cx.add_window((app_state.build_window_options)(), |cx| { - let mut workspace = Workspace::new( - Project::local( - app_state.client.clone(), - app_state.user_store.clone(), - app_state.project_store.clone(), - app_state.languages.clone(), - app_state.fs.clone(), - cx, - ), - app_state.default_item_factory, - cx, - ); - (app_state.initialize_workspace)(&mut workspace, &app_state, cx); - workspace - }); - workspace.update(cx, callback) + Self::new_local(&[], app_state, cx, callback) } } @@ -1479,7 +1539,7 @@ impl Workspace { for path in &abs_paths { project_paths.push( this.update(&mut cx, |this, cx| { - this.project_path_for_path(path, visible, cx) + Workspace::project_path_for_path(this.project, path, visible, cx) }) .await .log_err(), @@ -1544,15 +1604,15 @@ impl Workspace { } fn project_path_for_path( - &self, + project: ModelHandle, abs_path: &Path, visible: bool, - cx: &mut ViewContext, + cx: &mut MutableAppContext, ) -> Task, ProjectPath)>> { - let entry = self.project().update(cx, |project, cx| { + let entry = project.update(cx, |project, cx| { project.find_or_create_local_worktree(abs_path, visible, cx) }); - cx.spawn(|_, cx| async move { + cx.spawn(|cx| async move { let (worktree, path) = entry.await?; let worktree_id = worktree.read_with(&cx, |t, _| t.id()); Ok(( @@ -2957,7 +3017,6 @@ pub fn open_paths( let app_state = app_state.clone(); let abs_paths = abs_paths.to_vec(); cx.spawn(|mut cx| async move { - let mut new_project = None; let workspace = if let 
Some(existing) = existing { existing } else { @@ -2966,24 +3025,15 @@ pub fn open_paths( .await .contains(&false); - cx.add_window((app_state.build_window_options)(), |cx| { - let project = Project::local( - app_state.client.clone(), - app_state.user_store.clone(), - app_state.project_store.clone(), - app_state.languages.clone(), - app_state.fs.clone(), - cx, - ); - new_project = Some(project.clone()); - let mut workspace = Workspace::new(project, app_state.default_item_factory, cx); - (app_state.initialize_workspace)(&mut workspace, &app_state, cx); - if contains_directory { - workspace.toggle_sidebar(SidebarSide::Left, cx); - } - workspace + cx.update(|cx| { + Workspace::new_local(&abs_paths[..], &app_state, cx, move |workspace, cx| { + if contains_directory { + workspace.toggle_sidebar(SidebarSide::Left, cx); + } + cx.handle() + }) }) - .1 + .await }; let items = workspace @@ -2996,24 +3046,8 @@ pub fn open_paths( }) } -fn open_new(app_state: &Arc, cx: &mut MutableAppContext) { - let (window_id, workspace) = cx.add_window((app_state.build_window_options)(), |cx| { - let mut workspace = Workspace::new( - Project::local( - app_state.client.clone(), - app_state.user_store.clone(), - app_state.project_store.clone(), - app_state.languages.clone(), - app_state.fs.clone(), - cx, - ), - app_state.default_item_factory, - cx, - ); - (app_state.initialize_workspace)(&mut workspace, app_state, cx); - workspace - }); - cx.dispatch_action_at(window_id, workspace.id(), NewFile); +fn open_new(app_state: &Arc, cx: &mut MutableAppContext) -> Task<()> { + Workspace::new_local(&[], app_state, cx, |_, cx| cx.dispatch_action(NewFile)) } #[cfg(test)] diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index bb33109d0d..71a99cb3b2 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -463,7 +463,7 @@ fn open_config_file( workspace .update(&mut cx, |workspace, cx| { - workspace.with_local_workspace(cx, app_state, |workspace, cx| { + workspace.with_local_workspace(app_state, cx, |workspace, cx| { workspace.open_paths(vec![path.to_path_buf()], false, cx) }) }) @@ -480,7 +480,7 @@ fn open_log_file( ) { const MAX_LINES: usize = 1000; - workspace.with_local_workspace(cx, app_state.clone(), |_, cx| { + workspace.with_local_workspace(app_state.clone(), cx, |_, cx| { cx.spawn_weak(|workspace, mut cx| async move { let (old_log, new_log) = futures::join!( app_state.fs.load(&paths::OLD_LOG), @@ -532,7 +532,7 @@ fn open_telemetry_log_file( app_state: Arc, cx: &mut ViewContext, ) { - workspace.with_local_workspace(cx, app_state.clone(), |_, cx| { + workspace.with_local_workspace(app_state.clone(), cx, |_, cx| { cx.spawn_weak(|workspace, mut cx| async move { let workspace = workspace.upgrade(&cx)?; let path = app_state.client.telemetry_log_file_path()?; From 500ecbf91504db010a03fa6dc921b2416d4f22f8 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 24 Oct 2022 16:55:32 -0700 Subject: [PATCH 119/240] Rebase fix + Started writing the real SQL we're going to need --- crates/db/Cargo.toml | 3 +- crates/db/README.md | 5 ++ crates/db/examples/serialize_workspace.rs | 22 ++++++ crates/db/src/db.rs | 60 +++++++++------ crates/db/src/migrations.rs | 3 +- crates/db/src/pane.rs | 7 ++ crates/db/src/workspace.rs | 90 +++++++++-------------- test.rs | 0 8 files changed, 109 insertions(+), 81 deletions(-) create mode 100644 crates/db/README.md create mode 100644 crates/db/examples/serialize_workspace.rs create mode 100644 test.rs diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 10f0858a52..9c841519d2 
100644
--- a/crates/db/Cargo.toml
+++ b/crates/db/Cargo.toml
@@ -18,8 +18,9 @@ async-trait = "0.1"
 lazy_static = "1.4.0"
 log = { version = "0.4.16", features = ["kv_unstable_serde"] }
 parking_lot = "0.11.1"
-rusqlite = { version = "0.28.0", features = ["bundled", "serde_json"] }
+rusqlite = { version = "0.28.0", features = ["bundled", "serde_json", "backup"] }
 rusqlite_migration = { git = "https://github.com/cljoly/rusqlite_migration", rev = "c433555d7c1b41b103426e35756eb3144d0ebbc6" }
+>>>>>>> dd2ddc5e3 (Started writing the real SQL we're going to need)
 serde = { workspace = true }
 serde_rusqlite = "0.31.0"
diff --git a/crates/db/README.md b/crates/db/README.md
new file mode 100644
index 0000000000..d4ea2fee39
--- /dev/null
+++ b/crates/db/README.md
@@ -0,0 +1,5 @@
+# Building Queries
+
+First, craft your test data. The examples folder shows a template for building a test-db, and can be run with `cargo run --example [your-example]`.
+
+To actually use and test your queries, import the generated DB file into https://sqliteonline.com/
\ No newline at end of file
diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs
new file mode 100644
index 0000000000..e2bf288710
--- /dev/null
+++ b/crates/db/examples/serialize_workspace.rs
@@ -0,0 +1,22 @@
+use std::{fs::File, path::Path};
+
+const TEST_FILE: &'static str = "test-db.db";
+
+fn main() -> anyhow::Result<()> {
+    let db = db::Db::open_in_memory();
+    if db.real().is_none() {
+        return Err(anyhow::anyhow!("Migrations failed"));
+    }
+    let file = Path::new(TEST_FILE);
+
+    let f = File::create(file)?;
+    drop(f);
+
+    db.write_kvp("test", "1")?;
+    db.write_kvp("test", "2")?;
+    db.write_to(file).ok();
+
+    println!("Wrote database!");
+
+    Ok(())
+}
diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs
index 9a64986987..320b131ea6 100644
--- a/crates/db/src/db.rs
+++ b/crates/db/src/db.rs
@@ -11,7 +11,7 @@ use std::sync::Arc;
 use anyhow::Result;
 use log::error;
 use parking_lot::Mutex;
-use rusqlite::Connection;
+use rusqlite::{backup, Connection};
 
 use migrations::MIGRATIONS;
 pub use workspace::*;
@@ -54,27 +54,6 @@ impl Db {
         })
     }
 
-    /// Open an in-memory database for testing and as a fallback.
-    #[cfg(any(test, feature = "test-support"))]
-    pub fn open_in_memory() -> Self {
-        Connection::open_in_memory()
-            .map_err(Into::into)
-            .and_then(|connection| Self::initialize(connection))
-            .map(|connection| {
-                Db::Real(Arc::new(RealDb {
-                    connection,
-                    path: None,
-                }))
-            })
-            .unwrap_or_else(|e| {
-                error!(
-                    "Connecting to in memory db failed. Reverting to null db. {}",
-                    e
-                );
-                Self::Null
-            })
-    }
-
     fn initialize(mut conn: Connection) -> Result<Arc<Mutex<Connection>>> {
         MIGRATIONS.to_latest(&mut conn)?;
 
@@ -96,6 +75,43 @@ impl Db {
             _ => None,
         }
     }
+
+    /// Open an in-memory database for testing and as a fallback.
+    pub fn open_in_memory() -> Self {
+        Connection::open_in_memory()
+            .map_err(Into::into)
+            .and_then(|connection| Self::initialize(connection))
+            .map(|connection| {
+                Db::Real(Arc::new(RealDb {
+                    connection,
+                    path: None,
+                }))
+            })
+            .unwrap_or_else(|e| {
+                error!(
+                    "Connecting to in memory db failed. Reverting to null db. 
{}",
+                    e
+                );
+                Self::Null
+            })
+    }
+
+    pub fn write_to<P: AsRef<Path>>(&self, dest: P) -> Result<()> {
+        self.real()
+            .map(|db| {
+                if db.path.is_some() {
+                    panic!("DB already exists");
+                }
+
+                let lock = db.connection.lock();
+                let mut dst = Connection::open(dest)?;
+                let backup = backup::Backup::new(&lock, &mut dst)?;
+                backup.step(-1)?;
+
+                Ok(())
+            })
+            .unwrap_or(Ok(()))
+    }
 }
 
 impl Drop for Db {
diff --git a/crates/db/src/migrations.rs b/crates/db/src/migrations.rs
index 3a21c7fa6f..e10c388d5c 100644
--- a/crates/db/src/migrations.rs
+++ b/crates/db/src/migrations.rs
@@ -1,7 +1,7 @@
 use rusqlite_migration::{Migrations, M};
 
 // use crate::items::ITEMS_M_1;
-use crate::kvp::KVP_M_1;
+use crate::{kvp::KVP_M_1, WORKSPACE_M_1};
 
 // This must be ordered by development time! Only ever add new migrations to the end!!
 // Bad things will probably happen if you don't monotonically edit this vec!!!!
@@ -10,5 +10,6 @@ use crate::kvp::KVP_M_1;
 lazy_static::lazy_static! {
     pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![
         M::up(KVP_M_1),
+        M::up(WORKSPACE_M_1)
     ]);
 }
diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs
index 8ca1fd5de2..89721157c3 100644
--- a/crates/db/src/pane.rs
+++ b/crates/db/src/pane.rs
@@ -28,6 +28,13 @@ CREATE TABLE pane_items(
     index INTEGER,
     KEY (workspace_id, pane_id)
 ) STRICT;
+
+ALTER TABLE WORKSPACE
+ADD THESE COLS:
+center_group INTEGER NOT NULL,
+dock_pane INTEGER NOT NULL,
+-- FOREIGN KEY(center_group) REFERENCES pane_groups(group_id)
+-- FOREIGN KEY(dock_pane) REFERENCES pane_items(pane_id)
 ";
 
 #[derive(Debug, PartialEq, Eq, Copy, Clone)]
diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs
index aa1ca6efb5..6093b10355 100644
--- a/crates/db/src/workspace.rs
+++ b/crates/db/src/workspace.rs
@@ -1,7 +1,6 @@
-use std::{
-    path::{Path, PathBuf},
-    sync::Arc,
-};
+use anyhow::Result;
+
+use std::{path::Path, sync::Arc};
 
 use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup};
 
@@ -9,18 +8,15 @@ use super::Db;
 
 pub(crate) const WORKSPACE_M_1: &str = "
 CREATE TABLE workspaces(
-    workspace_id INTEGER PRIMARY KEY,
-    center_group INTEGER NOT NULL,
-    dock_pane INTEGER NOT NULL,
-    timestamp INTEGER,
-    FOREIGN KEY(center_group) REFERENCES pane_groups(group_id)
-    FOREIGN KEY(dock_pane) REFERENCES pane_items(pane_id)
+    workspace_id INTEGER PRIMARY KEY AUTOINCREMENT,
+    timestamp TEXT DEFAULT CURRENT_TIMESTAMP,
+    dummy_data INTEGER
 ) STRICT;
 
 CREATE TABLE worktree_roots(
     worktree_root BLOB NOT NULL,
     workspace_id INTEGER NOT NULL,
-    FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id)
+    FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
 ) STRICT;
 ";
 
@@ -35,18 +31,19 @@ CREATE TABLE worktree_roots(
 // Case 4: Starting Zed with multiple project folders
 //  > Zed ~/projects/Zed ~/projects/Zed.dev
 
-#[derive(Debug, PartialEq, Eq, Copy, Clone)]
-pub struct WorkspaceId(usize);
+#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)]
+pub struct WorkspaceId(i64);
 
 struct WorkspaceRow {
     pub center_group_id: PaneGroupId,
     pub dock_pane_id: PaneId,
 }
 
+#[derive(Default)]
 pub struct SerializedWorkspace {
     pub workspace_id: WorkspaceId,
-    pub center_group: SerializedPaneGroup,
-    pub dock_pane: Option<SerializedPane>,
+    // pub center_group: SerializedPaneGroup,
+    // pub dock_pane: Option<SerializedPane>,
 }
 
 impl Db {
@@ -58,28 +55,33 @@ impl Db {
     ) -> SerializedWorkspace {
         // Find the workspace id which is uniquely identified by this set of paths return it if found
         if let Some(workspace_id) = self.workspace_id(worktree_roots) {
-            let workspace_row =
self.get_workspace_row(workspace_id); - let center_group = self.get_pane_group(workspace_row.center_group_id); - let dock_pane = self.get_pane(workspace_row.dock_pane_id); + // TODO + // let workspace_row = self.get_workspace_row(workspace_id); + // let center_group = self.get_pane_group(workspace_row.center_group_id); + // let dock_pane = self.get_pane(workspace_row.dock_pane_id); SerializedWorkspace { workspace_id, - center_group, - dock_pane: Some(dock_pane), + // center_group, + // dock_pane: Some(dock_pane), } } else { - let workspace_id = self.get_next_workspace_id(); - - SerializedWorkspace { - workspace_id, - center_group: SerializedPaneGroup::empty_root(workspace_id), - dock_pane: None, - } + self.make_new_workspace() } } - fn get_next_workspace_id(&self) -> WorkspaceId { - unimplemented!() + fn make_new_workspace(&self) -> SerializedWorkspace { + self.real() + .map(|db| { + let lock = db.connection.lock(); + match lock.execute("INSERT INTO workspaces(dummy_data) VALUES(1);", []) { + Ok(_) => SerializedWorkspace { + workspace_id: WorkspaceId(lock.last_insert_rowid()), + }, + Err(_) => Default::default(), + } + }) + .unwrap_or_default() } fn workspace_id(&self, worktree_roots: &[Arc]) -> Option { @@ -128,6 +130,7 @@ mod tests { PathBuf::from(path).into() } + #[test] fn test_detect_workspace_id() { let data = &[ (WorkspaceId(1), vec![arc_path("/tmp")]), @@ -160,6 +163,7 @@ mod tests { ); } + #[test] fn test_tricky_overlapping_updates() { // DB state: // (/tree) -> ID: 1 @@ -202,31 +206,3 @@ mod tests { assert_eq!(recent_workspaces.get(2).unwrap().0, WorkspaceId(1)); } } - -// [/tmp, /tmp2] -> ID1? -// [/tmp] -> ID2? - -/* -path | id -/tmp ID1 -/tmp ID2 -/tmp2 ID1 - - -SELECT id -FROM workspace_ids -WHERE path IN (path1, path2) -INTERSECT -SELECT id -FROM workspace_ids -WHERE path = path_2 -... and etc. for each element in path array - -If contains row, yay! 
If not, -SELECT max(id) FROm workspace_ids - -Select id WHERE path IN paths - -SELECT MAX(id) - -*/ diff --git a/test.rs b/test.rs new file mode 100644 index 0000000000..e69de29bb2 From d7bbfb82a3c38a3d979990dbacec5c8c65d08746 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 25 Oct 2022 13:18:37 -0700 Subject: [PATCH 120/240] Rebase - Successfully detecting workplace IDs :D --- .gitignore | 4 + crates/db/Cargo.toml | 1 - crates/db/examples/serialize_workspace.rs | 26 ++- crates/db/src/workspace.rs | 242 ++++++++++++++++++---- 4 files changed, 228 insertions(+), 45 deletions(-) diff --git a/.gitignore b/.gitignore index b4eba05582..da1950f2b3 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ /assets/themes/Internal/*.json /assets/themes/Experiments/*.json **/venv +<<<<<<< HEAD .build Packages *.xcodeproj @@ -18,3 +19,6 @@ DerivedData/ .swiftpm/config/registries.json .swiftpm/xcode/package.xcworkspace/contents.xcworkspacedata .netrc +======= +crates/db/test-db.db +>>>>>>> 9d9ad38ce (Successfully detecting workplace IDs :D) diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 9c841519d2..9fad1aa39a 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -20,7 +20,6 @@ log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" rusqlite = { version = "0.28.0", features = ["bundled", "serde_json", "backup"] } rusqlite_migration = { git = "https://github.com/cljoly/rusqlite_migration", rev = "c433555d7c1b41b103426e35756eb3144d0ebbc6" } ->>>>>>> dd2ddc5e3 (Started writing the real SQL we're going to need) serde = { workspace = true } serde_rusqlite = "0.31.0" diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs index e2bf288710..51082a811f 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -13,7 +13,31 @@ fn main() -> anyhow::Result<()> { drop(f); db.write_kvp("test", "1")?; - db.write_kvp("test", "2")?; + db.write_kvp("test-2", "2")?; + + let workspace_1 = db.workspace_for_worktree_roots(&[]); + let workspace_2 = db.workspace_for_worktree_roots(&[]); + let workspace_3 = db.workspace_for_worktree_roots(&[]); + let workspace_4 = db.workspace_for_worktree_roots(&[]); + let workspace_5 = db.workspace_for_worktree_roots(&[]); + let workspace_6 = db.workspace_for_worktree_roots(&[]); + let workspace_7 = db.workspace_for_worktree_roots(&[]); + + db.update_worktree_roots(&workspace_1.workspace_id, &["/tmp1"]) + .unwrap(); + db.update_worktree_roots(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]) + .unwrap(); + db.update_worktree_roots(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]) + .unwrap(); + db.update_worktree_roots(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]) + .unwrap(); + db.update_worktree_roots(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]) + .unwrap(); + db.update_worktree_roots(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]) + .unwrap(); + db.update_worktree_roots(&workspace_7.workspace_id, &["/tmp2"]) + .unwrap(); + db.write_to(file).ok(); println!("Wrote database!"); diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 6093b10355..d60e32f09f 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,4 +1,5 @@ use anyhow::Result; +use rusqlite::params; use std::{path::Path, sync::Arc}; @@ -14,9 +15,10 @@ CREATE TABLE workspaces( ) STRICT; CREATE TABLE worktree_roots( - worktree_root BLOB NOT NULL, + worktree_root TEXT NOT NULL, --TODO: Update this to use blobs 
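    -- Editor's note, not part of the original patch: PATCH 122 below switches this
    -- column to a BLOB holding raw OsStr bytes, since Unix paths are not guaranteed
    -- to be valid UTF-8 (see the users.rust-lang.org thread linked from PATCH 121's
    -- TODO list).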
workspace_id INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + PRIMARY KEY(worktree_root, workspace_id) ) STRICT; "; @@ -39,7 +41,7 @@ struct WorkspaceRow { pub dock_pane_id: PaneId, } -#[derive(Default)] +#[derive(Default, Debug)] pub struct SerializedWorkspace { pub workspace_id: WorkspaceId, // pub center_group: SerializedPaneGroup, @@ -54,7 +56,7 @@ impl Db { worktree_roots: &[Arc], ) -> SerializedWorkspace { // Find the workspace id which is uniquely identified by this set of paths return it if found - if let Some(workspace_id) = self.workspace_id(worktree_roots) { + if let Ok(Some(workspace_id)) = self.workspace_id(worktree_roots) { // TODO // let workspace_row = self.get_workspace_row(workspace_id); // let center_group = self.get_pane_group(workspace_row.center_group_id); @@ -84,8 +86,110 @@ impl Db { .unwrap_or_default() } - fn workspace_id(&self, worktree_roots: &[Arc]) -> Option { - unimplemented!() + fn workspace_id
<P>
(&self, worktree_roots: &[P]) -> Result<Option<WorkspaceId>>
+    where
+        P: AsRef<Path>,
+    {
+        self.real()
+            .map(|db| {
+                let lock = db.connection.lock();
+
+                // Prepare the array binding string. SQL doesn't have syntax for this, so
+                // we have to do it ourselves.
+                let mut array_binding_stmt = "(".to_string();
+                for i in 0..worktree_roots.len() {
+                    array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based
+                    if i < worktree_roots.len() - 1 {
+                        array_binding_stmt.push(',');
+                        array_binding_stmt.push(' ');
+                    }
+                }
+                array_binding_stmt.push(')');
+
+                // Any workspace can have multiple independent paths, and these paths
+                // can overlap in the database. Take this test data for example:
+                //
+                // [/tmp, /tmp2] -> 1
+                // [/tmp] -> 2
+                // [/tmp2, /tmp3] -> 3
+                //
+                // This would be stored in the database like so:
+                //
+                // ID PATH
+                // 1  /tmp
+                // 1  /tmp2
+                // 2  /tmp
+                // 3  /tmp2
+                // 3  /tmp3
+                //
+                // Note how both /tmp and /tmp2 are associated with multiple workspace IDs.
+                // So, given an array of worktree roots, how can we find the exactly matching ID?
+                // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out:
+                //  - We start with a join of this table on itself, generating every possible
+                //    pair of ((path, ID), (path, ID)), and filtering the join down to just the
+                //    *overlapping* workspace IDs. For this small data set, this would look like:
+                //
+                //    wt1.ID wt1.PATH | wt2.ID wt2.PATH
+                //    3      /tmp3      3      /tmp2
+                //
+                //  - Moving one SELECT out, we use the first pair's ID column to invert the selection,
+                //    meaning we now have a list of all the entries for our array and *subsets*
+                //    of our array:
+                //
+                //    ID PATH
+                //    1  /tmp
+                //    2  /tmp
+                //    2  /tmp2
+                //
+                //  - To trim out the subsets, we need to exploit the fact that there can be no duplicate
+                //    entries in this table. We can just use GROUP BY, COUNT, and a WHERE clause that checks
+                //    for the length of our array:
+                //
+                //    ID num_matching
+                //    1  2
+                //
+                // And we're done! We've found the matching ID correctly :D
+                // However, due to limitations in sqlite's query binding, we still have to do some string
+                // substitution to generate the correct query
+                let query = format!(
+                    r#"
+                    SELECT workspace_id 
+                    FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots
+                          WHERE worktree_root in {array_bind} AND workspace_id NOT IN
+                            (SELECT wt1.workspace_id FROM worktree_roots as wt1
+                             JOIN worktree_roots as wt2
+                             ON wt1.workspace_id = wt2.workspace_id
+                             WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind})
+                          GROUP BY workspace_id)
+                    WHERE num_matching = ?
+                    "#,
+                    array_bind = array_binding_stmt
+                );
+
+                let mut stmt = lock.prepare_cached(&query)?;
+
+                // Make sure we bound the parameters correctly
+                debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count());
+
+                for i in 0..worktree_roots.len() {
+                    // TODO: Update this to use blobs
+                    let path = &worktree_roots[i].as_ref().to_string_lossy().to_string();
+                    stmt.raw_bind_parameter(i + 1, path)?
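                    // Editor's note, not part of the original patch: with three worktree
                    // roots, `array_binding_stmt` above is built as "(?1, ?2, ?3)", this
                    // loop binds parameters ?1 through ?3, and the trailing bare `?` in
                    // the query is bound last, as ?4, with the length of the array for
                    // the `num_matching` check.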
+                }
+                // No -1, because SQLite is 1 based
+                stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?;
+
+                let mut rows = stmt.raw_query();
+                if let Ok(Some(row)) = rows.next() {
+                    return Ok(Some(WorkspaceId(row.get(0)?)))
+                }
+
+                // Ensure that this query only returns one row
+                debug_assert!(matches!(rows.next(), Ok(None)));
+
+                Ok(None)
+            })
+            .unwrap_or(Ok(None))
     }
 
-    fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow {
-        unimplemented!()
-    }
+    // fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow {
+    //     unimplemented!()
+    // }
 
     /// Updates the open paths for the given workspace id. Will garbage collect items from
     /// any workspace ids which are not replaced by the new workspace id. Updates the timestamps
     /// in the workspace id table
-    pub fn update_worktree_roots(&self, workspace_id: &WorkspaceId, worktree_roots: &[Arc<Path>]) {
+    pub fn update_worktree_roots
<P>
( + &self, + workspace_id: &WorkspaceId, + worktree_roots: &[P], + ) -> Result<()> + where + P: AsRef, + { // Lookup any WorkspaceIds which have the same set of roots, and delete them. (NOTE: this should garbage collect other tables) + // TODO // Remove the old rows which contain workspace_id + // TODO // Add rows for the new worktree_roots - // zed /tree - // -> add tree2 - // -> udpate_worktree_roots() -> ADDs entries for /tree and /tree2, LEAVING BEHIND, the initial entry for /tree - unimplemented!(); + self.real() + .map(|db| { + let lock = db.connection.lock(); + + for root in worktree_roots { + // TODO: Update this to use blobs + let path = root.as_ref().to_string_lossy().to_string(); + lock.execute( + "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", + params![workspace_id.0, path], + )?; + } + + Ok(()) + }) + .unwrap_or(Ok(())) } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots @@ -117,49 +242,79 @@ impl Db { #[cfg(test)] mod tests { - use std::{ - path::{Path, PathBuf}, - sync::Arc, - }; - use crate::Db; use super::WorkspaceId; - fn arc_path(path: &'static str) -> Arc { - PathBuf::from(path).into() - } - #[test] - fn test_detect_workspace_id() { + fn test_more_workspace_ids() { let data = &[ - (WorkspaceId(1), vec![arc_path("/tmp")]), - (WorkspaceId(2), vec![arc_path("/tmp"), arc_path("/tmp2")]), - ( - WorkspaceId(3), - vec![arc_path("/tmp"), arc_path("/tmp2"), arc_path("/tmp3")], - ), + (WorkspaceId(1), vec!["/tmp1"]), + (WorkspaceId(2), vec!["/tmp1", "/tmp2"]), + (WorkspaceId(3), vec!["/tmp1", "/tmp2", "/tmp3"]), + (WorkspaceId(4), vec!["/tmp2", "/tmp3"]), + (WorkspaceId(5), vec!["/tmp2", "/tmp3", "/tmp4"]), + (WorkspaceId(6), vec!["/tmp2", "/tmp4"]), + (WorkspaceId(7), vec!["/tmp2"]), ]; let db = Db::open_in_memory(); for (workspace_id, entries) in data { - db.update_worktree_roots(workspace_id, entries); //?? 
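        // Editor's sketch, not part of the original patch: per update, the transaction
        // introduced above boils down to these statements, run atomically (names after
        // ':' are illustrative placeholders, not from the patch):
        //
        //     DELETE FROM workspaces WHERE workspace_id = :preexisting_duplicate;
        //     DELETE FROM worktree_roots WHERE workspace_id = :id;
        //     INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (:id, :root); -- once per root
        //     UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = :id;
        //
        // The first DELETE only runs when another workspace already owned the same set
        // of roots.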
+ db.make_new_workspace(); + db.update_worktree_roots(workspace_id, entries).unwrap(); } - assert_eq!(None, db.workspace_id(&[arc_path("/tmp2")])); + assert_eq!(WorkspaceId(1), db.workspace_id(&["/tmp1"]).unwrap().unwrap()); assert_eq!( - None, - db.workspace_id(&[arc_path("/tmp2"), arc_path("/tmp3")]) + WorkspaceId(2), + db.workspace_id(&["/tmp1", "/tmp2"]).unwrap().unwrap() ); - assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&[arc_path("/tmp")])); assert_eq!( - Some(WorkspaceId(2)), - db.workspace_id(&[arc_path("/tmp"), arc_path("/tmp2")]) + WorkspaceId(3), + db.workspace_id(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().unwrap() ); + assert_eq!( + WorkspaceId(4), + db.workspace_id(&["/tmp2", "/tmp3"]).unwrap().unwrap() + ); + assert_eq!( + WorkspaceId(5), + db.workspace_id(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().unwrap() + ); + assert_eq!( + WorkspaceId(6), + db.workspace_id(&["/tmp2", "/tmp4"]).unwrap().unwrap() + ); + assert_eq!(WorkspaceId(7), db.workspace_id(&["/tmp2"]).unwrap().unwrap()); + + assert_eq!(None, db.workspace_id(&["/tmp1", "/tmp5"]).unwrap()); + assert_eq!(None, db.workspace_id(&["/tmp5"]).unwrap()); + assert_eq!(None, db.workspace_id(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]).unwrap()); + } + + #[test] + fn test_detect_workspace_id() { + let data = &[ + (WorkspaceId(1), vec!["/tmp"]), + (WorkspaceId(2), vec!["/tmp", "/tmp2"]), + (WorkspaceId(3), vec!["/tmp", "/tmp2", "/tmp3"]), + ]; + + let db = Db::open_in_memory(); + + for (workspace_id, entries) in data { + db.make_new_workspace(); + db.update_worktree_roots(workspace_id, entries).unwrap(); + } + + assert_eq!(None, db.workspace_id(&["/tmp2"]).unwrap()); + assert_eq!(None, db.workspace_id(&["/tmp2", "/tmp3"]).unwrap()); + assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp"]).unwrap()); + assert_eq!(Some(WorkspaceId(2)), db.workspace_id(&["/tmp", "/tmp2"]).unwrap()); assert_eq!( Some(WorkspaceId(3)), - db.workspace_id(&[arc_path("/tmp"), arc_path("/tmp2"), arc_path("/tmp3")]) + db.workspace_id(&["/tmp", "/tmp2", "/tmp3"]).unwrap() ); } @@ -178,27 +333,28 @@ mod tests { // Get rid of 3 for garbage collection let data = &[ - (WorkspaceId(1), vec![arc_path("/tmp")]), - (WorkspaceId(2), vec![arc_path("/tmp"), arc_path("/tmp2")]), - (WorkspaceId(3), vec![arc_path("/tmp2"), arc_path("/tmp3")]), + (WorkspaceId(1), vec!["/tmp"]), + (WorkspaceId(2), vec!["/tmp", "/tmp2"]), + (WorkspaceId(3), vec!["/tmp2", "/tmp3"]), ]; let db = Db::open_in_memory(); for (workspace_id, entries) in data { - db.update_worktree_roots(workspace_id, entries); //?? - assert_eq!(&db.workspace_id(&[]), &Some(*workspace_id)) + db.update_worktree_roots(workspace_id, entries).unwrap(); //?? 
+ assert_eq!(&db.workspace_id::(&[]).unwrap(), &Some(*workspace_id)) } for (workspace_id, entries) in data { - assert_eq!(&db.workspace_id(entries.as_slice()), &Some(*workspace_id)); + assert_eq!(&db.workspace_id(entries.as_slice()).unwrap(), &Some(*workspace_id)); } - db.update_worktree_roots(&WorkspaceId(2), &[arc_path("/tmp2")]); + db.update_worktree_roots(&WorkspaceId(2), &["/tmp2"]) + .unwrap(); // todo!(); // make sure that 3 got garbage collected - assert_eq!(db.workspace_id(&[arc_path("/tmp2")]), Some(WorkspaceId(2))); - assert_eq!(db.workspace_id(&[arc_path("/tmp")]), Some(WorkspaceId(1))); + assert_eq!(db.workspace_id(&["/tmp2"]).unwrap(), Some(WorkspaceId(2))); + assert_eq!(db.workspace_id(&["/tmp"]).unwrap(), Some(WorkspaceId(1))); let recent_workspaces = db.recent_workspaces(); assert_eq!(recent_workspaces.get(0).unwrap().0, WorkspaceId(2)); From e9ea751f3d37008c2398528e2ac9a36413373676 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 25 Oct 2022 15:27:51 -0700 Subject: [PATCH 121/240] All workspace tests passing :D --- crates/db/examples/serialize_workspace.rs | 14 +- crates/db/src/workspace.rs | 403 ++++++++++++++-------- 2 files changed, 261 insertions(+), 156 deletions(-) diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs index 51082a811f..108980ee92 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -1,4 +1,4 @@ -use std::{fs::File, path::Path}; +use std::{fs::File, path::Path, thread::sleep, time::Duration}; const TEST_FILE: &'static str = "test-db.db"; @@ -23,20 +23,28 @@ fn main() -> anyhow::Result<()> { let workspace_6 = db.workspace_for_worktree_roots(&[]); let workspace_7 = db.workspace_for_worktree_roots(&[]); + // Order scrambled + sleeps added because sqlite only has 1 second resolution on + // their timestamps + db.update_worktree_roots(&workspace_7.workspace_id, &["/tmp2"]) + .unwrap(); + sleep(Duration::from_secs(1)); db.update_worktree_roots(&workspace_1.workspace_id, &["/tmp1"]) .unwrap(); + sleep(Duration::from_secs(1)); db.update_worktree_roots(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]) .unwrap(); + sleep(Duration::from_secs(1)); db.update_worktree_roots(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]) .unwrap(); + sleep(Duration::from_secs(1)); db.update_worktree_roots(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]) .unwrap(); + sleep(Duration::from_secs(1)); db.update_worktree_roots(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]) .unwrap(); + sleep(Duration::from_secs(1)); db.update_worktree_roots(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]) .unwrap(); - db.update_worktree_roots(&workspace_7.workspace_id, &["/tmp2"]) - .unwrap(); db.write_to(file).ok(); diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index d60e32f09f..09aa9f5301 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,23 +1,31 @@ use anyhow::Result; -use rusqlite::params; +use rusqlite::{params, Connection}; -use std::{path::Path, sync::Arc}; +use std::{ + path::{Path, PathBuf}, + sync::Arc, +}; use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup}; use super::Db; +// TODO for workspace serialization: +// - Update return types to unwrap all of the results into dummy values +// - On database failure to initialize, delete the DB file +// - Update paths to be blobs ( :( https://users.rust-lang.org/t/how-to-safely-store-a-path-osstring-in-a-sqllite-database/79712/10 ) +// - Convert hot paths to 
prepare-cache-execute style + pub(crate) const WORKSPACE_M_1: &str = " CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY AUTOINCREMENT, - timestamp TEXT DEFAULT CURRENT_TIMESTAMP, - dummy_data INTEGER + timestamp TEXT DEFAULT CURRENT_TIMESTAMP ) STRICT; CREATE TABLE worktree_roots( - worktree_root TEXT NOT NULL, --TODO: Update this to use blobs + worktree_root TEXT NOT NULL, workspace_id INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE PRIMARY KEY(worktree_root, workspace_id) ) STRICT; "; @@ -76,7 +84,7 @@ impl Db { self.real() .map(|db| { let lock = db.connection.lock(); - match lock.execute("INSERT INTO workspaces(dummy_data) VALUES(1);", []) { + match lock.execute("INSERT INTO workspaces DEFAULT VALUES;", []) { Ok(_) => SerializedWorkspace { workspace_id: WorkspaceId(lock.last_insert_rowid()), }, @@ -93,108 +101,15 @@ impl Db { self.real() .map(|db| { let lock = db.connection.lock(); - - // Prepare the array binding string. SQL doesn't have syntax for this, so - // we have to do it ourselves. - let mut array_binding_stmt = "(".to_string(); - for i in 0..worktree_roots.len() { - array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based - if i < worktree_roots.len() - 1 { - array_binding_stmt.push(','); - array_binding_stmt.push(' '); - } - } - array_binding_stmt.push(')'); - - // Any workspace can have multiple independent paths, and these paths - // can overlap in the database. Take this test data for example: - // - // [/tmp, /tmp2] -> 1 - // [/tmp] -> 2 - // [/tmp2, /tmp3] -> 3 - // - // This would be stred in the database like so: - // - // ID PATH - // 1 /tmp - // 1 /tmp2 - // 2 /tmp - // 3 /tmp2 - // 3 /tmp3 - // - // Note how both /tmp and /tmp2 are associated with multiple workspace IDs. - // So, given an array of worktree roots, how can we find the exactly matching ID? - // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: - // - We start with a join of this table on itself, generating every possible - // pair of ((path, ID), (path, ID)), and filtering the join down to just the - // *overlapping* workspace IDs. For this small data set, this would look like: - // - // wt1.ID wt1.PATH | wt2.ID wt2.PATH - // 3 /tmp3 3 /tmp2 - // - // - Moving one SELECT out, we use the first pair's ID column to invert the selection, - // meaning we now have a list of all the entries for our array and *subsets* - // of our array: - // - // ID PATH - // 1 /tmp - // 2 /tmp - // 2 /tmp2 - // - // - To trim out the subsets, we need to exploit the fact that there can be no duplicate - // entries in this table. We can just use GROUP BY, COUNT, and a WHERE clause that checks - // for the length of our array: - // - // ID num_matching - // 1 2 - // - // And we're done! We've found the matching ID correctly :D - // However, due to limitations in sqlite's query binding, we still have to do some string - // substitution to generate the correct query - let query = format!( - r#" - SELECT workspace_id - FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots - WHERE worktree_root in {array_bind} AND workspace_id NOT IN - (SELECT wt1.workspace_id FROM worktree_roots as wt1 - JOIN worktree_roots as wt2 - ON wt1.workspace_id = wt2.workspace_id - WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind}) - GROUP BY workspace_id) - WHERE num_matching = ? 
- "#, - array_bind = array_binding_stmt - ); - let mut stmt = lock.prepare_cached(&query)?; - - // Make sure we bound the parameters correctly - debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count()); - - for i in 0..worktree_roots.len() { - // TODO: Update this to use blobs - let path = &worktree_roots[i].as_ref().to_string_lossy().to_string(); - stmt.raw_bind_parameter(i + 1, path)? - } - // No -1, because SQLite is 1 based - stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?; - - let mut rows = stmt.raw_query(); - if let Ok(Some(row)) = rows.next() { - return Ok(Some(WorkspaceId(row.get(0)?))) - } - - // Ensure that this query only returns one row - debug_assert!(matches!(rows.next(), Ok(None))); - - Ok(None) + get_workspace_id(worktree_roots, &lock) }) .unwrap_or(Ok(None)) } - fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow { - unimplemented!() - } + // fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow { + // unimplemented!() + // } /// Updates the open paths for the given workspace id. Will garbage collect items from /// any workspace ids which are no replaced by the new workspace id. Updates the timestamps @@ -207,24 +122,42 @@ impl Db { where P: AsRef, { - // Lookup any WorkspaceIds which have the same set of roots, and delete them. (NOTE: this should garbage collect other tables) - // TODO - // Remove the old rows which contain workspace_id - // TODO - // Add rows for the new worktree_roots - self.real() .map(|db| { - let lock = db.connection.lock(); + let mut lock = db.connection.lock(); - for root in worktree_roots { - // TODO: Update this to use blobs - let path = root.as_ref().to_string_lossy().to_string(); - lock.execute( - "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", - params![workspace_id.0, path], + let tx = lock.transaction()?; + + { + // Lookup any old WorkspaceIds which have the same set of roots, and delete them. + let preexisting_id = get_workspace_id(worktree_roots, &tx)?; + if let Some(preexisting_id) = preexisting_id { + if preexisting_id != *workspace_id { + // Should also delete fields in other tables + tx.execute( + "DELETE FROM workspaces WHERE workspace_id = ?", + [preexisting_id.0], + )?; + } + } + + tx.execute( + "DELETE FROM worktree_roots WHERE workspace_id = ?", + [workspace_id.0], )?; + + for root in worktree_roots { + // TODO: Update this to use blobs + let path = root.as_ref().to_string_lossy().to_string(); + + let mut stmt = tx.prepare_cached("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")?; + stmt.execute(params![workspace_id.0, path])?; + } + + let mut stmt = tx.prepare_cached("UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")?; + stmt.execute([workspace_id.0])?; } + tx.commit()?; Ok(()) }) @@ -232,16 +165,156 @@ impl Db { } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots - pub fn recent_workspaces(&self) -> Vec<(WorkspaceId, Vec>)> { + pub fn recent_workspaces(&self, limit: usize) -> Result>)>> { // Return all the workspace ids and their associated paths ordered by the access timestamp //ORDER BY timestamps - unimplemented!(); + self.real() + .map(|db| { + let mut lock = db.connection.lock(); + + let tx = lock.transaction()?; + let result = { + let mut stmt = tx.prepare_cached( + "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", + )?; + let workspace_ids = stmt + .query_map([limit], |row| Ok(WorkspaceId(row.get(0)?)))? 
+ .collect::, rusqlite::Error>>()?; + + let mut result = Vec::new(); + let mut stmt = tx.prepare_cached( + "SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?", + )?; + for workspace_id in workspace_ids { + let roots = stmt + .query_map([workspace_id.0], |row| { + let row = row.get::<_, String>(0)?; + Ok(PathBuf::from(Path::new(&row)).into()) + })? + .collect::, rusqlite::Error>>()?; + result.push((workspace_id, roots)) + } + + result + }; + + tx.commit()?; + + return Ok(result); + }) + .unwrap_or_else(|| Ok(Vec::new())) } } +fn get_workspace_id
<P>
(
+    worktree_roots: &[P],
+    connection: &Connection,
+) -> Result<Option<WorkspaceId>, anyhow::Error>
+where
+    P: AsRef<Path>,
+{
+    // Prepare the array binding string. SQL doesn't have syntax for this, so
+    // we have to do it ourselves.
+    let mut array_binding_stmt = "(".to_string();
+    for i in 0..worktree_roots.len() {
+        array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based
+        if i < worktree_roots.len() - 1 {
+            array_binding_stmt.push(',');
+            array_binding_stmt.push(' ');
+        }
+    }
+    array_binding_stmt.push(')');
+    // Any workspace can have multiple independent paths, and these paths
+    // can overlap in the database. Take this test data for example:
+    //
+    // [/tmp, /tmp2] -> 1
+    // [/tmp] -> 2
+    // [/tmp2, /tmp3] -> 3
+    //
+    // This would be stored in the database like so:
+    //
+    // ID PATH
+    // 1  /tmp
+    // 1  /tmp2
+    // 2  /tmp
+    // 3  /tmp2
+    // 3  /tmp3
+    //
+    // Note how both /tmp and /tmp2 are associated with multiple workspace IDs.
+    // So, given an array of worktree roots, how can we find the exactly matching ID?
+    // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out:
+    //  - We start with a join of this table on itself, generating every possible
+    //    pair of ((path, ID), (path, ID)), and filtering the join down to just the
+    //    *overlapping* workspace IDs. For this small data set, this would look like:
+    //
+    //    wt1.ID wt1.PATH | wt2.ID wt2.PATH
+    //    3      /tmp3      3      /tmp2
+    //
+    //  - Moving one SELECT out, we use the first pair's ID column to invert the selection,
+    //    meaning we now have a list of all the entries for our array and *subsets*
+    //    of our array:
+    //
+    //    ID PATH
+    //    1  /tmp
+    //    2  /tmp
+    //    2  /tmp2
+    //
+    //  - To trim out the subsets, we need to exploit the fact that there can be no duplicate
+    //    entries in this table. We can just use GROUP BY, COUNT, and a WHERE clause that checks
+    //    for the length of our array:
+    //
+    //    ID num_matching
+    //    1  2
+    //
+    // And we're done! We've found the matching ID correctly :D
+    // However, due to limitations in sqlite's query binding, we still have to do some string
+    // substitution to generate the correct query
+    let query = format!(
+        r#"
+        SELECT workspace_id 
+        FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots
+              WHERE worktree_root in {array_bind} AND workspace_id NOT IN
+                (SELECT wt1.workspace_id FROM worktree_roots as wt1
+                 JOIN worktree_roots as wt2
+                 ON wt1.workspace_id = wt2.workspace_id
+                 WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind})
+              GROUP BY workspace_id)
+        WHERE num_matching = ?
+        "#,
+        array_bind = array_binding_stmt
+    );
+    let mut stmt = connection.prepare_cached(&query)?;
+    // Make sure we bound the parameters correctly
+    debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count());
+
+    for i in 0..worktree_roots.len() {
+        // TODO: Update this to use blobs
+        let path = &worktree_roots[i].as_ref().to_string_lossy().to_string();
+        stmt.raw_bind_parameter(i + 1, path)?
+    }
+    // No -1, because SQLite is 1 based
+    stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?;
+
+    let mut rows = stmt.raw_query();
+    if let Ok(Some(row)) = rows.next() {
+        return Ok(Some(WorkspaceId(row.get(0)?)));
+    }
+    // Ensure that this query only returns one row. The PRIMARY KEY constraint should catch this case
+    // but this is here to catch it if someone refactors that constraint out.
+ debug_assert!(matches!(rows.next(), Ok(None))); + Ok(None) +} + #[cfg(test)] mod tests { + use std::{ + path::{Path, PathBuf}, + sync::Arc, + thread::sleep, + time::Duration, + }; + use crate::Db; use super::WorkspaceId; @@ -265,32 +338,36 @@ mod tests { db.update_worktree_roots(workspace_id, entries).unwrap(); } - assert_eq!(WorkspaceId(1), db.workspace_id(&["/tmp1"]).unwrap().unwrap()); + assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp1"]).unwrap()); assert_eq!( - WorkspaceId(2), - db.workspace_id(&["/tmp1", "/tmp2"]).unwrap().unwrap() + db.workspace_id(&["/tmp1", "/tmp2"]).unwrap(), + Some(WorkspaceId(2)) ); assert_eq!( - WorkspaceId(3), - db.workspace_id(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().unwrap() + db.workspace_id(&["/tmp1", "/tmp2", "/tmp3"]).unwrap(), + Some(WorkspaceId(3)) ); assert_eq!( - WorkspaceId(4), - db.workspace_id(&["/tmp2", "/tmp3"]).unwrap().unwrap() + db.workspace_id(&["/tmp2", "/tmp3"]).unwrap(), + Some(WorkspaceId(4)) ); assert_eq!( - WorkspaceId(5), - db.workspace_id(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().unwrap() + db.workspace_id(&["/tmp2", "/tmp3", "/tmp4"]).unwrap(), + Some(WorkspaceId(5)) ); assert_eq!( - WorkspaceId(6), - db.workspace_id(&["/tmp2", "/tmp4"]).unwrap().unwrap() + db.workspace_id(&["/tmp2", "/tmp4"]).unwrap(), + Some(WorkspaceId(6)) ); - assert_eq!(WorkspaceId(7), db.workspace_id(&["/tmp2"]).unwrap().unwrap()); + assert_eq!(db.workspace_id(&["/tmp2"]).unwrap(), Some(WorkspaceId(7))); - assert_eq!(None, db.workspace_id(&["/tmp1", "/tmp5"]).unwrap()); - assert_eq!(None, db.workspace_id(&["/tmp5"]).unwrap()); - assert_eq!(None, db.workspace_id(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]).unwrap()); + assert_eq!(db.workspace_id(&["/tmp1", "/tmp5"]).unwrap(), None); + assert_eq!(db.workspace_id(&["/tmp5"]).unwrap(), None); + assert_eq!( + db.workspace_id(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]) + .unwrap(), + None + ); } #[test] @@ -308,14 +385,21 @@ mod tests { db.update_worktree_roots(workspace_id, entries).unwrap(); } - assert_eq!(None, db.workspace_id(&["/tmp2"]).unwrap()); - assert_eq!(None, db.workspace_id(&["/tmp2", "/tmp3"]).unwrap()); - assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp"]).unwrap()); - assert_eq!(Some(WorkspaceId(2)), db.workspace_id(&["/tmp", "/tmp2"]).unwrap()); + assert_eq!(db.workspace_id(&["/tmp2"]).unwrap(), None); + assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]).unwrap(), None); + assert_eq!(db.workspace_id(&["/tmp"]).unwrap(), Some(WorkspaceId(1))); assert_eq!( - Some(WorkspaceId(3)), - db.workspace_id(&["/tmp", "/tmp2", "/tmp3"]).unwrap() + db.workspace_id(&["/tmp", "/tmp2"]).unwrap(), + Some(WorkspaceId(2)) ); + assert_eq!( + db.workspace_id(&["/tmp", "/tmp2", "/tmp3"]).unwrap(), + Some(WorkspaceId(3)) + ); + } + + fn arc_path(path: &'static str) -> Arc { + PathBuf::from(path).into() } #[test] @@ -340,25 +424,38 @@ mod tests { let db = Db::open_in_memory(); + // Load in the test data for (workspace_id, entries) in data { - db.update_worktree_roots(workspace_id, entries).unwrap(); //?? 
- assert_eq!(&db.workspace_id::(&[]).unwrap(), &Some(*workspace_id)) + db.workspace_for_worktree_roots(&[]); + db.update_worktree_roots(workspace_id, entries).unwrap(); } - for (workspace_id, entries) in data { - assert_eq!(&db.workspace_id(entries.as_slice()).unwrap(), &Some(*workspace_id)); - } - - db.update_worktree_roots(&WorkspaceId(2), &["/tmp2"]) + // Make sure the timestamp updates + sleep(Duration::from_secs(1)); + // Execute the update + db.update_worktree_roots(&WorkspaceId(2), &["/tmp2", "/tmp3"]) .unwrap(); - // todo!(); // make sure that 3 got garbage collected - assert_eq!(db.workspace_id(&["/tmp2"]).unwrap(), Some(WorkspaceId(2))); + // Make sure that workspace 3 doesn't exist + assert_eq!( + db.workspace_id(&["/tmp2", "/tmp3"]).unwrap(), + Some(WorkspaceId(2)) + ); + + // And that workspace 1 was untouched assert_eq!(db.workspace_id(&["/tmp"]).unwrap(), Some(WorkspaceId(1))); - let recent_workspaces = db.recent_workspaces(); - assert_eq!(recent_workspaces.get(0).unwrap().0, WorkspaceId(2)); - assert_eq!(recent_workspaces.get(1).unwrap().0, WorkspaceId(3)); - assert_eq!(recent_workspaces.get(2).unwrap().0, WorkspaceId(1)); + // And that workspace 2 is no longer registered under this + assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]).unwrap(), None); + + let recent_workspaces = db.recent_workspaces(10).unwrap(); + assert_eq!( + recent_workspaces.get(0).unwrap(), + &(WorkspaceId(2), vec![arc_path("/tmp2"), arc_path("/tmp3")]) + ); + assert_eq!( + recent_workspaces.get(1).unwrap(), + &(WorkspaceId(1), vec![arc_path("/tmp")]) + ); } } From 7d33520b2c2954fbee631bd16c62e435fe81f85b Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 25 Oct 2022 16:55:20 -0700 Subject: [PATCH 122/240] Tidied up code, managed errors, etc. --- crates/db/examples/serialize_workspace.rs | 21 +- crates/db/src/workspace.rs | 518 ++++++++++++---------- 2 files changed, 292 insertions(+), 247 deletions(-) diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs index 108980ee92..6de6d9daf2 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -25,26 +25,19 @@ fn main() -> anyhow::Result<()> { // Order scrambled + sleeps added because sqlite only has 1 second resolution on // their timestamps - db.update_worktree_roots(&workspace_7.workspace_id, &["/tmp2"]) - .unwrap(); + db.update_worktree_roots(&workspace_7.workspace_id, &["/tmp2"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_1.workspace_id, &["/tmp1"]) - .unwrap(); + db.update_worktree_roots(&workspace_1.workspace_id, &["/tmp1"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]) - .unwrap(); + db.update_worktree_roots(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]) - .unwrap(); + db.update_worktree_roots(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]) - .unwrap(); + db.update_worktree_roots(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]) - .unwrap(); + db.update_worktree_roots(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]) 
- .unwrap(); + db.update_worktree_roots(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]); db.write_to(file).ok(); diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 09aa9f5301..e2c4d6319c 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,7 +1,10 @@ use anyhow::Result; -use rusqlite::{params, Connection}; +use rusqlite::{params, Connection, OptionalExtension}; use std::{ + ffi::OsStr, + fmt::Debug, + os::unix::prelude::OsStrExt, path::{Path, PathBuf}, sync::Arc, }; @@ -10,12 +13,6 @@ use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup}; use super::Db; -// TODO for workspace serialization: -// - Update return types to unwrap all of the results into dummy values -// - On database failure to initialize, delete the DB file -// - Update paths to be blobs ( :( https://users.rust-lang.org/t/how-to-safely-store-a-path-osstring-in-a-sqllite-database/79712/10 ) -// - Convert hot paths to prepare-cache-execute style - pub(crate) const WORKSPACE_M_1: &str = " CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY AUTOINCREMENT, @@ -23,24 +20,13 @@ CREATE TABLE workspaces( ) STRICT; CREATE TABLE worktree_roots( - worktree_root TEXT NOT NULL, + worktree_root BLOB NOT NULL, workspace_id INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE PRIMARY KEY(worktree_root, workspace_id) ) STRICT; "; -// Zed stores items with ids which are a combination of a view id during a given run and a workspace id. This - -// Case 1: Starting Zed Contextless -// > Zed -> Reopen the last -// Case 2: Starting Zed with a project folder -// > Zed ~/projects/Zed -// Case 3: Starting Zed with a file -// > Zed ~/projects/Zed/cargo.toml -// Case 4: Starting Zed with multiple project folders -// > Zed ~/projects/Zed ~/projects/Zed.dev - #[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] pub struct WorkspaceId(i64); @@ -64,7 +50,12 @@ impl Db { worktree_roots: &[Arc], ) -> SerializedWorkspace { // Find the workspace id which is uniquely identified by this set of paths return it if found - if let Ok(Some(workspace_id)) = self.workspace_id(worktree_roots) { + let mut workspace_id = self.workspace_id(worktree_roots); + if workspace_id.is_none() && worktree_roots.len() == 0 { + workspace_id = self.last_workspace_id(); + } + + if let Some(workspace_id) = workspace_id { // TODO // let workspace_row = self.get_workspace_row(workspace_id); // let center_group = self.get_pane_group(workspace_row.center_group_id); @@ -84,7 +75,8 @@ impl Db { self.real() .map(|db| { let lock = db.connection.lock(); - match lock.execute("INSERT INTO workspaces DEFAULT VALUES;", []) { + // No need to waste the memory caching this, should happen rarely. + match lock.execute("INSERT INTO workspaces DEFAULT VALUES", []) { Ok(_) => SerializedWorkspace { workspace_id: WorkspaceId(lock.last_insert_rowid()), }, @@ -94,9 +86,9 @@ impl Db { .unwrap_or_default() } - fn workspace_id
<P>
(&self, worktree_roots: &[P]) -> Result<Option<WorkspaceId>>
+    fn workspace_id
<P>
(&self, worktree_roots: &[P]) -> Option where - P: AsRef, + P: AsRef + Debug, { self.real() .map(|db| { @@ -104,7 +96,7 @@ impl Db { get_workspace_id(worktree_roots, &lock) }) - .unwrap_or(Ok(None)) + .unwrap_or(None) } // fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow { @@ -114,195 +106,272 @@ impl Db { /// Updates the open paths for the given workspace id. Will garbage collect items from /// any workspace ids which are no replaced by the new workspace id. Updates the timestamps /// in the workspace id table - pub fn update_worktree_roots
<P>
(
-        &self,
-        workspace_id: &WorkspaceId,
-        worktree_roots: &[P],
-    ) -> Result<()>
+    pub fn update_worktree_roots
<P>
(&self, workspace_id: &WorkspaceId, worktree_roots: &[P])
     where
-        P: AsRef<Path>,
+        P: AsRef<Path> + Debug,
     {
+        fn logic
<P>
( + connection: &mut Connection, + worktree_roots: &[P], + workspace_id: &WorkspaceId, + ) -> Result<()> + where + P: AsRef + Debug, + { + let tx = connection.transaction()?; + { + // Lookup any old WorkspaceIds which have the same set of roots, and delete them. + let preexisting_id = get_workspace_id(worktree_roots, &tx); + if let Some(preexisting_id) = preexisting_id { + if preexisting_id != *workspace_id { + // Should also delete fields in other tables with cascading updates + tx.execute( + "DELETE FROM workspaces WHERE workspace_id = ?", + [preexisting_id.0], + )?; + } + } + + tx.execute( + "DELETE FROM worktree_roots WHERE workspace_id = ?", + [workspace_id.0], + )?; + + for root in worktree_roots { + let path = root.as_ref().as_os_str().as_bytes(); + + tx.execute( + "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", + params![workspace_id.0, path], + )?; + } + + tx.execute( + "UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?", + [workspace_id.0], + )?; + } + tx.commit()?; + Ok(()) + } + + self.real().map(|db| { + let mut lock = db.connection.lock(); + + match logic(&mut lock, worktree_roots, workspace_id) { + Ok(_) => {} + Err(err) => { + log::error!( + "Failed to update the worktree roots for {:?}, roots: {:?}, error: {}", + workspace_id, + worktree_roots, + err + ); + } + } + }); + } + + pub fn last_workspace_id(&self) -> Option { + fn logic(connection: &mut Connection) -> Result> { + let mut stmt = connection + .prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT 1")?; + + Ok(stmt + .query_row([], |row| Ok(WorkspaceId(row.get(0)?))) + .optional()?) + } + self.real() .map(|db| { let mut lock = db.connection.lock(); - let tx = lock.transaction()?; - - { - // Lookup any old WorkspaceIds which have the same set of roots, and delete them. 
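                // Editor's note, not part of the original patch: the ON DELETE CASCADE
                // clause that PATCH 121 added to worktree_roots means the DELETE below
                // also clears the dead workspace's worktree_roots rows, but only on
                // connections that have run `PRAGMA foreign_keys = ON;`, since SQLite
                // disables foreign-key enforcement by default.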
- let preexisting_id = get_workspace_id(worktree_roots, &tx)?; - if let Some(preexisting_id) = preexisting_id { - if preexisting_id != *workspace_id { - // Should also delete fields in other tables - tx.execute( - "DELETE FROM workspaces WHERE workspace_id = ?", - [preexisting_id.0], - )?; - } + match logic(&mut lock) { + Ok(result) => result, + Err(err) => { + log::error!("Failed to get last workspace id, err: {}", err); + None } - - tx.execute( - "DELETE FROM worktree_roots WHERE workspace_id = ?", - [workspace_id.0], - )?; - - for root in worktree_roots { - // TODO: Update this to use blobs - let path = root.as_ref().to_string_lossy().to_string(); - - let mut stmt = tx.prepare_cached("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")?; - stmt.execute(params![workspace_id.0, path])?; - } - - let mut stmt = tx.prepare_cached("UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")?; - stmt.execute([workspace_id.0])?; } - tx.commit()?; - - Ok(()) }) - .unwrap_or(Ok(())) + .unwrap_or(None) } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots - pub fn recent_workspaces(&self, limit: usize) -> Result>)>> { - // Return all the workspace ids and their associated paths ordered by the access timestamp - //ORDER BY timestamps + pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, Vec>)> { + fn logic( + connection: &mut Connection, + limit: usize, + ) -> Result>)>, anyhow::Error> { + let tx = connection.transaction()?; + let result = { + let mut stmt = tx.prepare( + "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", + )?; + + let workspace_ids = stmt + .query_map([limit], |row| Ok(WorkspaceId(row.get(0)?)))? + .collect::, rusqlite::Error>>()?; + + let mut result = Vec::new(); + let mut stmt = + tx.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; + for workspace_id in workspace_ids { + let roots = stmt + .query_map([workspace_id.0], |row| { + let row = row.get::<_, Vec>(0)?; + Ok(PathBuf::from(OsStr::from_bytes(&row)).into()) + })? + .collect::, rusqlite::Error>>()?; + result.push((workspace_id, roots)) + } + + result + }; + tx.commit()?; + return Ok(result); + } + self.real() .map(|db| { let mut lock = db.connection.lock(); - let tx = lock.transaction()?; - let result = { - let mut stmt = tx.prepare_cached( - "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", - )?; - let workspace_ids = stmt - .query_map([limit], |row| Ok(WorkspaceId(row.get(0)?)))? - .collect::, rusqlite::Error>>()?; - - let mut result = Vec::new(); - let mut stmt = tx.prepare_cached( - "SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?", - )?; - for workspace_id in workspace_ids { - let roots = stmt - .query_map([workspace_id.0], |row| { - let row = row.get::<_, String>(0)?; - Ok(PathBuf::from(Path::new(&row)).into()) - })? - .collect::, rusqlite::Error>>()?; - result.push((workspace_id, roots)) + match logic(&mut lock, limit) { + Ok(result) => result, + Err(err) => { + log::error!("Failed to get recent workspaces, err: {}", err); + Vec::new() } - - result - }; - - tx.commit()?; - - return Ok(result); + } }) - .unwrap_or_else(|| Ok(Vec::new())) + .unwrap_or_else(|| Vec::new()) } } -fn get_workspace_id
<P>
(
-    worktree_roots: &[P],
-    connection: &Connection,
-) -> Result<Option<WorkspaceId>, anyhow::Error>
+fn get_workspace_id
<P>
(worktree_roots: &[P], connection: &Connection) -> Option<WorkspaceId>
 where
-    P: AsRef<Path>,
+    P: AsRef<Path> + Debug,
 {
+    fn logic
<P>
(
+        worktree_roots: &[P],
+        connection: &Connection,
+    ) -> Result<Option<WorkspaceId>, anyhow::Error>
+    where
+        P: AsRef<Path> + Debug,
+    {
+        // Prepare the array binding string. SQL doesn't have syntax for this, so
+        // we have to do it ourselves.
+        let mut array_binding_stmt = "(".to_string();
+        for i in 0..worktree_roots.len() {
+            array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based
+            if i < worktree_roots.len() - 1 {
+                array_binding_stmt.push(',');
+                array_binding_stmt.push(' ');
+            }
+        }
+        array_binding_stmt.push(')');
+        // Any workspace can have multiple independent paths, and these paths
+        // can overlap in the database. Take this test data for example:
+        //
+        // [/tmp, /tmp2] -> 1
+        // [/tmp] -> 2
+        // [/tmp2, /tmp3] -> 3
+        //
+        // This would be stored in the database like so:
+        //
+        // ID PATH
+        // 1  /tmp
+        // 1  /tmp2
+        // 2  /tmp
+        // 3  /tmp2
+        // 3  /tmp3
+        //
+        // Note how both /tmp and /tmp2 are associated with multiple workspace IDs.
+        // So, given an array of worktree roots, how can we find the exactly matching ID?
+        // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out:
+        //  - We start with a join of this table on itself, generating every possible
+        //    pair of ((path, ID), (path, ID)), and filtering the join down to just the
+        //    *overlapping* workspace IDs. For this small data set, this would look like:
+        //
+        //    wt1.ID wt1.PATH | wt2.ID wt2.PATH
+        //    3      /tmp3      3      /tmp2
+        //
+        //  - Moving one SELECT out, we use the first pair's ID column to invert the selection,
+        //    meaning we now have a list of all the entries for our array and *subsets*
+        //    of our array:
+        //
+        //    ID PATH
+        //    1  /tmp
+        //    2  /tmp
+        //    2  /tmp2
+        //
+        //  - To trim out the subsets, we need to exploit the fact that there can be no duplicate
+        //    entries in this table. We can just use GROUP BY, COUNT, and a WHERE clause that checks
+        //    for the length of our array:
+        //
+        //    ID num_matching
+        //    1  2
+        //
+        // And we're done! We've found the matching ID correctly :D
+        // However, due to limitations in sqlite's query binding, we still have to do some string
+        // substitution to generate the correct query
+        // 47,116,109,112,50
+        // 2F746D7032
+
+        let query = format!(
+            r#"
+            SELECT workspace_id 
+            FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots
+                  WHERE worktree_root in {array_bind} AND workspace_id NOT IN
+                    (SELECT wt1.workspace_id FROM worktree_roots as wt1
+                     JOIN worktree_roots as wt2
+                     ON wt1.workspace_id = wt2.workspace_id
+                     WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind})
+                  GROUP BY workspace_id)
+            WHERE num_matching = ?
+            "#,
+            array_bind = array_binding_stmt
+        );
+
+        // This will only be called on start up and when root workspaces change, no need to waste memory
+        // caching it.
+        let mut stmt = connection.prepare(&query)?;
+        // Make sure we bound the parameters correctly
+        debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count());
+
+        for i in 0..worktree_roots.len() {
+            let path = &worktree_roots[i].as_ref().as_os_str().as_bytes();
+            stmt.raw_bind_parameter(i + 1, path)?
+        }
+        // No -1, because SQLite is 1 based
+        stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?;
+
+        let mut rows = stmt.raw_query();
+        let row = rows.next();
+        let result = if let Ok(Some(row)) = row {
+            Ok(Some(WorkspaceId(row.get(0)?)))
+        } else {
+            Ok(None)
+        };
+
+        // Ensure that this query only returns one row. 
The PRIMARY KEY constraint should catch this case + // but this is here to catch if someone refactors that constraint out. + debug_assert!(matches!(rows.next(), Ok(None))); + + result + } + + match logic(worktree_roots, connection) { + Ok(result) => result, + Err(err) => { + log::error!( + "Failed to get the workspace ID for paths {:?}, err: {}", + worktree_roots, + err + ); + None } } - array_binding_stmt.push(')'); - // Any workspace can have multiple independent paths, and these paths - // can overlap in the database. Take this test data for example: - // - // [/tmp, /tmp2] -> 1 - // [/tmp] -> 2 - // [/tmp2, /tmp3] -> 3 - // - // This would be stred in the database like so: - // - // ID PATH - // 1 /tmp - // 1 /tmp2 - // 2 /tmp - // 3 /tmp2 - // 3 /tmp3 - // - // Note how both /tmp and /tmp2 are associated with multiple workspace IDs. - // So, given an array of worktree roots, how can we find the exactly matching ID? - // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: - // - We start with a join of this table on itself, generating every possible - // pair of ((path, ID), (path, ID)), and filtering the join down to just the - // *overlapping* workspace IDs. For this small data set, this would look like: - // - // wt1.ID wt1.PATH | wt2.ID wt2.PATH - // 3 /tmp3 3 /tmp2 - // - // - Moving one SELECT out, we use the first pair's ID column to invert the selection, - // meaning we now have a list of all the entries for our array and *subsets* - // of our array: - // - // ID PATH - // 1 /tmp - // 2 /tmp - // 2 /tmp2 - // - // - To trim out the subsets, we need to exploit the fact that there can be no duplicate - // entries in this table. We can just use GROUP BY, COUNT, and a WHERE clause that checks - // for the length of our array: - // - // ID num_matching - // 1 2 - // - // And we're done! We've found the matching ID correctly :D - // However, due to limitations in sqlite's query binding, we still have to do some string - // substitution to generate the correct query - let query = format!( - r#" - SELECT workspace_id - FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots - WHERE worktree_root in {array_bind} AND workspace_id NOT IN - (SELECT wt1.workspace_id FROM worktree_roots as wt1 - JOIN worktree_roots as wt2 - ON wt1.workspace_id = wt2.workspace_id - WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind}) - GROUP BY workspace_id) - WHERE num_matching = ? - "#, - array_bind = array_binding_stmt - ); - let mut stmt = connection.prepare_cached(&query)?; - // Make sure we bound the parameters correctly - debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count()); - - for i in 0..worktree_roots.len() { - // TODO: Update this to use blobs - let path = &worktree_roots[i].as_ref().to_string_lossy().to_string(); - stmt.raw_bind_parameter(i + 1, path)? - } - // No -1, because SQLite is 1 based - stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?; - - let mut rows = stmt.raw_query(); - if let Ok(Some(row)) = rows.next() { - return Ok(Some(WorkspaceId(row.get(0)?))); - } - // Ensure that this query only returns one row. The PRIMARY KEY constraint should catch this case - // but this is here to catch it if someone refactors that constraint out. 
- debug_assert!(matches!(rows.next(), Ok(None))); - Ok(None) } #[cfg(test)] @@ -335,39 +404,26 @@ mod tests { for (workspace_id, entries) in data { db.make_new_workspace(); - db.update_worktree_roots(workspace_id, entries).unwrap(); + db.update_worktree_roots(workspace_id, entries); } - assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp1"]).unwrap()); + assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp1"])); + assert_eq!(db.workspace_id(&["/tmp1", "/tmp2"]), Some(WorkspaceId(2))); assert_eq!( - db.workspace_id(&["/tmp1", "/tmp2"]).unwrap(), - Some(WorkspaceId(2)) - ); - assert_eq!( - db.workspace_id(&["/tmp1", "/tmp2", "/tmp3"]).unwrap(), + db.workspace_id(&["/tmp1", "/tmp2", "/tmp3"]), Some(WorkspaceId(3)) ); + assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), Some(WorkspaceId(4))); assert_eq!( - db.workspace_id(&["/tmp2", "/tmp3"]).unwrap(), - Some(WorkspaceId(4)) - ); - assert_eq!( - db.workspace_id(&["/tmp2", "/tmp3", "/tmp4"]).unwrap(), + db.workspace_id(&["/tmp2", "/tmp3", "/tmp4"]), Some(WorkspaceId(5)) ); - assert_eq!( - db.workspace_id(&["/tmp2", "/tmp4"]).unwrap(), - Some(WorkspaceId(6)) - ); - assert_eq!(db.workspace_id(&["/tmp2"]).unwrap(), Some(WorkspaceId(7))); + assert_eq!(db.workspace_id(&["/tmp2", "/tmp4"]), Some(WorkspaceId(6))); + assert_eq!(db.workspace_id(&["/tmp2"]), Some(WorkspaceId(7))); - assert_eq!(db.workspace_id(&["/tmp1", "/tmp5"]).unwrap(), None); - assert_eq!(db.workspace_id(&["/tmp5"]).unwrap(), None); - assert_eq!( - db.workspace_id(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]) - .unwrap(), - None - ); + assert_eq!(db.workspace_id(&["/tmp1", "/tmp5"]), None); + assert_eq!(db.workspace_id(&["/tmp5"]), None); + assert_eq!(db.workspace_id(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None); } #[test] @@ -382,18 +438,15 @@ mod tests { for (workspace_id, entries) in data { db.make_new_workspace(); - db.update_worktree_roots(workspace_id, entries).unwrap(); + db.update_worktree_roots(workspace_id, entries); } - assert_eq!(db.workspace_id(&["/tmp2"]).unwrap(), None); - assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]).unwrap(), None); - assert_eq!(db.workspace_id(&["/tmp"]).unwrap(), Some(WorkspaceId(1))); + assert_eq!(db.workspace_id(&["/tmp2"]), None); + assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), None); + assert_eq!(db.workspace_id(&["/tmp"]), Some(WorkspaceId(1))); + assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(2))); assert_eq!( - db.workspace_id(&["/tmp", "/tmp2"]).unwrap(), - Some(WorkspaceId(2)) - ); - assert_eq!( - db.workspace_id(&["/tmp", "/tmp2", "/tmp3"]).unwrap(), + db.workspace_id(&["/tmp", "/tmp2", "/tmp3"]), Some(WorkspaceId(3)) ); } @@ -426,29 +479,28 @@ mod tests { // Load in the test data for (workspace_id, entries) in data { - db.workspace_for_worktree_roots(&[]); - db.update_worktree_roots(workspace_id, entries).unwrap(); + db.make_new_workspace(); + db.update_worktree_roots(workspace_id, entries); } // Make sure the timestamp updates sleep(Duration::from_secs(1)); + // Execute the update - db.update_worktree_roots(&WorkspaceId(2), &["/tmp2", "/tmp3"]) - .unwrap(); + db.update_worktree_roots(&WorkspaceId(2), &["/tmp2", "/tmp3"]); // Make sure that workspace 3 doesn't exist - assert_eq!( - db.workspace_id(&["/tmp2", "/tmp3"]).unwrap(), - Some(WorkspaceId(2)) - ); + assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), Some(WorkspaceId(2))); // And that workspace 1 was untouched - assert_eq!(db.workspace_id(&["/tmp"]).unwrap(), Some(WorkspaceId(1))); + assert_eq!(db.workspace_id(&["/tmp"]), Some(WorkspaceId(1))); - // And that 
workspace 2 is no longer registered under this - assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]).unwrap(), None); + // And that workspace 2 is no longer registered under these roots + assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), None); - let recent_workspaces = db.recent_workspaces(10).unwrap(); + assert_eq!(Some(WorkspaceId(2)), db.last_workspace_id()); + + let recent_workspaces = db.recent_workspaces(10); assert_eq!( recent_workspaces.get(0).unwrap(), &(WorkspaceId(2), vec![arc_path("/tmp2"), arc_path("/tmp3")]) From a9dc46c950693bf20edf213264fd2b324c3ee426 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 25 Oct 2022 17:09:26 -0700 Subject: [PATCH 123/240] added stubs for more tests --- crates/db/src/workspace.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index e2c4d6319c..68008a2795 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -49,7 +49,8 @@ impl Db { &self, worktree_roots: &[Arc], ) -> SerializedWorkspace { - // Find the workspace id which is uniquely identified by this set of paths return it if found + // Find the workspace id which is uniquely identified by this set of paths + // return it if found let mut workspace_id = self.workspace_id(worktree_roots); if workspace_id.is_none() && worktree_roots.len() == 0 { workspace_id = self.last_workspace_id(); @@ -388,6 +389,12 @@ mod tests { use super::WorkspaceId; + #[test] + fn test_empty_worktrees() { + // TODO determine update_worktree_roots(), workspace_id(), recent_workspaces() + // semantics for this case + } + #[test] fn test_more_workspace_ids() { let data = &[ From 46ff0885f088da6e74c723f7f9968bc552a9e049 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 26 Oct 2022 11:08:44 -0700 Subject: [PATCH 124/240] WIP: Writing tests --- crates/db/src/workspace.rs | 56 ++++++++++++++++++++++++++++---------- 1 file changed, 41 insertions(+), 15 deletions(-) diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 68008a2795..6bccf3387c 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -260,10 +260,15 @@ where where P: AsRef + Debug, { + if worktree_roots.len() == 0 { + return Ok(None); + } + // Prepare the array binding string. SQL doesn't have syntax for this, so // we have to do it ourselves. let mut array_binding_stmt = "(".to_string(); for i in 0..worktree_roots.len() { + // This uses ?NNN for numbered placeholder syntax array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based if i < worktree_roots.len() - 1 { array_binding_stmt.push(','); @@ -292,33 +297,35 @@ where // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: // - We start with a join of this table on itself, generating every possible // pair of ((path, ID), (path, ID)), and filtering the join down to just the - // *overlapping* workspace IDs. For this small data set, this would look like: + // *overlapping but incorrect* workspace IDs. 
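+        //    (i.e. pairs that agree on the workspace ID but where one of the
+        //    two paths falls outside the array we are querying for)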
For this small data set, + // this would look like: // // wt1.ID wt1.PATH | wt2.ID wt2.PATH // 3 /tmp3 3 /tmp2 // // - Moving one SELECT out, we use the first pair's ID column to invert the selection, - // meaning we now have a list of all the entries for our array and *subsets* - // of our array: + // meaning we now have a list of all the entries for our array, minus overlapping sets, + // but including *subsets* of our worktree roots: // // ID PATH // 1 /tmp + // 1 /tmp2 // 2 /tmp - // 2 /tmp2 // - // - To trim out the subsets, we need to exploit the fact that there can be no duplicate - // entries in this table. We can just use GROUP BY, COUNT, and a WHERE clause that checks - // for the length of our array: + // - To trim out the subsets, we can to exploit the PRIMARY KEY constraint that there are no + // duplicate entries in this table. Using a GROUP BY and a COUNT we can find the subsets of + // our keys: // // ID num_matching // 1 2 + // 2 1 // - // And we're done! We've found the matching ID correctly :D - // However, due to limitations in sqlite's query binding, we still have to do some string - // substitution to generate the correct query - // 47,116,109,112,50 - // 2F746D7032 - + // - And with one final WHERE num_matching = $num_of_worktree_roots, we're done! We've found the + // matching ID correctly :D + // + // Note: due to limitations in SQLite's query binding, we have to generate the prepared + // statement with string substitution (the {array_bind}) below, and then bind the + // parameters by number. let query = format!( r#" SELECT workspace_id @@ -391,8 +398,27 @@ mod tests { #[test] fn test_empty_worktrees() { - // TODO determine update_worktree_roots(), workspace_id(), recent_workspaces() - // semantics for this case + let db = Db::open_in_memory(); + + assert_eq!(None, db.workspace_id::(&[])); + + db.make_new_workspace(); + db.update_worktree_roots(&WorkspaceId(1), &["/tmp", "/tmp2"]); + + // Sanity check + assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp", "/tmp2"])); + + db.update_worktree_roots::(&WorkspaceId(1), &[]); + + // Make sure DB doesn't consider 'no worktrees' to be a query it can answer + assert_eq!(None, db.workspace_id::(&[])); + + assert_eq!(Some(WorkspaceId(1)), db.last_workspace_id()); + + assert_eq!( + &(WorkspaceId(1), vec![]), + db.recent_workspaces(1).get(0).unwrap() + ) } #[test] From 5505a776e67b41786f1725a94cb6b38af676c0cd Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 26 Oct 2022 16:31:44 -0700 Subject: [PATCH 125/240] Figured out a good schema for the pane serialization stuff --- Cargo.lock | 1 + crates/db/Cargo.toml | 1 + crates/db/examples/serialize-pane.rs | 27 ++++++ crates/db/examples/serialize_workspace.rs | 14 +-- crates/db/src/items.rs | 9 +- crates/db/src/kvp.rs | 4 + crates/db/src/migrations.rs | 5 +- crates/db/src/pane.rs | 113 ++++++++++++++++------ crates/db/src/workspace.rs | 47 ++++++--- 9 files changed, 156 insertions(+), 65 deletions(-) create mode 100644 crates/db/examples/serialize-pane.rs diff --git a/Cargo.lock b/Cargo.lock index b381331ef1..0da4d17710 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1560,6 +1560,7 @@ dependencies = [ "rusqlite_migration", "serde", "serde_rusqlite", + "settings", "tempdir", ] diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 9fad1aa39a..64e86e0345 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -13,6 +13,7 @@ test-support = [] [dependencies] collections = { path = "../collections" } gpui = { path = "../gpui" } +settings = { path = "../settings" } 
anyhow = "1.0.57" async-trait = "0.1" lazy_static = "1.4.0" diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs new file mode 100644 index 0000000000..289f70967c --- /dev/null +++ b/crates/db/examples/serialize-pane.rs @@ -0,0 +1,27 @@ +use std::{fs::File, path::Path, thread::sleep, time::Duration}; + +const TEST_FILE: &'static str = "test-db.db"; + +fn main() -> anyhow::Result<()> { + let db = db::Db::open_in_memory(); + if db.real().is_none() { + return Err(anyhow::anyhow!("Migrations failed")); + } + let file = Path::new(TEST_FILE); + + let f = File::create(file)?; + drop(f); + + let workspace = db.make_new_workspace(); + + db.update_worktree_roots(&workspace.workspace_id, &["/tmp"]); + + db.save_pane_splits(center_pane_group); + db.save_dock_pane(); + + db.write_to(file).ok(); + + println!("Wrote database!"); + + Ok(()) +} diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs index 6de6d9daf2..9e1b61387e 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -15,13 +15,13 @@ fn main() -> anyhow::Result<()> { db.write_kvp("test", "1")?; db.write_kvp("test-2", "2")?; - let workspace_1 = db.workspace_for_worktree_roots(&[]); - let workspace_2 = db.workspace_for_worktree_roots(&[]); - let workspace_3 = db.workspace_for_worktree_roots(&[]); - let workspace_4 = db.workspace_for_worktree_roots(&[]); - let workspace_5 = db.workspace_for_worktree_roots(&[]); - let workspace_6 = db.workspace_for_worktree_roots(&[]); - let workspace_7 = db.workspace_for_worktree_roots(&[]); + let workspace_1 = db.make_new_workspace(); + let workspace_2 = db.make_new_workspace(); + let workspace_3 = db.make_new_workspace(); + let workspace_4 = db.make_new_workspace(); + let workspace_5 = db.make_new_workspace(); + let workspace_6 = db.make_new_workspace(); + let workspace_7 = db.make_new_workspace(); // Order scrambled + sleeps added because sqlite only has 1 second resolution on // their timestamps diff --git a/crates/db/src/items.rs b/crates/db/src/items.rs index 1b633fdc47..7bd4c27f43 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/items.rs @@ -46,15 +46,8 @@ use super::Db; // Items // Sidebars +// Things I'm doing: finding about nullability for foreign keys pub(crate) const ITEMS_M_1: &str = " -CREATE TABLE items( - workspace_id INTEGER, - item_id INTEGER, - kind TEXT NOT NULL, - PRIMARY KEY (workspace_id, item_id) - FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) -) STRICT; - CREATE TABLE project_searches( workspace_id INTEGER, item_id INTEGER, diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 96f13d8040..eecd0238ca 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -4,10 +4,14 @@ use rusqlite::OptionalExtension; use super::Db; pub(crate) const KVP_M_1: &str = " +BEGIN TRANSACTION; + CREATE TABLE kv_store( key TEXT PRIMARY KEY, value TEXT NOT NULL ) STRICT; + +COMMIT; "; impl Db { diff --git a/crates/db/src/migrations.rs b/crates/db/src/migrations.rs index e10c388d5c..8caa528fc1 100644 --- a/crates/db/src/migrations.rs +++ b/crates/db/src/migrations.rs @@ -1,7 +1,7 @@ use rusqlite_migration::{Migrations, M}; // use crate::items::ITEMS_M_1; -use crate::{kvp::KVP_M_1, WORKSPACE_M_1}; +use crate::{kvp::KVP_M_1, pane::PANE_M_1, WORKSPACE_M_1}; // This must be ordered by development time! Only ever add new migrations to the end!! // Bad things will probably happen if you don't monotonically edit this vec!!!! 
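As an aside on why this ordering is load-bearing: rusqlite_migration records how many steps have already run in SQLite's user_version pragma, so each M::up entry corresponds to one version step, and only entries past the stored count are applied. A minimal sketch of how a vec like this gets applied when a database is opened (open_migrated is a hypothetical helper for illustration, not code from this patch):

    use rusqlite::Connection;
    use rusqlite_migration::{Migrations, M};

    fn open_migrated(path: &str) -> anyhow::Result<Connection> {
        let mut conn = Connection::open(path)?;
        // Append-only: editing or reordering earlier entries would desync any
        // database whose user_version already counted past them.
        let migrations = Migrations::new(vec![
            M::up(KVP_M_1),
            M::up(WORKSPACE_M_1),
            M::up(PANE_M_1),
        ]);
        migrations.to_latest(&mut conn)?;
        Ok(conn)
    }
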
@@ -10,6 +10,7 @@ use crate::{kvp::KVP_M_1, WORKSPACE_M_1}; lazy_static::lazy_static! { pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![ M::up(KVP_M_1), - M::up(WORKSPACE_M_1) + M::up(WORKSPACE_M_1), + M::up(PANE_M_1) ]); } diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 89721157c3..e4d6694319 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,41 +1,23 @@ use gpui::Axis; +use settings::DockAnchor; use crate::{items::ItemId, workspace::WorkspaceId}; use super::Db; -pub(crate) const PANE_M_1: &str = " -CREATE TABLE pane_groups( - workspace_id INTEGER, - group_id INTEGER, - axis STRING NOT NULL, -- 'Vertical' / 'Horizontal' - PRIMARY KEY (workspace_id, group_id) -) STRICT; +// We have an many-branched, unbalanced tree with three types: +// Pane Groups +// Panes +// Items -CREATE TABLE pane_group_children( - workspace_id INTEGER, - group_id INTEGER, - child_pane_id INTEGER, -- Nullable - child_group_id INTEGER, -- Nullable - index INTEGER, - PRIMARY KEY (workspace_id, group_id) -) STRICT; - -CREATE TABLE pane_items( - workspace_id INTEGER, - pane_id INTEGER, - item_id INTEGER, -- Array - index INTEGER, - KEY (workspace_id, pane_id) -) STRICT; - -ALTER TABLE WORKSPACE -ADD THESE COLS: -center_group INTEGER NOT NULL, -dock_pane INTEGER NOT NULL, --- FOREIGN KEY(center_group) REFERENCES pane_groups(group_id) --- FOREIGN KEY(dock_pane) REFERENCES pane_items(pane_id) -"; +// The root is always a Pane Group +// Pane Groups can have 0 (or more) Panes and/or Pane Groups as children +// Panes can have 0 or more items as children +// Panes can be their own root +// Items cannot have children +// References pointing down is hard (SQL doesn't like arrays) +// References pointing up is easy (1-1 item / parent relationship) but is harder to query +// #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub struct PaneId { @@ -93,6 +75,71 @@ pub struct SerializedPane { children: Vec, } +pub(crate) const PANE_M_1: &str = " +BEGIN TRANSACTION; + +CREATE TABLE dock_panes( + dock_pane_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' + shown INTEGER NOT NULL, -- Boolean + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE pane_groups( + group_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + parent_group INTEGER, -- NULL indicates that this is a root node + axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE grouped_panes( + pane_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + group_id INTEGER NOT NULL, + idx INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE items( + item_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + kind TEXT NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE group_items( + workspace_id INTEGER NOT NULL, + pane_id INTEGER NOT NULL, + item_id INTEGER NOT NULL, + idx INTEGER NOT NULL, + PRIMARY KEY (workspace_id, pane_id, item_id) + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(pane_id) REFERENCES grouped_panes(pane_id) ON 
DELETE CASCADE, + FOREIGN KEY(item_id) REFERENCES items(item_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE dock_items( + workspace_id INTEGER NOT NULL, + dock_pane_id INTEGER NOT NULL, + item_id INTEGER NOT NULL, + idx INTEGER NOT NULL, + PRIMARY KEY (workspace_id, dock_pane_id, item_id) + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE, + FOREIGN KEY(item_id) REFERENCES items(item_id)ON DELETE CASCADE +) STRICT; + +COMMIT; +"; + +struct SerializedDockPane { + //Cols +} + impl Db { pub(crate) fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { let axis = self.get_pane_group_axis(pane_group_id); @@ -147,5 +194,7 @@ impl Db { unimplemented!(); } - pub fn save_pane(&self, pane: SerializedPane) {} + fn save_dock_pane() {} } + +mod tests {} diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 6bccf3387c..cd1d22f50b 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -13,10 +13,15 @@ use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup}; use super::Db; +// If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging +// you might want to update some of the parsing code as well, I've left the variations in but commented +// out pub(crate) const WORKSPACE_M_1: &str = " +BEGIN TRANSACTION; + CREATE TABLE workspaces( - workspace_id INTEGER PRIMARY KEY AUTOINCREMENT, - timestamp TEXT DEFAULT CURRENT_TIMESTAMP + workspace_id INTEGER PRIMARY KEY, + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL ) STRICT; CREATE TABLE worktree_roots( @@ -25,16 +30,13 @@ CREATE TABLE worktree_roots( FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE PRIMARY KEY(worktree_root, workspace_id) ) STRICT; + +COMMIT; "; #[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] pub struct WorkspaceId(i64); -struct WorkspaceRow { - pub center_group_id: PaneGroupId, - pub dock_pane_id: PaneId, -} - #[derive(Default, Debug)] pub struct SerializedWorkspace { pub workspace_id: WorkspaceId, @@ -72,7 +74,7 @@ impl Db { } } - fn make_new_workspace(&self) -> SerializedWorkspace { + pub fn make_new_workspace(&self) -> SerializedWorkspace { self.real() .map(|db| { let lock = db.connection.lock(); @@ -140,6 +142,8 @@ impl Db { for root in worktree_roots { let path = root.as_ref().as_os_str().as_bytes(); + // If you need to debug this, here's the string parsing: + // let path = root.as_ref().to_string_lossy().to_string(); tx.execute( "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", @@ -162,6 +166,7 @@ impl Db { match logic(&mut lock, worktree_roots, workspace_id) { Ok(_) => {} Err(err) => { + dbg!(&err); log::error!( "Failed to update the worktree roots for {:?}, roots: {:?}, error: {}", workspace_id, @@ -222,6 +227,9 @@ impl Db { .query_map([workspace_id.0], |row| { let row = row.get::<_, Vec>(0)?; Ok(PathBuf::from(OsStr::from_bytes(&row)).into()) + // If you need to debug this, here's the string parsing: + // let row = row.get::<_, String>(0)?; + // Ok(PathBuf::from(row).into()) })? 
.collect::, rusqlite::Error>>()?; result.push((workspace_id, roots)) @@ -260,6 +268,7 @@ where where P: AsRef + Debug, { + // Short circuit if we can if worktree_roots.len() == 0 { return Ok(None); } @@ -297,7 +306,7 @@ where // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: // - We start with a join of this table on itself, generating every possible // pair of ((path, ID), (path, ID)), and filtering the join down to just the - // *overlapping but incorrect* workspace IDs. For this small data set, + // *overlapping but non-matching* workspace IDs. For this small data set, // this would look like: // // wt1.ID wt1.PATH | wt2.ID wt2.PATH @@ -349,6 +358,8 @@ where for i in 0..worktree_roots.len() { let path = &worktree_roots[i].as_ref().as_os_str().as_bytes(); + // If you need to debug this, here's the string parsing: + // let path = &worktree_roots[i].as_ref().to_string_lossy().to_string() stmt.raw_bind_parameter(i + 1, path)? } // No -1, because SQLite is 1 based @@ -402,22 +413,26 @@ mod tests { assert_eq!(None, db.workspace_id::(&[])); - db.make_new_workspace(); + db.make_new_workspace(); //ID 1 + db.make_new_workspace(); //ID 2 db.update_worktree_roots(&WorkspaceId(1), &["/tmp", "/tmp2"]); // Sanity check - assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp", "/tmp2"])); + assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1))); db.update_worktree_roots::(&WorkspaceId(1), &[]); - // Make sure DB doesn't consider 'no worktrees' to be a query it can answer - assert_eq!(None, db.workspace_id::(&[])); + // Make sure 'no worktrees' fails correctly. returning [1, 2] from this + // call would be semantically correct (as those are the workspaces that + // don't have roots) but I'd prefer that this API to either return exactly one + // workspace, and None otherwise + assert_eq!(db.workspace_id::(&[]), None,); - assert_eq!(Some(WorkspaceId(1)), db.last_workspace_id()); + assert_eq!(db.last_workspace_id(), Some(WorkspaceId(1))); assert_eq!( - &(WorkspaceId(1), vec![]), - db.recent_workspaces(1).get(0).unwrap() + db.recent_workspaces(2), + vec![(WorkspaceId(1), vec![]), (WorkspaceId(2), vec![]),], ) } From b9cbd4084e15b7ab2234323a4ce6659359514bbd Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 27 Oct 2022 13:58:54 -0700 Subject: [PATCH 126/240] WIP: fixing up behavior of workspace initialization --- crates/db/examples/serialize-pane.rs | 16 +- crates/db/examples/serialize_workspace.rs | 28 +- crates/db/src/db.rs | 8 +- crates/db/src/pane.rs | 39 +- crates/db/src/workspace.rs | 448 ++++++++++++---------- 5 files changed, 317 insertions(+), 222 deletions(-) diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index 289f70967c..9cf32dfd57 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -1,5 +1,8 @@ use std::{fs::File, path::Path, thread::sleep, time::Duration}; +use db::pane::SerializedDockPane; +use settings::DockAnchor; + const TEST_FILE: &'static str = "test-db.db"; fn main() -> anyhow::Result<()> { @@ -12,12 +15,17 @@ fn main() -> anyhow::Result<()> { let f = File::create(file)?; drop(f); - let workspace = db.make_new_workspace(); + let workspace = db.make_new_workspace::(&[]); - db.update_worktree_roots(&workspace.workspace_id, &["/tmp"]); + db.update_worktrees(&workspace.workspace_id, &["/tmp"]); - db.save_pane_splits(center_pane_group); - db.save_dock_pane(); + db.save_dock_pane(SerializedDockPane { + workspace: workspace.workspace_id, + 
anchor_position: DockAnchor::Expanded, + shown: true, + }); + + let new_workspace = db.workspace_for_roots(&["/tmp"]); db.write_to(file).ok(); diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs index 9e1b61387e..97d50bbe5b 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -15,29 +15,29 @@ fn main() -> anyhow::Result<()> { db.write_kvp("test", "1")?; db.write_kvp("test-2", "2")?; - let workspace_1 = db.make_new_workspace(); - let workspace_2 = db.make_new_workspace(); - let workspace_3 = db.make_new_workspace(); - let workspace_4 = db.make_new_workspace(); - let workspace_5 = db.make_new_workspace(); - let workspace_6 = db.make_new_workspace(); - let workspace_7 = db.make_new_workspace(); + let workspace_1 = db.make_new_workspace::(&[]); + let workspace_2 = db.make_new_workspace::(&[]); + let workspace_3 = db.make_new_workspace::(&[]); + let workspace_4 = db.make_new_workspace::(&[]); + let workspace_5 = db.make_new_workspace::(&[]); + let workspace_6 = db.make_new_workspace::(&[]); + let workspace_7 = db.make_new_workspace::(&[]); // Order scrambled + sleeps added because sqlite only has 1 second resolution on // their timestamps - db.update_worktree_roots(&workspace_7.workspace_id, &["/tmp2"]); + db.update_worktrees(&workspace_7.workspace_id, &["/tmp2"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_1.workspace_id, &["/tmp1"]); + db.update_worktrees(&workspace_1.workspace_id, &["/tmp1"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]); + db.update_worktrees(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]); + db.update_worktrees(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]); + db.update_worktrees(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]); + db.update_worktrees(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]); + db.update_worktrees(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]); db.write_to(file).ok(); diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 320b131ea6..107bbffdf4 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,8 +1,8 @@ -mod items; -mod kvp; +pub mod items; +pub mod kvp; mod migrations; -mod pane; -mod workspace; +pub mod pane; +pub mod workspace; use std::fs; use std::path::{Path, PathBuf}; diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index e4d6694319..37f91c58a3 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -136,8 +136,11 @@ CREATE TABLE dock_items( COMMIT; "; -struct SerializedDockPane { - //Cols +#[derive(Default, Debug)] +pub struct SerializedDockPane { + pub workspace: WorkspaceId, + pub anchor_position: DockAnchor, + pub shown: bool, } impl Db { @@ -194,7 +197,35 @@ impl Db { unimplemented!(); } - fn save_dock_pane() {} + pub fn get_dock_pane(&self, workspace: WorkspaceId) -> Option { + unimplemented!() + } + + pub fn save_dock_pane(&self, dock_pane: SerializedDockPane) {} } -mod tests {} +#[cfg(test)] +mod tests { + use settings::DockAnchor; + + use crate::Db; + + 
use super::SerializedDockPane; + + #[test] + fn test_basic_dock_pane() { + let db = Db::open_in_memory(); + + let workspace = db.make_new_workspace::(&[]); + + db.update_worktrees(&workspace.workspace_id, &["/tmp"]); + + db.save_dock_pane(SerializedDockPane { + workspace: workspace.workspace_id, + anchor_position: DockAnchor::Expanded, + shown: true, + }); + + let new_workspace = db.workspace_for_roots(&["/tmp"]); + } +} diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index cd1d22f50b..0d8dae59ef 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -9,7 +9,7 @@ use std::{ sync::Arc, }; -use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup}; +use crate::pane::SerializedDockPane; use super::Db; @@ -41,16 +41,16 @@ pub struct WorkspaceId(i64); pub struct SerializedWorkspace { pub workspace_id: WorkspaceId, // pub center_group: SerializedPaneGroup, - // pub dock_pane: Option, + pub dock_pane: Option, } impl Db { /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, return the /// the last workspace id - pub fn workspace_for_worktree_roots( - &self, - worktree_roots: &[Arc], - ) -> SerializedWorkspace { + pub fn workspace_for_roots
<P>
(&self, worktree_roots: &[P]) -> SerializedWorkspace
+    where
+        P: AsRef<Path> + Debug,
+    {
         // Find the workspace id which is uniquely identified by this set of paths
@@ -59,31 +59,50 @@ impl Db {
         }
 
         if let Some(workspace_id) = workspace_id {
-            // TODO
-            // let workspace_row = self.get_workspace_row(workspace_id);
-            // let center_group = self.get_pane_group(workspace_row.center_group_id);
-            // let dock_pane = self.get_pane(workspace_row.dock_pane_id);
-
             SerializedWorkspace {
                 workspace_id,
-                // center_group,
-                // dock_pane: Some(dock_pane),
+                dock_pane: self.get_dock_pane(workspace_id),
             }
         } else {
-            self.make_new_workspace()
+            self.make_new_workspace(worktree_roots)
         }
     }
 
-    pub fn make_new_workspace(&self) -> SerializedWorkspace {
+    pub fn make_new_workspace
<P>
(&self, worktree_roots: &[P]) -> SerializedWorkspace
+    where
+        P: AsRef<Path> + Debug,
+    {
+        fn logic
<P>
(
+            connection: &mut Connection,
+            worktree_roots: &[P],
+        ) -> Result<SerializedWorkspace>
+        where
+            P: AsRef<Path> + Debug,
+        {
+            let tx = connection.transaction()?;
+            tx.execute("INSERT INTO workspaces DEFAULT VALUES", [])?;
+
+            let id = WorkspaceId(tx.last_insert_rowid());
+
+            update_worktree_roots(&tx, &id, worktree_roots)?;
+
+            Ok(SerializedWorkspace {
+                workspace_id: id,
+                dock_pane: None,
+            })
+        }
+
         self.real()
             .map(|db| {
                 let mut lock = db.connection.lock();
+
+                // No need to waste the memory caching this, should happen rarely.
                 match logic(&mut lock, worktree_roots) {
                     Ok(serialized_workspace) => serialized_workspace,
                     Err(err) => {
                         log::error!("Failed to insert new workspace into DB: {}", err);
                         Default::default()
                     }
                 }
             })
             .unwrap_or_default()
@@ -97,7 +116,13 @@ impl Db {
             .map(|db| {
                 let lock = db.connection.lock();
 
-                get_workspace_id(worktree_roots, &lock)
+                match get_workspace_id(worktree_roots, &lock) {
+                    Ok(workspace_id) => workspace_id,
+                    Err(err) => {
+                        log::error!("Failed to get workspace_id: {}", err);
+                        None
+                    }
+                }
             })
             .unwrap_or(None)
     }
@@ -109,61 +134,16 @@ impl Db {
     /// Updates the open paths for the given workspace id. Will garbage collect items from
     /// any workspace ids which are now replaced by the new workspace id. Updates the timestamps
     /// in the workspace id table
-    pub fn update_worktree_roots
<P>
(&self, workspace_id: &WorkspaceId, worktree_roots: &[P]) + pub fn update_worktrees
<P>
(&self, workspace_id: &WorkspaceId, worktree_roots: &[P])
     where
         P: AsRef<Path> + Debug,
     {
-        fn logic
<P>
( - connection: &mut Connection, - worktree_roots: &[P], - workspace_id: &WorkspaceId, - ) -> Result<()> - where - P: AsRef + Debug, - { - let tx = connection.transaction()?; - { - // Lookup any old WorkspaceIds which have the same set of roots, and delete them. - let preexisting_id = get_workspace_id(worktree_roots, &tx); - if let Some(preexisting_id) = preexisting_id { - if preexisting_id != *workspace_id { - // Should also delete fields in other tables with cascading updates - tx.execute( - "DELETE FROM workspaces WHERE workspace_id = ?", - [preexisting_id.0], - )?; - } - } - - tx.execute( - "DELETE FROM worktree_roots WHERE workspace_id = ?", - [workspace_id.0], - )?; - - for root in worktree_roots { - let path = root.as_ref().as_os_str().as_bytes(); - // If you need to debug this, here's the string parsing: - // let path = root.as_ref().to_string_lossy().to_string(); - - tx.execute( - "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", - params![workspace_id.0, path], - )?; - } - - tx.execute( - "UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?", - [workspace_id.0], - )?; - } - tx.commit()?; - Ok(()) - } - self.real().map(|db| { let mut lock = db.connection.lock(); - match logic(&mut lock, worktree_roots, workspace_id) { + let tx = lock.transaction(); + + match tx.map(|tx| update_worktree_roots(&tx, workspace_id, worktree_roots)) { Ok(_) => {} Err(err) => { dbg!(&err); @@ -257,86 +237,130 @@ impl Db { } } -fn get_workspace_id
<P>
(worktree_roots: &[P], connection: &Connection) -> Option<WorkspaceId>
+fn update_worktree_roots
<P>
(
+    connection: &Connection,
+    workspace_id: &WorkspaceId,
+    worktree_roots: &[P],
+) -> Result<()>
 where
     P: AsRef<Path> + Debug,
 {
-    fn logic
<P>
( - worktree_roots: &[P], - connection: &Connection, - ) -> Result, anyhow::Error> - where - P: AsRef + Debug, - { - // Short circuit if we can - if worktree_roots.len() == 0 { - return Ok(None); + // Lookup any old WorkspaceIds which have the same set of roots, and delete them. + let preexisting_id = get_workspace_id(worktree_roots, &connection)?; + if let Some(preexisting_id) = preexisting_id { + if preexisting_id != *workspace_id { + // Should also delete fields in other tables with cascading updates + connection.execute( + "DELETE FROM workspaces WHERE workspace_id = ?", + [preexisting_id.0], + )?; } + } - // Prepare the array binding string. SQL doesn't have syntax for this, so - // we have to do it ourselves. - let mut array_binding_stmt = "(".to_string(); - for i in 0..worktree_roots.len() { - // This uses ?NNN for numbered placeholder syntax - array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based - if i < worktree_roots.len() - 1 { - array_binding_stmt.push(','); - array_binding_stmt.push(' '); - } + connection.execute( + "DELETE FROM worktree_roots WHERE workspace_id = ?", + [workspace_id.0], + )?; + + for root in worktree_roots { + let path = root.as_ref().as_os_str().as_bytes(); + // If you need to debug this, here's the string parsing: + // let path = root.as_ref().to_string_lossy().to_string(); + + connection.execute( + "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", + params![workspace_id.0, path], + )?; + } + + connection.execute( + "UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?", + [workspace_id.0], + )?; + + Ok(()) +} + +fn get_workspace_id
<P>
(worktree_roots: &[P], connection: &Connection) -> Result<Option<WorkspaceId>>
+where
+    P: AsRef<Path> + Debug,
+{
+    // fn logic
<P>
( + // worktree_roots: &[P], + // connection: &Connection, + // ) -> Result, anyhow::Error> + // where + // P: AsRef + Debug, + // { + // Short circuit if we can + if worktree_roots.len() == 0 { + return Ok(None); + } + + // Prepare the array binding string. SQL doesn't have syntax for this, so + // we have to do it ourselves. + let mut array_binding_stmt = "(".to_string(); + for i in 0..worktree_roots.len() { + // This uses ?NNN for numbered placeholder syntax + array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based + if i < worktree_roots.len() - 1 { + array_binding_stmt.push(','); + array_binding_stmt.push(' '); } - array_binding_stmt.push(')'); - // Any workspace can have multiple independent paths, and these paths - // can overlap in the database. Take this test data for example: - // - // [/tmp, /tmp2] -> 1 - // [/tmp] -> 2 - // [/tmp2, /tmp3] -> 3 - // - // This would be stred in the database like so: - // - // ID PATH - // 1 /tmp - // 1 /tmp2 - // 2 /tmp - // 3 /tmp2 - // 3 /tmp3 - // - // Note how both /tmp and /tmp2 are associated with multiple workspace IDs. - // So, given an array of worktree roots, how can we find the exactly matching ID? - // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: - // - We start with a join of this table on itself, generating every possible - // pair of ((path, ID), (path, ID)), and filtering the join down to just the - // *overlapping but non-matching* workspace IDs. For this small data set, - // this would look like: - // - // wt1.ID wt1.PATH | wt2.ID wt2.PATH - // 3 /tmp3 3 /tmp2 - // - // - Moving one SELECT out, we use the first pair's ID column to invert the selection, - // meaning we now have a list of all the entries for our array, minus overlapping sets, - // but including *subsets* of our worktree roots: - // - // ID PATH - // 1 /tmp - // 1 /tmp2 - // 2 /tmp - // - // - To trim out the subsets, we can to exploit the PRIMARY KEY constraint that there are no - // duplicate entries in this table. Using a GROUP BY and a COUNT we can find the subsets of - // our keys: - // - // ID num_matching - // 1 2 - // 2 1 - // - // - And with one final WHERE num_matching = $num_of_worktree_roots, we're done! We've found the - // matching ID correctly :D - // - // Note: due to limitations in SQLite's query binding, we have to generate the prepared - // statement with string substitution (the {array_bind}) below, and then bind the - // parameters by number. - let query = format!( - r#" + } + array_binding_stmt.push(')'); + // Any workspace can have multiple independent paths, and these paths + // can overlap in the database. Take this test data for example: + // + // [/tmp, /tmp2] -> 1 + // [/tmp] -> 2 + // [/tmp2, /tmp3] -> 3 + // + // This would be stred in the database like so: + // + // ID PATH + // 1 /tmp + // 1 /tmp2 + // 2 /tmp + // 3 /tmp2 + // 3 /tmp3 + // + // Note how both /tmp and /tmp2 are associated with multiple workspace IDs. + // So, given an array of worktree roots, how can we find the exactly matching ID? + // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: + // - We start with a join of this table on itself, generating every possible + // pair of ((path, ID), (path, ID)), and filtering the join down to just the + // *overlapping but non-matching* workspace IDs. 
For this small data set, + // this would look like: + // + // wt1.ID wt1.PATH | wt2.ID wt2.PATH + // 3 /tmp3 3 /tmp2 + // + // - Moving one SELECT out, we use the first pair's ID column to invert the selection, + // meaning we now have a list of all the entries for our array, minus overlapping sets, + // but including *subsets* of our worktree roots: + // + // ID PATH + // 1 /tmp + // 1 /tmp2 + // 2 /tmp + // + // - To trim out the subsets, we can to exploit the PRIMARY KEY constraint that there are no + // duplicate entries in this table. Using a GROUP BY and a COUNT we can find the subsets of + // our keys: + // + // ID num_matching + // 1 2 + // 2 1 + // + // - And with one final WHERE num_matching = $num_of_worktree_roots, we're done! We've found the + // matching ID correctly :D + // + // Note: due to limitations in SQLite's query binding, we have to generate the prepared + // statement with string substitution (the {array_bind}) below, and then bind the + // parameters by number. + let query = format!( + r#" SELECT workspace_id FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots WHERE worktree_root in {array_bind} AND workspace_id NOT IN @@ -347,50 +371,50 @@ where GROUP BY workspace_id) WHERE num_matching = ? "#, - array_bind = array_binding_stmt - ); + array_bind = array_binding_stmt + ); - // This will only be called on start up and when root workspaces change, no need to waste memory - // caching it. - let mut stmt = connection.prepare(&query)?; - // Make sure we bound the parameters correctly - debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count()); + // This will only be called on start up and when root workspaces change, no need to waste memory + // caching it. + let mut stmt = connection.prepare(&query)?; + // Make sure we bound the parameters correctly + debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count()); - for i in 0..worktree_roots.len() { - let path = &worktree_roots[i].as_ref().as_os_str().as_bytes(); - // If you need to debug this, here's the string parsing: - // let path = &worktree_roots[i].as_ref().to_string_lossy().to_string() - stmt.raw_bind_parameter(i + 1, path)? - } - // No -1, because SQLite is 1 based - stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?; - - let mut rows = stmt.raw_query(); - let row = rows.next(); - let result = if let Ok(Some(row)) = row { - Ok(Some(WorkspaceId(row.get(0)?))) - } else { - Ok(None) - }; - - // Ensure that this query only returns one row. The PRIMARY KEY constraint should catch this case - // but this is here to catch if someone refactors that constraint out. - debug_assert!(matches!(rows.next(), Ok(None))); - - result + for i in 0..worktree_roots.len() { + let path = &worktree_roots[i].as_ref().as_os_str().as_bytes(); + // If you need to debug this, here's the string parsing: + // let path = &worktree_roots[i].as_ref().to_string_lossy().to_string() + stmt.raw_bind_parameter(i + 1, path)? } + // No -1, because SQLite is 1 based + stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?; - match logic(worktree_roots, connection) { - Ok(result) => result, - Err(err) => { - log::error!( - "Failed to get the workspace ID for paths {:?}, err: {}", - worktree_roots, - err - ); - None - } - } + let mut rows = stmt.raw_query(); + let row = rows.next(); + let result = if let Ok(Some(row)) = row { + Ok(Some(WorkspaceId(row.get(0)?))) + } else { + Ok(None) + }; + + // Ensure that this query only returns one row. 
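+    // (two rows here would mean two distinct workspaces matched the same
+    // exact set of roots)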
The PRIMARY KEY constraint should catch this case + // but this is here to catch if someone refactors that constraint out. + debug_assert!(matches!(rows.next(), Ok(None))); + + result + // } + + // match logic(worktree_roots, connection) { + // Ok(result) => result, + // Err(err) => { + // log::error!( + // "Failed to get the workspace ID for paths {:?}, err: {}", + // worktree_roots, + // err + // ); + // None + // } + // } } #[cfg(test)] @@ -407,20 +431,52 @@ mod tests { use super::WorkspaceId; + #[test] + fn test_worktree_for_roots() { + let db = Db::open_in_memory(); + + // Test creation in 0 case + let workspace_1 = db.workspace_for_roots::(&[]); + assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); + + // Test pulling from recent workspaces + let workspace_1 = db.workspace_for_roots::(&[]); + assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); + + sleep(Duration::from_secs(1)); + db.make_new_workspace::(&[]); + + // Test pulling another value from recent workspaces + let workspace_2 = db.workspace_for_roots::(&[]); + assert_eq!(workspace_2.workspace_id, WorkspaceId(2)); + + // Test creating a new workspace that doesn't exist already + let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); + assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); + + // Make sure it's in the recent workspaces.... + let workspace_3 = db.workspace_for_roots::(&[]); + assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); + + // And that it can be pulled out again + let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); + assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); + } + #[test] fn test_empty_worktrees() { let db = Db::open_in_memory(); assert_eq!(None, db.workspace_id::(&[])); - db.make_new_workspace(); //ID 1 - db.make_new_workspace(); //ID 2 - db.update_worktree_roots(&WorkspaceId(1), &["/tmp", "/tmp2"]); + db.make_new_workspace::(&[]); //ID 1 + db.make_new_workspace::(&[]); //ID 2 + db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); // Sanity check assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1))); - db.update_worktree_roots::(&WorkspaceId(1), &[]); + db.update_worktrees::(&WorkspaceId(1), &[]); // Make sure 'no worktrees' fails correctly. 
returning [1, 2] from this // call would be semantically correct (as those are the workspaces that @@ -451,8 +507,8 @@ mod tests { let db = Db::open_in_memory(); for (workspace_id, entries) in data { - db.make_new_workspace(); - db.update_worktree_roots(workspace_id, entries); + db.make_new_workspace::(&[]); + db.update_worktrees(workspace_id, entries); } assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp1"])); @@ -485,8 +541,8 @@ mod tests { let db = Db::open_in_memory(); for (workspace_id, entries) in data { - db.make_new_workspace(); - db.update_worktree_roots(workspace_id, entries); + db.make_new_workspace::(&[]); + db.update_worktrees(workspace_id, entries); } assert_eq!(db.workspace_id(&["/tmp2"]), None); @@ -527,15 +583,15 @@ mod tests { // Load in the test data for (workspace_id, entries) in data { - db.make_new_workspace(); - db.update_worktree_roots(workspace_id, entries); + db.make_new_workspace::(&[]); + db.update_worktrees(workspace_id, entries); } // Make sure the timestamp updates sleep(Duration::from_secs(1)); // Execute the update - db.update_worktree_roots(&WorkspaceId(2), &["/tmp2", "/tmp3"]); + db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); // Make sure that workspace 3 doesn't exist assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), Some(WorkspaceId(2))); From 3451a3c7fe40234fec3db826993961f19ab1f816 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 27 Oct 2022 15:52:38 -0700 Subject: [PATCH 127/240] Rebase - Got Zed compiling and fixed a build error due to conflicting dependencies that cargo didn't catch :( Co-Authored-By: kay@zed.dev --- Cargo.lock | 2 +- Cargo.toml | 1 + crates/auto_update/Cargo.toml | 1 + crates/auto_update/src/auto_update.rs | 14 +- crates/collab/src/integration_tests.rs | 37 +++- crates/collab_ui/src/collab_ui.rs | 7 +- crates/command_palette/src/command_palette.rs | 5 +- crates/db/Cargo.toml | 1 - crates/db/examples/serialize-pane.rs | 7 +- crates/db/src/items.rs | 180 +++++++++--------- crates/db/src/pane.rs | 32 ++-- crates/diagnostics/src/diagnostics.rs | 10 +- .../src/test/editor_lsp_test_context.rs | 10 +- crates/file_finder/src/file_finder.rs | 30 +-- crates/project_panel/src/project_panel.rs | 20 +- .../src/tests/terminal_test_context.rs | 11 +- crates/vim/src/test/vim_test_context.rs | 10 +- crates/workspace/src/dock.rs | 5 +- crates/workspace/src/pane.rs | 15 +- crates/workspace/src/workspace.rs | 172 +++++++++++------ crates/zed/src/main.rs | 13 +- crates/zed/src/zed.rs | 174 ++++++++++------- 22 files changed, 466 insertions(+), 291 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0da4d17710..2cc8063ca4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -428,6 +428,7 @@ version = "0.1.0" dependencies = [ "anyhow", "client", + "db", "gpui", "isahc", "lazy_static", @@ -1560,7 +1561,6 @@ dependencies = [ "rusqlite_migration", "serde", "serde_rusqlite", - "settings", "tempdir", ] diff --git a/Cargo.toml b/Cargo.toml index 8e9814c448..a97f272e47 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,3 +81,4 @@ split-debuginfo = "unpacked" [profile.release] debug = true + diff --git a/crates/auto_update/Cargo.toml b/crates/auto_update/Cargo.toml index 944aa87ee5..b1ca061614 100644 --- a/crates/auto_update/Cargo.toml +++ b/crates/auto_update/Cargo.toml @@ -8,6 +8,7 @@ path = "src/auto_update.rs" doctest = false [dependencies] +db = { path = "../db" } client = { path = "../client" } gpui = { path = "../gpui" } menu = { path = "../menu" } diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs 
index bda45053b1..1baf609268 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -1,7 +1,8 @@ mod update_notification; use anyhow::{anyhow, Context, Result}; -use client::{http::HttpClient, ZED_SECRET_CLIENT_TOKEN, ZED_SERVER_URL}; +use client::{http::HttpClient, ZED_SECRET_CLIENT_TOKEN}; +use db::Db; use gpui::{ actions, platform::AppVersion, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task, WeakViewHandle, @@ -55,11 +56,16 @@ impl Entity for AutoUpdater { type Event = (); } -pub fn init(db: project::Db, http_client: Arc, cx: &mut MutableAppContext) { +pub fn init( + db: Db, + http_client: Arc, + server_url: String, + cx: &mut MutableAppContext, +) { if let Some(version) = (*ZED_APP_VERSION).or_else(|| cx.platform().app_version().ok()) { - let server_url = ZED_SERVER_URL.to_string(); + let server_url = server_url; let auto_updater = cx.add_model(|cx| { - let updater = AutoUpdater::new(version, db.clone(), http_client, server_url.clone()); + let updater = AutoUpdater::new(version, db, http_client, server_url.clone()); updater.start_polling(cx).detach(); updater }); diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 3e0b2171a8..5de28f1c65 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -905,8 +905,14 @@ async fn test_host_disconnect( let project_b = client_b.build_remote_project(project_id, cx_b).await; assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); - let (_, workspace_b) = - cx_b.add_window(|cx| Workspace::new(project_b.clone(), |_, _| unimplemented!(), cx)); + let (_, workspace_b) = cx_b.add_window(|cx| { + Workspace::new( + Default::default(), + project_b.clone(), + |_, _| unimplemented!(), + cx, + ) + }); let editor_b = workspace_b .update(cx_b, |workspace, cx| { workspace.open_path((worktree_id, "b.txt"), None, true, cx) @@ -3701,8 +3707,14 @@ async fn test_collaborating_with_code_actions( // Join the project as client B. 
let project_b = client_b.build_remote_project(project_id, cx_b).await; - let (_window_b, workspace_b) = - cx_b.add_window(|cx| Workspace::new(project_b.clone(), |_, _| unimplemented!(), cx)); + let (_window_b, workspace_b) = cx_b.add_window(|cx| { + Workspace::new( + Default::default(), + project_b.clone(), + |_, _| unimplemented!(), + cx, + ) + }); let editor_b = workspace_b .update(cx_b, |workspace, cx| { workspace.open_path((worktree_id, "main.rs"), None, true, cx) @@ -3922,8 +3934,14 @@ async fn test_collaborating_with_renames(cx_a: &mut TestAppContext, cx_b: &mut T .unwrap(); let project_b = client_b.build_remote_project(project_id, cx_b).await; - let (_window_b, workspace_b) = - cx_b.add_window(|cx| Workspace::new(project_b.clone(), |_, _| unimplemented!(), cx)); + let (_window_b, workspace_b) = cx_b.add_window(|cx| { + Workspace::new( + Default::default(), + project_b.clone(), + |_, _| unimplemented!(), + cx, + ) + }); let editor_b = workspace_b .update(cx_b, |workspace, cx| { workspace.open_path((worktree_id, "one.rs"), None, true, cx) @@ -6054,7 +6072,12 @@ impl TestClient { ) -> ViewHandle { let (_, root_view) = cx.add_window(|_| EmptyView); cx.add_view(&root_view, |cx| { - Workspace::new(project.clone(), |_, _| unimplemented!(), cx) + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) }) } diff --git a/crates/collab_ui/src/collab_ui.rs b/crates/collab_ui/src/collab_ui.rs index f5f508ce5b..3a20a2fc69 100644 --- a/crates/collab_ui/src/collab_ui.rs +++ b/crates/collab_ui/src/collab_ui.rs @@ -51,7 +51,12 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { .await?; let (_, workspace) = cx.add_window((app_state.build_window_options)(), |cx| { - let mut workspace = Workspace::new(project, app_state.default_item_factory, cx); + let mut workspace = Workspace::new( + Default::default(), + project, + app_state.default_item_factory, + cx, + ); (app_state.initialize_workspace)(&mut workspace, &app_state, cx); workspace }); diff --git a/crates/command_palette/src/command_palette.rs b/crates/command_palette/src/command_palette.rs index b472da3bb5..5af23b45d7 100644 --- a/crates/command_palette/src/command_palette.rs +++ b/crates/command_palette/src/command_palette.rs @@ -350,8 +350,9 @@ mod tests { }); let project = Project::test(app_state.fs.clone(), [], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let editor = cx.add_view(&workspace, |cx| { let mut editor = Editor::single_line(None, cx); editor.set_text("abc", cx); diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 64e86e0345..9fad1aa39a 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -13,7 +13,6 @@ test-support = [] [dependencies] collections = { path = "../collections" } gpui = { path = "../gpui" } -settings = { path = "../settings" } anyhow = "1.0.57" async-trait = "0.1" lazy_static = "1.4.0" diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index 9cf32dfd57..fc420b866d 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -1,7 +1,6 @@ -use std::{fs::File, path::Path, thread::sleep, time::Duration}; +use std::{fs::File, path::Path}; -use db::pane::SerializedDockPane; -use settings::DockAnchor; +use db::pane::{DockAnchor, SerializedDockPane}; const TEST_FILE: &'static str = "test-db.db"; @@ 
-25,7 +24,7 @@ fn main() -> anyhow::Result<()> { shown: true, }); - let new_workspace = db.workspace_for_roots(&["/tmp"]); + let _new_workspace = db.workspace_for_roots(&["/tmp"]); db.write_to(file).ok(); diff --git a/crates/db/src/items.rs b/crates/db/src/items.rs index 7bd4c27f43..a6497903ac 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/items.rs @@ -1,69 +1,69 @@ -use std::{ - ffi::OsStr, - fmt::Display, - hash::Hash, - os::unix::prelude::OsStrExt, - path::{Path, PathBuf}, - sync::Arc, -}; +// use std::{ +// ffi::OsStr, +// fmt::Display, +// hash::Hash, +// os::unix::prelude::OsStrExt, +// path::{Path, PathBuf}, +// sync::Arc, +// }; -use anyhow::Result; -use collections::HashSet; -use rusqlite::{named_params, params, types::FromSql}; +// use anyhow::Result; +// use collections::HashSet; +// use rusqlite::{named_params, params, types::FromSql}; -use crate::workspace::WorkspaceId; +// use crate::workspace::WorkspaceId; -use super::Db; +// use super::Db; -/// Current design makes the cut at the item level, -/// - Maybe A little more bottom up, serialize 'Terminals' and 'Editors' directly, and then make a seperate -/// - items table, with a kind, and an integer that acts as a key to one of these other tables -/// This column is a foreign key to ONE OF: editors, terminals, searches -/// - +// /// Current design makes the cut at the item level, +// /// - Maybe A little more bottom up, serialize 'Terminals' and 'Editors' directly, and then make a seperate +// /// - items table, with a kind, and an integer that acts as a key to one of these other tables +// /// This column is a foreign key to ONE OF: editors, terminals, searches +// /// - -// (workspace_id, item_id) -// kind -> ::Editor:: +// // (workspace_id, item_id) +// // kind -> ::Editor:: -// -> -// At the workspace level -// -> (Workspace_ID, item_id) -// -> One shot, big query, load everything up: +// // -> +// // At the workspace level +// // -> (Workspace_ID, item_id) +// // -> One shot, big query, load everything up: -// -> SerializedWorkspace::deserialize(tx, itemKey) -// -> SerializedEditor::deserialize(tx, itemKey) +// // -> SerializedWorkspace::deserialize(tx, itemKey) +// // -> SerializedEditor::deserialize(tx, itemKey) -// -> -// -> Workspace::new(SerializedWorkspace) -// -> Editor::new(serialized_workspace[???]serializedEditor) +// // -> +// // -> Workspace::new(SerializedWorkspace) +// // -> Editor::new(serialized_workspace[???]serializedEditor) -// //Pros: Keeps sql out of every body elese, makes changing it easier (e.g. for loading from a network or RocksDB) -// //Cons: DB has to know the internals of the entire rest of the app +// // //Pros: Keeps sql out of every body elese, makes changing it easier (e.g. 
for loading from a network or RocksDB) +// // //Cons: DB has to know the internals of the entire rest of the app -// Workspace -// Worktree roots -// Pane groups -// Dock -// Items -// Sidebars +// // Workspace +// // Worktree roots +// // Pane groups +// // Dock +// // Items +// // Sidebars -// Things I'm doing: finding about nullability for foreign keys -pub(crate) const ITEMS_M_1: &str = " -CREATE TABLE project_searches( - workspace_id INTEGER, - item_id INTEGER, - query TEXT, - PRIMARY KEY (workspace_id, item_id) - FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) -) STRICT; +// // Things I'm doing: finding about nullability for foreign keys +// pub(crate) const ITEMS_M_1: &str = " +// CREATE TABLE project_searches( +// workspace_id INTEGER, +// item_id INTEGER, +// query TEXT, +// PRIMARY KEY (workspace_id, item_id) +// FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) +// ) STRICT; -CREATE TABLE editors( - workspace_id INTEGER, - item_id INTEGER, - path BLOB NOT NULL, - PRIMARY KEY (workspace_id, item_id) - FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) -) STRICT; -"; +// CREATE TABLE editors( +// workspace_id INTEGER, +// item_id INTEGER, +// path BLOB NOT NULL, +// PRIMARY KEY (workspace_id, item_id) +// FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) +// ) STRICT; +// "; #[derive(Debug, PartialEq, Eq)] pub struct ItemId { @@ -71,45 +71,45 @@ pub struct ItemId { item_id: usize, } -enum SerializedItemKind { - Editor, - Diagnostics, - ProjectSearch, - Terminal, -} +// enum SerializedItemKind { +// Editor, +// Diagnostics, +// ProjectSearch, +// Terminal, +// } -struct SerializedItemRow { - kind: SerializedItemKind, - item_id: usize, - path: Option>, - query: Option, -} +// struct SerializedItemRow { +// kind: SerializedItemKind, +// item_id: usize, +// path: Option>, +// query: Option, +// } -#[derive(Debug, PartialEq, Eq)] -pub enum SerializedItem { - Editor { item_id: usize, path: Arc }, - Diagnostics { item_id: usize }, - ProjectSearch { item_id: usize, query: String }, - Terminal { item_id: usize }, -} +// #[derive(Debug, PartialEq, Eq)] +// pub enum SerializedItem { +// Editor { item_id: usize, path: Arc }, +// Diagnostics { item_id: usize }, +// ProjectSearch { item_id: usize, query: String }, +// Terminal { item_id: usize }, +// } -impl SerializedItem { - pub fn item_id(&self) -> usize { - match self { - SerializedItem::Editor { item_id, .. } => *item_id, - SerializedItem::Diagnostics { item_id } => *item_id, - SerializedItem::ProjectSearch { item_id, .. } => *item_id, - SerializedItem::Terminal { item_id } => *item_id, - } - } -} +// impl SerializedItem { +// pub fn item_id(&self) -> usize { +// match self { +// SerializedItem::Editor { item_id, .. } => *item_id, +// SerializedItem::Diagnostics { item_id } => *item_id, +// SerializedItem::ProjectSearch { item_id, .. 
} => *item_id, +// SerializedItem::Terminal { item_id } => *item_id, +// } +// } +// } -impl Db { - pub fn get_item(&self, item_id: ItemId) -> SerializedItem { - unimplemented!() - } +// impl Db { +// pub fn get_item(&self, item_id: ItemId) -> SerializedItem { +// unimplemented!() +// } - pub fn save_item(&self, workspace_id: WorkspaceId, item: &SerializedItem) {} +// pub fn save_item(&self, workspace_id: WorkspaceId, item: &SerializedItem) {} - pub fn close_item(&self, item_id: ItemId) {} -} +// pub fn close_item(&self, item_id: ItemId) {} +// } diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 37f91c58a3..447b5eed87 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,5 +1,4 @@ use gpui::Axis; -use settings::DockAnchor; use crate::{items::ItemId, workspace::WorkspaceId}; @@ -32,7 +31,7 @@ pub struct PaneGroupId { } impl PaneGroupId { - pub(crate) fn root(workspace_id: WorkspaceId) -> Self { + pub fn root(workspace_id: WorkspaceId) -> Self { Self { workspace_id, group_id: 0, @@ -48,7 +47,7 @@ pub struct SerializedPaneGroup { } impl SerializedPaneGroup { - pub(crate) fn empty_root(workspace_id: WorkspaceId) -> Self { + pub fn empty_root(workspace_id: WorkspaceId) -> Self { Self { group_id: PaneGroupId::root(workspace_id), axis: Default::default(), @@ -136,6 +135,14 @@ CREATE TABLE dock_items( COMMIT; "; +#[derive(Default, Debug)] +pub enum DockAnchor { + #[default] + Bottom, + Right, + Expanded, +} + #[derive(Default, Debug)] pub struct SerializedDockPane { pub workspace: WorkspaceId, @@ -144,7 +151,7 @@ pub struct SerializedDockPane { } impl Db { - pub(crate) fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { + pub fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { let axis = self.get_pane_group_axis(pane_group_id); let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); for child_row in self.get_pane_group_children(pane_group_id) { @@ -177,40 +184,39 @@ impl Db { fn get_pane_group_children( &self, - pane_group_id: PaneGroupId, + _pane_group_id: PaneGroupId, ) -> impl Iterator { Vec::new().into_iter() } - fn get_pane_group_axis(&self, pane_group_id: PaneGroupId) -> Axis { + fn get_pane_group_axis(&self, _pane_group_id: PaneGroupId) -> Axis { unimplemented!(); } - pub fn save_pane_splits(&self, center_pane_group: SerializedPaneGroup) { + pub fn save_pane_splits(&self, _center_pane_group: SerializedPaneGroup) { // Delete the center pane group for this workspace and any of its children // Generate new pane group IDs as we go through // insert them // Items garbage collect themselves when dropped } - pub(crate) fn get_pane(&self, pane_id: PaneId) -> SerializedPane { + pub(crate) fn get_pane(&self, _pane_id: PaneId) -> SerializedPane { unimplemented!(); } - pub fn get_dock_pane(&self, workspace: WorkspaceId) -> Option { + pub fn get_dock_pane(&self, _workspace: WorkspaceId) -> Option { unimplemented!() } - pub fn save_dock_pane(&self, dock_pane: SerializedDockPane) {} + pub fn save_dock_pane(&self, _dock_pane: SerializedDockPane) {} } #[cfg(test)] mod tests { - use settings::DockAnchor; use crate::Db; - use super::SerializedDockPane; + use super::{DockAnchor, SerializedDockPane}; #[test] fn test_basic_dock_pane() { @@ -226,6 +232,6 @@ mod tests { shown: true, }); - let new_workspace = db.workspace_for_roots(&["/tmp"]); + let _new_workspace = db.workspace_for_roots(&["/tmp"]); } } diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index 6ff7490181..078d83ac61 100644 
--- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs @@ -781,8 +781,14 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/test".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); // Create some diagnostics project.update(cx, |project, cx| { diff --git a/crates/editor/src/test/editor_lsp_test_context.rs b/crates/editor/src/test/editor_lsp_test_context.rs index 69205e1991..9cf305ad37 100644 --- a/crates/editor/src/test/editor_lsp_test_context.rs +++ b/crates/editor/src/test/editor_lsp_test_context.rs @@ -63,8 +63,14 @@ impl<'a> EditorLspTestContext<'a> { .insert_tree("/root", json!({ "dir": { file_name: "" }})) .await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); project .update(cx, |project, cx| { project.find_or_create_local_worktree("/root", true, cx) diff --git a/crates/file_finder/src/file_finder.rs b/crates/file_finder/src/file_finder.rs index c6d4a8f121..b0016002fa 100644 --- a/crates/file_finder/src/file_finder.rs +++ b/crates/file_finder/src/file_finder.rs @@ -316,8 +316,9 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); cx.dispatch_action(window_id, Toggle); let finder = cx.read(|cx| workspace.read(cx).modal::().unwrap()); @@ -371,8 +372,9 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/dir".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); @@ -446,8 +448,9 @@ mod tests { cx, ) .await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); finder @@ -471,8 +474,9 @@ mod tests { cx, ) .await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); @@ -524,8 +528,9 @@ mod tests { cx, ) .await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); @@ -563,8 +568,9 @@ mod tests { .await; let project = 
Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); finder diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index b6787c930c..dae1f70aae 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -1393,8 +1393,14 @@ mod tests { .await; let project = Project::test(fs.clone(), ["/root1".as_ref(), "/root2".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); let panel = workspace.update(cx, |_, cx| ProjectPanel::new(project, cx)); assert_eq!( visible_entries_as_strings(&panel, 0..50, cx), @@ -1486,8 +1492,14 @@ mod tests { .await; let project = Project::test(fs.clone(), ["/root1".as_ref(), "/root2".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); let panel = workspace.update(cx, |_, cx| ProjectPanel::new(project, cx)); select_path(&panel, "root1", cx); diff --git a/crates/terminal/src/tests/terminal_test_context.rs b/crates/terminal/src/tests/terminal_test_context.rs index 3e3d1243d5..352ce4a0d2 100644 --- a/crates/terminal/src/tests/terminal_test_context.rs +++ b/crates/terminal/src/tests/terminal_test_context.rs @@ -28,9 +28,14 @@ impl<'a> TerminalTestContext<'a> { let params = self.cx.update(AppState::test); let project = Project::test(params.fs.clone(), [], self.cx).await; - let (_, workspace) = self - .cx - .add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (_, workspace) = self.cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); (project, workspace) } diff --git a/crates/vim/src/test/vim_test_context.rs b/crates/vim/src/test/vim_test_context.rs index 1aeba9fd08..68c08f2f7a 100644 --- a/crates/vim/src/test/vim_test_context.rs +++ b/crates/vim/src/test/vim_test_context.rs @@ -41,8 +41,14 @@ impl<'a> VimTestContext<'a> { .insert_tree("/root", json!({ "dir": { "test.txt": "" } })) .await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); // Setup search toolbars workspace.update(cx, |workspace, cx| { diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index 699b9b1d60..5f471ff018 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -568,8 +568,9 @@ mod tests { cx.update(|cx| init(cx)); let project = Project::test(fs, [], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, default_item_factory, cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, default_item_factory, cx) + }); 
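// Note on the pattern in this and the surrounding test hunks: Workspace::new
// now takes the workspace's serialized state as its first argument, and the
// tests pass Default::default() to start from an empty record. A minimal
// sketch of the assumed new signature (the parameter types are inferred from
// the rest of this series, not shown in this hunk):
//
//     pub fn new(
//         serialized_workspace: SerializedWorkspace,
//         project: ModelHandle<Project>,
//         dock_default_item_factory: DefaultItemFactory,
//         cx: &mut ViewContext<Self>,
//     ) -> Self
//
// The serialized state currently contributes only its workspace_id, which the
// constructor stashes as _db_id until deserialization is wired in further.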
workspace.update(cx, |workspace, cx| { let left_panel = cx.add_view(|_| TestItem::new()); diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index 8dd97e230f..01313f2046 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -1645,8 +1645,9 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, None, cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); // 1. Add with a destination index @@ -1734,8 +1735,9 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, None, cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); // 1. Add with a destination index @@ -1811,8 +1813,9 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, None, cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); // singleton view diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 7f82a46edf..a6ef7c6c01 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -1073,7 +1073,7 @@ pub enum Event { pub struct Workspace { weak_self: WeakViewHandle, - db_id: WorkspaceId, + _db_id: WorkspaceId, client: Arc, user_store: ModelHandle, remote_entity_subscription: Option, @@ -1217,7 +1217,7 @@ impl Workspace { let mut this = Workspace { modal: None, weak_self: weak_handle, - db_id: serialized_workspace.workspace_id, + _db_id: serialized_workspace.workspace_id, center: PaneGroup::new(center_pane.clone()), dock, // When removing an item, the last element remaining in this array @@ -1250,16 +1250,14 @@ impl Workspace { this } - fn new_local( - abs_paths: &[PathBuf], - app_state: &Arc, + fn new_local( + abs_paths: Vec, + app_state: Arc, cx: &mut MutableAppContext, - callback: F, - ) -> Task - where - T: 'static, - F: 'static + FnOnce(&mut Workspace, &mut ViewContext) -> T, - { + ) -> Task<( + ViewHandle, + Vec, Arc>>>, + )> { let project_handle = Project::local( app_state.client.clone(), app_state.user_store.clone(), @@ -1273,21 +1271,25 @@ impl Workspace { // Get project paths for all of the abs_paths let mut worktree_roots: HashSet> = Default::default(); let mut project_paths = Vec::new(); - for path in abs_paths { + for path in abs_paths.iter() { if let Some((worktree, project_entry)) = cx - .update(|cx| Workspace::project_path_for_path(project_handle, path, true, cx)) + .update(|cx| { + Workspace::project_path_for_path(project_handle.clone(), &path, true, cx) + }) .await .log_err() { worktree_roots.insert(worktree.read_with(&mut cx, |tree, _| tree.abs_path())); - project_paths.push(project_entry); + project_paths.push(Some(project_entry)); + } else { + project_paths.push(None); } } // Use the resolved worktree roots to get the serialized_db 
from the database let serialized_workspace = cx.read(|cx| { cx.global::() - .workspace_for_worktree_roots(&Vec::from_iter(worktree_roots.into_iter())[..]) + .workspace_for_roots(&Vec::from_iter(worktree_roots.into_iter())[..]) }); // Use the serialized workspace to construct the new window @@ -1303,18 +1305,36 @@ impl Workspace { }); // Call open path for each of the project paths - // (this will bring them to the front if they were in kthe serialized workspace) - let tasks = workspace.update(&mut cx, |workspace, cx| { - let tasks = Vec::new(); - for path in project_paths { - tasks.push(workspace.open_path(path, true, cx)); - } - tasks - }); - futures::future::join_all(tasks.into_iter()).await; + // (this will bring them to the front if they were in the serialized workspace) + debug_assert!(abs_paths.len() == project_paths.len()); + let tasks = abs_paths + .iter() + .cloned() + .zip(project_paths.into_iter()) + .map(|(abs_path, project_path)| { + let workspace = workspace.clone(); + cx.spawn(|mut cx| { + let fs = app_state.fs.clone(); + async move { + let project_path = project_path?; + if fs.is_file(&abs_path).await { + Some( + workspace + .update(&mut cx, |workspace, cx| { + workspace.open_path(project_path, true, cx) + }) + .await, + ) + } else { + None + } + } + }) + }); - // Finally call callback on the workspace - workspace.update(&mut cx, |workspace, cx| callback(workspace, cx)) + let opened_items = futures::future::join_all(tasks.into_iter()).await; + + (workspace, opened_items) }) } @@ -1371,12 +1391,16 @@ impl Workspace { ) -> Task where T: 'static, - F: FnOnce(&mut Workspace, &mut ViewContext) -> T, + F: 'static + FnOnce(&mut Workspace, &mut ViewContext) -> T, { if self.project.read(cx).is_local() { Task::Ready(Some(callback(self, cx))) } else { - Self::new_local(&[], app_state, cx, callback) + let task = Self::new_local(Vec::new(), app_state.clone(), cx); + cx.spawn(|_vh, mut cx| async move { + let (workspace, _) = task.await; + workspace.update(&mut cx, callback) + }) } } @@ -1539,7 +1563,7 @@ impl Workspace { for path in &abs_paths { project_paths.push( this.update(&mut cx, |this, cx| { - Workspace::project_path_for_path(this.project, path, visible, cx) + Workspace::project_path_for_path(this.project.clone(), path, visible, cx) }) .await .log_err(), @@ -3017,8 +3041,15 @@ pub fn open_paths( let app_state = app_state.clone(); let abs_paths = abs_paths.to_vec(); cx.spawn(|mut cx| async move { - let workspace = if let Some(existing) = existing { - existing + if let Some(existing) = existing { + ( + existing.clone(), + existing + .update(&mut cx, |workspace, cx| { + workspace.open_paths(abs_paths, true, cx) + }) + .await, + ) } else { let contains_directory = futures::future::join_all(abs_paths.iter().map(|path| app_state.fs.is_file(path))) @@ -3026,28 +3057,32 @@ pub fn open_paths( .contains(&false); cx.update(|cx| { - Workspace::new_local(&abs_paths[..], &app_state, cx, move |workspace, cx| { - if contains_directory { - workspace.toggle_sidebar(SidebarSide::Left, cx); - } - cx.handle() + let task = Workspace::new_local(abs_paths, app_state.clone(), cx); + + cx.spawn(|mut cx| async move { + let (workspace, items) = task.await; + + workspace.update(&mut cx, |workspace, cx| { + if contains_directory { + workspace.toggle_sidebar(SidebarSide::Left, cx); + } + }); + + (workspace, items) }) }) .await - }; - - let items = workspace - .update(&mut cx, |workspace, cx| { - workspace.open_paths(abs_paths, true, cx) - }) - .await; - - (workspace, items) + } }) } fn open_new(app_state: &Arc, 
cx: &mut MutableAppContext) -> Task<()> { - Workspace::new_local(&[], app_state, cx, |_, cx| cx.dispatch_action(NewFile)) + let task = Workspace::new_local(Vec::new(), app_state.clone(), cx); + cx.spawn(|mut cx| async move { + let (workspace, _) = task.await; + + workspace.update(&mut cx, |_, cx| cx.dispatch_action(NewFile)) + }) } #[cfg(test)] @@ -3076,8 +3111,14 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, [], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), default_item_factory, cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + default_item_factory, + cx, + ) + }); // Adding an item with no ambiguity renders the tab without detail. let item1 = cx.add_view(&workspace, |_| { @@ -3141,8 +3182,14 @@ mod tests { .await; let project = Project::test(fs, ["root1".as_ref()], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), default_item_factory, cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + default_item_factory, + cx, + ) + }); let worktree_id = project.read_with(cx, |project, cx| { project.worktrees(cx).next().unwrap().read(cx).id() }); @@ -3238,8 +3285,14 @@ mod tests { fs.insert_tree("/root", json!({ "one": "" })).await; let project = Project::test(fs, ["root".as_ref()], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), default_item_factory, cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + default_item_factory, + cx, + ) + }); // When there are no dirty items, there's nothing to do. let item1 = cx.add_view(&workspace, |_| TestItem::new()); @@ -3279,8 +3332,8 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, None, cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, default_item_factory, cx)); + let (window_id, workspace) = cx + .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); let item1 = cx.add_view(&workspace, |_| { let mut item = TestItem::new(); @@ -3375,8 +3428,8 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, [], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, default_item_factory, cx)); + let (window_id, workspace) = cx + .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); // Create several workspace items with single project entries, and two // workspace items with multiple project entries. 
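The thread running through all of these call-site updates: Workspace::new_local no longer accepts a completion callback. It returns a task that resolves to the new workspace handle plus one entry per requested path, and callers spawn a future to await it. A minimal sketch of the consuming side, assembled only from calls that appear earlier in this patch (open_new above is the simplest real instance):

    let task = Workspace::new_local(abs_paths, app_state.clone(), cx);
    cx.spawn(|mut cx| async move {
        // Resolves once the window exists and the serialized paths have been
        // reopened; an entry is None when its abs_path was not a file.
        let (workspace, _opened_items) = task.await;
        workspace.update(&mut cx, |_workspace, cx| {
            // Whatever the old callback did now runs here.
            cx.dispatch_action(NewFile)
        })
    })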
@@ -3477,8 +3530,8 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, [], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, default_item_factory, cx)); + let (window_id, workspace) = cx + .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); let item = cx.add_view(&workspace, |_| { let mut item = TestItem::new(); @@ -3595,7 +3648,8 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, [], cx).await; - let (_, workspace) = cx.add_window(|cx| Workspace::new(project, default_item_factory, cx)); + let (_, workspace) = cx + .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); let item = cx.add_view(&workspace, |_| { let mut item = TestItem::new(); diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index c6862e66e4..84d18ba22f 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -23,7 +23,7 @@ use isahc::{config::Configurable, Request}; use language::LanguageRegistry; use log::LevelFilter; use parking_lot::Mutex; -use project::{Fs, HomeDir, ProjectStore}; +use project::{Db, Fs, HomeDir, ProjectStore}; use serde_json::json; use settings::{ self, settings_file::SettingsFile, KeymapFileContent, Settings, SettingsFileContent, @@ -148,7 +148,9 @@ fn main() { let project_store = cx.add_model(|_| ProjectStore::new()); let db = cx.background().block(db); - client.start_telemetry(db.clone()); + cx.set_global(db); + + client.start_telemetry(cx.global::().clone()); client.report_event("start app", Default::default()); let app_state = Arc::new(AppState { @@ -162,7 +164,12 @@ fn main() { initialize_workspace, default_item_factory, }); - auto_update::init(db, http, cx); + auto_update::init( + cx.global::().clone(), + http, + client::ZED_SERVER_URL.clone(), + cx, + ); workspace::init(app_state.clone(), cx); journal::init(app_state.clone(), cx); theme_selector::init(app_state.clone(), cx); diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 71a99cb3b2..de785ca978 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -463,10 +463,11 @@ fn open_config_file( workspace .update(&mut cx, |workspace, cx| { - workspace.with_local_workspace(app_state, cx, |workspace, cx| { + workspace.with_local_workspace(&app_state, cx, |workspace, cx| { workspace.open_paths(vec![path.to_path_buf()], false, cx) }) }) + .await .await; Ok::<_, anyhow::Error>(()) }) @@ -480,51 +481,55 @@ fn open_log_file( ) { const MAX_LINES: usize = 1000; - workspace.with_local_workspace(app_state.clone(), cx, |_, cx| { - cx.spawn_weak(|workspace, mut cx| async move { - let (old_log, new_log) = futures::join!( - app_state.fs.load(&paths::OLD_LOG), - app_state.fs.load(&paths::LOG) - ); + workspace + .with_local_workspace(&app_state.clone(), cx, move |_, cx| { + cx.spawn_weak(|workspace, mut cx| async move { + let (old_log, new_log) = futures::join!( + app_state.fs.load(&paths::OLD_LOG), + app_state.fs.load(&paths::LOG) + ); - if let Some(workspace) = workspace.upgrade(&cx) { - let mut lines = VecDeque::with_capacity(MAX_LINES); - for line in old_log - .iter() - .flat_map(|log| log.lines()) - .chain(new_log.iter().flat_map(|log| log.lines())) - { - if lines.len() == MAX_LINES { - lines.pop_front(); + if let Some(workspace) = workspace.upgrade(&cx) { + let mut lines = VecDeque::with_capacity(MAX_LINES); + for line in old_log + .iter() + .flat_map(|log| log.lines()) + .chain(new_log.iter().flat_map(|log| log.lines())) + { + if 
lines.len() == MAX_LINES { + lines.pop_front(); + } + lines.push_back(line); } - lines.push_back(line); - } - let log = lines - .into_iter() - .flat_map(|line| [line, "\n"]) - .collect::(); + let log = lines + .into_iter() + .flat_map(|line| [line, "\n"]) + .collect::(); - workspace.update(&mut cx, |workspace, cx| { - let project = workspace.project().clone(); - let buffer = project - .update(cx, |project, cx| project.create_buffer("", None, cx)) - .expect("creating buffers on a local workspace always succeeds"); - buffer.update(cx, |buffer, cx| buffer.edit([(0..0, log)], None, cx)); + workspace.update(&mut cx, |workspace, cx| { + let project = workspace.project().clone(); + let buffer = project + .update(cx, |project, cx| project.create_buffer("", None, cx)) + .expect("creating buffers on a local workspace always succeeds"); + buffer.update(cx, |buffer, cx| buffer.edit([(0..0, log)], None, cx)); - let buffer = cx.add_model(|cx| { - MultiBuffer::singleton(buffer, cx).with_title("Log".into()) + let buffer = cx.add_model(|cx| { + MultiBuffer::singleton(buffer, cx).with_title("Log".into()) + }); + workspace.add_item( + Box::new( + cx.add_view(|cx| { + Editor::for_multibuffer(buffer, Some(project), cx) + }), + ), + cx, + ); }); - workspace.add_item( - Box::new( - cx.add_view(|cx| Editor::for_multibuffer(buffer, Some(project), cx)), - ), - cx, - ); - }); - } + } + }) + .detach(); }) .detach(); - }); } fn open_telemetry_log_file( @@ -532,7 +537,7 @@ fn open_telemetry_log_file( app_state: Arc, cx: &mut ViewContext, ) { - workspace.with_local_workspace(app_state.clone(), cx, |_, cx| { + workspace.with_local_workspace(&app_state.clone(), cx, move |_, cx| { cx.spawn_weak(|workspace, mut cx| async move { let workspace = workspace.upgrade(&cx)?; let path = app_state.client.telemetry_log_file_path()?; @@ -580,31 +585,36 @@ fn open_telemetry_log_file( Some(()) }) .detach(); - }); + }).detach(); } fn open_bundled_config_file( workspace: &mut Workspace, app_state: Arc, asset_path: &'static str, - title: &str, + title: &'static str, cx: &mut ViewContext, ) { - workspace.with_local_workspace(cx, app_state, |workspace, cx| { - let project = workspace.project().clone(); - let buffer = project.update(cx, |project, cx| { - let text = Assets::get(asset_path).unwrap().data; - let text = str::from_utf8(text.as_ref()).unwrap(); - project - .create_buffer(text, project.languages().get_language("JSON"), cx) - .expect("creating buffers on a local workspace always succeeds") - }); - let buffer = cx.add_model(|cx| MultiBuffer::singleton(buffer, cx).with_title(title.into())); - workspace.add_item( - Box::new(cx.add_view(|cx| Editor::for_multibuffer(buffer, Some(project.clone()), cx))), - cx, - ); - }); + workspace + .with_local_workspace(&app_state.clone(), cx, |workspace, cx| { + let project = workspace.project().clone(); + let buffer = project.update(cx, |project, cx| { + let text = Assets::get(asset_path).unwrap().data; + let text = str::from_utf8(text.as_ref()).unwrap(); + project + .create_buffer(text, project.languages().get_language("JSON"), cx) + .expect("creating buffers on a local workspace always succeeds") + }); + let buffer = + cx.add_model(|cx| MultiBuffer::singleton(buffer, cx).with_title(title.into())); + workspace.add_item( + Box::new( + cx.add_view(|cx| Editor::for_multibuffer(buffer, Some(project.clone()), cx)), + ), + cx, + ); + }) + .detach(); } fn schema_file_match(path: &Path) -> &Path { @@ -808,8 +818,9 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], 
cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let entries = cx.read(|cx| workspace.file_project_paths(cx)); let file1 = entries[0].clone(); @@ -928,8 +939,9 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/dir1".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); // Open a file within an existing worktree. cx.update(|cx| { @@ -1088,8 +1100,9 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); // Open a file within an existing worktree. cx.update(|cx| { @@ -1131,8 +1144,9 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; project.update(cx, |project, _| project.languages().add(rust_lang())); - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let worktree = cx.read(|cx| workspace.read(cx).worktrees(cx).next().unwrap()); // Create a new untitled buffer @@ -1221,8 +1235,9 @@ mod tests { let project = Project::test(app_state.fs.clone(), [], cx).await; project.update(cx, |project, _| project.languages().add(rust_lang())); - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); // Create a new untitled buffer cx.dispatch_action(window_id, NewFile); @@ -1275,8 +1290,9 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let entries = cx.read(|cx| workspace.file_project_paths(cx)); let file1 = entries[0].clone(); @@ -1350,8 +1366,14 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); let entries = cx.read(|cx| workspace.file_project_paths(cx)); let file1 = entries[0].clone(); @@ -1615,8 +1637,14 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); let pane = workspace.read_with(cx, |workspace, _| 
workspace.active_pane().clone()); let entries = cx.read(|cx| workspace.file_project_paths(cx)); From ddecba143f6cc99c7dd14f7ea1d71e70ccce64da Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 27 Oct 2022 16:02:14 -0700 Subject: [PATCH 128/240] Refactored workspaces API and corrected method headers + fixed bug caused by migration failures co-authored-by: kay@zed.dev --- crates/db/examples/serialize-pane.rs | 4 +--- crates/db/examples/serialize_workspace.rs | 24 +++++++---------------- crates/db/src/kvp.rs | 4 ---- crates/db/src/pane.rs | 8 +------- crates/db/src/workspace.rs | 10 +++------- 5 files changed, 12 insertions(+), 38 deletions(-) diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index fc420b866d..b0744aa604 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -14,9 +14,7 @@ fn main() -> anyhow::Result<()> { let f = File::create(file)?; drop(f); - let workspace = db.make_new_workspace::(&[]); - - db.update_worktrees(&workspace.workspace_id, &["/tmp"]); + let workspace = db.workspace_for_roots(&["/tmp"]); db.save_dock_pane(SerializedDockPane { workspace: workspace.workspace_id, diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs index 97d50bbe5b..5a3f2a2160 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -15,29 +15,19 @@ fn main() -> anyhow::Result<()> { db.write_kvp("test", "1")?; db.write_kvp("test-2", "2")?; - let workspace_1 = db.make_new_workspace::(&[]); - let workspace_2 = db.make_new_workspace::(&[]); - let workspace_3 = db.make_new_workspace::(&[]); - let workspace_4 = db.make_new_workspace::(&[]); - let workspace_5 = db.make_new_workspace::(&[]); - let workspace_6 = db.make_new_workspace::(&[]); - let workspace_7 = db.make_new_workspace::(&[]); - - // Order scrambled + sleeps added because sqlite only has 1 second resolution on - // their timestamps - db.update_worktrees(&workspace_7.workspace_id, &["/tmp2"]); + db.workspace_for_roots(&["/tmp1"]); sleep(Duration::from_secs(1)); - db.update_worktrees(&workspace_1.workspace_id, &["/tmp1"]); + db.workspace_for_roots(&["/tmp1", "/tmp2"]); sleep(Duration::from_secs(1)); - db.update_worktrees(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]); + db.workspace_for_roots(&["/tmp1", "/tmp2", "/tmp3"]); sleep(Duration::from_secs(1)); - db.update_worktrees(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]); + db.workspace_for_roots(&["/tmp2", "/tmp3"]); sleep(Duration::from_secs(1)); - db.update_worktrees(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]); + db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]); sleep(Duration::from_secs(1)); - db.update_worktrees(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]); + db.workspace_for_roots(&["/tmp2", "/tmp4"]); sleep(Duration::from_secs(1)); - db.update_worktrees(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]); + db.workspace_for_roots(&["/tmp2"]); db.write_to(file).ok(); diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index eecd0238ca..96f13d8040 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -4,14 +4,10 @@ use rusqlite::OptionalExtension; use super::Db; pub(crate) const KVP_M_1: &str = " -BEGIN TRANSACTION; - CREATE TABLE kv_store( key TEXT PRIMARY KEY, value TEXT NOT NULL ) STRICT; - -COMMIT; "; impl Db { diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 447b5eed87..23423ed6f6 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -75,8 
+75,6 @@ pub struct SerializedPane { } pub(crate) const PANE_M_1: &str = " -BEGIN TRANSACTION; - CREATE TABLE dock_panes( dock_pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, @@ -131,8 +129,6 @@ CREATE TABLE dock_items( FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE, FOREIGN KEY(item_id) REFERENCES items(item_id)ON DELETE CASCADE ) STRICT; - -COMMIT; "; #[derive(Default, Debug)] @@ -222,9 +218,7 @@ mod tests { fn test_basic_dock_pane() { let db = Db::open_in_memory(); - let workspace = db.make_new_workspace::(&[]); - - db.update_worktrees(&workspace.workspace_id, &["/tmp"]); + let workspace = db.workspace_for_roots(&["/tmp"]); db.save_dock_pane(SerializedDockPane { workspace: workspace.workspace_id, diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 0d8dae59ef..cb2d4296c1 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -17,8 +17,6 @@ use super::Db; // you might want to update some of the parsing code as well, I've left the variations in but commented // out pub(crate) const WORKSPACE_M_1: &str = " -BEGIN TRANSACTION; - CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY, timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL @@ -30,8 +28,6 @@ CREATE TABLE worktree_roots( FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE PRIMARY KEY(worktree_root, workspace_id) ) STRICT; - -COMMIT; "; #[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] @@ -68,7 +64,7 @@ impl Db { } } - pub fn make_new_workspace
<P>
(&self, worktree_roots: &[P]) -> SerializedWorkspace + fn make_new_workspace
<P>
(&self, worktree_roots: &[P]) -> SerializedWorkspace where P: AsRef + Debug, { @@ -158,7 +154,7 @@ impl Db { }); } - pub fn last_workspace_id(&self) -> Option { + fn last_workspace_id(&self) -> Option { fn logic(connection: &mut Connection) -> Result> { let mut stmt = connection .prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT 1")?; @@ -432,7 +428,7 @@ mod tests { use super::WorkspaceId; #[test] - fn test_worktree_for_roots() { + fn test_new_worktrees_for_roots() { let db = Db::open_in_memory(); // Test creation in 0 case From c105f414876d116db6a9ec311dd0071568b4241e Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 27 Oct 2022 16:37:54 -0700 Subject: [PATCH 129/240] Started working on dock panes co-authored-by: kay@zed.dev --- crates/db/examples/serialize-pane.rs | 22 +++- crates/db/src/pane.rs | 158 +++++++++++++++------------ crates/db/src/workspace.rs | 3 +- 3 files changed, 109 insertions(+), 74 deletions(-) diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index b0744aa604..9448336be9 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -14,15 +14,25 @@ fn main() -> anyhow::Result<()> { let f = File::create(file)?; drop(f); - let workspace = db.workspace_for_roots(&["/tmp"]); + let workspace_1 = db.workspace_for_roots(&["/tmp"]); + let workspace_2 = db.workspace_for_roots(&["/tmp", "/tmp2"]); + let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]); - db.save_dock_pane(SerializedDockPane { - workspace: workspace.workspace_id, + db.save_dock_pane(&SerializedDockPane { + workspace_id: workspace_1.workspace_id, anchor_position: DockAnchor::Expanded, - shown: true, + visible: true, + }); + db.save_dock_pane(&SerializedDockPane { + workspace_id: workspace_2.workspace_id, + anchor_position: DockAnchor::Bottom, + visible: true, + }); + db.save_dock_pane(&SerializedDockPane { + workspace_id: workspace_3.workspace_id, + anchor_position: DockAnchor::Right, + visible: false, }); - - let _new_workspace = db.workspace_for_roots(&["/tmp"]); db.write_to(file).ok(); diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 23423ed6f6..9a7dfd3b6e 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,9 +1,69 @@ use gpui::Axis; +use serde::{Deserialize, Serialize}; +use serde_rusqlite::to_params_named; + use crate::{items::ItemId, workspace::WorkspaceId}; use super::Db; +pub(crate) const PANE_M_1: &str = " +CREATE TABLE dock_panes( + dock_pane_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' + visible INTEGER NOT NULL, -- Boolean + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE pane_groups( + group_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + parent_group INTEGER, -- NULL indicates that this is a root node + axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE grouped_panes( + pane_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + group_id INTEGER NOT NULL, + idx INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE items( + item_id INTEGER PRIMARY KEY, + 
workspace_id INTEGER NOT NULL, + kind TEXT NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE group_items( + workspace_id INTEGER NOT NULL, + pane_id INTEGER NOT NULL, + item_id INTEGER NOT NULL, + idx INTEGER NOT NULL, + PRIMARY KEY (workspace_id, pane_id, item_id) + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(pane_id) REFERENCES grouped_panes(pane_id) ON DELETE CASCADE, + FOREIGN KEY(item_id) REFERENCES items(item_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE dock_items( + workspace_id INTEGER NOT NULL, + dock_pane_id INTEGER NOT NULL, + item_id INTEGER NOT NULL, + idx INTEGER NOT NULL, + PRIMARY KEY (workspace_id, dock_pane_id, item_id) + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE, + FOREIGN KEY(item_id) REFERENCES items(item_id)ON DELETE CASCADE +) STRICT; +"; + // We have an many-branched, unbalanced tree with three types: // Pane Groups // Panes @@ -74,64 +134,7 @@ pub struct SerializedPane { children: Vec, } -pub(crate) const PANE_M_1: &str = " -CREATE TABLE dock_panes( - dock_pane_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' - shown INTEGER NOT NULL, -- Boolean - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE pane_groups( - group_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - parent_group INTEGER, -- NULL indicates that this is a root node - axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE grouped_panes( - pane_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - group_id INTEGER NOT NULL, - idx INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE items( - item_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - kind TEXT NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE group_items( - workspace_id INTEGER NOT NULL, - pane_id INTEGER NOT NULL, - item_id INTEGER NOT NULL, - idx INTEGER NOT NULL, - PRIMARY KEY (workspace_id, pane_id, item_id) - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(pane_id) REFERENCES grouped_panes(pane_id) ON DELETE CASCADE, - FOREIGN KEY(item_id) REFERENCES items(item_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE dock_items( - workspace_id INTEGER NOT NULL, - dock_pane_id INTEGER NOT NULL, - item_id INTEGER NOT NULL, - idx INTEGER NOT NULL, - PRIMARY KEY (workspace_id, dock_pane_id, item_id) - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE, - FOREIGN KEY(item_id) REFERENCES items(item_id)ON DELETE CASCADE -) STRICT; -"; - -#[derive(Default, Debug)] +#[derive(Default, Debug, PartialEq, Eq, Deserialize, Serialize)] pub enum DockAnchor { #[default] Bottom, @@ -139,11 +142,11 @@ pub enum DockAnchor { Expanded, } -#[derive(Default, Debug)] +#[derive(Default, Debug, PartialEq, Eq, 
Deserialize, Serialize)] pub struct SerializedDockPane { - pub workspace: WorkspaceId, + pub workspace_id: WorkspaceId, pub anchor_position: DockAnchor, - pub shown: bool, + pub visible: bool, } impl Db { @@ -204,7 +207,24 @@ impl Db { unimplemented!() } - pub fn save_dock_pane(&self, _dock_pane: SerializedDockPane) {} + pub fn save_dock_pane(&self, dock_pane: &SerializedDockPane) { + to_params_named(dock_pane) + .map_err(|err| dbg!(err)) + .ok() + .zip(self.real()) + .map(|(params, db)| { + // TODO: overwrite old dock panes if need be + let query = "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (:workspace_id, :anchor_position, :visible);"; + db.connection + .lock() + .execute(query, params.to_slice().as_slice()) + .map(|_| ()) // Eat the return value + .unwrap_or_else(|err| { + dbg!(&err); + log::error!("Failed to insert new workspace into DB: {}", err); + }) + }); + } } #[cfg(test)] @@ -220,12 +240,16 @@ mod tests { let workspace = db.workspace_for_roots(&["/tmp"]); - db.save_dock_pane(SerializedDockPane { - workspace: workspace.workspace_id, + let dock_pane = SerializedDockPane { + workspace_id: workspace.workspace_id, anchor_position: DockAnchor::Expanded, - shown: true, - }); + visible: true, + }; - let _new_workspace = db.workspace_for_roots(&["/tmp"]); + db.save_dock_pane(&dock_pane); + + let new_workspace = db.workspace_for_roots(&["/tmp"]); + + assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane); } } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index cb2d4296c1..d7532b684e 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,5 +1,6 @@ use anyhow::Result; use rusqlite::{params, Connection, OptionalExtension}; +use serde::{Deserialize, Serialize}; use std::{ ffi::OsStr, @@ -30,7 +31,7 @@ CREATE TABLE worktree_roots( ) STRICT; "; -#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, Default, Deserialize, Serialize)] pub struct WorkspaceId(i64); #[derive(Default, Debug)] From e6ca0adbcba8e1724703d690622405b389b30b35 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 31 Oct 2022 12:47:13 -0700 Subject: [PATCH 130/240] Fixed failing serialization issues --- crates/db/Cargo.toml | 1 + crates/db/examples/serialize-pane.rs | 6 ++- crates/db/examples/serialize_workspace.rs | 9 +--- crates/db/src/pane.rs | 6 +-- crates/db/src/workspace.rs | 60 ++++++++++++++++------ crates/db/test.db | Bin 0 -> 57344 bytes 6 files changed, 54 insertions(+), 28 deletions(-) create mode 100644 crates/db/test.db diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 9fad1aa39a..a2ac2a9fc5 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -26,3 +26,4 @@ serde_rusqlite = "0.31.0" [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } tempdir = { version = "0.3.7" } +env_logger = "0.9.1" \ No newline at end of file diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index 9448336be9..59ad60a6f4 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -5,6 +5,8 @@ use db::pane::{DockAnchor, SerializedDockPane}; const TEST_FILE: &'static str = "test-db.db"; fn main() -> anyhow::Result<()> { + env_logger::init(); + let db = db::Db::open_in_memory(); if db.real().is_none() { return Err(anyhow::anyhow!("Migrations failed")); @@ -17,6 +19,8 @@ fn main() -> anyhow::Result<()> { let workspace_1 = db.workspace_for_roots(&["/tmp"]); let workspace_2 = 
db.workspace_for_roots(&["/tmp", "/tmp2"]); let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]); + dbg!(&workspace_1, &workspace_2, &workspace_3); + db.write_to(file).ok(); db.save_dock_pane(&SerializedDockPane { workspace_id: workspace_1.workspace_id, @@ -34,7 +38,7 @@ fn main() -> anyhow::Result<()> { visible: false, }); - db.write_to(file).ok(); + // db.write_to(file).ok(); println!("Wrote database!"); diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs index 5a3f2a2160..4010c77976 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -1,8 +1,9 @@ -use std::{fs::File, path::Path, thread::sleep, time::Duration}; +use std::{fs::File, path::Path}; const TEST_FILE: &'static str = "test-db.db"; fn main() -> anyhow::Result<()> { + env_logger::init(); let db = db::Db::open_in_memory(); if db.real().is_none() { return Err(anyhow::anyhow!("Migrations failed")); @@ -16,17 +17,11 @@ fn main() -> anyhow::Result<()> { db.write_kvp("test-2", "2")?; db.workspace_for_roots(&["/tmp1"]); - sleep(Duration::from_secs(1)); db.workspace_for_roots(&["/tmp1", "/tmp2"]); - sleep(Duration::from_secs(1)); db.workspace_for_roots(&["/tmp1", "/tmp2", "/tmp3"]); - sleep(Duration::from_secs(1)); db.workspace_for_roots(&["/tmp2", "/tmp3"]); - sleep(Duration::from_secs(1)); db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]); - sleep(Duration::from_secs(1)); db.workspace_for_roots(&["/tmp2", "/tmp4"]); - sleep(Duration::from_secs(1)); db.workspace_for_roots(&["/tmp2"]); db.write_to(file).ok(); diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 9a7dfd3b6e..51d8e5ad5b 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -204,12 +204,11 @@ impl Db { } pub fn get_dock_pane(&self, _workspace: WorkspaceId) -> Option { - unimplemented!() + None } pub fn save_dock_pane(&self, dock_pane: &SerializedDockPane) { to_params_named(dock_pane) - .map_err(|err| dbg!(err)) .ok() .zip(self.real()) .map(|(params, db)| { @@ -220,8 +219,7 @@ impl Db { .execute(query, params.to_slice().as_slice()) .map(|_| ()) // Eat the return value .unwrap_or_else(|err| { - dbg!(&err); - log::error!("Failed to insert new workspace into DB: {}", err); + log::error!("Failed to insert new dock pane into DB: {}", err); }) }); } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index d7532b684e..5d84ecfccb 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,4 +1,5 @@ use anyhow::Result; + use rusqlite::{params, Connection, OptionalExtension}; use serde::{Deserialize, Serialize}; @@ -8,6 +9,7 @@ use std::{ os::unix::prelude::OsStrExt, path::{Path, PathBuf}, sync::Arc, + time::{SystemTime, UNIX_EPOCH}, }; use crate::pane::SerializedDockPane; @@ -20,7 +22,7 @@ use super::Db; pub(crate) const WORKSPACE_M_1: &str = " CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY, - timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL + last_opened_timestamp INTEGER NOT NULL ) STRICT; CREATE TABLE worktree_roots( @@ -77,12 +79,18 @@ impl Db { P: AsRef + Debug, { let tx = connection.transaction()?; - tx.execute("INSERT INTO workspaces DEFAULT VALUES", [])?; + + tx.execute( + "INSERT INTO workspaces(last_opened_timestamp) VALUES (?)", + [current_millis()?], + )?; let id = WorkspaceId(tx.last_insert_rowid()); update_worktree_roots(&tx, &id, worktree_roots)?; + tx.commit()?; + Ok(SerializedWorkspace { workspace_id: id, dock_pane: None, @@ -116,7 +124,7 @@ impl Db { match 
get_workspace_id(worktree_roots, &lock) { Ok(workspace_id) => workspace_id, Err(err) => { - log::error!("Failed ot get workspace_id: {}", err); + log::error!("Failed to get workspace_id: {}", err); None } } @@ -135,15 +143,26 @@ impl Db { where P: AsRef + Debug, { + fn logic
<P>
( + connection: &mut Connection, + workspace_id: &WorkspaceId, + worktree_roots: &[P], + ) -> Result<()> + where + P: AsRef + Debug, + { + let tx = connection.transaction()?; + update_worktree_roots(&tx, workspace_id, worktree_roots)?; + tx.commit()?; + Ok(()) + } + self.real().map(|db| { let mut lock = db.connection.lock(); - let tx = lock.transaction(); - - match tx.map(|tx| update_worktree_roots(&tx, workspace_id, worktree_roots)) { + match logic(&mut lock, workspace_id, worktree_roots) { Ok(_) => {} Err(err) => { - dbg!(&err); log::error!( "Failed to update the worktree roots for {:?}, roots: {:?}, error: {}", workspace_id, @@ -157,8 +176,9 @@ impl Db { fn last_workspace_id(&self) -> Option { fn logic(connection: &mut Connection) -> Result> { - let mut stmt = connection - .prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT 1")?; + let mut stmt = connection.prepare( + "SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT 1", + )?; Ok(stmt .query_row([], |row| Ok(WorkspaceId(row.get(0)?))) @@ -189,7 +209,7 @@ impl Db { let tx = connection.transaction()?; let result = { let mut stmt = tx.prepare( - "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", + "SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT ?", )?; let workspace_ids = stmt @@ -234,6 +254,12 @@ impl Db { } } +fn current_millis() -> Result { + // SQLite only supports u64 integers, which means this code will trigger + // undefined behavior in 584 million years. It's probably fine. + Ok(SystemTime::now().duration_since(UNIX_EPOCH)?.as_millis() as u64) +} + fn update_worktree_roots
<P>
( connection: &Connection, workspace_id: &WorkspaceId, @@ -271,8 +297,8 @@ where } connection.execute( - "UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?", - [workspace_id.0], + "UPDATE workspaces SET last_opened_timestamp = ? WHERE workspace_id = ?", + params![current_millis()?, workspace_id.0], )?; Ok(()) @@ -440,13 +466,17 @@ mod tests { let workspace_1 = db.workspace_for_roots::(&[]); assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); - sleep(Duration::from_secs(1)); + // Ensure the timestamps are different + sleep(Duration::from_millis(20)); db.make_new_workspace::(&[]); // Test pulling another value from recent workspaces let workspace_2 = db.workspace_for_roots::(&[]); assert_eq!(workspace_2.workspace_id, WorkspaceId(2)); + // Ensure the timestamps are different + sleep(Duration::from_millis(20)); + // Test creating a new workspace that doesn't exist already let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); @@ -470,6 +500,7 @@ mod tests { db.make_new_workspace::(&[]); //ID 2 db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); + db.write_to("test.db").unwrap(); // Sanity check assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1))); @@ -584,9 +615,6 @@ mod tests { db.update_worktrees(workspace_id, entries); } - // Make sure the timestamp updates - sleep(Duration::from_secs(1)); - // Execute the update db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); diff --git a/crates/db/test.db b/crates/db/test.db new file mode 100644 index 0000000000000000000000000000000000000000..b3a78a995a034d7f6dde834e12a47313f8bde62c GIT binary patch literal 57344 zcmWFz^vNtqRY=P(%1ta$FlG>7U}R))P*7lCVBlb2VBljw01%%A!DV1%U|?WI;$vfS zFzCfg@$&y*;AMWyz#qqHx2>T|LixJAG4mqrgcSxT-{xR6#V={6#PPce00D{z*-6t^YADES&7?#%#;coMh66W`Z@+h zDtNm_Drg`(MF-+gsI58*PzyA{4s!Dka`klg1L;E1q^S_(>gF2c>gVhljPO`7ni_vU z1s7Kz*AQ0)XUAY?M;BL!FOdyJ_A!c~aEW3KI5R#oMN@$YLt$P(b|grwSOY|YOu;aO zOH&~@B*@b_#F&>&++C58Au}%}wW7E%C$l6qKC!eUAIy&jI|XD&al9dd&DR{x#V)R_ z%-B{6P8;b(`K1Mr#DT=*MoAq=5=18tXdK~)VUV+^oHC#m(mPQ=twl)>;P6UKi3iCQ zYk){lJW?k$V44C?4yN2};-1R5k^|ULkcf^qMB?x?CUdfji;FWhtAY~*C~Atq1Q$vo z01H8)AH+nCJ*4~!$u-D%7Do!m&df_u2yu-FK`T~}GaRxpC;<$YAef8cfzy=5!7i?@ z&e#em62Sogiy{;bCyK98q#(YB#wXlg?2s}>6P%=wv?DP&Py!lB0uszHdngL$g2bZKypnjZSulfi6m)gL z^_W6tUP@+iVo7STLP6)C(k(ig9kzW*FkYAixl9``}Gw9Ww@=Hqc zbHU*rl$oAU0%o~Z6eQ-Qq^3Z^zbvyjGbtw(7Wat41{8x%`T04iiFve$D0o(A5@ul+ z*VJTeP6P)lC=HYprKZM%l2|b+j};|&QRN^(j3TDs4S~@R7!85Z5Eu=C(GVC7fzc2c4S~@R7!85Z5Eu=C(GVD}A;1Ol z*l7PB6bhqwGz3ONU^E0qLtr!nMnhmU1V%$(Gz3ONU^E0qLtr!nhHVId=Kq=aUor51 z<$pD7Jvr*J(GVC7fzc2c4S~@R7!85Z5Eu=C(GVC7fzc2c4S~@R7%?Hh$;8aS%Gjbm z<$E?KBaE2?>HoiF;Q!A5cEkk5s7FUbU^E0qLtr!nMnhmU1V%$(Gz3ONU^E0qLtr!n zMnhmYhX4l?vlyp-Np67=J0r6Qm<3w@&&2-*)c=1ooV`Bkx6u$74S~@R7!85Z5Eu=C z(GVC7fzc2c4S~@R7!85Z5Ev;Tz{MiQ$q4TLGjXtpaDw~)p!t7>(f$7;B?*lBb2J1- zLtr!nMnhmU1V%$(Gz3ONU^E0qLtr!nMnhnPhQMh5e}qQ-sJBN$U^E0qLtr!nMnhmU z1V%$(Gz3ONU^E0qLtr!nMpOul_WwszbdP#=Gz3ONU^E0qLtr!nMnhmU1V%$(Gz3ON zU^E0qLtuo4z-a$}ghu?Rw?{)@Gz3ONU^E0qLtr!nMnhmU1V%$(Gz3ONU^E0qR0xdr z|3_4Gk9u}A1V%$(Gz3ONU^E0qLtr!nMnhmU1V%$(Gz3ONV1$MM==}fD^Z!O@7Y^kB)}GXb6mkz-S1J zhQMeDjE2By2#kinXb6mkz-S1J;1C#{{~y7TKkD_-5Eu=C(GVC7fzc2c4S~@R7!85Z Q5Eu=C(GVC7fe{k|0Bft@^Z)<= literal 0 HcmV?d00001 From 7744c9ba45ed45886f760a6862c1a3819b9a2877 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 31 Oct 2022 14:11:55 -0700 Subject: [PATCH 131/240] Abandoning rusqlite, the API is miserable --- crates/db/examples/serialize-pane.rs | 42 +++++++++------- 
crates/db/src/pane.rs | 71 ++++++++++++++++++++++++--- crates/db/src/workspace.rs | 6 +++ crates/db/test.db | Bin 57344 -> 0 bytes 4 files changed, 94 insertions(+), 25 deletions(-) delete mode 100644 crates/db/test.db diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index 59ad60a6f4..e828f007d1 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -19,27 +19,31 @@ fn main() -> anyhow::Result<()> { let workspace_1 = db.workspace_for_roots(&["/tmp"]); let workspace_2 = db.workspace_for_roots(&["/tmp", "/tmp2"]); let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]); - dbg!(&workspace_1, &workspace_2, &workspace_3); + + db.save_dock_pane( + workspace_1.workspace_id, + &SerializedDockPane { + anchor_position: DockAnchor::Expanded, + visible: true, + }, + ); + db.save_dock_pane( + workspace_2.workspace_id, + &SerializedDockPane { + anchor_position: DockAnchor::Bottom, + visible: true, + }, + ); + db.save_dock_pane( + workspace_3.workspace_id, + &SerializedDockPane { + anchor_position: DockAnchor::Right, + visible: false, + }, + ); + db.write_to(file).ok(); - db.save_dock_pane(&SerializedDockPane { - workspace_id: workspace_1.workspace_id, - anchor_position: DockAnchor::Expanded, - visible: true, - }); - db.save_dock_pane(&SerializedDockPane { - workspace_id: workspace_2.workspace_id, - anchor_position: DockAnchor::Bottom, - visible: true, - }); - db.save_dock_pane(&SerializedDockPane { - workspace_id: workspace_3.workspace_id, - anchor_position: DockAnchor::Right, - visible: false, - }); - - // db.write_to(file).ok(); - println!("Wrote database!"); Ok(()) diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 51d8e5ad5b..0a1812c60c 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,7 +1,9 @@ + use gpui::Axis; +use rusqlite::{OptionalExtension, Connection}; use serde::{Deserialize, Serialize}; -use serde_rusqlite::to_params_named; +use serde_rusqlite::{from_row, to_params_named}; use crate::{items::ItemId, workspace::WorkspaceId}; @@ -134,6 +136,10 @@ pub struct SerializedPane { children: Vec, } + +//********* CURRENTLY IN USE TYPES: ********* + + #[derive(Default, Debug, PartialEq, Eq, Deserialize, Serialize)] pub enum DockAnchor { #[default] @@ -144,11 +150,29 @@ pub enum DockAnchor { #[derive(Default, Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct SerializedDockPane { - pub workspace_id: WorkspaceId, pub anchor_position: DockAnchor, pub visible: bool, } +impl SerializedDockPane { + pub fn to_row(&self, workspace: WorkspaceId) -> DockRow { + DockRow { workspace_id: workspace, anchor_position: self.anchor_position, visible: self.visible } + } +} + +#[derive(Default, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub(crate) struct DockRow { + workspace_id: WorkspaceId, + anchor_position: DockAnchor, + visible: bool, +} + +impl DockRow { + pub fn to_pane(&self) -> SerializedDockPane { + SerializedDockPane { anchor_position: self.anchor_position, visible: self.visible } + } +} + impl Db { pub fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { let axis = self.get_pane_group_axis(pane_group_id); @@ -203,17 +227,52 @@ impl Db { unimplemented!(); } - pub fn get_dock_pane(&self, _workspace: WorkspaceId) -> Option { - None + pub fn get_dock_pane(&self, workspace: WorkspaceId) -> Option { + fn logic(conn: &Connection, workspace: WorkspaceId) -> anyhow::Result> { + + let mut stmt = conn.prepare("SELECT workspace_id, anchor_position, visible FROM 
dock_panes WHERE workspace_id = ?")?; + + let dock_panes = stmt.query_row([workspace.raw_id()], |row_ref| from_row::<DockRow>(row_ref)).optional(); + + let mut dock_panes_iter = stmt.query_and_then([workspace.raw_id()], from_row::<DockRow>)?; + let dock_pane = dock_panes_iter + .next() + .and_then(|dock_row| + dock_row + .ok() + .map(|dock_row| dock_row.to_pane())); + + Ok(dock_pane) + } + + self.real() + .map(|db| { + let lock = db.connection.lock(); + + match logic(&lock, workspace) { + Ok(dock_pane) => dock_pane, + Err(err) => { + log::error!("Failed to get the dock pane: {}", err); + None + }, + } + }) + .unwrap_or(None) + } - pub fn save_dock_pane(&self, dock_pane: &SerializedDockPane) { - to_params_named(dock_pane) + pub fn save_dock_pane(&self, workspace: WorkspaceId, dock_pane: &SerializedDockPane) { + to_params_named(dock_pane.to_row(workspace)) + .map_err(|err| { + log::error!("Failed to parse params for the dock row: {}", err); + err + }) .ok() .zip(self.real()) .map(|(params, db)| { // TODO: overwrite old dock panes if need be let query = "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (:workspace_id, :anchor_position, :visible);"; + db.connection .lock() .execute(query, params.to_slice().as_slice()) diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 5d84ecfccb..2dc988a7e3 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -36,6 +36,12 @@ CREATE TABLE worktree_roots( #[derive(Debug, PartialEq, Eq, Copy, Clone, Default, Deserialize, Serialize)] pub struct WorkspaceId(i64); +impl WorkspaceId { + pub fn raw_id(&self) -> i64 { + self.0 + } +} + #[derive(Default, Debug)] pub struct SerializedWorkspace { pub workspace_id: WorkspaceId, diff --git a/crates/db/test.db b/crates/db/test.db deleted file mode 100644 index b3a78a995a034d7f6dde834e12a47313f8bde62c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 57344 zcmWFz^vNtqRY=P(%1ta$FlG>7U}R))P*7lCVBlb2VBljw01%%A!DV1%U|?WI;$vfS zFzCfg@$&y*;AMWyz#qqHx2>T|LixJAG4mqrgcSxT-{xR6#V={6#PPce00D{z*-6t^YADES&7?#%#;coMh66W`Z@+h zDtNm_Drg`(MF-+gsI58*PzyA{4s!Dka`klg1L;E1q^S_(>gF2c>gVhljPO`7ni_vU z1s7Kz*AQ0)XUAY?M;BL!FOdyJ_A!c~aEW3KI5R#oMN@$YLt$P(b|grwSOY|YOu;aO zOH&~@B*@b_#F&>&++C58Au}%}wW7E%C$l6qKC!eUAIy&jI|XD&al9dd&DR{x#V)R_ z%-B{6P8;b(`K1Mr#DT=*MoAq=5=18tXdK~)VUV+^oHC#m(mPQ=twl)>;P6UKi3iCQ zYk){lJW?k$V44C?4yN2};-1R5k^|ULkcf^qMB?x?CUdfji;FWhtAY~*C~Atq1Q$vo z01H8)AH+nCJ*4~!$u-D%7Do!m&df_u2yu-FK`T~}GaRxpC;<$YAef8cfzy=5!7i?@ z&e#em62Sogiy{;bCyK98q#(YB#wXlg?2s}>6P%=wv?DP&Py!lB0uszHdngL$g2bZKypnjZSulfi6m)gL z^_W6tUP@+iVo7STLP6)C(k(ig9kzW*FkYAixl9``}Gw9Ww@=Hqc zbHU*rl$oAU0%o~Z6eQ-Qq^3Z^zbvyjGbtw(7Wat41{8x%`T04iiFve$D0o(A5@ul+ z*VJTeP6P)lC=HYprKZM%l2|b+j};|&QRN^(j3TDs4S~@R7!85Z5Eu=C(GVC7fzc2c4S~@R7!85Z5Eu=C(GVD}A;1Ol z*l7PB6bhqwGz3ONU^E0qLtr!nMnhmU1V%$(Gz3ONU^E0qLtr!nhHVId=Kq=aUor51 z<$pD7Jvr*J(GVC7fzc2c4S~@R7!85Z5Eu=C(GVC7fzc2c4S~@R7%?Hh$;8aS%Gjbm z<$E?KBaE2?>HoiF;Q!A5cEkk5s7FUbU^E0qLtr!nMnhmU1V%$(Gz3ONU^E0qLtr!n zMnhmYhX4l?vlyp-Np67=J0r6Qm<3w@&&2-*)c=1ooV`Bkx6u$74S~@R7!85Z5Eu=C z(GVC7fzc2c4S~@R7!85Z5Ev;Tz{MiQ$q4TLGjXtpaDw~)p!t7>(f$7;B?*lBb2J1- zLtr!nMnhmU1V%$(Gz3ONU^E0qLtr!nMnhnPhQMh5e}qQ-sJBN$U^E0qLtr!nMnhmU z1V%$(Gz3ONU^E0qLtr!nMpOul_WwszbdP#=Gz3ONU^E0qLtr!nMnhmU1V%$(Gz3ON zU^E0qLtuo4z-a$}ghu?Rw?{)@Gz3ONU^E0qLtr!nMnhmU1V%$(Gz3ONU^E0qR0xdr z|3_4Gk9u}A1V%$(Gz3ONU^E0qLtr!nMnhmU1V%$(Gz3ONV1$MM==}fD^Z!O@7Y^kB)}GXb6mkz-S1J zhQMeDjE2By2#kinXb6mkz-S1J;1C#{{~y7TKkD_-5Eu=C(GVC7fzc2c4S~@R7!85Z Q5Eu=C(GVC7fe{k|0Bft@^Z)<= From e3fdfe02e575b271204368168137a1526d398e48 Mon Sep 17 
00:00:00 2001 From: Kay Simmons Date: Tue, 1 Nov 2022 13:15:58 -0700 Subject: [PATCH 132/240] WIP switching to sqlez --- Cargo.lock | 712 +++++++++++++++++++------------------ crates/db/Cargo.toml | 6 +- crates/db/src/db.rs | 133 ++----- crates/db/src/workspace.rs | 78 ++-- 4 files changed, 428 insertions(+), 501 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2cc8063ca4..e2165c0941 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,7 +8,7 @@ version = "0.1.0" dependencies = [ "auto_update", "editor", - "futures 0.3.24", + "futures 0.3.25", "gpui", "language", "project", @@ -45,16 +45,16 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "0.7.19" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] @@ -133,9 +133,12 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.65" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602" +checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" +dependencies = [ + "backtrace", +] [[package]] name = "arrayref" @@ -183,9 +186,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14485364214912d3b19cc3435dde4df66065127f05fa0d75c712f36f12c2f28" +checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" dependencies = [ "concurrent-queue", "event-listener", @@ -220,15 +223,15 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "871f9bb5e0a22eeb7e8cf16641feb87c9dc67032ccf8ff49e772eb9941d3a965" +checksum = "17adb73da160dfb475c183343c8cccd80721ea5a605d3eb57125f0a7b7a92d0b" dependencies = [ + "async-lock", "async-task", "concurrent-queue", "fastrand", "futures-lite", - "once_cell", "slab", ] @@ -246,31 +249,32 @@ dependencies = [ [[package]] name = "async-io" -version = "1.9.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83e21f3a490c72b3b0cf44962180e60045de2925d8dff97918f7ee43c8f637c7" +checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" dependencies = [ + "async-lock", "autocfg 1.1.0", "concurrent-queue", "futures-lite", "libc", "log", - "once_cell", "parking", "polling", "slab", "socket2", "waker-fn", - "winapi 0.3.9", + "windows-sys 0.42.0", ] [[package]] name = "async-lock" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97a171d191782fba31bb902b14ad94e24a68145032b7eedf871ab0bc0d077b6" +checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" dependencies = [ "event-listener", + "futures-lite", ] [[package]] @@ -290,26 +294,26 @@ name = "async-pipe" version = "0.1.3" source = "git+https://github.com/zed-industries/async-pipe-rs?rev=82d00a04211cf4e1236029aa03e6b6ce2a74c553#82d00a04211cf4e1236029aa03e6b6ce2a74c553" dependencies = [ - 
"futures 0.3.24", + "futures 0.3.25", "log", ] [[package]] name = "async-process" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02111fd8655a613c25069ea89fc8d9bb89331fa77486eb3bc059ee757cfa481c" +checksum = "6381ead98388605d0d9ff86371043b5aa922a3905824244de40dc263a14fcba4" dependencies = [ "async-io", + "async-lock", "autocfg 1.1.0", "blocking", "cfg-if 1.0.0", "event-listener", "futures-lite", "libc", - "once_cell", "signal-hook", - "winapi 0.3.9", + "windows-sys 0.42.0", ] [[package]] @@ -364,9 +368,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.57" +version = "0.1.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f" +checksum = "31e6e93155431f3931513b243d371981bb2770112b370c82745a1d19d2f99364" dependencies = [ "proc-macro2", "quote", @@ -462,15 +466,15 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.16" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e3356844c4d6a6d6467b8da2cffb4a2820be256f50a3a386c9d152bab31043" +checksum = "acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43" dependencies = [ "async-trait", "axum-core", "base64", "bitflags", - "bytes 1.2.1", + "bytes 1.3.0", "futures-util", "headers", "http", @@ -485,7 +489,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sha-1 0.10.0", + "sha-1 0.10.1", "sync_wrapper", "tokio", "tokio-tungstenite", @@ -497,12 +501,12 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f0c0a60006f2a293d82d571f635042a72edf927539b7685bd62d361963839b" +checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc" dependencies = [ "async-trait", - "bytes 1.2.1", + "bytes 1.3.0", "futures-util", "http", "http-body", @@ -518,7 +522,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69034b3b0fd97923eee2ce8a47540edb21e07f48f87f67d44bb4271cec622bdb" dependencies = [ "axum", - "bytes 1.2.1", + "bytes 1.3.0", "futures-util", "http", "mime", @@ -549,15 +553,15 @@ dependencies = [ [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64ct" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2b2456fd614d856680dcd9fcc660a51a820fa09daef2e49772b56a193c8474" +checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" [[package]] name = "bincode" @@ -623,16 +627,16 @@ dependencies = [ [[package]] name = "blocking" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6ccb65d468978a086b69884437ded69a90faab3bbe6e67f242173ea728acccc" +checksum = "3c67b173a56acffd6d2326fb7ab938ba0b00a71480e14902b2591c87bc5741e8" dependencies = [ "async-channel", + "async-lock", "async-task", "atomic-waker", "fastrand", "futures-lite", - "once_cell", ] [[package]] @@ -674,15 +678,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.11.0" +version = "3.11.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d" +checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" [[package]] name = "bytemuck" -version = "1.12.1" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f5715e491b5a1598fc2bef5a606847b5dc1d48ea625bd3c02c00de8285591da" +checksum = "aaa3a8d9a1ca92e282c96a32d6511b695d7d994d1d102ba85d279f9b2756947f" [[package]] name = "byteorder" @@ -702,15 +706,9 @@ dependencies = [ [[package]] name = "bytes" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" - -[[package]] -name = "cache-padded" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" +checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" [[package]] name = "call" @@ -720,7 +718,7 @@ dependencies = [ "async-broadcast", "client", "collections", - "futures 0.3.24", + "futures 0.3.25", "gpui", "live_kit_client", "media", @@ -803,9 +801,9 @@ checksum = "a2698f953def977c68f935bb0dfa959375ad4638570e969e2f1e9f433cbf1af6" [[package]] name = "cc" -version = "1.0.73" +version = "1.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4" dependencies = [ "jobserver", ] @@ -833,15 +831,15 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1" +checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" dependencies = [ "iana-time-zone", "js-sys", "num-integer", "num-traits", - "time 0.1.44", + "time 0.1.45", "wasm-bindgen", "winapi 0.3.9", ] @@ -889,9 +887,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.22" +version = "3.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86447ad904c7fb335a790c9d7fe3d0d971dc523b8ccd1561a520de9a85302750" +checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" dependencies = [ "atty", "bitflags", @@ -901,7 +899,7 @@ dependencies = [ "once_cell", "strsim 0.10.0", "termcolor", - "textwrap 0.15.1", + "textwrap 0.16.0", ] [[package]] @@ -931,7 +929,7 @@ name = "cli" version = "0.1.0" dependencies = [ "anyhow", - "clap 3.2.22", + "clap 3.2.23", "core-foundation", "core-services", "dirs 3.0.2", @@ -949,7 +947,7 @@ dependencies = [ "async-tungstenite", "collections", "db", - "futures 0.3.24", + "futures 0.3.25", "gpui", "image", "isahc", @@ -965,11 +963,11 @@ dependencies = [ "sum_tree", "tempfile", "thiserror", - "time 0.3.15", + "time 0.3.17", "tiny_http", "url", "util", - "uuid 1.2.1", + "uuid 1.2.2", ] [[package]] @@ -981,9 +979,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.48" +version = "0.1.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" +checksum = "db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c" dependencies = [ "cc", ] @@ -1038,7 
+1036,7 @@ dependencies = [ "axum-extra", "base64", "call", - "clap 3.2.22", + "clap 3.2.23", "client", "collections", "ctor", @@ -1046,7 +1044,7 @@ dependencies = [ "env_logger", "envy", "fs", - "futures 0.3.24", + "futures 0.3.25", "git", "gpui", "hyper", @@ -1071,7 +1069,7 @@ dependencies = [ "sha-1 0.9.8", "sqlx", "theme", - "time 0.3.15", + "time 0.3.17", "tokio", "tokio-tungstenite", "toml", @@ -1095,7 +1093,7 @@ dependencies = [ "clock", "collections", "editor", - "futures 0.3.24", + "futures 0.3.25", "fuzzy", "gpui", "log", @@ -1144,11 +1142,11 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "1.2.4" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af4780a44ab5696ea9e28294517f1fffb421a83a25af521333c838635509db9c" +checksum = "bd7bef69dc86e3c610e4e7aed41035e2a7ed12e72dd7530f61327a6579a4390b" dependencies = [ - "cache-padded", + "crossbeam-utils 0.8.14", ] [[package]] @@ -1375,7 +1373,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.14", ] [[package]] @@ -1386,30 +1384,30 @@ checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.14", ] [[package]] name = "crossbeam-epoch" -version = "0.9.11" +version = "0.9.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" +checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" dependencies = [ "autocfg 1.1.0", "cfg-if 1.0.0", - "crossbeam-utils 0.8.12", - "memoffset", + "crossbeam-utils 0.8.14", + "memoffset 0.7.1", "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.3.6" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd42583b04998a5363558e5f9291ee5a5ff6b49944332103f251e7479a82aa7" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.14", ] [[package]] @@ -1425,9 +1423,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" +checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" dependencies = [ "cfg-if 1.0.0", ] @@ -1454,9 +1452,9 @@ dependencies = [ [[package]] name = "ctor" -version = "0.1.23" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdffe87e1d521a10f9696f833fe502293ea446d7f256c06128293a4119bdf4cb" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ "quote", "syn", @@ -1479,9 +1477,9 @@ dependencies = [ [[package]] name = "curl-sys" -version = "0.4.56+curl-7.83.1" +version = "0.4.59+curl-7.86.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6093e169dd4de29e468fa649fbae11cdcd5551c81fe5bf1b0677adad7ef3d26f" +checksum = "6cfce34829f448b08f55b7db6d0009e23e2e86a34e8c2b366269bf5799b4a407" dependencies = [ "cc", "libc", @@ -1495,9 +1493,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.79" +version = "1.0.83" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f83d0ebf42c6eafb8d7c52f7e5f2d3003b89c7aa4fd2b79229209459a849af8" +checksum = "bdf07d07d6531bfcdbe9b8b739b104610c6508dcc4d63b410585faf338241daf" dependencies = [ "cc", "cxxbridge-flags", @@ -1507,9 +1505,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.79" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07d050484b55975889284352b0ffc2ecbda25c0c55978017c132b29ba0818a86" +checksum = "d2eb5b96ecdc99f72657332953d4d9c50135af1bac34277801cc3937906ebd39" dependencies = [ "cc", "codespan-reporting", @@ -1522,15 +1520,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.79" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d2199b00553eda8012dfec8d3b1c75fce747cf27c169a270b3b99e3448ab78" +checksum = "ac040a39517fd1674e0f32177648334b0f4074625b5588a64519804ba0553b12" [[package]] name = "cxxbridge-macro" -version = "1.0.79" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb67a6de1f602736dd7eaead0080cf3435df806c61b24b13328db128c58868f" +checksum = "1362b0ddcfc4eb0a1f57b68bd77dd99f0e826958a96abd0ae9bd092e114ffed6" dependencies = [ "proc-macro2", "quote", @@ -1553,14 +1551,13 @@ dependencies = [ "anyhow", "async-trait", "collections", + "env_logger", "gpui", + "indoc", "lazy_static", "log", "parking_lot 0.11.2", - "rusqlite", - "rusqlite_migration", - "serde", - "serde_rusqlite", + "sqlez", "tempdir", ] @@ -1576,12 +1573,13 @@ dependencies = [ [[package]] name = "dhat" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0684eaa19a59be283a6f99369917b679bd4d1d06604b2eb2e2f87b4bbd67668d" +checksum = "4f2aaf837aaf456f6706cb46386ba8dffd4013a757e36f4ea05c20dd46b209a3" dependencies = [ "backtrace", "lazy_static", + "mintex", "parking_lot 0.12.1", "rustc-hash", "serde", @@ -1621,9 +1619,9 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ "block-buffer 0.10.3", "crypto-common", @@ -1740,7 +1738,7 @@ dependencies = [ "ctor", "drag_and_drop", "env_logger", - "futures 0.3.24", + "futures 0.3.25", "fuzzy", "git", "gpui", @@ -1790,9 +1788,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.9.1" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c90bf5f19754d10198ccb95b70664fc925bd1fc090a0fd9a6ebc54acc8cd6272" +checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" dependencies = [ "atty", "humantime", @@ -1881,12 +1879,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" -[[package]] -name = "fallible-streaming-iterator" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" - [[package]] name = "fastrand" version = "1.8.0" @@ -1934,12 +1926,12 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.24" +version = "1.0.25" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" +checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" dependencies = [ "crc32fast", - "miniz_oxide 0.5.4", + "miniz_oxide 0.6.2", ] [[package]] @@ -2060,7 +2052,7 @@ dependencies = [ "async-trait", "collections", "fsevent", - "futures 0.3.24", + "futures 0.3.25", "git2", "gpui", "lazy_static", @@ -2137,9 +2129,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f21eda599937fba36daeb58a22e8f5cee2d14c4a17b5b7739c7c8e5e3b8230c" +checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" dependencies = [ "futures-channel", "futures-core", @@ -2152,9 +2144,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bdd20c28fadd505d0fd6712cdfcb0d4b5648baf45faef7f852afb2399bb050" +checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" dependencies = [ "futures-core", "futures-sink", @@ -2162,15 +2154,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e5aa3de05362c3fb88de6531e6296e85cde7739cccad4b9dfeeb7f6ebce56bf" +checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" [[package]] name = "futures-executor" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ff63c23854bee61b6e9cd331d523909f238fc7636290b96826e9cfa5faa00ab" +checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" dependencies = [ "futures-core", "futures-task", @@ -2179,9 +2171,9 @@ dependencies = [ [[package]] name = "futures-intrusive" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62007592ac46aa7c2b6416f7deb9a8a8f63a01e0f1d6e1787d5630170db2b63e" +checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5" dependencies = [ "futures-core", "lock_api", @@ -2190,9 +2182,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbf4d2a7a308fd4578637c0b17c7e1c7ba127b8f6ba00b29f717e9655d85eb68" +checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" [[package]] name = "futures-lite" @@ -2211,9 +2203,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42cd15d1c7456c04dbdf7e88bcd69760d74f3a798d6444e16974b505b0e62f17" +checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" dependencies = [ "proc-macro2", "quote", @@ -2222,21 +2214,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b20ba5a92e727ba30e72834706623d94ac93a725410b6a6b6fbc1b07f7ba56" +checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" [[package]] name = "futures-task" -version = "0.3.24" +version = "0.3.25" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6508c467c73851293f390476d4491cf4d227dbabcd4170f3bb6044959b294f1" +checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" [[package]] name = "futures-util" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44fb6cb1be61cc1d2e43b262516aafcf63b241cffdb1d3fa115f91d9c7b09c90" +checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" dependencies = [ "futures 0.1.31", "futures-channel", @@ -2292,9 +2284,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if 1.0.0", "libc", @@ -2330,7 +2322,7 @@ dependencies = [ "async-trait", "clock", "collections", - "futures 0.3.24", + "futures 0.3.25", "git2", "lazy_static", "log", @@ -2408,7 +2400,7 @@ dependencies = [ "etagere", "font-kit", "foreign-types", - "futures 0.3.24", + "futures 0.3.25", "gpui_macros", "image", "itertools", @@ -2434,7 +2426,7 @@ dependencies = [ "smallvec", "smol", "sum_tree", - "time 0.3.15", + "time 0.3.17", "tiny-skia", "tree-sitter", "usvg", @@ -2453,11 +2445,11 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca32592cf21ac7ccab1825cd87f6c9b3d9022c44d086172ed0966bec8af30be" +checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "fnv", "futures-core", "futures-sink", @@ -2505,7 +2497,7 @@ checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ "base64", "bitflags", - "bytes 1.2.1", + "bytes 1.3.0", "headers-core", "http", "httpdate", @@ -2589,7 +2581,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -2598,7 +2590,7 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "fnv", "itoa", ] @@ -2609,7 +2601,7 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "http", "pin-project-lite 0.2.9", ] @@ -2640,11 +2632,11 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.20" +version = "0.14.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" +checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "futures-channel", "futures-core", "futures-util", @@ -2680,7 +2672,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "hyper", "native-tls", "tokio", @@ 
-2689,9 +2681,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.51" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5a6ef98976b22b3b7f2f3a806f858cb862044cfa66805aa3ad84cb3d3b785ed" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2727,7 +2719,7 @@ version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "713f1b139373f96a2e0ce3ac931cd01ee973c3c5dd7c40c0c2efe96ad2b6751d" dependencies = [ - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.14", "globset", "lazy_static", "log", @@ -2760,9 +2752,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.1" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg 1.1.0", "hashbrown 0.12.3", @@ -2834,9 +2826,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.5.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" +checksum = "f88c5561171189e69df9d98bcf18fd5f9558300f7ea7b801eb8a0fd748bd8745" [[package]] name = "is-terminal" @@ -2858,7 +2850,7 @@ checksum = "334e04b4d781f436dc315cb1e7515bd96826426345d498149e4bde36b67f8ee9" dependencies = [ "async-channel", "castaway", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.14", "curl", "curl-sys", "encoding_rs", @@ -2957,7 +2949,7 @@ checksum = "6204285f77fe7d9784db3fdc449ecce1a0114927a51d5a41c4c7a292011c015f" dependencies = [ "base64", "crypto-common", - "digest 0.10.5", + "digest 0.10.6", "hmac 0.12.1", "serde", "serde_json", @@ -2996,7 +2988,7 @@ dependencies = [ "ctor", "env_logger", "fs", - "futures 0.3.24", + "futures 0.3.25", "fuzzy", "git", "gpui", @@ -3053,9 +3045,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.135" +version = "0.2.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c" +checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8" [[package]] name = "libgit2-sys" @@ -3071,9 +3063,9 @@ dependencies = [ [[package]] name = "libloading" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ "cfg-if 1.0.0", "winapi 0.3.9", @@ -3081,9 +3073,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "292a948cd991e376cf75541fe5b97a1081d713c618b4f1b9500f8844e49eb565" +checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" [[package]] name = "libnghttp2-sys" @@ -3097,9 +3089,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.25.1" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f0455f2c1bc9a7caa792907026e469c1d91761fb0ea37cbb16427c77280cf35" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" dependencies = [ "cc", 
"pkg-config", @@ -3167,13 +3159,13 @@ dependencies = [ "async-trait", "block", "byteorder", - "bytes 1.2.1", + "bytes 1.3.0", "cocoa", "collections", "core-foundation", "core-graphics", "foreign-types", - "futures 0.3.24", + "futures 0.3.25", "gpui", "hmac 0.12.1", "jwt", @@ -3197,7 +3189,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "futures 0.3.24", + "futures 0.3.25", "hmac 0.12.1", "jwt", "log", @@ -3239,7 +3231,7 @@ dependencies = [ "collections", "ctor", "env_logger", - "futures 0.3.24", + "futures 0.3.25", "gpui", "log", "lsp-types", @@ -3322,7 +3314,7 @@ version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -3332,7 +3324,7 @@ dependencies = [ "anyhow", "bindgen", "block", - "bytes 1.2.1", + "bytes 1.3.0", "core-foundation", "foreign-types", "metal", @@ -3372,6 +3364,15 @@ dependencies = [ "autocfg 1.1.0", ] +[[package]] +name = "memoffset" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +dependencies = [ + "autocfg 1.1.0", +] + [[package]] name = "menu" version = "0.1.0" @@ -3433,6 +3434,25 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +dependencies = [ + "adler", +] + +[[package]] +name = "mintex" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd7c5ba1c3b5a23418d7bbf98c71c3d4946a0125002129231da8d6b723d559cb" +dependencies = [ + "once_cell", + "sys-info", +] + [[package]] name = "mio" version = "0.6.23" @@ -3454,14 +3474,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" +checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.36.1", + "windows-sys 0.42.0", ] [[package]] @@ -3544,9 +3564,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd7e2f3618557f980e0b17e8856252eee3c97fa12c54dff0ca290fb6266ca4a9" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", @@ -3562,9 +3582,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.37" +version = "0.2.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" +checksum = "74d0df99cfcd2530b2e694f6e17e7f37b8e26bb23983ac530c0c97408837c631" dependencies = [ "cfg-if 0.1.10", "libc", @@ -3573,14 +3593,14 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" +checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ "bitflags", "cfg-if 1.0.0", "libc", - "memoffset", + "memoffset 0.6.5", ] [[package]] @@ -3685,30 +3705,21 @@ 
dependencies = [ [[package]] name = "num_cpus" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" dependencies = [ "hermit-abi 0.1.19", "libc", ] -[[package]] -name = "num_threads" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" -dependencies = [ - "libc", -] - [[package]] name = "nvim-rs" version = "0.5.0" source = "git+https://github.com/KillTheMule/nvim-rs?branch=master#d701c2790dcb2579f8f4d7003ba30e2100a7d25b" dependencies = [ "async-trait", - "futures 0.3.24", + "futures 0.3.25", "log", "parity-tokio-ipc", "rmp", @@ -3759,9 +3770,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" +checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" [[package]] name = "opaque-debug" @@ -3771,9 +3782,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.42" +version = "0.10.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" +checksum = "020433887e44c27ff16365eaa2d380547a94544ad509aff6eb5b6e3e0b27b376" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -3803,9 +3814,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.76" +version = "0.9.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5230151e44c0f05157effb743e8d517472843121cf9243e8b81393edb5acd9ce" +checksum = "07d5c8cb6e57b3a3612064d7b18b117912b4ce70955c2504d4b741c9e244b132" dependencies = [ "autocfg 1.1.0", "cc", @@ -3825,9 +3836,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.3.0" +version = "6.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff" +checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" [[package]] name = "outline" @@ -3858,7 +3869,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.24", + "futures 0.3.25", "libc", "log", "rand 0.7.3", @@ -3890,7 +3901,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.4", + "parking_lot_core 0.9.5", ] [[package]] @@ -3909,9 +3920,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" +checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" dependencies = [ "cfg-if 1.0.0", "libc", @@ -3999,9 +4010,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.4.0" +version = "2.5.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbc7bc69c062e492337d74d59b120c274fd3d261b6bf6d3207d499b4b379c41a" +checksum = "cc8bed3549e0f9b0a2a78bf7c0018237a2cdf085eecbbc048e52612438e4e9d0" dependencies = [ "thiserror", "ucd-trie", @@ -4080,9 +4091,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "plist" @@ -4094,7 +4105,7 @@ dependencies = [ "indexmap", "line-wrap", "serde", - "time 0.3.15", + "time 0.3.17", "xml-rs", ] @@ -4147,16 +4158,16 @@ dependencies = [ [[package]] name = "polling" -version = "2.3.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "899b00b9c8ab553c743b3e11e87c5c7d423b2a2de229ba95b24a756344748011" +checksum = "166ca89eb77fd403230b9c156612965a81e094ec6ec3aa13663d4c8b113fa748" dependencies = [ "autocfg 1.1.0", "cfg-if 1.0.0", "libc", "log", "wepoll-ffi", - "winapi 0.3.9", + "windows-sys 0.42.0", ] [[package]] @@ -4173,7 +4184,7 @@ checksum = "a63d25391d04a097954b76aba742b6b5b74f213dfe3dbaeeb36e8ddc1c657f0b" dependencies = [ "atomic", "crossbeam-queue", - "futures 0.3.24", + "futures 0.3.25", "log", "pin-project", "pollster", @@ -4183,9 +4194,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro-error" @@ -4244,7 +4255,7 @@ dependencies = [ "db", "fs", "fsevent", - "futures 0.3.24", + "futures 0.3.25", "fuzzy", "git", "gpui", @@ -4281,7 +4292,7 @@ dependencies = [ "context_menu", "drag_and_drop", "editor", - "futures 0.3.24", + "futures 0.3.25", "gpui", "menu", "postage", @@ -4300,7 +4311,7 @@ version = "0.1.0" dependencies = [ "anyhow", "editor", - "futures 0.3.24", + "futures 0.3.25", "fuzzy", "gpui", "language", @@ -4318,9 +4329,9 @@ dependencies = [ [[package]] name = "prometheus" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45c8babc29389186697fe5a2a4859d697825496b83db5d0b65271cdc0488e88c" +checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" dependencies = [ "cfg-if 1.0.0", "fnv", @@ -4337,7 +4348,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "prost-derive 0.8.0", ] @@ -4347,7 +4358,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "prost-derive 0.9.0", ] @@ -4357,7 +4368,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "heck 0.3.3", "itertools", "lazy_static", @@ -4403,7 +4414,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "prost 0.8.0", ] @@ -4413,7 +4424,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "prost 0.9.0", ] @@ -4539,7 +4550,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", ] [[package]] @@ -4553,11 +4564,10 @@ dependencies = [ [[package]] name = "rayon" -version = "1.5.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" +checksum = "1e060280438193c554f654141c9ea9417886713b7acd75974c85b18a69a88e0b" dependencies = [ - "autocfg 1.1.0", "crossbeam-deque", "either", "rayon-core", @@ -4565,13 +4575,13 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.9.3" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" +checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3" dependencies = [ "crossbeam-channel 0.5.6", "crossbeam-deque", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.14", "num_cpus", ] @@ -4605,7 +4615,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", "redox_syscall", "thiserror", ] @@ -4624,9 +4634,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" +checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" dependencies = [ "aho-corasick", "memchr", @@ -4644,9 +4654,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.27" +version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" [[package]] name = "region" @@ -4671,12 +4681,12 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.12" +version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "431949c384f4e2ae07605ccaa56d1d9d2ecdb5cadd4f9577ccfab29f2e5149fc" +checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" dependencies = [ "base64", - "bytes 1.2.1", + "bytes 1.3.0", "encoding_rs", "futures-core", "futures-util", @@ -4802,7 +4812,7 @@ dependencies = [ "collections", "ctor", "env_logger", - "futures 0.3.24", + "futures 0.3.25", "gpui", "parking_lot 0.11.2", "prost 0.8.0", @@ -4838,35 +4848,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rusqlite" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" -dependencies = [ - "bitflags", - "fallible-iterator", - "fallible-streaming-iterator", - "hashlink", - "libsqlite3-sys", - "serde_json", - "smallvec", -] - 
-[[package]] -name = "rusqlite_migration" -version = "1.0.0" -source = "git+https://github.com/cljoly/rusqlite_migration?rev=c433555d7c1b41b103426e35756eb3144d0ebbc6#c433555d7c1b41b103426e35756eb3144d0ebbc6" -dependencies = [ - "log", - "rusqlite", -] - [[package]] name = "rust-embed" -version = "6.4.1" +version = "6.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e26934cd67a1da1165efe61cba4047cc1b4a526019da609fcce13a1000afb5fa" +checksum = "283ffe2f866869428c92e0d61c2f35dfb4355293cdfdc48f49e895c15f1333d1" dependencies = [ "rust-embed-impl", "rust-embed-utils", @@ -4875,9 +4861,9 @@ dependencies = [ [[package]] name = "rust-embed-impl" -version = "6.3.0" +version = "6.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e35d7b402e273544cc08e0824aa3404333fab8a90ac43589d3d5b72f4b346e12" +checksum = "31ab23d42d71fb9be1b643fe6765d292c5e14d46912d13f3ae2815ca048ea04d" dependencies = [ "proc-macro2", "quote", @@ -5187,18 +5173,18 @@ checksum = "5a9f47faea3cad316faa914d013d24f471cd90bfca1a0c70f05a3f42c6441e99" [[package]] name = "serde" -version = "1.0.145" +version = "1.0.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b" +checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.145" +version = "1.0.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c" +checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c" dependencies = [ "proc-macro2", "quote", @@ -5227,9 +5213,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41feea4228a6f1cd09ec7a3593a682276702cd67b5273544757dae23c096f074" +checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" dependencies = [ "indexmap", "itoa", @@ -5257,16 +5243,6 @@ dependencies = [ "syn", ] -[[package]] -name = "serde_rusqlite" -version = "0.31.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "538b51f10ee271375cbd9caa04fa6e3e50af431a21db97caae48da92a074244a" -dependencies = [ - "rusqlite", - "serde", -] - [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -5320,7 +5296,7 @@ dependencies = [ "assets", "collections", "fs", - "futures 0.3.24", + "futures 0.3.25", "gpui", "json_comments", "postage", @@ -5351,13 +5327,13 @@ dependencies = [ [[package]] name = "sha-1" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -5368,7 +5344,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -5392,7 +5368,7 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -5528,9 +5504,9 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" 
[[package]] name = "smol" -version = "1.2.5" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85cf3b5351f3e783c1d79ab5fc604eeed8b8ae9abd36b166e8b87a089efd85e4" +checksum = "13f2b548cd8447f8de0fdf1c592929f70f4fc7039a05e47404b0d096ec6987a1" dependencies = [ "async-channel", "async-executor", @@ -5541,7 +5517,6 @@ dependencies = [ "async-process", "blocking", "futures-lite", - "once_cell", ] [[package]] @@ -5593,6 +5568,17 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be6c3f39c37a4283ee4b43d1311c828f2e1fb0541e76ea0cb1a2abd9ef2f5b3b" +[[package]] +name = "sqlez" +version = "0.1.0" +source = "git+https://github.com/Kethku/sqlez#10a78dbe535a0c270b6b4bc469fbbffe9fc8c36f" +dependencies = [ + "anyhow", + "indoc", + "libsqlite3-sys", + "thread_local", +] + [[package]] name = "sqlformat" version = "0.2.0" @@ -5623,7 +5609,7 @@ dependencies = [ "base64", "bitflags", "byteorder", - "bytes 1.2.1", + "bytes 1.3.0", "crc", "crossbeam-queue", "dirs 4.0.0", @@ -5662,10 +5648,10 @@ dependencies = [ "sqlx-rt", "stringprep", "thiserror", - "time 0.3.15", + "time 0.3.17", "tokio-stream", "url", - "uuid 1.2.1", + "uuid 1.2.2", "webpki-roots 0.22.5", "whoami", ] @@ -5787,9 +5773,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.102" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1" +checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" dependencies = [ "proc-macro2", "quote", @@ -5814,6 +5800,16 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "sys-info" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b3a0d0aba8bf96a0e1ddfdc352fc53b3df7f39318c71854910c3c4b024ae52c" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "system-interface" version = "0.20.0" @@ -5832,9 +5828,9 @@ dependencies = [ [[package]] name = "target-lexicon" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1" +checksum = "9410d0f6853b1d94f0e519fb95df60f29d2c1eff2d921ffdf01a4c8a3b54f12d" [[package]] name = "tempdir" @@ -5879,7 +5875,7 @@ dependencies = [ "context_menu", "dirs 4.0.0", "editor", - "futures 0.3.24", + "futures 0.3.25", "gpui", "itertools", "language", @@ -5936,9 +5932,9 @@ dependencies = [ [[package]] name = "textwrap" -version = "0.15.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "949517c0cf1bf4ee812e2e07e08ab448e3ae0d23472aee8a06c985f0c8815b16" +checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "theme" @@ -6031,9 +6027,9 @@ dependencies = [ [[package]] name = "time" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" dependencies = [ "libc", "wasi 0.10.0+wasi-snapshot-preview1", @@ -6042,22 +6038,30 @@ dependencies = [ [[package]] name = "time" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d634a985c4d4238ec39cacaed2e7ae552fbd3c476b552c1deac3021b7d7eaf0c" +checksum = 
"a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ "itoa", - "libc", - "num_threads", "serde", + "time-core", "time-macros", ] [[package]] -name = "time-macros" -version = "0.2.4" +name = "time-core" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" +checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" + +[[package]] +name = "time-macros" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +dependencies = [ + "time-core", +] [[package]] name = "tiny-skia" @@ -6103,15 +6107,15 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.21.2" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" +checksum = "d76ce4a75fb488c605c54bf610f221cea8b0dafb53333c1a67e8ee199dcd2ae3" dependencies = [ "autocfg 1.1.0", - "bytes 1.2.1", + "bytes 1.3.0", "libc", "memchr", - "mio 0.8.4", + "mio 0.8.5", "num_cpus", "parking_lot 0.12.1", "pin-project-lite 0.2.9", @@ -6144,9 +6148,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", @@ -6203,7 +6207,7 @@ version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "futures-core", "futures-sink", "log", @@ -6217,7 +6221,7 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "futures-core", "futures-io", "futures-sink", @@ -6244,7 +6248,7 @@ dependencies = [ "async-stream", "async-trait", "base64", - "bytes 1.2.1", + "bytes 1.3.0", "futures-core", "futures-util", "h2", @@ -6288,12 +6292,12 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" +checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" dependencies = [ "bitflags", - "bytes 1.2.1", + "bytes 1.3.0", "futures-core", "futures-util", "http", @@ -6592,7 +6596,7 @@ checksum = "6ad3713a14ae247f22a728a0456a545df14acf3867f905adff84be99e23b3ad1" dependencies = [ "base64", "byteorder", - "bytes 1.2.1", + "bytes 1.3.0", "http", "httparse", "log", @@ -6611,12 +6615,12 @@ checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ "base64", "byteorder", - "bytes 1.2.1", + "bytes 1.3.0", "http", "httparse", "log", "rand 0.8.5", - "sha-1 0.10.0", + "sha-1 0.10.1", "thiserror", "url", "utf-8", @@ -6787,7 +6791,7 @@ version = "0.1.0" dependencies = [ "anyhow", "backtrace", - "futures 0.3.24", + "futures 0.3.25", "git2", "lazy_static", "log", @@ -6802,16 +6806,16 @@ version = "0.8.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", ] [[package]] name = "uuid" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" +checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", ] [[package]] @@ -7055,9 +7059,9 @@ checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "wasm-encoder" -version = "0.18.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c64ac98d5d61192cc45c701b7e4bd0b9aff91e2edfc7a088406cfe2288581e2c" +checksum = "05632e0a66a6ed8cca593c24223aabd6262f256c3693ad9822c315285f010614" dependencies = [ "leb128", ] @@ -7231,7 +7235,7 @@ dependencies = [ "log", "mach", "memfd", - "memoffset", + "memoffset 0.6.5", "more-asserts", "rand 0.8.5", "region", @@ -7279,9 +7283,9 @@ dependencies = [ [[package]] name = "wast" -version = "47.0.1" +version = "50.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b98502f3978adea49551e801a6687678e6015317d7d9470a67fe813393f2a8" +checksum = "a2cbb59d4ac799842791fe7e806fa5dbbf6b5554d538e51cc8e176db6ff0ae34" dependencies = [ "leb128", "memchr", @@ -7291,11 +7295,11 @@ dependencies = [ [[package]] name = "wat" -version = "1.0.49" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aab4e20c60429fbba9670a6cae0fff9520046ba0aa3e6d0b1cd2653bea14898" +checksum = "584aaf7a1ecf4d383bbe1a25eeab0cbb8ff96acc6796707ff65cde48f4632f15" dependencies = [ - "wast 47.0.1", + "wast 50.0.0", ] [[package]] @@ -7621,7 +7625,7 @@ dependencies = [ "db", "drag_and_drop", "fs", - "futures 0.3.24", + "futures 0.3.25", "gpui", "language", "log", @@ -7705,7 +7709,7 @@ dependencies = [ "file_finder", "fs", "fsevent", - "futures 0.3.24", + "futures 0.3.25", "fuzzy", "go_to_line", "gpui", @@ -7783,9 +7787,9 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" +checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" dependencies = [ "proc-macro2", "quote", @@ -7814,9 +7818,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.1+zstd.1.5.2" +version = "2.0.4+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b" +checksum = "4fa202f2ef00074143e219d15b62ffc317d17cc33909feac471c044087cad7b0" dependencies = [ "cc", "libc", diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index a2ac2a9fc5..5530caaa81 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -11,6 +11,7 @@ doctest = false test-support = [] [dependencies] +indoc = "1.0.4" collections = { path = "../collections" } gpui = { path = "../gpui" } anyhow = "1.0.57" @@ -18,10 +19,7 @@ async-trait = "0.1" lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" -rusqlite = { version = "0.28.0", features = ["bundled", "serde_json", "backup"] } -rusqlite_migration = { git = "https://github.com/cljoly/rusqlite_migration", rev = 
"c433555d7c1b41b103426e35756eb3144d0ebbc6" } -serde = { workspace = true } -serde_rusqlite = "0.31.0" +sqlez = { git = "https://github.com/Kethku/sqlez", ref = "c8c01fe6b82085bbfe81b2a9406718454a7839c4c" } [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 107bbffdf4..e5740c5edb 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -5,26 +5,25 @@ pub mod pane; pub mod workspace; use std::fs; -use std::path::{Path, PathBuf}; -use std::sync::Arc; +use std::ops::Deref; +use std::path::Path; use anyhow::Result; -use log::error; -use parking_lot::Mutex; -use rusqlite::{backup, Connection}; +use indoc::indoc; +use sqlez::connection::Connection; +use sqlez::thread_safe_connection::ThreadSafeConnection; -use migrations::MIGRATIONS; pub use workspace::*; #[derive(Clone)] -pub enum Db { - Real(Arc), - Null, -} +struct Db(ThreadSafeConnection); -pub struct RealDb { - connection: Mutex, - path: Option, +impl Deref for Db { + type Target = sqlez::connection::Connection; + + fn deref(&self) -> &Self::Target { + &self.0.deref() + } } impl Db { @@ -36,104 +35,44 @@ impl Db { .expect("Should be able to create the database directory"); let db_path = current_db_dir.join(Path::new("db.sqlite")); - Connection::open(db_path) - .map_err(Into::into) - .and_then(|connection| Self::initialize(connection)) - .map(|connection| { - Db::Real(Arc::new(RealDb { - connection, - path: Some(db_dir.to_path_buf()), - })) - }) - .unwrap_or_else(|e| { - error!( - "Connecting to file backed db failed. Reverting to null db. {}", - e - ); - Self::Null - }) - } - - fn initialize(mut conn: Connection) -> Result> { - MIGRATIONS.to_latest(&mut conn)?; - - conn.pragma_update(None, "journal_mode", "WAL")?; - conn.pragma_update(None, "synchronous", "NORMAL")?; - conn.pragma_update(None, "foreign_keys", true)?; - conn.pragma_update(None, "case_sensitive_like", true)?; - - Ok(Mutex::new(conn)) + Db( + ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) + .with_initialize_query(indoc! {" + PRAGMA journal_mode=WAL; + PRAGMA synchronous=NORMAL; + PRAGMA foreign_keys=TRUE; + PRAGMA case_sensitive_like=TRUE; + "}), + ) } pub fn persisting(&self) -> bool { - self.real().and_then(|db| db.path.as_ref()).is_some() - } - - pub fn real(&self) -> Option<&RealDb> { - match self { - Db::Real(db) => Some(&db), - _ => None, - } + self.persistent() } /// Open a in memory database for testing and as a fallback. pub fn open_in_memory() -> Self { - Connection::open_in_memory() - .map_err(Into::into) - .and_then(|connection| Self::initialize(connection)) - .map(|connection| { - Db::Real(Arc::new(RealDb { - connection, - path: None, - })) - }) - .unwrap_or_else(|e| { - error!( - "Connecting to in memory db failed. Reverting to null db. {}", - e - ); - Self::Null - }) + Db( + ThreadSafeConnection::new("Zed DB", false).with_initialize_query(indoc! 
{" + PRAGMA journal_mode=WAL; + PRAGMA synchronous=NORMAL; + PRAGMA foreign_keys=TRUE; + PRAGMA case_sensitive_like=TRUE; + "}), + ) } pub fn write_to>(&self, dest: P) -> Result<()> { - self.real() - .map(|db| { - if db.path.is_some() { - panic!("DB already exists"); - } - - let lock = db.connection.lock(); - let mut dst = Connection::open(dest)?; - let backup = backup::Backup::new(&lock, &mut dst)?; - backup.step(-1)?; - - Ok(()) - }) - .unwrap_or(Ok(())) + let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref()); + self.backup(&destination) } } impl Drop for Db { fn drop(&mut self) { - match self { - Db::Real(real_db) => { - let lock = real_db.connection.lock(); - - let _ = lock.pragma_update(None, "analysis_limit", "500"); - let _ = lock.pragma_update(None, "optimize", ""); - } - Db::Null => {} - } - } -} - -#[cfg(test)] -mod tests { - use crate::migrations::MIGRATIONS; - - #[test] - fn test_migrations() { - assert!(MIGRATIONS.validate().is_ok()); + self.exec(indoc! {" + PRAGMA analysis_limit=500; + PRAGMA optimize"}) + .ok(); } } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 2dc988a7e3..5237caa23c 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,8 +1,5 @@ use anyhow::Result; -use rusqlite::{params, Connection, OptionalExtension}; -use serde::{Deserialize, Serialize}; - use std::{ ffi::OsStr, fmt::Debug, @@ -12,28 +9,34 @@ use std::{ time::{SystemTime, UNIX_EPOCH}, }; +use anyhow::Result; +use indoc::indoc; +use sqlez::{connection::Connection, migrations::Migration}; + use crate::pane::SerializedDockPane; use super::Db; // If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging // you might want to update some of the parsing code as well, I've left the variations in but commented -// out -pub(crate) const WORKSPACE_M_1: &str = " -CREATE TABLE workspaces( - workspace_id INTEGER PRIMARY KEY, - last_opened_timestamp INTEGER NOT NULL -) STRICT; +// out. This will panic if run on an existing db that has already been migrated +const WORKSPACES_MIGRATION: Migration = Migration::new( + "migrations", + &[indoc! {" + CREATE TABLE workspaces( + workspace_id INTEGER PRIMARY KEY, + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL + ) STRICT; + + CREATE TABLE worktree_roots( + worktree_root BLOB NOT NULL, + workspace_id INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE + PRIMARY KEY(worktree_root, workspace_id) + ) STRICT;"}], +); -CREATE TABLE worktree_roots( - worktree_root BLOB NOT NULL, - workspace_id INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - PRIMARY KEY(worktree_root, workspace_id) -) STRICT; -"; - -#[derive(Debug, PartialEq, Eq, Copy, Clone, Default, Deserialize, Serialize)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] pub struct WorkspaceId(i64); impl WorkspaceId { @@ -77,19 +80,9 @@ impl Db { where P: AsRef + Debug, { - fn logic

<P>(
-            connection: &mut Connection,
-            worktree_roots: &[P],
-        ) -> Result<SerializedWorkspace>
-        where
-            P: AsRef<Path> + Debug,
-        {
-            let tx = connection.transaction()?;
-
-            tx.execute(
-                "INSERT INTO workspaces(last_opened_timestamp) VALUES (?)",
-                [current_millis()?],
-            )?;
+        let result = (|| {
+            let tx = self.transaction()?;
+            tx.execute("INSERT INTO workspaces(last_opened_timestamp) VALUES (?)", [current_millis()?])?;
 
             let id = WorkspaceId(tx.last_insert_rowid());
 
@@ -101,22 +94,15 @@ impl Db {
                 workspace_id: id,
                 dock_pane: None,
             })
+        })();
+
+        match result {
+            Ok(serialized_workspace) => serialized_workspace,
+            Err(err) => {
+                log::error!("Failed to insert new workspace into DB: {}", err);
+                Default::default()
+            }
         }
-
-        self.real()
-            .map(|db| {
-                let mut lock = db.connection.lock();
-
-                // No need to waste the memory caching this, should happen rarely.
-                match logic(&mut lock, worktree_roots) {
-                    Ok(serialized_workspace) => serialized_workspace,
-                    Err(err) => {
-                        log::error!("Failed to insert new workspace into DB: {}", err);
-                        Default::default()
-                    }
-                }
-            })
-            .unwrap_or_default()
     }
 
     fn workspace_id<P>

(&self, worktree_roots: &[P]) -> Option From a4a1859dfca1eadbfe1f95e44917f9958fbd6f3e Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 1 Nov 2022 13:31:03 -0700 Subject: [PATCH 133/240] Added sqlez api --- Cargo.lock | 1 - crates/db/Cargo.toml | 3 ++- crates/sqlez | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) create mode 160000 crates/sqlez diff --git a/Cargo.lock b/Cargo.lock index e2165c0941..2fb859dca5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5571,7 +5571,6 @@ checksum = "be6c3f39c37a4283ee4b43d1311c828f2e1fb0541e76ea0cb1a2abd9ef2f5b3b" [[package]] name = "sqlez" version = "0.1.0" -source = "git+https://github.com/Kethku/sqlez#10a78dbe535a0c270b6b4bc469fbbffe9fc8c36f" dependencies = [ "anyhow", "indoc", diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 5530caaa81..fe0b21eaf4 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -14,12 +14,13 @@ test-support = [] indoc = "1.0.4" collections = { path = "../collections" } gpui = { path = "../gpui" } +sqlez = { path = "../sqlez" } anyhow = "1.0.57" async-trait = "0.1" lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" -sqlez = { git = "https://github.com/Kethku/sqlez", ref = "c8c01fe6b82085bbfe81b2a9406718454a7839c4c" } + [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } diff --git a/crates/sqlez b/crates/sqlez new file mode 160000 index 0000000000..10a78dbe53 --- /dev/null +++ b/crates/sqlez @@ -0,0 +1 @@ +Subproject commit 10a78dbe535a0c270b6b4bc469fbbffe9fc8c36f From 395070cb921eef4b813d775f6af49de65db6f544 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 1 Nov 2022 13:32:46 -0700 Subject: [PATCH 134/240] remove submodule --- crates/sqlez | 1 - crates/sqlez/.gitignore | 2 + crates/sqlez/Cargo.lock | 150 +++++++++ crates/sqlez/Cargo.toml | 12 + crates/sqlez/src/bindable.rs | 209 +++++++++++++ crates/sqlez/src/connection.rs | 220 +++++++++++++ crates/sqlez/src/lib.rs | 6 + crates/sqlez/src/migrations.rs | 261 ++++++++++++++++ crates/sqlez/src/savepoint.rs | 110 +++++++ crates/sqlez/src/statement.rs | 342 +++++++++++++++++++++ crates/sqlez/src/thread_safe_connection.rs | 78 +++++ 11 files changed, 1390 insertions(+), 1 deletion(-) delete mode 160000 crates/sqlez create mode 100644 crates/sqlez/.gitignore create mode 100644 crates/sqlez/Cargo.lock create mode 100644 crates/sqlez/Cargo.toml create mode 100644 crates/sqlez/src/bindable.rs create mode 100644 crates/sqlez/src/connection.rs create mode 100644 crates/sqlez/src/lib.rs create mode 100644 crates/sqlez/src/migrations.rs create mode 100644 crates/sqlez/src/savepoint.rs create mode 100644 crates/sqlez/src/statement.rs create mode 100644 crates/sqlez/src/thread_safe_connection.rs diff --git a/crates/sqlez b/crates/sqlez deleted file mode 160000 index 10a78dbe53..0000000000 --- a/crates/sqlez +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 10a78dbe535a0c270b6b4bc469fbbffe9fc8c36f diff --git a/crates/sqlez/.gitignore b/crates/sqlez/.gitignore new file mode 100644 index 0000000000..8130c3ab47 --- /dev/null +++ b/crates/sqlez/.gitignore @@ -0,0 +1,2 @@ +debug/ +target/ diff --git a/crates/sqlez/Cargo.lock b/crates/sqlez/Cargo.lock new file mode 100644 index 0000000000..33348baed9 --- /dev/null +++ b/crates/sqlez/Cargo.lock @@ -0,0 +1,150 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "addr2line" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "anyhow" +version = "1.0.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" +dependencies = [ + "backtrace", +] + +[[package]] +name = "backtrace" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "cc" +version = "1.0.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "gimli" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" + +[[package]] +name = "indoc" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3" + +[[package]] +name = "libc" +version = "0.2.137" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" + +[[package]] +name = "libsqlite3-sys" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "memchr" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" + +[[package]] +name = "miniz_oxide" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" +dependencies = [ + "adler", +] + +[[package]] +name = "object" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" + +[[package]] +name = "pkg-config" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" + +[[package]] +name = "rustc-demangle" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" + +[[package]] +name = "sqlez" +version = "0.1.0" +dependencies 
= [ + "anyhow", + "indoc", + "libsqlite3-sys", + "thread_local", +] + +[[package]] +name = "thread_local" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +dependencies = [ + "once_cell", +] + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" diff --git a/crates/sqlez/Cargo.toml b/crates/sqlez/Cargo.toml new file mode 100644 index 0000000000..cbb4504a04 --- /dev/null +++ b/crates/sqlez/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "sqlez" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = { version = "1.0.38", features = ["backtrace"] } +indoc = "1.0.7" +libsqlite3-sys = { version = "0.25.2", features = ["bundled"] } +thread_local = "1.1.4" \ No newline at end of file diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs new file mode 100644 index 0000000000..ca3ba401cf --- /dev/null +++ b/crates/sqlez/src/bindable.rs @@ -0,0 +1,209 @@ +use anyhow::Result; + +use crate::statement::{SqlType, Statement}; + +pub trait Bind { + fn bind(&self, statement: &Statement, start_index: i32) -> Result; +} + +pub trait Column: Sized { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)>; +} + +impl Bind for &[u8] { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_blob(start_index, self)?; + Ok(start_index + 1) + } +} + +impl Bind for Vec { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_blob(start_index, self)?; + Ok(start_index + 1) + } +} + +impl Column for Vec { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let result = statement.column_blob(start_index)?; + Ok((Vec::from(result), start_index + 1)) + } +} + +impl Bind for f64 { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_double(start_index, *self)?; + Ok(start_index + 1) + } +} + +impl Column for f64 { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let result = statement.column_double(start_index)?; + Ok((result, start_index + 1)) + } +} + +impl Bind for i32 { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_int(start_index, *self)?; + Ok(start_index + 1) + } +} + +impl Column for i32 { + fn column<'a>(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let result = statement.column_int(start_index)?; + Ok((result, start_index + 1)) + } +} + +impl Bind for i64 { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_int64(start_index, *self)?; + Ok(start_index + 1) + } +} + +impl Column for i64 { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let result = statement.column_int64(start_index)?; + Ok((result, start_index + 1)) + } +} + +impl Bind for usize { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + (*self as i64).bind(statement, start_index) + } +} + +impl Column for usize { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let result = statement.column_int64(start_index)?; + Ok((result as usize, start_index + 1)) + } +} + +impl Bind for () { + fn bind(&self, 
statement: &Statement, start_index: i32) -> Result { + statement.bind_null(start_index)?; + Ok(start_index + 1) + } +} + +impl Bind for &str { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_text(start_index, self)?; + Ok(start_index + 1) + } +} + +impl Bind for String { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_text(start_index, self)?; + Ok(start_index + 1) + } +} + +impl Column for String { + fn column<'a>(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let result = statement.column_text(start_index)?; + Ok((result.to_owned(), start_index + 1)) + } +} + +impl Bind for (T1, T2) { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = self.0.bind(statement, start_index)?; + self.1.bind(statement, next_index) + } +} + +impl Column for (T1, T2) { + fn column<'a>(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + Ok(((first, second), next_index)) + } +} + +impl Bind for (T1, T2, T3) { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = self.0.bind(statement, start_index)?; + let next_index = self.1.bind(statement, next_index)?; + self.2.bind(statement, next_index) + } +} + +impl Column for (T1, T2, T3) { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + let (third, next_index) = T3::column(statement, next_index)?; + Ok(((first, second, third), next_index)) + } +} + +impl Bind for (T1, T2, T3, T4) { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = self.0.bind(statement, start_index)?; + let next_index = self.1.bind(statement, next_index)?; + let next_index = self.2.bind(statement, next_index)?; + self.3.bind(statement, next_index) + } +} + +impl Column for (T1, T2, T3, T4) { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + let (third, next_index) = T3::column(statement, next_index)?; + let (forth, next_index) = T4::column(statement, next_index)?; + Ok(((first, second, third, forth), next_index)) + } +} + +impl Bind for Option { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + if let Some(this) = self { + this.bind(statement, start_index) + } else { + statement.bind_null(start_index)?; + Ok(start_index + 1) + } + } +} + +impl Column for Option { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + if let SqlType::Null = statement.column_type(start_index)? { + Ok((None, start_index + 1)) + } else { + T::column(statement, start_index).map(|(result, next_index)| (Some(result), next_index)) + } + } +} + +impl Bind for [T; COUNT] { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let mut current_index = start_index; + for binding in self { + current_index = binding.bind(statement, current_index)? 
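+            // Each `Bind` impl returns the index of the next free placeholder,
+            // so the loop simply threads `current_index` through every element.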
+ } + + Ok(current_index) + } +} + +impl Column for [T; COUNT] { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let mut array = [Default::default(); COUNT]; + let mut current_index = start_index; + for i in 0..COUNT { + (array[i], current_index) = T::column(statement, current_index)?; + } + Ok((array, current_index)) + } +} diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs new file mode 100644 index 0000000000..81bb9dfe78 --- /dev/null +++ b/crates/sqlez/src/connection.rs @@ -0,0 +1,220 @@ +use std::{ + ffi::{CStr, CString}, + marker::PhantomData, +}; + +use anyhow::{anyhow, Result}; +use libsqlite3_sys::*; + +use crate::statement::Statement; + +pub struct Connection { + pub(crate) sqlite3: *mut sqlite3, + persistent: bool, + phantom: PhantomData, +} +unsafe impl Send for Connection {} + +impl Connection { + fn open(uri: &str, persistent: bool) -> Result { + let mut connection = Self { + sqlite3: 0 as *mut _, + persistent, + phantom: PhantomData, + }; + + let flags = SQLITE_OPEN_CREATE | SQLITE_OPEN_NOMUTEX | SQLITE_OPEN_READWRITE; + unsafe { + sqlite3_open_v2( + CString::new(uri)?.as_ptr(), + &mut connection.sqlite3, + flags, + 0 as *const _, + ); + + connection.last_error()?; + } + + Ok(connection) + } + + /// Attempts to open the database at uri. If it fails, a shared memory db will be opened + /// instead. + pub fn open_file(uri: &str) -> Self { + Self::open(uri, true).unwrap_or_else(|_| Self::open_memory(uri)) + } + + pub fn open_memory(uri: &str) -> Self { + let in_memory_path = format!("file:{}?mode=memory&cache=shared", uri); + Self::open(&in_memory_path, false).expect("Could not create fallback in memory db") + } + + pub fn persistent(&self) -> bool { + self.persistent + } + + pub fn exec(&self, query: impl AsRef) -> Result<()> { + unsafe { + sqlite3_exec( + self.sqlite3, + CString::new(query.as_ref())?.as_ptr(), + None, + 0 as *mut _, + 0 as *mut _, + ); + self.last_error()?; + } + Ok(()) + } + + pub fn prepare>(&self, query: T) -> Result { + Statement::prepare(&self, query) + } + + pub fn backup_main(&self, destination: &Connection) -> Result<()> { + unsafe { + let backup = sqlite3_backup_init( + destination.sqlite3, + CString::new("main")?.as_ptr(), + self.sqlite3, + CString::new("main")?.as_ptr(), + ); + sqlite3_backup_step(backup, -1); + sqlite3_backup_finish(backup); + destination.last_error() + } + } + + pub(crate) fn last_error(&self) -> Result<()> { + const NON_ERROR_CODES: &[i32] = &[SQLITE_OK, SQLITE_ROW]; + unsafe { + let code = sqlite3_errcode(self.sqlite3); + if NON_ERROR_CODES.contains(&code) { + return Ok(()); + } + + let message = sqlite3_errmsg(self.sqlite3); + let message = if message.is_null() { + None + } else { + Some( + String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) + .into_owned(), + ) + }; + + Err(anyhow!( + "Sqlite call failed with code {} and message: {:?}", + code as isize, + message + )) + } + } +} + +impl Drop for Connection { + fn drop(&mut self) { + unsafe { sqlite3_close(self.sqlite3) }; + } +} + +#[cfg(test)] +mod test { + use anyhow::Result; + use indoc::indoc; + + use crate::connection::Connection; + + #[test] + fn string_round_trips() -> Result<()> { + let connection = Connection::open_memory("string_round_trips"); + connection + .exec(indoc! 
{" + CREATE TABLE text ( + text TEXT + );"}) + .unwrap(); + + let text = "Some test text"; + + connection + .prepare("INSERT INTO text (text) VALUES (?);") + .unwrap() + .bound(text) + .unwrap() + .run() + .unwrap(); + + assert_eq!( + &connection + .prepare("SELECT text FROM text;") + .unwrap() + .row::() + .unwrap(), + text + ); + + Ok(()) + } + + #[test] + fn tuple_round_trips() { + let connection = Connection::open_memory("tuple_round_trips"); + connection + .exec(indoc! {" + CREATE TABLE test ( + text TEXT, + integer INTEGER, + blob BLOB + );"}) + .unwrap(); + + let tuple1 = ("test".to_string(), 64, vec![0, 1, 2, 4, 8, 16, 32, 64]); + let tuple2 = ("test2".to_string(), 32, vec![64, 32, 16, 8, 4, 2, 1, 0]); + + let mut insert = connection + .prepare("INSERT INTO test (text, integer, blob) VALUES (?, ?, ?)") + .unwrap(); + + insert.bound(tuple1.clone()).unwrap().run().unwrap(); + insert.bound(tuple2.clone()).unwrap().run().unwrap(); + + assert_eq!( + connection + .prepare("SELECT * FROM test") + .unwrap() + .rows::<(String, usize, Vec)>() + .unwrap(), + vec![tuple1, tuple2] + ); + } + + #[test] + fn backup_works() { + let connection1 = Connection::open_memory("backup_works"); + connection1 + .exec(indoc! {" + CREATE TABLE blobs ( + data BLOB + );"}) + .unwrap(); + let blob = &[0, 1, 2, 4, 8, 16, 32, 64]; + let mut write = connection1 + .prepare("INSERT INTO blobs (data) VALUES (?);") + .unwrap(); + write.bind_blob(1, blob).unwrap(); + write.run().unwrap(); + + // Backup connection1 to connection2 + let connection2 = Connection::open_memory("backup_works_other"); + connection1.backup_main(&connection2).unwrap(); + + // Delete the added blob and verify its deleted on the other side + let read_blobs = connection1 + .prepare("SELECT * FROM blobs;") + .unwrap() + .rows::>() + .unwrap(); + assert_eq!(read_blobs, vec![blob]); + } +} diff --git a/crates/sqlez/src/lib.rs b/crates/sqlez/src/lib.rs new file mode 100644 index 0000000000..3bed7a06cb --- /dev/null +++ b/crates/sqlez/src/lib.rs @@ -0,0 +1,6 @@ +pub mod bindable; +pub mod connection; +pub mod migrations; +pub mod savepoint; +pub mod statement; +pub mod thread_safe_connection; diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs new file mode 100644 index 0000000000..4721b353c6 --- /dev/null +++ b/crates/sqlez/src/migrations.rs @@ -0,0 +1,261 @@ +// Migrations are constructed by domain, and stored in a table in the connection db with domain name, +// effected tables, actual query text, and order. +// If a migration is run and any of the query texts don't match, the app panics on startup (maybe fallback +// to creating a new db?) +// Otherwise any missing migrations are run on the connection + +use anyhow::{anyhow, Result}; +use indoc::{formatdoc, indoc}; + +use crate::connection::Connection; + +const MIGRATIONS_MIGRATION: Migration = Migration::new( + "migrations", + // The migrations migration must be infallable because it runs to completion + // with every call to migration run and is run unchecked. + &[indoc! 
{" + CREATE TABLE IF NOT EXISTS migrations ( + domain TEXT, + step INTEGER, + migration TEXT + ); + "}], +); + +pub struct Migration { + domain: &'static str, + migrations: &'static [&'static str], +} + +impl Migration { + pub const fn new(domain: &'static str, migrations: &'static [&'static str]) -> Self { + Self { domain, migrations } + } + + fn run_unchecked(&self, connection: &Connection) -> Result<()> { + connection.exec(self.migrations.join(";\n")) + } + + pub fn run(&self, connection: &Connection) -> Result<()> { + // Setup the migrations table unconditionally + MIGRATIONS_MIGRATION.run_unchecked(connection)?; + + let completed_migrations = connection + .prepare(indoc! {" + SELECT domain, step, migration FROM migrations + WHERE domain = ? + ORDER BY step + "})? + .bound(self.domain)? + .rows::<(String, usize, String)>()?; + + let mut store_completed_migration = connection + .prepare("INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)")?; + + for (index, migration) in self.migrations.iter().enumerate() { + if let Some((_, _, completed_migration)) = completed_migrations.get(index) { + if completed_migration != migration { + return Err(anyhow!(formatdoc! {" + Migration changed for {} at step {} + + Stored migration: + {} + + Proposed migration: + {}", self.domain, index, completed_migration, migration})); + } else { + // Migration already run. Continue + continue; + } + } + + connection.exec(migration)?; + store_completed_migration + .bound((self.domain, index, *migration))? + .run()?; + } + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use indoc::indoc; + + use crate::{connection::Connection, migrations::Migration}; + + #[test] + fn test_migrations_are_added_to_table() { + let connection = Connection::open_memory("migrations_are_added_to_table"); + + // Create first migration with a single step and run it + let mut migration = Migration::new( + "test", + &[indoc! {" + CREATE TABLE test1 ( + a TEXT, + b TEXT + );"}], + ); + migration.run(&connection).unwrap(); + + // Verify it got added to the migrations table + assert_eq!( + &connection + .prepare("SELECT (migration) FROM migrations") + .unwrap() + .rows::() + .unwrap()[..], + migration.migrations + ); + + // Add another step to the migration and run it again + migration.migrations = &[ + indoc! {" + CREATE TABLE test1 ( + a TEXT, + b TEXT + );"}, + indoc! {" + CREATE TABLE test2 ( + c TEXT, + d TEXT + );"}, + ]; + migration.run(&connection).unwrap(); + + // Verify it is also added to the migrations table + assert_eq!( + &connection + .prepare("SELECT (migration) FROM migrations") + .unwrap() + .rows::() + .unwrap()[..], + migration.migrations + ); + } + + #[test] + fn test_migration_setup_works() { + let connection = Connection::open_memory("migration_setup_works"); + + connection + .exec(indoc! {"CREATE TABLE IF NOT EXISTS migrations ( + domain TEXT, + step INTEGER, + migration TEXT + );"}) + .unwrap(); + + let mut store_completed_migration = connection + .prepare(indoc! 
{" + INSERT INTO migrations (domain, step, migration) + VALUES (?, ?, ?)"}) + .unwrap(); + + let domain = "test_domain"; + for i in 0..5 { + // Create a table forcing a schema change + connection + .exec(format!("CREATE TABLE table{} ( test TEXT );", i)) + .unwrap(); + + store_completed_migration + .bound((domain, i, i.to_string())) + .unwrap() + .run() + .unwrap(); + } + } + + #[test] + fn migrations_dont_rerun() { + let connection = Connection::open_memory("migrations_dont_rerun"); + + // Create migration which clears a table + let migration = Migration::new("test", &["DELETE FROM test_table"]); + + // Manually create the table for that migration with a row + connection + .exec(indoc! {" + CREATE TABLE test_table ( + test_column INTEGER + ); + INSERT INTO test_table (test_column) VALUES (1)"}) + .unwrap(); + + assert_eq!( + connection + .prepare("SELECT * FROM test_table") + .unwrap() + .row::() + .unwrap(), + 1 + ); + + // Run the migration verifying that the row got dropped + migration.run(&connection).unwrap(); + assert_eq!( + connection + .prepare("SELECT * FROM test_table") + .unwrap() + .rows::() + .unwrap(), + Vec::new() + ); + + // Recreate the dropped row + connection + .exec("INSERT INTO test_table (test_column) VALUES (2)") + .unwrap(); + + // Run the same migration again and verify that the table was left unchanged + migration.run(&connection).unwrap(); + assert_eq!( + connection + .prepare("SELECT * FROM test_table") + .unwrap() + .row::() + .unwrap(), + 2 + ); + } + + #[test] + fn changed_migration_fails() { + let connection = Connection::open_memory("changed_migration_fails"); + + // Create a migration with two steps and run it + Migration::new( + "test migration", + &[ + indoc! {" + CREATE TABLE test ( + col INTEGER + )"}, + indoc! {" + INSERT INTO test (col) VALUES (1)"}, + ], + ) + .run(&connection) + .unwrap(); + + // Create another migration with the same domain but different steps + let second_migration_result = Migration::new( + "test migration", + &[ + indoc! {" + CREATE TABLE test ( + color INTEGER + )"}, + indoc! {" + INSERT INTO test (color) VALUES (1)"}, + ], + ) + .run(&connection); + + // Verify new migration returns error when run + assert!(second_migration_result.is_err()) + } +} diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs new file mode 100644 index 0000000000..749c0dc948 --- /dev/null +++ b/crates/sqlez/src/savepoint.rs @@ -0,0 +1,110 @@ +use anyhow::Result; + +use crate::connection::Connection; + +impl Connection { + // Run a set of commands within the context of a `SAVEPOINT name`. If the callback + // returns Ok(None) or Err(_), the savepoint will be rolled back. Otherwise, the save + // point is released. + pub fn with_savepoint(&mut self, name: impl AsRef, f: F) -> Result> + where + F: FnOnce(&mut Connection) -> Result>, + { + let name = name.as_ref().to_owned(); + self.exec(format!("SAVEPOINT {}", &name))?; + let result = f(self); + match result { + Ok(Some(_)) => { + self.exec(format!("RELEASE {}", name))?; + } + Ok(None) | Err(_) => { + self.exec(format!("ROLLBACK TO {}", name))?; + self.exec(format!("RELEASE {}", name))?; + } + } + result + } +} + +#[cfg(test)] +mod tests { + use crate::connection::Connection; + use anyhow::Result; + use indoc::indoc; + + #[test] + fn test_nested_savepoints() -> Result<()> { + let mut connection = Connection::open_memory("nested_savepoints"); + + connection + .exec(indoc! 
{" + CREATE TABLE text ( + text TEXT, + idx INTEGER + );"}) + .unwrap(); + + let save1_text = "test save1"; + let save2_text = "test save2"; + + connection.with_savepoint("first", |save1| { + save1 + .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? + .bound((save1_text, 1))? + .run()?; + + assert!(save1 + .with_savepoint("second", |save2| -> Result, anyhow::Error> { + save2 + .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? + .bound((save2_text, 2))? + .run()?; + + assert_eq!( + save2 + .prepare("SELECT text FROM text ORDER BY text.idx ASC")? + .rows::()?, + vec![save1_text, save2_text], + ); + + anyhow::bail!("Failed second save point :(") + }) + .err() + .is_some()); + + assert_eq!( + save1 + .prepare("SELECT text FROM text ORDER BY text.idx ASC")? + .rows::()?, + vec![save1_text], + ); + + save1.with_savepoint("second", |save2| { + save2 + .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? + .bound((save2_text, 2))? + .run()?; + + assert_eq!( + save2 + .prepare("SELECT text FROM text ORDER BY text.idx ASC")? + .rows::()?, + vec![save1_text, save2_text], + ); + + Ok(Some(())) + })?; + + assert_eq!( + save1 + .prepare("SELECT text FROM text ORDER BY text.idx ASC")? + .rows::()?, + vec![save1_text, save2_text], + ); + + Ok(Some(())) + })?; + + Ok(()) + } +} diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs new file mode 100644 index 0000000000..774cda0e34 --- /dev/null +++ b/crates/sqlez/src/statement.rs @@ -0,0 +1,342 @@ +use std::ffi::{c_int, CString}; +use std::marker::PhantomData; +use std::{slice, str}; + +use anyhow::{anyhow, Context, Result}; +use libsqlite3_sys::*; + +use crate::bindable::{Bind, Column}; +use crate::connection::Connection; + +pub struct Statement<'a> { + raw_statement: *mut sqlite3_stmt, + connection: &'a Connection, + phantom: PhantomData, +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum StepResult { + Row, + Done, + Misuse, + Other(i32), +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum SqlType { + Text, + Integer, + Blob, + Float, + Null, +} + +impl<'a> Statement<'a> { + pub fn prepare>(connection: &'a Connection, query: T) -> Result { + let mut statement = Self { + raw_statement: 0 as *mut _, + connection, + phantom: PhantomData, + }; + + unsafe { + sqlite3_prepare_v2( + connection.sqlite3, + CString::new(query.as_ref())?.as_ptr(), + -1, + &mut statement.raw_statement, + 0 as *mut _, + ); + + connection.last_error().context("Prepare call failed.")?; + } + + Ok(statement) + } + + pub fn reset(&mut self) { + unsafe { + sqlite3_reset(self.raw_statement); + } + } + + pub fn bind_blob(&self, index: i32, blob: &[u8]) -> Result<()> { + let index = index as c_int; + let blob_pointer = blob.as_ptr() as *const _; + let len = blob.len() as c_int; + unsafe { + sqlite3_bind_blob( + self.raw_statement, + index, + blob_pointer, + len, + SQLITE_TRANSIENT(), + ); + } + self.connection.last_error() + } + + pub fn column_blob<'b>(&'b mut self, index: i32) -> Result<&'b [u8]> { + let index = index as c_int; + let pointer = unsafe { sqlite3_column_blob(self.raw_statement, index) }; + + self.connection.last_error()?; + if pointer.is_null() { + return Ok(&[]); + } + let len = unsafe { sqlite3_column_bytes(self.raw_statement, index) as usize }; + self.connection.last_error()?; + unsafe { Ok(slice::from_raw_parts(pointer as *const u8, len)) } + } + + pub fn bind_double(&self, index: i32, double: f64) -> Result<()> { + let index = index as c_int; + + unsafe { + sqlite3_bind_double(self.raw_statement, index, double); + } + 
self.connection.last_error() + } + + pub fn column_double(&self, index: i32) -> Result { + let index = index as c_int; + let result = unsafe { sqlite3_column_double(self.raw_statement, index) }; + self.connection.last_error()?; + Ok(result) + } + + pub fn bind_int(&self, index: i32, int: i32) -> Result<()> { + let index = index as c_int; + + unsafe { + sqlite3_bind_int(self.raw_statement, index, int); + } + self.connection.last_error() + } + + pub fn column_int(&self, index: i32) -> Result { + let index = index as c_int; + let result = unsafe { sqlite3_column_int(self.raw_statement, index) }; + self.connection.last_error()?; + Ok(result) + } + + pub fn bind_int64(&self, index: i32, int: i64) -> Result<()> { + let index = index as c_int; + unsafe { + sqlite3_bind_int64(self.raw_statement, index, int); + } + self.connection.last_error() + } + + pub fn column_int64(&self, index: i32) -> Result { + let index = index as c_int; + let result = unsafe { sqlite3_column_int64(self.raw_statement, index) }; + self.connection.last_error()?; + Ok(result) + } + + pub fn bind_null(&self, index: i32) -> Result<()> { + let index = index as c_int; + unsafe { + sqlite3_bind_null(self.raw_statement, index); + } + self.connection.last_error() + } + + pub fn bind_text(&self, index: i32, text: &str) -> Result<()> { + let index = index as c_int; + let text_pointer = text.as_ptr() as *const _; + let len = text.len() as c_int; + unsafe { + sqlite3_bind_blob( + self.raw_statement, + index, + text_pointer, + len, + SQLITE_TRANSIENT(), + ); + } + self.connection.last_error() + } + + pub fn column_text<'b>(&'b mut self, index: i32) -> Result<&'b str> { + let index = index as c_int; + let pointer = unsafe { sqlite3_column_text(self.raw_statement, index) }; + + self.connection.last_error()?; + if pointer.is_null() { + return Ok(""); + } + let len = unsafe { sqlite3_column_bytes(self.raw_statement, index) as usize }; + self.connection.last_error()?; + + let slice = unsafe { slice::from_raw_parts(pointer as *const u8, len) }; + Ok(str::from_utf8(slice)?) + } + + pub fn bind(&self, value: T) -> Result<()> { + value.bind(self, 1)?; + Ok(()) + } + + pub fn column(&mut self) -> Result { + let (result, _) = T::column(self, 0)?; + Ok(result) + } + + pub fn column_type(&mut self, index: i32) -> Result { + let result = unsafe { sqlite3_column_type(self.raw_statement, index) }; // SELECT FROM TABLE + self.connection.last_error()?; + match result { + SQLITE_INTEGER => Ok(SqlType::Integer), + SQLITE_FLOAT => Ok(SqlType::Float), + SQLITE_TEXT => Ok(SqlType::Text), + SQLITE_BLOB => Ok(SqlType::Blob), + SQLITE_NULL => Ok(SqlType::Null), + _ => Err(anyhow!("Column type returned was incorrect ")), + } + } + + pub fn bound(&mut self, bindings: impl Bind) -> Result<&mut Self> { + self.bind(bindings)?; + Ok(self) + } + + fn step(&mut self) -> Result { + unsafe { + match sqlite3_step(self.raw_statement) { + SQLITE_ROW => Ok(StepResult::Row), + SQLITE_DONE => Ok(StepResult::Done), + SQLITE_MISUSE => Ok(StepResult::Misuse), + other => self + .connection + .last_error() + .map(|_| StepResult::Other(other)), + } + } + } + + pub fn run(&mut self) -> Result<()> { + fn logic(this: &mut Statement) -> Result<()> { + while this.step()? 
== StepResult::Row {} + Ok(()) + } + let result = logic(self); + self.reset(); + result + } + + pub fn map(&mut self, callback: impl FnMut(&mut Statement) -> Result) -> Result> { + fn logic( + this: &mut Statement, + mut callback: impl FnMut(&mut Statement) -> Result, + ) -> Result> { + let mut mapped_rows = Vec::new(); + while this.step()? == StepResult::Row { + mapped_rows.push(callback(this)?); + } + Ok(mapped_rows) + } + + let result = logic(self, callback); + self.reset(); + result + } + + pub fn rows(&mut self) -> Result> { + self.map(|s| s.column::()) + } + + pub fn single(&mut self, callback: impl FnOnce(&mut Statement) -> Result) -> Result { + fn logic( + this: &mut Statement, + callback: impl FnOnce(&mut Statement) -> Result, + ) -> Result { + if this.step()? != StepResult::Row { + return Err(anyhow!( + "Single(Map) called with query that returns no rows." + )); + } + callback(this) + } + let result = logic(self, callback); + self.reset(); + result + } + + pub fn row(&mut self) -> Result { + self.single(|this| this.column::()) + } + + pub fn maybe( + &mut self, + callback: impl FnOnce(&mut Statement) -> Result, + ) -> Result> { + fn logic( + this: &mut Statement, + callback: impl FnOnce(&mut Statement) -> Result, + ) -> Result> { + if this.step()? != StepResult::Row { + return Ok(None); + } + callback(this).map(|r| Some(r)) + } + let result = logic(self, callback); + self.reset(); + result + } + + pub fn maybe_row(&mut self) -> Result> { + self.maybe(|this| this.column::()) + } +} + +impl<'a> Drop for Statement<'a> { + fn drop(&mut self) { + unsafe { + sqlite3_finalize(self.raw_statement); + self.connection + .last_error() + .expect("sqlite3 finalize failed for statement :("); + }; + } +} + +#[cfg(test)] +mod test { + use indoc::indoc; + + use crate::{connection::Connection, statement::StepResult}; + + #[test] + fn blob_round_trips() { + let connection1 = Connection::open_memory("blob_round_trips"); + connection1 + .exec(indoc! 
{" + CREATE TABLE blobs ( + data BLOB + );"}) + .unwrap(); + + let blob = &[0, 1, 2, 4, 8, 16, 32, 64]; + + let mut write = connection1 + .prepare("INSERT INTO blobs (data) VALUES (?);") + .unwrap(); + write.bind_blob(1, blob).unwrap(); + assert_eq!(write.step().unwrap(), StepResult::Done); + + // Read the blob from the + let connection2 = Connection::open_memory("blob_round_trips"); + let mut read = connection2.prepare("SELECT * FROM blobs;").unwrap(); + assert_eq!(read.step().unwrap(), StepResult::Row); + assert_eq!(read.column_blob(0).unwrap(), blob); + assert_eq!(read.step().unwrap(), StepResult::Done); + + // Delete the added blob and verify its deleted on the other side + connection2.exec("DELETE FROM blobs;").unwrap(); + let mut read = connection1.prepare("SELECT * FROM blobs;").unwrap(); + assert_eq!(read.step().unwrap(), StepResult::Done); + } +} diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs new file mode 100644 index 0000000000..8885edc2c0 --- /dev/null +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -0,0 +1,78 @@ +use std::{ops::Deref, sync::Arc}; + +use connection::Connection; +use thread_local::ThreadLocal; + +use crate::connection; + +pub struct ThreadSafeConnection { + uri: Arc, + persistent: bool, + initialize_query: Option<&'static str>, + connection: Arc>, +} + +impl ThreadSafeConnection { + pub fn new(uri: &str, persistent: bool) -> Self { + Self { + uri: Arc::from(uri), + persistent, + initialize_query: None, + connection: Default::default(), + } + } + + /// Sets the query to run every time a connection is opened. This must + /// be infallible (EG only use pragma statements) + pub fn with_initialize_query(mut self, initialize_query: &'static str) -> Self { + self.initialize_query = Some(initialize_query); + self + } + + /// Opens a new db connection with the initialized file path. This is internal and only + /// called from the deref function. + /// If opening fails, the connection falls back to a shared memory connection + fn open_file(&self) -> Connection { + Connection::open_file(self.uri.as_ref()) + } + + /// Opens a shared memory connection using the file path as the identifier. 
This unwraps + /// as we expect it always to succeed + fn open_shared_memory(&self) -> Connection { + Connection::open_memory(self.uri.as_ref()) + } +} + +impl Clone for ThreadSafeConnection { + fn clone(&self) -> Self { + Self { + uri: self.uri.clone(), + persistent: self.persistent, + initialize_query: self.initialize_query.clone(), + connection: self.connection.clone(), + } + } +} + +impl Deref for ThreadSafeConnection { + type Target = Connection; + + fn deref(&self) -> &Self::Target { + self.connection.get_or(|| { + let connection = if self.persistent { + self.open_file() + } else { + self.open_shared_memory() + }; + + if let Some(initialize_query) = self.initialize_query { + connection.exec(initialize_query).expect(&format!( + "Initialize query failed to execute: {}", + initialize_query + )); + } + + connection + }) + } +} From 777f05eb76557f83d8f03ef6abf2a6dcafa6f6d3 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 1 Nov 2022 15:58:23 -0700 Subject: [PATCH 135/240] Finished implementing the workspace stuff --- crates/db/src/db.rs | 7 +- crates/db/src/kvp.rs | 65 ++--- crates/db/src/migrations.rs | 28 +- crates/db/src/pane.rs | 18 +- crates/db/src/workspace.rs | 281 +++++++-------------- crates/sqlez/src/connection.rs | 19 +- crates/sqlez/src/migrations.rs | 11 +- crates/sqlez/src/savepoint.rs | 74 +++++- crates/sqlez/src/statement.rs | 20 +- crates/sqlez/src/thread_safe_connection.rs | 18 +- 10 files changed, 263 insertions(+), 278 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index e5740c5edb..857b5f273e 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -10,6 +10,8 @@ use std::path::Path; use anyhow::Result; use indoc::indoc; +use kvp::KVP_MIGRATION; +use pane::PANE_MIGRATIONS; use sqlez::connection::Connection; use sqlez::thread_safe_connection::ThreadSafeConnection; @@ -42,7 +44,8 @@ impl Db { PRAGMA synchronous=NORMAL; PRAGMA foreign_keys=TRUE; PRAGMA case_sensitive_like=TRUE; - "}), + "}) + .with_migrations(&[KVP_MIGRATION, WORKSPACES_MIGRATION, PANE_MIGRATIONS]), ) } @@ -64,7 +67,7 @@ impl Db { pub fn write_to>(&self, dest: P) -> Result<()> { let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref()); - self.backup(&destination) + self.backup_main(&destination) } } diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 96f13d8040..6db99831f7 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -1,55 +1,38 @@ -use anyhow::Result; -use rusqlite::OptionalExtension; - use super::Db; +use anyhow::Result; +use indoc::indoc; +use sqlez::migrations::Migration; -pub(crate) const KVP_M_1: &str = " -CREATE TABLE kv_store( - key TEXT PRIMARY KEY, - value TEXT NOT NULL -) STRICT; -"; +pub(crate) const KVP_MIGRATION: Migration = Migration::new( + "kvp", + &[indoc! {" + CREATE TABLE kv_store( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ) STRICT; + "}], +); impl Db { pub fn read_kvp(&self, key: &str) -> Result> { - self.real() - .map(|db| { - let lock = db.connection.lock(); - let mut stmt = lock.prepare_cached("SELECT value FROM kv_store WHERE key = (?)")?; - - Ok(stmt.query_row([key], |row| row.get(0)).optional()?) - }) - .unwrap_or(Ok(None)) + self.0 + .prepare("SELECT value FROM kv_store WHERE key = (?)")? + .bind(key)? 
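+            // `maybe_row` steps the statement once and yields Ok(None) when no
+            // row matches the key, rather than treating the miss as an error.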
+ .maybe_row() } pub fn write_kvp(&self, key: &str, value: &str) -> Result<()> { - self.real() - .map(|db| { - let lock = db.connection.lock(); - - let mut stmt = lock.prepare_cached( - "INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))", - )?; - - stmt.execute([key, value])?; - - Ok(()) - }) - .unwrap_or(Ok(())) + self.0 + .prepare("INSERT OR REPLACE INTO kv_store(key, value) VALUES (?, ?)")? + .bind((key, value))? + .exec() } pub fn delete_kvp(&self, key: &str) -> Result<()> { - self.real() - .map(|db| { - let lock = db.connection.lock(); - - let mut stmt = lock.prepare_cached("DELETE FROM kv_store WHERE key = (?)")?; - - stmt.execute([key])?; - - Ok(()) - }) - .unwrap_or(Ok(())) + self.0 + .prepare("DELETE FROM kv_store WHERE key = (?)")? + .bind(key)? + .exec() } } diff --git a/crates/db/src/migrations.rs b/crates/db/src/migrations.rs index 8caa528fc1..a95654f420 100644 --- a/crates/db/src/migrations.rs +++ b/crates/db/src/migrations.rs @@ -1,16 +1,14 @@ -use rusqlite_migration::{Migrations, M}; +// // use crate::items::ITEMS_M_1; +// use crate::{kvp::KVP_M_1, pane::PANE_M_1, WORKSPACES_MIGRATION}; -// use crate::items::ITEMS_M_1; -use crate::{kvp::KVP_M_1, pane::PANE_M_1, WORKSPACE_M_1}; - -// This must be ordered by development time! Only ever add new migrations to the end!! -// Bad things will probably happen if you don't monotonically edit this vec!!!! -// And no re-ordering ever!!!!!!!!!! The results of these migrations are on the user's -// file system and so everything we do here is locked in _f_o_r_e_v_e_r_. -lazy_static::lazy_static! { - pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![ - M::up(KVP_M_1), - M::up(WORKSPACE_M_1), - M::up(PANE_M_1) - ]); -} +// // This must be ordered by development time! Only ever add new migrations to the end!! +// // Bad things will probably happen if you don't monotonically edit this vec!!!! +// // And no re-ordering ever!!!!!!!!!! The results of these migrations are on the user's +// // file system and so everything we do here is locked in _f_o_r_e_v_e_r_. +// lazy_static::lazy_static! { +// pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![ +// M::up(KVP_M_1), +// M::up(WORKSPACE_M_1), +// M::up(PANE_M_1) +// ]); +// } diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 0a1812c60c..0716d19b1d 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,15 +1,14 @@ use gpui::Axis; +use indoc::indoc; +use sqlez::migrations::Migration; -use rusqlite::{OptionalExtension, Connection}; -use serde::{Deserialize, Serialize}; -use serde_rusqlite::{from_row, to_params_named}; use crate::{items::ItemId, workspace::WorkspaceId}; use super::Db; -pub(crate) const PANE_M_1: &str = " +pub(crate) const PANE_MIGRATIONS: Migration = Migration::new("pane", &[indoc! 
{" CREATE TABLE dock_panes( dock_pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, @@ -64,7 +63,7 @@ CREATE TABLE dock_items( FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE, FOREIGN KEY(item_id) REFERENCES items(item_id)ON DELETE CASCADE ) STRICT; -"; +"}]); // We have an many-branched, unbalanced tree with three types: // Pane Groups @@ -140,7 +139,7 @@ pub struct SerializedPane { //********* CURRENTLY IN USE TYPES: ********* -#[derive(Default, Debug, PartialEq, Eq, Deserialize, Serialize)] +#[derive(Default, Debug, PartialEq, Eq)] pub enum DockAnchor { #[default] Bottom, @@ -148,7 +147,7 @@ pub enum DockAnchor { Expanded, } -#[derive(Default, Debug, PartialEq, Eq, Deserialize, Serialize)] +#[derive(Default, Debug, PartialEq, Eq)] pub struct SerializedDockPane { pub anchor_position: DockAnchor, pub visible: bool, @@ -160,7 +159,7 @@ impl SerializedDockPane { } } -#[derive(Default, Debug, PartialEq, Eq, Deserialize, Serialize)] +#[derive(Default, Debug, PartialEq, Eq)] pub(crate) struct DockRow { workspace_id: WorkspaceId, anchor_position: DockAnchor, @@ -298,12 +297,11 @@ mod tests { let workspace = db.workspace_for_roots(&["/tmp"]); let dock_pane = SerializedDockPane { - workspace_id: workspace.workspace_id, anchor_position: DockAnchor::Expanded, visible: true, }; - db.save_dock_pane(&dock_pane); + db.save_dock_pane(workspace.workspace_id, dock_pane); let new_workspace = db.workspace_for_roots(&["/tmp"]); diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 5237caa23c..16ff0e78c0 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -6,12 +6,12 @@ use std::{ os::unix::prelude::OsStrExt, path::{Path, PathBuf}, sync::Arc, - time::{SystemTime, UNIX_EPOCH}, }; -use anyhow::Result; use indoc::indoc; -use sqlez::{connection::Connection, migrations::Migration}; +use sqlez::{ + connection::Connection, migrations::Migration, +}; use crate::pane::SerializedDockPane; @@ -20,8 +20,8 @@ use super::Db; // If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging // you might want to update some of the parsing code as well, I've left the variations in but commented // out. This will panic if run on an existing db that has already been migrated -const WORKSPACES_MIGRATION: Migration = Migration::new( - "migrations", +pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( + "workspace", &[indoc! {" CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY, @@ -53,8 +53,8 @@ pub struct SerializedWorkspace { } impl Db { - /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, return the - /// the last workspace id + /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, + /// returns the last workspace which was updated pub fn workspace_for_roots
<P>
(&self, worktree_roots: &[P]) -> SerializedWorkspace where P: AsRef + Debug, @@ -80,23 +80,21 @@ impl Db { where P: AsRef + Debug, { - let result = (|| { - let tx = self.transaction()?; - tx.execute("INSERT INTO workspaces(last_opened_timestamp) VALUES" (?), [current_millis()?])?; + let res = self.with_savepoint("make_new_workspace", |conn| { + let workspace_id = WorkspaceId( + conn.prepare("INSERT INTO workspaces DEFAULT VALUES")? + .insert()?, + ); - let id = WorkspaceId(tx.last_insert_rowid()); - - update_worktree_roots(&tx, &id, worktree_roots)?; - - tx.commit()?; + update_worktree_roots(conn, &workspace_id, worktree_roots)?; Ok(SerializedWorkspace { - workspace_id: id, + workspace_id, dock_pane: None, }) - })(); + }); - match result { + match res { Ok(serialized_workspace) => serialized_workspace, Err(err) => { log::error!("Failed to insert new workspace into DB: {}", err); @@ -109,19 +107,13 @@ impl Db { where P: AsRef + Debug, { - self.real() - .map(|db| { - let lock = db.connection.lock(); - - match get_workspace_id(worktree_roots, &lock) { - Ok(workspace_id) => workspace_id, - Err(err) => { - log::error!("Failed to get workspace_id: {}", err); - None - } - } - }) - .unwrap_or(None) + match get_workspace_id(worktree_roots, &self) { + Ok(workspace_id) => workspace_id, + Err(err) => { + log::error!("Failed to get workspace_id: {}", err); + None + } + } } // fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow { @@ -135,123 +127,73 @@ impl Db { where P: AsRef + Debug, { - fn logic
<P>
( - connection: &mut Connection, - workspace_id: &WorkspaceId, - worktree_roots: &[P], - ) -> Result<()> - where - P: AsRef + Debug, - { - let tx = connection.transaction()?; - update_worktree_roots(&tx, workspace_id, worktree_roots)?; - tx.commit()?; - Ok(()) + match self.with_savepoint("update_worktrees", |conn| { + update_worktree_roots(conn, workspace_id, worktree_roots) + }) { + Ok(_) => {} + Err(err) => log::error!( + "Failed to update workspace {:?} with roots {:?}, error: {}", + workspace_id, + worktree_roots, + err + ), } - - self.real().map(|db| { - let mut lock = db.connection.lock(); - - match logic(&mut lock, workspace_id, worktree_roots) { - Ok(_) => {} - Err(err) => { - log::error!( - "Failed to update the worktree roots for {:?}, roots: {:?}, error: {}", - workspace_id, - worktree_roots, - err - ); - } - } - }); } fn last_workspace_id(&self) -> Option { - fn logic(connection: &mut Connection) -> Result> { - let mut stmt = connection.prepare( + let res = self + .prepare( "SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT 1", - )?; + ) + .and_then(|stmt| stmt.maybe_row()) + .map(|row| row.map(|id| WorkspaceId(id))); - Ok(stmt - .query_row([], |row| Ok(WorkspaceId(row.get(0)?))) - .optional()?) + match res { + Ok(result) => result, + Err(err) => { + log::error!("Failed to get last workspace id, err: {}", err); + return None; + } } - - self.real() - .map(|db| { - let mut lock = db.connection.lock(); - - match logic(&mut lock) { - Ok(result) => result, - Err(err) => { - log::error!("Failed to get last workspace id, err: {}", err); - None - } - } - }) - .unwrap_or(None) } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, Vec>)> { - fn logic( - connection: &mut Connection, - limit: usize, - ) -> Result>)>, anyhow::Error> { - let tx = connection.transaction()?; - let result = { - let mut stmt = tx.prepare( - "SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT ?", - )?; - - let workspace_ids = stmt - .query_map([limit], |row| Ok(WorkspaceId(row.get(0)?)))? - .collect::, rusqlite::Error>>()?; - - let mut result = Vec::new(); - let mut stmt = - tx.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; - for workspace_id in workspace_ids { - let roots = stmt - .query_map([workspace_id.0], |row| { - let row = row.get::<_, Vec>(0)?; - Ok(PathBuf::from(OsStr::from_bytes(&row)).into()) - // If you need to debug this, here's the string parsing: - // let row = row.get::<_, String>(0)?; - // Ok(PathBuf::from(row).into()) - })? - .collect::, rusqlite::Error>>()?; - result.push((workspace_id, roots)) - } - - result - }; - tx.commit()?; - return Ok(result); - } - - self.real() - .map(|db| { - let mut lock = db.connection.lock(); - - match logic(&mut lock, limit) { - Ok(result) => result, - Err(err) => { - log::error!("Failed to get recent workspaces, err: {}", err); - Vec::new() - } - } - }) - .unwrap_or_else(|| Vec::new()) + let res = self.with_savepoint("recent_workspaces", |conn| { + let ids = conn.prepare("SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT ?")? + .bind(limit)? + .rows::()? + .iter() + .map(|row| WorkspaceId(*row)); + + let result = Vec::new(); + + let stmt = conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; + for workspace_id in ids { + let roots = stmt.bind(workspace_id.0)? + .rows::>()? 
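+                    // Roots are stored as BLOBs (see WORKSPACES_MIGRATION), so they
+                    // come back as raw bytes and are rebuilt into paths below.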
+ .iter() + .map(|row| { + PathBuf::from(OsStr::from_bytes(&row)).into() + }) + .collect(); + result.push((workspace_id, roots)) + } + + + Ok(result) + }); + + match res { + Ok(result) => result, + Err(err) => { + log::error!("Failed to get recent workspaces, err: {}", err); + Vec::new() + } + } } } -fn current_millis() -> Result { - // SQLite only supports u64 integers, which means this code will trigger - // undefined behavior in 584 million years. It's probably fine. - Ok(SystemTime::now().duration_since(UNIX_EPOCH)?.as_millis() as u64) -} - fn update_worktree_roots
<P>
( connection: &Connection, workspace_id: &WorkspaceId, @@ -265,33 +207,32 @@ where if let Some(preexisting_id) = preexisting_id { if preexisting_id != *workspace_id { // Should also delete fields in other tables with cascading updates - connection.execute( + connection.prepare( "DELETE FROM workspaces WHERE workspace_id = ?", - [preexisting_id.0], - )?; + )? + .bind(preexisting_id.0)? + .exec()?; } } - connection.execute( - "DELETE FROM worktree_roots WHERE workspace_id = ?", - [workspace_id.0], - )?; + connection + .prepare("DELETE FROM worktree_roots WHERE workspace_id = ?")? + .bind(workspace_id.0)? + .exec()?; for root in worktree_roots { let path = root.as_ref().as_os_str().as_bytes(); // If you need to debug this, here's the string parsing: // let path = root.as_ref().to_string_lossy().to_string(); - connection.execute( - "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", - params![workspace_id.0, path], - )?; + connection.prepare("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")? + .bind((workspace_id.0, path))? + .exec()?; } - connection.execute( - "UPDATE workspaces SET last_opened_timestamp = ? WHERE workspace_id = ?", - params![current_millis()?, workspace_id.0], - )?; + connection.prepare("UPDATE workspaces SET last_opened_timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")? + .bind(workspace_id.0)? + .exec()?; Ok(()) } @@ -300,13 +241,6 @@ fn get_workspace_id
<P>
(worktree_roots: &[P], connection: &Connection) -> Result< where P: AsRef + Debug, { - // fn logic
// <P>
( - // worktree_roots: &[P], - // connection: &Connection, - // ) -> Result, anyhow::Error> - // where - // P: AsRef + Debug, - // { // Short circuit if we can if worktree_roots.len() == 0 { return Ok(None); @@ -324,6 +258,7 @@ where } } array_binding_stmt.push(')'); + // Any workspace can have multiple independent paths, and these paths // can overlap in the database. Take this test data for example: // @@ -393,43 +328,19 @@ where // caching it. let mut stmt = connection.prepare(&query)?; // Make sure we bound the parameters correctly - debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count()); + debug_assert!(worktree_roots.len() as i32 + 1 == stmt.parameter_count()); for i in 0..worktree_roots.len() { let path = &worktree_roots[i].as_ref().as_os_str().as_bytes(); // If you need to debug this, here's the string parsing: // let path = &worktree_roots[i].as_ref().to_string_lossy().to_string() - stmt.raw_bind_parameter(i + 1, path)? + stmt.bind_value(*path, i as i32 + 1); } // No -1, because SQLite is 1 based - stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?; + stmt.bind_value(worktree_roots.len(), worktree_roots.len() as i32 + 1)?; - let mut rows = stmt.raw_query(); - let row = rows.next(); - let result = if let Ok(Some(row)) = row { - Ok(Some(WorkspaceId(row.get(0)?))) - } else { - Ok(None) - }; - - // Ensure that this query only returns one row. The PRIMARY KEY constraint should catch this case - // but this is here to catch if someone refactors that constraint out. - debug_assert!(matches!(rows.next(), Ok(None))); - - result - // } - - // match logic(worktree_roots, connection) { - // Ok(result) => result, - // Err(err) => { - // log::error!( - // "Failed to get the workspace ID for paths {:?}, err: {}", - // worktree_roots, - // err - // ); - // None - // } - // } + stmt.maybe_row() + .map(|row| row.map(|id| WorkspaceId(id))) } #[cfg(test)] diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 81bb9dfe78..be52978495 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -53,6 +53,15 @@ impl Connection { self.persistent } + pub(crate) fn last_insert_id(&self) -> i64 { + unsafe { sqlite3_last_insert_rowid(self.sqlite3) } + } + + pub fn insert(&self, query: impl AsRef) -> Result { + self.exec(query)?; + Ok(self.last_insert_id()) + } + pub fn exec(&self, query: impl AsRef) -> Result<()> { unsafe { sqlite3_exec( @@ -140,9 +149,9 @@ mod test { connection .prepare("INSERT INTO text (text) VALUES (?);") .unwrap() - .bound(text) + .bind(text) .unwrap() - .run() + .exec() .unwrap(); assert_eq!( @@ -176,8 +185,8 @@ mod test { .prepare("INSERT INTO test (text, integer, blob) VALUES (?, ?, ?)") .unwrap(); - insert.bound(tuple1.clone()).unwrap().run().unwrap(); - insert.bound(tuple2.clone()).unwrap().run().unwrap(); + insert.bind(tuple1.clone()).unwrap().exec().unwrap(); + insert.bind(tuple2.clone()).unwrap().exec().unwrap(); assert_eq!( connection @@ -203,7 +212,7 @@ mod test { .prepare("INSERT INTO blobs (data) VALUES (?);") .unwrap(); write.bind_blob(1, blob).unwrap(); - write.run().unwrap(); + write.exec().unwrap(); // Backup connection1 to connection2 let connection2 = Connection::open_memory("backup_works_other"); diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 4721b353c6..3c0771c0fe 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -22,6 +22,7 @@ const MIGRATIONS_MIGRATION: Migration = Migration::new( "}], ); +#[derive(Debug)] pub 
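// A minimal sketch of how a `Migration` is declared and applied
// (the domain name and SQL below are illustrative only, not from the patch):
//
//     const EXAMPLE_MIGRATION: Migration = Migration::new(
//         "example",
//         &["CREATE TABLE example(value TEXT NOT NULL) STRICT;"],
//     );
//
//     // Runs only the steps for this domain that are not yet recorded
//     // in the internal migrations table:
//     EXAMPLE_MIGRATION.run(&connection)?;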
struct Migration { domain: &'static str, migrations: &'static [&'static str], @@ -46,7 +47,7 @@ impl Migration { WHERE domain = ? ORDER BY step "})? - .bound(self.domain)? + .bind(self.domain)? .rows::<(String, usize, String)>()?; let mut store_completed_migration = connection @@ -71,8 +72,8 @@ impl Migration { connection.exec(migration)?; store_completed_migration - .bound((self.domain, index, *migration))? - .run()?; + .bind((self.domain, index, *migration))? + .exec()?; } Ok(()) @@ -162,9 +163,9 @@ mod test { .unwrap(); store_completed_migration - .bound((domain, i, i.to_string())) + .bind((domain, i, i.to_string())) .unwrap() - .run() + .exec() .unwrap(); } } diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs index 749c0dc948..50f28c7390 100644 --- a/crates/sqlez/src/savepoint.rs +++ b/crates/sqlez/src/savepoint.rs @@ -3,10 +3,36 @@ use anyhow::Result; use crate::connection::Connection; impl Connection { + // Run a set of commands within the context of a `SAVEPOINT name`. If the callback + // returns Err(_), the savepoint will be rolled back. Otherwise, the save + // point is released. + pub fn with_savepoint(&mut self, name: impl AsRef, f: F) -> Result + where + F: FnOnce(&mut Connection) -> Result, + { + let name = name.as_ref().to_owned(); + self.exec(format!("SAVEPOINT {}", &name))?; + let result = f(self); + match result { + Ok(_) => { + self.exec(format!("RELEASE {}", name))?; + } + Err(_) => { + self.exec(format!("ROLLBACK TO {}", name))?; + self.exec(format!("RELEASE {}", name))?; + } + } + result + } + // Run a set of commands within the context of a `SAVEPOINT name`. If the callback // returns Ok(None) or Err(_), the savepoint will be rolled back. Otherwise, the save // point is released. - pub fn with_savepoint(&mut self, name: impl AsRef, f: F) -> Result> + pub fn with_savepoint_rollback( + &mut self, + name: impl AsRef, + f: F, + ) -> Result> where F: FnOnce(&mut Connection) -> Result>, { @@ -50,15 +76,15 @@ mod tests { connection.with_savepoint("first", |save1| { save1 .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .bound((save1_text, 1))? - .run()?; + .bind((save1_text, 1))? + .exec()?; assert!(save1 .with_savepoint("second", |save2| -> Result, anyhow::Error> { save2 .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .bound((save2_text, 2))? - .run()?; + .bind((save2_text, 2))? + .exec()?; assert_eq!( save2 @@ -79,11 +105,34 @@ mod tests { vec![save1_text], ); - save1.with_savepoint("second", |save2| { + save1.with_savepoint_rollback::<(), _>("second", |save2| { save2 .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .bound((save2_text, 2))? - .run()?; + .bind((save2_text, 2))? + .exec()?; + + assert_eq!( + save2 + .prepare("SELECT text FROM text ORDER BY text.idx ASC")? + .rows::()?, + vec![save1_text, save2_text], + ); + + Ok(None) + })?; + + assert_eq!( + save1 + .prepare("SELECT text FROM text ORDER BY text.idx ASC")? + .rows::()?, + vec![save1_text], + ); + + save1.with_savepoint_rollback("second", |save2| { + save2 + .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? + .bind((save2_text, 2))? + .exec()?; assert_eq!( save2 @@ -102,9 +151,16 @@ mod tests { vec![save1_text, save2_text], ); - Ok(Some(())) + Ok(()) })?; + assert_eq!( + connection + .prepare("SELECT text FROM text ORDER BY text.idx ASC")? 
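// A minimal sketch of the `with_savepoint` contract documented above,
// reusing this test's `text` table (error handling elided; illustrative,
// not part of the original change):
//
//     let id = connection.with_savepoint("example", |conn| {
//         conn.prepare("INSERT INTO text(text, idx) VALUES (?, ?)")?
//             .bind(("a", 1))?
//             .insert()
//     })?;
//
// An Err(_) from the closure rolls the savepoint back before releasing
// it; an Ok(_) releases it and keeps the writes.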
+ .rows::()?, + vec![save1_text, save2_text], + ); + Ok(()) } } diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index 774cda0e34..ac57847774 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -60,6 +60,10 @@ impl<'a> Statement<'a> { } } + pub fn parameter_count(&self) -> i32 { + unsafe { sqlite3_bind_parameter_count(self.raw_statement) } + } + pub fn bind_blob(&self, index: i32, blob: &[u8]) -> Result<()> { let index = index as c_int; let blob_pointer = blob.as_ptr() as *const _; @@ -175,8 +179,9 @@ impl<'a> Statement<'a> { Ok(str::from_utf8(slice)?) } - pub fn bind(&self, value: T) -> Result<()> { - value.bind(self, 1)?; + pub fn bind_value(&self, value: T, idx: i32) -> Result<()> { + debug_assert!(idx > 0); + value.bind(self, idx)?; Ok(()) } @@ -198,8 +203,8 @@ impl<'a> Statement<'a> { } } - pub fn bound(&mut self, bindings: impl Bind) -> Result<&mut Self> { - self.bind(bindings)?; + pub fn bind(&mut self, bindings: impl Bind) -> Result<&mut Self> { + self.bind_value(bindings, 1)?; Ok(self) } @@ -217,7 +222,12 @@ impl<'a> Statement<'a> { } } - pub fn run(&mut self) -> Result<()> { + pub fn insert(&mut self) -> Result { + self.exec()?; + Ok(self.connection.last_insert_id()) + } + + pub fn exec(&mut self) -> Result<()> { fn logic(this: &mut Statement) -> Result<()> { while this.step()? == StepResult::Row {} Ok(()) diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 8885edc2c0..53d49464be 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -3,12 +3,13 @@ use std::{ops::Deref, sync::Arc}; use connection::Connection; use thread_local::ThreadLocal; -use crate::connection; +use crate::{connection, migrations::Migration}; pub struct ThreadSafeConnection { uri: Arc, persistent: bool, initialize_query: Option<&'static str>, + migrations: Option<&'static [Migration]>, connection: Arc>, } @@ -18,6 +19,7 @@ impl ThreadSafeConnection { uri: Arc::from(uri), persistent, initialize_query: None, + migrations: None, connection: Default::default(), } } @@ -29,6 +31,11 @@ impl ThreadSafeConnection { self } + pub fn with_migrations(mut self, migrations: &'static [Migration]) -> Self { + self.migrations = Some(migrations); + self + } + /// Opens a new db connection with the initialized file path. This is internal and only /// called from the deref function. 
/// If opening fails, the connection falls back to a shared memory connection @@ -49,6 +56,7 @@ impl Clone for ThreadSafeConnection { uri: self.uri.clone(), persistent: self.persistent, initialize_query: self.initialize_query.clone(), + migrations: self.migrations.clone(), connection: self.connection.clone(), } } @@ -72,6 +80,14 @@ impl Deref for ThreadSafeConnection { )); } + if let Some(migrations) = self.migrations { + for migration in migrations { + migration + .run(&connection) + .expect(&format!("Migrations failed to execute: {:?}", migration)); + } + } + connection }) } From 3c1b747f641c29ec4de6111b911b608f80862dbb Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 1 Nov 2022 17:26:03 -0700 Subject: [PATCH 136/240] WIP almost compiling with sqlez --- crates/db/src/kvp.rs | 6 +- crates/db/src/workspace.rs | 65 +++++++++------------- crates/sqlez/src/bindable.rs | 22 ++++++++ crates/sqlez/src/connection.rs | 14 ++++- crates/sqlez/src/migrations.rs | 6 +- crates/sqlez/src/savepoint.rs | 8 +-- crates/sqlez/src/statement.rs | 11 ++-- crates/sqlez/src/thread_safe_connection.rs | 2 + 8 files changed, 77 insertions(+), 57 deletions(-) diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 6db99831f7..a692d73d88 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -17,21 +17,21 @@ impl Db { pub fn read_kvp(&self, key: &str) -> Result> { self.0 .prepare("SELECT value FROM kv_store WHERE key = (?)")? - .bind(key)? + .with_bindings(key)? .maybe_row() } pub fn write_kvp(&self, key: &str, value: &str) -> Result<()> { self.0 .prepare("INSERT OR REPLACE INTO kv_store(key, value) VALUES (?, ?)")? - .bind((key, value))? + .with_bindings((key, value))? .exec() } pub fn delete_kvp(&self, key: &str) -> Result<()> { self.0 .prepare("DELETE FROM kv_store WHERE key = (?)")? - .bind(key)? + .with_bindings(key)? .exec() } } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 16ff0e78c0..126a34676e 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -23,17 +23,17 @@ use super::Db; pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( "workspace", &[indoc! {" - CREATE TABLE workspaces( - workspace_id INTEGER PRIMARY KEY, - timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL - ) STRICT; - - CREATE TABLE worktree_roots( - worktree_root BLOB NOT NULL, - workspace_id INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - PRIMARY KEY(worktree_root, workspace_id) - ) STRICT;"}], + CREATE TABLE workspaces( + workspace_id INTEGER PRIMARY KEY, + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL + ) STRICT; + + CREATE TABLE worktree_roots( + worktree_root BLOB NOT NULL, + workspace_id INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE + PRIMARY KEY(worktree_root, workspace_id) + ) STRICT;"}], ); #[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] @@ -159,9 +159,9 @@ impl Db { /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, Vec>)> { - let res = self.with_savepoint("recent_workspaces", |conn| { + self.with_savepoint("recent_workspaces", |conn| { let ids = conn.prepare("SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT ?")? - .bind(limit)? + .with_bindings(limit)? .rows::()? 
.iter() .map(|row| WorkspaceId(*row)); @@ -170,7 +170,7 @@ impl Db { let stmt = conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; for workspace_id in ids { - let roots = stmt.bind(workspace_id.0)? + let roots = stmt.with_bindings(workspace_id.0)? .rows::>()? .iter() .map(|row| { @@ -180,17 +180,11 @@ impl Db { result.push((workspace_id, roots)) } - Ok(result) - }); - - match res { - Ok(result) => result, - Err(err) => { - log::error!("Failed to get recent workspaces, err: {}", err); - Vec::new() - } - } + }).unwrap_or_else(|err| { + log::error!("Failed to get recent workspaces, err: {}", err); + Vec::new() + }) } } @@ -210,14 +204,14 @@ where connection.prepare( "DELETE FROM workspaces WHERE workspace_id = ?", )? - .bind(preexisting_id.0)? + .with_bindings(preexisting_id.0)? .exec()?; } } connection .prepare("DELETE FROM worktree_roots WHERE workspace_id = ?")? - .bind(workspace_id.0)? + .with_bindings(workspace_id.0)? .exec()?; for root in worktree_roots { @@ -226,12 +220,12 @@ where // let path = root.as_ref().to_string_lossy().to_string(); connection.prepare("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")? - .bind((workspace_id.0, path))? + .with_bindings((workspace_id.0, path))? .exec()?; } connection.prepare("UPDATE workspaces SET last_opened_timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")? - .bind(workspace_id.0)? + .with_bindings(workspace_id.0)? .exec()?; Ok(()) @@ -330,16 +324,11 @@ where // Make sure we bound the parameters correctly debug_assert!(worktree_roots.len() as i32 + 1 == stmt.parameter_count()); - for i in 0..worktree_roots.len() { - let path = &worktree_roots[i].as_ref().as_os_str().as_bytes(); - // If you need to debug this, here's the string parsing: - // let path = &worktree_roots[i].as_ref().to_string_lossy().to_string() - stmt.bind_value(*path, i as i32 + 1); - } - // No -1, because SQLite is 1 based - stmt.bind_value(worktree_roots.len(), worktree_roots.len() as i32 + 1)?; - - stmt.maybe_row() + let root_bytes: Vec<&[u8]> = worktree_roots.iter() + .map(|root| root.as_ref().as_os_str().as_bytes()).collect(); + + stmt.with_bindings((root_bytes, root_bytes.len()))? + .maybe_row() .map(|row| row.map(|id| WorkspaceId(id))) } diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index ca3ba401cf..9b8308f70c 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -207,3 +207,25 @@ impl Column for [T; COUNT] { Ok((array, current_index)) } } + +impl Bind for Vec { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let mut current_index = start_index; + for binding in self.iter() { + current_index = binding.bind(statement, current_index)? + } + + Ok(current_index) + } +} + +impl Bind for &[T] { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let mut current_index = start_index; + for binding in *self { + current_index = binding.bind(statement, current_index)? 
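// Each element binds at the current index and returns the next free
// one, so nested values fill consecutive `?` placeholders. An
// illustrative (hypothetical) call:
//
//     ("a", (1, 2)).bind(&statement, 1)?; // fills ?1, ?2, ?3; returns 4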
+ } + + Ok(current_index) + } +} diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index be52978495..1fd814c580 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -149,7 +149,7 @@ mod test { connection .prepare("INSERT INTO text (text) VALUES (?);") .unwrap() - .bind(text) + .with_bindings(text) .unwrap() .exec() .unwrap(); @@ -185,8 +185,16 @@ mod test { .prepare("INSERT INTO test (text, integer, blob) VALUES (?, ?, ?)") .unwrap(); - insert.bind(tuple1.clone()).unwrap().exec().unwrap(); - insert.bind(tuple2.clone()).unwrap().exec().unwrap(); + insert + .with_bindings(tuple1.clone()) + .unwrap() + .exec() + .unwrap(); + insert + .with_bindings(tuple2.clone()) + .unwrap() + .exec() + .unwrap(); assert_eq!( connection diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 3c0771c0fe..9f3bd333ca 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -47,7 +47,7 @@ impl Migration { WHERE domain = ? ORDER BY step "})? - .bind(self.domain)? + .with_bindings(self.domain)? .rows::<(String, usize, String)>()?; let mut store_completed_migration = connection @@ -72,7 +72,7 @@ impl Migration { connection.exec(migration)?; store_completed_migration - .bind((self.domain, index, *migration))? + .with_bindings((self.domain, index, *migration))? .exec()?; } @@ -163,7 +163,7 @@ mod test { .unwrap(); store_completed_migration - .bind((domain, i, i.to_string())) + .with_bindings((domain, i, i.to_string())) .unwrap() .exec() .unwrap(); diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs index 50f28c7390..9589037e77 100644 --- a/crates/sqlez/src/savepoint.rs +++ b/crates/sqlez/src/savepoint.rs @@ -76,14 +76,14 @@ mod tests { connection.with_savepoint("first", |save1| { save1 .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .bind((save1_text, 1))? + .with_bindings((save1_text, 1))? .exec()?; assert!(save1 .with_savepoint("second", |save2| -> Result, anyhow::Error> { save2 .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .bind((save2_text, 2))? + .with_bindings((save2_text, 2))? .exec()?; assert_eq!( @@ -108,7 +108,7 @@ mod tests { save1.with_savepoint_rollback::<(), _>("second", |save2| { save2 .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .bind((save2_text, 2))? + .with_bindings((save2_text, 2))? .exec()?; assert_eq!( @@ -131,7 +131,7 @@ mod tests { save1.with_savepoint_rollback("second", |save2| { save2 .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .bind((save2_text, 2))? + .with_bindings((save2_text, 2))? .exec()?; assert_eq!( diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index ac57847774..06a090c417 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -179,10 +179,9 @@ impl<'a> Statement<'a> { Ok(str::from_utf8(slice)?) 
} - pub fn bind_value(&self, value: T, idx: i32) -> Result<()> { - debug_assert!(idx > 0); - value.bind(self, idx)?; - Ok(()) + pub fn bind(&self, value: T, index: i32) -> Result { + debug_assert!(index > 0); + value.bind(self, index) } pub fn column(&mut self) -> Result { @@ -203,8 +202,8 @@ impl<'a> Statement<'a> { } } - pub fn bind(&mut self, bindings: impl Bind) -> Result<&mut Self> { - self.bind_value(bindings, 1)?; + pub fn with_bindings(&mut self, bindings: impl Bind) -> Result<&mut Self> { + self.bind(bindings, 1)?; Ok(self) } diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 53d49464be..f4f759cd6c 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -31,6 +31,8 @@ impl ThreadSafeConnection { self } + /// Migrations have to be run per connection because we fallback to memory + /// so this needs pub fn with_migrations(mut self, migrations: &'static [Migration]) -> Self { self.migrations = Some(migrations); self From c8face33fa9feb9d929757de7fd3317c0456500d Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 1 Nov 2022 17:46:39 -0700 Subject: [PATCH 137/240] WIP, incorporating type parsing using new sqlez patterns --- crates/db/src/pane.rs | 49 ++++++++++++++++++++++++++++++++--- crates/db/src/workspace.rs | 16 ++++++++++-- crates/sqlez/src/bindable.rs | 12 +++++++++ crates/sqlez/src/statement.rs | 2 +- 4 files changed, 73 insertions(+), 6 deletions(-) diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 0716d19b1d..3292cc031d 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,7 +1,9 @@ +use std::str::FromStr; + use gpui::Axis; use indoc::indoc; -use sqlez::migrations::Migration; +use sqlez::{migrations::Migration, bindable::{Bind, Column}, connection::Connection, statement::Statement}; use crate::{items::ItemId, workspace::WorkspaceId}; @@ -138,7 +140,6 @@ pub struct SerializedPane { //********* CURRENTLY IN USE TYPES: ********* - #[derive(Default, Debug, PartialEq, Eq)] pub enum DockAnchor { #[default] @@ -147,6 +148,29 @@ pub enum DockAnchor { Expanded, } +impl ToString for DockAnchor { + fn to_string(&self) -> String { + match self { + DockAnchor::Bottom => "Bottom".to_string(), + DockAnchor::Right => "Right".to_string(), + DockAnchor::Expanded => "Expanded".to_string(), + } + } +} + +impl FromStr for DockAnchor { + type Err = anyhow::Error; + + fn from_str(s: &str) -> anyhow::Result { + match s { + "Bottom" => Ok(DockAnchor::Bottom), + "Right" => Ok(DockAnchor::Right), + "Expanded" => Ok(DockAnchor::Expanded), + _ => anyhow::bail!("Not a valid dock anchor") + } + } +} + #[derive(Default, Debug, PartialEq, Eq)] pub struct SerializedDockPane { pub anchor_position: DockAnchor, @@ -159,6 +183,7 @@ impl SerializedDockPane { } } + #[derive(Default, Debug, PartialEq, Eq)] pub(crate) struct DockRow { workspace_id: WorkspaceId, @@ -172,6 +197,21 @@ impl DockRow { } } +impl Bind for DockRow { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + statement.bind((self.workspace_id, self.anchor_position.to_string(), self.visible), start_index) + } +} + +impl Column for DockRow { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + <(WorkspaceId, &str, bool) as Column>::column(statement, start_index) + .map(|((workspace_id, anchor_position, visible), next_index)| { + + }) + } +} + impl Db { pub fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { let axis = 
self.get_pane_group_axis(pane_group_id); @@ -229,7 +269,10 @@ impl Db { pub fn get_dock_pane(&self, workspace: WorkspaceId) -> Option { fn logic(conn: &Connection, workspace: WorkspaceId) -> anyhow::Result> { - let mut stmt = conn.prepare("SELECT workspace_id, anchor_position, visible FROM dock_panes WHERE workspace_id = ?")?; + let mut stmt = conn.prepare("SELECT workspace_id, anchor_position, visible FROM dock_panes WHERE workspace_id = ?")? + .maybe_row() + .map(|row| DockRow::col); + let dock_panes = stmt.query_row([workspace.raw_id()], |row_ref| from_row::).optional(); diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 126a34676e..f454151cbb 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,4 +1,4 @@ -use anyhow::Result; +use anyhow::{Result, anyhow}; use std::{ ffi::OsStr, @@ -10,7 +10,7 @@ use std::{ use indoc::indoc; use sqlez::{ - connection::Connection, migrations::Migration, + connection::Connection, migrations::Migration, bindable::{Column, Bind}, }; use crate::pane::SerializedDockPane; @@ -45,6 +45,18 @@ impl WorkspaceId { } } +impl Bind for WorkspaceId { + fn bind(&self, statement: &sqlez::statement::Statement, start_index: i32) -> Result { + todo!(); + } +} + +impl Column for WorkspaceId { + fn column(statement: &mut sqlez::statement::Statement, start_index: i32) -> Result<(Self, i32)> { + todo!(); + } +} + #[derive(Default, Debug)] pub struct SerializedWorkspace { pub workspace_id: WorkspaceId, diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index 9b8308f70c..e2cdde039e 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -10,6 +10,18 @@ pub trait Column: Sized { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)>; } +impl Bind for bool { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind(self.then_some(1).unwrap_or(0), start_index) + } +} + +impl Column for bool { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + i32::column(statement, start_index).map(|(i, next_index)| (i != 0, next_index)) + } +} + impl Bind for &[u8] { fn bind(&self, statement: &Statement, start_index: i32) -> Result { statement.bind_blob(start_index, self)?; diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index 06a090c417..14683171a7 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -114,7 +114,7 @@ impl<'a> Statement<'a> { unsafe { sqlite3_bind_int(self.raw_statement, index, int); - } + }; self.connection.last_error() } From 406663c75ef202bddd4ed2b03260a16ba21918db Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 2 Nov 2022 13:26:23 -0700 Subject: [PATCH 138/240] Converted to sqlez, so much nicer --- Cargo.lock | 1 + crates/db/Cargo.toml | 3 +- crates/db/examples/serialize-pane.rs | 12 +- crates/db/examples/serialize_workspace.rs | 6 +- crates/db/src/db.rs | 12 +- crates/db/src/kvp.rs | 22 +-- crates/db/src/pane.rs | 185 ++++++++++++---------- crates/db/src/workspace.rs | 98 +++++++----- crates/sqlez/src/connection.rs | 85 +++++++--- crates/sqlez/src/savepoint.rs | 14 +- crates/sqlez/src/statement.rs | 16 +- crates/util/src/lib.rs | 21 +++ 12 files changed, 278 insertions(+), 197 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2fb859dca5..3e8526fbed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1559,6 +1559,7 @@ dependencies = [ "parking_lot 0.11.2", "sqlez", "tempdir", + "util", ] [[package]] diff --git a/crates/db/Cargo.toml 
b/crates/db/Cargo.toml index fe0b21eaf4..1ee9de6186 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -11,11 +11,12 @@ doctest = false test-support = [] [dependencies] -indoc = "1.0.4" collections = { path = "../collections" } gpui = { path = "../gpui" } sqlez = { path = "../sqlez" } +util = { path = "../util" } anyhow = "1.0.57" +indoc = "1.0.4" async-trait = "0.1" lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index e828f007d1..6073476709 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -7,10 +7,8 @@ const TEST_FILE: &'static str = "test-db.db"; fn main() -> anyhow::Result<()> { env_logger::init(); - let db = db::Db::open_in_memory(); - if db.real().is_none() { - return Err(anyhow::anyhow!("Migrations failed")); - } + let db = db::Db::open_in_memory("db"); + let file = Path::new(TEST_FILE); let f = File::create(file)?; @@ -21,21 +19,21 @@ fn main() -> anyhow::Result<()> { let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]); db.save_dock_pane( - workspace_1.workspace_id, + &workspace_1.workspace_id, &SerializedDockPane { anchor_position: DockAnchor::Expanded, visible: true, }, ); db.save_dock_pane( - workspace_2.workspace_id, + &workspace_2.workspace_id, &SerializedDockPane { anchor_position: DockAnchor::Bottom, visible: true, }, ); db.save_dock_pane( - workspace_3.workspace_id, + &workspace_3.workspace_id, &SerializedDockPane { anchor_position: DockAnchor::Right, visible: false, diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs index 4010c77976..9b6082ce53 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -4,10 +4,8 @@ const TEST_FILE: &'static str = "test-db.db"; fn main() -> anyhow::Result<()> { env_logger::init(); - let db = db::Db::open_in_memory(); - if db.real().is_none() { - return Err(anyhow::anyhow!("Migrations failed")); - } + let db = db::Db::open_in_memory("db"); + let file = Path::new(TEST_FILE); let f = File::create(file)?; diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 857b5f273e..48a025112a 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -18,7 +18,7 @@ use sqlez::thread_safe_connection::ThreadSafeConnection; pub use workspace::*; #[derive(Clone)] -struct Db(ThreadSafeConnection); +pub struct Db(ThreadSafeConnection); impl Deref for Db { type Target = sqlez::connection::Connection; @@ -54,15 +54,15 @@ impl Db { } /// Open a in memory database for testing and as a fallback. - pub fn open_in_memory() -> Self { - Db( - ThreadSafeConnection::new("Zed DB", false).with_initialize_query(indoc! {" + pub fn open_in_memory(db_name: &str) -> Self { + Db(ThreadSafeConnection::new(db_name, false) + .with_initialize_query(indoc! {" PRAGMA journal_mode=WAL; PRAGMA synchronous=NORMAL; PRAGMA foreign_keys=TRUE; PRAGMA case_sensitive_like=TRUE; - "}), - ) + "}) + .with_migrations(&[KVP_MIGRATION, WORKSPACES_MIGRATION, PANE_MIGRATIONS])) } pub fn write_to>(&self, dest: P) -> Result<()> { diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index a692d73d88..93be5e10c0 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -23,7 +23,7 @@ impl Db { pub fn write_kvp(&self, key: &str, value: &str) -> Result<()> { self.0 - .prepare("INSERT OR REPLACE INTO kv_store(key, value) VALUES (?, ?)")? 
+ .prepare("INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))")? .with_bindings((key, value))? .exec() } @@ -44,21 +44,21 @@ mod tests { #[test] fn test_kvp() -> Result<()> { - let db = Db::open_in_memory(); + let db = Db::open_in_memory("test_kvp"); - assert_eq!(db.read_kvp("key-1")?, None); + assert_eq!(db.read_kvp("key-1").unwrap(), None); - db.write_kvp("key-1", "one")?; - assert_eq!(db.read_kvp("key-1")?, Some("one".to_string())); + db.write_kvp("key-1", "one").unwrap(); + assert_eq!(db.read_kvp("key-1").unwrap(), Some("one".to_string())); - db.write_kvp("key-1", "one-2")?; - assert_eq!(db.read_kvp("key-1")?, Some("one-2".to_string())); + db.write_kvp("key-1", "one-2").unwrap(); + assert_eq!(db.read_kvp("key-1").unwrap(), Some("one-2".to_string())); - db.write_kvp("key-2", "two")?; - assert_eq!(db.read_kvp("key-2")?, Some("two".to_string())); + db.write_kvp("key-2", "two").unwrap(); + assert_eq!(db.read_kvp("key-2").unwrap(), Some("two".to_string())); - db.delete_kvp("key-1")?; - assert_eq!(db.read_kvp("key-1")?, None); + db.delete_kvp("key-1").unwrap(); + assert_eq!(db.read_kvp("key-1").unwrap(), None); Ok(()) } diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 3292cc031d..5db805012d 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,16 +1,21 @@ - use std::str::FromStr; use gpui::Axis; use indoc::indoc; -use sqlez::{migrations::Migration, bindable::{Bind, Column}, connection::Connection, statement::Statement}; - +use sqlez::{ + bindable::{Bind, Column}, + migrations::Migration, + statement::Statement, +}; +use util::{iife, ResultExt}; use crate::{items::ItemId, workspace::WorkspaceId}; use super::Db; -pub(crate) const PANE_MIGRATIONS: Migration = Migration::new("pane", &[indoc! {" +pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( + "pane", + &[indoc! 
{" CREATE TABLE dock_panes( dock_pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, @@ -19,7 +24,7 @@ CREATE TABLE dock_panes( FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE ) STRICT; -CREATE TABLE pane_groups( +CREATE TABLE pane_groups( -- Inner nodes group_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, parent_group INTEGER, -- NULL indicates that this is a root node @@ -28,7 +33,8 @@ CREATE TABLE pane_groups( FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; -CREATE TABLE grouped_panes( + +CREATE TABLE grouped_panes( -- Leaf nodes pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, group_id INTEGER NOT NULL, @@ -65,7 +71,8 @@ CREATE TABLE dock_items( FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE, FOREIGN KEY(item_id) REFERENCES items(item_id)ON DELETE CASCADE ) STRICT; -"}]); +"}], +); // We have an many-branched, unbalanced tree with three types: // Pane Groups @@ -137,10 +144,9 @@ pub struct SerializedPane { children: Vec, } - //********* CURRENTLY IN USE TYPES: ********* -#[derive(Default, Debug, PartialEq, Eq)] +#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] pub enum DockAnchor { #[default] Bottom, @@ -162,15 +168,28 @@ impl FromStr for DockAnchor { type Err = anyhow::Error; fn from_str(s: &str) -> anyhow::Result { - match s { + match s { "Bottom" => Ok(DockAnchor::Bottom), "Right" => Ok(DockAnchor::Right), "Expanded" => Ok(DockAnchor::Expanded), - _ => anyhow::bail!("Not a valid dock anchor") + _ => anyhow::bail!("Not a valid dock anchor"), } } } +impl Bind for DockAnchor { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + statement.bind(self.to_string(), start_index) + } +} + +impl Column for DockAnchor { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + ::column(statement, start_index) + .and_then(|(str, next_index)| Ok((DockAnchor::from_str(&str)?, next_index))) + } +} + #[derive(Default, Debug, PartialEq, Eq)] pub struct SerializedDockPane { pub anchor_position: DockAnchor, @@ -178,11 +197,30 @@ pub struct SerializedDockPane { } impl SerializedDockPane { - pub fn to_row(&self, workspace: WorkspaceId) -> DockRow { - DockRow { workspace_id: workspace, anchor_position: self.anchor_position, visible: self.visible } + fn to_row(&self, workspace: &WorkspaceId) -> DockRow { + DockRow { + workspace_id: *workspace, + anchor_position: self.anchor_position, + visible: self.visible, + } } } +impl Column for SerializedDockPane { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + <(DockAnchor, bool) as Column>::column(statement, start_index).map( + |((anchor_position, visible), next_index)| { + ( + SerializedDockPane { + anchor_position, + visible, + }, + next_index, + ) + }, + ) + } +} #[derive(Default, Debug, PartialEq, Eq)] pub(crate) struct DockRow { @@ -191,24 +229,16 @@ pub(crate) struct DockRow { visible: bool, } -impl DockRow { - pub fn to_pane(&self) -> SerializedDockPane { - SerializedDockPane { anchor_position: self.anchor_position, visible: self.visible } - } -} - impl Bind for DockRow { fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - statement.bind((self.workspace_id, self.anchor_position.to_string(), self.visible), start_index) - } -} - -impl Column for DockRow { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - <(WorkspaceId, &str, bool) as 
Column>::column(statement, start_index) - .map(|((workspace_id, anchor_position, visible), next_index)| { - - }) + statement.bind( + ( + self.workspace_id, + self.anchor_position.to_string(), + self.visible, + ), + start_index, + ) } } @@ -267,75 +297,37 @@ impl Db { } pub fn get_dock_pane(&self, workspace: WorkspaceId) -> Option { - fn logic(conn: &Connection, workspace: WorkspaceId) -> anyhow::Result> { - - let mut stmt = conn.prepare("SELECT workspace_id, anchor_position, visible FROM dock_panes WHERE workspace_id = ?")? - .maybe_row() - .map(|row| DockRow::col); - - - let dock_panes = stmt.query_row([workspace.raw_id()], |row_ref| from_row::).optional(); - - let mut dock_panes_iter = stmt.query_and_then([workspace.raw_id()], from_row::)?; - let dock_pane = dock_panes_iter - .next() - .and_then(|dock_row| - dock_row - .ok() - .map(|dock_row| dock_row.to_pane())); - - Ok(dock_pane) - } - - self.real() - .map(|db| { - let lock = db.connection.lock(); - - match logic(&lock, workspace) { - Ok(dock_pane) => dock_pane, - Err(err) => { - log::error!("Failed to get the dock pane: {}", err); - None - }, - } - }) - .unwrap_or(None) - + iife!({ + self.prepare("SELECT anchor_position, visible FROM dock_panes WHERE workspace_id = ?")? + .with_bindings(workspace)? + .maybe_row::() + }) + .log_err() + .flatten() } - pub fn save_dock_pane(&self, workspace: WorkspaceId, dock_pane: SerializedDockPane) { - to_params_named(dock_pane.to_row(workspace)) - .map_err(|err| { - log::error!("Failed to parse params for the dock row: {}", err); - err - }) - .ok() - .zip(self.real()) - .map(|(params, db)| { - // TODO: overwrite old dock panes if need be - let query = "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (:workspace_id, :anchor_position, :visible);"; - - db.connection - .lock() - .execute(query, params.to_slice().as_slice()) - .map(|_| ()) // Eat the return value - .unwrap_or_else(|err| { - log::error!("Failed to insert new dock pane into DB: {}", err); - }) - }); + pub fn save_dock_pane(&self, workspace: &WorkspaceId, dock_pane: &SerializedDockPane) { + iife!({ + self.prepare( + "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);", + )? + .with_bindings(dock_pane.to_row(workspace))? 
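// `to_row` flattens the pane into (workspace_id, anchor_position,
// visible) via the Bind impl above; the matching Column impls read the
// same columns back, mirroring `get_dock_pane`:
//
//     let pane = self
//         .prepare("SELECT anchor_position, visible FROM dock_panes WHERE workspace_id = ?")?
//         .with_bindings(workspace)?
//         .maybe_row::<SerializedDockPane>()?;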
+ .insert() + }) + .log_err(); } } #[cfg(test)] mod tests { - use crate::Db; + use crate::{pane::SerializedPane, Db}; use super::{DockAnchor, SerializedDockPane}; #[test] fn test_basic_dock_pane() { - let db = Db::open_in_memory(); + let db = Db::open_in_memory("basic_dock_pane"); let workspace = db.workspace_for_roots(&["/tmp"]); @@ -344,7 +336,28 @@ mod tests { visible: true, }; - db.save_dock_pane(workspace.workspace_id, dock_pane); + db.save_dock_pane(&workspace.workspace_id, &dock_pane); + + let new_workspace = db.workspace_for_roots(&["/tmp"]); + + assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane); + } + + #[test] + fn test_dock_simple_split() { + let db = Db::open_in_memory("simple_split"); + + let workspace = db.workspace_for_roots(&["/tmp"]); + + let center_pane = SerializedPane { + pane_id: crate::pane::PaneId { + workspace_id: workspace.workspace_id, + pane_id: 1, + }, + children: vec![], + }; + + db.save_dock_pane(&workspace.workspace_id, &dock_pane); let new_workspace = db.workspace_for_roots(&["/tmp"]); diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index f454151cbb..bf2f765e19 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,4 +1,4 @@ -use anyhow::{Result, anyhow}; +use anyhow::Result; use std::{ ffi::OsStr, @@ -10,7 +10,9 @@ use std::{ use indoc::indoc; use sqlez::{ - connection::Connection, migrations::Migration, bindable::{Column, Bind}, + bindable::{Bind, Column}, + connection::Connection, + migrations::Migration, }; use crate::pane::SerializedDockPane; @@ -47,13 +49,17 @@ impl WorkspaceId { impl Bind for WorkspaceId { fn bind(&self, statement: &sqlez::statement::Statement, start_index: i32) -> Result { - todo!(); + statement.bind(self.raw_id(), start_index) } } impl Column for WorkspaceId { - fn column(statement: &mut sqlez::statement::Statement, start_index: i32) -> Result<(Self, i32)> { - todo!(); + fn column( + statement: &mut sqlez::statement::Statement, + start_index: i32, + ) -> Result<(Self, i32)> { + ::column(statement, start_index) + .map(|(id, next_index)| (WorkspaceId(id), next_index)) } } @@ -154,10 +160,8 @@ impl Db { fn last_workspace_id(&self) -> Option { let res = self - .prepare( - "SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT 1", - ) - .and_then(|stmt| stmt.maybe_row()) + .prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT 1") + .and_then(|mut stmt| stmt.maybe_row()) .map(|row| row.map(|id| WorkspaceId(id))); match res { @@ -172,28 +176,30 @@ impl Db { /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, Vec>)> { self.with_savepoint("recent_workspaces", |conn| { - let ids = conn.prepare("SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT ?")? + let rows = conn + .prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? .with_bindings(limit)? - .rows::()? - .iter() - .map(|row| WorkspaceId(*row)); - - let result = Vec::new(); - - let stmt = conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; + .rows::()?; + + let ids = rows.iter().map(|row| WorkspaceId(*row)); + + let mut result = Vec::new(); + + let mut stmt = + conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; for workspace_id in ids { - let roots = stmt.with_bindings(workspace_id.0)? + let roots = stmt + .with_bindings(workspace_id.0)? .rows::>()? 
.iter() - .map(|row| { - PathBuf::from(OsStr::from_bytes(&row)).into() - }) + .map(|row| PathBuf::from(OsStr::from_bytes(&row)).into()) .collect(); result.push((workspace_id, roots)) } - + Ok(result) - }).unwrap_or_else(|err| { + }) + .unwrap_or_else(|err| { log::error!("Failed to get recent workspaces, err: {}", err); Vec::new() }) @@ -213,11 +219,10 @@ where if let Some(preexisting_id) = preexisting_id { if preexisting_id != *workspace_id { // Should also delete fields in other tables with cascading updates - connection.prepare( - "DELETE FROM workspaces WHERE workspace_id = ?", - )? - .with_bindings(preexisting_id.0)? - .exec()?; + connection + .prepare("DELETE FROM workspaces WHERE workspace_id = ?")? + .with_bindings(preexisting_id.0)? + .exec()?; } } @@ -231,12 +236,14 @@ where // If you need to debug this, here's the string parsing: // let path = root.as_ref().to_string_lossy().to_string(); - connection.prepare("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")? + connection + .prepare("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")? .with_bindings((workspace_id.0, path))? .exec()?; } - connection.prepare("UPDATE workspaces SET last_opened_timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")? + connection + .prepare("UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")? .with_bindings(workspace_id.0)? .exec()?; @@ -264,7 +271,7 @@ where } } array_binding_stmt.push(')'); - + // Any workspace can have multiple independent paths, and these paths // can overlap in the database. Take this test data for example: // @@ -336,10 +343,14 @@ where // Make sure we bound the parameters correctly debug_assert!(worktree_roots.len() as i32 + 1 == stmt.parameter_count()); - let root_bytes: Vec<&[u8]> = worktree_roots.iter() - .map(|root| root.as_ref().as_os_str().as_bytes()).collect(); - - stmt.with_bindings((root_bytes, root_bytes.len()))? + let root_bytes: Vec<&[u8]> = worktree_roots + .iter() + .map(|root| root.as_ref().as_os_str().as_bytes()) + .collect(); + + let len = root_bytes.len(); + + stmt.with_bindings((root_bytes, len))? 
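// The (Vec<&[u8]>, usize) tuple binds each root blob to ?1..?N in
// order, then binds the root count to ?N+1 (SQLite parameters are
// 1-based), matching the parameter_count assertion above.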
.maybe_row() .map(|row| row.map(|id| WorkspaceId(id))) } @@ -360,7 +371,8 @@ mod tests { #[test] fn test_new_worktrees_for_roots() { - let db = Db::open_in_memory(); + env_logger::init(); + let db = Db::open_in_memory("test_new_worktrees_for_roots"); // Test creation in 0 case let workspace_1 = db.workspace_for_roots::(&[]); @@ -371,7 +383,7 @@ mod tests { assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); // Ensure the timestamps are different - sleep(Duration::from_millis(20)); + sleep(Duration::from_secs(1)); db.make_new_workspace::(&[]); // Test pulling another value from recent workspaces @@ -379,7 +391,7 @@ mod tests { assert_eq!(workspace_2.workspace_id, WorkspaceId(2)); // Ensure the timestamps are different - sleep(Duration::from_millis(20)); + sleep(Duration::from_secs(1)); // Test creating a new workspace that doesn't exist already let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); @@ -396,7 +408,7 @@ mod tests { #[test] fn test_empty_worktrees() { - let db = Db::open_in_memory(); + let db = Db::open_in_memory("test_empty_worktrees"); assert_eq!(None, db.workspace_id::(&[])); @@ -404,7 +416,6 @@ mod tests { db.make_new_workspace::(&[]); //ID 2 db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); - db.write_to("test.db").unwrap(); // Sanity check assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1))); @@ -436,7 +447,7 @@ mod tests { (WorkspaceId(7), vec!["/tmp2"]), ]; - let db = Db::open_in_memory(); + let db = Db::open_in_memory("test_more_workspace_ids"); for (workspace_id, entries) in data { db.make_new_workspace::(&[]); @@ -470,7 +481,7 @@ mod tests { (WorkspaceId(3), vec!["/tmp", "/tmp2", "/tmp3"]), ]; - let db = Db::open_in_memory(); + let db = Db::open_in_memory("test_detect_workspace_id"); for (workspace_id, entries) in data { db.make_new_workspace::(&[]); @@ -511,7 +522,7 @@ mod tests { (WorkspaceId(3), vec!["/tmp2", "/tmp3"]), ]; - let db = Db::open_in_memory(); + let db = Db::open_in_memory("test_tricky_overlapping_update"); // Load in the test data for (workspace_id, entries) in data { @@ -519,6 +530,7 @@ mod tests { db.update_worktrees(workspace_id, entries); } + sleep(Duration::from_secs(1)); // Execute the update db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 1fd814c580..fcc180a48d 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -32,6 +32,9 @@ impl Connection { 0 as *const _, ); + // Turn on extended error codes + sqlite3_extended_result_codes(connection.sqlite3, 1); + connection.last_error()?; } @@ -71,6 +74,7 @@ impl Connection { 0 as *mut _, 0 as *mut _, ); + sqlite3_errcode(self.sqlite3); self.last_error()?; } Ok(()) @@ -95,29 +99,7 @@ impl Connection { } pub(crate) fn last_error(&self) -> Result<()> { - const NON_ERROR_CODES: &[i32] = &[SQLITE_OK, SQLITE_ROW]; - unsafe { - let code = sqlite3_errcode(self.sqlite3); - if NON_ERROR_CODES.contains(&code) { - return Ok(()); - } - - let message = sqlite3_errmsg(self.sqlite3); - let message = if message.is_null() { - None - } else { - Some( - String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) - .into_owned(), - ) - }; - - Err(anyhow!( - "Sqlite call failed with code {} and message: {:?}", - code as isize, - message - )) - } + unsafe { error_to_result(sqlite3_errcode(self.sqlite3)) } } } @@ -127,12 +109,37 @@ impl Drop for Connection { } } +pub(crate) fn error_to_result(code: std::os::raw::c_int) -> Result<()> { + const 
NON_ERROR_CODES: &[i32] = &[SQLITE_OK, SQLITE_ROW]; + unsafe { + if NON_ERROR_CODES.contains(&code) { + return Ok(()); + } + + let message = sqlite3_errstr(code); + let message = if message.is_null() { + None + } else { + Some( + String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) + .into_owned(), + ) + }; + + Err(anyhow!( + "Sqlite call failed with code {} and message: {:?}", + code as isize, + message + )) + } +} + #[cfg(test)] mod test { use anyhow::Result; use indoc::indoc; - use crate::connection::Connection; + use crate::{connection::Connection, migrations::Migration}; #[test] fn string_round_trips() -> Result<()> { @@ -234,4 +241,34 @@ mod test { .unwrap(); assert_eq!(read_blobs, vec![blob]); } + + #[test] + fn test_kv_store() -> anyhow::Result<()> { + let connection = Connection::open_memory("kv_store"); + + Migration::new( + "kv", + &["CREATE TABLE kv_store( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ) STRICT;"], + ) + .run(&connection) + .unwrap(); + + let mut stmt = connection.prepare("INSERT INTO kv_store(key, value) VALUES(?, ?)")?; + stmt.bind_text(1, "a").unwrap(); + stmt.bind_text(2, "b").unwrap(); + stmt.exec().unwrap(); + let id = connection.last_insert_id(); + + let res = connection + .prepare("SELECT key, value FROM kv_store WHERE rowid = ?")? + .with_bindings(id)? + .row::<(String, String)>()?; + + assert_eq!(res, ("a".to_string(), "b".to_string())); + + Ok(()) + } } diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs index 9589037e77..3d7830dd91 100644 --- a/crates/sqlez/src/savepoint.rs +++ b/crates/sqlez/src/savepoint.rs @@ -6,9 +6,9 @@ impl Connection { // Run a set of commands within the context of a `SAVEPOINT name`. If the callback // returns Err(_), the savepoint will be rolled back. Otherwise, the save // point is released. - pub fn with_savepoint(&mut self, name: impl AsRef, f: F) -> Result + pub fn with_savepoint(&self, name: impl AsRef, f: F) -> Result where - F: FnOnce(&mut Connection) -> Result, + F: FnOnce(&Connection) -> Result, { let name = name.as_ref().to_owned(); self.exec(format!("SAVEPOINT {}", &name))?; @@ -28,13 +28,9 @@ impl Connection { // Run a set of commands within the context of a `SAVEPOINT name`. If the callback // returns Ok(None) or Err(_), the savepoint will be rolled back. Otherwise, the save // point is released. - pub fn with_savepoint_rollback( - &mut self, - name: impl AsRef, - f: F, - ) -> Result> + pub fn with_savepoint_rollback(&self, name: impl AsRef, f: F) -> Result> where - F: FnOnce(&mut Connection) -> Result>, + F: FnOnce(&Connection) -> Result>, { let name = name.as_ref().to_owned(); self.exec(format!("SAVEPOINT {}", &name))?; @@ -60,7 +56,7 @@ mod tests { #[test] fn test_nested_savepoints() -> Result<()> { - let mut connection = Connection::open_memory("nested_savepoints"); + let connection = Connection::open_memory("nested_savepoints"); connection .exec(indoc! 
{" diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index 14683171a7..e2b59d86f1 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -6,7 +6,7 @@ use anyhow::{anyhow, Context, Result}; use libsqlite3_sys::*; use crate::bindable::{Bind, Column}; -use crate::connection::Connection; +use crate::connection::{error_to_result, Connection}; pub struct Statement<'a> { raw_statement: *mut sqlite3_stmt, @@ -65,6 +65,7 @@ impl<'a> Statement<'a> { } pub fn bind_blob(&self, index: i32, blob: &[u8]) -> Result<()> { + // dbg!("bind blob", index); let index = index as c_int; let blob_pointer = blob.as_ptr() as *const _; let len = blob.len() as c_int; @@ -94,6 +95,7 @@ impl<'a> Statement<'a> { } pub fn bind_double(&self, index: i32, double: f64) -> Result<()> { + // dbg!("bind double", index); let index = index as c_int; unsafe { @@ -110,6 +112,7 @@ impl<'a> Statement<'a> { } pub fn bind_int(&self, index: i32, int: i32) -> Result<()> { + // dbg!("bind int", index); let index = index as c_int; unsafe { @@ -126,6 +129,7 @@ impl<'a> Statement<'a> { } pub fn bind_int64(&self, index: i32, int: i64) -> Result<()> { + // dbg!("bind int64", index); let index = index as c_int; unsafe { sqlite3_bind_int64(self.raw_statement, index, int); @@ -141,6 +145,7 @@ impl<'a> Statement<'a> { } pub fn bind_null(&self, index: i32) -> Result<()> { + // dbg!("bind null", index); let index = index as c_int; unsafe { sqlite3_bind_null(self.raw_statement, index); @@ -149,11 +154,12 @@ impl<'a> Statement<'a> { } pub fn bind_text(&self, index: i32, text: &str) -> Result<()> { + // dbg!("bind text", index, text); let index = index as c_int; let text_pointer = text.as_ptr() as *const _; let len = text.len() as c_int; unsafe { - sqlite3_bind_blob( + sqlite3_bind_text( self.raw_statement, index, text_pointer, @@ -304,10 +310,8 @@ impl<'a> Statement<'a> { impl<'a> Drop for Statement<'a> { fn drop(&mut self) { unsafe { - sqlite3_finalize(self.raw_statement); - self.connection - .last_error() - .expect("sqlite3 finalize failed for statement :("); + let error = sqlite3_finalize(self.raw_statement); + error_to_result(error).expect("failed error"); }; } } diff --git a/crates/util/src/lib.rs b/crates/util/src/lib.rs index 22d63a0996..3757da5854 100644 --- a/crates/util/src/lib.rs +++ b/crates/util/src/lib.rs @@ -204,6 +204,13 @@ impl Iterator for RandomCharIter { } } +#[macro_export] +macro_rules! 
iife { + ($block:block) => { + (|| $block)() + }; +} + #[cfg(test)] mod tests { use super::*; @@ -221,4 +228,18 @@ mod tests { extend_sorted(&mut vec, vec![1000, 19, 17, 9, 5], 8, |a, b| b.cmp(a)); assert_eq!(vec, &[1000, 101, 21, 19, 17, 13, 9, 8]); } + + #[test] + fn test_iife() { + fn option_returning_function() -> Option<()> { + None + } + + let foo = iife!({ + option_returning_function()?; + Some(()) + }); + + assert_eq!(foo, None); + } } From 685bc9fed30046638c0c9fcb84d6d86a26c28def Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 2 Nov 2022 14:37:51 -0700 Subject: [PATCH 139/240] impl bind and column and adjust pane tables --- crates/db/src/pane.rs | 120 ++++++++++++++++--------------------- crates/db/src/workspace.rs | 19 ++---- 2 files changed, 57 insertions(+), 82 deletions(-) diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 5db805012d..816290d870 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,5 +1,4 @@ -use std::str::FromStr; - +use anyhow::bail; use gpui::Axis; use indoc::indoc; use sqlez::{ @@ -16,15 +15,7 @@ use super::Db; pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( "pane", &[indoc! {" -CREATE TABLE dock_panes( - dock_pane_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' - visible INTEGER NOT NULL, -- Boolean - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE pane_groups( -- Inner nodes +CREATE TABLE pane_groups( group_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, parent_group INTEGER, -- NULL indicates that this is a root node @@ -33,43 +24,32 @@ CREATE TABLE pane_groups( -- Inner nodes FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; - -CREATE TABLE grouped_panes( -- Leaf nodes +CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, - group_id INTEGER NOT NULL, + group_id INTEGER, -- If null, this is a dock pane idx INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; +CREATE TABLE dock_panes( + pane_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' + visible INTEGER NOT NULL, -- Boolean + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE +) STRICT; + CREATE TABLE items( - item_id INTEGER PRIMARY KEY, + item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique + pane_id INTEGER NOT NULL, workspace_id INTEGER NOT NULL, kind TEXT NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE group_items( - workspace_id INTEGER NOT NULL, - pane_id INTEGER NOT NULL, - item_id INTEGER NOT NULL, - idx INTEGER NOT NULL, - PRIMARY KEY (workspace_id, pane_id, item_id) - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(pane_id) REFERENCES grouped_panes(pane_id) ON DELETE CASCADE, - FOREIGN KEY(item_id) REFERENCES items(item_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE dock_items( - workspace_id INTEGER NOT NULL, - dock_pane_id INTEGER NOT NULL, - item_id INTEGER NOT NULL, - idx INTEGER NOT NULL, - PRIMARY KEY (workspace_id, dock_pane_id, item_id) - FOREIGN KEY(workspace_id) REFERENCES 
workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE, - FOREIGN KEY(item_id) REFERENCES items(item_id)ON DELETE CASCADE + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE + PRIMARY KEY(item_id, workspace_id) ) STRICT; "}], ); @@ -154,39 +134,30 @@ pub enum DockAnchor { Expanded, } -impl ToString for DockAnchor { - fn to_string(&self) -> String { - match self { - DockAnchor::Bottom => "Bottom".to_string(), - DockAnchor::Right => "Right".to_string(), - DockAnchor::Expanded => "Expanded".to_string(), - } - } -} - -impl FromStr for DockAnchor { - type Err = anyhow::Error; - - fn from_str(s: &str) -> anyhow::Result { - match s { - "Bottom" => Ok(DockAnchor::Bottom), - "Right" => Ok(DockAnchor::Right), - "Expanded" => Ok(DockAnchor::Expanded), - _ => anyhow::bail!("Not a valid dock anchor"), - } - } -} - impl Bind for DockAnchor { fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - statement.bind(self.to_string(), start_index) + match self { + DockAnchor::Bottom => "Bottom", + DockAnchor::Right => "Right", + DockAnchor::Expanded => "Expanded", + } + .bind(statement, start_index) } } impl Column for DockAnchor { fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - ::column(statement, start_index) - .and_then(|(str, next_index)| Ok((DockAnchor::from_str(&str)?, next_index))) + String::column(statement, start_index).and_then(|(anchor_text, next_index)| { + Ok(( + match anchor_text.as_ref() { + "Bottom" => DockAnchor::Bottom, + "Right" => DockAnchor::Right, + "Expanded" => DockAnchor::Expanded, + _ => bail!("Stored dock anchor is incorrect"), + }, + next_index, + )) + }) } } @@ -232,16 +203,29 @@ pub(crate) struct DockRow { impl Bind for DockRow { fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { statement.bind( - ( - self.workspace_id, - self.anchor_position.to_string(), - self.visible, - ), + (self.workspace_id, self.anchor_position, self.visible), start_index, ) } } +impl Column for DockRow { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + <(WorkspaceId, DockAnchor, bool) as Column>::column(statement, start_index).map( + |((workspace_id, anchor_position, visible), next_index)| { + ( + DockRow { + workspace_id, + anchor_position, + visible, + }, + next_index, + ) + }, + ) + } +} + impl Db { pub fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { let axis = self.get_pane_group_axis(pane_group_id); diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index bf2f765e19..e5fe6d5aee 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -13,6 +13,7 @@ use sqlez::{ bindable::{Bind, Column}, connection::Connection, migrations::Migration, + statement::Statement, }; use crate::pane::SerializedDockPane; @@ -41,25 +42,15 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( #[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] pub struct WorkspaceId(i64); -impl WorkspaceId { - pub fn raw_id(&self) -> i64 { - self.0 - } -} - impl Bind for WorkspaceId { - fn bind(&self, statement: &sqlez::statement::Statement, start_index: i32) -> Result { - statement.bind(self.raw_id(), start_index) + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + self.0.bind(statement, start_index) } } impl Column for WorkspaceId { - fn column( - statement: &mut sqlez::statement::Statement, - start_index: i32, - ) -> 
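// Aside (a sketch, not from this patch): Bind and Column thread a 1-based index
// through every call -- bind() returns the next free parameter index, column()
// returns the parsed value plus the next column to read -- so composite types can
// delegate to their fields. Assuming only the sqlez traits shown in this series,
// an impl for a hypothetical two-field type could look like:
//
//     struct Point { x: i64, y: i64 }
//
//     impl Bind for Point {
//         fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
//             let next_index = self.x.bind(statement, start_index)?;
//             self.y.bind(statement, next_index)
//         }
//     }
//
//     impl Column for Point {
//         fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
//             let ((x, y), next_index) = <(i64, i64) as Column>::column(statement, start_index)?;
//             Ok((Point { x, y }, next_index))
//         }
//     }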
Result<(Self, i32)> { - ::column(statement, start_index) - .map(|(id, next_index)| (WorkspaceId(id), next_index)) + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + i64::column(statement, start_index).map(|(id, next_index)| (Self(id), next_index)) } } From 19aac6a57f1a006ddc66f502d29854ff091a6377 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 2 Nov 2022 15:20:47 -0700 Subject: [PATCH 140/240] Moved docks to a better position --- crates/db/examples/serialize-pane.rs | 2 +- crates/db/src/items.rs | 3 +- crates/db/src/pane.rs | 168 +++++++++------------- crates/db/src/workspace.rs | 207 ++++++++++++++++++++------- 4 files changed, 226 insertions(+), 154 deletions(-) diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index 6073476709..ebe88037cd 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -1,6 +1,6 @@ use std::{fs::File, path::Path}; -use db::pane::{DockAnchor, SerializedDockPane}; +use db::{pane::SerializedDockPane, DockAnchor}; const TEST_FILE: &'static str = "test-db.db"; diff --git a/crates/db/src/items.rs b/crates/db/src/items.rs index a6497903ac..93251e5eed 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/items.rs @@ -67,8 +67,7 @@ #[derive(Debug, PartialEq, Eq)] pub struct ItemId { - workspace_id: usize, - item_id: usize, + pub item_id: usize, } // enum SerializedItemKind { diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 816290d870..ffb81c4012 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,4 +1,3 @@ -use anyhow::bail; use gpui::Axis; use indoc::indoc; use sqlez::{ @@ -8,7 +7,7 @@ use sqlez::{ }; use util::{iife, ResultExt}; -use crate::{items::ItemId, workspace::WorkspaceId}; +use crate::{items::ItemId, workspace::WorkspaceId, DockAnchor}; use super::Db; @@ -33,14 +32,15 @@ CREATE TABLE panes( FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; -CREATE TABLE dock_panes( - pane_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' - visible INTEGER NOT NULL, -- Boolean - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE -) STRICT; +-- MOVE TO WORKSPACE TABLE +// CREATE TABLE dock_panes( +// pane_id INTEGER PRIMARY KEY, +// workspace_id INTEGER NOT NULL, +// anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' +// visible INTEGER NOT NULL, -- Boolean +// FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE +// FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE +// ) STRICT; CREATE TABLE items( item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique @@ -77,36 +77,34 @@ pub struct PaneId { #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub struct PaneGroupId { workspace_id: WorkspaceId, - group_id: usize, } impl PaneGroupId { pub fn root(workspace_id: WorkspaceId) -> Self { Self { workspace_id, - group_id: 0, + // group_id: 0, } } } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Default)] pub struct SerializedPaneGroup { - group_id: PaneGroupId, axis: Axis, children: Vec, } impl SerializedPaneGroup { - pub fn empty_root(workspace_id: WorkspaceId) -> Self { + pub fn empty_root(_workspace_id: WorkspaceId) -> Self { Self { - group_id: PaneGroupId::root(workspace_id), + // group_id: PaneGroupId::root(workspace_id), axis: 
Default::default(), children: Default::default(), } } } -struct PaneGroupChildRow { +struct _PaneGroupChildRow { child_pane_id: Option, child_group_id: Option, index: usize, @@ -120,47 +118,11 @@ pub enum PaneGroupChild { #[derive(Debug, PartialEq, Eq)] pub struct SerializedPane { - pane_id: PaneId, - children: Vec, + items: Vec, } //********* CURRENTLY IN USE TYPES: ********* -#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] -pub enum DockAnchor { - #[default] - Bottom, - Right, - Expanded, -} - -impl Bind for DockAnchor { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - DockAnchor::Bottom => "Bottom", - DockAnchor::Right => "Right", - DockAnchor::Expanded => "Expanded", - } - .bind(statement, start_index) - } -} - -impl Column for DockAnchor { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(anchor_text, next_index)| { - Ok(( - match anchor_text.as_ref() { - "Bottom" => DockAnchor::Bottom, - "Right" => DockAnchor::Right, - "Expanded" => DockAnchor::Expanded, - _ => bail!("Stored dock anchor is incorrect"), - }, - next_index, - )) - }) - } -} - #[derive(Default, Debug, PartialEq, Eq)] pub struct SerializedDockPane { pub anchor_position: DockAnchor, @@ -227,56 +189,64 @@ impl Column for DockRow { } impl Db { - pub fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { - let axis = self.get_pane_group_axis(pane_group_id); - let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); - for child_row in self.get_pane_group_children(pane_group_id) { - if let Some(child_pane_id) = child_row.child_pane_id { - children.push(( - child_row.index, - PaneGroupChild::Pane(self.get_pane(PaneId { - workspace_id: pane_group_id.workspace_id, - pane_id: child_pane_id, - })), - )); - } else if let Some(child_group_id) = child_row.child_group_id { - children.push(( - child_row.index, - PaneGroupChild::Group(self.get_pane_group(PaneGroupId { - workspace_id: pane_group_id.workspace_id, - group_id: child_group_id, - })), - )); - } - } - children.sort_by_key(|(index, _)| *index); - - SerializedPaneGroup { - group_id: pane_group_id, - axis, - children: children.into_iter().map(|(_, child)| child).collect(), - } + pub fn get_center_group(&self, _workspace: WorkspaceId) -> SerializedPaneGroup { + unimplemented!() } - fn get_pane_group_children( + pub fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup { + unimplemented!() + // let axis = self.get_pane_group_axis(pane_group_id); + // let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); + // for child_row in self.get_pane_group_children(pane_group_id) { + // if let Some(child_pane_id) = child_row.child_pane_id { + // children.push(( + // child_row.index, + // PaneGroupChild::Pane(self.get_pane(PaneId { + // workspace_id: pane_group_id.workspace_id, + // pane_id: child_pane_id, + // })), + // )); + // } else if let Some(child_group_id) = child_row.child_group_id { + // children.push(( + // child_row.index, + // PaneGroupChild::Group(self.get_pane_group(PaneGroupId { + // workspace_id: pane_group_id.workspace_id, + // group_id: child_group_id, + // })), + // )); + // } + // } + // children.sort_by_key(|(index, _)| *index); + + // SerializedPaneGroup { + // group_id: pane_group_id, + // axis, + // children: children.into_iter().map(|(_, child)| child).collect(), + // } + } + + fn _get_pane_group_children( &self, _pane_group_id: PaneGroupId, - ) -> impl Iterator { + ) -> 
impl Iterator { Vec::new().into_iter() } - fn get_pane_group_axis(&self, _pane_group_id: PaneGroupId) -> Axis { + fn _get_pane_group_axis(&self, _pane_group_id: PaneGroupId) -> Axis { unimplemented!(); } - pub fn save_pane_splits(&self, _center_pane_group: SerializedPaneGroup) { + pub fn save_pane_splits( + &self, + _workspace: &WorkspaceId, + _center_pane_group: &SerializedPaneGroup, + ) { // Delete the center pane group for this workspace and any of its children // Generate new pane group IDs as we go through // insert them - // Items garbage collect themselves when dropped } - pub(crate) fn get_pane(&self, _pane_id: PaneId) -> SerializedPane { + pub(crate) fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane { unimplemented!(); } @@ -305,9 +275,9 @@ impl Db { #[cfg(test)] mod tests { - use crate::{pane::SerializedPane, Db}; + use crate::{items::ItemId, pane::SerializedPane, Db, DockAnchor}; - use super::{DockAnchor, SerializedDockPane}; + use super::{PaneGroupChild, SerializedDockPane, SerializedPaneGroup}; #[test] fn test_basic_dock_pane() { @@ -333,18 +303,18 @@ mod tests { let workspace = db.workspace_for_roots(&["/tmp"]); - let center_pane = SerializedPane { - pane_id: crate::pane::PaneId { - workspace_id: workspace.workspace_id, - pane_id: 1, - }, - children: vec![], + // Pane group -> Pane -> 10 , 20 + let center_pane = SerializedPaneGroup { + axis: gpui::Axis::Horizontal, + children: vec![PaneGroupChild::Pane(SerializedPane { + items: vec![ItemId { item_id: 10 }, ItemId { item_id: 20 }], + })], }; - db.save_dock_pane(&workspace.workspace_id, &dock_pane); + db.save_pane_splits(&workspace.workspace_id, ¢er_pane); let new_workspace = db.workspace_for_roots(&["/tmp"]); - assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane); + assert_eq!(new_workspace.center_group, center_pane); } } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index e5fe6d5aee..3f8dc6e498 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,4 +1,4 @@ -use anyhow::Result; +use anyhow::{bail, Result}; use std::{ ffi::OsStr, @@ -16,7 +16,7 @@ use sqlez::{ statement::Statement, }; -use crate::pane::SerializedDockPane; +use crate::pane::{SerializedDockPane, SerializedPaneGroup}; use super::Db; @@ -28,7 +28,11 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( &[indoc! 
{" CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY, + center_pane_group INTEGER NOT NULL, + dock_anchor TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' + dock_visible INTEGER NOT NULL, -- Boolean timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL + FOREIGN KEY(center_pane_group) REFERENCES pane_groups(group_id) ) STRICT; CREATE TABLE worktree_roots( @@ -54,10 +58,71 @@ impl Column for WorkspaceId { } } +#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] +pub enum DockAnchor { + #[default] + Bottom, + Right, + Expanded, +} + +impl Bind for DockAnchor { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + DockAnchor::Bottom => "Bottom", + DockAnchor::Right => "Right", + DockAnchor::Expanded => "Expanded", + } + .bind(statement, start_index) + } +} + +impl Column for DockAnchor { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(anchor_text, next_index)| { + Ok(( + match anchor_text.as_ref() { + "Bottom" => DockAnchor::Bottom, + "Right" => DockAnchor::Right, + "Expanded" => DockAnchor::Expanded, + _ => bail!("Stored dock anchor is incorrect"), + }, + next_index, + )) + }) + } +} + +#[derive(Debug, PartialEq, Eq)] +struct WorkspaceRow { + pub workspace_id: WorkspaceId, + pub dock_anchor: DockAnchor, + pub dock_visible: bool, +} + +impl Column for WorkspaceRow { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + <(WorkspaceId, DockAnchor, bool) as Column>::column(statement, start_index).map( + |((id, anchor, visible), next_index)| { + ( + WorkspaceRow { + workspace_id: id, + dock_anchor: anchor, + dock_visible: visible, + }, + next_index, + ) + }, + ) + } +} + #[derive(Default, Debug)] pub struct SerializedWorkspace { pub workspace_id: WorkspaceId, - // pub center_group: SerializedPaneGroup, + pub center_group: SerializedPaneGroup, + pub dock_anchor: DockAnchor, + pub dock_visible: bool, pub dock_pane: Option, } @@ -70,15 +135,18 @@ impl Db { { // Find the workspace id which is uniquely identified by this set of paths // return it if found - let mut workspace_id = self.workspace_id(worktree_roots); - if workspace_id.is_none() && worktree_roots.len() == 0 { - workspace_id = self.last_workspace_id(); + let mut workspace_row = self.workspace(worktree_roots); + if workspace_row.is_none() && worktree_roots.len() == 0 { + workspace_row = self.last_workspace_id(); } - if let Some(workspace_id) = workspace_id { + if let Some(workspace_row) = workspace_row { SerializedWorkspace { - workspace_id, - dock_pane: self.get_dock_pane(workspace_id), + dock_pane: self.get_dock_pane(workspace_row.workspace_id), + center_group: self.get_center_group(workspace_row.workspace_id), + workspace_id: workspace_row.workspace_id, + dock_anchor: workspace_row.dock_anchor, + dock_visible: workspace_row.dock_visible, } } else { self.make_new_workspace(worktree_roots) @@ -99,7 +167,7 @@ impl Db { Ok(SerializedWorkspace { workspace_id, - dock_pane: None, + ..Default::default() }) }); @@ -112,11 +180,11 @@ impl Db { } } - fn workspace_id
<P>(&self, worktree_roots: &[P]) -> Option<WorkspaceId>
+    fn workspace<P>
(&self, worktree_roots: &[P]) -> Option where P: AsRef + Debug, { - match get_workspace_id(worktree_roots, &self) { + match get_workspace(worktree_roots, &self) { Ok(workspace_id) => workspace_id, Err(err) => { log::error!("Failed to get workspace_id: {}", err); @@ -149,11 +217,10 @@ impl Db { } } - fn last_workspace_id(&self) -> Option { + fn last_workspace_id(&self) -> Option { let res = self - .prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT 1") - .and_then(|mut stmt| stmt.maybe_row()) - .map(|row| row.map(|id| WorkspaceId(id))); + .prepare("SELECT workspace_id, dock FROM workspaces ORDER BY timestamp DESC LIMIT 1") + .and_then(|mut stmt| stmt.maybe_row::()); match res { Ok(result) => result, @@ -206,13 +273,13 @@ where P: AsRef + Debug, { // Lookup any old WorkspaceIds which have the same set of roots, and delete them. - let preexisting_id = get_workspace_id(worktree_roots, &connection)?; - if let Some(preexisting_id) = preexisting_id { - if preexisting_id != *workspace_id { + let preexisting_workspace = get_workspace(worktree_roots, &connection)?; + if let Some(preexisting_workspace) = preexisting_workspace { + if preexisting_workspace.workspace_id != *workspace_id { // Should also delete fields in other tables with cascading updates connection .prepare("DELETE FROM workspaces WHERE workspace_id = ?")? - .with_bindings(preexisting_id.0)? + .with_bindings(preexisting_workspace.workspace_id.0)? .exec()?; } } @@ -241,7 +308,7 @@ where Ok(()) } -fn get_workspace_id
<P>(worktree_roots: &[P], connection: &Connection) -> Result<Option<WorkspaceId>>
+fn get_workspace<P>
(worktree_roots: &[P], connection: &Connection) -> Result> where P: AsRef + Debug, { @@ -315,7 +382,7 @@ where // parameters by number. let query = format!( r#" - SELECT workspace_id + SELECT workspace_id, dock_anchor, dock_visible FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots WHERE worktree_root in {array_bind} AND workspace_id NOT IN (SELECT wt1.workspace_id FROM worktree_roots as wt1 @@ -331,6 +398,7 @@ where // This will only be called on start up and when root workspaces change, no need to waste memory // caching it. let mut stmt = connection.prepare(&query)?; + // Make sure we bound the parameters correctly debug_assert!(worktree_roots.len() as i32 + 1 == stmt.parameter_count()); @@ -339,11 +407,10 @@ where .map(|root| root.as_ref().as_os_str().as_bytes()) .collect(); - let len = root_bytes.len(); + let num_of_roots = root_bytes.len(); - stmt.with_bindings((root_bytes, len))? - .maybe_row() - .map(|row| row.map(|id| WorkspaceId(id))) + stmt.with_bindings((root_bytes, num_of_roots))? + .maybe_row::() } #[cfg(test)] @@ -401,14 +468,17 @@ mod tests { fn test_empty_worktrees() { let db = Db::open_in_memory("test_empty_worktrees"); - assert_eq!(None, db.workspace_id::(&[])); + assert_eq!(None, db.workspace::(&[])); db.make_new_workspace::(&[]); //ID 1 db.make_new_workspace::(&[]); //ID 2 db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); // Sanity check - assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1))); + assert_eq!( + db.workspace(&["/tmp", "/tmp2"]).unwrap().workspace_id, + WorkspaceId(1) + ); db.update_worktrees::(&WorkspaceId(1), &[]); @@ -416,9 +486,9 @@ mod tests { // call would be semantically correct (as those are the workspaces that // don't have roots) but I'd prefer that this API to either return exactly one // workspace, and None otherwise - assert_eq!(db.workspace_id::(&[]), None,); + assert_eq!(db.workspace::(&[]), None,); - assert_eq!(db.last_workspace_id(), Some(WorkspaceId(1))); + assert_eq!(db.last_workspace_id().unwrap().workspace_id, WorkspaceId(1)); assert_eq!( db.recent_workspaces(2), @@ -445,23 +515,42 @@ mod tests { db.update_worktrees(workspace_id, entries); } - assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp1"])); - assert_eq!(db.workspace_id(&["/tmp1", "/tmp2"]), Some(WorkspaceId(2))); assert_eq!( - db.workspace_id(&["/tmp1", "/tmp2", "/tmp3"]), - Some(WorkspaceId(3)) + WorkspaceId(1), + db.workspace(&["/tmp1"]).unwrap().workspace_id ); - assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), Some(WorkspaceId(4))); assert_eq!( - db.workspace_id(&["/tmp2", "/tmp3", "/tmp4"]), - Some(WorkspaceId(5)) + db.workspace(&["/tmp1", "/tmp2"]).unwrap().workspace_id, + WorkspaceId(2) + ); + assert_eq!( + db.workspace(&["/tmp1", "/tmp2", "/tmp3"]) + .unwrap() + .workspace_id, + WorkspaceId(3) + ); + assert_eq!( + db.workspace(&["/tmp2", "/tmp3"]).unwrap().workspace_id, + WorkspaceId(4) + ); + assert_eq!( + db.workspace(&["/tmp2", "/tmp3", "/tmp4"]) + .unwrap() + .workspace_id, + WorkspaceId(5) + ); + assert_eq!( + db.workspace(&["/tmp2", "/tmp4"]).unwrap().workspace_id, + WorkspaceId(6) + ); + assert_eq!( + db.workspace(&["/tmp2"]).unwrap().workspace_id, + WorkspaceId(7) ); - assert_eq!(db.workspace_id(&["/tmp2", "/tmp4"]), Some(WorkspaceId(6))); - assert_eq!(db.workspace_id(&["/tmp2"]), Some(WorkspaceId(7))); - assert_eq!(db.workspace_id(&["/tmp1", "/tmp5"]), None); - assert_eq!(db.workspace_id(&["/tmp5"]), None); - assert_eq!(db.workspace_id(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None); + 
assert_eq!(db.workspace(&["/tmp1", "/tmp5"]), None); + assert_eq!(db.workspace(&["/tmp5"]), None); + assert_eq!(db.workspace(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None); } #[test] @@ -479,13 +568,21 @@ mod tests { db.update_worktrees(workspace_id, entries); } - assert_eq!(db.workspace_id(&["/tmp2"]), None); - assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), None); - assert_eq!(db.workspace_id(&["/tmp"]), Some(WorkspaceId(1))); - assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(2))); + assert_eq!(db.workspace(&["/tmp2"]), None); + assert_eq!(db.workspace(&["/tmp2", "/tmp3"]), None); assert_eq!( - db.workspace_id(&["/tmp", "/tmp2", "/tmp3"]), - Some(WorkspaceId(3)) + db.workspace(&["/tmp"]).unwrap().workspace_id, + WorkspaceId(1) + ); + assert_eq!( + db.workspace(&["/tmp", "/tmp2"]).unwrap().workspace_id, + WorkspaceId(2) + ); + assert_eq!( + db.workspace(&["/tmp", "/tmp2", "/tmp3"]) + .unwrap() + .workspace_id, + WorkspaceId(3) ); } @@ -526,15 +623,21 @@ mod tests { db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); // Make sure that workspace 3 doesn't exist - assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), Some(WorkspaceId(2))); + assert_eq!( + db.workspace(&["/tmp2", "/tmp3"]).unwrap().workspace_id, + WorkspaceId(2) + ); // And that workspace 1 was untouched - assert_eq!(db.workspace_id(&["/tmp"]), Some(WorkspaceId(1))); + assert_eq!( + db.workspace(&["/tmp"]).unwrap().workspace_id, + WorkspaceId(1) + ); // And that workspace 2 is no longer registered under these roots - assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), None); + assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None); - assert_eq!(Some(WorkspaceId(2)), db.last_workspace_id()); + assert_eq!(db.last_workspace_id().unwrap().workspace_id, WorkspaceId(2)); let recent_workspaces = db.recent_workspaces(10); assert_eq!( From d492cbced9e25518440d8eaba6a638f6bdf92cee Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 2 Nov 2022 16:26:43 -0700 Subject: [PATCH 141/240] WIP --- crates/db/src/db.rs | 9 -- crates/db/src/pane.rs | 14 +- crates/db/src/workspace.rs | 284 ++++++++++--------------------------- 3 files changed, 78 insertions(+), 229 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 48a025112a..6077bdeec1 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -70,12 +70,3 @@ impl Db { self.backup_main(&destination) } } - -impl Drop for Db { - fn drop(&mut self) { - self.exec(indoc! 
{" - PRAGMA analysis_limit=500; - PRAGMA optimize"}) - .ok(); - } -} diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index ffb81c4012..4904f515b9 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -32,16 +32,6 @@ CREATE TABLE panes( FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; --- MOVE TO WORKSPACE TABLE -// CREATE TABLE dock_panes( -// pane_id INTEGER PRIMARY KEY, -// workspace_id INTEGER NOT NULL, -// anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' -// visible INTEGER NOT NULL, -- Boolean -// FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE -// FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE -// ) STRICT; - CREATE TABLE items( item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique pane_id INTEGER NOT NULL, @@ -313,8 +303,8 @@ mod tests { db.save_pane_splits(&workspace.workspace_id, ¢er_pane); - let new_workspace = db.workspace_for_roots(&["/tmp"]); + // let new_workspace = db.workspace_for_roots(&["/tmp"]); - assert_eq!(new_workspace.center_group, center_pane); + // assert_eq!(new_workspace.center_group, center_pane); } } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 3f8dc6e498..03ca321b5d 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,7 +1,7 @@ -use anyhow::{bail, Result}; +use anyhow::{bail, Context, Result}; +use util::{iife, ResultExt}; use std::{ - ffi::OsStr, fmt::Debug, os::unix::prelude::OsStrExt, path::{Path, PathBuf}, @@ -28,11 +28,9 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( &[indoc! {" CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY, - center_pane_group INTEGER NOT NULL, - dock_anchor TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' - dock_visible INTEGER NOT NULL, -- Boolean + dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' + dock_visible INTEGER, -- Boolean timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL - FOREIGN KEY(center_pane_group) REFERENCES pane_groups(group_id) ) STRICT; CREATE TABLE worktree_roots( @@ -93,43 +91,21 @@ impl Column for DockAnchor { } } -#[derive(Debug, PartialEq, Eq)] -struct WorkspaceRow { - pub workspace_id: WorkspaceId, - pub dock_anchor: DockAnchor, - pub dock_visible: bool, -} - -impl Column for WorkspaceRow { - fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - <(WorkspaceId, DockAnchor, bool) as Column>::column(statement, start_index).map( - |((id, anchor, visible), next_index)| { - ( - WorkspaceRow { - workspace_id: id, - dock_anchor: anchor, - dock_visible: visible, - }, - next_index, - ) - }, - ) - } -} +type WorkspaceRow = (WorkspaceId, DockAnchor, bool); #[derive(Default, Debug)] pub struct SerializedWorkspace { - pub workspace_id: WorkspaceId, + pub worktree_roots: Vec>, pub center_group: SerializedPaneGroup, pub dock_anchor: DockAnchor, pub dock_visible: bool, - pub dock_pane: Option, + pub dock_pane: SerializedDockPane, } impl Db { /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, /// returns the last workspace which was updated - pub fn workspace_for_roots
<P>(&self, worktree_roots: &[P]) -> SerializedWorkspace
+    pub fn workspace_for_roots<P>
(&self, worktree_roots: &[P]) -> Option where P: AsRef + Debug, { @@ -140,57 +116,23 @@ impl Db { workspace_row = self.last_workspace_id(); } - if let Some(workspace_row) = workspace_row { - SerializedWorkspace { - dock_pane: self.get_dock_pane(workspace_row.workspace_id), - center_group: self.get_center_group(workspace_row.workspace_id), - workspace_id: workspace_row.workspace_id, - dock_anchor: workspace_row.dock_anchor, - dock_visible: workspace_row.dock_visible, - } - } else { - self.make_new_workspace(worktree_roots) - } - } - - fn make_new_workspace
<P>
(&self, worktree_roots: &[P]) -> SerializedWorkspace - where - P: AsRef + Debug, - { - let res = self.with_savepoint("make_new_workspace", |conn| { - let workspace_id = WorkspaceId( - conn.prepare("INSERT INTO workspaces DEFAULT VALUES")? - .insert()?, - ); - - update_worktree_roots(conn, &workspace_id, worktree_roots)?; - - Ok(SerializedWorkspace { - workspace_id, - ..Default::default() - }) - }); - - match res { - Ok(serialized_workspace) => serialized_workspace, - Err(err) => { - log::error!("Failed to insert new workspace into DB: {}", err); - Default::default() - } - } + workspace_row.and_then( + |(workspace_id, dock_anchor, dock_visible)| SerializedWorkspace { + dock_pane: self.get_dock_pane(workspace_id)?, + center_group: self.get_center_group(workspace_id), + dock_anchor, + dock_visible, + }, + ) } fn workspace
<P>
(&self, worktree_roots: &[P]) -> Option where P: AsRef + Debug, { - match get_workspace(worktree_roots, &self) { - Ok(workspace_id) => workspace_id, - Err(err) => { - log::error!("Failed to get workspace_id: {}", err); - None - } - } + get_workspace(worktree_roots, &self) + .log_err() + .unwrap_or_default() } // fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow { @@ -204,63 +146,35 @@ impl Db { where P: AsRef + Debug, { - match self.with_savepoint("update_worktrees", |conn| { + self.with_savepoint("update_worktrees", |conn| { update_worktree_roots(conn, workspace_id, worktree_roots) - }) { - Ok(_) => {} - Err(err) => log::error!( - "Failed to update workspace {:?} with roots {:?}, error: {}", - workspace_id, - worktree_roots, - err - ), - } + }) + .context("Update workspace {workspace_id:?} with roots {worktree_roots:?}") + .log_err(); } fn last_workspace_id(&self) -> Option { - let res = self - .prepare("SELECT workspace_id, dock FROM workspaces ORDER BY timestamp DESC LIMIT 1") - .and_then(|mut stmt| stmt.maybe_row::()); - - match res { - Ok(result) => result, - Err(err) => { - log::error!("Failed to get last workspace id, err: {}", err); - return None; - } - } + iife! ({ + self.prepare("SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1")? + .maybe_row::() + }).log_err()? } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots - pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, Vec>)> { + pub fn recent_workspaces(&self, limit: usize) -> Vec> { self.with_savepoint("recent_workspaces", |conn| { - let rows = conn - .prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? - .with_bindings(limit)? - .rows::()?; - - let ids = rows.iter().map(|row| WorkspaceId(*row)); - - let mut result = Vec::new(); - let mut stmt = conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; - for workspace_id in ids { - let roots = stmt - .with_bindings(workspace_id.0)? - .rows::>()? - .iter() - .map(|row| PathBuf::from(OsStr::from_bytes(&row)).into()) - .collect(); - result.push((workspace_id, roots)) - } - Ok(result) - }) - .unwrap_or_else(|err| { - log::error!("Failed to get recent workspaces, err: {}", err); - Vec::new() + conn.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? + .with_bindings(limit)? + .rows::()? + .iter() + .map(|workspace_id| stmt.with_bindings(workspace_id.0)?.rows::()) + .collect::>() }) + .log_err() + .unwrap_or_default() } } @@ -274,12 +188,12 @@ where { // Lookup any old WorkspaceIds which have the same set of roots, and delete them. let preexisting_workspace = get_workspace(worktree_roots, &connection)?; - if let Some(preexisting_workspace) = preexisting_workspace { - if preexisting_workspace.workspace_id != *workspace_id { + if let Some((preexisting_workspace_id, _, _)) = preexisting_workspace { + if preexisting_workspace_id != *workspace_id { // Should also delete fields in other tables with cascading updates connection .prepare("DELETE FROM workspaces WHERE workspace_id = ?")? - .with_bindings(preexisting_workspace.workspace_id.0)? + .with_bindings(preexisting_workspace_id)? .exec()?; } } @@ -319,16 +233,13 @@ where // Prepare the array binding string. SQL doesn't have syntax for this, so // we have to do it ourselves. 
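// Illustration (not from this patch): SQLite's ?NNN placeholders are 1-based, so
// for three roots the string built here is "(?1, ?2, ?3)". The same construction
// as a standalone sketch, using only the standard library:
//
//     fn array_binding(count: usize) -> String {
//         let placeholders: Vec<String> =
//             (1..=count).map(|i| format!("?{}", i)).collect();
//         format!("({})", placeholders.join(", "))
//     }
//
//     assert_eq!(array_binding(3), "(?1, ?2, ?3)");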
- let mut array_binding_stmt = "(".to_string(); - for i in 0..worktree_roots.len() { - // This uses ?NNN for numbered placeholder syntax - array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based - if i < worktree_roots.len() - 1 { - array_binding_stmt.push(','); - array_binding_stmt.push(' '); - } - } - array_binding_stmt.push(')'); + let array_binding_stmt = format!( + "({})", + (0..worktree_roots.len()) + .map(|index| format!("?{}", index + 1)) + .collect::>() + .join(", ") + ); // Any workspace can have multiple independent paths, and these paths // can overlap in the database. Take this test data for example: @@ -382,15 +293,17 @@ where // parameters by number. let query = format!( r#" - SELECT workspace_id, dock_anchor, dock_visible - FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots - WHERE worktree_root in {array_bind} AND workspace_id NOT IN - (SELECT wt1.workspace_id FROM worktree_roots as wt1 - JOIN worktree_roots as wt2 - ON wt1.workspace_id = wt2.workspace_id - WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind}) - GROUP BY workspace_id) - WHERE num_matching = ? + SELECT workspaces.workspace_id, workspaces.dock_anchor, workspaces.dock_visible + FROM (SELECT workspace_id + FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots + WHERE worktree_root in {array_bind} AND workspace_id NOT IN + (SELECT wt1.workspace_id FROM worktree_roots as wt1 + JOIN worktree_roots as wt2 + ON wt1.workspace_id = wt2.workspace_id + WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind}) + GROUP BY workspace_id) + WHERE num_matching = ?) as matching_workspace + JOIN workspaces ON workspaces.workspace_id = matching_workspace.workspace_id "#, array_bind = array_binding_stmt ); @@ -416,12 +329,7 @@ where #[cfg(test)] mod tests { - use std::{ - path::{Path, PathBuf}, - sync::Arc, - thread::sleep, - time::Duration, - }; + use std::{path::PathBuf, thread::sleep, time::Duration}; use crate::Db; @@ -475,10 +383,7 @@ mod tests { db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); // Sanity check - assert_eq!( - db.workspace(&["/tmp", "/tmp2"]).unwrap().workspace_id, - WorkspaceId(1) - ); + assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(1)); db.update_worktrees::(&WorkspaceId(1), &[]); @@ -488,11 +393,11 @@ mod tests { // workspace, and None otherwise assert_eq!(db.workspace::(&[]), None,); - assert_eq!(db.last_workspace_id().unwrap().workspace_id, WorkspaceId(1)); + assert_eq!(db.last_workspace_id().unwrap().0, WorkspaceId(1)); assert_eq!( db.recent_workspaces(2), - vec![(WorkspaceId(1), vec![]), (WorkspaceId(2), vec![]),], + vec![Vec::::new(), Vec::::new()], ) } @@ -515,38 +420,19 @@ mod tests { db.update_worktrees(workspace_id, entries); } + assert_eq!(WorkspaceId(1), db.workspace(&["/tmp1"]).unwrap().0); + assert_eq!(db.workspace(&["/tmp1", "/tmp2"]).unwrap().0, WorkspaceId(2)); assert_eq!( - WorkspaceId(1), - db.workspace(&["/tmp1"]).unwrap().workspace_id - ); - assert_eq!( - db.workspace(&["/tmp1", "/tmp2"]).unwrap().workspace_id, - WorkspaceId(2) - ); - assert_eq!( - db.workspace(&["/tmp1", "/tmp2", "/tmp3"]) - .unwrap() - .workspace_id, + db.workspace(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(3) ); + assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(4)); assert_eq!( - db.workspace(&["/tmp2", "/tmp3"]).unwrap().workspace_id, - WorkspaceId(4) - ); - assert_eq!( - db.workspace(&["/tmp2", "/tmp3", "/tmp4"]) - 
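// WorkspaceRow is the tuple (WorkspaceId, DockAnchor, bool), so the `.0` in the
// rewritten assertions below reads the id field after `.unwrap()` has asserted
// that the lookup found a row.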
.unwrap() - .workspace_id, + db.workspace(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().0, WorkspaceId(5) ); - assert_eq!( - db.workspace(&["/tmp2", "/tmp4"]).unwrap().workspace_id, - WorkspaceId(6) - ); - assert_eq!( - db.workspace(&["/tmp2"]).unwrap().workspace_id, - WorkspaceId(7) - ); + assert_eq!(db.workspace(&["/tmp2", "/tmp4"]).unwrap().0, WorkspaceId(6)); + assert_eq!(db.workspace(&["/tmp2"]).unwrap().0, WorkspaceId(7)); assert_eq!(db.workspace(&["/tmp1", "/tmp5"]), None); assert_eq!(db.workspace(&["/tmp5"]), None); @@ -570,26 +456,14 @@ mod tests { assert_eq!(db.workspace(&["/tmp2"]), None); assert_eq!(db.workspace(&["/tmp2", "/tmp3"]), None); + assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); + assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(2)); assert_eq!( - db.workspace(&["/tmp"]).unwrap().workspace_id, - WorkspaceId(1) - ); - assert_eq!( - db.workspace(&["/tmp", "/tmp2"]).unwrap().workspace_id, - WorkspaceId(2) - ); - assert_eq!( - db.workspace(&["/tmp", "/tmp2", "/tmp3"]) - .unwrap() - .workspace_id, + db.workspace(&["/tmp", "/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(3) ); } - fn arc_path(path: &'static str) -> Arc { - PathBuf::from(path).into() - } - #[test] fn test_tricky_overlapping_updates() { // DB state: @@ -623,30 +497,24 @@ mod tests { db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); // Make sure that workspace 3 doesn't exist - assert_eq!( - db.workspace(&["/tmp2", "/tmp3"]).unwrap().workspace_id, - WorkspaceId(2) - ); + assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(2)); // And that workspace 1 was untouched - assert_eq!( - db.workspace(&["/tmp"]).unwrap().workspace_id, - WorkspaceId(1) - ); + assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); // And that workspace 2 is no longer registered under these roots assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None); - assert_eq!(db.last_workspace_id().unwrap().workspace_id, WorkspaceId(2)); + assert_eq!(db.last_workspace_id().unwrap().0, WorkspaceId(2)); let recent_workspaces = db.recent_workspaces(10); assert_eq!( recent_workspaces.get(0).unwrap(), - &(WorkspaceId(2), vec![arc_path("/tmp2"), arc_path("/tmp3")]) + &vec![PathBuf::from("/tmp2"), PathBuf::from("/tmp3")] ); assert_eq!( recent_workspaces.get(1).unwrap(), - &(WorkspaceId(1), vec![arc_path("/tmp")]) + &vec![PathBuf::from("/tmp")] ); } } From b552f1788c7282f6c75d7476817770ed775b36a4 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 2 Nov 2022 16:31:36 -0700 Subject: [PATCH 142/240] WIP2 --- crates/db/src/workspace.rs | 115 +++++++++++++++---------------------- 1 file changed, 46 insertions(+), 69 deletions(-) diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 03ca321b5d..bec9f98823 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -5,7 +5,6 @@ use std::{ fmt::Debug, os::unix::prelude::OsStrExt, path::{Path, PathBuf}, - sync::Arc, }; use indoc::indoc; @@ -95,7 +94,6 @@ type WorkspaceRow = (WorkspaceId, DockAnchor, bool); #[derive(Default, Debug)] pub struct SerializedWorkspace { - pub worktree_roots: Vec>, pub center_group: SerializedPaneGroup, pub dock_anchor: DockAnchor, pub dock_visible: bool, @@ -105,40 +103,30 @@ pub struct SerializedWorkspace { impl Db { /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, /// returns the last workspace which was updated + pub fn workspace_for_roots
<P>
(&self, worktree_roots: &[P]) -> Option where P: AsRef + Debug, { // Find the workspace id which is uniquely identified by this set of paths // return it if found - let mut workspace_row = self.workspace(worktree_roots); + let mut workspace_row = get_workspace(worktree_roots, &self) + .log_err() + .unwrap_or_default(); if workspace_row.is_none() && worktree_roots.len() == 0 { - workspace_row = self.last_workspace_id(); + workspace_row = self.last_workspace(); } - workspace_row.and_then( - |(workspace_id, dock_anchor, dock_visible)| SerializedWorkspace { + workspace_row.and_then(|(workspace_id, dock_anchor, dock_visible)| { + Some(SerializedWorkspace { dock_pane: self.get_dock_pane(workspace_id)?, center_group: self.get_center_group(workspace_id), dock_anchor, dock_visible, - }, - ) + }) + }) } - fn workspace
<P>
(&self, worktree_roots: &[P]) -> Option - where - P: AsRef + Debug, - { - get_workspace(worktree_roots, &self) - .log_err() - .unwrap_or_default() - } - - // fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow { - // unimplemented!() - // } - /// Updates the open paths for the given workspace id. Will garbage collect items from /// any workspace ids which are no replaced by the new workspace id. Updates the timestamps /// in the workspace id table @@ -147,13 +135,46 @@ impl Db { P: AsRef + Debug, { self.with_savepoint("update_worktrees", |conn| { - update_worktree_roots(conn, workspace_id, worktree_roots) + // Lookup any old WorkspaceIds which have the same set of roots, and delete them. + let preexisting_workspace = get_workspace(worktree_roots, &conn)?; + if let Some((preexisting_workspace_id, _, _)) = preexisting_workspace { + if preexisting_workspace_id != *workspace_id { + // Should also delete fields in other tables with cascading updates + conn.prepare("DELETE FROM workspaces WHERE workspace_id = ?")? + .with_bindings(preexisting_workspace_id)? + .exec()?; + } + } + + conn.prepare("DELETE FROM worktree_roots WHERE workspace_id = ?")? + .with_bindings(workspace_id.0)? + .exec()?; + + for root in worktree_roots { + let path = root.as_ref().as_os_str().as_bytes(); + // If you need to debug this, here's the string parsing: + // let path = root.as_ref().to_string_lossy().to_string(); + + conn.prepare( + "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", + )? + .with_bindings((workspace_id.0, path))? + .exec()?; + } + + conn.prepare( + "UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?", + )? + .with_bindings(workspace_id.0)? + .exec()?; + + Ok(()) }) .context("Update workspace {workspace_id:?} with roots {worktree_roots:?}") .log_err(); } - fn last_workspace_id(&self) -> Option { + fn last_workspace(&self) -> Option { iife! ({ self.prepare("SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1")? .maybe_row::() @@ -178,50 +199,6 @@ impl Db { } } -fn update_worktree_roots
<P>
( - connection: &Connection, - workspace_id: &WorkspaceId, - worktree_roots: &[P], -) -> Result<()> -where - P: AsRef + Debug, -{ - // Lookup any old WorkspaceIds which have the same set of roots, and delete them. - let preexisting_workspace = get_workspace(worktree_roots, &connection)?; - if let Some((preexisting_workspace_id, _, _)) = preexisting_workspace { - if preexisting_workspace_id != *workspace_id { - // Should also delete fields in other tables with cascading updates - connection - .prepare("DELETE FROM workspaces WHERE workspace_id = ?")? - .with_bindings(preexisting_workspace_id)? - .exec()?; - } - } - - connection - .prepare("DELETE FROM worktree_roots WHERE workspace_id = ?")? - .with_bindings(workspace_id.0)? - .exec()?; - - for root in worktree_roots { - let path = root.as_ref().as_os_str().as_bytes(); - // If you need to debug this, here's the string parsing: - // let path = root.as_ref().to_string_lossy().to_string(); - - connection - .prepare("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")? - .with_bindings((workspace_id.0, path))? - .exec()?; - } - - connection - .prepare("UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")? - .with_bindings(workspace_id.0)? - .exec()?; - - Ok(()) -} - fn get_workspace
<P>
(worktree_roots: &[P], connection: &Connection) -> Result> where P: AsRef + Debug, @@ -393,7 +370,7 @@ mod tests { // workspace, and None otherwise assert_eq!(db.workspace::(&[]), None,); - assert_eq!(db.last_workspace_id().unwrap().0, WorkspaceId(1)); + assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(1)); assert_eq!( db.recent_workspaces(2), @@ -505,7 +482,7 @@ mod tests { // And that workspace 2 is no longer registered under these roots assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None); - assert_eq!(db.last_workspace_id().unwrap().0, WorkspaceId(2)); + assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(2)); let recent_workspaces = db.recent_workspaces(10); assert_eq!( From aa7b909b7b63dded4702badaa4d0f92a7d3364cd Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 2 Nov 2022 16:36:40 -0700 Subject: [PATCH 143/240] WIP3 --- crates/db/src/workspace.rs | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index bec9f98823..10f99df2af 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -41,7 +41,7 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( ); #[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] -pub struct WorkspaceId(i64); +pub(crate) struct WorkspaceId(i64); impl Bind for WorkspaceId { fn bind(&self, statement: &Statement, start_index: i32) -> Result { @@ -103,7 +103,6 @@ pub struct SerializedWorkspace { impl Db { /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, /// returns the last workspace which was updated - pub fn workspace_for_roots
<P>
(&self, worktree_roots: &[P]) -> Option where P: AsRef + Debug, @@ -114,7 +113,11 @@ impl Db { .log_err() .unwrap_or_default(); if workspace_row.is_none() && worktree_roots.len() == 0 { - workspace_row = self.last_workspace(); + workspace_row = self.prepare( + "SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1" + ).and_then(|mut stmt| stmt.maybe_row::()) + .log_err() + .flatten() } workspace_row.and_then(|(workspace_id, dock_anchor, dock_visible)| { @@ -127,6 +130,8 @@ impl Db { }) } + /// TODO: Change to be 'update workspace' and to serialize the whole workspace in one go. + /// /// Updates the open paths for the given workspace id. Will garbage collect items from /// any workspace ids which are no replaced by the new workspace id. Updates the timestamps /// in the workspace id table @@ -174,13 +179,6 @@ impl Db { .log_err(); } - fn last_workspace(&self) -> Option { - iife! ({ - self.prepare("SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1")? - .maybe_row::() - }).log_err()? - } - /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec> { self.with_savepoint("recent_workspaces", |conn| { From eb0598dac2dfce10100b8b9893c61e70d3c35574 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 2 Nov 2022 18:09:35 -0700 Subject: [PATCH 144/240] more refactoring and slightly better api --- crates/db/src/db.rs | 4 +- crates/db/src/pane.rs | 310 --------------- crates/db/src/workspace.rs | 531 +++++++++++-------------- crates/db/src/{ => workspace}/items.rs | 5 - crates/db/src/workspace/model.rs | 173 ++++++++ crates/db/src/workspace/pane.rs | 169 ++++++++ crates/sqlez/src/bindable.rs | 23 ++ 7 files changed, 588 insertions(+), 627 deletions(-) delete mode 100644 crates/db/src/pane.rs rename crates/db/src/{ => workspace}/items.rs (97%) create mode 100644 crates/db/src/workspace/model.rs create mode 100644 crates/db/src/workspace/pane.rs diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 6077bdeec1..07670e309a 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,7 +1,5 @@ -pub mod items; pub mod kvp; mod migrations; -pub mod pane; pub mod workspace; use std::fs; @@ -11,10 +9,10 @@ use std::path::Path; use anyhow::Result; use indoc::indoc; use kvp::KVP_MIGRATION; -use pane::PANE_MIGRATIONS; use sqlez::connection::Connection; use sqlez::thread_safe_connection::ThreadSafeConnection; +use workspace::pane::PANE_MIGRATIONS; pub use workspace::*; #[derive(Clone)] diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs deleted file mode 100644 index 4904f515b9..0000000000 --- a/crates/db/src/pane.rs +++ /dev/null @@ -1,310 +0,0 @@ -use gpui::Axis; -use indoc::indoc; -use sqlez::{ - bindable::{Bind, Column}, - migrations::Migration, - statement::Statement, -}; -use util::{iife, ResultExt}; - -use crate::{items::ItemId, workspace::WorkspaceId, DockAnchor}; - -use super::Db; - -pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( - "pane", - &[indoc! 
{" -CREATE TABLE pane_groups( - group_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - parent_group INTEGER, -- NULL indicates that this is a root node - axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE panes( - pane_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - group_id INTEGER, -- If null, this is a dock pane - idx INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE items( - item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique - pane_id INTEGER NOT NULL, - workspace_id INTEGER NOT NULL, - kind TEXT NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE - PRIMARY KEY(item_id, workspace_id) -) STRICT; -"}], -); - -// We have an many-branched, unbalanced tree with three types: -// Pane Groups -// Panes -// Items - -// The root is always a Pane Group -// Pane Groups can have 0 (or more) Panes and/or Pane Groups as children -// Panes can have 0 or more items as children -// Panes can be their own root -// Items cannot have children -// References pointing down is hard (SQL doesn't like arrays) -// References pointing up is easy (1-1 item / parent relationship) but is harder to query -// - -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub struct PaneId { - workspace_id: WorkspaceId, - pane_id: usize, -} - -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub struct PaneGroupId { - workspace_id: WorkspaceId, -} - -impl PaneGroupId { - pub fn root(workspace_id: WorkspaceId) -> Self { - Self { - workspace_id, - // group_id: 0, - } - } -} - -#[derive(Debug, PartialEq, Eq, Default)] -pub struct SerializedPaneGroup { - axis: Axis, - children: Vec, -} - -impl SerializedPaneGroup { - pub fn empty_root(_workspace_id: WorkspaceId) -> Self { - Self { - // group_id: PaneGroupId::root(workspace_id), - axis: Default::default(), - children: Default::default(), - } - } -} - -struct _PaneGroupChildRow { - child_pane_id: Option, - child_group_id: Option, - index: usize, -} - -#[derive(Debug, PartialEq, Eq)] -pub enum PaneGroupChild { - Pane(SerializedPane), - Group(SerializedPaneGroup), -} - -#[derive(Debug, PartialEq, Eq)] -pub struct SerializedPane { - items: Vec, -} - -//********* CURRENTLY IN USE TYPES: ********* - -#[derive(Default, Debug, PartialEq, Eq)] -pub struct SerializedDockPane { - pub anchor_position: DockAnchor, - pub visible: bool, -} - -impl SerializedDockPane { - fn to_row(&self, workspace: &WorkspaceId) -> DockRow { - DockRow { - workspace_id: *workspace, - anchor_position: self.anchor_position, - visible: self.visible, - } - } -} - -impl Column for SerializedDockPane { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - <(DockAnchor, bool) as Column>::column(statement, start_index).map( - |((anchor_position, visible), next_index)| { - ( - SerializedDockPane { - anchor_position, - visible, - }, - next_index, - ) - }, - ) - } -} - -#[derive(Default, Debug, PartialEq, Eq)] -pub(crate) struct DockRow { - workspace_id: WorkspaceId, - anchor_position: DockAnchor, - visible: bool, -} - -impl Bind for DockRow { - fn bind(&self, statement: &Statement, start_index: i32) -> 
anyhow::Result { - statement.bind( - (self.workspace_id, self.anchor_position, self.visible), - start_index, - ) - } -} - -impl Column for DockRow { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - <(WorkspaceId, DockAnchor, bool) as Column>::column(statement, start_index).map( - |((workspace_id, anchor_position, visible), next_index)| { - ( - DockRow { - workspace_id, - anchor_position, - visible, - }, - next_index, - ) - }, - ) - } -} - -impl Db { - pub fn get_center_group(&self, _workspace: WorkspaceId) -> SerializedPaneGroup { - unimplemented!() - } - - pub fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup { - unimplemented!() - // let axis = self.get_pane_group_axis(pane_group_id); - // let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); - // for child_row in self.get_pane_group_children(pane_group_id) { - // if let Some(child_pane_id) = child_row.child_pane_id { - // children.push(( - // child_row.index, - // PaneGroupChild::Pane(self.get_pane(PaneId { - // workspace_id: pane_group_id.workspace_id, - // pane_id: child_pane_id, - // })), - // )); - // } else if let Some(child_group_id) = child_row.child_group_id { - // children.push(( - // child_row.index, - // PaneGroupChild::Group(self.get_pane_group(PaneGroupId { - // workspace_id: pane_group_id.workspace_id, - // group_id: child_group_id, - // })), - // )); - // } - // } - // children.sort_by_key(|(index, _)| *index); - - // SerializedPaneGroup { - // group_id: pane_group_id, - // axis, - // children: children.into_iter().map(|(_, child)| child).collect(), - // } - } - - fn _get_pane_group_children( - &self, - _pane_group_id: PaneGroupId, - ) -> impl Iterator { - Vec::new().into_iter() - } - - fn _get_pane_group_axis(&self, _pane_group_id: PaneGroupId) -> Axis { - unimplemented!(); - } - - pub fn save_pane_splits( - &self, - _workspace: &WorkspaceId, - _center_pane_group: &SerializedPaneGroup, - ) { - // Delete the center pane group for this workspace and any of its children - // Generate new pane group IDs as we go through - // insert them - } - - pub(crate) fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane { - unimplemented!(); - } - - pub fn get_dock_pane(&self, workspace: WorkspaceId) -> Option { - iife!({ - self.prepare("SELECT anchor_position, visible FROM dock_panes WHERE workspace_id = ?")? - .with_bindings(workspace)? - .maybe_row::() - }) - .log_err() - .flatten() - } - - pub fn save_dock_pane(&self, workspace: &WorkspaceId, dock_pane: &SerializedDockPane) { - iife!({ - self.prepare( - "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);", - )? - .with_bindings(dock_pane.to_row(workspace))? 
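// The iife! macro from util (defined earlier in this series as `(|| $block)()`)
// wraps a block in an immediately-invoked closure so `?` can be used inside
// functions that do not themselves return Result. A sketch of the recurring
// prepare -> with_bindings -> exec flow it enables, reusing names from this file:
//
//     iife!({
//         db.prepare("DELETE FROM dock_panes WHERE workspace_id = ?")?
//             .with_bindings(workspace.0)?
//             .exec()
//     })
//     .log_err();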
- .insert() - }) - .log_err(); - } -} - -#[cfg(test)] -mod tests { - - use crate::{items::ItemId, pane::SerializedPane, Db, DockAnchor}; - - use super::{PaneGroupChild, SerializedDockPane, SerializedPaneGroup}; - - #[test] - fn test_basic_dock_pane() { - let db = Db::open_in_memory("basic_dock_pane"); - - let workspace = db.workspace_for_roots(&["/tmp"]); - - let dock_pane = SerializedDockPane { - anchor_position: DockAnchor::Expanded, - visible: true, - }; - - db.save_dock_pane(&workspace.workspace_id, &dock_pane); - - let new_workspace = db.workspace_for_roots(&["/tmp"]); - - assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane); - } - - #[test] - fn test_dock_simple_split() { - let db = Db::open_in_memory("simple_split"); - - let workspace = db.workspace_for_roots(&["/tmp"]); - - // Pane group -> Pane -> 10 , 20 - let center_pane = SerializedPaneGroup { - axis: gpui::Axis::Horizontal, - children: vec![PaneGroupChild::Pane(SerializedPane { - items: vec![ItemId { item_id: 10 }, ItemId { item_id: 20 }], - })], - }; - - db.save_pane_splits(&workspace.workspace_id, ¢er_pane); - - // let new_workspace = db.workspace_for_roots(&["/tmp"]); - - // assert_eq!(new_workspace.center_group, center_pane); - } -} diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 10f99df2af..4e65c9788c 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,23 +1,14 @@ -use anyhow::{bail, Context, Result}; -use util::{iife, ResultExt}; +mod items; +pub mod model; +pub(crate) mod pane; -use std::{ - fmt::Debug, - os::unix::prelude::OsStrExt, - path::{Path, PathBuf}, -}; +use anyhow::{Context, Result}; +use util::ResultExt; -use indoc::indoc; -use sqlez::{ - bindable::{Bind, Column}, - connection::Connection, - migrations::Migration, - statement::Statement, -}; +use std::path::{Path, PathBuf}; -use crate::pane::{SerializedDockPane, SerializedPaneGroup}; - -use super::Db; +use indoc::{formatdoc, indoc}; +use sqlez::{connection::Connection, migrations::Migration}; // If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging // you might want to update some of the parsing code as well, I've left the variations in but commented @@ -37,87 +28,34 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( workspace_id INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE PRIMARY KEY(worktree_root, workspace_id) - ) STRICT;"}], + ) STRICT; + "}], ); -#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] -pub(crate) struct WorkspaceId(i64); +use self::model::{SerializedWorkspace, WorkspaceId, WorkspaceRow}; -impl Bind for WorkspaceId { - fn bind(&self, statement: &Statement, start_index: i32) -> Result { - self.0.bind(statement, start_index) - } -} - -impl Column for WorkspaceId { - fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - i64::column(statement, start_index).map(|(id, next_index)| (Self(id), next_index)) - } -} - -#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] -pub enum DockAnchor { - #[default] - Bottom, - Right, - Expanded, -} - -impl Bind for DockAnchor { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - DockAnchor::Bottom => "Bottom", - DockAnchor::Right => "Right", - DockAnchor::Expanded => "Expanded", - } - .bind(statement, start_index) - } -} - -impl Column for DockAnchor { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, 
start_index).and_then(|(anchor_text, next_index)| { - Ok(( - match anchor_text.as_ref() { - "Bottom" => DockAnchor::Bottom, - "Right" => DockAnchor::Right, - "Expanded" => DockAnchor::Expanded, - _ => bail!("Stored dock anchor is incorrect"), - }, - next_index, - )) - }) - } -} - -type WorkspaceRow = (WorkspaceId, DockAnchor, bool); - -#[derive(Default, Debug)] -pub struct SerializedWorkspace { - pub center_group: SerializedPaneGroup, - pub dock_anchor: DockAnchor, - pub dock_visible: bool, - pub dock_pane: SerializedDockPane, -} +use super::Db; impl Db { /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, /// returns the last workspace which was updated - pub fn workspace_for_roots
<P>
(&self, worktree_roots: &[P]) -> Option - where - P: AsRef + Debug, - { + pub fn workspace_for_roots>( + &self, + worktree_roots: &[P], + ) -> Option { // Find the workspace id which is uniquely identified by this set of paths // return it if found let mut workspace_row = get_workspace(worktree_roots, &self) .log_err() .unwrap_or_default(); + if workspace_row.is_none() && worktree_roots.len() == 0 { + // Return last workspace if no roots passed workspace_row = self.prepare( "SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1" ).and_then(|mut stmt| stmt.maybe_row::()) .log_err() - .flatten() + .flatten(); } workspace_row.and_then(|(workspace_id, dock_anchor, dock_visible)| { @@ -130,66 +68,56 @@ impl Db { }) } - /// TODO: Change to be 'update workspace' and to serialize the whole workspace in one go. - /// - /// Updates the open paths for the given workspace id. Will garbage collect items from - /// any workspace ids which are no replaced by the new workspace id. Updates the timestamps - /// in the workspace id table - pub fn update_worktrees
<P>
(&self, workspace_id: &WorkspaceId, worktree_roots: &[P]) - where - P: AsRef + Debug, - { + /// Saves a workspace using the worktree roots. Will garbage collect any workspaces + /// that used this workspace previously + pub fn save_workspace>( + &self, + worktree_roots: &[P], + workspace: SerializedWorkspace, + ) { self.with_savepoint("update_worktrees", |conn| { // Lookup any old WorkspaceIds which have the same set of roots, and delete them. - let preexisting_workspace = get_workspace(worktree_roots, &conn)?; - if let Some((preexisting_workspace_id, _, _)) = preexisting_workspace { - if preexisting_workspace_id != *workspace_id { - // Should also delete fields in other tables with cascading updates - conn.prepare("DELETE FROM workspaces WHERE workspace_id = ?")? - .with_bindings(preexisting_workspace_id)? - .exec()?; - } + if let Some((id_to_delete, _, _)) = get_workspace(worktree_roots, &conn)? { + // Should also delete fields in other tables with cascading updates and insert + // new entry + conn.prepare("DELETE FROM workspaces WHERE workspace_id = ?")? + .with_bindings(id_to_delete)? + .exec()?; } - conn.prepare("DELETE FROM worktree_roots WHERE workspace_id = ?")? - .with_bindings(workspace_id.0)? - .exec()?; + // Insert new workspace into workspaces table if none were found + let workspace_id = WorkspaceId( + conn.prepare("INSERT INTO workspaces(dock_anchor, dock_visible) VALUES (?, ?)")? + .with_bindings((workspace.dock_anchor, workspace.dock_visible))? + .insert()?, + ); + // Write worktree_roots with new workspace_id for root in worktree_roots { - let path = root.as_ref().as_os_str().as_bytes(); - // If you need to debug this, here's the string parsing: - // let path = root.as_ref().to_string_lossy().to_string(); - conn.prepare( "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", )? - .with_bindings((workspace_id.0, path))? + .with_bindings((workspace_id, root.as_ref()))? .exec()?; } - conn.prepare( - "UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?", - )? - .with_bindings(workspace_id.0)? - .exec()?; - Ok(()) }) - .context("Update workspace {workspace_id:?} with roots {worktree_roots:?}") + .context("Update workspace with roots {worktree_roots:?}") .log_err(); } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec> { self.with_savepoint("recent_workspaces", |conn| { - let mut stmt = + let mut roots_by_id = conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; conn.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? .with_bindings(limit)? .rows::()? .iter() - .map(|workspace_id| stmt.with_bindings(workspace_id.0)?.rows::()) + .map(|workspace_id| roots_by_id.with_bindings(workspace_id.0)?.rows::()) .collect::>() }) .log_err() @@ -197,25 +125,15 @@ impl Db { } } -fn get_workspace
<P>
(worktree_roots: &[P], connection: &Connection) -> Result> -where - P: AsRef + Debug, -{ +fn get_workspace>( + worktree_roots: &[P], + connection: &Connection, +) -> Result> { // Short circuit if we can if worktree_roots.len() == 0 { return Ok(None); } - // Prepare the array binding string. SQL doesn't have syntax for this, so - // we have to do it ourselves. - let array_binding_stmt = format!( - "({})", - (0..worktree_roots.len()) - .map(|index| format!("?{}", index + 1)) - .collect::>() - .join(", ") - ); - // Any workspace can have multiple independent paths, and these paths // can overlap in the database. Take this test data for example: // @@ -266,230 +184,225 @@ where // Note: due to limitations in SQLite's query binding, we have to generate the prepared // statement with string substitution (the {array_bind}) below, and then bind the // parameters by number. - let query = format!( - r#" - SELECT workspaces.workspace_id, workspaces.dock_anchor, workspaces.dock_visible - FROM (SELECT workspace_id - FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots - WHERE worktree_root in {array_bind} AND workspace_id NOT IN - (SELECT wt1.workspace_id FROM worktree_roots as wt1 - JOIN worktree_roots as wt2 - ON wt1.workspace_id = wt2.workspace_id - WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind}) - GROUP BY workspace_id) - WHERE num_matching = ?) as matching_workspace - JOIN workspaces ON workspaces.workspace_id = matching_workspace.workspace_id - "#, - array_bind = array_binding_stmt - ); - - // This will only be called on start up and when root workspaces change, no need to waste memory - // caching it. - let mut stmt = connection.prepare(&query)?; - - // Make sure we bound the parameters correctly - debug_assert!(worktree_roots.len() as i32 + 1 == stmt.parameter_count()); - - let root_bytes: Vec<&[u8]> = worktree_roots - .iter() - .map(|root| root.as_ref().as_os_str().as_bytes()) - .collect(); - - let num_of_roots = root_bytes.len(); - - stmt.with_bindings((root_bytes, num_of_roots))? + connection + .prepare(formatdoc! {" + SELECT workspaces.workspace_id, workspaces.dock_anchor, workspaces.dock_visible + FROM (SELECT workspace_id + FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots + WHERE worktree_root in ({roots}) AND workspace_id NOT IN + (SELECT wt1.workspace_id FROM worktree_roots as wt1 + JOIN worktree_roots as wt2 + ON wt1.workspace_id = wt2.workspace_id + WHERE wt1.worktree_root NOT in ({roots}) AND wt2.worktree_root in ({roots})) + GROUP BY workspace_id) + WHERE num_matching = ?) as matching_workspace + JOIN workspaces ON workspaces.workspace_id = matching_workspace.workspace_id", + roots = + // Prepare the array binding string. SQL doesn't have syntax for this, so + // we have to do it ourselves. + (0..worktree_roots.len()) + .map(|index| format!("?{}", index + 1)) + .collect::>() + .join(", ") + })? + .with_bindings(( + worktree_roots + .into_iter() + .map(|p| p.as_ref()) + .collect::>(), + worktree_roots.len(), + ))? 
.maybe_row::() } #[cfg(test)] mod tests { - use std::{path::PathBuf, thread::sleep, time::Duration}; + // use std::{path::PathBuf, thread::sleep, time::Duration}; - use crate::Db; + // use crate::Db; - use super::WorkspaceId; + // use super::WorkspaceId; - #[test] - fn test_new_worktrees_for_roots() { - env_logger::init(); - let db = Db::open_in_memory("test_new_worktrees_for_roots"); + // #[test] + // fn test_workspace_saving() { + // env_logger::init(); + // let db = Db::open_in_memory("test_new_worktrees_for_roots"); - // Test creation in 0 case - let workspace_1 = db.workspace_for_roots::(&[]); - assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); + // // Test nothing returned with no roots at first + // assert_eq!(db.workspace_for_roots::(&[]), None); - // Test pulling from recent workspaces - let workspace_1 = db.workspace_for_roots::(&[]); - assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); + // // Test creation + // let workspace_1 = db.workspace_for_roots::(&[]); + // assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); - // Ensure the timestamps are different - sleep(Duration::from_secs(1)); - db.make_new_workspace::(&[]); + // // Ensure the timestamps are different + // sleep(Duration::from_secs(1)); + // db.make_new_workspace::(&[]); - // Test pulling another value from recent workspaces - let workspace_2 = db.workspace_for_roots::(&[]); - assert_eq!(workspace_2.workspace_id, WorkspaceId(2)); + // // Test pulling another value from recent workspaces + // let workspace_2 = db.workspace_for_roots::(&[]); + // assert_eq!(workspace_2.workspace_id, WorkspaceId(2)); - // Ensure the timestamps are different - sleep(Duration::from_secs(1)); + // // Ensure the timestamps are different + // sleep(Duration::from_secs(1)); - // Test creating a new workspace that doesn't exist already - let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); - assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); + // // Test creating a new workspace that doesn't exist already + // let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); + // assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); - // Make sure it's in the recent workspaces.... - let workspace_3 = db.workspace_for_roots::(&[]); - assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); + // // Make sure it's in the recent workspaces.... 
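(Aside on the get_workspace query above: SQLite cannot bind an array parameter directly, so the statement splices in a generated placeholder list and then binds each root by position. A minimal sketch of that trick; the helper name is illustrative and does not appear in the patch:)

fn array_binding(count: usize) -> String {
    // Produces "?1, ?2, ..., ?N" for splicing into the IN (...) clauses.
    (0..count)
        .map(|index| format!("?{}", index + 1))
        .collect::<Vec<_>>()
        .join(", ")
}

// array_binding(3) yields "?1, ?2, ?3"; the roots are then bound by number,
// followed by the root count for the `num_matching = ?` filter.
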
+ // let workspace_3 = db.workspace_for_roots::(&[]); + // assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); - // And that it can be pulled out again - let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); - assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); - } + // // And that it can be pulled out again + // let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); + // assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); + // } - #[test] - fn test_empty_worktrees() { - let db = Db::open_in_memory("test_empty_worktrees"); + // #[test] + // fn test_empty_worktrees() { + // let db = Db::open_in_memory("test_empty_worktrees"); - assert_eq!(None, db.workspace::(&[])); + // assert_eq!(None, db.workspace::(&[])); - db.make_new_workspace::(&[]); //ID 1 - db.make_new_workspace::(&[]); //ID 2 - db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); + // db.make_new_workspace::(&[]); //ID 1 + // db.make_new_workspace::(&[]); //ID 2 + // db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); - // Sanity check - assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(1)); + // // Sanity check + // assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(1)); - db.update_worktrees::(&WorkspaceId(1), &[]); + // db.update_worktrees::(&WorkspaceId(1), &[]); - // Make sure 'no worktrees' fails correctly. returning [1, 2] from this - // call would be semantically correct (as those are the workspaces that - // don't have roots) but I'd prefer that this API to either return exactly one - // workspace, and None otherwise - assert_eq!(db.workspace::(&[]), None,); + // // Make sure 'no worktrees' fails correctly. returning [1, 2] from this + // // call would be semantically correct (as those are the workspaces that + // // don't have roots) but I'd prefer that this API to either return exactly one + // // workspace, and None otherwise + // assert_eq!(db.workspace::(&[]), None,); - assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(1)); + // assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(1)); - assert_eq!( - db.recent_workspaces(2), - vec![Vec::::new(), Vec::::new()], - ) - } + // assert_eq!( + // db.recent_workspaces(2), + // vec![Vec::::new(), Vec::::new()], + // ) + // } - #[test] - fn test_more_workspace_ids() { - let data = &[ - (WorkspaceId(1), vec!["/tmp1"]), - (WorkspaceId(2), vec!["/tmp1", "/tmp2"]), - (WorkspaceId(3), vec!["/tmp1", "/tmp2", "/tmp3"]), - (WorkspaceId(4), vec!["/tmp2", "/tmp3"]), - (WorkspaceId(5), vec!["/tmp2", "/tmp3", "/tmp4"]), - (WorkspaceId(6), vec!["/tmp2", "/tmp4"]), - (WorkspaceId(7), vec!["/tmp2"]), - ]; + // #[test] + // fn test_more_workspace_ids() { + // let data = &[ + // (WorkspaceId(1), vec!["/tmp1"]), + // (WorkspaceId(2), vec!["/tmp1", "/tmp2"]), + // (WorkspaceId(3), vec!["/tmp1", "/tmp2", "/tmp3"]), + // (WorkspaceId(4), vec!["/tmp2", "/tmp3"]), + // (WorkspaceId(5), vec!["/tmp2", "/tmp3", "/tmp4"]), + // (WorkspaceId(6), vec!["/tmp2", "/tmp4"]), + // (WorkspaceId(7), vec!["/tmp2"]), + // ]; - let db = Db::open_in_memory("test_more_workspace_ids"); + // let db = Db::open_in_memory("test_more_workspace_ids"); - for (workspace_id, entries) in data { - db.make_new_workspace::(&[]); - db.update_worktrees(workspace_id, entries); - } + // for (workspace_id, entries) in data { + // db.make_new_workspace::(&[]); + // db.update_worktrees(workspace_id, entries); + // } - assert_eq!(WorkspaceId(1), db.workspace(&["/tmp1"]).unwrap().0); - assert_eq!(db.workspace(&["/tmp1", "/tmp2"]).unwrap().0, WorkspaceId(2)); - 
assert_eq!( - db.workspace(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().0, - WorkspaceId(3) - ); - assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(4)); - assert_eq!( - db.workspace(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().0, - WorkspaceId(5) - ); - assert_eq!(db.workspace(&["/tmp2", "/tmp4"]).unwrap().0, WorkspaceId(6)); - assert_eq!(db.workspace(&["/tmp2"]).unwrap().0, WorkspaceId(7)); + // assert_eq!(WorkspaceId(1), db.workspace(&["/tmp1"]).unwrap().0); + // assert_eq!(db.workspace(&["/tmp1", "/tmp2"]).unwrap().0, WorkspaceId(2)); + // assert_eq!( + // db.workspace(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().0, + // WorkspaceId(3) + // ); + // assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(4)); + // assert_eq!( + // db.workspace(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().0, + // WorkspaceId(5) + // ); + // assert_eq!(db.workspace(&["/tmp2", "/tmp4"]).unwrap().0, WorkspaceId(6)); + // assert_eq!(db.workspace(&["/tmp2"]).unwrap().0, WorkspaceId(7)); - assert_eq!(db.workspace(&["/tmp1", "/tmp5"]), None); - assert_eq!(db.workspace(&["/tmp5"]), None); - assert_eq!(db.workspace(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None); - } + // assert_eq!(db.workspace(&["/tmp1", "/tmp5"]), None); + // assert_eq!(db.workspace(&["/tmp5"]), None); + // assert_eq!(db.workspace(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None); + // } - #[test] - fn test_detect_workspace_id() { - let data = &[ - (WorkspaceId(1), vec!["/tmp"]), - (WorkspaceId(2), vec!["/tmp", "/tmp2"]), - (WorkspaceId(3), vec!["/tmp", "/tmp2", "/tmp3"]), - ]; + // #[test] + // fn test_detect_workspace_id() { + // let data = &[ + // (WorkspaceId(1), vec!["/tmp"]), + // (WorkspaceId(2), vec!["/tmp", "/tmp2"]), + // (WorkspaceId(3), vec!["/tmp", "/tmp2", "/tmp3"]), + // ]; - let db = Db::open_in_memory("test_detect_workspace_id"); + // let db = Db::open_in_memory("test_detect_workspace_id"); - for (workspace_id, entries) in data { - db.make_new_workspace::(&[]); - db.update_worktrees(workspace_id, entries); - } + // for (workspace_id, entries) in data { + // db.make_new_workspace::(&[]); + // db.update_worktrees(workspace_id, entries); + // } - assert_eq!(db.workspace(&["/tmp2"]), None); - assert_eq!(db.workspace(&["/tmp2", "/tmp3"]), None); - assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); - assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(2)); - assert_eq!( - db.workspace(&["/tmp", "/tmp2", "/tmp3"]).unwrap().0, - WorkspaceId(3) - ); - } + // assert_eq!(db.workspace(&["/tmp2"]), None); + // assert_eq!(db.workspace(&["/tmp2", "/tmp3"]), None); + // assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); + // assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(2)); + // assert_eq!( + // db.workspace(&["/tmp", "/tmp2", "/tmp3"]).unwrap().0, + // WorkspaceId(3) + // ); + // } - #[test] - fn test_tricky_overlapping_updates() { - // DB state: - // (/tree) -> ID: 1 - // (/tree, /tree2) -> ID: 2 - // (/tree2, /tree3) -> ID: 3 + // #[test] + // fn test_tricky_overlapping_updates() { + // // DB state: + // // (/tree) -> ID: 1 + // // (/tree, /tree2) -> ID: 2 + // // (/tree2, /tree3) -> ID: 3 - // -> User updates 2 to: (/tree2, /tree3) + // // -> User updates 2 to: (/tree2, /tree3) - // DB state: - // (/tree) -> ID: 1 - // (/tree2, /tree3) -> ID: 2 - // Get rid of 3 for garbage collection + // // DB state: + // // (/tree) -> ID: 1 + // // (/tree2, /tree3) -> ID: 2 + // // Get rid of 3 for garbage collection - let data = &[ - (WorkspaceId(1), vec!["/tmp"]), - (WorkspaceId(2), 
vec!["/tmp", "/tmp2"]), - (WorkspaceId(3), vec!["/tmp2", "/tmp3"]), - ]; + // let data = &[ + // (WorkspaceId(1), vec!["/tmp"]), + // (WorkspaceId(2), vec!["/tmp", "/tmp2"]), + // (WorkspaceId(3), vec!["/tmp2", "/tmp3"]), + // ]; - let db = Db::open_in_memory("test_tricky_overlapping_update"); + // let db = Db::open_in_memory("test_tricky_overlapping_update"); - // Load in the test data - for (workspace_id, entries) in data { - db.make_new_workspace::(&[]); - db.update_worktrees(workspace_id, entries); - } + // // Load in the test data + // for (workspace_id, entries) in data { + // db.make_new_workspace::(&[]); + // db.update_worktrees(workspace_id, entries); + // } - sleep(Duration::from_secs(1)); - // Execute the update - db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); + // sleep(Duration::from_secs(1)); + // // Execute the update + // db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); - // Make sure that workspace 3 doesn't exist - assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(2)); + // // Make sure that workspace 3 doesn't exist + // assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(2)); - // And that workspace 1 was untouched - assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); + // // And that workspace 1 was untouched + // assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); - // And that workspace 2 is no longer registered under these roots - assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None); + // // And that workspace 2 is no longer registered under these roots + // assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None); - assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(2)); + // assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(2)); - let recent_workspaces = db.recent_workspaces(10); - assert_eq!( - recent_workspaces.get(0).unwrap(), - &vec![PathBuf::from("/tmp2"), PathBuf::from("/tmp3")] - ); - assert_eq!( - recent_workspaces.get(1).unwrap(), - &vec![PathBuf::from("/tmp")] - ); - } + // let recent_workspaces = db.recent_workspaces(10); + // assert_eq!( + // recent_workspaces.get(0).unwrap(), + // &vec![PathBuf::from("/tmp2"), PathBuf::from("/tmp3")] + // ); + // assert_eq!( + // recent_workspaces.get(1).unwrap(), + // &vec![PathBuf::from("/tmp")] + // ); + // } } diff --git a/crates/db/src/items.rs b/crates/db/src/workspace/items.rs similarity index 97% rename from crates/db/src/items.rs rename to crates/db/src/workspace/items.rs index 93251e5eed..c3405974d5 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/workspace/items.rs @@ -65,11 +65,6 @@ // ) STRICT; // "; -#[derive(Debug, PartialEq, Eq)] -pub struct ItemId { - pub item_id: usize, -} - // enum SerializedItemKind { // Editor, // Diagnostics, diff --git a/crates/db/src/workspace/model.rs b/crates/db/src/workspace/model.rs new file mode 100644 index 0000000000..148b6b76ca --- /dev/null +++ b/crates/db/src/workspace/model.rs @@ -0,0 +1,173 @@ +use anyhow::{bail, Result}; + +use gpui::Axis; +use sqlez::{ + bindable::{Bind, Column}, + statement::Statement, +}; + +#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] +pub(crate) struct WorkspaceId(pub(crate) i64); + +impl Bind for WorkspaceId { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + self.0.bind(statement, start_index) + } +} + +impl Column for WorkspaceId { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + i64::column(statement, start_index).map(|(id, next_index)| (Self(id), next_index)) + } +} + 
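(The Bind and Column impls above give WorkspaceId a symmetric round-trip through sqlez: Bind writes the wrapped value at the given parameter index, and Column reads it back while returning the next column index. A minimal sketch of the same newtype pattern, using a hypothetical type purely for illustration:)

use anyhow::Result;
use sqlez::{
    bindable::{Bind, Column},
    statement::Statement,
};

struct Count(i64); // illustrative newtype, not part of this patch

impl Bind for Count {
    fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
        // Delegate to the inner type; returns the next free parameter index.
        self.0.bind(statement, start_index)
    }
}

impl Column for Count {
    fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
        // Read the inner value back and thread the column index forward.
        i64::column(statement, start_index).map(|(n, next)| (Self(n), next))
    }
}
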
+#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] +pub enum DockAnchor { + #[default] + Bottom, + Right, + Expanded, +} + +impl Bind for DockAnchor { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + DockAnchor::Bottom => "Bottom", + DockAnchor::Right => "Right", + DockAnchor::Expanded => "Expanded", + } + .bind(statement, start_index) + } +} + +impl Column for DockAnchor { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(anchor_text, next_index)| { + Ok(( + match anchor_text.as_ref() { + "Bottom" => DockAnchor::Bottom, + "Right" => DockAnchor::Right, + "Expanded" => DockAnchor::Expanded, + _ => bail!("Stored dock anchor is incorrect"), + }, + next_index, + )) + }) + } +} + +pub(crate) type WorkspaceRow = (WorkspaceId, DockAnchor, bool); + +#[derive(Default, Debug)] +pub struct SerializedWorkspace { + pub center_group: SerializedPaneGroup, + pub dock_anchor: DockAnchor, + pub dock_visible: bool, + pub dock_pane: SerializedDockPane, +} + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub struct PaneId { + workspace_id: WorkspaceId, + pane_id: usize, +} + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub struct PaneGroupId { + workspace_id: WorkspaceId, +} + +impl PaneGroupId { + pub fn root(workspace_id: WorkspaceId) -> Self { + Self { + workspace_id, + // group_id: 0, + } + } +} + +#[derive(Debug, PartialEq, Eq, Default)] +pub struct SerializedPaneGroup { + axis: Axis, + children: Vec, +} + +impl SerializedPaneGroup { + pub(crate) fn empty_root(_workspace_id: WorkspaceId) -> Self { + Self { + // group_id: PaneGroupId::root(workspace_id), + axis: Default::default(), + children: Default::default(), + } + } +} + +#[derive(Default, Debug, PartialEq, Eq)] +pub struct SerializedDockPane { + pub anchor_position: DockAnchor, + pub visible: bool, +} + +impl SerializedDockPane { + fn to_row(&self, workspace: &WorkspaceId) -> DockRow { + DockRow { + workspace_id: *workspace, + anchor_position: self.anchor_position, + visible: self.visible, + } + } +} + +impl Column for SerializedDockPane { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + <(DockAnchor, bool) as Column>::column(statement, start_index).map( + |((anchor_position, visible), next_index)| { + ( + SerializedDockPane { + anchor_position, + visible, + }, + next_index, + ) + }, + ) + } +} + +#[derive(Default, Debug, PartialEq, Eq)] +pub(crate) struct DockRow { + workspace_id: WorkspaceId, + anchor_position: DockAnchor, + visible: bool, +} + +impl Bind for DockRow { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + statement.bind( + (self.workspace_id, self.anchor_position, self.visible), + start_index, + ) + } +} + +impl Column for DockRow { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + <(WorkspaceId, DockAnchor, bool) as Column>::column(statement, start_index).map( + |((workspace_id, anchor_position, visible), next_index)| { + ( + DockRow { + workspace_id, + anchor_position, + visible, + }, + next_index, + ) + }, + ) + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct ItemId { + pub item_id: usize, +} diff --git a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs new file mode 100644 index 0000000000..3c007fd402 --- /dev/null +++ b/crates/db/src/workspace/pane.rs @@ -0,0 +1,169 @@ +use gpui::Axis; +use indoc::indoc; +use sqlez::migrations::Migration; +use util::{iife, 
ResultExt}; + +use super::{ + model::{PaneGroupId, PaneId, SerializedDockPane, SerializedPaneGroup, WorkspaceId}, + Db, +}; + +pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( + "pane", + &[indoc! {" + CREATE TABLE pane_groups( + group_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + parent_group INTEGER, -- NULL indicates that this is a root node + axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + + CREATE TABLE panes( + pane_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + group_id INTEGER, -- If null, this is a dock pane + idx INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + + CREATE TABLE items( + item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique + pane_id INTEGER NOT NULL, + workspace_id INTEGER NOT NULL, + kind TEXT NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE + PRIMARY KEY(item_id, workspace_id) + ) STRICT; + "}], +); + +impl Db { + pub(crate) fn get_center_group(&self, _workspace: WorkspaceId) -> SerializedPaneGroup { + unimplemented!() + } + + pub(crate) fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup { + unimplemented!() + // let axis = self.get_pane_group_axis(pane_group_id); + // let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); + // for child_row in self.get_pane_group_children(pane_group_id) { + // if let Some(child_pane_id) = child_row.child_pane_id { + // children.push(( + // child_row.index, + // PaneGroupChild::Pane(self.get_pane(PaneId { + // workspace_id: pane_group_id.workspace_id, + // pane_id: child_pane_id, + // })), + // )); + // } else if let Some(child_group_id) = child_row.child_group_id { + // children.push(( + // child_row.index, + // PaneGroupChild::Group(self.get_pane_group(PaneGroupId { + // workspace_id: pane_group_id.workspace_id, + // group_id: child_group_id, + // })), + // )); + // } + // } + // children.sort_by_key(|(index, _)| *index); + + // SerializedPaneGroup { + // group_id: pane_group_id, + // axis, + // children: children.into_iter().map(|(_, child)| child).collect(), + // } + } + + // fn _get_pane_group_children( + // &self, + // _pane_group_id: PaneGroupId, + // ) -> impl Iterator { + // Vec::new().into_iter() + // } + + pub(crate) fn save_pane_splits( + &self, + _workspace: &WorkspaceId, + _center_pane_group: &SerializedPaneGroup, + ) { + // Delete the center pane group for this workspace and any of its children + // Generate new pane group IDs as we go through + // insert them + } + + pub(crate) fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane { + unimplemented!(); + } + + pub(crate) fn get_dock_pane(&self, workspace: WorkspaceId) -> Option { + iife!({ + self.prepare("SELECT anchor_position, visible FROM dock_panes WHERE workspace_id = ?")? + .with_bindings(workspace)? + .maybe_row::() + }) + .log_err() + .flatten() + } + + pub(crate) fn save_dock_pane(&self, workspace: &WorkspaceId, dock_pane: &SerializedDockPane) { + // iife!({ + // self.prepare( + // "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);", + // )? + // .with_bindings(dock_pane.to_row(workspace))? 
+ // .insert() + // }) + // .log_err(); + } +} + +#[cfg(test)] +mod tests { + + // use crate::{items::ItemId, pane::SerializedPane, Db, DockAnchor}; + + // use super::{PaneGroupChild, SerializedDockPane, SerializedPaneGroup}; + + // #[test] + // fn test_basic_dock_pane() { + // let db = Db::open_in_memory("basic_dock_pane"); + + // let workspace = db.workspace_for_roots(&["/tmp"]); + + // let dock_pane = SerializedDockPane { + // anchor_position: DockAnchor::Expanded, + // visible: true, + // }; + + // db.save_dock_pane(&workspace.workspace_id, &dock_pane); + + // let new_workspace = db.workspace_for_roots(&["/tmp"]); + + // assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane); + // } + + // #[test] + // fn test_dock_simple_split() { + // let db = Db::open_in_memory("simple_split"); + + // let workspace = db.workspace_for_roots(&["/tmp"]); + + // // Pane group -> Pane -> 10 , 20 + // let center_pane = SerializedPaneGroup { + // axis: gpui::Axis::Horizontal, + // children: vec![PaneGroupChild::Pane(SerializedPane { + // items: vec![ItemId { item_id: 10 }, ItemId { item_id: 20 }], + // })], + // }; + + // db.save_pane_splits(&workspace.workspace_id, ¢er_pane); + + // // let new_workspace = db.workspace_for_roots(&["/tmp"]); + + // // assert_eq!(new_workspace.center_group, center_pane); + // } +} diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index e2cdde039e..a22363ae63 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -1,3 +1,9 @@ +use std::{ + ffi::{CString, OsStr}, + os::unix::prelude::OsStrExt, + path::{Path, PathBuf}, +}; + use anyhow::Result; use crate::statement::{SqlType, Statement}; @@ -241,3 +247,20 @@ impl Bind for &[T] { Ok(current_index) } } + +impl Bind for &Path { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + self.as_os_str().as_bytes().bind(statement, start_index) + } +} + +impl Column for PathBuf { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let blob = statement.column_blob(start_index)?; + + Ok(( + PathBuf::from(OsStr::from_bytes(blob).to_owned()), + start_index + 1, + )) + } +} From d419f27d75addb64fe06deecb9be96c488afc2cc Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Thu, 3 Nov 2022 22:37:40 -0700 Subject: [PATCH 145/240] replace worktree roots table with serialized worktree roots list --- Cargo.lock | 2 + crates/db/Cargo.toml | 2 + crates/db/src/workspace.rs | 208 ++++++++----------------------- crates/db/src/workspace/model.rs | 132 +++++--------------- crates/db/src/workspace/pane.rs | 24 ++-- 5 files changed, 97 insertions(+), 271 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3e8526fbed..8fa755b161 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1550,6 +1550,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "bincode", "collections", "env_logger", "gpui", @@ -1557,6 +1558,7 @@ dependencies = [ "lazy_static", "log", "parking_lot 0.11.2", + "serde", "sqlez", "tempdir", "util", diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 1ee9de6186..b69779c408 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -21,6 +21,8 @@ async-trait = "0.1" lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" +serde = { version = "1.0", features = ["derive"] } +bincode = "1.2.1" [dev-dependencies] diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 4e65c9788c..5fc9e075e9 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs 
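(The hunks below are the heart of this patch: the worktree_roots join table disappears, and a workspace is instead keyed by the sorted, bincode-serialized list of its worktree roots. A minimal sketch of that keying scheme, with an illustrative function name; the real code lives in the WorkspaceId From/Bind impls further down:)

use std::path::{Path, PathBuf};

fn workspace_key<P: AsRef<Path>>(worktree_roots: &[P]) -> Vec<u8> {
    let mut roots: Vec<PathBuf> = worktree_roots
        .iter()
        .map(|p| p.as_ref().to_path_buf())
        .collect();
    // Sort so the same set of roots always produces the same key, no matter
    // what order the worktrees were opened in.
    roots.sort();
    bincode::serialize(&roots).expect("Bincode serialization of paths should not fail")
}
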
@@ -3,12 +3,12 @@ pub mod model; pub(crate) mod pane; use anyhow::{Context, Result}; -use util::ResultExt; +use util::{iife, ResultExt}; use std::path::{Path, PathBuf}; -use indoc::{formatdoc, indoc}; -use sqlez::{connection::Connection, migrations::Migration}; +use indoc::indoc; +use sqlez::migrations::Migration; // If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging // you might want to update some of the parsing code as well, I've left the variations in but commented @@ -17,18 +17,11 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( "workspace", &[indoc! {" CREATE TABLE workspaces( - workspace_id INTEGER PRIMARY KEY, + workspace_id BLOB PRIMARY KEY, dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' dock_visible INTEGER, -- Boolean timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL ) STRICT; - - CREATE TABLE worktree_roots( - worktree_root BLOB NOT NULL, - workspace_id INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - PRIMARY KEY(worktree_root, workspace_id) - ) STRICT; "}], ); @@ -37,34 +30,39 @@ use self::model::{SerializedWorkspace, WorkspaceId, WorkspaceRow}; use super::Db; impl Db { - /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, - /// returns the last workspace which was updated + /// Returns a serialized workspace for the given worktree_roots. If the passed array + /// is empty, the most recent workspace is returned instead. If no workspace for the + /// passed roots is stored, returns none. pub fn workspace_for_roots>( &self, worktree_roots: &[P], ) -> Option { - // Find the workspace id which is uniquely identified by this set of paths - // return it if found - let mut workspace_row = get_workspace(worktree_roots, &self) - .log_err() - .unwrap_or_default(); + let workspace_id: WorkspaceId = worktree_roots.into(); - if workspace_row.is_none() && worktree_roots.len() == 0 { - // Return last workspace if no roots passed - workspace_row = self.prepare( - "SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1" - ).and_then(|mut stmt| stmt.maybe_row::()) - .log_err() - .flatten(); - } + let (_, dock_anchor, dock_visible) = iife!({ + if worktree_roots.len() == 0 { + self.prepare(indoc! {" + SELECT workspace_id, dock_anchor, dock_visible + FROM workspaces + ORDER BY timestamp DESC LIMIT 1"})? + .maybe_row::() + } else { + self.prepare(indoc! {" + SELECT workspace_id, dock_anchor, dock_visible + FROM workspaces + WHERE workspace_id = ?"})? + .with_bindings(workspace_id)? + .maybe_row::() + } + }) + .log_err() + .flatten()?; - workspace_row.and_then(|(workspace_id, dock_anchor, dock_visible)| { - Some(SerializedWorkspace { - dock_pane: self.get_dock_pane(workspace_id)?, - center_group: self.get_center_group(workspace_id), - dock_anchor, - dock_visible, - }) + Some(SerializedWorkspace { + dock_pane: self.get_dock_pane(workspace_id)?, + center_group: self.get_center_group(workspace_id), + dock_anchor, + dock_visible, }) } @@ -75,146 +73,40 @@ impl Db { worktree_roots: &[P], workspace: SerializedWorkspace, ) { + let workspace_id: WorkspaceId = worktree_roots.into(); + self.with_savepoint("update_worktrees", |conn| { - // Lookup any old WorkspaceIds which have the same set of roots, and delete them. - if let Some((id_to_delete, _, _)) = get_workspace(worktree_roots, &conn)? 
{ - // Should also delete fields in other tables with cascading updates and insert - // new entry - conn.prepare("DELETE FROM workspaces WHERE workspace_id = ?")? - .with_bindings(id_to_delete)? - .exec()?; - } - + // Delete any previous workspaces with the same roots. This cascades to all + // other tables that are based on the same roots set. // Insert new workspace into workspaces table if none were found - let workspace_id = WorkspaceId( - conn.prepare("INSERT INTO workspaces(dock_anchor, dock_visible) VALUES (?, ?)")? - .with_bindings((workspace.dock_anchor, workspace.dock_visible))? - .insert()?, - ); - - // Write worktree_roots with new workspace_id - for root in worktree_roots { - conn.prepare( - "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", - )? - .with_bindings((workspace_id, root.as_ref()))? - .exec()?; - } + self.prepare(indoc!{" + DELETE FROM workspaces WHERE workspace_id = ?1; + INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?1, ?, ?)"})? + .with_bindings((workspace_id, workspace.dock_anchor, workspace.dock_visible))? + .exec()?; + + // Save center pane group and dock pane + Self::save_center_group(workspace_id, &workspace.center_group, conn)?; + Self::save_dock_pane(workspace_id, &workspace.dock_pane, conn)?; Ok(()) }) - .context("Update workspace with roots {worktree_roots:?}") + .with_context(|| format!("Update workspace with roots {:?}", worktree_roots.iter().map(|p| p.as_ref()).collect::>())) .log_err(); } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec> { - self.with_savepoint("recent_workspaces", |conn| { - let mut roots_by_id = - conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; - - conn.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? + iife!({ + self.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? .with_bindings(limit)? .rows::()? - .iter() - .map(|workspace_id| roots_by_id.with_bindings(workspace_id.0)?.rows::()) - .collect::>() - }) - .log_err() - .unwrap_or_default() + .into_iter().map(|id| id.0) + .collect() + }).log_err().unwrap_or_default() } } -fn get_workspace>( - worktree_roots: &[P], - connection: &Connection, -) -> Result> { - // Short circuit if we can - if worktree_roots.len() == 0 { - return Ok(None); - } - - // Any workspace can have multiple independent paths, and these paths - // can overlap in the database. Take this test data for example: - // - // [/tmp, /tmp2] -> 1 - // [/tmp] -> 2 - // [/tmp2, /tmp3] -> 3 - // - // This would be stred in the database like so: - // - // ID PATH - // 1 /tmp - // 1 /tmp2 - // 2 /tmp - // 3 /tmp2 - // 3 /tmp3 - // - // Note how both /tmp and /tmp2 are associated with multiple workspace IDs. - // So, given an array of worktree roots, how can we find the exactly matching ID? - // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: - // - We start with a join of this table on itself, generating every possible - // pair of ((path, ID), (path, ID)), and filtering the join down to just the - // *overlapping but non-matching* workspace IDs. 
For this small data set, - // this would look like: - // - // wt1.ID wt1.PATH | wt2.ID wt2.PATH - // 3 /tmp3 3 /tmp2 - // - // - Moving one SELECT out, we use the first pair's ID column to invert the selection, - // meaning we now have a list of all the entries for our array, minus overlapping sets, - // but including *subsets* of our worktree roots: - // - // ID PATH - // 1 /tmp - // 1 /tmp2 - // 2 /tmp - // - // - To trim out the subsets, we can to exploit the PRIMARY KEY constraint that there are no - // duplicate entries in this table. Using a GROUP BY and a COUNT we can find the subsets of - // our keys: - // - // ID num_matching - // 1 2 - // 2 1 - // - // - And with one final WHERE num_matching = $num_of_worktree_roots, we're done! We've found the - // matching ID correctly :D - // - // Note: due to limitations in SQLite's query binding, we have to generate the prepared - // statement with string substitution (the {array_bind}) below, and then bind the - // parameters by number. - connection - .prepare(formatdoc! {" - SELECT workspaces.workspace_id, workspaces.dock_anchor, workspaces.dock_visible - FROM (SELECT workspace_id - FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots - WHERE worktree_root in ({roots}) AND workspace_id NOT IN - (SELECT wt1.workspace_id FROM worktree_roots as wt1 - JOIN worktree_roots as wt2 - ON wt1.workspace_id = wt2.workspace_id - WHERE wt1.worktree_root NOT in ({roots}) AND wt2.worktree_root in ({roots})) - GROUP BY workspace_id) - WHERE num_matching = ?) as matching_workspace - JOIN workspaces ON workspaces.workspace_id = matching_workspace.workspace_id", - roots = - // Prepare the array binding string. SQL doesn't have syntax for this, so - // we have to do it ourselves. - (0..worktree_roots.len()) - .map(|index| format!("?{}", index + 1)) - .collect::>() - .join(", ") - })? - .with_bindings(( - worktree_roots - .into_iter() - .map(|p| p.as_ref()) - .collect::>(), - worktree_roots.len(), - ))? 
- .maybe_row::() -} - #[cfg(test)] mod tests { diff --git a/crates/db/src/workspace/model.rs b/crates/db/src/workspace/model.rs index 148b6b76ca..37c353a47b 100644 --- a/crates/db/src/workspace/model.rs +++ b/crates/db/src/workspace/model.rs @@ -1,3 +1,5 @@ +use std::path::{Path, PathBuf}; + use anyhow::{bail, Result}; use gpui::Axis; @@ -6,18 +8,32 @@ use sqlez::{ statement::Statement, }; -#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] -pub(crate) struct WorkspaceId(pub(crate) i64); +#[derive(Debug, PartialEq, Eq, Clone)] +pub(crate) struct WorkspaceId(Vec); + +impl, T: IntoIterator> From for WorkspaceId { + fn from(iterator: T) -> Self { + let mut roots = iterator + .into_iter() + .map(|p| p.as_ref().to_path_buf()) + .collect::>(); + roots.sort(); + Self(roots) + } +} impl Bind for WorkspaceId { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - self.0.bind(statement, start_index) + bincode::serialize(&self.0) + .expect("Bincode serialization of paths should not fail") + .bind(statement, start_index) } } impl Column for WorkspaceId { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - i64::column(statement, start_index).map(|(id, next_index)| (Self(id), next_index)) + let blob = statement.column_blob(start_index)?; + Ok((WorkspaceId(bincode::deserialize(blob)?), start_index + 1)) } } @@ -58,116 +74,24 @@ impl Column for DockAnchor { pub(crate) type WorkspaceRow = (WorkspaceId, DockAnchor, bool); -#[derive(Default, Debug)] +#[derive(Debug)] pub struct SerializedWorkspace { - pub center_group: SerializedPaneGroup, pub dock_anchor: DockAnchor, pub dock_visible: bool, - pub dock_pane: SerializedDockPane, + pub center_group: SerializedPaneGroup, + pub dock_pane: SerializedPane, } -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub struct PaneId { - workspace_id: WorkspaceId, - pane_id: usize, -} - -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub struct PaneGroupId { - workspace_id: WorkspaceId, -} - -impl PaneGroupId { - pub fn root(workspace_id: WorkspaceId) -> Self { - Self { - workspace_id, - // group_id: 0, - } - } -} - -#[derive(Debug, PartialEq, Eq, Default)] +#[derive(Debug, PartialEq, Eq)] pub struct SerializedPaneGroup { axis: Axis, children: Vec, } -impl SerializedPaneGroup { - pub(crate) fn empty_root(_workspace_id: WorkspaceId) -> Self { - Self { - // group_id: PaneGroupId::root(workspace_id), - axis: Default::default(), - children: Default::default(), - } - } +pub struct SerializedPane { + children: Vec, } -#[derive(Default, Debug, PartialEq, Eq)] -pub struct SerializedDockPane { - pub anchor_position: DockAnchor, - pub visible: bool, -} +pub enum SerializedItemKind {} -impl SerializedDockPane { - fn to_row(&self, workspace: &WorkspaceId) -> DockRow { - DockRow { - workspace_id: *workspace, - anchor_position: self.anchor_position, - visible: self.visible, - } - } -} - -impl Column for SerializedDockPane { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - <(DockAnchor, bool) as Column>::column(statement, start_index).map( - |((anchor_position, visible), next_index)| { - ( - SerializedDockPane { - anchor_position, - visible, - }, - next_index, - ) - }, - ) - } -} - -#[derive(Default, Debug, PartialEq, Eq)] -pub(crate) struct DockRow { - workspace_id: WorkspaceId, - anchor_position: DockAnchor, - visible: bool, -} - -impl Bind for DockRow { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - statement.bind( - (self.workspace_id, self.anchor_position, 
self.visible), - start_index, - ) - } -} - -impl Column for DockRow { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - <(WorkspaceId, DockAnchor, bool) as Column>::column(statement, start_index).map( - |((workspace_id, anchor_position, visible), next_index)| { - ( - DockRow { - workspace_id, - anchor_position, - visible, - }, - next_index, - ) - }, - ) - } -} - -#[derive(Debug, PartialEq, Eq)] -pub struct ItemId { - pub item_id: usize, -} +pub enum SerializedItem {} diff --git a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs index 3c007fd402..4f263e496c 100644 --- a/crates/db/src/workspace/pane.rs +++ b/crates/db/src/workspace/pane.rs @@ -1,6 +1,6 @@ use gpui::Axis; use indoc::indoc; -use sqlez::migrations::Migration; +use sqlez::{connection::Connection, migrations::Migration}; use util::{iife, ResultExt}; use super::{ @@ -13,26 +13,28 @@ pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( &[indoc! {" CREATE TABLE pane_groups( group_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, + workspace_id BLOB NOT NULL, parent_group INTEGER, -- NULL indicates that this is a root node axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE + PRIMARY KEY(group_id, workspace_id) ) STRICT; CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, + workspace_id BLOB NOT NULL, group_id INTEGER, -- If null, this is a dock pane idx INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + PRIMARY KEY(pane_id, workspace_id) ) STRICT; CREATE TABLE items( item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique pane_id INTEGER NOT NULL, - workspace_id INTEGER NOT NULL, + workspace_id BLOB NOT NULL, kind TEXT NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE @@ -46,7 +48,7 @@ impl Db { unimplemented!() } - pub(crate) fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup { + pub fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup { unimplemented!() // let axis = self.get_pane_group_axis(pane_group_id); // let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); @@ -85,17 +87,17 @@ impl Db { // Vec::new().into_iter() // } - pub(crate) fn save_pane_splits( - &self, + pub(crate) fn save_center_group( _workspace: &WorkspaceId, _center_pane_group: &SerializedPaneGroup, + _connection: &Connection, ) { // Delete the center pane group for this workspace and any of its children // Generate new pane group IDs as we go through // insert them } - pub(crate) fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane { + pub fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane { unimplemented!(); } @@ -109,7 +111,11 @@ impl Db { .flatten() } - pub(crate) fn save_dock_pane(&self, workspace: &WorkspaceId, dock_pane: &SerializedDockPane) { + pub(crate) fn save_dock_pane( + workspace: &WorkspaceId, + dock_pane: &SerializedDockPane, + connection: &Connection, + ) { // iife!({ // self.prepare( // "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);", From 6b214acbc4f3c02df6e02b2d009a0ce7e7edb9ad Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 4 Nov 
2022 10:34:28 -0700 Subject: [PATCH 146/240] =?UTF-8?q?Got=20Zed=20compiling=20again=20?= =?UTF-8?q?=F0=9F=A5=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crates/db/examples/serialize-pane.rs | 50 +++++++++++++--------------- crates/db/src/workspace.rs | 26 +++++++-------- crates/db/src/workspace/model.rs | 11 +++--- crates/db/src/workspace/pane.rs | 37 +++++++++----------- crates/sqlez/src/bindable.rs | 2 +- crates/workspace/src/workspace.rs | 8 ++--- 6 files changed, 64 insertions(+), 70 deletions(-) diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index ebe88037cd..2f362fb997 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -1,7 +1,5 @@ use std::{fs::File, path::Path}; -use db::{pane::SerializedDockPane, DockAnchor}; - const TEST_FILE: &'static str = "test-db.db"; fn main() -> anyhow::Result<()> { @@ -14,31 +12,31 @@ fn main() -> anyhow::Result<()> { let f = File::create(file)?; drop(f); - let workspace_1 = db.workspace_for_roots(&["/tmp"]); - let workspace_2 = db.workspace_for_roots(&["/tmp", "/tmp2"]); - let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]); + // let workspace_1 = db.workspace_for_roots(&["/tmp"]); + // let workspace_2 = db.workspace_for_roots(&["/tmp", "/tmp2"]); + // let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]); - db.save_dock_pane( - &workspace_1.workspace_id, - &SerializedDockPane { - anchor_position: DockAnchor::Expanded, - visible: true, - }, - ); - db.save_dock_pane( - &workspace_2.workspace_id, - &SerializedDockPane { - anchor_position: DockAnchor::Bottom, - visible: true, - }, - ); - db.save_dock_pane( - &workspace_3.workspace_id, - &SerializedDockPane { - anchor_position: DockAnchor::Right, - visible: false, - }, - ); + // db.save_dock_pane( + // &workspace_1.workspace_id, + // &SerializedDockPane { + // anchor_position: DockAnchor::Expanded, + // visible: true, + // }, + // ); + // db.save_dock_pane( + // &workspace_2.workspace_id, + // &SerializedDockPane { + // anchor_position: DockAnchor::Bottom, + // visible: true, + // }, + // ); + // db.save_dock_pane( + // &workspace_3.workspace_id, + // &SerializedDockPane { + // anchor_position: DockAnchor::Right, + // visible: false, + // }, + // ); db.write_to(file).ok(); diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 5fc9e075e9..cf09bdd06e 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -2,7 +2,7 @@ mod items; pub mod model; pub(crate) mod pane; -use anyhow::{Context, Result}; +use anyhow::Context; use util::{iife, ResultExt}; use std::path::{Path, PathBuf}; @@ -10,9 +10,6 @@ use std::path::{Path, PathBuf}; use indoc::indoc; use sqlez::migrations::Migration; -// If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging -// you might want to update some of the parsing code as well, I've left the variations in but commented -// out. This will panic if run on an existing db that has already been migrated pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( "workspace", &[indoc! {" @@ -39,7 +36,9 @@ impl Db { ) -> Option { let workspace_id: WorkspaceId = worktree_roots.into(); - let (_, dock_anchor, dock_visible) = iife!({ + // Note that we re-assign the workspace_id here in case it's empty + // and we've grabbed the most recent workspace + let (workspace_id, dock_anchor, dock_visible) = iife!({ if worktree_roots.len() == 0 { self.prepare(indoc! 
{" SELECT workspace_id, dock_anchor, dock_visible @@ -51,7 +50,7 @@ impl Db { SELECT workspace_id, dock_anchor, dock_visible FROM workspaces WHERE workspace_id = ?"})? - .with_bindings(workspace_id)? + .with_bindings(&workspace_id)? .maybe_row::() } }) @@ -59,8 +58,8 @@ impl Db { .flatten()?; Some(SerializedWorkspace { - dock_pane: self.get_dock_pane(workspace_id)?, - center_group: self.get_center_group(workspace_id), + dock_pane: self.get_dock_pane(&workspace_id)?, + center_group: self.get_center_group(&workspace_id), dock_anchor, dock_visible, }) @@ -82,12 +81,12 @@ impl Db { self.prepare(indoc!{" DELETE FROM workspaces WHERE workspace_id = ?1; INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?1, ?, ?)"})? - .with_bindings((workspace_id, workspace.dock_anchor, workspace.dock_visible))? + .with_bindings((&workspace_id, workspace.dock_anchor, workspace.dock_visible))? .exec()?; // Save center pane group and dock pane - Self::save_center_group(workspace_id, &workspace.center_group, conn)?; - Self::save_dock_pane(workspace_id, &workspace.dock_pane, conn)?; + Self::save_center_group(&workspace_id, &workspace.center_group, conn)?; + Self::save_dock_pane(&workspace_id, &workspace.dock_pane, conn)?; Ok(()) }) @@ -98,11 +97,12 @@ impl Db { /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec> { iife!({ - self.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? + Ok::<_, anyhow::Error>(self.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? .with_bindings(limit)? .rows::()? .into_iter().map(|id| id.0) - .collect() + .collect::>>()) + }).log_err().unwrap_or_default() } } diff --git a/crates/db/src/workspace/model.rs b/crates/db/src/workspace/model.rs index 37c353a47b..1a6b4ee41f 100644 --- a/crates/db/src/workspace/model.rs +++ b/crates/db/src/workspace/model.rs @@ -9,7 +9,7 @@ use sqlez::{ }; #[derive(Debug, PartialEq, Eq, Clone)] -pub(crate) struct WorkspaceId(Vec); +pub(crate) struct WorkspaceId(pub(crate) Vec); impl, T: IntoIterator> From for WorkspaceId { fn from(iterator: T) -> Self { @@ -22,7 +22,7 @@ impl, T: IntoIterator> From for WorkspaceId { } } -impl Bind for WorkspaceId { +impl Bind for &WorkspaceId { fn bind(&self, statement: &Statement, start_index: i32) -> Result { bincode::serialize(&self.0) .expect("Bincode serialization of paths should not fail") @@ -85,13 +85,16 @@ pub struct SerializedWorkspace { #[derive(Debug, PartialEq, Eq)] pub struct SerializedPaneGroup { axis: Axis, - children: Vec, + children: Vec, } +#[derive(Debug)] pub struct SerializedPane { - children: Vec, + _children: Vec, } +#[derive(Debug)] pub enum SerializedItemKind {} +#[derive(Debug)] pub enum SerializedItem {} diff --git a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs index 4f263e496c..73306707cf 100644 --- a/crates/db/src/workspace/pane.rs +++ b/crates/db/src/workspace/pane.rs @@ -1,10 +1,11 @@ -use gpui::Axis; +use anyhow::Result; use indoc::indoc; use sqlez::{connection::Connection, migrations::Migration}; -use util::{iife, ResultExt}; + +use crate::model::SerializedPane; use super::{ - model::{PaneGroupId, PaneId, SerializedDockPane, SerializedPaneGroup, WorkspaceId}, + model::{SerializedPaneGroup, WorkspaceId}, Db, }; @@ -44,11 +45,11 @@ pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( ); impl Db { - pub(crate) fn get_center_group(&self, _workspace: WorkspaceId) -> SerializedPaneGroup { + 
pub(crate) fn get_center_group(&self, _workspace: &WorkspaceId) -> SerializedPaneGroup { unimplemented!() } - pub fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup { + pub(crate) fn _get_pane_group(&self, _workspace: &WorkspaceId) -> SerializedPaneGroup { unimplemented!() // let axis = self.get_pane_group_axis(pane_group_id); // let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); @@ -91,31 +92,22 @@ impl Db { _workspace: &WorkspaceId, _center_pane_group: &SerializedPaneGroup, _connection: &Connection, - ) { + ) -> Result<()> { // Delete the center pane group for this workspace and any of its children // Generate new pane group IDs as we go through // insert them + Ok(()) } - pub fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane { - unimplemented!(); - } - - pub(crate) fn get_dock_pane(&self, workspace: WorkspaceId) -> Option { - iife!({ - self.prepare("SELECT anchor_position, visible FROM dock_panes WHERE workspace_id = ?")? - .with_bindings(workspace)? - .maybe_row::() - }) - .log_err() - .flatten() + pub(crate) fn get_dock_pane(&self, _workspace: &WorkspaceId) -> Option { + unimplemented!() } pub(crate) fn save_dock_pane( - workspace: &WorkspaceId, - dock_pane: &SerializedDockPane, - connection: &Connection, - ) { + _workspace: &WorkspaceId, + _dock_pane: &SerializedPane, + _connection: &Connection, + ) -> Result<()> { // iife!({ // self.prepare( // "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);", @@ -124,6 +116,7 @@ impl Db { // .insert() // }) // .log_err(); + Ok(()) } } diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index a22363ae63..1ce350a550 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -1,5 +1,5 @@ use std::{ - ffi::{CString, OsStr}, + ffi::OsStr, os::unix::prelude::OsStrExt, path::{Path, PathBuf}, }; diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index a6ef7c6c01..d1dbc6982b 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -15,7 +15,7 @@ use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; -use db::{Db, SerializedWorkspace, WorkspaceId}; +use db::{model::SerializedWorkspace, Db}; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -1073,7 +1073,7 @@ pub enum Event { pub struct Workspace { weak_self: WeakViewHandle, - _db_id: WorkspaceId, + // _db_id: WorkspaceId, client: Arc, user_store: ModelHandle, remote_entity_subscription: Option, @@ -1120,7 +1120,7 @@ enum FollowerItem { impl Workspace { pub fn new( - serialized_workspace: SerializedWorkspace, + _serialized_workspace: Option, project: ModelHandle, dock_default_factory: DefaultItemFactory, cx: &mut ViewContext, @@ -1217,7 +1217,7 @@ impl Workspace { let mut this = Workspace { modal: None, weak_self: weak_handle, - _db_id: serialized_workspace.workspace_id, + // _db_id: serialized_workspace.workspace_id, center: PaneGroup::new(center_pane.clone()), dock, // When removing an item, the last element remaining in this array From 01862894208f931a80be097ebd0e62bd89ecf949 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 4 Nov 2022 13:22:35 -0700 Subject: [PATCH 147/240] Refined sqlez, implemented 60% of workspace serialization sql --- crates/db/src/db.rs | 52 ++--- crates/db/src/migrations.rs | 14 -- crates/db/src/workspace.rs | 313 
++++++++++++------------------- crates/db/src/workspace/items.rs | 108 ++++++----- crates/db/src/workspace/model.rs | 148 +++++++++++++-- crates/db/src/workspace/pane.rs | 162 ++++++++-------- crates/sqlez/src/bindable.rs | 25 ++- crates/sqlez/src/connection.rs | 78 +++++--- crates/sqlez/src/savepoint.rs | 38 ++-- crates/sqlez/src/statement.rs | 54 +++++- crates/util/src/lib.rs | 10 + 11 files changed, 569 insertions(+), 433 deletions(-) delete mode 100644 crates/db/src/migrations.rs diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 07670e309a..97dfce0e19 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,5 +1,4 @@ pub mod kvp; -mod migrations; pub mod workspace; use std::fs; @@ -11,8 +10,9 @@ use indoc::indoc; use kvp::KVP_MIGRATION; use sqlez::connection::Connection; use sqlez::thread_safe_connection::ThreadSafeConnection; - +use workspace::items::ITEM_MIGRATIONS; use workspace::pane::PANE_MIGRATIONS; + pub use workspace::*; #[derive(Clone)] @@ -35,32 +35,21 @@ impl Db { .expect("Should be able to create the database directory"); let db_path = current_db_dir.join(Path::new("db.sqlite")); - Db( - ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) - .with_initialize_query(indoc! {" - PRAGMA journal_mode=WAL; - PRAGMA synchronous=NORMAL; - PRAGMA foreign_keys=TRUE; - PRAGMA case_sensitive_like=TRUE; - "}) - .with_migrations(&[KVP_MIGRATION, WORKSPACES_MIGRATION, PANE_MIGRATIONS]), - ) - } - - pub fn persisting(&self) -> bool { - self.persistent() + Db(initialize_connection(ThreadSafeConnection::new( + db_path.to_string_lossy().as_ref(), + true, + ))) } /// Open a in memory database for testing and as a fallback. pub fn open_in_memory(db_name: &str) -> Self { - Db(ThreadSafeConnection::new(db_name, false) - .with_initialize_query(indoc! {" - PRAGMA journal_mode=WAL; - PRAGMA synchronous=NORMAL; - PRAGMA foreign_keys=TRUE; - PRAGMA case_sensitive_like=TRUE; - "}) - .with_migrations(&[KVP_MIGRATION, WORKSPACES_MIGRATION, PANE_MIGRATIONS])) + Db(initialize_connection(ThreadSafeConnection::new( + db_name, false, + ))) + } + + pub fn persisting(&self) -> bool { + self.persistent() } pub fn write_to>(&self, dest: P) -> Result<()> { @@ -68,3 +57,18 @@ impl Db { self.backup_main(&destination) } } + +fn initialize_connection(conn: ThreadSafeConnection) -> ThreadSafeConnection { + conn.with_initialize_query(indoc! {" + PRAGMA journal_mode=WAL; + PRAGMA synchronous=NORMAL; + PRAGMA foreign_keys=TRUE; + PRAGMA case_sensitive_like=TRUE; + "}) + .with_migrations(&[ + KVP_MIGRATION, + WORKSPACES_MIGRATION, + PANE_MIGRATIONS, + ITEM_MIGRATIONS, + ]) +} diff --git a/crates/db/src/migrations.rs b/crates/db/src/migrations.rs deleted file mode 100644 index a95654f420..0000000000 --- a/crates/db/src/migrations.rs +++ /dev/null @@ -1,14 +0,0 @@ -// // use crate::items::ITEMS_M_1; -// use crate::{kvp::KVP_M_1, pane::PANE_M_1, WORKSPACES_MIGRATION}; - -// // This must be ordered by development time! Only ever add new migrations to the end!! -// // Bad things will probably happen if you don't monotonically edit this vec!!!! -// // And no re-ordering ever!!!!!!!!!! The results of these migrations are on the user's -// // file system and so everything we do here is locked in _f_o_r_e_v_e_r_. -// lazy_static::lazy_static! 
{ -// pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![ -// M::up(KVP_M_1), -// M::up(WORKSPACE_M_1), -// M::up(PANE_M_1) -// ]); -// } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index cf09bdd06e..b1d139066f 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,4 +1,4 @@ -mod items; +pub(crate) mod items; pub mod model; pub(crate) mod pane; @@ -58,8 +58,14 @@ impl Db { .flatten()?; Some(SerializedWorkspace { - dock_pane: self.get_dock_pane(&workspace_id)?, - center_group: self.get_center_group(&workspace_id), + dock_pane: self + .get_dock_pane(&workspace_id) + .context("Getting dock pane") + .log_err()?, + center_group: self + .get_center_group(&workspace_id) + .context("Getting center group") + .log_err()?, dock_anchor, dock_visible, }) @@ -70,231 +76,152 @@ impl Db { pub fn save_workspace>( &self, worktree_roots: &[P], - workspace: SerializedWorkspace, + old_roots: Option<&[P]>, + workspace: &SerializedWorkspace, ) { let workspace_id: WorkspaceId = worktree_roots.into(); - self.with_savepoint("update_worktrees", |conn| { + self.with_savepoint("update_worktrees", || { + if let Some(old_roots) = old_roots { + let old_id: WorkspaceId = old_roots.into(); + + self.prepare("DELETE FROM WORKSPACES WHERE workspace_id = ?")? + .with_bindings(&old_id)? + .exec()?; + } + // Delete any previous workspaces with the same roots. This cascades to all // other tables that are based on the same roots set. // Insert new workspace into workspaces table if none were found - self.prepare(indoc!{" - DELETE FROM workspaces WHERE workspace_id = ?1; - INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?1, ?, ?)"})? + self.prepare("DELETE FROM workspaces WHERE workspace_id = ?;")? + .with_bindings(&workspace_id)? + .exec()?; + + self.prepare( + "INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?, ?, ?)", + )? .with_bindings((&workspace_id, workspace.dock_anchor, workspace.dock_visible))? .exec()?; - + // Save center pane group and dock pane - Self::save_center_group(&workspace_id, &workspace.center_group, conn)?; - Self::save_dock_pane(&workspace_id, &workspace.dock_pane, conn)?; + self.save_center_group(&workspace_id, &workspace.center_group)?; + self.save_dock_pane(&workspace_id, &workspace.dock_pane)?; Ok(()) }) - .with_context(|| format!("Update workspace with roots {:?}", worktree_roots.iter().map(|p| p.as_ref()).collect::>())) + .with_context(|| { + format!( + "Update workspace with roots {:?}", + worktree_roots + .iter() + .map(|p| p.as_ref()) + .collect::>() + ) + }) .log_err(); } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec> { iife!({ - Ok::<_, anyhow::Error>(self.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? + // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html + Ok::<_, anyhow::Error>( + self.prepare( + "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", + )? .with_bindings(limit)? .rows::()? 
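// A sketch of the `iife!` pattern wrapped around this query, assuming the
// macro expands to an immediately-invoked closure, `(|| $block)()` (its
// definition appears later in this series): the closure gives `?` a
// Result/Option scope inside a function that returns a plain value, and
// `.log_err().unwrap_or_default()` then converts failure into a default.
//
//     fn first_line(path: &std::path::Path) -> Option<String> {
//         iife!({
//             let contents = std::fs::read_to_string(path).ok()?;
//             contents.lines().next().map(str::to_owned)
//         })
//     }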
- .into_iter().map(|id| id.0) - .collect::>>()) - - }).log_err().unwrap_or_default() + .into_iter() + .map(|id| id.paths()) + .collect::>>(), + ) + }) + .log_err() + .unwrap_or_default() } } #[cfg(test)] mod tests { + use crate::{ + model::{ + DockAnchor::{Bottom, Expanded, Right}, + SerializedWorkspace, + }, + Db, + }; - // use std::{path::PathBuf, thread::sleep, time::Duration}; + #[test] + fn test_basic_functionality() { + env_logger::init(); - // use crate::Db; + let db = Db::open_in_memory("test_basic_functionality"); - // use super::WorkspaceId; + let workspace_1 = SerializedWorkspace { + dock_anchor: Bottom, + dock_visible: true, + center_group: Default::default(), + dock_pane: Default::default(), + }; - // #[test] - // fn test_workspace_saving() { - // env_logger::init(); - // let db = Db::open_in_memory("test_new_worktrees_for_roots"); + let workspace_2 = SerializedWorkspace { + dock_anchor: Expanded, + dock_visible: false, + center_group: Default::default(), + dock_pane: Default::default(), + }; - // // Test nothing returned with no roots at first - // assert_eq!(db.workspace_for_roots::(&[]), None); + let workspace_3 = SerializedWorkspace { + dock_anchor: Right, + dock_visible: true, + center_group: Default::default(), + dock_pane: Default::default(), + }; - // // Test creation - // let workspace_1 = db.workspace_for_roots::(&[]); - // assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); + db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_1); + db.save_workspace(&["/tmp"], None, &workspace_2); - // // Ensure the timestamps are different - // sleep(Duration::from_secs(1)); - // db.make_new_workspace::(&[]); + db.write_to("test.db").unwrap(); - // // Test pulling another value from recent workspaces - // let workspace_2 = db.workspace_for_roots::(&[]); - // assert_eq!(workspace_2.workspace_id, WorkspaceId(2)); + // Test that paths are treated as a set + assert_eq!( + db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + workspace_1 + ); + assert_eq!( + db.workspace_for_roots(&["/tmp2", "/tmp"]).unwrap(), + workspace_1 + ); - // // Ensure the timestamps are different - // sleep(Duration::from_secs(1)); + // Make sure that other keys work + assert_eq!(db.workspace_for_roots(&["/tmp"]).unwrap(), workspace_2); + assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None); - // // Test creating a new workspace that doesn't exist already - // let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); - // assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); + // Test 'mutate' case of updating a pre-existing id + db.save_workspace(&["/tmp", "/tmp2"], Some(&["/tmp", "/tmp2"]), &workspace_2); + assert_eq!( + db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + workspace_2 + ); - // // Make sure it's in the recent workspaces.... 
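// Note for the assertions above: "paths are treated as a set" holds
// because WorkspaceId is derived from the worktree roots, and (assuming
// `From` sorts the roots before bincode-serializing them, which those
// assertions rely on) any ordering of the same roots yields the same
// BLOB key:
//
//     let a: WorkspaceId = ["/tmp", "/tmp2"].into();
//     let b: WorkspaceId = ["/tmp2", "/tmp"].into();
//     assert_eq!(a, b); // same sorted Vec<PathBuf>, same serialized bytes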
- // let workspace_3 = db.workspace_for_roots::(&[]); - // assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); + // Test other mechanism for mutating + db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_3); + assert_eq!( + db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + workspace_3 + ); - // // And that it can be pulled out again - // let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); - // assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); - // } - - // #[test] - // fn test_empty_worktrees() { - // let db = Db::open_in_memory("test_empty_worktrees"); - - // assert_eq!(None, db.workspace::(&[])); - - // db.make_new_workspace::(&[]); //ID 1 - // db.make_new_workspace::(&[]); //ID 2 - // db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); - - // // Sanity check - // assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(1)); - - // db.update_worktrees::(&WorkspaceId(1), &[]); - - // // Make sure 'no worktrees' fails correctly. returning [1, 2] from this - // // call would be semantically correct (as those are the workspaces that - // // don't have roots) but I'd prefer that this API to either return exactly one - // // workspace, and None otherwise - // assert_eq!(db.workspace::(&[]), None,); - - // assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(1)); - - // assert_eq!( - // db.recent_workspaces(2), - // vec![Vec::::new(), Vec::::new()], - // ) - // } - - // #[test] - // fn test_more_workspace_ids() { - // let data = &[ - // (WorkspaceId(1), vec!["/tmp1"]), - // (WorkspaceId(2), vec!["/tmp1", "/tmp2"]), - // (WorkspaceId(3), vec!["/tmp1", "/tmp2", "/tmp3"]), - // (WorkspaceId(4), vec!["/tmp2", "/tmp3"]), - // (WorkspaceId(5), vec!["/tmp2", "/tmp3", "/tmp4"]), - // (WorkspaceId(6), vec!["/tmp2", "/tmp4"]), - // (WorkspaceId(7), vec!["/tmp2"]), - // ]; - - // let db = Db::open_in_memory("test_more_workspace_ids"); - - // for (workspace_id, entries) in data { - // db.make_new_workspace::(&[]); - // db.update_worktrees(workspace_id, entries); - // } - - // assert_eq!(WorkspaceId(1), db.workspace(&["/tmp1"]).unwrap().0); - // assert_eq!(db.workspace(&["/tmp1", "/tmp2"]).unwrap().0, WorkspaceId(2)); - // assert_eq!( - // db.workspace(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().0, - // WorkspaceId(3) - // ); - // assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(4)); - // assert_eq!( - // db.workspace(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().0, - // WorkspaceId(5) - // ); - // assert_eq!(db.workspace(&["/tmp2", "/tmp4"]).unwrap().0, WorkspaceId(6)); - // assert_eq!(db.workspace(&["/tmp2"]).unwrap().0, WorkspaceId(7)); - - // assert_eq!(db.workspace(&["/tmp1", "/tmp5"]), None); - // assert_eq!(db.workspace(&["/tmp5"]), None); - // assert_eq!(db.workspace(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None); - // } - - // #[test] - // fn test_detect_workspace_id() { - // let data = &[ - // (WorkspaceId(1), vec!["/tmp"]), - // (WorkspaceId(2), vec!["/tmp", "/tmp2"]), - // (WorkspaceId(3), vec!["/tmp", "/tmp2", "/tmp3"]), - // ]; - - // let db = Db::open_in_memory("test_detect_workspace_id"); - - // for (workspace_id, entries) in data { - // db.make_new_workspace::(&[]); - // db.update_worktrees(workspace_id, entries); - // } - - // assert_eq!(db.workspace(&["/tmp2"]), None); - // assert_eq!(db.workspace(&["/tmp2", "/tmp3"]), None); - // assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); - // assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(2)); - // assert_eq!( - // db.workspace(&["/tmp", "/tmp2", 
"/tmp3"]).unwrap().0, - // WorkspaceId(3) - // ); - // } - - // #[test] - // fn test_tricky_overlapping_updates() { - // // DB state: - // // (/tree) -> ID: 1 - // // (/tree, /tree2) -> ID: 2 - // // (/tree2, /tree3) -> ID: 3 - - // // -> User updates 2 to: (/tree2, /tree3) - - // // DB state: - // // (/tree) -> ID: 1 - // // (/tree2, /tree3) -> ID: 2 - // // Get rid of 3 for garbage collection - - // let data = &[ - // (WorkspaceId(1), vec!["/tmp"]), - // (WorkspaceId(2), vec!["/tmp", "/tmp2"]), - // (WorkspaceId(3), vec!["/tmp2", "/tmp3"]), - // ]; - - // let db = Db::open_in_memory("test_tricky_overlapping_update"); - - // // Load in the test data - // for (workspace_id, entries) in data { - // db.make_new_workspace::(&[]); - // db.update_worktrees(workspace_id, entries); - // } - - // sleep(Duration::from_secs(1)); - // // Execute the update - // db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); - - // // Make sure that workspace 3 doesn't exist - // assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(2)); - - // // And that workspace 1 was untouched - // assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); - - // // And that workspace 2 is no longer registered under these roots - // assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None); - - // assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(2)); - - // let recent_workspaces = db.recent_workspaces(10); - // assert_eq!( - // recent_workspaces.get(0).unwrap(), - // &vec![PathBuf::from("/tmp2"), PathBuf::from("/tmp3")] - // ); - // assert_eq!( - // recent_workspaces.get(1).unwrap(), - // &vec![PathBuf::from("/tmp")] - // ); - // } + // Make sure that updating paths differently also works + db.save_workspace( + &["/tmp3", "/tmp4", "/tmp2"], + Some(&["/tmp", "/tmp2"]), + &workspace_3, + ); + assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None); + assert_eq!( + db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]) + .unwrap(), + workspace_3 + ); + } } diff --git a/crates/db/src/workspace/items.rs b/crates/db/src/workspace/items.rs index c3405974d5..87437ccf73 100644 --- a/crates/db/src/workspace/items.rs +++ b/crates/db/src/workspace/items.rs @@ -1,13 +1,11 @@ -// use std::{ -// ffi::OsStr, -// fmt::Display, -// hash::Hash, -// os::unix::prelude::OsStrExt, -// path::{Path, PathBuf}, -// sync::Arc, -// }; +use anyhow::{Context, Result}; +use indoc::indoc; +use sqlez::migrations::Migration; -// use anyhow::Result; +use crate::{ + model::{ItemId, PaneId, SerializedItem, SerializedItemKind, WorkspaceId}, + Db, +}; // use collections::HashSet; // use rusqlite::{named_params, params, types::FromSql}; @@ -65,45 +63,61 @@ // ) STRICT; // "; -// enum SerializedItemKind { -// Editor, -// Diagnostics, -// ProjectSearch, -// Terminal, -// } +pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new( + "item", + &[indoc! {" + CREATE TABLE items( + item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique + workspace_id BLOB NOT NULL, + pane_id INTEGER NOT NULL, + kind TEXT NOT NULL, + position INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE + PRIMARY KEY(item_id, workspace_id) + ) STRICT; + "}], +); -// struct SerializedItemRow { -// kind: SerializedItemKind, -// item_id: usize, -// path: Option>, -// query: Option, -// } +impl Db { + pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { + Ok(self + .prepare(indoc! 
{" + SELECT item_id, kind FROM items + WHERE pane_id = ? + ORDER BY position"})? + .with_bindings(pane_id)? + .rows::<(ItemId, SerializedItemKind)>()? + .into_iter() + .map(|(item_id, kind)| match kind { + SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, + _ => unimplemented!(), + }) + .collect()) + } -// #[derive(Debug, PartialEq, Eq)] -// pub enum SerializedItem { -// Editor { item_id: usize, path: Arc }, -// Diagnostics { item_id: usize }, -// ProjectSearch { item_id: usize, query: String }, -// Terminal { item_id: usize }, -// } + pub(crate) fn save_items( + &self, + workspace_id: &WorkspaceId, + pane_id: PaneId, + items: &[SerializedItem], + ) -> Result<()> { + let mut delete_old = self + .prepare("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? AND item_id = ?") + .context("Preparing deletion")?; + let mut insert_new = self.prepare( + "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", + ).context("Preparing insertion")?; + for (position, item) in items.iter().enumerate() { + delete_old + .with_bindings((workspace_id, pane_id, item.item_id()))? + .exec()?; -// impl SerializedItem { -// pub fn item_id(&self) -> usize { -// match self { -// SerializedItem::Editor { item_id, .. } => *item_id, -// SerializedItem::Diagnostics { item_id } => *item_id, -// SerializedItem::ProjectSearch { item_id, .. } => *item_id, -// SerializedItem::Terminal { item_id } => *item_id, -// } -// } -// } + insert_new + .with_bindings((item.item_id(), workspace_id, pane_id, item.kind(), position))? + .exec()?; + } -// impl Db { -// pub fn get_item(&self, item_id: ItemId) -> SerializedItem { -// unimplemented!() -// } - -// pub fn save_item(&self, workspace_id: WorkspaceId, item: &SerializedItem) {} - -// pub fn close_item(&self, item_id: ItemId) {} -// } + Ok(()) + } +} diff --git a/crates/db/src/workspace/model.rs b/crates/db/src/workspace/model.rs index 1a6b4ee41f..a2bb0c1cd2 100644 --- a/crates/db/src/workspace/model.rs +++ b/crates/db/src/workspace/model.rs @@ -1,4 +1,7 @@ -use std::path::{Path, PathBuf}; +use std::{ + path::{Path, PathBuf}, + sync::Arc, +}; use anyhow::{bail, Result}; @@ -8,8 +11,14 @@ use sqlez::{ statement::Statement, }; -#[derive(Debug, PartialEq, Eq, Clone)] -pub(crate) struct WorkspaceId(pub(crate) Vec); +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) struct WorkspaceId(Vec); + +impl WorkspaceId { + pub fn paths(self) -> Vec { + self.0 + } +} impl, T: IntoIterator> From for WorkspaceId { fn from(iterator: T) -> Self { @@ -74,7 +83,7 @@ impl Column for DockAnchor { pub(crate) type WorkspaceRow = (WorkspaceId, DockAnchor, bool); -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq)] pub struct SerializedWorkspace { pub dock_anchor: DockAnchor, pub dock_visible: bool, @@ -82,19 +91,134 @@ pub struct SerializedWorkspace { pub dock_pane: SerializedPane, } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Default)] pub struct SerializedPaneGroup { axis: Axis, children: Vec, } -#[derive(Debug)] -pub struct SerializedPane { - _children: Vec, +impl SerializedPaneGroup { + pub fn new() -> Self { + SerializedPaneGroup { + axis: Axis::Horizontal, + children: Vec::new(), + } + } } -#[derive(Debug)] -pub enum SerializedItemKind {} +#[derive(Debug, PartialEq, Eq, Default)] +pub struct SerializedPane { + pub(crate) children: Vec, +} -#[derive(Debug)] -pub enum SerializedItem {} +impl SerializedPane { + pub fn new(children: Vec) -> Self { + SerializedPane { children } + } +} + +pub type GroupId = i64; +pub type PaneId 
= i64; +pub type ItemId = usize; + +pub(crate) enum SerializedItemKind { + Editor, + Diagnostics, + ProjectSearch, + Terminal, +} + +impl Bind for SerializedItemKind { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + SerializedItemKind::Editor => "Editor", + SerializedItemKind::Diagnostics => "Diagnostics", + SerializedItemKind::ProjectSearch => "ProjectSearch", + SerializedItemKind::Terminal => "Terminal", + } + .bind(statement, start_index) + } +} + +impl Column for SerializedItemKind { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(anchor_text, next_index)| { + Ok(( + match anchor_text.as_ref() { + "Editor" => SerializedItemKind::Editor, + "Diagnostics" => SerializedItemKind::Diagnostics, + "ProjectSearch" => SerializedItemKind::ProjectSearch, + "Terminal" => SerializedItemKind::Terminal, + _ => bail!("Stored serialized item kind is incorrect"), + }, + next_index, + )) + }) + } +} + +#[derive(Debug, PartialEq, Eq)] +pub enum SerializedItem { + Editor { item_id: usize, path: Arc }, + Diagnostics { item_id: usize }, + ProjectSearch { item_id: usize, query: String }, + Terminal { item_id: usize }, +} + +impl SerializedItem { + pub fn item_id(&self) -> usize { + match self { + SerializedItem::Editor { item_id, .. } => *item_id, + SerializedItem::Diagnostics { item_id } => *item_id, + SerializedItem::ProjectSearch { item_id, .. } => *item_id, + SerializedItem::Terminal { item_id } => *item_id, + } + } + + pub(crate) fn kind(&self) -> SerializedItemKind { + match self { + SerializedItem::Editor { .. } => SerializedItemKind::Editor, + SerializedItem::Diagnostics { .. } => SerializedItemKind::Diagnostics, + SerializedItem::ProjectSearch { .. } => SerializedItemKind::ProjectSearch, + SerializedItem::Terminal { .. } => SerializedItemKind::Terminal, + } + } +} + +#[cfg(test)] +mod tests { + use sqlez::connection::Connection; + + use crate::model::DockAnchor; + + use super::WorkspaceId; + + #[test] + fn test_workspace_round_trips() { + let db = Connection::open_memory("workspace_id_round_trips"); + + db.exec(indoc::indoc! 
{" + CREATE TABLE workspace_id_test( + workspace_id BLOB, + dock_anchor TEXT + );"}) + .unwrap(); + + let workspace_id: WorkspaceId = WorkspaceId::from(&["\test2", "\test1"]); + + db.prepare("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)") + .unwrap() + .with_bindings((&workspace_id, DockAnchor::Bottom)) + .unwrap() + .exec() + .unwrap(); + + assert_eq!( + db.prepare("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1") + .unwrap() + .row::<(WorkspaceId, DockAnchor)>() + .unwrap(), + (WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom) + ); + } +} diff --git a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs index 73306707cf..f2b7fc8ef0 100644 --- a/crates/db/src/workspace/pane.rs +++ b/crates/db/src/workspace/pane.rs @@ -1,8 +1,9 @@ -use anyhow::Result; +use anyhow::{Context, Result}; use indoc::indoc; -use sqlez::{connection::Connection, migrations::Migration}; +use sqlez::migrations::Migration; +use util::unzip_option; -use crate::model::SerializedPane; +use crate::model::{GroupId, PaneId, SerializedPane}; use super::{ model::{SerializedPaneGroup, WorkspaceId}, @@ -19,79 +20,31 @@ pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE - PRIMARY KEY(group_id, workspace_id) ) STRICT; CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, workspace_id BLOB NOT NULL, group_id INTEGER, -- If null, this is a dock pane - idx INTEGER NOT NULL, + position INTEGER, -- If null, this is a dock pane FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE - PRIMARY KEY(pane_id, workspace_id) - ) STRICT; - - CREATE TABLE items( - item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique - pane_id INTEGER NOT NULL, - workspace_id BLOB NOT NULL, - kind TEXT NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE - PRIMARY KEY(item_id, workspace_id) ) STRICT; "}], ); impl Db { - pub(crate) fn get_center_group(&self, _workspace: &WorkspaceId) -> SerializedPaneGroup { - unimplemented!() + pub(crate) fn get_center_group( + &self, + _workspace_id: &WorkspaceId, + ) -> Result { + Ok(SerializedPaneGroup::new()) } - pub(crate) fn _get_pane_group(&self, _workspace: &WorkspaceId) -> SerializedPaneGroup { - unimplemented!() - // let axis = self.get_pane_group_axis(pane_group_id); - // let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); - // for child_row in self.get_pane_group_children(pane_group_id) { - // if let Some(child_pane_id) = child_row.child_pane_id { - // children.push(( - // child_row.index, - // PaneGroupChild::Pane(self.get_pane(PaneId { - // workspace_id: pane_group_id.workspace_id, - // pane_id: child_pane_id, - // })), - // )); - // } else if let Some(child_group_id) = child_row.child_group_id { - // children.push(( - // child_row.index, - // PaneGroupChild::Group(self.get_pane_group(PaneGroupId { - // workspace_id: pane_group_id.workspace_id, - // group_id: child_group_id, - // })), - // )); - // } - // } - // children.sort_by_key(|(index, _)| *index); - - // SerializedPaneGroup { - // group_id: pane_group_id, - // axis, - // children: children.into_iter().map(|(_, child)| child).collect(), - // } 
- } - - // fn _get_pane_group_children( - // &self, - // _pane_group_id: PaneGroupId, - // ) -> impl Iterator { - // Vec::new().into_iter() - // } - pub(crate) fn save_center_group( - _workspace: &WorkspaceId, + &self, + _workspace_id: &WorkspaceId, _center_pane_group: &SerializedPaneGroup, - _connection: &Connection, ) -> Result<()> { // Delete the center pane group for this workspace and any of its children // Generate new pane group IDs as we go through @@ -99,51 +52,86 @@ impl Db { Ok(()) } - pub(crate) fn get_dock_pane(&self, _workspace: &WorkspaceId) -> Option { - unimplemented!() + pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { + let pane_id = self + .prepare(indoc! {" + SELECT pane_id FROM panes + WHERE workspace_id = ? AND group_id IS NULL AND position IS NULL"})? + .with_bindings(workspace_id)? + .row::()?; + + Ok(SerializedPane::new( + self.get_items(pane_id).context("Reading items")?, + )) } pub(crate) fn save_dock_pane( - _workspace: &WorkspaceId, - _dock_pane: &SerializedPane, - _connection: &Connection, + &self, + workspace: &WorkspaceId, + dock_pane: &SerializedPane, ) -> Result<()> { - // iife!({ - // self.prepare( - // "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);", - // )? - // .with_bindings(dock_pane.to_row(workspace))? - // .insert() - // }) - // .log_err(); - Ok(()) + self.save_pane(workspace, &dock_pane, None) + } + + pub(crate) fn save_pane( + &self, + workspace_id: &WorkspaceId, + pane: &SerializedPane, + parent: Option<(GroupId, usize)>, + ) -> Result<()> { + let (parent_id, order) = unzip_option(parent); + + let pane_id = self + .prepare("INSERT INTO panes(workspace_id, group_id, position) VALUES (?, ?, ?)")? + .with_bindings((workspace_id, parent_id, order))? + .insert()? 
as PaneId; + + self.save_items(workspace_id, pane_id, &pane.children) + .context("Saving items") } } #[cfg(test)] mod tests { - // use crate::{items::ItemId, pane::SerializedPane, Db, DockAnchor}; + use crate::{ + model::{SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace}, + Db, + }; - // use super::{PaneGroupChild, SerializedDockPane, SerializedPaneGroup}; + fn default_workspace( + dock_pane: SerializedPane, + center_group: SerializedPaneGroup, + ) -> SerializedWorkspace { + SerializedWorkspace { + dock_anchor: crate::model::DockAnchor::Right, + dock_visible: false, + center_group, + dock_pane, + } + } - // #[test] - // fn test_basic_dock_pane() { - // let db = Db::open_in_memory("basic_dock_pane"); + #[test] + fn test_basic_dock_pane() { + let db = Db::open_in_memory("basic_dock_pane"); - // let workspace = db.workspace_for_roots(&["/tmp"]); + let dock_pane = crate::model::SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 1 }, + SerializedItem::Terminal { item_id: 4 }, + SerializedItem::Terminal { item_id: 2 }, + SerializedItem::Terminal { item_id: 3 }, + ], + }; - // let dock_pane = SerializedDockPane { - // anchor_position: DockAnchor::Expanded, - // visible: true, - // }; + let workspace = default_workspace(dock_pane, SerializedPaneGroup::new()); - // db.save_dock_pane(&workspace.workspace_id, &dock_pane); + db.save_workspace(&["/tmp"], None, &workspace); - // let new_workspace = db.workspace_for_roots(&["/tmp"]); + let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); - // assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane); - // } + assert_eq!(workspace.dock_pane, new_workspace.dock_pane); + } // #[test] // fn test_dock_simple_split() { diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index 1ce350a550..7a3483bcea 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -178,8 +178,29 @@ impl Column for (T1, T2, T3, T4) let (first, next_index) = T1::column(statement, start_index)?; let (second, next_index) = T2::column(statement, next_index)?; let (third, next_index) = T3::column(statement, next_index)?; - let (forth, next_index) = T4::column(statement, next_index)?; - Ok(((first, second, third, forth), next_index)) + let (fourth, next_index) = T4::column(statement, next_index)?; + Ok(((first, second, third, fourth), next_index)) + } +} + +impl Bind for (T1, T2, T3, T4, T5) { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = self.0.bind(statement, start_index)?; + let next_index = self.1.bind(statement, next_index)?; + let next_index = self.2.bind(statement, next_index)?; + let next_index = self.3.bind(statement, next_index)?; + self.4.bind(statement, next_index) + } +} + +impl Column for (T1, T2, T3, T4, T5) { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + let (third, next_index) = T3::column(statement, next_index)?; + let (fourth, next_index) = T4::column(statement, next_index)?; + let (fifth, next_index) = T5::column(statement, next_index)?; + Ok(((first, second, third, fourth, fifth), next_index)) } } diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index fcc180a48d..04a12cfc97 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -99,7 +99,29 @@ impl Connection { } pub(crate) fn last_error(&self) -> Result<()> { - 
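// SQLITE_OK (0) and SQLITE_ROW (100) are the two codes treated as success
// by this function: sqlite3_errcode reports the outcome of the most
// recent API call on the connection, and a statement that has just
// yielded a row legitimately reports SQLITE_ROW. The check reduces to:
//
//     let code = unsafe { sqlite3_errcode(self.sqlite3) };
//     if code == SQLITE_OK || code == SQLITE_ROW {
//         return Ok(());
//     }
//     // otherwise fetch the message and wrap it in an anyhow::Error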
unsafe { error_to_result(sqlite3_errcode(self.sqlite3)) } + unsafe { + let code = sqlite3_errcode(self.sqlite3); + const NON_ERROR_CODES: &[i32] = &[SQLITE_OK, SQLITE_ROW]; + if NON_ERROR_CODES.contains(&code) { + return Ok(()); + } + + let message = sqlite3_errmsg(self.sqlite3); + let message = if message.is_null() { + None + } else { + Some( + String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) + .into_owned(), + ) + }; + + Err(anyhow!( + "Sqlite call failed with code {} and message: {:?}", + code as isize, + message + )) + } } } @@ -109,31 +131,6 @@ impl Drop for Connection { } } -pub(crate) fn error_to_result(code: std::os::raw::c_int) -> Result<()> { - const NON_ERROR_CODES: &[i32] = &[SQLITE_OK, SQLITE_ROW]; - unsafe { - if NON_ERROR_CODES.contains(&code) { - return Ok(()); - } - - let message = sqlite3_errstr(code); - let message = if message.is_null() { - None - } else { - Some( - String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) - .into_owned(), - ) - }; - - Err(anyhow!( - "Sqlite call failed with code {} and message: {:?}", - code as isize, - message - )) - } -} - #[cfg(test)] mod test { use anyhow::Result; @@ -213,6 +210,35 @@ mod test { ); } + #[test] + fn bool_round_trips() { + let connection = Connection::open_memory("bool_round_trips"); + connection + .exec(indoc! {" + CREATE TABLE bools ( + t INTEGER, + f INTEGER + );"}) + .unwrap(); + + connection + .prepare("INSERT INTO bools(t, f) VALUES (?, ?);") + .unwrap() + .with_bindings((true, false)) + .unwrap() + .exec() + .unwrap(); + + assert_eq!( + &connection + .prepare("SELECT * FROM bools;") + .unwrap() + .row::<(bool, bool)>() + .unwrap(), + &(true, false) + ); + } + #[test] fn backup_works() { let connection1 = Connection::open_memory("backup_works"); diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs index 3d7830dd91..ba4b1e774b 100644 --- a/crates/sqlez/src/savepoint.rs +++ b/crates/sqlez/src/savepoint.rs @@ -8,11 +8,11 @@ impl Connection { // point is released. pub fn with_savepoint(&self, name: impl AsRef, f: F) -> Result where - F: FnOnce(&Connection) -> Result, + F: FnOnce() -> Result, { let name = name.as_ref().to_owned(); self.exec(format!("SAVEPOINT {}", &name))?; - let result = f(self); + let result = f(); match result { Ok(_) => { self.exec(format!("RELEASE {}", name))?; @@ -30,11 +30,11 @@ impl Connection { // point is released. pub fn with_savepoint_rollback(&self, name: impl AsRef, f: F) -> Result> where - F: FnOnce(&Connection) -> Result>, + F: FnOnce() -> Result>, { let name = name.as_ref().to_owned(); self.exec(format!("SAVEPOINT {}", &name))?; - let result = f(self); + let result = f(); match result { Ok(Some(_)) => { self.exec(format!("RELEASE {}", name))?; @@ -69,21 +69,21 @@ mod tests { let save1_text = "test save1"; let save2_text = "test save2"; - connection.with_savepoint("first", |save1| { - save1 + connection.with_savepoint("first", || { + connection .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? .with_bindings((save1_text, 1))? .exec()?; - assert!(save1 - .with_savepoint("second", |save2| -> Result, anyhow::Error> { - save2 + assert!(connection + .with_savepoint("second", || -> Result, anyhow::Error> { + connection .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? .with_bindings((save2_text, 2))? .exec()?; assert_eq!( - save2 + connection .prepare("SELECT text FROM text ORDER BY text.idx ASC")? 
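// The nested-savepoint dance this test exercises, in plain SQL (assuming
// the elided error arms roll back to the savepoint before releasing it,
// as the helper names imply):
//
//     SAVEPOINT first;        -- outer scope opens
//     SAVEPOINT second;       -- inner scope opens
//     ROLLBACK TO second;     -- undo inner writes; savepoint stays open
//     RELEASE second;         -- fold the inner scope into the outer one
//     RELEASE first;          -- commit everything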
.rows::()?, vec![save1_text, save2_text], @@ -95,20 +95,20 @@ mod tests { .is_some()); assert_eq!( - save1 + connection .prepare("SELECT text FROM text ORDER BY text.idx ASC")? .rows::()?, vec![save1_text], ); - save1.with_savepoint_rollback::<(), _>("second", |save2| { - save2 + connection.with_savepoint_rollback::<(), _>("second", || { + connection .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? .with_bindings((save2_text, 2))? .exec()?; assert_eq!( - save2 + connection .prepare("SELECT text FROM text ORDER BY text.idx ASC")? .rows::()?, vec![save1_text, save2_text], @@ -118,20 +118,20 @@ mod tests { })?; assert_eq!( - save1 + connection .prepare("SELECT text FROM text ORDER BY text.idx ASC")? .rows::()?, vec![save1_text], ); - save1.with_savepoint_rollback("second", |save2| { - save2 + connection.with_savepoint_rollback("second", || { + connection .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? .with_bindings((save2_text, 2))? .exec()?; assert_eq!( - save2 + connection .prepare("SELECT text FROM text ORDER BY text.idx ASC")? .rows::()?, vec![save1_text, save2_text], @@ -141,7 +141,7 @@ mod tests { })?; assert_eq!( - save1 + connection .prepare("SELECT text FROM text ORDER BY text.idx ASC")? .rows::()?, vec![save1_text, save2_text], diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index e2b59d86f1..f0de8703ab 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -6,7 +6,7 @@ use anyhow::{anyhow, Context, Result}; use libsqlite3_sys::*; use crate::bindable::{Bind, Column}; -use crate::connection::{error_to_result, Connection}; +use crate::connection::Connection; pub struct Statement<'a> { raw_statement: *mut sqlite3_stmt, @@ -48,7 +48,9 @@ impl<'a> Statement<'a> { 0 as *mut _, ); - connection.last_error().context("Prepare call failed.")?; + connection + .last_error() + .with_context(|| format!("Prepare call failed for query:\n{}", query.as_ref()))?; } Ok(statement) @@ -309,10 +311,7 @@ impl<'a> Statement<'a> { impl<'a> Drop for Statement<'a> { fn drop(&mut self) { - unsafe { - let error = sqlite3_finalize(self.raw_statement); - error_to_result(error).expect("failed error"); - }; + unsafe { sqlite3_finalize(self.raw_statement) }; } } @@ -327,9 +326,9 @@ mod test { let connection1 = Connection::open_memory("blob_round_trips"); connection1 .exec(indoc! {" - CREATE TABLE blobs ( - data BLOB - );"}) + CREATE TABLE blobs ( + data BLOB + );"}) .unwrap(); let blob = &[0, 1, 2, 4, 8, 16, 32, 64]; @@ -352,4 +351,41 @@ mod test { let mut read = connection1.prepare("SELECT * FROM blobs;").unwrap(); assert_eq!(read.step().unwrap(), StepResult::Done); } + + #[test] + pub fn maybe_returns_options() { + let connection = Connection::open_memory("maybe_returns_options"); + connection + .exec(indoc! 
{" + CREATE TABLE texts ( + text TEXT + );"}) + .unwrap(); + + assert!(connection + .prepare("SELECT text FROM texts") + .unwrap() + .maybe_row::() + .unwrap() + .is_none()); + + let text_to_insert = "This is a test"; + + connection + .prepare("INSERT INTO texts VALUES (?)") + .unwrap() + .with_bindings(text_to_insert) + .unwrap() + .exec() + .unwrap(); + + assert_eq!( + connection + .prepare("SELECT text FROM texts") + .unwrap() + .maybe_row::() + .unwrap(), + Some(text_to_insert.to_string()) + ); + } } diff --git a/crates/util/src/lib.rs b/crates/util/src/lib.rs index 3757da5854..19d17c1190 100644 --- a/crates/util/src/lib.rs +++ b/crates/util/src/lib.rs @@ -204,6 +204,16 @@ impl Iterator for RandomCharIter { } } +// copy unstable standard feature option unzip +// https://github.com/rust-lang/rust/issues/87800 +// Remove when this ship in Rust 1.66 or 1.67 +pub fn unzip_option(option: Option<(T, U)>) -> (Option, Option) { + match option { + Some((a, b)) => (Some(a), Some(b)), + None => (None, None), + } +} + #[macro_export] macro_rules! iife { ($block:block) => { From f27a9d77d18cc6f1c9ab7efcf6929dd1eadf7c98 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 4 Nov 2022 17:48:29 -0700 Subject: [PATCH 148/240] Finished the bulk of workspace serialization. Just items and wiring it all through. Co-Authored-By: kay@zed.dev --- crates/db/src/workspace.rs | 10 +- crates/db/src/workspace/items.rs | 60 +---------- crates/db/src/workspace/model.rs | 64 +++++++++--- crates/db/src/workspace/pane.rs | 170 +++++++++++++++++++++++-------- crates/db/test.db | Bin 0 -> 40960 bytes 5 files changed, 185 insertions(+), 119 deletions(-) create mode 100644 crates/db/test.db diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index b1d139066f..9b2d9e4563 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -63,7 +63,7 @@ impl Db { .context("Getting dock pane") .log_err()?, center_group: self - .get_center_group(&workspace_id) + .get_center_pane_group(&workspace_id) .context("Getting center group") .log_err()?, dock_anchor, @@ -104,8 +104,8 @@ impl Db { .exec()?; // Save center pane group and dock pane - self.save_center_group(&workspace_id, &workspace.center_group)?; - self.save_dock_pane(&workspace_id, &workspace.dock_pane)?; + self.save_pane_group(&workspace_id, &workspace.center_group, None)?; + self.save_pane(&workspace_id, &workspace.dock_pane, None)?; Ok(()) }) @@ -152,8 +152,8 @@ mod tests { }; #[test] - fn test_basic_functionality() { - env_logger::init(); + fn test_workspace_assignment() { + env_logger::try_init().ok(); let db = Db::open_in_memory("test_basic_functionality"); diff --git a/crates/db/src/workspace/items.rs b/crates/db/src/workspace/items.rs index 87437ccf73..25873a7f9b 100644 --- a/crates/db/src/workspace/items.rs +++ b/crates/db/src/workspace/items.rs @@ -6,63 +6,11 @@ use crate::{ model::{ItemId, PaneId, SerializedItem, SerializedItemKind, WorkspaceId}, Db, }; -// use collections::HashSet; -// use rusqlite::{named_params, params, types::FromSql}; - -// use crate::workspace::WorkspaceId; - -// use super::Db; - -// /// Current design makes the cut at the item level, -// /// - Maybe A little more bottom up, serialize 'Terminals' and 'Editors' directly, and then make a seperate -// /// - items table, with a kind, and an integer that acts as a key to one of these other tables -// /// This column is a foreign key to ONE OF: editors, terminals, searches -// /// - - -// // (workspace_id, item_id) -// // kind -> ::Editor:: - -// // -> -// // At the 
workspace level -// // -> (Workspace_ID, item_id) -// // -> One shot, big query, load everything up: - -// // -> SerializedWorkspace::deserialize(tx, itemKey) -// // -> SerializedEditor::deserialize(tx, itemKey) - -// // -> -// // -> Workspace::new(SerializedWorkspace) -// // -> Editor::new(serialized_workspace[???]serializedEditor) - -// // //Pros: Keeps sql out of every body elese, makes changing it easier (e.g. for loading from a network or RocksDB) -// // //Cons: DB has to know the internals of the entire rest of the app - -// // Workspace -// // Worktree roots -// // Pane groups -// // Dock -// // Items -// // Sidebars - -// // Things I'm doing: finding about nullability for foreign keys -// pub(crate) const ITEMS_M_1: &str = " -// CREATE TABLE project_searches( -// workspace_id INTEGER, -// item_id INTEGER, -// query TEXT, -// PRIMARY KEY (workspace_id, item_id) -// FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) -// ) STRICT; - -// CREATE TABLE editors( -// workspace_id INTEGER, -// item_id INTEGER, -// path BLOB NOT NULL, -// PRIMARY KEY (workspace_id, item_id) -// FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) -// ) STRICT; -// "; +// 1) Move all of this into Workspace crate +// 2) Deserialize items fully +// 3) Typed prepares (including how you expect to pull data out) +// 4) Investigate Tree column impls pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new( "item", &[indoc! {" diff --git a/crates/db/src/workspace/model.rs b/crates/db/src/workspace/model.rs index a2bb0c1cd2..1d9065f6d9 100644 --- a/crates/db/src/workspace/model.rs +++ b/crates/db/src/workspace/model.rs @@ -5,7 +5,6 @@ use std::{ use anyhow::{bail, Result}; -use gpui::Axis; use sqlez::{ bindable::{Bind, Column}, statement::Statement, @@ -91,22 +90,61 @@ pub struct SerializedWorkspace { pub dock_pane: SerializedPane, } -#[derive(Debug, PartialEq, Eq, Default)] -pub struct SerializedPaneGroup { - axis: Axis, - children: Vec, +#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] +pub enum Axis { + #[default] + Horizontal, + Vertical, } -impl SerializedPaneGroup { - pub fn new() -> Self { - SerializedPaneGroup { +impl Bind for Axis { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + Axis::Horizontal => "Horizontal", + Axis::Vertical => "Vertical", + } + .bind(statement, start_index) + } +} + +impl Column for Axis { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(axis_text, next_index)| { + Ok(( + match axis_text.as_str() { + "Horizontal" => Axis::Horizontal, + "Vertical" => Axis::Vertical, + _ => bail!("Stored serialized item kind is incorrect"), + }, + next_index, + )) + }) + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum SerializedPaneGroup { + Group { + axis: Axis, + children: Vec, + }, + Pane(SerializedPane), +} + +// Dock panes, and grouped panes combined? 
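// A worked example of the tree this enum models, using the definitions
// nearby (a vertical stack nested inside a horizontal split):
//
//     SerializedPaneGroup::Group {
//         axis: Axis::Horizontal,
//         children: vec![
//             SerializedPaneGroup::Group {
//                 axis: Axis::Vertical,
//                 children: vec![/* nested groups or panes */],
//             },
//             SerializedPaneGroup::Pane(SerializedPane::new(vec![/* items */])),
//         ],
//     }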
+// AND we're collapsing PaneGroup::Pane +// In the case where + +impl Default for SerializedPaneGroup { + fn default() -> Self { + Self::Group { axis: Axis::Horizontal, - children: Vec::new(), + children: vec![Self::Pane(Default::default())], } } } -#[derive(Debug, PartialEq, Eq, Default)] +#[derive(Debug, PartialEq, Eq, Default, Clone)] pub struct SerializedPane { pub(crate) children: Vec, } @@ -142,9 +180,9 @@ impl Bind for SerializedItemKind { impl Column for SerializedItemKind { fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(anchor_text, next_index)| { + String::column(statement, start_index).and_then(|(kind_text, next_index)| { Ok(( - match anchor_text.as_ref() { + match kind_text.as_ref() { "Editor" => SerializedItemKind::Editor, "Diagnostics" => SerializedItemKind::Diagnostics, "ProjectSearch" => SerializedItemKind::ProjectSearch, @@ -157,7 +195,7 @@ impl Column for SerializedItemKind { } } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub enum SerializedItem { Editor { item_id: usize, path: Arc }, Diagnostics { item_id: usize }, diff --git a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs index f2b7fc8ef0..7fef2d6b75 100644 --- a/crates/db/src/workspace/pane.rs +++ b/crates/db/src/workspace/pane.rs @@ -1,9 +1,9 @@ -use anyhow::{Context, Result}; +use anyhow::{bail, Context, Result}; use indoc::indoc; use sqlez::migrations::Migration; use util::unzip_option; -use crate::model::{GroupId, PaneId, SerializedPane}; +use crate::model::{Axis, GroupId, PaneId, SerializedPane}; use super::{ model::{SerializedPaneGroup, WorkspaceId}, @@ -16,47 +16,107 @@ pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( CREATE TABLE pane_groups( group_id INTEGER PRIMARY KEY, workspace_id BLOB NOT NULL, - parent_group INTEGER, -- NULL indicates that this is a root node + parent_group_id INTEGER, -- NULL indicates that this is a root node + position INTEGER, -- NULL indicates that this is a root node axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, workspace_id BLOB NOT NULL, - group_id INTEGER, -- If null, this is a dock pane - position INTEGER, -- If null, this is a dock pane + parent_group_id INTEGER, -- NULL, this is a dock pane + position INTEGER, -- NULL, this is a dock pane FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; "}], ); impl Db { - pub(crate) fn get_center_group( + pub(crate) fn get_center_pane_group( &self, - _workspace_id: &WorkspaceId, + workspace_id: &WorkspaceId, ) -> Result { - Ok(SerializedPaneGroup::new()) + self.get_pane_group_children(workspace_id, None)? + .into_iter() + .next() + .context("No center pane group") } - pub(crate) fn save_center_group( + fn get_pane_group_children( &self, - _workspace_id: &WorkspaceId, - _center_pane_group: &SerializedPaneGroup, + workspace_id: &WorkspaceId, + group_id: Option, + ) -> Result> { + let children = self + .prepare(indoc! 
{" + SELECT group_id, axis, pane_id + FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id + FROM pane_groups + UNION + SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id + FROM panes + -- Remove the dock panes from the union + WHERE parent_group_id IS NOT NULL and position IS NOT NULL) + WHERE parent_group_id IS ? AND workspace_id = ? + ORDER BY position + "})? + .with_bindings((group_id, workspace_id))? + .rows::<(Option, Option, Option)>()?; + + children + .into_iter() + .map(|(group_id, axis, pane_id)| { + if let Some((group_id, axis)) = group_id.zip(axis) { + Ok(SerializedPaneGroup::Group { + axis, + children: self.get_pane_group_children(workspace_id, Some(group_id))?, + }) + } else if let Some(pane_id) = pane_id { + Ok(SerializedPaneGroup::Pane(SerializedPane { + children: self.get_items(pane_id)?, + })) + } else { + bail!("Pane Group Child was neither a pane group or a pane"); + } + }) + .collect::>() + } + + pub(crate) fn save_pane_group( + &self, + workspace_id: &WorkspaceId, + pane_group: &SerializedPaneGroup, + parent: Option<(GroupId, usize)>, ) -> Result<()> { - // Delete the center pane group for this workspace and any of its children - // Generate new pane group IDs as we go through - // insert them - Ok(()) + if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { .. }) { + bail!("Pane groups must have a SerializedPaneGroup::Group at the root") + } + + let (parent_id, position) = unzip_option(parent); + + match pane_group { + SerializedPaneGroup::Group { axis, children } => { + let parent_id = self.prepare("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? + .with_bindings((workspace_id, parent_id, position, *axis))? + .insert()? as GroupId; + + for (position, group) in children.iter().enumerate() { + self.save_pane_group(workspace_id, group, Some((parent_id, position)))? + } + Ok(()) + } + SerializedPaneGroup::Pane(pane) => self.save_pane(workspace_id, pane, parent), + } } pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { let pane_id = self .prepare(indoc! {" SELECT pane_id FROM panes - WHERE workspace_id = ? AND group_id IS NULL AND position IS NULL"})? + WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})? .with_bindings(workspace_id)? .row::()?; @@ -65,14 +125,6 @@ impl Db { )) } - pub(crate) fn save_dock_pane( - &self, - workspace: &WorkspaceId, - dock_pane: &SerializedPane, - ) -> Result<()> { - self.save_pane(workspace, &dock_pane, None) - } - pub(crate) fn save_pane( &self, workspace_id: &WorkspaceId, @@ -82,7 +134,7 @@ impl Db { let (parent_id, order) = unzip_option(parent); let pane_id = self - .prepare("INSERT INTO panes(workspace_id, group_id, position) VALUES (?, ?, ?)")? + .prepare("INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)")? .with_bindings((workspace_id, parent_id, order))? .insert()? 
as PaneId; @@ -101,18 +153,20 @@ mod tests { fn default_workspace( dock_pane: SerializedPane, - center_group: SerializedPaneGroup, + center_group: &SerializedPaneGroup, ) -> SerializedWorkspace { SerializedWorkspace { dock_anchor: crate::model::DockAnchor::Right, dock_visible: false, - center_group, + center_group: center_group.clone(), dock_pane, } } #[test] fn test_basic_dock_pane() { + env_logger::try_init().ok(); + let db = Db::open_in_memory("basic_dock_pane"); let dock_pane = crate::model::SerializedPane { @@ -124,7 +178,7 @@ mod tests { ], }; - let workspace = default_workspace(dock_pane, SerializedPaneGroup::new()); + let workspace = default_workspace(dock_pane, &Default::default()); db.save_workspace(&["/tmp"], None, &workspace); @@ -133,24 +187,50 @@ mod tests { assert_eq!(workspace.dock_pane, new_workspace.dock_pane); } - // #[test] - // fn test_dock_simple_split() { - // let db = Db::open_in_memory("simple_split"); + #[test] + fn test_simple_split() { + env_logger::try_init().ok(); - // let workspace = db.workspace_for_roots(&["/tmp"]); + let db = Db::open_in_memory("simple_split"); - // // Pane group -> Pane -> 10 , 20 - // let center_pane = SerializedPaneGroup { - // axis: gpui::Axis::Horizontal, - // children: vec![PaneGroupChild::Pane(SerializedPane { - // items: vec![ItemId { item_id: 10 }, ItemId { item_id: 20 }], - // })], - // }; + // ----------------- + // | 1,2 | 5,6 | + // | - - - | | + // | 3,4 | | + // ----------------- + let center_pane = SerializedPaneGroup::Group { + axis: crate::model::Axis::Horizontal, + children: vec![ + SerializedPaneGroup::Group { + axis: crate::model::Axis::Vertical, + children: vec![ + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 1 }, + SerializedItem::Terminal { item_id: 2 }, + ], + }), + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 4 }, + SerializedItem::Terminal { item_id: 3 }, + ], + }), + ], + }, + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 5 }, + SerializedItem::Terminal { item_id: 6 }, + ], + }), + ], + }; - // db.save_pane_splits(&workspace.workspace_id, ¢er_pane); + let workspace = default_workspace(Default::default(), ¢er_pane); - // // let new_workspace = db.workspace_for_roots(&["/tmp"]); + db.save_workspace(&["/tmp"], None, &workspace); - // // assert_eq!(new_workspace.center_group, center_pane); - // } + assert_eq!(workspace.center_group, center_pane); + } } diff --git a/crates/db/test.db b/crates/db/test.db new file mode 100644 index 0000000000000000000000000000000000000000..09a0bc8f1126715177b2517c92827685e8b0fb7c GIT binary patch literal 40960 zcmWFz^vNtqRY=P(%1ta$FlG>7U}R))P*7lCVBlt8VBlgv01%%A!DV1XV&h_RGw8)j z@$$DYaB|o&@b~aN<`3pw!?%um3zs^F9p@ih+D64kLtr!nMnhmU1V%$(Gz1232sB%B zu#1a}Gq#16BqrsgW|pMp7J~^+=O9Hj$qTuNl;_B`i zq~PZtqTmC5jyj>$TkR70@5ajCS8szHd>>8{9mncS44Z##wGZ#BJqFal>5e;%fF__>$iD8LwICBwS$5H%4Elg z7^nm!dSLdzlQ-4lMj-g3q{I23`~=Q&7_kn@N%4t!$r<@Y;4lT}9Y|2B zJLQ*@gS!Qu&Qcfx?X@QM!%FoY9P0RxadP!z( zYH>+oZUNW^1s7L0$55XT1?SM9AXmST_z+KD*WeIG-vD@t3@@>aS=q$h6>*hVaPJkz z8zR_jc}(o$va*bg`ruH@E{iWN$uCNU(ab0z1`~yZS9WS8#EBTeRF;@inhKGF)nIT( z>NB&6JIms7B+Q0*Ll_Ixr{)0F@kNZ_=!H5oH#5B`5mW;eBUnr*jzx$oK-`;>pPQJO z2X+=DB^H;Y7Qmwc9QJTMU{ze2ybKHsES#$t_!skS;*Q~51#-tI9u0xf5Eu=C(GVC7 zfzc2c4S~@R7z`oMtk1$K%Fz}H>X4%}13?1CkOm-#32NKJI^D3gDYywfI=TWMu>tp! 
zL1QRLgEb&AYL2d0bF~FCvx;)`DuPT#i7}ACXm5Oo_r|TcxG*9WnhnA6i!{iI(S3yX zPC=O!+!2T8RB$g1ZL|p9g#(RijO?*5Yp%vTCRTZlW?y(1qQnAR6dDz9K0Kr`A_F{T zF=&T8thu5YS%oLtr!nMnhmU1V%$( zGz3ONU^E0qLtr!nMnhmU1O`P2fad?1_&XW+_w#oS3I~lkVl)IsLtr!nMnhmU1V%$( zGz3ONU^E0qLtr!nMnhmU1cq}6m@u)L$Z{CN4%Gu43IOu1eo1Zt?1%*;10y3{LqlBy zQw0M9D`R6T6Ek~8RzFEjVLqL*+71X20)}fbTW@Rv8f#gZ;j1ySGWc Date: Fri, 4 Nov 2022 17:56:47 -0700 Subject: [PATCH 149/240] Re-use big union statement for get_center_pane --- crates/db/src/workspace/pane.rs | 44 +++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs index 7fef2d6b75..8528acb8af 100644 --- a/crates/db/src/workspace/pane.rs +++ b/crates/db/src/workspace/pane.rs @@ -1,6 +1,6 @@ use anyhow::{bail, Context, Result}; use indoc::indoc; -use sqlez::migrations::Migration; +use sqlez::{migrations::Migration, statement::Statement}; use util::unzip_option; use crate::model::{Axis, GroupId, PaneId, SerializedPane}; @@ -39,19 +39,7 @@ impl Db { &self, workspace_id: &WorkspaceId, ) -> Result { - self.get_pane_group_children(workspace_id, None)? - .into_iter() - .next() - .context("No center pane group") - } - - fn get_pane_group_children( - &self, - workspace_id: &WorkspaceId, - group_id: Option, - ) -> Result> { - let children = self - .prepare(indoc! {" + let mut query = self.prepare(indoc! {" SELECT group_id, axis, pane_id FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id FROM pane_groups @@ -62,9 +50,25 @@ impl Db { WHERE parent_group_id IS NOT NULL and position IS NOT NULL) WHERE parent_group_id IS ? AND workspace_id = ? ORDER BY position - "})? - .with_bindings((group_id, workspace_id))? - .rows::<(Option, Option, Option)>()?; + "})?; + + self.get_pane_group_children(workspace_id, None, &mut query)? 
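// Rows coming back from the UNION prepared above are one of two shapes,
// which the match below splits on:
//
//     group_id | axis       | pane_id
//     ---------+------------+--------
//     2        | 'Vertical' | NULL     -- child is a nested group: recurse
//     NULL     | NULL       | 7        -- child is a pane: load its items
//
// Dock panes never appear because the inner SELECT filters out rows with
// NULL parent_group_id / position, and preparing the statement once and
// threading it through the recursion avoids re-preparing it per group.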
+ .into_iter() + .next() + .context("No center pane group") + } + + fn get_pane_group_children( + &self, + workspace_id: &WorkspaceId, + group_id: Option, + query: &mut Statement, + ) -> Result> { + let children = query.with_bindings((group_id, workspace_id))?.rows::<( + Option, + Option, + Option, + )>()?; children .into_iter() @@ -72,7 +76,11 @@ impl Db { if let Some((group_id, axis)) = group_id.zip(axis) { Ok(SerializedPaneGroup::Group { axis, - children: self.get_pane_group_children(workspace_id, Some(group_id))?, + children: self.get_pane_group_children( + workspace_id, + Some(group_id), + query, + )?, }) } else if let Some(pane_id) = pane_id { Ok(SerializedPaneGroup::Pane(SerializedPane { From 4a00f0b062c0f55d178c72f6b5e3c3cae20d6308 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Sun, 6 Nov 2022 17:00:34 -0800 Subject: [PATCH 150/240] Add typed statements --- .gitignore | 3 - crates/db/src/kvp.rs | 19 +-- crates/db/src/workspace.rs | 36 ++--- crates/db/src/workspace/items.rs | 34 ++-- crates/db/src/workspace/model.rs | 23 ++- crates/db/src/workspace/pane.rs | 62 +++---- crates/db/test.db | Bin 40960 -> 40960 bytes crates/sqlez/src/connection.rs | 161 +++++++----------- crates/sqlez/src/lib.rs | 1 + crates/sqlez/src/migrations.rs | 114 ++++++------- crates/sqlez/src/savepoint.rs | 80 ++++----- crates/sqlez/src/statement.rs | 179 ++++++++++++--------- crates/sqlez/src/thread_safe_connection.rs | 3 +- crates/sqlez/src/typed_statements.rs | 67 ++++++++ 14 files changed, 388 insertions(+), 394 deletions(-) create mode 100644 crates/sqlez/src/typed_statements.rs diff --git a/.gitignore b/.gitignore index da1950f2b3..e2d90adbb1 100644 --- a/.gitignore +++ b/.gitignore @@ -10,7 +10,6 @@ /assets/themes/Internal/*.json /assets/themes/Experiments/*.json **/venv -<<<<<<< HEAD .build Packages *.xcodeproj @@ -19,6 +18,4 @@ DerivedData/ .swiftpm/config/registries.json .swiftpm/xcode/package.xcworkspace/contents.xcworkspacedata .netrc -======= crates/db/test-db.db ->>>>>>> 9d9ad38ce (Successfully detecting workplace IDs :D) diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 93be5e10c0..6f1230f7b8 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -15,24 +15,19 @@ pub(crate) const KVP_MIGRATION: Migration = Migration::new( impl Db { pub fn read_kvp(&self, key: &str) -> Result> { - self.0 - .prepare("SELECT value FROM kv_store WHERE key = (?)")? - .with_bindings(key)? - .maybe_row() + self.select_row_bound("SELECT value FROM kv_store WHERE key = (?)")?(key) } pub fn write_kvp(&self, key: &str, value: &str) -> Result<()> { - self.0 - .prepare("INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))")? - .with_bindings((key, value))? - .exec() + self.exec_bound("INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))")?(( + key, value, + ))?; + + Ok(()) } pub fn delete_kvp(&self, key: &str) -> Result<()> { - self.0 - .prepare("DELETE FROM kv_store WHERE key = (?)")? - .with_bindings(key)? 
- .exec() + self.exec_bound("DELETE FROM kv_store WHERE key = (?)")?(key) } } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 9b2d9e4563..c4e4873dce 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -22,7 +22,7 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( "}], ); -use self::model::{SerializedWorkspace, WorkspaceId, WorkspaceRow}; +use self::model::{SerializedWorkspace, WorkspaceId}; use super::Db; @@ -40,21 +40,19 @@ impl Db { // and we've grabbed the most recent workspace let (workspace_id, dock_anchor, dock_visible) = iife!({ if worktree_roots.len() == 0 { - self.prepare(indoc! {" + self.select_row(indoc! {" SELECT workspace_id, dock_anchor, dock_visible FROM workspaces - ORDER BY timestamp DESC LIMIT 1"})? - .maybe_row::() + ORDER BY timestamp DESC LIMIT 1"})?()? } else { - self.prepare(indoc! {" + self.select_row_bound(indoc! {" SELECT workspace_id, dock_anchor, dock_visible FROM workspaces - WHERE workspace_id = ?"})? - .with_bindings(&workspace_id)? - .maybe_row::() + WHERE workspace_id = ?"})?(&workspace_id)? } + .context("No workspaces found") }) - .log_err() + .warn_on_err() .flatten()?; Some(SerializedWorkspace { @@ -85,23 +83,17 @@ impl Db { if let Some(old_roots) = old_roots { let old_id: WorkspaceId = old_roots.into(); - self.prepare("DELETE FROM WORKSPACES WHERE workspace_id = ?")? - .with_bindings(&old_id)? - .exec()?; + self.exec_bound("DELETE FROM WORKSPACES WHERE workspace_id = ?")?(&old_id)?; } // Delete any previous workspaces with the same roots. This cascades to all // other tables that are based on the same roots set. // Insert new workspace into workspaces table if none were found - self.prepare("DELETE FROM workspaces WHERE workspace_id = ?;")? - .with_bindings(&workspace_id)? - .exec()?; + self.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?; - self.prepare( + self.exec_bound( "INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?, ?, ?)", - )? - .with_bindings((&workspace_id, workspace.dock_anchor, workspace.dock_visible))? - .exec()?; + )?((&workspace_id, workspace.dock_anchor, workspace.dock_visible))?; // Save center pane group and dock pane self.save_pane_group(&workspace_id, &workspace.center_group, None)?; @@ -126,11 +118,9 @@ impl Db { iife!({ // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html Ok::<_, anyhow::Error>( - self.prepare( + self.select_bound::( "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", - )? - .with_bindings(limit)? - .rows::()? + )?(limit)? .into_iter() .map(|id| id.paths()) .collect::>>(), diff --git a/crates/db/src/workspace/items.rs b/crates/db/src/workspace/items.rs index 25873a7f9b..9e859ffdad 100644 --- a/crates/db/src/workspace/items.rs +++ b/crates/db/src/workspace/items.rs @@ -3,7 +3,7 @@ use indoc::indoc; use sqlez::migrations::Migration; use crate::{ - model::{ItemId, PaneId, SerializedItem, SerializedItemKind, WorkspaceId}, + model::{PaneId, SerializedItem, SerializedItemKind, WorkspaceId}, Db, }; @@ -29,19 +29,16 @@ pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new( impl Db { pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { - Ok(self - .prepare(indoc! {" + Ok(self.select_bound(indoc! {" SELECT item_id, kind FROM items WHERE pane_id = ? - ORDER BY position"})? - .with_bindings(pane_id)? - .rows::<(ItemId, SerializedItemKind)>()? 
- .into_iter() - .map(|(item_id, kind)| match kind { - SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, - _ => unimplemented!(), - }) - .collect()) + ORDER BY position"})?(pane_id)? + .into_iter() + .map(|(item_id, kind)| match kind { + SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, + _ => unimplemented!(), + }) + .collect()) } pub(crate) fn save_items( @@ -51,19 +48,14 @@ impl Db { items: &[SerializedItem], ) -> Result<()> { let mut delete_old = self - .prepare("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? AND item_id = ?") + .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? AND item_id = ?") .context("Preparing deletion")?; - let mut insert_new = self.prepare( + let mut insert_new = self.exec_bound( "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", ).context("Preparing insertion")?; for (position, item) in items.iter().enumerate() { - delete_old - .with_bindings((workspace_id, pane_id, item.item_id()))? - .exec()?; - - insert_new - .with_bindings((item.item_id(), workspace_id, pane_id, item.kind(), position))? - .exec()?; + delete_old((workspace_id, pane_id, item.item_id()))?; + insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?; } Ok(()) diff --git a/crates/db/src/workspace/model.rs b/crates/db/src/workspace/model.rs index 1d9065f6d9..36099f66e6 100644 --- a/crates/db/src/workspace/model.rs +++ b/crates/db/src/workspace/model.rs @@ -80,8 +80,6 @@ impl Column for DockAnchor { } } -pub(crate) type WorkspaceRow = (WorkspaceId, DockAnchor, bool); - #[derive(Debug, PartialEq, Eq)] pub struct SerializedWorkspace { pub dock_anchor: DockAnchor, @@ -240,23 +238,20 @@ mod tests { workspace_id BLOB, dock_anchor TEXT );"}) - .unwrap(); + .unwrap()() + .unwrap(); let workspace_id: WorkspaceId = WorkspaceId::from(&["\test2", "\test1"]); - db.prepare("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)") - .unwrap() - .with_bindings((&workspace_id, DockAnchor::Bottom)) - .unwrap() - .exec() - .unwrap(); + db.exec_bound("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)") + .unwrap()((&workspace_id, DockAnchor::Bottom)) + .unwrap(); assert_eq!( - db.prepare("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1") - .unwrap() - .row::<(WorkspaceId, DockAnchor)>() - .unwrap(), - (WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom) + db.select_row("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1") + .unwrap()() + .unwrap(), + Some((WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom)) ); } } diff --git a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs index 8528acb8af..24d6a3f938 100644 --- a/crates/db/src/workspace/pane.rs +++ b/crates/db/src/workspace/pane.rs @@ -1,6 +1,6 @@ use anyhow::{bail, Context, Result}; use indoc::indoc; -use sqlez::{migrations::Migration, statement::Statement}; +use sqlez::migrations::Migration; use util::unzip_option; use crate::model::{Axis, GroupId, PaneId, SerializedPane}; @@ -39,38 +39,29 @@ impl Db { &self, workspace_id: &WorkspaceId, ) -> Result { - let mut query = self.prepare(indoc! 
{" - SELECT group_id, axis, pane_id - FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id - FROM pane_groups - UNION - SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id - FROM panes - -- Remove the dock panes from the union - WHERE parent_group_id IS NOT NULL and position IS NOT NULL) - WHERE parent_group_id IS ? AND workspace_id = ? - ORDER BY position - "})?; - - self.get_pane_group_children(workspace_id, None, &mut query)? + self.get_pane_group_children(workspace_id, None)? .into_iter() .next() .context("No center pane group") } - fn get_pane_group_children( + fn get_pane_group_children<'a>( &self, workspace_id: &WorkspaceId, group_id: Option, - query: &mut Statement, ) -> Result> { - let children = query.with_bindings((group_id, workspace_id))?.rows::<( - Option, - Option, - Option, - )>()?; - - children + self.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! {" + SELECT group_id, axis, pane_id + FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id + FROM pane_groups + UNION + SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id + FROM panes + -- Remove the dock panes from the union + WHERE parent_group_id IS NOT NULL and position IS NOT NULL) + WHERE parent_group_id IS ? AND workspace_id = ? + ORDER BY position + "})?((group_id, workspace_id))? .into_iter() .map(|(group_id, axis, pane_id)| { if let Some((group_id, axis)) = group_id.zip(axis) { @@ -79,7 +70,6 @@ impl Db { children: self.get_pane_group_children( workspace_id, Some(group_id), - query, )?, }) } else if let Some(pane_id) = pane_id { @@ -107,9 +97,8 @@ impl Db { match pane_group { SerializedPaneGroup::Group { axis, children } => { - let parent_id = self.prepare("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? - .with_bindings((workspace_id, parent_id, position, *axis))? - .insert()? as GroupId; + let parent_id = self.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? + ((workspace_id, parent_id, position, *axis))?; for (position, group) in children.iter().enumerate() { self.save_pane_group(workspace_id, group, Some((parent_id, position)))? @@ -121,12 +110,12 @@ impl Db { } pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { - let pane_id = self - .prepare(indoc! {" + let pane_id = self.select_row_bound(indoc! {" SELECT pane_id FROM panes - WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})? - .with_bindings(workspace_id)? - .row::()?; + WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?( + workspace_id, + )? + .context("No dock pane for workspace")?; Ok(SerializedPane::new( self.get_items(pane_id).context("Reading items")?, @@ -141,10 +130,9 @@ impl Db { ) -> Result<()> { let (parent_id, order) = unzip_option(parent); - let pane_id = self - .prepare("INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)")? - .with_bindings((workspace_id, parent_id, order))? - .insert()? 
as PaneId; + let pane_id = self.insert_bound( + "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)", + )?((workspace_id, parent_id, order))?; self.save_items(workspace_id, pane_id, &pane.children) .context("Saving items") diff --git a/crates/db/test.db b/crates/db/test.db index 09a0bc8f1126715177b2517c92827685e8b0fb7c..cedefe5f832586d90e62e9a50c9e8c7506cf81e7 100644 GIT binary patch delta 64 zcmZoTz|?SnX@ayM9|Hpe7Xt$DP1G@Fd$4SU}#`vXkuk(I(fgpITCkKKmY)W Cy$v-0 delta 64 zcmZoTz|?SnX@ayMHvd$4WU|?WnY;0v>HhI6lITCkKKmY)V Cat$&7 diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 04a12cfc97..b673167c86 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -6,8 +6,6 @@ use std::{ use anyhow::{anyhow, Result}; use libsqlite3_sys::*; -use crate::statement::Statement; - pub struct Connection { pub(crate) sqlite3: *mut sqlite3, persistent: bool, @@ -60,30 +58,6 @@ impl Connection { unsafe { sqlite3_last_insert_rowid(self.sqlite3) } } - pub fn insert(&self, query: impl AsRef) -> Result { - self.exec(query)?; - Ok(self.last_insert_id()) - } - - pub fn exec(&self, query: impl AsRef) -> Result<()> { - unsafe { - sqlite3_exec( - self.sqlite3, - CString::new(query.as_ref())?.as_ptr(), - None, - 0 as *mut _, - 0 as *mut _, - ); - sqlite3_errcode(self.sqlite3); - self.last_error()?; - } - Ok(()) - } - - pub fn prepare>(&self, query: T) -> Result { - Statement::prepare(&self, query) - } - pub fn backup_main(&self, destination: &Connection) -> Result<()> { unsafe { let backup = sqlite3_backup_init( @@ -136,7 +110,7 @@ mod test { use anyhow::Result; use indoc::indoc; - use crate::{connection::Connection, migrations::Migration}; + use crate::connection::Connection; #[test] fn string_round_trips() -> Result<()> { @@ -146,25 +120,19 @@ mod test { CREATE TABLE text ( text TEXT );"}) - .unwrap(); + .unwrap()() + .unwrap(); let text = "Some test text"; connection - .prepare("INSERT INTO text (text) VALUES (?);") - .unwrap() - .with_bindings(text) - .unwrap() - .exec() - .unwrap(); + .insert_bound("INSERT INTO text (text) VALUES (?);") + .unwrap()(text) + .unwrap(); assert_eq!( - &connection - .prepare("SELECT text FROM text;") - .unwrap() - .row::() - .unwrap(), - text + connection.select_row("SELECT text FROM text;").unwrap()().unwrap(), + Some(text.to_string()) ); Ok(()) @@ -180,32 +148,26 @@ mod test { integer INTEGER, blob BLOB );"}) - .unwrap(); + .unwrap()() + .unwrap(); let tuple1 = ("test".to_string(), 64, vec![0, 1, 2, 4, 8, 16, 32, 64]); let tuple2 = ("test2".to_string(), 32, vec![64, 32, 16, 8, 4, 2, 1, 0]); let mut insert = connection - .prepare("INSERT INTO test (text, integer, blob) VALUES (?, ?, ?)") + .insert_bound::<(String, usize, Vec)>( + "INSERT INTO test (text, integer, blob) VALUES (?, ?, ?)", + ) .unwrap(); - insert - .with_bindings(tuple1.clone()) - .unwrap() - .exec() - .unwrap(); - insert - .with_bindings(tuple2.clone()) - .unwrap() - .exec() - .unwrap(); + insert(tuple1.clone()).unwrap(); + insert(tuple2.clone()).unwrap(); assert_eq!( connection - .prepare("SELECT * FROM test") - .unwrap() - .rows::<(String, usize, Vec)>() - .unwrap(), + .select::<(String, usize, Vec)>("SELECT * FROM test") + .unwrap()() + .unwrap(), vec![tuple1, tuple2] ); } @@ -219,23 +181,20 @@ mod test { t INTEGER, f INTEGER );"}) - .unwrap(); + .unwrap()() + .unwrap(); connection - .prepare("INSERT INTO bools(t, f) VALUES (?, ?);") - .unwrap() - .with_bindings((true, false)) - .unwrap() - .exec() - .unwrap(); + .insert_bound("INSERT 
INTO bools(t, f) VALUES (?, ?);") + .unwrap()((true, false)) + .unwrap(); assert_eq!( - &connection - .prepare("SELECT * FROM bools;") - .unwrap() - .row::<(bool, bool)>() - .unwrap(), - &(true, false) + connection + .select_row::<(bool, bool)>("SELECT * FROM bools;") + .unwrap()() + .unwrap(), + Some((true, false)) ); } @@ -247,13 +206,13 @@ mod test { CREATE TABLE blobs ( data BLOB );"}) - .unwrap(); - let blob = &[0, 1, 2, 4, 8, 16, 32, 64]; - let mut write = connection1 - .prepare("INSERT INTO blobs (data) VALUES (?);") - .unwrap(); - write.bind_blob(1, blob).unwrap(); - write.exec().unwrap(); + .unwrap()() + .unwrap(); + let blob = vec![0, 1, 2, 4, 8, 16, 32, 64]; + connection1 + .insert_bound::>("INSERT INTO blobs (data) VALUES (?);") + .unwrap()(blob.clone()) + .unwrap(); // Backup connection1 to connection2 let connection2 = Connection::open_memory("backup_works_other"); @@ -261,40 +220,36 @@ mod test { // Delete the added blob and verify its deleted on the other side let read_blobs = connection1 - .prepare("SELECT * FROM blobs;") - .unwrap() - .rows::>() - .unwrap(); + .select::>("SELECT * FROM blobs;") + .unwrap()() + .unwrap(); assert_eq!(read_blobs, vec![blob]); } #[test] - fn test_kv_store() -> anyhow::Result<()> { - let connection = Connection::open_memory("kv_store"); + fn multi_step_statement_works() { + let connection = Connection::open_memory("multi_step_statement_works"); - Migration::new( - "kv", - &["CREATE TABLE kv_store( - key TEXT PRIMARY KEY, - value TEXT NOT NULL - ) STRICT;"], - ) - .run(&connection) + connection + .exec(indoc! {" + CREATE TABLE test ( + col INTEGER + )"}) + .unwrap()() .unwrap(); - let mut stmt = connection.prepare("INSERT INTO kv_store(key, value) VALUES(?, ?)")?; - stmt.bind_text(1, "a").unwrap(); - stmt.bind_text(2, "b").unwrap(); - stmt.exec().unwrap(); - let id = connection.last_insert_id(); + connection + .exec(indoc! {" + INSERT INTO test(col) VALUES (2)"}) + .unwrap()() + .unwrap(); - let res = connection - .prepare("SELECT key, value FROM kv_store WHERE rowid = ?")? - .with_bindings(id)? - .row::<(String, String)>()?; - - assert_eq!(res, ("a".to_string(), "b".to_string())); - - Ok(()) + assert_eq!( + connection + .select_row::("SELECt * FROM test") + .unwrap()() + .unwrap(), + Some(2) + ); } } diff --git a/crates/sqlez/src/lib.rs b/crates/sqlez/src/lib.rs index 3bed7a06cb..155fb28901 100644 --- a/crates/sqlez/src/lib.rs +++ b/crates/sqlez/src/lib.rs @@ -4,3 +4,4 @@ pub mod migrations; pub mod savepoint; pub mod statement; pub mod thread_safe_connection; +pub mod typed_statements; diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 9f3bd333ca..89eaebb494 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -18,7 +18,7 @@ const MIGRATIONS_MIGRATION: Migration = Migration::new( domain TEXT, step INTEGER, migration TEXT - ); + ) "}], ); @@ -34,24 +34,26 @@ impl Migration { } fn run_unchecked(&self, connection: &Connection) -> Result<()> { - connection.exec(self.migrations.join(";\n")) + for migration in self.migrations { + connection.exec(migration)?()?; + } + + Ok(()) } pub fn run(&self, connection: &Connection) -> Result<()> { // Setup the migrations table unconditionally MIGRATIONS_MIGRATION.run_unchecked(connection)?; - let completed_migrations = connection - .prepare(indoc! {" - SELECT domain, step, migration FROM migrations - WHERE domain = ? - ORDER BY step - "})? - .with_bindings(self.domain)? 
- .rows::<(String, usize, String)>()?; + let completed_migrations = + connection.select_bound::<&str, (String, usize, String)>(indoc! {" + SELECT domain, step, migration FROM migrations + WHERE domain = ? + ORDER BY step + "})?(self.domain)?; let mut store_completed_migration = connection - .prepare("INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)")?; + .insert_bound("INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)")?; for (index, migration) in self.migrations.iter().enumerate() { if let Some((_, _, completed_migration)) = completed_migrations.get(index) { @@ -70,10 +72,8 @@ impl Migration { } } - connection.exec(migration)?; - store_completed_migration - .with_bindings((self.domain, index, *migration))? - .exec()?; + connection.exec(migration)?()?; + store_completed_migration((self.domain, index, *migration))?; } Ok(()) @@ -97,17 +97,16 @@ mod test { CREATE TABLE test1 ( a TEXT, b TEXT - );"}], + )"}], ); migration.run(&connection).unwrap(); // Verify it got added to the migrations table assert_eq!( &connection - .prepare("SELECT (migration) FROM migrations") - .unwrap() - .rows::() - .unwrap()[..], + .select::("SELECT (migration) FROM migrations") + .unwrap()() + .unwrap()[..], migration.migrations ); @@ -117,22 +116,21 @@ mod test { CREATE TABLE test1 ( a TEXT, b TEXT - );"}, + )"}, indoc! {" CREATE TABLE test2 ( c TEXT, d TEXT - );"}, + )"}, ]; migration.run(&connection).unwrap(); // Verify it is also added to the migrations table assert_eq!( &connection - .prepare("SELECT (migration) FROM migrations") - .unwrap() - .rows::() - .unwrap()[..], + .select::("SELECT (migration) FROM migrations") + .unwrap()() + .unwrap()[..], migration.migrations ); } @@ -142,15 +140,17 @@ mod test { let connection = Connection::open_memory("migration_setup_works"); connection - .exec(indoc! {"CREATE TABLE IF NOT EXISTS migrations ( + .exec(indoc! {" + CREATE TABLE IF NOT EXISTS migrations ( domain TEXT, step INTEGER, migration TEXT );"}) - .unwrap(); + .unwrap()() + .unwrap(); let mut store_completed_migration = connection - .prepare(indoc! {" + .insert_bound::<(&str, usize, String)>(indoc! {" INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)"}) .unwrap(); @@ -159,14 +159,11 @@ mod test { for i in 0..5 { // Create a table forcing a schema change connection - .exec(format!("CREATE TABLE table{} ( test TEXT );", i)) - .unwrap(); + .exec(&format!("CREATE TABLE table{} ( test TEXT );", i)) + .unwrap()() + .unwrap(); - store_completed_migration - .with_bindings((domain, i, i.to_string())) - .unwrap() - .exec() - .unwrap(); + store_completed_migration((domain, i, i.to_string())).unwrap(); } } @@ -180,46 +177,49 @@ mod test { // Manually create the table for that migration with a row connection .exec(indoc! {" - CREATE TABLE test_table ( - test_column INTEGER - ); - INSERT INTO test_table (test_column) VALUES (1)"}) - .unwrap(); + CREATE TABLE test_table ( + test_column INTEGER + );"}) + .unwrap()() + .unwrap(); + connection + .exec(indoc! 
{" + INSERT INTO test_table (test_column) VALUES (1);"}) + .unwrap()() + .unwrap(); assert_eq!( connection - .prepare("SELECT * FROM test_table") - .unwrap() - .row::() - .unwrap(), - 1 + .select_row::("SELECT * FROM test_table") + .unwrap()() + .unwrap(), + Some(1) ); // Run the migration verifying that the row got dropped migration.run(&connection).unwrap(); assert_eq!( connection - .prepare("SELECT * FROM test_table") - .unwrap() - .rows::() - .unwrap(), - Vec::new() + .select_row::("SELECT * FROM test_table") + .unwrap()() + .unwrap(), + None ); // Recreate the dropped row connection .exec("INSERT INTO test_table (test_column) VALUES (2)") - .unwrap(); + .unwrap()() + .unwrap(); // Run the same migration again and verify that the table was left unchanged migration.run(&connection).unwrap(); assert_eq!( connection - .prepare("SELECT * FROM test_table") - .unwrap() - .row::() - .unwrap(), - 2 + .select_row::("SELECT * FROM test_table") + .unwrap()() + .unwrap(), + Some(2) ); } diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs index ba4b1e774b..b78358deb9 100644 --- a/crates/sqlez/src/savepoint.rs +++ b/crates/sqlez/src/savepoint.rs @@ -1,4 +1,5 @@ use anyhow::Result; +use indoc::{formatdoc, indoc}; use crate::connection::Connection; @@ -10,16 +11,17 @@ impl Connection { where F: FnOnce() -> Result, { - let name = name.as_ref().to_owned(); - self.exec(format!("SAVEPOINT {}", &name))?; + let name = name.as_ref(); + self.exec(&format!("SAVEPOINT {name}"))?()?; let result = f(); match result { Ok(_) => { - self.exec(format!("RELEASE {}", name))?; + self.exec(&format!("RELEASE {name}"))?()?; } Err(_) => { - self.exec(format!("ROLLBACK TO {}", name))?; - self.exec(format!("RELEASE {}", name))?; + self.exec(&formatdoc! {" + ROLLBACK TO {name}; + RELEASE {name}"})?()?; } } result @@ -32,16 +34,17 @@ impl Connection { where F: FnOnce() -> Result>, { - let name = name.as_ref().to_owned(); - self.exec(format!("SAVEPOINT {}", &name))?; + let name = name.as_ref(); + self.exec(&format!("SAVEPOINT {name}"))?()?; let result = f(); match result { Ok(Some(_)) => { - self.exec(format!("RELEASE {}", name))?; + self.exec(&format!("RELEASE {name}"))?()?; } Ok(None) | Err(_) => { - self.exec(format!("ROLLBACK TO {}", name))?; - self.exec(format!("RELEASE {}", name))?; + self.exec(&formatdoc! {" + ROLLBACK TO {name}; + RELEASE {name}"})?()?; } } result @@ -64,28 +67,25 @@ mod tests { text TEXT, idx INTEGER );"}) - .unwrap(); + .unwrap()() + .unwrap(); let save1_text = "test save1"; let save2_text = "test save2"; connection.with_savepoint("first", || { - connection - .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .with_bindings((save1_text, 1))? - .exec()?; + connection.exec_bound("INSERT INTO text(text, idx) VALUES (?, ?)")?((save1_text, 1))?; assert!(connection .with_savepoint("second", || -> Result, anyhow::Error> { - connection - .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .with_bindings((save2_text, 2))? - .exec()?; + connection.exec_bound("INSERT INTO text(text, idx) VALUES (?, ?)")?(( + save2_text, 2, + ))?; assert_eq!( connection - .prepare("SELECT text FROM text ORDER BY text.idx ASC")? - .rows::()?, + .select::("SELECT text FROM text ORDER BY text.idx ASC")?( + )?, vec![save1_text, save2_text], ); @@ -95,22 +95,17 @@ mod tests { .is_some()); assert_eq!( - connection - .prepare("SELECT text FROM text ORDER BY text.idx ASC")? 
- .rows::()?, + connection.select::("SELECT text FROM text ORDER BY text.idx ASC")?()?, vec![save1_text], ); connection.with_savepoint_rollback::<(), _>("second", || { - connection - .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .with_bindings((save2_text, 2))? - .exec()?; + connection.exec_bound("INSERT INTO text(text, idx) VALUES (?, ?)")?(( + save2_text, 2, + ))?; assert_eq!( - connection - .prepare("SELECT text FROM text ORDER BY text.idx ASC")? - .rows::()?, + connection.select::("SELECT text FROM text ORDER BY text.idx ASC")?()?, vec![save1_text, save2_text], ); @@ -118,22 +113,17 @@ mod tests { })?; assert_eq!( - connection - .prepare("SELECT text FROM text ORDER BY text.idx ASC")? - .rows::()?, + connection.select::("SELECT text FROM text ORDER BY text.idx ASC")?()?, vec![save1_text], ); connection.with_savepoint_rollback("second", || { - connection - .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .with_bindings((save2_text, 2))? - .exec()?; + connection.exec_bound("INSERT INTO text(text, idx) VALUES (?, ?)")?(( + save2_text, 2, + ))?; assert_eq!( - connection - .prepare("SELECT text FROM text ORDER BY text.idx ASC")? - .rows::()?, + connection.select::("SELECT text FROM text ORDER BY text.idx ASC")?()?, vec![save1_text, save2_text], ); @@ -141,9 +131,7 @@ mod tests { })?; assert_eq!( - connection - .prepare("SELECT text FROM text ORDER BY text.idx ASC")? - .rows::()?, + connection.select::("SELECT text FROM text ORDER BY text.idx ASC")?()?, vec![save1_text, save2_text], ); @@ -151,9 +139,7 @@ mod tests { })?; assert_eq!( - connection - .prepare("SELECT text FROM text ORDER BY text.idx ASC")? - .rows::()?, + connection.select::("SELECT text FROM text ORDER BY text.idx ASC")?()?, vec![save1_text, save2_text], ); diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index f0de8703ab..e0b284e628 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -1,6 +1,6 @@ -use std::ffi::{c_int, CString}; +use std::ffi::{c_int, CStr, CString}; use std::marker::PhantomData; -use std::{slice, str}; +use std::{ptr, slice, str}; use anyhow::{anyhow, Context, Result}; use libsqlite3_sys::*; @@ -9,7 +9,8 @@ use crate::bindable::{Bind, Column}; use crate::connection::Connection; pub struct Statement<'a> { - raw_statement: *mut sqlite3_stmt, + raw_statements: Vec<*mut sqlite3_stmt>, + current_statement: usize, connection: &'a Connection, phantom: PhantomData, } @@ -34,19 +35,31 @@ pub enum SqlType { impl<'a> Statement<'a> { pub fn prepare>(connection: &'a Connection, query: T) -> Result { let mut statement = Self { - raw_statement: 0 as *mut _, + raw_statements: Default::default(), + current_statement: 0, connection, phantom: PhantomData, }; unsafe { - sqlite3_prepare_v2( - connection.sqlite3, - CString::new(query.as_ref())?.as_ptr(), - -1, - &mut statement.raw_statement, - 0 as *mut _, - ); + let sql = CString::new(query.as_ref())?; + let mut remaining_sql = sql.as_c_str(); + while { + let remaining_sql_str = remaining_sql.to_str()?; + remaining_sql_str.trim() != ";" && !remaining_sql_str.is_empty() + } { + let mut raw_statement = 0 as *mut sqlite3_stmt; + let mut remaining_sql_ptr = ptr::null(); + sqlite3_prepare_v2( + connection.sqlite3, + remaining_sql.as_ptr(), + -1, + &mut raw_statement, + &mut remaining_sql_ptr, + ); + remaining_sql = CStr::from_ptr(remaining_sql_ptr); + statement.raw_statements.push(raw_statement); + } connection .last_error() @@ -56,131 +69,138 @@ impl<'a> Statement<'a> { Ok(statement) } + fn 
current_statement(&self) -> *mut sqlite3_stmt { + *self.raw_statements.get(self.current_statement).unwrap() + } + pub fn reset(&mut self) { unsafe { - sqlite3_reset(self.raw_statement); + for raw_statement in self.raw_statements.iter() { + sqlite3_reset(*raw_statement); + } } + self.current_statement = 0; } pub fn parameter_count(&self) -> i32 { - unsafe { sqlite3_bind_parameter_count(self.raw_statement) } + unsafe { + self.raw_statements + .iter() + .map(|raw_statement| sqlite3_bind_parameter_count(*raw_statement)) + .max() + .unwrap_or(0) + } } pub fn bind_blob(&self, index: i32, blob: &[u8]) -> Result<()> { - // dbg!("bind blob", index); let index = index as c_int; let blob_pointer = blob.as_ptr() as *const _; let len = blob.len() as c_int; unsafe { - sqlite3_bind_blob( - self.raw_statement, - index, - blob_pointer, - len, - SQLITE_TRANSIENT(), - ); + for raw_statement in self.raw_statements.iter() { + sqlite3_bind_blob(*raw_statement, index, blob_pointer, len, SQLITE_TRANSIENT()); + } } self.connection.last_error() } pub fn column_blob<'b>(&'b mut self, index: i32) -> Result<&'b [u8]> { let index = index as c_int; - let pointer = unsafe { sqlite3_column_blob(self.raw_statement, index) }; + let pointer = unsafe { sqlite3_column_blob(self.current_statement(), index) }; self.connection.last_error()?; if pointer.is_null() { return Ok(&[]); } - let len = unsafe { sqlite3_column_bytes(self.raw_statement, index) as usize }; + let len = unsafe { sqlite3_column_bytes(self.current_statement(), index) as usize }; self.connection.last_error()?; unsafe { Ok(slice::from_raw_parts(pointer as *const u8, len)) } } pub fn bind_double(&self, index: i32, double: f64) -> Result<()> { - // dbg!("bind double", index); let index = index as c_int; unsafe { - sqlite3_bind_double(self.raw_statement, index, double); + for raw_statement in self.raw_statements.iter() { + sqlite3_bind_double(*raw_statement, index, double); + } } self.connection.last_error() } pub fn column_double(&self, index: i32) -> Result { let index = index as c_int; - let result = unsafe { sqlite3_column_double(self.raw_statement, index) }; + let result = unsafe { sqlite3_column_double(self.current_statement(), index) }; self.connection.last_error()?; Ok(result) } pub fn bind_int(&self, index: i32, int: i32) -> Result<()> { - // dbg!("bind int", index); let index = index as c_int; unsafe { - sqlite3_bind_int(self.raw_statement, index, int); + for raw_statement in self.raw_statements.iter() { + sqlite3_bind_int(*raw_statement, index, int); + } }; self.connection.last_error() } pub fn column_int(&self, index: i32) -> Result { let index = index as c_int; - let result = unsafe { sqlite3_column_int(self.raw_statement, index) }; + let result = unsafe { sqlite3_column_int(self.current_statement(), index) }; self.connection.last_error()?; Ok(result) } pub fn bind_int64(&self, index: i32, int: i64) -> Result<()> { - // dbg!("bind int64", index); let index = index as c_int; unsafe { - sqlite3_bind_int64(self.raw_statement, index, int); + for raw_statement in self.raw_statements.iter() { + sqlite3_bind_int64(*raw_statement, index, int); + } } self.connection.last_error() } pub fn column_int64(&self, index: i32) -> Result { let index = index as c_int; - let result = unsafe { sqlite3_column_int64(self.raw_statement, index) }; + let result = unsafe { sqlite3_column_int64(self.current_statement(), index) }; self.connection.last_error()?; Ok(result) } pub fn bind_null(&self, index: i32) -> Result<()> { - // dbg!("bind null", index); let index = index as c_int; 
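        // Editor's note, not part of the original patch: as in the other
        // bind_* methods above, the value is bound into every statement in
        // `raw_statements`. A multi-statement SQL string is prepared into
        // several sqlite3_stmt handles, but callers treat it as one logical
        // statement with a single set of parameters, so each handle has to
        // see the same bindings.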
unsafe { - sqlite3_bind_null(self.raw_statement, index); + for raw_statement in self.raw_statements.iter() { + sqlite3_bind_null(*raw_statement, index); + } } self.connection.last_error() } pub fn bind_text(&self, index: i32, text: &str) -> Result<()> { - // dbg!("bind text", index, text); let index = index as c_int; let text_pointer = text.as_ptr() as *const _; let len = text.len() as c_int; unsafe { - sqlite3_bind_text( - self.raw_statement, - index, - text_pointer, - len, - SQLITE_TRANSIENT(), - ); + for raw_statement in self.raw_statements.iter() { + sqlite3_bind_text(*raw_statement, index, text_pointer, len, SQLITE_TRANSIENT()); + } } self.connection.last_error() } pub fn column_text<'b>(&'b mut self, index: i32) -> Result<&'b str> { let index = index as c_int; - let pointer = unsafe { sqlite3_column_text(self.raw_statement, index) }; + let pointer = unsafe { sqlite3_column_text(self.current_statement(), index) }; self.connection.last_error()?; if pointer.is_null() { return Ok(""); } - let len = unsafe { sqlite3_column_bytes(self.raw_statement, index) as usize }; + let len = unsafe { sqlite3_column_bytes(self.current_statement(), index) as usize }; self.connection.last_error()?; let slice = unsafe { slice::from_raw_parts(pointer as *const u8, len) }; @@ -198,7 +218,7 @@ impl<'a> Statement<'a> { } pub fn column_type(&mut self, index: i32) -> Result { - let result = unsafe { sqlite3_column_type(self.raw_statement, index) }; // SELECT FROM TABLE + let result = unsafe { sqlite3_column_type(self.current_statement(), index) }; self.connection.last_error()?; match result { SQLITE_INTEGER => Ok(SqlType::Integer), @@ -217,9 +237,16 @@ impl<'a> Statement<'a> { fn step(&mut self) -> Result { unsafe { - match sqlite3_step(self.raw_statement) { + match sqlite3_step(self.current_statement()) { SQLITE_ROW => Ok(StepResult::Row), - SQLITE_DONE => Ok(StepResult::Done), + SQLITE_DONE => { + if self.current_statement >= self.raw_statements.len() - 1 { + Ok(StepResult::Done) + } else { + self.current_statement += 1; + self.step() + } + } SQLITE_MISUSE => Ok(StepResult::Misuse), other => self .connection @@ -311,7 +338,11 @@ impl<'a> Statement<'a> { impl<'a> Drop for Statement<'a> { fn drop(&mut self) { - unsafe { sqlite3_finalize(self.raw_statement) }; + unsafe { + for raw_statement in self.raw_statements.iter() { + sqlite3_finalize(*raw_statement); + } + } } } @@ -319,7 +350,10 @@ impl<'a> Drop for Statement<'a> { mod test { use indoc::indoc; - use crate::{connection::Connection, statement::StepResult}; + use crate::{ + connection::Connection, + statement::{Statement, StepResult}, + }; #[test] fn blob_round_trips() { @@ -327,28 +361,28 @@ mod test { connection1 .exec(indoc! 
{" CREATE TABLE blobs ( - data BLOB - );"}) - .unwrap(); + data BLOB + )"}) + .unwrap()() + .unwrap(); let blob = &[0, 1, 2, 4, 8, 16, 32, 64]; - let mut write = connection1 - .prepare("INSERT INTO blobs (data) VALUES (?);") - .unwrap(); + let mut write = + Statement::prepare(&connection1, "INSERT INTO blobs (data) VALUES (?)").unwrap(); write.bind_blob(1, blob).unwrap(); assert_eq!(write.step().unwrap(), StepResult::Done); // Read the blob from the let connection2 = Connection::open_memory("blob_round_trips"); - let mut read = connection2.prepare("SELECT * FROM blobs;").unwrap(); + let mut read = Statement::prepare(&connection2, "SELECT * FROM blobs").unwrap(); assert_eq!(read.step().unwrap(), StepResult::Row); assert_eq!(read.column_blob(0).unwrap(), blob); assert_eq!(read.step().unwrap(), StepResult::Done); // Delete the added blob and verify its deleted on the other side - connection2.exec("DELETE FROM blobs;").unwrap(); - let mut read = connection1.prepare("SELECT * FROM blobs;").unwrap(); + connection2.exec("DELETE FROM blobs").unwrap()().unwrap(); + let mut read = Statement::prepare(&connection1, "SELECT * FROM blobs").unwrap(); assert_eq!(read.step().unwrap(), StepResult::Done); } @@ -359,32 +393,25 @@ mod test { .exec(indoc! {" CREATE TABLE texts ( text TEXT - );"}) - .unwrap(); + )"}) + .unwrap()() + .unwrap(); assert!(connection - .prepare("SELECT text FROM texts") - .unwrap() - .maybe_row::() - .unwrap() - .is_none()); + .select_row::("SELECT text FROM texts") + .unwrap()() + .unwrap() + .is_none()); let text_to_insert = "This is a test"; connection - .prepare("INSERT INTO texts VALUES (?)") - .unwrap() - .with_bindings(text_to_insert) - .unwrap() - .exec() - .unwrap(); + .exec_bound("INSERT INTO texts VALUES (?)") + .unwrap()(text_to_insert) + .unwrap(); assert_eq!( - connection - .prepare("SELECT text FROM texts") - .unwrap() - .maybe_row::() - .unwrap(), + connection.select_row("SELECT text FROM texts").unwrap()().unwrap(), Some(text_to_insert.to_string()) ); } diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index f4f759cd6c..45e22e4b3f 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -79,7 +79,8 @@ impl Deref for ThreadSafeConnection { connection.exec(initialize_query).expect(&format!( "Initialize query failed to execute: {}", initialize_query - )); + ))() + .unwrap(); } if let Some(migrations) = self.migrations { diff --git a/crates/sqlez/src/typed_statements.rs b/crates/sqlez/src/typed_statements.rs new file mode 100644 index 0000000000..f2d66a781f --- /dev/null +++ b/crates/sqlez/src/typed_statements.rs @@ -0,0 +1,67 @@ +use anyhow::Result; + +use crate::{ + bindable::{Bind, Column}, + connection::Connection, + statement::Statement, +}; + +impl Connection { + pub fn exec<'a>(&'a self, query: &str) -> Result Result<()>> { + let mut statement = Statement::prepare(&self, query)?; + Ok(move || statement.exec()) + } + + pub fn exec_bound<'a, B: Bind>( + &'a self, + query: &str, + ) -> Result Result<()>> { + let mut statement = Statement::prepare(&self, query)?; + Ok(move |bindings| statement.with_bindings(bindings)?.exec()) + } + + pub fn insert<'a>(&'a self, query: &str) -> Result Result> { + let mut statement = Statement::prepare(&self, query)?; + Ok(move || statement.insert()) + } + + pub fn insert_bound<'a, B: Bind>( + &'a self, + query: &str, + ) -> Result Result> { + let mut statement = Statement::prepare(&self, query)?; + Ok(move |bindings| 
+
+    pub fn select<'a, C: Column>(
+        &'a self,
+        query: &str,
+    ) -> Result<impl 'a + FnMut() -> Result<Vec<C>>> {
+        let mut statement = Statement::prepare(&self, query)?;
+        Ok(move || statement.rows::<C>())
+    }
+
+    pub fn select_bound<'a, B: Bind, C: Column>(
+        &'a self,
+        query: &str,
+    ) -> Result<impl 'a + FnMut(B) -> Result<Vec<C>>> {
+        let mut statement = Statement::prepare(&self, query)?;
+        Ok(move |bindings| statement.with_bindings(bindings)?.rows::<C>())
+    }
+
+    pub fn select_row<'a, C: Column>(
+        &'a self,
+        query: &str,
+    ) -> Result<impl 'a + FnMut() -> Result<Option<C>>> {
+        let mut statement = Statement::prepare(&self, query)?;
+        Ok(move || statement.maybe_row::<C>())
+    }
+
+    pub fn select_row_bound<'a, B: Bind, C: Column>(
+        &'a self,
+        query: &str,
+    ) -> Result<impl 'a + FnMut(B) -> Result<Option<C>>> {
+        let mut statement = Statement::prepare(&self, query)?;
+        Ok(move |bindings| statement.with_bindings(bindings)?.maybe_row::<C>())
+    }
+}
From c84201fc9fda59f0d71a6fe49c519902135252af Mon Sep 17 00:00:00 2001
From: Mikayla Maki
Date: Thu, 10 Nov 2022 15:29:29 -0800
Subject: [PATCH 151/240] Done first draft of strongly typed migrations

---
 crates/auto_update/src/auto_update.rs      |   8 +-
 crates/client/src/client.rs                |   4 +-
 crates/client/src/telemetry.rs             |   4 +-
 crates/db/examples/serialize-pane.rs       |  46 ----
 crates/db/examples/serialize_workspace.rs  |  30 ---
 crates/db/src/db.rs                        |  48 ++--
 crates/db/src/kvp.rs                       |  13 +-
 crates/db/src/workspace.rs                 | 294 ++++++++++++++++++++-
 crates/db/src/workspace/items.rs           |  63 -----
 crates/db/src/workspace/pane.rs            | 232 ----------------
 crates/project/src/project.rs              |   2 +-
 crates/sqlez/src/domain.rs                 |  39 +++
 crates/sqlez/src/lib.rs                    |   1 +
 crates/sqlez/src/savepoint.rs              |   2 +-
 crates/sqlez/src/thread_safe_connection.rs |  45 ++--
 crates/workspace/src/pane.rs               |   2 +-
 crates/workspace/src/workspace.rs          |   5 +-
 crates/zed/src/main.rs                     |   6 +-
 18 files changed, 396 insertions(+), 448 deletions(-)
 delete mode 100644 crates/db/examples/serialize-pane.rs
 delete mode 100644 crates/db/examples/serialize_workspace.rs
 delete mode 100644 crates/db/src/workspace/items.rs
 delete mode 100644 crates/db/src/workspace/pane.rs
 create mode 100644 crates/sqlez/src/domain.rs

diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs
index 1baf609268..d6eaaab826 100644
--- a/crates/auto_update/src/auto_update.rs
+++ b/crates/auto_update/src/auto_update.rs
@@ -2,7 +2,7 @@ mod update_notification;
 
 use anyhow::{anyhow, Context, Result};
 use client::{http::HttpClient, ZED_SECRET_CLIENT_TOKEN};
-use db::Db;
+use db::{kvp::KeyValue, Db};
 use gpui::{
     actions, platform::AppVersion, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle,
     MutableAppContext, Task, WeakViewHandle,
@@ -42,7 +42,7 @@ pub struct AutoUpdater {
     current_version: AppVersion,
     http_client: Arc<dyn HttpClient>,
     pending_poll: Option<Task<()>>,
-    db: project::Db,
+    db: project::Db<KeyValue>,
     server_url: String,
 }
 
@@ -57,7 +57,7 @@ impl Entity for AutoUpdater {
 }
 
 pub fn init(
-    db: Db,
+    db: Db<KeyValue>,
     http_client: Arc<dyn HttpClient>,
     server_url: String,
     cx: &mut MutableAppContext,
@@ -126,7 +126,7 @@ impl AutoUpdater {
 
     fn new(
         current_version: AppVersion,
-        db: project::Db,
+        db: project::Db<KeyValue>,
         http_client: Arc<dyn HttpClient>,
         server_url: String,
     ) -> Self {
diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs
index c943b27417..907f7e80f1 100644
--- a/crates/client/src/client.rs
+++ b/crates/client/src/client.rs
@@ -11,7 +11,7 @@ use async_tungstenite::tungstenite::{
     error::Error as WebsocketError,
     http::{Request, StatusCode},
 };
-use db::Db;
+use db::{kvp::KeyValue, Db};
 use futures::{future::LocalBoxFuture,
AsyncReadExt, FutureExt, SinkExt, StreamExt, TryStreamExt}; use gpui::{ actions, @@ -1218,7 +1218,7 @@ impl Client { self.peer.respond_with_error(receipt, error) } - pub fn start_telemetry(&self, db: Db) { + pub fn start_telemetry(&self, db: Db) { self.telemetry.start(db.clone()); } diff --git a/crates/client/src/telemetry.rs b/crates/client/src/telemetry.rs index f8e7d161c3..16a7c1cc82 100644 --- a/crates/client/src/telemetry.rs +++ b/crates/client/src/telemetry.rs @@ -1,5 +1,5 @@ use crate::http::HttpClient; -use db::Db; +use db::{kvp::KeyValue, Db}; use gpui::{ executor::Background, serde_json::{self, value::Map, Value}, @@ -148,7 +148,7 @@ impl Telemetry { Some(self.state.lock().log_file.as_ref()?.path().to_path_buf()) } - pub fn start(self: &Arc, db: Db) { + pub fn start(self: &Arc, db: Db) { let this = self.clone(); self.executor .spawn( diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs deleted file mode 100644 index 2f362fb997..0000000000 --- a/crates/db/examples/serialize-pane.rs +++ /dev/null @@ -1,46 +0,0 @@ -use std::{fs::File, path::Path}; - -const TEST_FILE: &'static str = "test-db.db"; - -fn main() -> anyhow::Result<()> { - env_logger::init(); - - let db = db::Db::open_in_memory("db"); - - let file = Path::new(TEST_FILE); - - let f = File::create(file)?; - drop(f); - - // let workspace_1 = db.workspace_for_roots(&["/tmp"]); - // let workspace_2 = db.workspace_for_roots(&["/tmp", "/tmp2"]); - // let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]); - - // db.save_dock_pane( - // &workspace_1.workspace_id, - // &SerializedDockPane { - // anchor_position: DockAnchor::Expanded, - // visible: true, - // }, - // ); - // db.save_dock_pane( - // &workspace_2.workspace_id, - // &SerializedDockPane { - // anchor_position: DockAnchor::Bottom, - // visible: true, - // }, - // ); - // db.save_dock_pane( - // &workspace_3.workspace_id, - // &SerializedDockPane { - // anchor_position: DockAnchor::Right, - // visible: false, - // }, - // ); - - db.write_to(file).ok(); - - println!("Wrote database!"); - - Ok(()) -} diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs deleted file mode 100644 index 9b6082ce53..0000000000 --- a/crates/db/examples/serialize_workspace.rs +++ /dev/null @@ -1,30 +0,0 @@ -use std::{fs::File, path::Path}; - -const TEST_FILE: &'static str = "test-db.db"; - -fn main() -> anyhow::Result<()> { - env_logger::init(); - let db = db::Db::open_in_memory("db"); - - let file = Path::new(TEST_FILE); - - let f = File::create(file)?; - drop(f); - - db.write_kvp("test", "1")?; - db.write_kvp("test-2", "2")?; - - db.workspace_for_roots(&["/tmp1"]); - db.workspace_for_roots(&["/tmp1", "/tmp2"]); - db.workspace_for_roots(&["/tmp1", "/tmp2", "/tmp3"]); - db.workspace_for_roots(&["/tmp2", "/tmp3"]); - db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]); - db.workspace_for_roots(&["/tmp2", "/tmp4"]); - db.workspace_for_roots(&["/tmp2"]); - - db.write_to(file).ok(); - - println!("Wrote database!"); - - Ok(()) -} diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 97dfce0e19..4e348b5614 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -7,18 +7,23 @@ use std::path::Path; use anyhow::Result; use indoc::indoc; -use kvp::KVP_MIGRATION; use sqlez::connection::Connection; +use sqlez::domain::Domain; use sqlez::thread_safe_connection::ThreadSafeConnection; -use workspace::items::ITEM_MIGRATIONS; -use workspace::pane::PANE_MIGRATIONS; pub use workspace::*; -#[derive(Clone)] -pub 
struct Db(ThreadSafeConnection); +const INITIALIZE_QUERY: &'static str = indoc! {" + PRAGMA journal_mode=WAL; + PRAGMA synchronous=NORMAL; + PRAGMA foreign_keys=TRUE; + PRAGMA case_sensitive_like=TRUE; +"}; -impl Deref for Db { +#[derive(Clone)] +pub struct Db(ThreadSafeConnection); + +impl Deref for Db { type Target = sqlez::connection::Connection; fn deref(&self) -> &Self::Target { @@ -26,7 +31,7 @@ impl Deref for Db { } } -impl Db { +impl Db { /// Open or create a database at the given directory path. pub fn open(db_dir: &Path, channel: &'static str) -> Self { // Use 0 for now. Will implement incrementing and clearing of old db files soon TM @@ -35,17 +40,15 @@ impl Db { .expect("Should be able to create the database directory"); let db_path = current_db_dir.join(Path::new("db.sqlite")); - Db(initialize_connection(ThreadSafeConnection::new( - db_path.to_string_lossy().as_ref(), - true, - ))) + Db( + ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) + .with_initialize_query(INITIALIZE_QUERY), + ) } /// Open a in memory database for testing and as a fallback. pub fn open_in_memory(db_name: &str) -> Self { - Db(initialize_connection(ThreadSafeConnection::new( - db_name, false, - ))) + Db(ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY)) } pub fn persisting(&self) -> bool { @@ -56,19 +59,8 @@ impl Db { let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref()); self.backup_main(&destination) } -} -fn initialize_connection(conn: ThreadSafeConnection) -> ThreadSafeConnection { - conn.with_initialize_query(indoc! {" - PRAGMA journal_mode=WAL; - PRAGMA synchronous=NORMAL; - PRAGMA foreign_keys=TRUE; - PRAGMA case_sensitive_like=TRUE; - "}) - .with_migrations(&[ - KVP_MIGRATION, - WORKSPACES_MIGRATION, - PANE_MIGRATIONS, - ITEM_MIGRATIONS, - ]) + pub fn open_as(&self) -> Db { + Db(self.0.for_domain()) + } } diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 6f1230f7b8..c5c9c1c5b5 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -1,7 +1,7 @@ use super::Db; use anyhow::Result; use indoc::indoc; -use sqlez::migrations::Migration; +use sqlez::{connection::Connection, domain::Domain, migrations::Migration}; pub(crate) const KVP_MIGRATION: Migration = Migration::new( "kvp", @@ -13,7 +13,16 @@ pub(crate) const KVP_MIGRATION: Migration = Migration::new( "}], ); -impl Db { +#[derive(Clone)] +pub enum KeyValue {} + +impl Domain for KeyValue { + fn migrate(conn: &Connection) -> anyhow::Result<()> { + KVP_MIGRATION.run(conn) + } +} + +impl Db { pub fn read_kvp(&self, key: &str) -> Result> { self.select_row_bound("SELECT value FROM kv_store WHERE key = (?)")?(key) } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index c4e4873dce..17ff9cf22c 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,14 +1,24 @@ -pub(crate) mod items; pub mod model; -pub(crate) mod pane; -use anyhow::Context; -use util::{iife, ResultExt}; +use anyhow::{bail, Context, Result}; +use util::{iife, unzip_option, ResultExt}; use std::path::{Path, PathBuf}; use indoc::indoc; -use sqlez::migrations::Migration; +use sqlez::{domain::Domain, migrations::Migration}; + +use self::model::{ + Axis, GroupId, PaneId, SerializedItem, SerializedItemKind, SerializedPane, SerializedPaneGroup, + SerializedWorkspace, WorkspaceId, +}; + +use super::Db; + +// 1) Move all of this into Workspace crate +// 2) Deserialize items fully +// 3) Typed prepares (including how you expect to pull data out) +// 4) 
Investigate Tree column impls pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( "workspace", @@ -22,11 +32,58 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( "}], ); -use self::model::{SerializedWorkspace, WorkspaceId}; +pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( + "pane", + &[indoc! {" + CREATE TABLE pane_groups( + group_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + parent_group_id INTEGER, -- NULL indicates that this is a root node + position INTEGER, -- NULL indicates that this is a root node + axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + + CREATE TABLE panes( + pane_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + parent_group_id INTEGER, -- NULL, this is a dock pane + position INTEGER, -- NULL, this is a dock pane + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + "}], +); -use super::Db; +pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new( + "item", + &[indoc! {" + CREATE TABLE items( + item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique + workspace_id BLOB NOT NULL, + pane_id INTEGER NOT NULL, + kind TEXT NOT NULL, + position INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE + PRIMARY KEY(item_id, workspace_id) + ) STRICT; + "}], +); -impl Db { +#[derive(Clone)] +pub enum Workspace {} + +impl Domain for Workspace { + fn migrate(conn: &sqlez::connection::Connection) -> anyhow::Result<()> { + WORKSPACES_MIGRATION.run(&conn)?; + PANE_MIGRATIONS.run(&conn)?; + ITEM_MIGRATIONS.run(&conn) + } +} + +impl Db { /// Returns a serialized workspace for the given worktree_roots. If the passed array /// is empty, the most recent workspace is returned instead. If no workspace for the /// passed roots is stored, returns none. @@ -129,6 +186,142 @@ impl Db { .log_err() .unwrap_or_default() } + + pub(crate) fn get_center_pane_group( + &self, + workspace_id: &WorkspaceId, + ) -> Result { + self.get_pane_group_children(workspace_id, None)? + .into_iter() + .next() + .context("No center pane group") + } + + fn get_pane_group_children<'a>( + &self, + workspace_id: &WorkspaceId, + group_id: Option, + ) -> Result> { + self.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! {" + SELECT group_id, axis, pane_id + FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id + FROM pane_groups + UNION + SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id + FROM panes + -- Remove the dock panes from the union + WHERE parent_group_id IS NOT NULL and position IS NOT NULL) + WHERE parent_group_id IS ? AND workspace_id = ? + ORDER BY position + "})?((group_id, workspace_id))? 
+ .into_iter() + .map(|(group_id, axis, pane_id)| { + if let Some((group_id, axis)) = group_id.zip(axis) { + Ok(SerializedPaneGroup::Group { + axis, + children: self.get_pane_group_children( + workspace_id, + Some(group_id), + )?, + }) + } else if let Some(pane_id) = pane_id { + Ok(SerializedPaneGroup::Pane(SerializedPane { + children: self.get_items(pane_id)?, + })) + } else { + bail!("Pane Group Child was neither a pane group or a pane"); + } + }) + .collect::>() + } + + pub(crate) fn save_pane_group( + &self, + workspace_id: &WorkspaceId, + pane_group: &SerializedPaneGroup, + parent: Option<(GroupId, usize)>, + ) -> Result<()> { + if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { .. }) { + bail!("Pane groups must have a SerializedPaneGroup::Group at the root") + } + + let (parent_id, position) = unzip_option(parent); + + match pane_group { + SerializedPaneGroup::Group { axis, children } => { + let parent_id = self.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? + ((workspace_id, parent_id, position, *axis))?; + + for (position, group) in children.iter().enumerate() { + self.save_pane_group(workspace_id, group, Some((parent_id, position)))? + } + Ok(()) + } + SerializedPaneGroup::Pane(pane) => self.save_pane(workspace_id, pane, parent), + } + } + + pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { + let pane_id = self.select_row_bound(indoc! {" + SELECT pane_id FROM panes + WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?( + workspace_id, + )? + .context("No dock pane for workspace")?; + + Ok(SerializedPane::new( + self.get_items(pane_id).context("Reading items")?, + )) + } + + pub(crate) fn save_pane( + &self, + workspace_id: &WorkspaceId, + pane: &SerializedPane, + parent: Option<(GroupId, usize)>, + ) -> Result<()> { + let (parent_id, order) = unzip_option(parent); + + let pane_id = self.insert_bound( + "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)", + )?((workspace_id, parent_id, order))?; + + self.save_items(workspace_id, pane_id, &pane.children) + .context("Saving items") + } + + pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { + Ok(self.select_bound(indoc! {" + SELECT item_id, kind FROM items + WHERE pane_id = ? + ORDER BY position"})?(pane_id)? + .into_iter() + .map(|(item_id, kind)| match kind { + SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, + _ => unimplemented!(), + }) + .collect()) + } + + pub(crate) fn save_items( + &self, + workspace_id: &WorkspaceId, + pane_id: PaneId, + items: &[SerializedItem], + ) -> Result<()> { + let mut delete_old = self + .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? 
AND item_id = ?") + .context("Preparing deletion")?; + let mut insert_new = self.exec_bound( + "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", + ).context("Preparing insertion")?; + for (position, item) in items.iter().enumerate() { + delete_old((workspace_id, pane_id, item.item_id()))?; + insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?; + } + + Ok(()) + } } #[cfg(test)] @@ -214,4 +407,89 @@ mod tests { workspace_3 ); } + + use crate::model::{SerializedItem, SerializedPane, SerializedPaneGroup}; + + fn default_workspace( + dock_pane: SerializedPane, + center_group: &SerializedPaneGroup, + ) -> SerializedWorkspace { + SerializedWorkspace { + dock_anchor: crate::model::DockAnchor::Right, + dock_visible: false, + center_group: center_group.clone(), + dock_pane, + } + } + + #[test] + fn test_basic_dock_pane() { + env_logger::try_init().ok(); + + let db = Db::open_in_memory("basic_dock_pane"); + + let dock_pane = crate::model::SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 1 }, + SerializedItem::Terminal { item_id: 4 }, + SerializedItem::Terminal { item_id: 2 }, + SerializedItem::Terminal { item_id: 3 }, + ], + }; + + let workspace = default_workspace(dock_pane, &Default::default()); + + db.save_workspace(&["/tmp"], None, &workspace); + + let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); + + assert_eq!(workspace.dock_pane, new_workspace.dock_pane); + } + + #[test] + fn test_simple_split() { + env_logger::try_init().ok(); + + let db = Db::open_in_memory("simple_split"); + + // ----------------- + // | 1,2 | 5,6 | + // | - - - | | + // | 3,4 | | + // ----------------- + let center_pane = SerializedPaneGroup::Group { + axis: crate::model::Axis::Horizontal, + children: vec![ + SerializedPaneGroup::Group { + axis: crate::model::Axis::Vertical, + children: vec![ + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 1 }, + SerializedItem::Terminal { item_id: 2 }, + ], + }), + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 4 }, + SerializedItem::Terminal { item_id: 3 }, + ], + }), + ], + }, + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 5 }, + SerializedItem::Terminal { item_id: 6 }, + ], + }), + ], + }; + + let workspace = default_workspace(Default::default(), ¢er_pane); + + db.save_workspace(&["/tmp"], None, &workspace); + + assert_eq!(workspace.center_group, center_pane); + } } diff --git a/crates/db/src/workspace/items.rs b/crates/db/src/workspace/items.rs deleted file mode 100644 index 9e859ffdad..0000000000 --- a/crates/db/src/workspace/items.rs +++ /dev/null @@ -1,63 +0,0 @@ -use anyhow::{Context, Result}; -use indoc::indoc; -use sqlez::migrations::Migration; - -use crate::{ - model::{PaneId, SerializedItem, SerializedItemKind, WorkspaceId}, - Db, -}; - -// 1) Move all of this into Workspace crate -// 2) Deserialize items fully -// 3) Typed prepares (including how you expect to pull data out) -// 4) Investigate Tree column impls -pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new( - "item", - &[indoc! 
{" - CREATE TABLE items( - item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique - workspace_id BLOB NOT NULL, - pane_id INTEGER NOT NULL, - kind TEXT NOT NULL, - position INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE - PRIMARY KEY(item_id, workspace_id) - ) STRICT; - "}], -); - -impl Db { - pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { - Ok(self.select_bound(indoc! {" - SELECT item_id, kind FROM items - WHERE pane_id = ? - ORDER BY position"})?(pane_id)? - .into_iter() - .map(|(item_id, kind)| match kind { - SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, - _ => unimplemented!(), - }) - .collect()) - } - - pub(crate) fn save_items( - &self, - workspace_id: &WorkspaceId, - pane_id: PaneId, - items: &[SerializedItem], - ) -> Result<()> { - let mut delete_old = self - .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? AND item_id = ?") - .context("Preparing deletion")?; - let mut insert_new = self.exec_bound( - "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", - ).context("Preparing insertion")?; - for (position, item) in items.iter().enumerate() { - delete_old((workspace_id, pane_id, item.item_id()))?; - insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?; - } - - Ok(()) - } -} diff --git a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs deleted file mode 100644 index 24d6a3f938..0000000000 --- a/crates/db/src/workspace/pane.rs +++ /dev/null @@ -1,232 +0,0 @@ -use anyhow::{bail, Context, Result}; -use indoc::indoc; -use sqlez::migrations::Migration; -use util::unzip_option; - -use crate::model::{Axis, GroupId, PaneId, SerializedPane}; - -use super::{ - model::{SerializedPaneGroup, WorkspaceId}, - Db, -}; - -pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( - "pane", - &[indoc! {" - CREATE TABLE pane_groups( - group_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL indicates that this is a root node - position INTEGER, -- NULL indicates that this is a root node - axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE - ) STRICT; - - CREATE TABLE panes( - pane_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL, this is a dock pane - position INTEGER, -- NULL, this is a dock pane - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE - ) STRICT; - "}], -); - -impl Db { - pub(crate) fn get_center_pane_group( - &self, - workspace_id: &WorkspaceId, - ) -> Result { - self.get_pane_group_children(workspace_id, None)? - .into_iter() - .next() - .context("No center pane group") - } - - fn get_pane_group_children<'a>( - &self, - workspace_id: &WorkspaceId, - group_id: Option, - ) -> Result> { - self.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! 
{" - SELECT group_id, axis, pane_id - FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id - FROM pane_groups - UNION - SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id - FROM panes - -- Remove the dock panes from the union - WHERE parent_group_id IS NOT NULL and position IS NOT NULL) - WHERE parent_group_id IS ? AND workspace_id = ? - ORDER BY position - "})?((group_id, workspace_id))? - .into_iter() - .map(|(group_id, axis, pane_id)| { - if let Some((group_id, axis)) = group_id.zip(axis) { - Ok(SerializedPaneGroup::Group { - axis, - children: self.get_pane_group_children( - workspace_id, - Some(group_id), - )?, - }) - } else if let Some(pane_id) = pane_id { - Ok(SerializedPaneGroup::Pane(SerializedPane { - children: self.get_items(pane_id)?, - })) - } else { - bail!("Pane Group Child was neither a pane group or a pane"); - } - }) - .collect::>() - } - - pub(crate) fn save_pane_group( - &self, - workspace_id: &WorkspaceId, - pane_group: &SerializedPaneGroup, - parent: Option<(GroupId, usize)>, - ) -> Result<()> { - if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { .. }) { - bail!("Pane groups must have a SerializedPaneGroup::Group at the root") - } - - let (parent_id, position) = unzip_option(parent); - - match pane_group { - SerializedPaneGroup::Group { axis, children } => { - let parent_id = self.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? - ((workspace_id, parent_id, position, *axis))?; - - for (position, group) in children.iter().enumerate() { - self.save_pane_group(workspace_id, group, Some((parent_id, position)))? - } - Ok(()) - } - SerializedPaneGroup::Pane(pane) => self.save_pane(workspace_id, pane, parent), - } - } - - pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { - let pane_id = self.select_row_bound(indoc! {" - SELECT pane_id FROM panes - WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?( - workspace_id, - )? 
- .context("No dock pane for workspace")?; - - Ok(SerializedPane::new( - self.get_items(pane_id).context("Reading items")?, - )) - } - - pub(crate) fn save_pane( - &self, - workspace_id: &WorkspaceId, - pane: &SerializedPane, - parent: Option<(GroupId, usize)>, - ) -> Result<()> { - let (parent_id, order) = unzip_option(parent); - - let pane_id = self.insert_bound( - "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)", - )?((workspace_id, parent_id, order))?; - - self.save_items(workspace_id, pane_id, &pane.children) - .context("Saving items") - } -} - -#[cfg(test)] -mod tests { - - use crate::{ - model::{SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace}, - Db, - }; - - fn default_workspace( - dock_pane: SerializedPane, - center_group: &SerializedPaneGroup, - ) -> SerializedWorkspace { - SerializedWorkspace { - dock_anchor: crate::model::DockAnchor::Right, - dock_visible: false, - center_group: center_group.clone(), - dock_pane, - } - } - - #[test] - fn test_basic_dock_pane() { - env_logger::try_init().ok(); - - let db = Db::open_in_memory("basic_dock_pane"); - - let dock_pane = crate::model::SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 1 }, - SerializedItem::Terminal { item_id: 4 }, - SerializedItem::Terminal { item_id: 2 }, - SerializedItem::Terminal { item_id: 3 }, - ], - }; - - let workspace = default_workspace(dock_pane, &Default::default()); - - db.save_workspace(&["/tmp"], None, &workspace); - - let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); - - assert_eq!(workspace.dock_pane, new_workspace.dock_pane); - } - - #[test] - fn test_simple_split() { - env_logger::try_init().ok(); - - let db = Db::open_in_memory("simple_split"); - - // ----------------- - // | 1,2 | 5,6 | - // | - - - | | - // | 3,4 | | - // ----------------- - let center_pane = SerializedPaneGroup::Group { - axis: crate::model::Axis::Horizontal, - children: vec![ - SerializedPaneGroup::Group { - axis: crate::model::Axis::Vertical, - children: vec![ - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 1 }, - SerializedItem::Terminal { item_id: 2 }, - ], - }), - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 4 }, - SerializedItem::Terminal { item_id: 3 }, - ], - }), - ], - }, - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 5 }, - SerializedItem::Terminal { item_id: 6 }, - ], - }), - ], - }; - - let workspace = default_workspace(Default::default(), ¢er_pane); - - db.save_workspace(&["/tmp"], None, &workspace); - - assert_eq!(workspace.center_group, center_pane); - } -} diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 08714d6cd3..d1d8c96ce2 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -63,7 +63,7 @@ use std::{ use thiserror::Error; use util::{defer, post_inc, ResultExt, TryFutureExt as _}; -pub use db::Db; +pub use db::{kvp::KeyValue, Db}; pub use fs::*; pub use worktree::*; diff --git a/crates/sqlez/src/domain.rs b/crates/sqlez/src/domain.rs new file mode 100644 index 0000000000..01b17eea31 --- /dev/null +++ b/crates/sqlez/src/domain.rs @@ -0,0 +1,39 @@ +use crate::connection::Connection; + +pub trait Domain: Send + Sync + Clone { + fn migrate(conn: &Connection) -> anyhow::Result<()>; +} + +impl Domain for (D1, D2) { + fn migrate(conn: &Connection) -> anyhow::Result<()> { + D1::migrate(conn)?; + D2::migrate(conn) 
+    }
+}
+
+impl<D1: Domain, D2: Domain, D3: Domain> Domain for (D1, D2, D3) {
+    fn migrate(conn: &Connection) -> anyhow::Result<()> {
+        D1::migrate(conn)?;
+        D2::migrate(conn)?;
+        D3::migrate(conn)
+    }
+}
+
+impl<D1: Domain, D2: Domain, D3: Domain, D4: Domain> Domain for (D1, D2, D3, D4) {
+    fn migrate(conn: &Connection) -> anyhow::Result<()> {
+        D1::migrate(conn)?;
+        D2::migrate(conn)?;
+        D3::migrate(conn)?;
+        D4::migrate(conn)
+    }
+}
+
+impl<D1: Domain, D2: Domain, D3: Domain, D4: Domain, D5: Domain> Domain for (D1, D2, D3, D4, D5) {
+    fn migrate(conn: &Connection) -> anyhow::Result<()> {
+        D1::migrate(conn)?;
+        D2::migrate(conn)?;
+        D3::migrate(conn)?;
+        D4::migrate(conn)?;
+        D5::migrate(conn)
+    }
+}
diff --git a/crates/sqlez/src/lib.rs b/crates/sqlez/src/lib.rs
index 155fb28901..ecebbd2643 100644
--- a/crates/sqlez/src/lib.rs
+++ b/crates/sqlez/src/lib.rs
@@ -1,5 +1,6 @@
 pub mod bindable;
 pub mod connection;
+pub mod domain;
 pub mod migrations;
 pub mod savepoint;
 pub mod statement;
diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs
index b78358deb9..9751aac51d 100644
--- a/crates/sqlez/src/savepoint.rs
+++ b/crates/sqlez/src/savepoint.rs
@@ -1,5 +1,5 @@
 use anyhow::Result;
-use indoc::{formatdoc, indoc};
+use indoc::formatdoc;
 
 use crate::connection::Connection;
 
diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs
index 45e22e4b3f..1081101f6a 100644
--- a/crates/sqlez/src/thread_safe_connection.rs
+++ b/crates/sqlez/src/thread_safe_connection.rs
@@ -1,26 +1,26 @@
-use std::{ops::Deref, sync::Arc};
+use std::{marker::PhantomData, ops::Deref, sync::Arc};
 
 use connection::Connection;
 use thread_local::ThreadLocal;
 
-use crate::{connection, migrations::Migration};
+use crate::{connection, domain::Domain};
 
-pub struct ThreadSafeConnection {
+pub struct ThreadSafeConnection<D: Domain> {
     uri: Arc<str>,
     persistent: bool,
     initialize_query: Option<&'static str>,
-    migrations: Option<&'static [Migration]>,
     connection: Arc<ThreadLocal<Connection>>,
+    _pd: PhantomData<D>,
 }
 
-impl ThreadSafeConnection {
+impl<D: Domain> ThreadSafeConnection<D> {
     pub fn new(uri: &str, persistent: bool) -> Self {
         Self {
             uri: Arc::from(uri),
             persistent,
             initialize_query: None,
-            migrations: None,
             connection: Default::default(),
+            _pd: PhantomData,
         }
     }
 
@@ -31,13 +31,6 @@ impl ThreadSafeConnection {
         self
     }
 
-    /// Migrations have to be run per connection because we fallback to memory
-    /// so this needs
-    pub fn with_migrations(mut self, migrations: &'static [Migration]) -> Self {
-        self.migrations = Some(migrations);
-        self
-    }
-
     /// Opens a new db connection with the initialized file path. This is internal and only
     /// called from the deref function.
     /// If opening fails, the connection falls back to a shared memory connection
@@ -50,21 +43,33 @@ impl ThreadSafeConnection {
     fn open_shared_memory(&self) -> Connection {
         Connection::open_memory(self.uri.as_ref())
    }
+
+    // Open a new connection for the given domain, leaving this
+    // connection intact.
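    // A usage sketch (editorial assumption; the domain names are
    // illustrative): narrowing a multi-domain handle shares the same
    // underlying sqlite file, while each view lazily opens its own
    // connection and runs `D::migrate` on first deref:
    //
    //     let db: ThreadSafeConnection<(Workspace, KeyValue)> =
    //         ThreadSafeConnection::new("example-db", true);
    //     let workspace_only: ThreadSafeConnection<Workspace> = db.for_domain();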
+ pub fn for_domain(&self) -> ThreadSafeConnection { + ThreadSafeConnection { + uri: self.uri.clone(), + persistent: self.persistent, + initialize_query: self.initialize_query, + connection: Default::default(), + _pd: PhantomData, + } + } } -impl Clone for ThreadSafeConnection { +impl Clone for ThreadSafeConnection { fn clone(&self) -> Self { Self { uri: self.uri.clone(), persistent: self.persistent, initialize_query: self.initialize_query.clone(), - migrations: self.migrations.clone(), connection: self.connection.clone(), + _pd: PhantomData, } } } -impl Deref for ThreadSafeConnection { +impl Deref for ThreadSafeConnection { type Target = Connection; fn deref(&self) -> &Self::Target { @@ -83,13 +88,7 @@ impl Deref for ThreadSafeConnection { .unwrap(); } - if let Some(migrations) = self.migrations { - for migration in migrations { - migration - .run(&connection) - .expect(&format!("Migrations failed to execute: {:?}", migration)); - } - } + D::migrate(&connection).expect("Migrations failed"); connection }) diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index 01313f2046..644fa9481e 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -1925,7 +1925,7 @@ mod tests { let project = Project::test(fs, None, cx).await; let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + cx.add_window(|cx| Workspace::new(None, project, |_, _| unimplemented!(), cx)); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); add_labled_item(&workspace, &pane, "A", cx); diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index d1dbc6982b..990f7142ee 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -15,7 +15,7 @@ use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; -use db::{model::SerializedWorkspace, Db}; +use db::{kvp::KeyValue, model::SerializedWorkspace, Db}; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -1288,7 +1288,8 @@ impl Workspace { // Use the resolved worktree roots to get the serialized_db from the database let serialized_workspace = cx.read(|cx| { - cx.global::() + cx.global::>() + .open_as::() .workspace_for_roots(&Vec::from_iter(worktree_roots.into_iter())[..]) }); diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 84d18ba22f..359648b7d7 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -57,7 +57,7 @@ fn main() { init_panic_hook(app_version, http.clone(), app.background()); let db = app.background().spawn(async move { - project::Db::open(&*zed::paths::DB_DIR, RELEASE_CHANNEL_NAME.as_str()) + project::Db::::open(&*zed::paths::DB_DIR, RELEASE_CHANNEL_NAME.as_str()) }); load_embedded_fonts(&app); @@ -150,7 +150,7 @@ fn main() { let db = cx.background().block(db); cx.set_global(db); - client.start_telemetry(cx.global::().clone()); + client.start_telemetry(cx.global::>().clone()); client.report_event("start app", Default::default()); let app_state = Arc::new(AppState { @@ -165,7 +165,7 @@ fn main() { default_item_factory, }); auto_update::init( - cx.global::().clone(), + cx.global::>().clone(), http, client::ZED_SERVER_URL.clone(), cx, From e578f2530e1c2b6c54dc00416234d16e401a4622 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 10 Nov 2022 16:40:35 -0800 Subject: [PATCH 152/240] WIP commit, 
migrating workspace serialization code into the workspace --- crates/db/src/db.rs | 3 - crates/db/src/workspace/model.rs | 257 ----------------- crates/workspace/src/workspace.rs | 9 +- .../src/workspace_db.rs} | 262 +++++++++++++++++- 4 files changed, 266 insertions(+), 265 deletions(-) delete mode 100644 crates/db/src/workspace/model.rs rename crates/{db/src/workspace.rs => workspace/src/workspace_db.rs} (68%) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 4e348b5614..02fc51ee8d 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,5 +1,4 @@ pub mod kvp; -pub mod workspace; use std::fs; use std::ops::Deref; @@ -11,8 +10,6 @@ use sqlez::connection::Connection; use sqlez::domain::Domain; use sqlez::thread_safe_connection::ThreadSafeConnection; -pub use workspace::*; - const INITIALIZE_QUERY: &'static str = indoc! {" PRAGMA journal_mode=WAL; PRAGMA synchronous=NORMAL; diff --git a/crates/db/src/workspace/model.rs b/crates/db/src/workspace/model.rs deleted file mode 100644 index 36099f66e6..0000000000 --- a/crates/db/src/workspace/model.rs +++ /dev/null @@ -1,257 +0,0 @@ -use std::{ - path::{Path, PathBuf}, - sync::Arc, -}; - -use anyhow::{bail, Result}; - -use sqlez::{ - bindable::{Bind, Column}, - statement::Statement, -}; - -#[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct WorkspaceId(Vec); - -impl WorkspaceId { - pub fn paths(self) -> Vec { - self.0 - } -} - -impl, T: IntoIterator> From for WorkspaceId { - fn from(iterator: T) -> Self { - let mut roots = iterator - .into_iter() - .map(|p| p.as_ref().to_path_buf()) - .collect::>(); - roots.sort(); - Self(roots) - } -} - -impl Bind for &WorkspaceId { - fn bind(&self, statement: &Statement, start_index: i32) -> Result { - bincode::serialize(&self.0) - .expect("Bincode serialization of paths should not fail") - .bind(statement, start_index) - } -} - -impl Column for WorkspaceId { - fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let blob = statement.column_blob(start_index)?; - Ok((WorkspaceId(bincode::deserialize(blob)?), start_index + 1)) - } -} - -#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] -pub enum DockAnchor { - #[default] - Bottom, - Right, - Expanded, -} - -impl Bind for DockAnchor { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - DockAnchor::Bottom => "Bottom", - DockAnchor::Right => "Right", - DockAnchor::Expanded => "Expanded", - } - .bind(statement, start_index) - } -} - -impl Column for DockAnchor { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(anchor_text, next_index)| { - Ok(( - match anchor_text.as_ref() { - "Bottom" => DockAnchor::Bottom, - "Right" => DockAnchor::Right, - "Expanded" => DockAnchor::Expanded, - _ => bail!("Stored dock anchor is incorrect"), - }, - next_index, - )) - }) - } -} - -#[derive(Debug, PartialEq, Eq)] -pub struct SerializedWorkspace { - pub dock_anchor: DockAnchor, - pub dock_visible: bool, - pub center_group: SerializedPaneGroup, - pub dock_pane: SerializedPane, -} - -#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] -pub enum Axis { - #[default] - Horizontal, - Vertical, -} - -impl Bind for Axis { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - Axis::Horizontal => "Horizontal", - Axis::Vertical => "Vertical", - } - .bind(statement, start_index) - } -} - -impl Column for Axis { - fn column(statement: &mut Statement, start_index: i32) 
-> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(axis_text, next_index)| { - Ok(( - match axis_text.as_str() { - "Horizontal" => Axis::Horizontal, - "Vertical" => Axis::Vertical, - _ => bail!("Stored serialized item kind is incorrect"), - }, - next_index, - )) - }) - } -} - -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum SerializedPaneGroup { - Group { - axis: Axis, - children: Vec, - }, - Pane(SerializedPane), -} - -// Dock panes, and grouped panes combined? -// AND we're collapsing PaneGroup::Pane -// In the case where - -impl Default for SerializedPaneGroup { - fn default() -> Self { - Self::Group { - axis: Axis::Horizontal, - children: vec![Self::Pane(Default::default())], - } - } -} - -#[derive(Debug, PartialEq, Eq, Default, Clone)] -pub struct SerializedPane { - pub(crate) children: Vec, -} - -impl SerializedPane { - pub fn new(children: Vec) -> Self { - SerializedPane { children } - } -} - -pub type GroupId = i64; -pub type PaneId = i64; -pub type ItemId = usize; - -pub(crate) enum SerializedItemKind { - Editor, - Diagnostics, - ProjectSearch, - Terminal, -} - -impl Bind for SerializedItemKind { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - SerializedItemKind::Editor => "Editor", - SerializedItemKind::Diagnostics => "Diagnostics", - SerializedItemKind::ProjectSearch => "ProjectSearch", - SerializedItemKind::Terminal => "Terminal", - } - .bind(statement, start_index) - } -} - -impl Column for SerializedItemKind { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(kind_text, next_index)| { - Ok(( - match kind_text.as_ref() { - "Editor" => SerializedItemKind::Editor, - "Diagnostics" => SerializedItemKind::Diagnostics, - "ProjectSearch" => SerializedItemKind::ProjectSearch, - "Terminal" => SerializedItemKind::Terminal, - _ => bail!("Stored serialized item kind is incorrect"), - }, - next_index, - )) - }) - } -} - -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum SerializedItem { - Editor { item_id: usize, path: Arc }, - Diagnostics { item_id: usize }, - ProjectSearch { item_id: usize, query: String }, - Terminal { item_id: usize }, -} - -impl SerializedItem { - pub fn item_id(&self) -> usize { - match self { - SerializedItem::Editor { item_id, .. } => *item_id, - SerializedItem::Diagnostics { item_id } => *item_id, - SerializedItem::ProjectSearch { item_id, .. } => *item_id, - SerializedItem::Terminal { item_id } => *item_id, - } - } - - pub(crate) fn kind(&self) -> SerializedItemKind { - match self { - SerializedItem::Editor { .. } => SerializedItemKind::Editor, - SerializedItem::Diagnostics { .. } => SerializedItemKind::Diagnostics, - SerializedItem::ProjectSearch { .. } => SerializedItemKind::ProjectSearch, - SerializedItem::Terminal { .. } => SerializedItemKind::Terminal, - } - } -} - -#[cfg(test)] -mod tests { - use sqlez::connection::Connection; - - use crate::model::DockAnchor; - - use super::WorkspaceId; - - #[test] - fn test_workspace_round_trips() { - let db = Connection::open_memory("workspace_id_round_trips"); - - db.exec(indoc::indoc! 
{" - CREATE TABLE workspace_id_test( - workspace_id BLOB, - dock_anchor TEXT - );"}) - .unwrap()() - .unwrap(); - - let workspace_id: WorkspaceId = WorkspaceId::from(&["\test2", "\test1"]); - - db.exec_bound("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)") - .unwrap()((&workspace_id, DockAnchor::Bottom)) - .unwrap(); - - assert_eq!( - db.select_row("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1") - .unwrap()() - .unwrap(), - Some((WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom)) - ); - } -} diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 990f7142ee..a994b8a833 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -10,12 +10,13 @@ pub mod shared_screen; pub mod sidebar; mod status_bar; mod toolbar; +mod workspace_db; use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; -use db::{kvp::KeyValue, model::SerializedWorkspace, Db}; +use db::{kvp::KeyValue, Db}; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -61,6 +62,8 @@ use theme::{Theme, ThemeRegistry}; pub use toolbar::{ToolbarItemLocation, ToolbarItemView}; use util::ResultExt; +use crate::workspace_db::model; + type ProjectItemBuilders = HashMap< TypeId, fn(ModelHandle, AnyModelHandle, &mut ViewContext) -> Box, @@ -1120,7 +1123,7 @@ enum FollowerItem { impl Workspace { pub fn new( - _serialized_workspace: Option, + _serialized_workspace: Option, project: ModelHandle, dock_default_factory: DefaultItemFactory, cx: &mut ViewContext, @@ -1289,7 +1292,7 @@ impl Workspace { // Use the resolved worktree roots to get the serialized_db from the database let serialized_workspace = cx.read(|cx| { cx.global::>() - .open_as::() + .open_as::() .workspace_for_roots(&Vec::from_iter(worktree_roots.into_iter())[..]) }); diff --git a/crates/db/src/workspace.rs b/crates/workspace/src/workspace_db.rs similarity index 68% rename from crates/db/src/workspace.rs rename to crates/workspace/src/workspace_db.rs index 17ff9cf22c..3e10b06f85 100644 --- a/crates/db/src/workspace.rs +++ b/crates/workspace/src/workspace_db.rs @@ -1,5 +1,3 @@ -pub mod model; - use anyhow::{bail, Context, Result}; use util::{iife, unzip_option, ResultExt}; @@ -493,3 +491,263 @@ mod tests { assert_eq!(workspace.center_group, center_pane); } } + +pub mod model { + use std::{ + path::{Path, PathBuf}, + sync::Arc, + }; + + use anyhow::{bail, Result}; + + use sqlez::{ + bindable::{Bind, Column}, + statement::Statement, + }; + + #[derive(Debug, Clone, PartialEq, Eq)] + pub(crate) struct WorkspaceId(Vec); + + impl WorkspaceId { + pub fn paths(self) -> Vec { + self.0 + } + } + + impl, T: IntoIterator> From for WorkspaceId { + fn from(iterator: T) -> Self { + let mut roots = iterator + .into_iter() + .map(|p| p.as_ref().to_path_buf()) + .collect::>(); + roots.sort(); + Self(roots) + } + } + + impl Bind for &WorkspaceId { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + bincode::serialize(&self.0) + .expect("Bincode serialization of paths should not fail") + .bind(statement, start_index) + } + } + + impl Column for WorkspaceId { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let blob = statement.column_blob(start_index)?; + Ok((WorkspaceId(bincode::deserialize(blob)?), start_index + 1)) + } + } + + #[derive(Default, Debug, 
PartialEq, Eq, Clone, Copy)] + pub enum DockAnchor { + #[default] + Bottom, + Right, + Expanded, + } + + impl Bind for DockAnchor { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + DockAnchor::Bottom => "Bottom", + DockAnchor::Right => "Right", + DockAnchor::Expanded => "Expanded", + } + .bind(statement, start_index) + } + } + + impl Column for DockAnchor { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(anchor_text, next_index)| { + Ok(( + match anchor_text.as_ref() { + "Bottom" => DockAnchor::Bottom, + "Right" => DockAnchor::Right, + "Expanded" => DockAnchor::Expanded, + _ => bail!("Stored dock anchor is incorrect"), + }, + next_index, + )) + }) + } + } + + #[derive(Debug, PartialEq, Eq)] + pub struct SerializedWorkspace { + pub dock_anchor: DockAnchor, + pub dock_visible: bool, + pub center_group: SerializedPaneGroup, + pub dock_pane: SerializedPane, + } + + #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] + pub enum Axis { + #[default] + Horizontal, + Vertical, + } + + impl Bind for Axis { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + Axis::Horizontal => "Horizontal", + Axis::Vertical => "Vertical", + } + .bind(statement, start_index) + } + } + + impl Column for Axis { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(axis_text, next_index)| { + Ok(( + match axis_text.as_str() { + "Horizontal" => Axis::Horizontal, + "Vertical" => Axis::Vertical, + _ => bail!("Stored serialized item kind is incorrect"), + }, + next_index, + )) + }) + } + } + + #[derive(Debug, PartialEq, Eq, Clone)] + pub enum SerializedPaneGroup { + Group { + axis: Axis, + children: Vec, + }, + Pane(SerializedPane), + } + + // Dock panes, and grouped panes combined? 
+ // AND we're collapsing PaneGroup::Pane + // In the case where + + impl Default for SerializedPaneGroup { + fn default() -> Self { + Self::Group { + axis: Axis::Horizontal, + children: vec![Self::Pane(Default::default())], + } + } + } + + #[derive(Debug, PartialEq, Eq, Default, Clone)] + pub struct SerializedPane { + pub(crate) children: Vec, + } + + impl SerializedPane { + pub fn new(children: Vec) -> Self { + SerializedPane { children } + } + } + + pub type GroupId = i64; + pub type PaneId = i64; + pub type ItemId = usize; + + pub(crate) enum SerializedItemKind { + Editor, + Diagnostics, + ProjectSearch, + Terminal, + } + + impl Bind for SerializedItemKind { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + SerializedItemKind::Editor => "Editor", + SerializedItemKind::Diagnostics => "Diagnostics", + SerializedItemKind::ProjectSearch => "ProjectSearch", + SerializedItemKind::Terminal => "Terminal", + } + .bind(statement, start_index) + } + } + + impl Column for SerializedItemKind { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(kind_text, next_index)| { + Ok(( + match kind_text.as_ref() { + "Editor" => SerializedItemKind::Editor, + "Diagnostics" => SerializedItemKind::Diagnostics, + "ProjectSearch" => SerializedItemKind::ProjectSearch, + "Terminal" => SerializedItemKind::Terminal, + _ => bail!("Stored serialized item kind is incorrect"), + }, + next_index, + )) + }) + } + } + + #[derive(Debug, PartialEq, Eq, Clone)] + pub enum SerializedItem { + Editor { item_id: usize, path: Arc }, + Diagnostics { item_id: usize }, + ProjectSearch { item_id: usize, query: String }, + Terminal { item_id: usize }, + } + + impl SerializedItem { + pub fn item_id(&self) -> usize { + match self { + SerializedItem::Editor { item_id, .. } => *item_id, + SerializedItem::Diagnostics { item_id } => *item_id, + SerializedItem::ProjectSearch { item_id, .. } => *item_id, + SerializedItem::Terminal { item_id } => *item_id, + } + } + + pub(crate) fn kind(&self) -> SerializedItemKind { + match self { + SerializedItem::Editor { .. } => SerializedItemKind::Editor, + SerializedItem::Diagnostics { .. } => SerializedItemKind::Diagnostics, + SerializedItem::ProjectSearch { .. } => SerializedItemKind::ProjectSearch, + SerializedItem::Terminal { .. } => SerializedItemKind::Terminal, + } + } + } + + #[cfg(test)] + mod tests { + use sqlez::connection::Connection; + + use crate::model::DockAnchor; + + use super::WorkspaceId; + + #[test] + fn test_workspace_round_trips() { + let db = Connection::open_memory("workspace_id_round_trips"); + + db.exec(indoc::indoc! {" + CREATE TABLE workspace_id_test( + workspace_id BLOB, + dock_anchor TEXT + );"}) + .unwrap()() + .unwrap(); + + let workspace_id: WorkspaceId = WorkspaceId::from(&["\test2", "\test1"]); + + db.exec_bound("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)") + .unwrap()((&workspace_id, DockAnchor::Bottom)) + .unwrap(); + + assert_eq!( + db.select_row("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1") + .unwrap()() + .unwrap(), + Some((WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom)) + ); + } + } +} From a5edac312e4b03fb9a5c30ac80943278f8e9307a Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 10 Nov 2022 21:08:20 -0800 Subject: [PATCH 153/240] Moved to workspaces crate... 
don't feel great about it --- Cargo.lock | 5 +- crates/collab/Cargo.toml | 1 + crates/collab/src/integration_tests.rs | 10 +- crates/collab/src/main.rs | 2 +- crates/command_palette/src/command_palette.rs | 4 +- crates/db/Cargo.toml | 1 - crates/sqlez/src/domain.rs | 2 +- crates/workspace/Cargo.toml | 4 + crates/workspace/src/workspace.rs | 16 +- crates/workspace/src/workspace_db.rs | 170 ++++++++++-------- crates/zed/src/main.rs | 6 +- crates/zed/src/zed.rs | 4 +- 12 files changed, 128 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8fa755b161..9048225474 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1040,6 +1040,7 @@ dependencies = [ "client", "collections", "ctor", + "db", "editor", "env_logger", "envy", @@ -1550,7 +1551,6 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "bincode", "collections", "env_logger", "gpui", @@ -7620,6 +7620,7 @@ name = "workspace" version = "0.1.0" dependencies = [ "anyhow", + "bincode", "call", "client", "collections", @@ -7629,6 +7630,7 @@ dependencies = [ "fs", "futures 0.3.25", "gpui", + "indoc", "language", "log", "menu", @@ -7639,6 +7641,7 @@ dependencies = [ "serde_json", "settings", "smallvec", + "sqlez", "theme", "util", ] diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 09f379526e..1722d3374a 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -18,6 +18,7 @@ live_kit_server = { path = "../live_kit_server" } rpc = { path = "../rpc" } util = { path = "../util" } +db = { path = "../db" } anyhow = "1.0.40" async-trait = "0.1.50" async-tungstenite = "0.16" diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 5de28f1c65..bfc14618ea 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1,8 +1,9 @@ use crate::{ - db::{NewUserParams, ProjectId, SqliteTestDb as TestDb, UserId}, + db::{Db, NewUserParams, ProjectId, UserId}, rpc::{Executor, Server}, AppState, }; + use ::rpc::Peer; use anyhow::anyhow; use call::{room, ActiveCall, ParticipantLocation, Room}; @@ -11,6 +12,7 @@ use client::{ User, UserStore, RECEIVE_TIMEOUT, }; use collections::{BTreeMap, HashMap, HashSet}; +use db as SqliteDb; use editor::{ self, ConfirmCodeAction, ConfirmCompletion, ConfirmRename, Editor, Redo, Rename, ToOffset, ToggleCodeActions, Undo, @@ -5836,7 +5838,11 @@ impl TestServer { Project::init(&client); cx.update(|cx| { - workspace::init(app_state.clone(), cx); + workspace::init( + app_state.clone(), + cx, + SqliteDb::open_in_memory("integration tests"), + ); call::init(client.clone(), user_store.clone(), cx); }); diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index dc98a2ee68..d26ea1a0fa 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -9,11 +9,11 @@ mod db_tests; #[cfg(test)] mod integration_tests; +use crate::db::{Db, PostgresDb}; use crate::rpc::ResultExt as _; use anyhow::anyhow; use axum::{routing::get, Router}; use collab::{Error, Result}; -use db::DefaultDb as Db; use serde::Deserialize; use std::{ env::args, diff --git a/crates/command_palette/src/command_palette.rs b/crates/command_palette/src/command_palette.rs index 5af23b45d7..f2542c9bc8 100644 --- a/crates/command_palette/src/command_palette.rs +++ b/crates/command_palette/src/command_palette.rs @@ -320,7 +320,7 @@ mod tests { use super::*; use editor::Editor; use gpui::TestAppContext; - use project::Project; + use project::{Db, Project}; use workspace::{AppState, Workspace}; #[test] @@ -345,7 +345,7 
@@ mod tests { cx.update(|cx| { editor::init(cx); - workspace::init(app_state.clone(), cx); + workspace::init(app_state.clone(), cx, Db::open_in_memory("test")); init(cx); }); diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index b69779c408..27a11bea7b 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -22,7 +22,6 @@ lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" serde = { version = "1.0", features = ["derive"] } -bincode = "1.2.1" [dev-dependencies] diff --git a/crates/sqlez/src/domain.rs b/crates/sqlez/src/domain.rs index 01b17eea31..f57e89a5c8 100644 --- a/crates/sqlez/src/domain.rs +++ b/crates/sqlez/src/domain.rs @@ -1,6 +1,6 @@ use crate::connection::Connection; -pub trait Domain: Send + Sync + Clone { +pub trait Domain { fn migrate(conn: &Connection) -> anyhow::Result<()>; } diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index c481792f7c..f8bcba5eb7 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -30,8 +30,10 @@ language = { path = "../language" } menu = { path = "../menu" } project = { path = "../project" } settings = { path = "../settings" } +sqlez = { path = "../sqlez" } theme = { path = "../theme" } util = { path = "../util" } +bincode = "1.2.1" anyhow = "1.0.38" futures = "0.3" log = { version = "0.4.16", features = ["kv_unstable_serde"] } @@ -40,6 +42,8 @@ postage = { version = "0.4.1", features = ["futures-traits"] } serde = { version = "1.0", features = ["derive", "rc"] } serde_json = { version = "1.0", features = ["preserve_order"] } smallvec = { version = "1.6", features = ["union"] } +indoc = "1.0.4" + [dev-dependencies] call = { path = "../call", features = ["test-support"] } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index a994b8a833..39843859c0 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -12,6 +12,7 @@ mod status_bar; mod toolbar; mod workspace_db; +use crate::workspace_db::model::SerializedWorkspace; use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; @@ -62,8 +63,6 @@ use theme::{Theme, ThemeRegistry}; pub use toolbar::{ToolbarItemLocation, ToolbarItemView}; use util::ResultExt; -use crate::workspace_db::model; - type ProjectItemBuilders = HashMap< TypeId, fn(ModelHandle, AnyModelHandle, &mut ViewContext) -> Box, @@ -166,7 +165,9 @@ impl_internal_actions!( ); impl_actions!(workspace, [ActivatePane]); -pub fn init(app_state: Arc, cx: &mut MutableAppContext) { +pub fn init(app_state: Arc, cx: &mut MutableAppContext, db: Db) { + cx.set_global(db); + pane::init(cx); dock::init(cx); @@ -1123,7 +1124,7 @@ enum FollowerItem { impl Workspace { pub fn new( - _serialized_workspace: Option, + _serialized_workspace: Option, project: ModelHandle, dock_default_factory: DefaultItemFactory, cx: &mut ViewContext, @@ -1291,9 +1292,10 @@ impl Workspace { // Use the resolved worktree roots to get the serialized_db from the database let serialized_workspace = cx.read(|cx| { - cx.global::>() - .open_as::() - .workspace_for_roots(&Vec::from_iter(worktree_roots.into_iter())[..]) + Workspace::workspace_for_roots( + cx.global::>(), + &Vec::from_iter(worktree_roots.into_iter())[..], + ) }); // Use the serialized workspace to construct the new window diff --git a/crates/workspace/src/workspace_db.rs b/crates/workspace/src/workspace_db.rs index 3e10b06f85..e896dd6c27 100644 --- 
a/crates/workspace/src/workspace_db.rs +++ b/crates/workspace/src/workspace_db.rs @@ -1,18 +1,20 @@ use anyhow::{bail, Context, Result}; + +use db::Db; use util::{iife, unzip_option, ResultExt}; use std::path::{Path, PathBuf}; use indoc::indoc; -use sqlez::{domain::Domain, migrations::Migration}; +use sqlez::{connection::Connection, domain::Domain, migrations::Migration}; + +use super::Workspace; use self::model::{ Axis, GroupId, PaneId, SerializedItem, SerializedItemKind, SerializedPane, SerializedPaneGroup, SerializedWorkspace, WorkspaceId, }; -use super::Db; - // 1) Move all of this into Workspace crate // 2) Deserialize items fully // 3) Typed prepares (including how you expect to pull data out) @@ -70,23 +72,20 @@ pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new( "}], ); -#[derive(Clone)] -pub enum Workspace {} - impl Domain for Workspace { - fn migrate(conn: &sqlez::connection::Connection) -> anyhow::Result<()> { + fn migrate(conn: &Connection) -> anyhow::Result<()> { WORKSPACES_MIGRATION.run(&conn)?; PANE_MIGRATIONS.run(&conn)?; ITEM_MIGRATIONS.run(&conn) } } -impl Db { +impl Workspace { /// Returns a serialized workspace for the given worktree_roots. If the passed array /// is empty, the most recent workspace is returned instead. If no workspace for the /// passed roots is stored, returns none. pub fn workspace_for_roots>( - &self, + db: &Db, worktree_roots: &[P], ) -> Option { let workspace_id: WorkspaceId = worktree_roots.into(); @@ -95,12 +94,12 @@ impl Db { // and we've grabbed the most recent workspace let (workspace_id, dock_anchor, dock_visible) = iife!({ if worktree_roots.len() == 0 { - self.select_row(indoc! {" + db.select_row(indoc! {" SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1"})?()? } else { - self.select_row_bound(indoc! {" + db.select_row_bound(indoc! {" SELECT workspace_id, dock_anchor, dock_visible FROM workspaces WHERE workspace_id = ?"})?(&workspace_id)? @@ -111,12 +110,10 @@ impl Db { .flatten()?; Some(SerializedWorkspace { - dock_pane: self - .get_dock_pane(&workspace_id) + dock_pane: Workspace::get_dock_pane(&db, &workspace_id) .context("Getting dock pane") .log_err()?, - center_group: self - .get_center_pane_group(&workspace_id) + center_group: Workspace::get_center_pane_group(&db, &workspace_id) .context("Getting center group") .log_err()?, dock_anchor, @@ -127,32 +124,32 @@ impl Db { /// Saves a workspace using the worktree roots. Will garbage collect any workspaces /// that used this workspace previously pub fn save_workspace>( - &self, + db: &Db, worktree_roots: &[P], old_roots: Option<&[P]>, workspace: &SerializedWorkspace, ) { let workspace_id: WorkspaceId = worktree_roots.into(); - self.with_savepoint("update_worktrees", || { + db.with_savepoint("update_worktrees", || { if let Some(old_roots) = old_roots { let old_id: WorkspaceId = old_roots.into(); - self.exec_bound("DELETE FROM WORKSPACES WHERE workspace_id = ?")?(&old_id)?; + db.exec_bound("DELETE FROM WORKSPACES WHERE workspace_id = ?")?(&old_id)?; } // Delete any previous workspaces with the same roots. This cascades to all // other tables that are based on the same roots set. 
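            // (The cascade is carried by the schema itself: pane_groups,
            // panes, and items all declare FOREIGN KEY(workspace_id) ...
            // ON DELETE CASCADE, so the single DELETE below is sufficient.)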
// Insert new workspace into workspaces table if none were found - self.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?; + db.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?; - self.exec_bound( + db.exec_bound( "INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?, ?, ?)", )?((&workspace_id, workspace.dock_anchor, workspace.dock_visible))?; // Save center pane group and dock pane - self.save_pane_group(&workspace_id, &workspace.center_group, None)?; - self.save_pane(&workspace_id, &workspace.dock_pane, None)?; + Workspace::save_pane_group(db, &workspace_id, &workspace.center_group, None)?; + Workspace::save_pane(db, &workspace_id, &workspace.dock_pane, None)?; Ok(()) }) @@ -169,11 +166,11 @@ impl Db { } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots - pub fn recent_workspaces(&self, limit: usize) -> Vec> { + pub fn recent_workspaces(conn: &Connection, limit: usize) -> Vec> { iife!({ // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html Ok::<_, anyhow::Error>( - self.select_bound::( + conn.select_bound::( "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", )?(limit)? .into_iter() @@ -186,21 +183,21 @@ impl Db { } pub(crate) fn get_center_pane_group( - &self, + db: &Db, workspace_id: &WorkspaceId, ) -> Result { - self.get_pane_group_children(workspace_id, None)? + Workspace::get_pane_group_children(&db, workspace_id, None)? .into_iter() .next() .context("No center pane group") } fn get_pane_group_children<'a>( - &self, + db: &Db, workspace_id: &WorkspaceId, group_id: Option, ) -> Result> { - self.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! {" + db.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! {" SELECT group_id, axis, pane_id FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id FROM pane_groups @@ -217,14 +214,15 @@ impl Db { if let Some((group_id, axis)) = group_id.zip(axis) { Ok(SerializedPaneGroup::Group { axis, - children: self.get_pane_group_children( + children: Workspace::get_pane_group_children( + db, workspace_id, Some(group_id), )?, }) } else if let Some(pane_id) = pane_id { Ok(SerializedPaneGroup::Pane(SerializedPane { - children: self.get_items(pane_id)?, + children: Workspace::get_items(db, pane_id)?, })) } else { bail!("Pane Group Child was neither a pane group or a pane"); @@ -234,7 +232,7 @@ impl Db { } pub(crate) fn save_pane_group( - &self, + db: &Db, workspace_id: &WorkspaceId, pane_group: &SerializedPaneGroup, parent: Option<(GroupId, usize)>, @@ -247,20 +245,28 @@ impl Db { match pane_group { SerializedPaneGroup::Group { axis, children } => { - let parent_id = self.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? + let parent_id = db.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? ((workspace_id, parent_id, position, *axis))?; for (position, group) in children.iter().enumerate() { - self.save_pane_group(workspace_id, group, Some((parent_id, position)))? + Workspace::save_pane_group( + db, + workspace_id, + group, + Some((parent_id, position)), + )? 
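                        // Positions come straight from enumerate() above, so
                        // sibling order in `children` is exactly what the
                        // `position` column stores and what reads ORDER BY.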
} Ok(()) } - SerializedPaneGroup::Pane(pane) => self.save_pane(workspace_id, pane, parent), + SerializedPaneGroup::Pane(pane) => Workspace::save_pane(db, workspace_id, pane, parent), } } - pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { - let pane_id = self.select_row_bound(indoc! {" + pub(crate) fn get_dock_pane( + db: &Db, + workspace_id: &WorkspaceId, + ) -> Result { + let pane_id = db.select_row_bound(indoc! {" SELECT pane_id FROM panes WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?( workspace_id, @@ -268,28 +274,27 @@ impl Db { .context("No dock pane for workspace")?; Ok(SerializedPane::new( - self.get_items(pane_id).context("Reading items")?, + Workspace::get_items(db, pane_id).context("Reading items")?, )) } pub(crate) fn save_pane( - &self, + db: &Db, workspace_id: &WorkspaceId, pane: &SerializedPane, parent: Option<(GroupId, usize)>, ) -> Result<()> { let (parent_id, order) = unzip_option(parent); - let pane_id = self.insert_bound( + let pane_id = db.insert_bound( "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)", )?((workspace_id, parent_id, order))?; - self.save_items(workspace_id, pane_id, &pane.children) - .context("Saving items") + Workspace::save_items(db, workspace_id, pane_id, &pane.children).context("Saving items") } - pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { - Ok(self.select_bound(indoc! {" + pub(crate) fn get_items(db: &Db, pane_id: PaneId) -> Result> { + Ok(db.select_bound(indoc! {" SELECT item_id, kind FROM items WHERE pane_id = ? ORDER BY position"})?(pane_id)? @@ -302,15 +307,15 @@ impl Db { } pub(crate) fn save_items( - &self, + db: &Db, workspace_id: &WorkspaceId, pane_id: PaneId, items: &[SerializedItem], ) -> Result<()> { - let mut delete_old = self + let mut delete_old = db .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? 
AND item_id = ?") .context("Preparing deletion")?; - let mut insert_new = self.exec_bound( + let mut insert_new = db.exec_bound( "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", ).context("Preparing insertion")?; for (position, item) in items.iter().enumerate() { @@ -324,17 +329,12 @@ impl Db { #[cfg(test)] mod tests { - use crate::{ - model::{ - DockAnchor::{Bottom, Expanded, Right}, - SerializedWorkspace, - }, - Db, - }; + use crate::workspace_db::model::DockAnchor::{Bottom, Expanded, Right}; + use crate::{Db, Workspace}; #[test] fn test_workspace_assignment() { - env_logger::try_init().ok(); + // env_logger::try_init().ok(); let db = Db::open_in_memory("test_basic_functionality"); @@ -359,61 +359,73 @@ mod tests { dock_pane: Default::default(), }; - db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_1); - db.save_workspace(&["/tmp"], None, &workspace_2); + Workspace::save_workspace(&db, &["/tmp", "/tmp2"], None, &workspace_1); + Workspace::save_workspace(&db, &["/tmp"], None, &workspace_2); db.write_to("test.db").unwrap(); // Test that paths are treated as a set assert_eq!( - db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + Workspace::workspace_for_roots(&db, &["/tmp", "/tmp2"]).unwrap(), workspace_1 ); assert_eq!( - db.workspace_for_roots(&["/tmp2", "/tmp"]).unwrap(), + Workspace::workspace_for_roots(&db, &["/tmp2", "/tmp"]).unwrap(), workspace_1 ); // Make sure that other keys work - assert_eq!(db.workspace_for_roots(&["/tmp"]).unwrap(), workspace_2); - assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None); + assert_eq!( + Workspace::workspace_for_roots(&db, &["/tmp"]).unwrap(), + workspace_2 + ); + assert_eq!( + Workspace::workspace_for_roots(&db, &["/tmp3", "/tmp2", "/tmp4"]), + None + ); // Test 'mutate' case of updating a pre-existing id - db.save_workspace(&["/tmp", "/tmp2"], Some(&["/tmp", "/tmp2"]), &workspace_2); + Workspace::save_workspace( + &db, + &["/tmp", "/tmp2"], + Some(&["/tmp", "/tmp2"]), + &workspace_2, + ); assert_eq!( - db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + Workspace::workspace_for_roots(&db, &["/tmp", "/tmp2"]).unwrap(), workspace_2 ); // Test other mechanism for mutating - db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_3); + Workspace::save_workspace(&db, &["/tmp", "/tmp2"], None, &workspace_3); assert_eq!( - db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + Workspace::workspace_for_roots(&db, &["/tmp", "/tmp2"]).unwrap(), workspace_3 ); // Make sure that updating paths differently also works - db.save_workspace( + Workspace::save_workspace( + &db, &["/tmp3", "/tmp4", "/tmp2"], Some(&["/tmp", "/tmp2"]), &workspace_3, ); - assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None); + assert_eq!(Workspace::workspace_for_roots(&db, &["/tmp2", "tmp"]), None); assert_eq!( - db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]) - .unwrap(), + Workspace::workspace_for_roots(&db, &["/tmp2", "/tmp3", "/tmp4"]).unwrap(), workspace_3 ); } - use crate::model::{SerializedItem, SerializedPane, SerializedPaneGroup}; + use crate::workspace_db::model::SerializedWorkspace; + use crate::workspace_db::model::{SerializedItem, SerializedPane, SerializedPaneGroup}; fn default_workspace( dock_pane: SerializedPane, center_group: &SerializedPaneGroup, ) -> SerializedWorkspace { SerializedWorkspace { - dock_anchor: crate::model::DockAnchor::Right, + dock_anchor: crate::workspace_db::model::DockAnchor::Right, dock_visible: false, center_group: center_group.clone(), dock_pane, @@ -422,11 
+434,11 @@ mod tests { #[test] fn test_basic_dock_pane() { - env_logger::try_init().ok(); + // env_logger::try_init().ok(); let db = Db::open_in_memory("basic_dock_pane"); - let dock_pane = crate::model::SerializedPane { + let dock_pane = crate::workspace_db::model::SerializedPane { children: vec![ SerializedItem::Terminal { item_id: 1 }, SerializedItem::Terminal { item_id: 4 }, @@ -437,16 +449,16 @@ mod tests { let workspace = default_workspace(dock_pane, &Default::default()); - db.save_workspace(&["/tmp"], None, &workspace); + Workspace::save_workspace(&db, &["/tmp"], None, &workspace); - let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); + let new_workspace = Workspace::workspace_for_roots(&db, &["/tmp"]).unwrap(); assert_eq!(workspace.dock_pane, new_workspace.dock_pane); } #[test] fn test_simple_split() { - env_logger::try_init().ok(); + // env_logger::try_init().ok(); let db = Db::open_in_memory("simple_split"); @@ -456,10 +468,10 @@ mod tests { // | 3,4 | | // ----------------- let center_pane = SerializedPaneGroup::Group { - axis: crate::model::Axis::Horizontal, + axis: crate::workspace_db::model::Axis::Horizontal, children: vec![ SerializedPaneGroup::Group { - axis: crate::model::Axis::Vertical, + axis: crate::workspace_db::model::Axis::Vertical, children: vec![ SerializedPaneGroup::Pane(SerializedPane { children: vec![ @@ -486,7 +498,7 @@ mod tests { let workspace = default_workspace(Default::default(), ¢er_pane); - db.save_workspace(&["/tmp"], None, &workspace); + Workspace::save_workspace(&db, &["/tmp"], None, &workspace); assert_eq!(workspace.center_group, center_pane); } @@ -720,7 +732,7 @@ pub mod model { mod tests { use sqlez::connection::Connection; - use crate::model::DockAnchor; + use crate::workspace_db::model::DockAnchor; use super::WorkspaceId; diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 359648b7d7..05f5b8871f 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -170,7 +170,11 @@ fn main() { client::ZED_SERVER_URL.clone(), cx, ); - workspace::init(app_state.clone(), cx); + + let workspace_db = cx.global::>().open_as::(); + + workspace::init(app_state.clone(), cx, workspace_db); + journal::init(app_state.clone(), cx); theme_selector::init(app_state.clone(), cx); zed::init(&app_state, cx); diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index de785ca978..d6106d78e4 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -630,7 +630,7 @@ mod tests { use gpui::{ executor::Deterministic, AssetSource, MutableAppContext, TestAppContext, ViewHandle, }; - use project::{Project, ProjectPath}; + use project::{Db, Project, ProjectPath}; use serde_json::json; use std::{ collections::HashSet, @@ -1817,7 +1817,7 @@ mod tests { state.initialize_workspace = initialize_workspace; state.build_window_options = build_window_options; call::init(app_state.client.clone(), app_state.user_store.clone(), cx); - workspace::init(app_state.clone(), cx); + workspace::init(app_state.clone(), cx, Db::open_in_memory("test")); editor::init(cx); pane::init(cx); app_state From 2a5565ca93bfa41879159cfad3e576744259b568 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 14 Nov 2022 11:25:11 -0800 Subject: [PATCH 154/240] WIP --- Cargo.lock | 2 +- crates/util/Cargo.toml | 1 + crates/util/src/lib.rs | 1 + crates/{zed => util}/src/paths.rs | 0 crates/zed/Cargo.toml | 1 - crates/zed/src/main.rs | 25 ++++++++++++------------- crates/zed/src/zed.rs | 3 +-- 7 files changed, 16 insertions(+), 17 deletions(-) rename crates/{zed => 
util}/src/paths.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 9048225474..74860439dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6793,6 +6793,7 @@ version = "0.1.0" dependencies = [ "anyhow", "backtrace", + "dirs 3.0.2", "futures 0.3.25", "git2", "lazy_static", @@ -7707,7 +7708,6 @@ dependencies = [ "context_menu", "ctor", "diagnostics", - "dirs 3.0.2", "easy-parallel", "editor", "env_logger", diff --git a/crates/util/Cargo.toml b/crates/util/Cargo.toml index fc16eeb53c..0a0bacf53c 100644 --- a/crates/util/Cargo.toml +++ b/crates/util/Cargo.toml @@ -19,6 +19,7 @@ rand = { workspace = true } tempdir = { version = "0.3.7", optional = true } serde_json = { version = "1.0", features = ["preserve_order"], optional = true } git2 = { version = "0.15", default-features = false, optional = true } +dirs = "3.0" [dev-dependencies] diff --git a/crates/util/src/lib.rs b/crates/util/src/lib.rs index 19d17c1190..b03bc21210 100644 --- a/crates/util/src/lib.rs +++ b/crates/util/src/lib.rs @@ -1,3 +1,4 @@ +pub mod paths; #[cfg(any(test, feature = "test-support"))] pub mod test; diff --git a/crates/zed/src/paths.rs b/crates/util/src/paths.rs similarity index 100% rename from crates/zed/src/paths.rs rename to crates/util/src/paths.rs diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index a3023918e3..7fef0aafcf 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -62,7 +62,6 @@ async-trait = "0.1" backtrace = "0.3" chrono = "0.4" ctor = "0.1.20" -dirs = "3.0" easy-parallel = "3.1.0" env_logger = "0.9" futures = "0.3" diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 05f5b8871f..6e7aaba3c6 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -37,7 +37,7 @@ use terminal::terminal_container_view::{get_working_directory, TerminalContainer use fs::RealFs; use settings::watched_json::{watch_keymap_file, watch_settings_file, WatchedJsonFile}; use theme::ThemeRegistry; -use util::{ResultExt, TryFutureExt}; +use util::{paths, ResultExt, TryFutureExt}; use workspace::{self, AppState, ItemHandle, NewFile, OpenPaths, Workspace}; use zed::{ self, build_window_options, initialize_workspace, languages, menus, RELEASE_CHANNEL, @@ -57,7 +57,7 @@ fn main() { init_panic_hook(app_version, http.clone(), app.background()); let db = app.background().spawn(async move { - project::Db::::open(&*zed::paths::DB_DIR, RELEASE_CHANNEL_NAME.as_str()) + project::Db::::open(&*paths::DB_DIR, RELEASE_CHANNEL_NAME.as_str()) }); load_embedded_fonts(&app); @@ -91,11 +91,11 @@ fn main() { app.run(move |cx| { cx.set_global(*RELEASE_CHANNEL); - cx.set_global(HomeDir(zed::paths::HOME.to_path_buf())); + cx.set_global(HomeDir(paths::HOME.to_path_buf())); let client = client::Client::new(http.clone(), cx); let mut languages = LanguageRegistry::new(login_shell_env_loaded); - languages.set_language_server_download_dir(zed::paths::LANGUAGES_DIR.clone()); + languages.set_language_server_download_dir(paths::LANGUAGES_DIR.clone()); let languages = Arc::new(languages); let init_languages = cx .background() @@ -106,7 +106,7 @@ fn main() { //Setup settings global before binding actions cx.set_global(SettingsFile::new( - &*zed::paths::SETTINGS, + &*paths::SETTINGS, settings_file_content.clone(), fs.clone(), )); @@ -236,16 +236,15 @@ fn init_logger() { const KIB: u64 = 1024; const MIB: u64 = 1024 * KIB; const MAX_LOG_BYTES: u64 = MIB; - if std::fs::metadata(&*zed::paths::LOG) - .map_or(false, |metadata| metadata.len() > MAX_LOG_BYTES) + if std::fs::metadata(&*paths::LOG).map_or(false, |metadata| 
metadata.len() > MAX_LOG_BYTES) { - let _ = std::fs::rename(&*zed::paths::LOG, &*zed::paths::OLD_LOG); + let _ = std::fs::rename(&*paths::LOG, &*paths::OLD_LOG); } let log_file = OpenOptions::new() .create(true) .append(true) - .open(&*zed::paths::LOG) + .open(&*paths::LOG) .expect("could not open logfile"); simplelog::WriteLogger::init(level, simplelog::Config::default(), log_file) .expect("could not initialize logger"); @@ -257,7 +256,7 @@ fn init_panic_hook(app_version: String, http: Arc, background: A .spawn({ async move { let panic_report_url = format!("{}/api/panic", &*client::ZED_SERVER_URL); - let mut children = smol::fs::read_dir(&*zed::paths::LOGS_DIR).await?; + let mut children = smol::fs::read_dir(&*paths::LOGS_DIR).await?; while let Some(child) = children.next().await { let child = child?; let child_path = child.path(); @@ -345,7 +344,7 @@ fn init_panic_hook(app_version: String, http: Arc, background: A let panic_filename = chrono::Utc::now().format("%Y_%m_%d %H_%M_%S").to_string(); std::fs::write( - zed::paths::LOGS_DIR.join(format!("zed-{}-{}.panic", app_version, panic_filename)), + paths::LOGS_DIR.join(format!("zed-{}-{}.panic", app_version, panic_filename)), &message, ) .context("error writing panic to disk") @@ -479,8 +478,8 @@ fn load_config_files( .clone() .spawn(async move { let settings_file = - WatchedJsonFile::new(fs.clone(), &executor, zed::paths::SETTINGS.clone()).await; - let keymap_file = WatchedJsonFile::new(fs, &executor, zed::paths::KEYMAP.clone()).await; + WatchedJsonFile::new(fs.clone(), &executor, paths::SETTINGS.clone()).await; + let keymap_file = WatchedJsonFile::new(fs, &executor, paths::KEYMAP.clone()).await; tx.send((settings_file, keymap_file)).ok() }) .detach(); diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index d6106d78e4..a8ec71bd4b 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -1,7 +1,6 @@ mod feedback; pub mod languages; pub mod menus; -pub mod paths; #[cfg(any(test, feature = "test-support"))] pub mod test; @@ -31,7 +30,7 @@ use serde::Deserialize; use serde_json::to_string_pretty; use settings::{keymap_file_json_schema, settings_file_json_schema, ReleaseChannel, Settings}; use std::{env, path::Path, str, sync::Arc}; -use util::ResultExt; +use util::{paths, ResultExt}; pub use workspace; use workspace::{sidebar::SidebarSide, AppState, Workspace}; From 479816111815c5eaadea28a3b027cd6a9596018b Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 14 Nov 2022 13:18:44 -0800 Subject: [PATCH 155/240] Distributed database pattern built. 
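A sketch of the resulting wiring, stitched together from the hunks below;
the concrete domain parameters are illustrative assumptions, not verbatim
from this commit:

    // One file-backed handle is opened at startup and stored as a global:
    let db = Db::<KeyValue>::open(&*paths::DB_DIR, RELEASE_CHANNEL_NAME.as_str());
    cx.set_global(db);

    // Crates that own tables re-open the same file under their own domain,
    // whose migrations then run lazily on first use:
    let workspace_db = cx.global::<Db<KeyValue>>().open_as::<Workspace>();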
Co-Authored-By: kay@zed.dev --- Cargo.lock | 4 +- crates/auto_update/src/auto_update.rs | 29 +- crates/auto_update/src/update_notification.rs | 3 +- crates/client/src/client.rs | 7 +- crates/client/src/telemetry.rs | 22 +- crates/collab/Cargo.toml | 1 - crates/collab/src/integration_tests.rs | 11 +- crates/command_palette/src/command_palette.rs | 4 +- crates/db/src/db.rs | 65 +- crates/db/src/kvp.rs | 33 +- crates/gpui/Cargo.toml | 1 + .../bindings/node/binding.cc | 12 +- crates/gpui/src/presenter.rs | 30 + crates/project/src/project.rs | 1 - crates/settings/Cargo.toml | 1 + crates/settings/src/settings.rs | 51 +- crates/sqlez/src/statement.rs | 4 +- crates/sqlez/src/thread_safe_connection.rs | 3 + crates/util/src/channel.rs | 32 + crates/util/src/lib.rs | 1 + crates/workspace/Cargo.toml | 1 + crates/workspace/src/persistence.rs | 494 +++++++++++ crates/workspace/src/persistence/model.rs | 188 +++++ crates/workspace/src/workspace.rs | 17 +- crates/workspace/src/workspace_db.rs | 765 ------------------ crates/workspace/test.db | Bin 0 -> 32768 bytes crates/zed/src/main.rs | 28 +- crates/zed/src/zed.rs | 22 +- 28 files changed, 893 insertions(+), 937 deletions(-) create mode 100644 crates/util/src/channel.rs create mode 100644 crates/workspace/src/persistence.rs create mode 100644 crates/workspace/src/persistence/model.rs delete mode 100644 crates/workspace/src/workspace_db.rs create mode 100644 crates/workspace/test.db diff --git a/Cargo.lock b/Cargo.lock index 74860439dd..bad036a05d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1040,7 +1040,6 @@ dependencies = [ "client", "collections", "ctor", - "db", "editor", "env_logger", "envy", @@ -2428,6 +2427,7 @@ dependencies = [ "simplelog", "smallvec", "smol", + "sqlez", "sum_tree", "time 0.3.17", "tiny-skia", @@ -5307,6 +5307,7 @@ dependencies = [ "serde", "serde_json", "serde_path_to_error", + "sqlez", "theme", "toml", "tree-sitter", @@ -7633,6 +7634,7 @@ dependencies = [ "gpui", "indoc", "language", + "lazy_static", "log", "menu", "parking_lot 0.11.2", diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index d6eaaab826..2a8d2fcf05 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -2,17 +2,17 @@ mod update_notification; use anyhow::{anyhow, Context, Result}; use client::{http::HttpClient, ZED_SECRET_CLIENT_TOKEN}; -use db::{kvp::KeyValue, Db}; +use db::kvp::KEY_VALUE_STORE; use gpui::{ actions, platform::AppVersion, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task, WeakViewHandle, }; use lazy_static::lazy_static; use serde::Deserialize; -use settings::ReleaseChannel; use smol::{fs::File, io::AsyncReadExt, process::Command}; use std::{env, ffi::OsString, path::PathBuf, sync::Arc, time::Duration}; use update_notification::UpdateNotification; +use util::channel::ReleaseChannel; use workspace::Workspace; const SHOULD_SHOW_UPDATE_NOTIFICATION_KEY: &str = "auto-updater-should-show-updated-notification"; @@ -42,7 +42,6 @@ pub struct AutoUpdater { current_version: AppVersion, http_client: Arc, pending_poll: Option>, - db: project::Db, server_url: String, } @@ -56,16 +55,11 @@ impl Entity for AutoUpdater { type Event = (); } -pub fn init( - db: Db, - http_client: Arc, - server_url: String, - cx: &mut MutableAppContext, -) { +pub fn init(http_client: Arc, server_url: String, cx: &mut MutableAppContext) { if let Some(version) = (*ZED_APP_VERSION).or_else(|| cx.platform().app_version().ok()) { let server_url = server_url; let 
auto_updater = cx.add_model(|cx| { - let updater = AutoUpdater::new(version, db, http_client, server_url.clone()); + let updater = AutoUpdater::new(version, http_client, server_url.clone()); updater.start_polling(cx).detach(); updater }); @@ -126,14 +120,12 @@ impl AutoUpdater { fn new( current_version: AppVersion, - db: project::Db, http_client: Arc, server_url: String, ) -> Self { Self { status: AutoUpdateStatus::Idle, current_version, - db, http_client, server_url, pending_poll: None, @@ -303,20 +295,21 @@ impl AutoUpdater { should_show: bool, cx: &AppContext, ) -> Task> { - let db = self.db.clone(); cx.background().spawn(async move { if should_show { - db.write_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY, "")?; + KEY_VALUE_STORE.write_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY, "")?; } else { - db.delete_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?; + KEY_VALUE_STORE.delete_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?; } Ok(()) }) } fn should_show_update_notification(&self, cx: &AppContext) -> Task> { - let db = self.db.clone(); - cx.background() - .spawn(async move { Ok(db.read_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?.is_some()) }) + cx.background().spawn(async move { + Ok(KEY_VALUE_STORE + .read_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)? + .is_some()) + }) } } diff --git a/crates/auto_update/src/update_notification.rs b/crates/auto_update/src/update_notification.rs index 133a197f15..9963ae65b8 100644 --- a/crates/auto_update/src/update_notification.rs +++ b/crates/auto_update/src/update_notification.rs @@ -5,7 +5,8 @@ use gpui::{ Element, Entity, MouseButton, View, ViewContext, }; use menu::Cancel; -use settings::{ReleaseChannel, Settings}; +use settings::Settings; +use util::channel::ReleaseChannel; use workspace::Notification; pub struct UpdateNotification { diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index 907f7e80f1..f9b3a88545 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -11,7 +11,6 @@ use async_tungstenite::tungstenite::{ error::Error as WebsocketError, http::{Request, StatusCode}, }; -use db::{kvp::KeyValue, Db}; use futures::{future::LocalBoxFuture, AsyncReadExt, FutureExt, SinkExt, StreamExt, TryStreamExt}; use gpui::{ actions, @@ -27,7 +26,6 @@ use postage::watch; use rand::prelude::*; use rpc::proto::{AnyTypedEnvelope, EntityMessage, EnvelopedMessage, RequestMessage}; use serde::Deserialize; -use settings::ReleaseChannel; use std::{ any::TypeId, collections::HashMap, @@ -41,6 +39,7 @@ use std::{ use telemetry::Telemetry; use thiserror::Error; use url::Url; +use util::channel::ReleaseChannel; use util::{ResultExt, TryFutureExt}; pub use rpc::*; @@ -1218,8 +1217,8 @@ impl Client { self.peer.respond_with_error(receipt, error) } - pub fn start_telemetry(&self, db: Db) { - self.telemetry.start(db.clone()); + pub fn start_telemetry(&self) { + self.telemetry.start(); } pub fn report_event(&self, kind: &str, properties: Value) { diff --git a/crates/client/src/telemetry.rs b/crates/client/src/telemetry.rs index 16a7c1cc82..0ce1a07f1b 100644 --- a/crates/client/src/telemetry.rs +++ b/crates/client/src/telemetry.rs @@ -1,5 +1,5 @@ use crate::http::HttpClient; -use db::{kvp::KeyValue, Db}; +use db::kvp::KEY_VALUE_STORE; use gpui::{ executor::Background, serde_json::{self, value::Map, Value}, @@ -10,7 +10,6 @@ use lazy_static::lazy_static; use parking_lot::Mutex; use serde::Serialize; use serde_json::json; -use settings::ReleaseChannel; use std::{ io::Write, mem, @@ -19,7 +18,7 @@ use std::{ time::{Duration, SystemTime, UNIX_EPOCH}, }; use 
tempfile::NamedTempFile; -use util::{post_inc, ResultExt, TryFutureExt}; +use util::{channel::ReleaseChannel, post_inc, ResultExt, TryFutureExt}; use uuid::Uuid; pub struct Telemetry { @@ -148,18 +147,19 @@ impl Telemetry { Some(self.state.lock().log_file.as_ref()?.path().to_path_buf()) } - pub fn start(self: &Arc, db: Db) { + pub fn start(self: &Arc) { let this = self.clone(); self.executor .spawn( async move { - let device_id = if let Ok(Some(device_id)) = db.read_kvp("device_id") { - device_id - } else { - let device_id = Uuid::new_v4().to_string(); - db.write_kvp("device_id", &device_id)?; - device_id - }; + let device_id = + if let Ok(Some(device_id)) = KEY_VALUE_STORE.read_kvp("device_id") { + device_id + } else { + let device_id = Uuid::new_v4().to_string(); + KEY_VALUE_STORE.write_kvp("device_id", &device_id)?; + device_id + }; let device_id: Arc = device_id.into(); let mut state = this.state.lock(); diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 1722d3374a..09f379526e 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -18,7 +18,6 @@ live_kit_server = { path = "../live_kit_server" } rpc = { path = "../rpc" } util = { path = "../util" } -db = { path = "../db" } anyhow = "1.0.40" async-trait = "0.1.50" async-tungstenite = "0.16" diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index bfc14618ea..ade4e10280 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1,6 +1,6 @@ use crate::{ - db::{Db, NewUserParams, ProjectId, UserId}, - rpc::{Executor, Server}, + db::{NewUserParams, ProjectId, TestDb, UserId}, + rpc::{Executor, Server, Store}, AppState, }; @@ -12,7 +12,6 @@ use client::{ User, UserStore, RECEIVE_TIMEOUT, }; use collections::{BTreeMap, HashMap, HashSet}; -use db as SqliteDb; use editor::{ self, ConfirmCodeAction, ConfirmCompletion, ConfirmRename, Editor, Redo, Rename, ToOffset, ToggleCodeActions, Undo, @@ -5838,11 +5837,7 @@ impl TestServer { Project::init(&client); cx.update(|cx| { - workspace::init( - app_state.clone(), - cx, - SqliteDb::open_in_memory("integration tests"), - ); + workspace::init(app_state.clone(), cx); call::init(client.clone(), user_store.clone(), cx); }); diff --git a/crates/command_palette/src/command_palette.rs b/crates/command_palette/src/command_palette.rs index f2542c9bc8..5af23b45d7 100644 --- a/crates/command_palette/src/command_palette.rs +++ b/crates/command_palette/src/command_palette.rs @@ -320,7 +320,7 @@ mod tests { use super::*; use editor::Editor; use gpui::TestAppContext; - use project::{Db, Project}; + use project::Project; use workspace::{AppState, Workspace}; #[test] @@ -345,7 +345,7 @@ mod tests { cx.update(|cx| { editor::init(cx); - workspace::init(app_state.clone(), cx, Db::open_in_memory("test")); + workspace::init(app_state.clone(), cx); init(cx); }); diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 02fc51ee8d..56fc79f475 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,11 +1,12 @@ pub mod kvp; use std::fs; -use std::ops::Deref; use std::path::Path; +#[cfg(any(test, feature = "test-support"))] use anyhow::Result; use indoc::indoc; +#[cfg(any(test, feature = "test-support"))] use sqlez::connection::Connection; use sqlez::domain::Domain; use sqlez::thread_safe_connection::ThreadSafeConnection; @@ -17,47 +18,29 @@ const INITIALIZE_QUERY: &'static str = indoc! 
{" PRAGMA case_sensitive_like=TRUE; "}; -#[derive(Clone)] -pub struct Db(ThreadSafeConnection); +/// Open or create a database at the given directory path. +pub fn open_file_db() -> ThreadSafeConnection { + // Use 0 for now. Will implement incrementing and clearing of old db files soon TM + let current_db_dir = (*util::paths::DB_DIR).join(Path::new(&format!( + "0-{}", + *util::channel::RELEASE_CHANNEL_NAME + ))); + fs::create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); + let db_path = current_db_dir.join(Path::new("db.sqlite")); -impl Deref for Db { - type Target = sqlez::connection::Connection; - - fn deref(&self) -> &Self::Target { - &self.0.deref() - } + ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) + .with_initialize_query(INITIALIZE_QUERY) } -impl Db { - /// Open or create a database at the given directory path. - pub fn open(db_dir: &Path, channel: &'static str) -> Self { - // Use 0 for now. Will implement incrementing and clearing of old db files soon TM - let current_db_dir = db_dir.join(Path::new(&format!("0-{}", channel))); - fs::create_dir_all(¤t_db_dir) - .expect("Should be able to create the database directory"); - let db_path = current_db_dir.join(Path::new("db.sqlite")); - - Db( - ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) - .with_initialize_query(INITIALIZE_QUERY), - ) - } - - /// Open a in memory database for testing and as a fallback. - pub fn open_in_memory(db_name: &str) -> Self { - Db(ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY)) - } - - pub fn persisting(&self) -> bool { - self.persistent() - } - - pub fn write_to>(&self, dest: P) -> Result<()> { - let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref()); - self.backup_main(&destination) - } - - pub fn open_as(&self) -> Db { - Db(self.0.for_domain()) - } +pub fn open_memory_db(db_name: &str) -> ThreadSafeConnection { + ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY) +} + +#[cfg(any(test, feature = "test-support"))] +pub fn write_db_to>( + conn: &ThreadSafeConnection, + dest: P, +) -> Result<()> { + let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref()); + conn.backup_main(&destination) } diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index c5c9c1c5b5..1dd1cf69b7 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -1,7 +1,11 @@ -use super::Db; use anyhow::Result; use indoc::indoc; -use sqlez::{connection::Connection, domain::Domain, migrations::Migration}; + +use sqlez::{ + connection::Connection, domain::Domain, migrations::Migration, + thread_safe_connection::ThreadSafeConnection, +}; +use std::ops::Deref; pub(crate) const KVP_MIGRATION: Migration = Migration::new( "kvp", @@ -13,16 +17,29 @@ pub(crate) const KVP_MIGRATION: Migration = Migration::new( "}], ); -#[derive(Clone)] -pub enum KeyValue {} +lazy_static::lazy_static! 
{ + pub static ref KEY_VALUE_STORE: KeyValueStore = + KeyValueStore(crate::open_file_db()); +} -impl Domain for KeyValue { +#[derive(Clone)] +pub struct KeyValueStore(ThreadSafeConnection); + +impl Domain for KeyValueStore { fn migrate(conn: &Connection) -> anyhow::Result<()> { KVP_MIGRATION.run(conn) } } -impl Db { +impl Deref for KeyValueStore { + type Target = ThreadSafeConnection; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl KeyValueStore { pub fn read_kvp(&self, key: &str) -> Result> { self.select_row_bound("SELECT value FROM kv_store WHERE key = (?)")?(key) } @@ -44,11 +61,11 @@ impl Db { mod tests { use anyhow::Result; - use super::*; + use crate::kvp::KeyValueStore; #[test] fn test_kvp() -> Result<()> { - let db = Db::open_in_memory("test_kvp"); + let db = KeyValueStore(crate::open_memory_db("test_kvp")); assert_eq!(db.read_kvp("key-1").unwrap(), None); diff --git a/crates/gpui/Cargo.toml b/crates/gpui/Cargo.toml index 54fe5e46a2..683e3bdfcd 100644 --- a/crates/gpui/Cargo.toml +++ b/crates/gpui/Cargo.toml @@ -17,6 +17,7 @@ collections = { path = "../collections" } gpui_macros = { path = "../gpui_macros" } util = { path = "../util" } sum_tree = { path = "../sum_tree" } +sqlez = { path = "../sqlez" } async-task = "4.0.3" backtrace = { version = "0.3", optional = true } ctor = "0.1" diff --git a/crates/gpui/grammars/context-predicate/bindings/node/binding.cc b/crates/gpui/grammars/context-predicate/bindings/node/binding.cc index 9a3df4b028..1264f49100 100644 --- a/crates/gpui/grammars/context-predicate/bindings/node/binding.cc +++ b/crates/gpui/grammars/context-predicate/bindings/node/binding.cc @@ -1,10 +1,10 @@ +#include "nan.h" #include "tree_sitter/parser.h" #include -#include "nan.h" using namespace v8; -extern "C" TSLanguage * tree_sitter_context_predicate(); +extern "C" TSLanguage *tree_sitter_context_predicate(); namespace { @@ -16,13 +16,15 @@ void Init(Local exports, Local module) { tpl->InstanceTemplate()->SetInternalFieldCount(1); Local constructor = Nan::GetFunction(tpl).ToLocalChecked(); - Local instance = constructor->NewInstance(Nan::GetCurrentContext()).ToLocalChecked(); + Local instance = + constructor->NewInstance(Nan::GetCurrentContext()).ToLocalChecked(); Nan::SetInternalFieldPointer(instance, 0, tree_sitter_context_predicate()); - Nan::Set(instance, Nan::New("name").ToLocalChecked(), Nan::New("context_predicate").ToLocalChecked()); + Nan::Set(instance, Nan::New("name").ToLocalChecked(), + Nan::New("context_predicate").ToLocalChecked()); Nan::Set(module, Nan::New("exports").ToLocalChecked(), instance); } NODE_MODULE(tree_sitter_context_predicate_binding, Init) -} // namespace +} // namespace diff --git a/crates/gpui/src/presenter.rs b/crates/gpui/src/presenter.rs index 27cd2a1347..eb7554a39c 100644 --- a/crates/gpui/src/presenter.rs +++ b/crates/gpui/src/presenter.rs @@ -17,10 +17,15 @@ use crate::{ SceneBuilder, UpgradeModelHandle, UpgradeViewHandle, View, ViewHandle, WeakModelHandle, WeakViewHandle, }; +use anyhow::bail; use collections::{HashMap, HashSet}; use pathfinder_geometry::vector::{vec2f, Vector2F}; use serde_json::json; use smallvec::SmallVec; +use sqlez::{ + bindable::{Bind, Column}, + statement::Statement, +}; use std::{ marker::PhantomData, ops::{Deref, DerefMut, Range}, @@ -895,6 +900,31 @@ impl ToJson for Axis { } } +impl Bind for Axis { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + Axis::Horizontal => "Horizontal", + Axis::Vertical => "Vertical", + } + .bind(statement, 
start_index) + } +} + +impl Column for Axis { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(axis_text, next_index)| { + Ok(( + match axis_text.as_str() { + "Horizontal" => Axis::Horizontal, + "Vertical" => Axis::Vertical, + _ => bail!("Stored serialized axis is incorrect"), + }, + next_index, + )) + }) + } +} + pub trait Vector2FExt { fn along(self, axis: Axis) -> f32; } diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index d1d8c96ce2..94558fee3e 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -63,7 +63,6 @@ use std::{ use thiserror::Error; use util::{defer, post_inc, ResultExt, TryFutureExt as _}; -pub use db::{kvp::KeyValue, Db}; pub use fs::*; pub use worktree::*; diff --git a/crates/settings/Cargo.toml b/crates/settings/Cargo.toml index ad184ad313..a292358e75 100644 --- a/crates/settings/Cargo.toml +++ b/crates/settings/Cargo.toml @@ -14,6 +14,7 @@ test-support = [] assets = { path = "../assets" } collections = { path = "../collections" } gpui = { path = "../gpui" } +sqlez = { path = "../sqlez" } fs = { path = "../fs" } anyhow = "1.0.38" futures = "0.3" diff --git a/crates/settings/src/settings.rs b/crates/settings/src/settings.rs index cb83c2c370..5137751579 100644 --- a/crates/settings/src/settings.rs +++ b/crates/settings/src/settings.rs @@ -2,7 +2,7 @@ mod keymap_file; pub mod settings_file; pub mod watched_json; -use anyhow::Result; +use anyhow::{bail, Result}; use gpui::{ font_cache::{FamilyId, FontCache}, AssetSource, @@ -14,6 +14,10 @@ use schemars::{ }; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_json::Value; +use sqlez::{ + bindable::{Bind, Column}, + statement::Statement, +}; use std::{collections::HashMap, fmt::Write as _, num::NonZeroU32, str, sync::Arc}; use theme::{Theme, ThemeRegistry}; use tree_sitter::Query; @@ -55,24 +59,6 @@ pub struct FeatureFlags { pub experimental_themes: bool, } -#[derive(Copy, Clone, PartialEq, Eq, Default)] -pub enum ReleaseChannel { - #[default] - Dev, - Preview, - Stable, -} - -impl ReleaseChannel { - pub fn name(&self) -> &'static str { - match self { - ReleaseChannel::Dev => "Zed Dev", - ReleaseChannel::Preview => "Zed Preview", - ReleaseChannel::Stable => "Zed", - } - } -} - impl FeatureFlags { pub fn keymap_files(&self) -> Vec<&'static str> { vec![] @@ -244,6 +230,33 @@ pub enum DockAnchor { Expanded, } +impl Bind for DockAnchor { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + DockAnchor::Bottom => "Bottom", + DockAnchor::Right => "Right", + DockAnchor::Expanded => "Expanded", + } + .bind(statement, start_index) + } +} + +impl Column for DockAnchor { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(anchor_text, next_index)| { + Ok(( + match anchor_text.as_ref() { + "Bottom" => DockAnchor::Bottom, + "Right" => DockAnchor::Right, + "Expanded" => DockAnchor::Expanded, + _ => bail!("Stored dock anchor is incorrect"), + }, + next_index, + )) + }) + } +} + #[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema)] pub struct SettingsFileContent { pub experiments: Option, diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index e0b284e628..b04f5bb82f 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -45,8 +45,8 @@ impl<'a> Statement<'a> { let sql =
CString::new(query.as_ref())?; let mut remaining_sql = sql.as_c_str(); while { - let remaining_sql_str = remaining_sql.to_str()?; - remaining_sql_str.trim() != ";" && !remaining_sql_str.is_empty() + let remaining_sql_str = remaining_sql.to_str()?.trim(); + remaining_sql_str != ";" && !remaining_sql_str.is_empty() } { let mut raw_statement = 0 as *mut sqlite3_stmt; let mut remaining_sql_ptr = ptr::null(); diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 1081101f6a..b9bb1657ea 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -13,6 +13,9 @@ pub struct ThreadSafeConnection { _pd: PhantomData, } +unsafe impl Send for ThreadSafeConnection {} +unsafe impl Sync for ThreadSafeConnection {} + impl ThreadSafeConnection { pub fn new(uri: &str, persistent: bool) -> Self { Self { diff --git a/crates/util/src/channel.rs b/crates/util/src/channel.rs new file mode 100644 index 0000000000..ab5b53b4ab --- /dev/null +++ b/crates/util/src/channel.rs @@ -0,0 +1,32 @@ +use std::env; + +use lazy_static::lazy_static; + +lazy_static! { + pub static ref RELEASE_CHANNEL_NAME: String = env::var("ZED_RELEASE_CHANNEL") + .unwrap_or(include_str!("../../zed/RELEASE_CHANNEL").to_string()); + pub static ref RELEASE_CHANNEL: ReleaseChannel = match RELEASE_CHANNEL_NAME.as_str() { + "dev" => ReleaseChannel::Dev, + "preview" => ReleaseChannel::Preview, + "stable" => ReleaseChannel::Stable, + _ => panic!("invalid release channel {}", *RELEASE_CHANNEL_NAME), + }; +} + +#[derive(Copy, Clone, PartialEq, Eq, Default)] +pub enum ReleaseChannel { + #[default] + Dev, + Preview, + Stable, +} + +impl ReleaseChannel { + pub fn name(&self) -> &'static str { + match self { + ReleaseChannel::Dev => "Zed Dev", + ReleaseChannel::Preview => "Zed Preview", + ReleaseChannel::Stable => "Zed", + } + } +} diff --git a/crates/util/src/lib.rs b/crates/util/src/lib.rs index b03bc21210..78536f01d0 100644 --- a/crates/util/src/lib.rs +++ b/crates/util/src/lib.rs @@ -1,3 +1,4 @@ +pub mod channel; pub mod paths; #[cfg(any(test, feature = "test-support"))] pub mod test; diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index f8bcba5eb7..553479b175 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -36,6 +36,7 @@ util = { path = "../util" } bincode = "1.2.1" anyhow = "1.0.38" futures = "0.3" +lazy_static = "1.4" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" postage = { version = "0.4.1", features = ["futures-traits"] } diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs new file mode 100644 index 0000000000..8a80dc5a76 --- /dev/null +++ b/crates/workspace/src/persistence.rs @@ -0,0 +1,494 @@ +#![allow(dead_code)] + +pub mod model; + +use std::ops::Deref; +use std::path::{Path, PathBuf}; + +use anyhow::{bail, Context, Result}; +use db::open_file_db; +use gpui::Axis; +use indoc::indoc; +use lazy_static::lazy_static; + +use sqlez::thread_safe_connection::ThreadSafeConnection; +use sqlez::{connection::Connection, domain::Domain, migrations::Migration}; +use util::{iife, unzip_option, ResultExt}; + +use super::Workspace; + +use model::{ + GroupId, PaneId, SerializedItem, SerializedItemKind, SerializedPane, SerializedPaneGroup, + SerializedWorkspace, WorkspaceId, +}; + +lazy_static! 
{ + pub static ref DB: WorkspaceDb = WorkspaceDb(open_file_db()); +} + +pub struct WorkspaceDb(ThreadSafeConnection); + +impl Deref for WorkspaceDb { + type Target = ThreadSafeConnection; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( + "workspace", + &[indoc! {" + CREATE TABLE workspaces( + workspace_id BLOB PRIMARY KEY, + dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' + dock_visible INTEGER, -- Boolean + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL + ) STRICT; + + CREATE TABLE pane_groups( + group_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + parent_group_id INTEGER, -- NULL indicates that this is a root node + position INTEGER, -- NULL indicates that this is a root node + axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + + CREATE TABLE panes( + pane_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + parent_group_id INTEGER, -- NULL, this is a dock pane + position INTEGER, -- NULL, this is a dock pane + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + + CREATE TABLE items( + item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique + workspace_id BLOB NOT NULL, + pane_id INTEGER NOT NULL, + kind TEXT NOT NULL, + position INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE + PRIMARY KEY(item_id, workspace_id) + ) STRICT; + "}], +); + +impl Domain for Workspace { + fn migrate(conn: &Connection) -> anyhow::Result<()> { + WORKSPACES_MIGRATION.run(&conn) + } +} + +impl WorkspaceDb { + /// Returns a serialized workspace for the given worktree_roots. If the passed array + /// is empty, the most recent workspace is returned instead. If no workspace for the + /// passed roots is stored, returns none. + pub fn workspace_for_roots>( + &self, + worktree_roots: &[P], + ) -> Option { + let workspace_id: WorkspaceId = worktree_roots.into(); + + // Note that we re-assign the workspace_id here in case it's empty + // and we've grabbed the most recent workspace + let (workspace_id, dock_anchor, dock_visible) = iife!({ + if worktree_roots.len() == 0 { + self.select_row(indoc! {" + SELECT workspace_id, dock_anchor, dock_visible + FROM workspaces + ORDER BY timestamp DESC LIMIT 1"})?()? + } else { + self.select_row_bound(indoc! {" + SELECT workspace_id, dock_anchor, dock_visible + FROM workspaces + WHERE workspace_id = ?"})?(&workspace_id)? + } + .context("No workspaces found") + }) + .warn_on_err() + .flatten()?; + + Some(SerializedWorkspace { + dock_pane: self + .get_dock_pane(&workspace_id) + .context("Getting dock pane") + .log_err()?, + center_group: self + .get_center_pane_group(&workspace_id) + .context("Getting center group") + .log_err()?, + dock_anchor, + dock_visible, + }) + } + + /// Saves a workspace using the worktree roots. 
Will garbage-collect any workspace + /// previously saved under the same roots pub fn save_workspace>( + &self, + worktree_roots: &[P], + old_roots: Option<&[P]>, + workspace: &SerializedWorkspace, + ) { + let workspace_id: WorkspaceId = worktree_roots.into(); + + self.with_savepoint("update_worktrees", || { + if let Some(old_roots) = old_roots { + let old_id: WorkspaceId = old_roots.into(); + + self.exec_bound("DELETE FROM WORKSPACES WHERE workspace_id = ?")?(&old_id)?; + } + + // Delete any previous workspaces with the same roots. This cascades to all + // other tables that are based on the same roots set. + // Insert new workspace into workspaces table if none were found + self.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?; + + self.exec_bound( + "INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?, ?, ?)", + )?((&workspace_id, workspace.dock_anchor, workspace.dock_visible))?; + + // Save center pane group and dock pane + self.save_pane_group(&workspace_id, &workspace.center_group, None)?; + self.save_pane(&workspace_id, &workspace.dock_pane, None)?; + + Ok(()) + }) + .with_context(|| { + format!( + "Update workspace with roots {:?}", + worktree_roots + .iter() + .map(|p| p.as_ref()) + .collect::>() + ) + }) + .log_err(); + } + + /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots + pub fn recent_workspaces(&self, limit: usize) -> Vec> { + iife!({ + // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html + Ok::<_, anyhow::Error>( + self.select_bound::( + "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", + )?(limit)? + .into_iter() + .map(|id| id.paths()) + .collect::>>(), + ) + }) + .log_err() + .unwrap_or_default() + } + + pub(crate) fn get_center_pane_group( + &self, + workspace_id: &WorkspaceId, + ) -> Result { + self.get_pane_group_children(workspace_id, None)? + .into_iter() + .next() + .context("No center pane group") + } + + fn get_pane_group_children<'a>( + &self, + workspace_id: &WorkspaceId, + group_id: Option, + ) -> Result> { + self.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! {" + SELECT group_id, axis, pane_id + FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id + FROM pane_groups + UNION + SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id + FROM panes + -- Remove the dock panes from the union + WHERE parent_group_id IS NOT NULL and position IS NOT NULL) + WHERE parent_group_id IS ? AND workspace_id = ? + ORDER BY position + "})?((group_id, workspace_id))? + .into_iter() + .map(|(group_id, axis, pane_id)| { + if let Some((group_id, axis)) = group_id.zip(axis) { + Ok(SerializedPaneGroup::Group { + axis, + children: self.get_pane_group_children( + workspace_id, + Some(group_id), + )?, + }) + } else if let Some(pane_id) = pane_id { + Ok(SerializedPaneGroup::Pane(SerializedPane { + children: self.get_items(pane_id)?, + })) + } else { + bail!("Pane Group Child was neither a pane group nor a pane"); + } + }) + .collect::>() + } + + pub(crate) fn save_pane_group( + &self, + workspace_id: &WorkspaceId, + pane_group: &SerializedPaneGroup, + parent: Option<(GroupId, usize)>, + ) -> Result<()> { + if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { ..
}) { + bail!("Pane groups must have a SerializedPaneGroup::Group at the root") + } + + let (parent_id, position) = unzip_option(parent); + + match pane_group { + SerializedPaneGroup::Group { axis, children } => { + let parent_id = self.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? + ((workspace_id, parent_id, position, *axis))?; + + for (position, group) in children.iter().enumerate() { + self.save_pane_group(workspace_id, group, Some((parent_id, position)))? + } + Ok(()) + } + SerializedPaneGroup::Pane(pane) => self.save_pane(workspace_id, pane, parent), + } + } + + pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { + let pane_id = self.select_row_bound(indoc! {" + SELECT pane_id FROM panes + WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?( + workspace_id, + )? + .context("No dock pane for workspace")?; + + Ok(SerializedPane::new( + self.get_items(pane_id).context("Reading items")?, + )) + } + + pub(crate) fn save_pane( + &self, + workspace_id: &WorkspaceId, + pane: &SerializedPane, + parent: Option<(GroupId, usize)>, + ) -> Result<()> { + let (parent_id, order) = unzip_option(parent); + + let pane_id = self.insert_bound( + "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)", + )?((workspace_id, parent_id, order))?; + + self.save_items(workspace_id, pane_id, &pane.children) + .context("Saving items") + } + + pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { + Ok(self.select_bound(indoc! {" + SELECT item_id, kind FROM items + WHERE pane_id = ? + ORDER BY position"})?(pane_id)? + .into_iter() + .map(|(item_id, kind)| match kind { + SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, + _ => unimplemented!(), + }) + .collect()) + } + + pub(crate) fn save_items( + &self, + workspace_id: &WorkspaceId, + pane_id: PaneId, + items: &[SerializedItem], + ) -> Result<()> { + let mut delete_old = self + .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? 
AND item_id = ?") + .context("Preparing deletion")?; + let mut insert_new = self.exec_bound( + "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", + ).context("Preparing insertion")?; + for (position, item) in items.iter().enumerate() { + delete_old((workspace_id, pane_id, item.item_id()))?; + insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?; + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use db::open_memory_db; + use settings::DockAnchor; + + use super::*; + + #[test] + fn test_workspace_assignment() { + // env_logger::try_init().ok(); + + let db = WorkspaceDb(open_memory_db("test_basic_functionality")); + + let workspace_1 = SerializedWorkspace { + dock_anchor: DockAnchor::Bottom, + dock_visible: true, + center_group: Default::default(), + dock_pane: Default::default(), + }; + + let workspace_2 = SerializedWorkspace { + dock_anchor: DockAnchor::Expanded, + dock_visible: false, + center_group: Default::default(), + dock_pane: Default::default(), + }; + + let workspace_3 = SerializedWorkspace { + dock_anchor: DockAnchor::Right, + dock_visible: true, + center_group: Default::default(), + dock_pane: Default::default(), + }; + + db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_1); + db.save_workspace(&["/tmp"], None, &workspace_2); + + db::write_db_to(&db, "test.db").unwrap(); + + // Test that paths are treated as a set + assert_eq!( + db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + workspace_1 + ); + assert_eq!( + db.workspace_for_roots(&["/tmp2", "/tmp"]).unwrap(), + workspace_1 + ); + + // Make sure that other keys work + assert_eq!(db.workspace_for_roots(&["/tmp"]).unwrap(), workspace_2); + assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None); + + // Test 'mutate' case of updating a pre-existing id + db.save_workspace(&["/tmp", "/tmp2"], Some(&["/tmp", "/tmp2"]), &workspace_2); + assert_eq!( + db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + workspace_2 + ); + + // Test other mechanism for mutating + db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_3); + assert_eq!( + db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + workspace_3 + ); + + // Make sure that updating paths differently also works + db.save_workspace( + &["/tmp3", "/tmp4", "/tmp2"], + Some(&["/tmp", "/tmp2"]), + &workspace_3, + ); + assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None); + assert_eq!( + db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]) + .unwrap(), + workspace_3 + ); + } + + use crate::persistence::model::SerializedWorkspace; + use crate::persistence::model::{SerializedItem, SerializedPane, SerializedPaneGroup}; + + fn default_workspace( + dock_pane: SerializedPane, + center_group: &SerializedPaneGroup, + ) -> SerializedWorkspace { + SerializedWorkspace { + dock_anchor: DockAnchor::Right, + dock_visible: false, + center_group: center_group.clone(), + dock_pane, + } + } + + #[test] + fn test_basic_dock_pane() { + // env_logger::try_init().ok(); + + let db = WorkspaceDb(open_memory_db("basic_dock_pane")); + + let dock_pane = crate::persistence::model::SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 1 }, + SerializedItem::Terminal { item_id: 4 }, + SerializedItem::Terminal { item_id: 2 }, + SerializedItem::Terminal { item_id: 3 }, + ], + }; + + let workspace = default_workspace(dock_pane, &Default::default()); + + db.save_workspace(&["/tmp"], None, &workspace); + + let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); + + 
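// Round-trip check: get_items reads children back ORDER BY position, so the + // dock items saved above must come back in their original order. +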
assert_eq!(workspace.dock_pane, new_workspace.dock_pane); + } + + #[test] + fn test_simple_split() { + // env_logger::try_init().ok(); + + let db = WorkspaceDb(open_memory_db("simple_split")); + + // ----------------- + // | 1,2 | 5,6 | + // | - - - | | + // | 3,4 | | + // ----------------- + let center_pane = SerializedPaneGroup::Group { + axis: gpui::Axis::Horizontal, + children: vec![ + SerializedPaneGroup::Group { + axis: gpui::Axis::Vertical, + children: vec![ + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 1 }, + SerializedItem::Terminal { item_id: 2 }, + ], + }), + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 4 }, + SerializedItem::Terminal { item_id: 3 }, + ], + }), + ], + }, + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 5 }, + SerializedItem::Terminal { item_id: 6 }, + ], + }), + ], + }; + + let workspace = default_workspace(Default::default(), &center_pane); + + db.save_workspace(&["/tmp"], None, &workspace); + + assert_eq!(workspace.center_group, center_pane); + } +} diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs new file mode 100644 index 0000000000..824f649f98 --- /dev/null +++ b/crates/workspace/src/persistence/model.rs @@ -0,0 +1,188 @@ +use std::{ + path::{Path, PathBuf}, + sync::Arc, +}; + +use anyhow::{bail, Result}; + +use gpui::Axis; +use settings::DockAnchor; +use sqlez::{ + bindable::{Bind, Column}, + statement::Statement, +}; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) struct WorkspaceId(Vec); + +impl WorkspaceId { + pub fn paths(self) -> Vec { + self.0 + } +} + +impl, T: IntoIterator> From for WorkspaceId { + fn from(iterator: T) -> Self { + let mut roots = iterator + .into_iter() + .map(|p| p.as_ref().to_path_buf()) + .collect::>(); + roots.sort(); + Self(roots) + } +} + +impl Bind for &WorkspaceId { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + bincode::serialize(&self.0) + .expect("Bincode serialization of paths should not fail") + .bind(statement, start_index) + } +} + +impl Column for WorkspaceId { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let blob = statement.column_blob(start_index)?; + Ok((WorkspaceId(bincode::deserialize(blob)?), start_index + 1)) + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct SerializedWorkspace { + pub dock_anchor: DockAnchor, + pub dock_visible: bool, + pub center_group: SerializedPaneGroup, + pub dock_pane: SerializedPane, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum SerializedPaneGroup { + Group { + axis: Axis, + children: Vec, + }, + Pane(SerializedPane), +} + +impl Default for SerializedPaneGroup { + fn default() -> Self { + Self::Group { + axis: Axis::Horizontal, + children: vec![Self::Pane(Default::default())], + } + } +} + +#[derive(Debug, PartialEq, Eq, Default, Clone)] +pub struct SerializedPane { + pub(crate) children: Vec, +} + +impl SerializedPane { + pub fn new(children: Vec) -> Self { + SerializedPane { children } + } +} + +pub type GroupId = i64; +pub type PaneId = i64; +pub type ItemId = usize; + +pub(crate) enum SerializedItemKind { + Editor, + Diagnostics, + ProjectSearch, + Terminal, +} + +impl Bind for SerializedItemKind { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + SerializedItemKind::Editor => "Editor", + SerializedItemKind::Diagnostics => "Diagnostics",
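+ // These literals are the TEXT values stored in the items.kind column; + // the Column impl below must map the same strings back when loading. +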
SerializedItemKind::ProjectSearch => "ProjectSearch", + SerializedItemKind::Terminal => "Terminal", + } + .bind(statement, start_index) + } +} + +impl Column for SerializedItemKind { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(kind_text, next_index)| { + Ok(( + match kind_text.as_ref() { + "Editor" => SerializedItemKind::Editor, + "Diagnostics" => SerializedItemKind::Diagnostics, + "ProjectSearch" => SerializedItemKind::ProjectSearch, + "Terminal" => SerializedItemKind::Terminal, + _ => bail!("Stored serialized item kind is incorrect"), + }, + next_index, + )) + }) + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum SerializedItem { + Editor { item_id: usize, path: Arc }, + Diagnostics { item_id: usize }, + ProjectSearch { item_id: usize, query: String }, + Terminal { item_id: usize }, +} + +impl SerializedItem { + pub fn item_id(&self) -> usize { + match self { + SerializedItem::Editor { item_id, .. } => *item_id, + SerializedItem::Diagnostics { item_id } => *item_id, + SerializedItem::ProjectSearch { item_id, .. } => *item_id, + SerializedItem::Terminal { item_id } => *item_id, + } + } + + pub(crate) fn kind(&self) -> SerializedItemKind { + match self { + SerializedItem::Editor { .. } => SerializedItemKind::Editor, + SerializedItem::Diagnostics { .. } => SerializedItemKind::Diagnostics, + SerializedItem::ProjectSearch { .. } => SerializedItemKind::ProjectSearch, + SerializedItem::Terminal { .. } => SerializedItemKind::Terminal, + } + } +} + +#[cfg(test)] +mod tests { + use sqlez::connection::Connection; + + use crate::persistence::model::DockAnchor; + + use super::WorkspaceId; + + #[test] + fn test_workspace_round_trips() { + let db = Connection::open_memory("workspace_id_round_trips"); + + db.exec(indoc::indoc! 
{" + CREATE TABLE workspace_id_test( + workspace_id BLOB, + dock_anchor TEXT + );"}) + .unwrap()() + .unwrap(); + + let workspace_id: WorkspaceId = WorkspaceId::from(&["\test2", "\test1"]); + + db.exec_bound("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)") + .unwrap()((&workspace_id, DockAnchor::Bottom)) + .unwrap(); + + assert_eq!( + db.select_row("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1") + .unwrap()() + .unwrap(), + Some((WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom)) + ); + } +} diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 39843859c0..085d9e2eb2 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -5,19 +5,18 @@ pub mod dock; pub mod pane; pub mod pane_group; +mod persistence; pub mod searchable; pub mod shared_screen; pub mod sidebar; mod status_bar; mod toolbar; -mod workspace_db; -use crate::workspace_db::model::SerializedWorkspace; +use crate::persistence::model::SerializedWorkspace; use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; -use db::{kvp::KeyValue, Db}; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -165,9 +164,7 @@ impl_internal_actions!( ); impl_actions!(workspace, [ActivatePane]); -pub fn init(app_state: Arc, cx: &mut MutableAppContext, db: Db) { - cx.set_global(db); - +pub fn init(app_state: Arc, cx: &mut MutableAppContext) { pane::init(cx); dock::init(cx); @@ -1291,12 +1288,8 @@ impl Workspace { } // Use the resolved worktree roots to get the serialized_db from the database - let serialized_workspace = cx.read(|cx| { - Workspace::workspace_for_roots( - cx.global::>(), - &Vec::from_iter(worktree_roots.into_iter())[..], - ) - }); + let serialized_workspace = persistence::DB + .workspace_for_roots(&Vec::from_iter(worktree_roots.into_iter())[..]); // Use the serialized workspace to construct the new window let (_, workspace) = cx.add_window((app_state.build_window_options)(), |cx| { diff --git a/crates/workspace/src/workspace_db.rs b/crates/workspace/src/workspace_db.rs deleted file mode 100644 index e896dd6c27..0000000000 --- a/crates/workspace/src/workspace_db.rs +++ /dev/null @@ -1,765 +0,0 @@ -use anyhow::{bail, Context, Result}; - -use db::Db; -use util::{iife, unzip_option, ResultExt}; - -use std::path::{Path, PathBuf}; - -use indoc::indoc; -use sqlez::{connection::Connection, domain::Domain, migrations::Migration}; - -use super::Workspace; - -use self::model::{ - Axis, GroupId, PaneId, SerializedItem, SerializedItemKind, SerializedPane, SerializedPaneGroup, - SerializedWorkspace, WorkspaceId, -}; - -// 1) Move all of this into Workspace crate -// 2) Deserialize items fully -// 3) Typed prepares (including how you expect to pull data out) -// 4) Investigate Tree column impls - -pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( - "workspace", - &[indoc! {" - CREATE TABLE workspaces( - workspace_id BLOB PRIMARY KEY, - dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' - dock_visible INTEGER, -- Boolean - timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL - ) STRICT; - "}], -); - -pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( - "pane", - &[indoc! 
{" - CREATE TABLE pane_groups( - group_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL indicates that this is a root node - position INTEGER, -- NULL indicates that this is a root node - axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE - ) STRICT; - - CREATE TABLE panes( - pane_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL, this is a dock pane - position INTEGER, -- NULL, this is a dock pane - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE - ) STRICT; - "}], -); - -pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new( - "item", - &[indoc! {" - CREATE TABLE items( - item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique - workspace_id BLOB NOT NULL, - pane_id INTEGER NOT NULL, - kind TEXT NOT NULL, - position INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE - PRIMARY KEY(item_id, workspace_id) - ) STRICT; - "}], -); - -impl Domain for Workspace { - fn migrate(conn: &Connection) -> anyhow::Result<()> { - WORKSPACES_MIGRATION.run(&conn)?; - PANE_MIGRATIONS.run(&conn)?; - ITEM_MIGRATIONS.run(&conn) - } -} - -impl Workspace { - /// Returns a serialized workspace for the given worktree_roots. If the passed array - /// is empty, the most recent workspace is returned instead. If no workspace for the - /// passed roots is stored, returns none. - pub fn workspace_for_roots>( - db: &Db, - worktree_roots: &[P], - ) -> Option { - let workspace_id: WorkspaceId = worktree_roots.into(); - - // Note that we re-assign the workspace_id here in case it's empty - // and we've grabbed the most recent workspace - let (workspace_id, dock_anchor, dock_visible) = iife!({ - if worktree_roots.len() == 0 { - db.select_row(indoc! {" - SELECT workspace_id, dock_anchor, dock_visible - FROM workspaces - ORDER BY timestamp DESC LIMIT 1"})?()? - } else { - db.select_row_bound(indoc! {" - SELECT workspace_id, dock_anchor, dock_visible - FROM workspaces - WHERE workspace_id = ?"})?(&workspace_id)? - } - .context("No workspaces found") - }) - .warn_on_err() - .flatten()?; - - Some(SerializedWorkspace { - dock_pane: Workspace::get_dock_pane(&db, &workspace_id) - .context("Getting dock pane") - .log_err()?, - center_group: Workspace::get_center_pane_group(&db, &workspace_id) - .context("Getting center group") - .log_err()?, - dock_anchor, - dock_visible, - }) - } - - /// Saves a workspace using the worktree roots. Will garbage collect any workspaces - /// that used this workspace previously - pub fn save_workspace>( - db: &Db, - worktree_roots: &[P], - old_roots: Option<&[P]>, - workspace: &SerializedWorkspace, - ) { - let workspace_id: WorkspaceId = worktree_roots.into(); - - db.with_savepoint("update_worktrees", || { - if let Some(old_roots) = old_roots { - let old_id: WorkspaceId = old_roots.into(); - - db.exec_bound("DELETE FROM WORKSPACES WHERE workspace_id = ?")?(&old_id)?; - } - - // Delete any previous workspaces with the same roots. This cascades to all - // other tables that are based on the same roots set. 
- // Insert new workspace into workspaces table if none were found - db.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?; - - db.exec_bound( - "INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?, ?, ?)", - )?((&workspace_id, workspace.dock_anchor, workspace.dock_visible))?; - - // Save center pane group and dock pane - Workspace::save_pane_group(db, &workspace_id, &workspace.center_group, None)?; - Workspace::save_pane(db, &workspace_id, &workspace.dock_pane, None)?; - - Ok(()) - }) - .with_context(|| { - format!( - "Update workspace with roots {:?}", - worktree_roots - .iter() - .map(|p| p.as_ref()) - .collect::>() - ) - }) - .log_err(); - } - - /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots - pub fn recent_workspaces(conn: &Connection, limit: usize) -> Vec> { - iife!({ - // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html - Ok::<_, anyhow::Error>( - conn.select_bound::( - "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", - )?(limit)? - .into_iter() - .map(|id| id.paths()) - .collect::>>(), - ) - }) - .log_err() - .unwrap_or_default() - } - - pub(crate) fn get_center_pane_group( - db: &Db, - workspace_id: &WorkspaceId, - ) -> Result { - Workspace::get_pane_group_children(&db, workspace_id, None)? - .into_iter() - .next() - .context("No center pane group") - } - - fn get_pane_group_children<'a>( - db: &Db, - workspace_id: &WorkspaceId, - group_id: Option, - ) -> Result> { - db.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! {" - SELECT group_id, axis, pane_id - FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id - FROM pane_groups - UNION - SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id - FROM panes - -- Remove the dock panes from the union - WHERE parent_group_id IS NOT NULL and position IS NOT NULL) - WHERE parent_group_id IS ? AND workspace_id = ? - ORDER BY position - "})?((group_id, workspace_id))? - .into_iter() - .map(|(group_id, axis, pane_id)| { - if let Some((group_id, axis)) = group_id.zip(axis) { - Ok(SerializedPaneGroup::Group { - axis, - children: Workspace::get_pane_group_children( - db, - workspace_id, - Some(group_id), - )?, - }) - } else if let Some(pane_id) = pane_id { - Ok(SerializedPaneGroup::Pane(SerializedPane { - children: Workspace::get_items(db, pane_id)?, - })) - } else { - bail!("Pane Group Child was neither a pane group or a pane"); - } - }) - .collect::>() - } - - pub(crate) fn save_pane_group( - db: &Db, - workspace_id: &WorkspaceId, - pane_group: &SerializedPaneGroup, - parent: Option<(GroupId, usize)>, - ) -> Result<()> { - if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { .. }) { - bail!("Pane groups must have a SerializedPaneGroup::Group at the root") - } - - let (parent_id, position) = unzip_option(parent); - - match pane_group { - SerializedPaneGroup::Group { axis, children } => { - let parent_id = db.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? - ((workspace_id, parent_id, position, *axis))?; - - for (position, group) in children.iter().enumerate() { - Workspace::save_pane_group( - db, - workspace_id, - group, - Some((parent_id, position)), - )? 
- } - Ok(()) - } - SerializedPaneGroup::Pane(pane) => Workspace::save_pane(db, workspace_id, pane, parent), - } - } - - pub(crate) fn get_dock_pane( - db: &Db, - workspace_id: &WorkspaceId, - ) -> Result { - let pane_id = db.select_row_bound(indoc! {" - SELECT pane_id FROM panes - WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?( - workspace_id, - )? - .context("No dock pane for workspace")?; - - Ok(SerializedPane::new( - Workspace::get_items(db, pane_id).context("Reading items")?, - )) - } - - pub(crate) fn save_pane( - db: &Db, - workspace_id: &WorkspaceId, - pane: &SerializedPane, - parent: Option<(GroupId, usize)>, - ) -> Result<()> { - let (parent_id, order) = unzip_option(parent); - - let pane_id = db.insert_bound( - "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)", - )?((workspace_id, parent_id, order))?; - - Workspace::save_items(db, workspace_id, pane_id, &pane.children).context("Saving items") - } - - pub(crate) fn get_items(db: &Db, pane_id: PaneId) -> Result> { - Ok(db.select_bound(indoc! {" - SELECT item_id, kind FROM items - WHERE pane_id = ? - ORDER BY position"})?(pane_id)? - .into_iter() - .map(|(item_id, kind)| match kind { - SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, - _ => unimplemented!(), - }) - .collect()) - } - - pub(crate) fn save_items( - db: &Db, - workspace_id: &WorkspaceId, - pane_id: PaneId, - items: &[SerializedItem], - ) -> Result<()> { - let mut delete_old = db - .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? AND item_id = ?") - .context("Preparing deletion")?; - let mut insert_new = db.exec_bound( - "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", - ).context("Preparing insertion")?; - for (position, item) in items.iter().enumerate() { - delete_old((workspace_id, pane_id, item.item_id()))?; - insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?; - } - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use crate::workspace_db::model::DockAnchor::{Bottom, Expanded, Right}; - use crate::{Db, Workspace}; - - #[test] - fn test_workspace_assignment() { - // env_logger::try_init().ok(); - - let db = Db::open_in_memory("test_basic_functionality"); - - let workspace_1 = SerializedWorkspace { - dock_anchor: Bottom, - dock_visible: true, - center_group: Default::default(), - dock_pane: Default::default(), - }; - - let workspace_2 = SerializedWorkspace { - dock_anchor: Expanded, - dock_visible: false, - center_group: Default::default(), - dock_pane: Default::default(), - }; - - let workspace_3 = SerializedWorkspace { - dock_anchor: Right, - dock_visible: true, - center_group: Default::default(), - dock_pane: Default::default(), - }; - - Workspace::save_workspace(&db, &["/tmp", "/tmp2"], None, &workspace_1); - Workspace::save_workspace(&db, &["/tmp"], None, &workspace_2); - - db.write_to("test.db").unwrap(); - - // Test that paths are treated as a set - assert_eq!( - Workspace::workspace_for_roots(&db, &["/tmp", "/tmp2"]).unwrap(), - workspace_1 - ); - assert_eq!( - Workspace::workspace_for_roots(&db, &["/tmp2", "/tmp"]).unwrap(), - workspace_1 - ); - - // Make sure that other keys work - assert_eq!( - Workspace::workspace_for_roots(&db, &["/tmp"]).unwrap(), - workspace_2 - ); - assert_eq!( - Workspace::workspace_for_roots(&db, &["/tmp3", "/tmp2", "/tmp4"]), - None - ); - - // Test 'mutate' case of updating a pre-existing id - Workspace::save_workspace( - &db, - &["/tmp", "/tmp2"], - Some(&["/tmp", 
"/tmp2"]), - &workspace_2, - ); - assert_eq!( - Workspace::workspace_for_roots(&db, &["/tmp", "/tmp2"]).unwrap(), - workspace_2 - ); - - // Test other mechanism for mutating - Workspace::save_workspace(&db, &["/tmp", "/tmp2"], None, &workspace_3); - assert_eq!( - Workspace::workspace_for_roots(&db, &["/tmp", "/tmp2"]).unwrap(), - workspace_3 - ); - - // Make sure that updating paths differently also works - Workspace::save_workspace( - &db, - &["/tmp3", "/tmp4", "/tmp2"], - Some(&["/tmp", "/tmp2"]), - &workspace_3, - ); - assert_eq!(Workspace::workspace_for_roots(&db, &["/tmp2", "tmp"]), None); - assert_eq!( - Workspace::workspace_for_roots(&db, &["/tmp2", "/tmp3", "/tmp4"]).unwrap(), - workspace_3 - ); - } - - use crate::workspace_db::model::SerializedWorkspace; - use crate::workspace_db::model::{SerializedItem, SerializedPane, SerializedPaneGroup}; - - fn default_workspace( - dock_pane: SerializedPane, - center_group: &SerializedPaneGroup, - ) -> SerializedWorkspace { - SerializedWorkspace { - dock_anchor: crate::workspace_db::model::DockAnchor::Right, - dock_visible: false, - center_group: center_group.clone(), - dock_pane, - } - } - - #[test] - fn test_basic_dock_pane() { - // env_logger::try_init().ok(); - - let db = Db::open_in_memory("basic_dock_pane"); - - let dock_pane = crate::workspace_db::model::SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 1 }, - SerializedItem::Terminal { item_id: 4 }, - SerializedItem::Terminal { item_id: 2 }, - SerializedItem::Terminal { item_id: 3 }, - ], - }; - - let workspace = default_workspace(dock_pane, &Default::default()); - - Workspace::save_workspace(&db, &["/tmp"], None, &workspace); - - let new_workspace = Workspace::workspace_for_roots(&db, &["/tmp"]).unwrap(); - - assert_eq!(workspace.dock_pane, new_workspace.dock_pane); - } - - #[test] - fn test_simple_split() { - // env_logger::try_init().ok(); - - let db = Db::open_in_memory("simple_split"); - - // ----------------- - // | 1,2 | 5,6 | - // | - - - | | - // | 3,4 | | - // ----------------- - let center_pane = SerializedPaneGroup::Group { - axis: crate::workspace_db::model::Axis::Horizontal, - children: vec![ - SerializedPaneGroup::Group { - axis: crate::workspace_db::model::Axis::Vertical, - children: vec![ - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 1 }, - SerializedItem::Terminal { item_id: 2 }, - ], - }), - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 4 }, - SerializedItem::Terminal { item_id: 3 }, - ], - }), - ], - }, - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 5 }, - SerializedItem::Terminal { item_id: 6 }, - ], - }), - ], - }; - - let workspace = default_workspace(Default::default(), ¢er_pane); - - Workspace::save_workspace(&db, &["/tmp"], None, &workspace); - - assert_eq!(workspace.center_group, center_pane); - } -} - -pub mod model { - use std::{ - path::{Path, PathBuf}, - sync::Arc, - }; - - use anyhow::{bail, Result}; - - use sqlez::{ - bindable::{Bind, Column}, - statement::Statement, - }; - - #[derive(Debug, Clone, PartialEq, Eq)] - pub(crate) struct WorkspaceId(Vec); - - impl WorkspaceId { - pub fn paths(self) -> Vec { - self.0 - } - } - - impl, T: IntoIterator> From for WorkspaceId { - fn from(iterator: T) -> Self { - let mut roots = iterator - .into_iter() - .map(|p| p.as_ref().to_path_buf()) - .collect::>(); - roots.sort(); - Self(roots) - } - } - - impl Bind for &WorkspaceId { - fn 
bind(&self, statement: &Statement, start_index: i32) -> Result { - bincode::serialize(&self.0) - .expect("Bincode serialization of paths should not fail") - .bind(statement, start_index) - } - } - - impl Column for WorkspaceId { - fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let blob = statement.column_blob(start_index)?; - Ok((WorkspaceId(bincode::deserialize(blob)?), start_index + 1)) - } - } - - #[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] - pub enum DockAnchor { - #[default] - Bottom, - Right, - Expanded, - } - - impl Bind for DockAnchor { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - DockAnchor::Bottom => "Bottom", - DockAnchor::Right => "Right", - DockAnchor::Expanded => "Expanded", - } - .bind(statement, start_index) - } - } - - impl Column for DockAnchor { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(anchor_text, next_index)| { - Ok(( - match anchor_text.as_ref() { - "Bottom" => DockAnchor::Bottom, - "Right" => DockAnchor::Right, - "Expanded" => DockAnchor::Expanded, - _ => bail!("Stored dock anchor is incorrect"), - }, - next_index, - )) - }) - } - } - - #[derive(Debug, PartialEq, Eq)] - pub struct SerializedWorkspace { - pub dock_anchor: DockAnchor, - pub dock_visible: bool, - pub center_group: SerializedPaneGroup, - pub dock_pane: SerializedPane, - } - - #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] - pub enum Axis { - #[default] - Horizontal, - Vertical, - } - - impl Bind for Axis { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - Axis::Horizontal => "Horizontal", - Axis::Vertical => "Vertical", - } - .bind(statement, start_index) - } - } - - impl Column for Axis { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(axis_text, next_index)| { - Ok(( - match axis_text.as_str() { - "Horizontal" => Axis::Horizontal, - "Vertical" => Axis::Vertical, - _ => bail!("Stored serialized item kind is incorrect"), - }, - next_index, - )) - }) - } - } - - #[derive(Debug, PartialEq, Eq, Clone)] - pub enum SerializedPaneGroup { - Group { - axis: Axis, - children: Vec, - }, - Pane(SerializedPane), - } - - // Dock panes, and grouped panes combined? 
-    // Dock panes, and grouped panes combined?
-    // AND we're collapsing PaneGroup::Pane
-    // In the case where
-
-    impl Default for SerializedPaneGroup {
-        fn default() -> Self {
-            Self::Group {
-                axis: Axis::Horizontal,
-                children: vec![Self::Pane(Default::default())],
-            }
-        }
-    }
-
-    #[derive(Debug, PartialEq, Eq, Default, Clone)]
-    pub struct SerializedPane {
-        pub(crate) children: Vec<SerializedItem>,
-    }
-
-    impl SerializedPane {
-        pub fn new(children: Vec<SerializedItem>) -> Self {
-            SerializedPane { children }
-        }
-    }
-
-    pub type GroupId = i64;
-    pub type PaneId = i64;
-    pub type ItemId = usize;
-
-    pub(crate) enum SerializedItemKind {
-        Editor,
-        Diagnostics,
-        ProjectSearch,
-        Terminal,
-    }
-
-    impl Bind for SerializedItemKind {
-        fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
-            match self {
-                SerializedItemKind::Editor => "Editor",
-                SerializedItemKind::Diagnostics => "Diagnostics",
-                SerializedItemKind::ProjectSearch => "ProjectSearch",
-                SerializedItemKind::Terminal => "Terminal",
-            }
-            .bind(statement, start_index)
-        }
-    }
-
-    impl Column for SerializedItemKind {
-        fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
-            String::column(statement, start_index).and_then(|(kind_text, next_index)| {
-                Ok((
-                    match kind_text.as_ref() {
-                        "Editor" => SerializedItemKind::Editor,
-                        "Diagnostics" => SerializedItemKind::Diagnostics,
-                        "ProjectSearch" => SerializedItemKind::ProjectSearch,
-                        "Terminal" => SerializedItemKind::Terminal,
-                        _ => bail!("Stored serialized item kind is incorrect"),
-                    },
-                    next_index,
-                ))
-            })
-        }
-    }
-
-    #[derive(Debug, PartialEq, Eq, Clone)]
-    pub enum SerializedItem {
-        Editor { item_id: usize, path: Arc<Path> },
-        Diagnostics { item_id: usize },
-        ProjectSearch { item_id: usize, query: String },
-        Terminal { item_id: usize },
-    }
-
-    impl SerializedItem {
-        pub fn item_id(&self) -> usize {
-            match self {
-                SerializedItem::Editor { item_id, .. } => *item_id,
-                SerializedItem::Diagnostics { item_id } => *item_id,
-                SerializedItem::ProjectSearch { item_id, .. } => *item_id,
-                SerializedItem::Terminal { item_id } => *item_id,
-            }
-        }
-
-        pub(crate) fn kind(&self) -> SerializedItemKind {
-            match self {
-                SerializedItem::Editor { .. } => SerializedItemKind::Editor,
-                SerializedItem::Diagnostics { .. } => SerializedItemKind::Diagnostics,
-                SerializedItem::ProjectSearch { .. } => SerializedItemKind::ProjectSearch,
-                SerializedItem::Terminal { .. } => SerializedItemKind::Terminal,
-            }
-        }
-    }
-
-    #[cfg(test)]
-    mod tests {
-        use sqlez::connection::Connection;
-
-        use crate::workspace_db::model::DockAnchor;
-
-        use super::WorkspaceId;
-
-        #[test]
-        fn test_workspace_round_trips() {
-            let db = Connection::open_memory("workspace_id_round_trips");
-
-            db.exec(indoc::indoc! {"
-                CREATE TABLE workspace_id_test(
-                    workspace_id BLOB,
-                    dock_anchor TEXT
-                );"})
-            .unwrap()()
-            .unwrap();
-
-            let workspace_id: WorkspaceId = WorkspaceId::from(&["/test2", "/test1"]);
-
-            db.exec_bound("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)")
-                .unwrap()((&workspace_id, DockAnchor::Bottom))
-                .unwrap();
-
-            assert_eq!(
-                db.select_row("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1")
-                    .unwrap()()
-                    .unwrap(),
-                Some((WorkspaceId::from(&["/test1", "/test2"]), DockAnchor::Bottom))
-            );
-        }
-    }
-}
diff --git a/crates/workspace/test.db b/crates/workspace/test.db
new file mode 100644
index 0000000000000000000000000000000000000000..7491ccde3ad6a8c23785f69b92b679e622b1e62c
GIT binary patch
literal 32768
[32 KB base85-encoded zlib literal omitted: a pre-built SQLite database checked in at crates/workspace/test.db]

literal 0
HcmV?d00001

diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs
index 6e7aaba3c6..5f67e290b5 100644
--- a/crates/zed/src/main.rs
+++ b/crates/zed/src/main.rs
@@ -23,7 +23,7 @@ use isahc::{config::Configurable, Request};
 use language::LanguageRegistry;
 use log::LevelFilter;
 use parking_lot::Mutex;
-use project::{Db, Fs, HomeDir, ProjectStore};
+use project::{Fs, HomeDir, ProjectStore};
 use serde_json::json;
 use settings::{
     self, settings_file::SettingsFile, KeymapFileContent, Settings, SettingsFileContent,
@@ -37,12 +37,9 @@ use terminal::terminal_container_view::{get_working_directory, TerminalContainer
 use fs::RealFs;
 use settings::watched_json::{watch_keymap_file, watch_settings_file, WatchedJsonFile};
 use theme::ThemeRegistry;
-use util::{paths, ResultExt, TryFutureExt};
+use util::{channel::RELEASE_CHANNEL, paths, ResultExt, TryFutureExt};
 use workspace::{self, AppState, ItemHandle, NewFile, OpenPaths, Workspace};
-use zed::{
-    self, build_window_options, initialize_workspace, languages, menus, RELEASE_CHANNEL,
-    RELEASE_CHANNEL_NAME,
-};
+use zed::{self, build_window_options, initialize_workspace, languages, menus};
 
 fn main() {
     let http = http::client();
@@ -56,10 +53,6 @@ fn main() {
         .map_or("dev".to_string(), |v| v.to_string());
     init_panic_hook(app_version, http.clone(), app.background());
 
-    let db = app.background().spawn(async move {
-        project::Db::::open(&*paths::DB_DIR, RELEASE_CHANNEL_NAME.as_str())
-    });
-
     load_embedded_fonts(&app);
 
     let fs = Arc::new(RealFs);
@@ -147,10 +140,8 @@ fn main() {
             .detach();
 
             let 
project_store = cx.add_model(|_| ProjectStore::new()); - let db = cx.background().block(db); - cx.set_global(db); - client.start_telemetry(cx.global::>().clone()); + client.start_telemetry(); client.report_event("start app", Default::default()); let app_state = Arc::new(AppState { @@ -164,16 +155,9 @@ fn main() { initialize_workspace, default_item_factory, }); - auto_update::init( - cx.global::>().clone(), - http, - client::ZED_SERVER_URL.clone(), - cx, - ); + auto_update::init(http, client::ZED_SERVER_URL.clone(), cx); - let workspace_db = cx.global::>().open_as::(); - - workspace::init(app_state.clone(), cx, workspace_db); + workspace::init(app_state.clone(), cx); journal::init(app_state.clone(), cx); theme_selector::init(app_state.clone(), cx); diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index a8ec71bd4b..6b6b65ab32 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -12,7 +12,6 @@ use collab_ui::{CollabTitlebarItem, ToggleCollaborationMenu}; use collections::VecDeque; pub use editor; use editor::{Editor, MultiBuffer}; -use lazy_static::lazy_static; use gpui::{ actions, @@ -28,9 +27,9 @@ use project_panel::ProjectPanel; use search::{BufferSearchBar, ProjectSearchBar}; use serde::Deserialize; use serde_json::to_string_pretty; -use settings::{keymap_file_json_schema, settings_file_json_schema, ReleaseChannel, Settings}; +use settings::{keymap_file_json_schema, settings_file_json_schema, Settings}; use std::{env, path::Path, str, sync::Arc}; -use util::{paths, ResultExt}; +use util::{channel::ReleaseChannel, paths, ResultExt}; pub use workspace; use workspace::{sidebar::SidebarSide, AppState, Workspace}; @@ -69,17 +68,6 @@ actions!( const MIN_FONT_SIZE: f32 = 6.0; -lazy_static! { - pub static ref RELEASE_CHANNEL_NAME: String = - env::var("ZED_RELEASE_CHANNEL").unwrap_or(include_str!("../RELEASE_CHANNEL").to_string()); - pub static ref RELEASE_CHANNEL: ReleaseChannel = match RELEASE_CHANNEL_NAME.as_str() { - "dev" => ReleaseChannel::Dev, - "preview" => ReleaseChannel::Preview, - "stable" => ReleaseChannel::Stable, - _ => panic!("invalid release channel {}", *RELEASE_CHANNEL_NAME), - }; -} - pub fn init(app_state: &Arc, cx: &mut gpui::MutableAppContext) { cx.add_action(about); cx.add_global_action(|_: &Hide, cx: &mut gpui::MutableAppContext| { @@ -629,7 +617,7 @@ mod tests { use gpui::{ executor::Deterministic, AssetSource, MutableAppContext, TestAppContext, ViewHandle, }; - use project::{Db, Project, ProjectPath}; + use project::{Project, ProjectPath}; use serde_json::json; use std::{ collections::HashSet, @@ -774,6 +762,8 @@ mod tests { async fn test_new_empty_workspace(cx: &mut TestAppContext) { let app_state = init(cx); cx.dispatch_global_action(workspace::NewFile); + cx.foreground().run_until_parked(); + let window_id = *cx.window_ids().first().unwrap(); let workspace = cx.root_view::(window_id).unwrap(); let editor = workspace.update(cx, |workspace, cx| { @@ -1816,7 +1806,7 @@ mod tests { state.initialize_workspace = initialize_workspace; state.build_window_options = build_window_options; call::init(app_state.client.clone(), app_state.user_store.clone(), cx); - workspace::init(app_state.clone(), cx, Db::open_in_memory("test")); + workspace::init(app_state.clone(), cx); editor::init(cx); pane::init(cx); app_state From c1f79023094ee03ee0f4db350602dd5612b299a8 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Mon, 14 Nov 2022 17:31:12 -0800 Subject: [PATCH 156/240] wip --- .../src/activity_indicator.rs | 2 +- crates/breadcrumbs/src/breadcrumbs.rs | 5 +- 
crates/collab/src/integration_tests.rs | 2 +- crates/diagnostics/src/diagnostics.rs | 9 +- crates/diagnostics/src/items.rs | 4 +- crates/editor/src/editor_tests.rs | 7 +- crates/editor/src/items.rs | 6 +- crates/search/src/buffer_search.rs | 3 +- crates/search/src/project_search.rs | 6 +- .../terminal/src/terminal_container_view.rs | 5 +- crates/theme_testbench/src/theme_testbench.rs | 7 +- crates/workspace/src/dock.rs | 20 +- crates/workspace/src/item.rs | 876 ++++++++++++++ crates/workspace/src/pane.rs | 5 +- crates/workspace/src/persistence.rs | 45 +- crates/workspace/src/persistence/model.rs | 37 +- crates/workspace/src/searchable.rs | 2 +- crates/workspace/src/shared_screen.rs | 22 +- crates/workspace/src/workspace.rs | 1003 ++--------------- crates/zed/src/feedback.rs | 4 +- crates/zed/src/main.rs | 2 +- crates/zed/src/zed.rs | 3 +- 22 files changed, 1114 insertions(+), 961 deletions(-) create mode 100644 crates/workspace/src/item.rs diff --git a/crates/activity_indicator/src/activity_indicator.rs b/crates/activity_indicator/src/activity_indicator.rs index 775e460a2d..8b9eb4b040 100644 --- a/crates/activity_indicator/src/activity_indicator.rs +++ b/crates/activity_indicator/src/activity_indicator.rs @@ -11,7 +11,7 @@ use settings::Settings; use smallvec::SmallVec; use std::{cmp::Reverse, fmt::Write, sync::Arc}; use util::ResultExt; -use workspace::{ItemHandle, StatusItemView, Workspace}; +use workspace::{item::ItemHandle, StatusItemView, Workspace}; actions!(lsp_status, [ShowErrorMessage]); diff --git a/crates/breadcrumbs/src/breadcrumbs.rs b/crates/breadcrumbs/src/breadcrumbs.rs index 85f0509caf..278b8f39e2 100644 --- a/crates/breadcrumbs/src/breadcrumbs.rs +++ b/crates/breadcrumbs/src/breadcrumbs.rs @@ -4,7 +4,10 @@ use gpui::{ use itertools::Itertools; use search::ProjectSearchView; use settings::Settings; -use workspace::{ItemEvent, ItemHandle, ToolbarItemLocation, ToolbarItemView}; +use workspace::{ + item::{ItemEvent, ItemHandle}, + ToolbarItemLocation, ToolbarItemView, +}; pub enum Event { UpdateLocation, diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index ade4e10280..762a5cf711 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -52,7 +52,7 @@ use std::{ use theme::ThemeRegistry; use unindent::Unindent as _; use util::post_inc; -use workspace::{shared_screen::SharedScreen, Item, SplitDirection, ToggleFollow, Workspace}; +use workspace::{shared_screen::SharedScreen, item::Item, SplitDirection, ToggleFollow, Workspace}; #[ctor::ctor] fn init_logger() { diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index 078d83ac61..a3621dd30d 100644 --- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs @@ -29,7 +29,10 @@ use std::{ sync::Arc, }; use util::TryFutureExt; -use workspace::{ItemHandle as _, ItemNavHistory, Workspace}; +use workspace::{ + item::{Item, ItemEvent, ItemHandle}, + ItemNavHistory, Workspace, +}; actions!(diagnostics, [Deploy]); @@ -503,7 +506,7 @@ impl ProjectDiagnosticsEditor { } } -impl workspace::Item for ProjectDiagnosticsEditor { +impl Item for ProjectDiagnosticsEditor { fn tab_content( &self, _detail: Option, @@ -571,7 +574,7 @@ impl workspace::Item for ProjectDiagnosticsEditor { unreachable!() } - fn to_item_events(event: &Self::Event) -> Vec { + fn to_item_events(event: &Self::Event) -> Vec { Editor::to_item_events(event) } diff --git a/crates/diagnostics/src/items.rs 
b/crates/diagnostics/src/items.rs index 26636d7700..d109a5262c 100644 --- a/crates/diagnostics/src/items.rs +++ b/crates/diagnostics/src/items.rs @@ -7,7 +7,7 @@ use gpui::{ use language::Diagnostic; use project::Project; use settings::Settings; -use workspace::StatusItemView; +use workspace::{item::ItemHandle, StatusItemView}; pub struct DiagnosticIndicator { summary: project::DiagnosticSummary, @@ -219,7 +219,7 @@ impl View for DiagnosticIndicator { impl StatusItemView for DiagnosticIndicator { fn set_active_pane_item( &mut self, - active_pane_item: Option<&dyn workspace::ItemHandle>, + active_pane_item: Option<&dyn ItemHandle>, cx: &mut ViewContext, ) { if let Some(editor) = active_pane_item.and_then(|item| item.downcast::()) { diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index 8ac1f9a3fc..ca66ae7dc9 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -22,7 +22,10 @@ use util::{ assert_set_eq, test::{marked_text_ranges, marked_text_ranges_by, sample_text, TextRangeMarker}, }; -use workspace::{FollowableItem, ItemHandle, NavigationEntry, Pane}; +use workspace::{ + item::{FollowableItem, ItemHandle}, + NavigationEntry, Pane, +}; #[gpui::test] fn test_edit_events(cx: &mut MutableAppContext) { @@ -475,7 +478,7 @@ fn test_clone(cx: &mut gpui::MutableAppContext) { fn test_navigation_history(cx: &mut gpui::MutableAppContext) { cx.set_global(Settings::test(cx)); cx.set_global(DragAndDrop::::default()); - use workspace::Item; + use workspace::item::Item; let (_, pane) = cx.add_window(Default::default(), |cx| Pane::new(None, cx)); let buffer = MultiBuffer::build_simple(&sample_text(300, 5, 'a'), cx); diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 0cc8575e99..4f9c7d5593 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -24,9 +24,9 @@ use std::{ use text::Selection; use util::TryFutureExt; use workspace::{ + item::{FollowableItem, Item, ItemEvent, ItemHandle, ProjectItem}, searchable::{Direction, SearchEvent, SearchableItem, SearchableItemHandle}, - FollowableItem, Item, ItemEvent, ItemHandle, ItemNavHistory, ProjectItem, StatusItemView, - ToolbarItemLocation, + ItemNavHistory, StatusItemView, ToolbarItemLocation, }; pub const MAX_TAB_TITLE_LEN: usize = 24; @@ -490,7 +490,7 @@ impl Item for Editor { Task::ready(Ok(())) } - fn to_item_events(event: &Self::Event) -> Vec { + fn to_item_events(event: &Self::Event) -> Vec { let mut result = Vec::new(); match event { Event::Closed => result.push(ItemEvent::CloseItem), diff --git a/crates/search/src/buffer_search.rs b/crates/search/src/buffer_search.rs index 7d668f6b3e..5877322feb 100644 --- a/crates/search/src/buffer_search.rs +++ b/crates/search/src/buffer_search.rs @@ -14,8 +14,9 @@ use serde::Deserialize; use settings::Settings; use std::{any::Any, sync::Arc}; use workspace::{ + item::ItemHandle, searchable::{Direction, SearchEvent, SearchableItemHandle, WeakSearchableItemHandle}, - ItemHandle, Pane, ToolbarItemLocation, ToolbarItemView, + Pane, ToolbarItemLocation, ToolbarItemView, }; #[derive(Clone, Deserialize, PartialEq)] diff --git a/crates/search/src/project_search.rs b/crates/search/src/project_search.rs index 5e935a6ae3..edd4f40ba2 100644 --- a/crates/search/src/project_search.rs +++ b/crates/search/src/project_search.rs @@ -24,9 +24,9 @@ use std::{ }; use util::ResultExt as _; use workspace::{ + item::{Item, ItemEvent, ItemHandle}, searchable::{Direction, SearchableItem, SearchableItemHandle}, - Item, ItemEvent, 
ItemHandle, ItemNavHistory, Pane, ToolbarItemLocation, ToolbarItemView, - Workspace, + ItemNavHistory, Pane, ToolbarItemLocation, ToolbarItemView, Workspace, }; actions!(project_search, [SearchInNew, ToggleFocus]); @@ -893,7 +893,7 @@ impl View for ProjectSearchBar { impl ToolbarItemView for ProjectSearchBar { fn set_active_pane_item( &mut self, - active_pane_item: Option<&dyn workspace::ItemHandle>, + active_pane_item: Option<&dyn ItemHandle>, cx: &mut ViewContext, ) -> ToolbarItemLocation { cx.notify(); diff --git a/crates/terminal/src/terminal_container_view.rs b/crates/terminal/src/terminal_container_view.rs index 6ee8bc78ae..5d5fda1206 100644 --- a/crates/terminal/src/terminal_container_view.rs +++ b/crates/terminal/src/terminal_container_view.rs @@ -9,7 +9,10 @@ use gpui::{ }; use util::truncate_and_trailoff; use workspace::searchable::{SearchEvent, SearchOptions, SearchableItem, SearchableItemHandle}; -use workspace::{Item, ItemEvent, ToolbarItemLocation, Workspace}; +use workspace::{ + item::{Item, ItemEvent}, + ToolbarItemLocation, Workspace, +}; use project::{LocalWorktree, Project, ProjectPath}; use settings::{AlternateScroll, Settings, WorkingDirectory}; diff --git a/crates/theme_testbench/src/theme_testbench.rs b/crates/theme_testbench/src/theme_testbench.rs index 17b6248671..9c7d6bdf49 100644 --- a/crates/theme_testbench/src/theme_testbench.rs +++ b/crates/theme_testbench/src/theme_testbench.rs @@ -12,7 +12,10 @@ use project::{Project, ProjectEntryId, ProjectPath}; use settings::Settings; use smallvec::SmallVec; use theme::{ColorScheme, Layer, Style, StyleSet}; -use workspace::{Item, Workspace}; +use workspace::{ + item::{Item, ItemEvent}, + Workspace, +}; actions!(theme, [DeployThemeTestbench]); @@ -351,7 +354,7 @@ impl Item for ThemeTestbench { gpui::Task::ready(Ok(())) } - fn to_item_events(_: &Self::Event) -> Vec { + fn to_item_events(_: &Self::Event) -> Vec { Vec::new() } } diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index 5f471ff018..5b08b689ab 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -98,14 +98,14 @@ pub fn icon_for_dock_anchor(anchor: DockAnchor) -> &'static str { } impl DockPosition { - fn is_visible(&self) -> bool { + pub fn is_visible(&self) -> bool { match self { DockPosition::Shown(_) => true, DockPosition::Hidden(_) => false, } } - fn anchor(&self) -> DockAnchor { + pub fn anchor(&self) -> DockAnchor { match self { DockPosition::Shown(anchor) | DockPosition::Hidden(anchor) => *anchor, } @@ -137,9 +137,15 @@ pub struct Dock { } impl Dock { - pub fn new(default_item_factory: DefaultItemFactory, cx: &mut ViewContext) -> Self { - let anchor = cx.global::().default_dock_anchor; - let pane = cx.add_view(|cx| Pane::new(Some(anchor), cx)); + pub fn new( + default_item_factory: DefaultItemFactory, + position: Option, + cx: &mut ViewContext, + ) -> Self { + let position = position + .unwrap_or_else(|| DockPosition::Hidden(cx.global::().default_dock_anchor)); + + let pane = cx.add_view(|cx| Pane::new(Some(position.anchor()), cx)); pane.update(cx, |pane, cx| { pane.set_active(false, cx); }); @@ -152,7 +158,7 @@ impl Dock { Self { pane, panel_sizes: Default::default(), - position: DockPosition::Hidden(anchor), + position, default_item_factory, } } @@ -454,7 +460,7 @@ mod tests { use settings::Settings; use super::*; - use crate::{sidebar::Sidebar, tests::TestItem, ItemHandle, Workspace}; + use crate::{item::test::TestItem, sidebar::Sidebar, ItemHandle, Workspace}; pub fn default_item_factory( _workspace: 
&mut Workspace, diff --git a/crates/workspace/src/item.rs b/crates/workspace/src/item.rs new file mode 100644 index 0000000000..215ad47e1b --- /dev/null +++ b/crates/workspace/src/item.rs @@ -0,0 +1,876 @@ +use std::{ + any::{Any, TypeId}, + borrow::Cow, + cell::RefCell, + fmt, + path::PathBuf, + rc::Rc, + sync::atomic::{AtomicBool, Ordering}, + time::Duration, +}; + +use anyhow::Result; +use client::proto; +use gpui::{ + AnyViewHandle, AppContext, ElementBox, ModelHandle, MutableAppContext, Task, View, ViewContext, + ViewHandle, WeakViewHandle, +}; +use project::{Project, ProjectEntryId, ProjectPath}; +use settings::{Autosave, Settings}; +use smallvec::SmallVec; +use theme::Theme; +use util::ResultExt; + +use crate::{ + pane, + persistence::model::{ItemId, WorkspaceId}, + searchable::SearchableItemHandle, + DelayedDebouncedEditAction, FollowableItemBuilders, ItemNavHistory, Pane, ToolbarItemLocation, + Workspace, +}; + +#[derive(Eq, PartialEq, Hash)] +pub enum ItemEvent { + CloseItem, + UpdateTab, + UpdateBreadcrumbs, + Edit, +} + +pub trait Item: View { + fn deactivated(&mut self, _: &mut ViewContext) {} + fn workspace_deactivated(&mut self, _: &mut ViewContext) {} + fn navigate(&mut self, _: Box, _: &mut ViewContext) -> bool { + false + } + fn tab_description<'a>(&'a self, _: usize, _: &'a AppContext) -> Option> { + None + } + fn tab_content(&self, detail: Option, style: &theme::Tab, cx: &AppContext) + -> ElementBox; + fn project_path(&self, cx: &AppContext) -> Option; + fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 3]>; + fn is_singleton(&self, cx: &AppContext) -> bool; + fn set_nav_history(&mut self, _: ItemNavHistory, _: &mut ViewContext); + fn clone_on_split(&self, _: &mut ViewContext) -> Option + where + Self: Sized, + { + None + } + fn is_dirty(&self, _: &AppContext) -> bool { + false + } + fn has_conflict(&self, _: &AppContext) -> bool { + false + } + fn can_save(&self, cx: &AppContext) -> bool; + fn save( + &mut self, + project: ModelHandle, + cx: &mut ViewContext, + ) -> Task>; + fn save_as( + &mut self, + project: ModelHandle, + abs_path: PathBuf, + cx: &mut ViewContext, + ) -> Task>; + fn reload( + &mut self, + project: ModelHandle, + cx: &mut ViewContext, + ) -> Task>; + fn git_diff_recalc( + &mut self, + _project: ModelHandle, + _cx: &mut ViewContext, + ) -> Task> { + Task::ready(Ok(())) + } + fn to_item_events(event: &Self::Event) -> Vec; + fn should_close_item_on_event(_: &Self::Event) -> bool { + false + } + fn should_update_tab_on_event(_: &Self::Event) -> bool { + false + } + fn is_edit_event(_: &Self::Event) -> bool { + false + } + fn act_as_type( + &self, + type_id: TypeId, + self_handle: &ViewHandle, + _: &AppContext, + ) -> Option { + if TypeId::of::() == type_id { + Some(self_handle.into()) + } else { + None + } + } + fn as_searchable(&self, _: &ViewHandle) -> Option> { + None + } + + fn breadcrumb_location(&self) -> ToolbarItemLocation { + ToolbarItemLocation::Hidden + } + fn breadcrumbs(&self, _theme: &Theme, _cx: &AppContext) -> Option> { + None + } + fn serialized_item_kind() -> Option<&'static str>; + fn deserialize( + workspace_id: WorkspaceId, + item_id: ItemId, + cx: &mut ViewContext, + ) -> Result; +} + +pub trait ItemHandle: 'static + fmt::Debug { + fn subscribe_to_item_events( + &self, + cx: &mut MutableAppContext, + handler: Box, + ) -> gpui::Subscription; + fn tab_description<'a>(&self, detail: usize, cx: &'a AppContext) -> Option>; + fn tab_content(&self, detail: Option, style: &theme::Tab, cx: &AppContext) + -> 
ElementBox; + fn project_path(&self, cx: &AppContext) -> Option; + fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 3]>; + fn is_singleton(&self, cx: &AppContext) -> bool; + fn boxed_clone(&self) -> Box; + fn clone_on_split(&self, cx: &mut MutableAppContext) -> Option>; + fn added_to_pane( + &self, + workspace: &mut Workspace, + pane: ViewHandle, + cx: &mut ViewContext, + ); + fn deactivated(&self, cx: &mut MutableAppContext); + fn workspace_deactivated(&self, cx: &mut MutableAppContext); + fn navigate(&self, data: Box, cx: &mut MutableAppContext) -> bool; + fn id(&self) -> usize; + fn window_id(&self) -> usize; + fn to_any(&self) -> AnyViewHandle; + fn is_dirty(&self, cx: &AppContext) -> bool; + fn has_conflict(&self, cx: &AppContext) -> bool; + fn can_save(&self, cx: &AppContext) -> bool; + fn save(&self, project: ModelHandle, cx: &mut MutableAppContext) -> Task>; + fn save_as( + &self, + project: ModelHandle, + abs_path: PathBuf, + cx: &mut MutableAppContext, + ) -> Task>; + fn reload(&self, project: ModelHandle, cx: &mut MutableAppContext) + -> Task>; + fn git_diff_recalc( + &self, + project: ModelHandle, + cx: &mut MutableAppContext, + ) -> Task>; + fn act_as_type(&self, type_id: TypeId, cx: &AppContext) -> Option; + fn to_followable_item_handle(&self, cx: &AppContext) -> Option>; + fn on_release( + &self, + cx: &mut MutableAppContext, + callback: Box, + ) -> gpui::Subscription; + fn to_searchable_item_handle(&self, cx: &AppContext) -> Option>; + fn breadcrumb_location(&self, cx: &AppContext) -> ToolbarItemLocation; + fn breadcrumbs(&self, theme: &Theme, cx: &AppContext) -> Option>; +} + +pub trait WeakItemHandle { + fn id(&self) -> usize; + fn window_id(&self) -> usize; + fn upgrade(&self, cx: &AppContext) -> Option>; +} + +impl dyn ItemHandle { + pub fn downcast(&self) -> Option> { + self.to_any().downcast() + } + + pub fn act_as(&self, cx: &AppContext) -> Option> { + self.act_as_type(TypeId::of::(), cx) + .and_then(|t| t.downcast()) + } +} + +impl ItemHandle for ViewHandle { + fn subscribe_to_item_events( + &self, + cx: &mut MutableAppContext, + handler: Box, + ) -> gpui::Subscription { + cx.subscribe(self, move |_, event, cx| { + for item_event in T::to_item_events(event) { + handler(item_event, cx) + } + }) + } + + fn tab_description<'a>(&self, detail: usize, cx: &'a AppContext) -> Option> { + self.read(cx).tab_description(detail, cx) + } + + fn tab_content( + &self, + detail: Option, + style: &theme::Tab, + cx: &AppContext, + ) -> ElementBox { + self.read(cx).tab_content(detail, style, cx) + } + + fn project_path(&self, cx: &AppContext) -> Option { + self.read(cx).project_path(cx) + } + + fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 3]> { + self.read(cx).project_entry_ids(cx) + } + + fn is_singleton(&self, cx: &AppContext) -> bool { + self.read(cx).is_singleton(cx) + } + + fn boxed_clone(&self) -> Box { + Box::new(self.clone()) + } + + fn clone_on_split(&self, cx: &mut MutableAppContext) -> Option> { + self.update(cx, |item, cx| { + cx.add_option_view(|cx| item.clone_on_split(cx)) + }) + .map(|handle| Box::new(handle) as Box) + } + + fn added_to_pane( + &self, + workspace: &mut Workspace, + pane: ViewHandle, + cx: &mut ViewContext, + ) { + let history = pane.read(cx).nav_history_for_item(self); + self.update(cx, |this, cx| this.set_nav_history(history, cx)); + + if let Some(followed_item) = self.to_followable_item_handle(cx) { + if let Some(message) = followed_item.to_state_proto(cx) { + workspace.update_followers( + 
proto::update_followers::Variant::CreateView(proto::View { + id: followed_item.id() as u64, + variant: Some(message), + leader_id: workspace.leader_for_pane(&pane).map(|id| id.0), + }), + cx, + ); + } + } + + if workspace + .panes_by_item + .insert(self.id(), pane.downgrade()) + .is_none() + { + let mut pending_autosave = DelayedDebouncedEditAction::new(); + let mut pending_git_update = DelayedDebouncedEditAction::new(); + let pending_update = Rc::new(RefCell::new(None)); + let pending_update_scheduled = Rc::new(AtomicBool::new(false)); + + let mut event_subscription = + Some(cx.subscribe(self, move |workspace, item, event, cx| { + let pane = if let Some(pane) = workspace + .panes_by_item + .get(&item.id()) + .and_then(|pane| pane.upgrade(cx)) + { + pane + } else { + log::error!("unexpected item event after pane was dropped"); + return; + }; + + if let Some(item) = item.to_followable_item_handle(cx) { + let leader_id = workspace.leader_for_pane(&pane); + + if leader_id.is_some() && item.should_unfollow_on_event(event, cx) { + workspace.unfollow(&pane, cx); + } + + if item.add_event_to_update_proto( + event, + &mut *pending_update.borrow_mut(), + cx, + ) && !pending_update_scheduled.load(Ordering::SeqCst) + { + pending_update_scheduled.store(true, Ordering::SeqCst); + cx.after_window_update({ + let pending_update = pending_update.clone(); + let pending_update_scheduled = pending_update_scheduled.clone(); + move |this, cx| { + pending_update_scheduled.store(false, Ordering::SeqCst); + this.update_followers( + proto::update_followers::Variant::UpdateView( + proto::UpdateView { + id: item.id() as u64, + variant: pending_update.borrow_mut().take(), + leader_id: leader_id.map(|id| id.0), + }, + ), + cx, + ); + } + }); + } + } + + for item_event in T::to_item_events(event).into_iter() { + match item_event { + ItemEvent::CloseItem => { + Pane::close_item(workspace, pane, item.id(), cx) + .detach_and_log_err(cx); + return; + } + + ItemEvent::UpdateTab => { + pane.update(cx, |_, cx| { + cx.emit(pane::Event::ChangeItemTitle); + cx.notify(); + }); + } + + ItemEvent::Edit => { + if let Autosave::AfterDelay { milliseconds } = + cx.global::().autosave + { + let delay = Duration::from_millis(milliseconds); + let item = item.clone(); + pending_autosave.fire_new( + delay, + workspace, + cx, + |project, mut cx| async move { + cx.update(|cx| Pane::autosave_item(&item, project, cx)) + .await + .log_err(); + }, + ); + } + + let settings = cx.global::(); + let debounce_delay = settings.git_overrides.gutter_debounce; + + let item = item.clone(); + + if let Some(delay) = debounce_delay { + const MIN_GIT_DELAY: u64 = 50; + + let delay = delay.max(MIN_GIT_DELAY); + let duration = Duration::from_millis(delay); + + pending_git_update.fire_new( + duration, + workspace, + cx, + |project, mut cx| async move { + cx.update(|cx| item.git_diff_recalc(project, cx)) + .await + .log_err(); + }, + ); + } else { + let project = workspace.project().downgrade(); + cx.spawn_weak(|_, mut cx| async move { + if let Some(project) = project.upgrade(&cx) { + cx.update(|cx| item.git_diff_recalc(project, cx)) + .await + .log_err(); + } + }) + .detach(); + } + } + + _ => {} + } + } + })); + + cx.observe_focus(self, move |workspace, item, focused, cx| { + if !focused && cx.global::().autosave == Autosave::OnFocusChange { + Pane::autosave_item(&item, workspace.project.clone(), cx) + .detach_and_log_err(cx); + } + }) + .detach(); + + let item_id = self.id(); + cx.observe_release(self, move |workspace, _, _| { + 
workspace.panes_by_item.remove(&item_id); + event_subscription.take(); + }) + .detach(); + } + } + + fn deactivated(&self, cx: &mut MutableAppContext) { + self.update(cx, |this, cx| this.deactivated(cx)); + } + + fn workspace_deactivated(&self, cx: &mut MutableAppContext) { + self.update(cx, |this, cx| this.workspace_deactivated(cx)); + } + + fn navigate(&self, data: Box, cx: &mut MutableAppContext) -> bool { + self.update(cx, |this, cx| this.navigate(data, cx)) + } + + fn id(&self) -> usize { + self.id() + } + + fn window_id(&self) -> usize { + self.window_id() + } + + fn to_any(&self) -> AnyViewHandle { + self.into() + } + + fn is_dirty(&self, cx: &AppContext) -> bool { + self.read(cx).is_dirty(cx) + } + + fn has_conflict(&self, cx: &AppContext) -> bool { + self.read(cx).has_conflict(cx) + } + + fn can_save(&self, cx: &AppContext) -> bool { + self.read(cx).can_save(cx) + } + + fn save(&self, project: ModelHandle, cx: &mut MutableAppContext) -> Task> { + self.update(cx, |item, cx| item.save(project, cx)) + } + + fn save_as( + &self, + project: ModelHandle, + abs_path: PathBuf, + cx: &mut MutableAppContext, + ) -> Task> { + self.update(cx, |item, cx| item.save_as(project, abs_path, cx)) + } + + fn reload( + &self, + project: ModelHandle, + cx: &mut MutableAppContext, + ) -> Task> { + self.update(cx, |item, cx| item.reload(project, cx)) + } + + fn git_diff_recalc( + &self, + project: ModelHandle, + cx: &mut MutableAppContext, + ) -> Task> { + self.update(cx, |item, cx| item.git_diff_recalc(project, cx)) + } + + fn act_as_type(&self, type_id: TypeId, cx: &AppContext) -> Option { + self.read(cx).act_as_type(type_id, self, cx) + } + + fn to_followable_item_handle(&self, cx: &AppContext) -> Option> { + if cx.has_global::() { + let builders = cx.global::(); + let item = self.to_any(); + Some(builders.get(&item.view_type())?.1(item)) + } else { + None + } + } + + fn on_release( + &self, + cx: &mut MutableAppContext, + callback: Box, + ) -> gpui::Subscription { + cx.observe_release(self, move |_, cx| callback(cx)) + } + + fn to_searchable_item_handle(&self, cx: &AppContext) -> Option> { + self.read(cx).as_searchable(self) + } + + fn breadcrumb_location(&self, cx: &AppContext) -> ToolbarItemLocation { + self.read(cx).breadcrumb_location() + } + + fn breadcrumbs(&self, theme: &Theme, cx: &AppContext) -> Option> { + self.read(cx).breadcrumbs(theme, cx) + } +} + +impl From> for AnyViewHandle { + fn from(val: Box) -> Self { + val.to_any() + } +} + +impl From<&Box> for AnyViewHandle { + fn from(val: &Box) -> Self { + val.to_any() + } +} + +impl Clone for Box { + fn clone(&self) -> Box { + self.boxed_clone() + } +} + +impl WeakItemHandle for WeakViewHandle { + fn id(&self) -> usize { + self.id() + } + + fn window_id(&self) -> usize { + self.window_id() + } + + fn upgrade(&self, cx: &AppContext) -> Option> { + self.upgrade(cx).map(|v| Box::new(v) as Box) + } +} + +pub trait ProjectItem: Item { + type Item: project::Item; + + fn for_project_item( + project: ModelHandle, + item: ModelHandle, + cx: &mut ViewContext, + ) -> Self; +} + +pub trait FollowableItem: Item { + fn to_state_proto(&self, cx: &AppContext) -> Option; + fn from_state_proto( + pane: ViewHandle, + project: ModelHandle, + state: &mut Option, + cx: &mut MutableAppContext, + ) -> Option>>>; + fn add_event_to_update_proto( + &self, + event: &Self::Event, + update: &mut Option, + cx: &AppContext, + ) -> bool; + fn apply_update_proto( + &mut self, + message: proto::update_view::Variant, + cx: &mut ViewContext, + ) -> Result<()>; + + fn 
set_leader_replica_id(&mut self, leader_replica_id: Option, cx: &mut ViewContext); + fn should_unfollow_on_event(event: &Self::Event, cx: &AppContext) -> bool; +} + +pub trait FollowableItemHandle: ItemHandle { + fn set_leader_replica_id(&self, leader_replica_id: Option, cx: &mut MutableAppContext); + fn to_state_proto(&self, cx: &AppContext) -> Option; + fn add_event_to_update_proto( + &self, + event: &dyn Any, + update: &mut Option, + cx: &AppContext, + ) -> bool; + fn apply_update_proto( + &self, + message: proto::update_view::Variant, + cx: &mut MutableAppContext, + ) -> Result<()>; + fn should_unfollow_on_event(&self, event: &dyn Any, cx: &AppContext) -> bool; +} + +impl FollowableItemHandle for ViewHandle { + fn set_leader_replica_id(&self, leader_replica_id: Option, cx: &mut MutableAppContext) { + self.update(cx, |this, cx| { + this.set_leader_replica_id(leader_replica_id, cx) + }) + } + + fn to_state_proto(&self, cx: &AppContext) -> Option { + self.read(cx).to_state_proto(cx) + } + + fn add_event_to_update_proto( + &self, + event: &dyn Any, + update: &mut Option, + cx: &AppContext, + ) -> bool { + if let Some(event) = event.downcast_ref() { + self.read(cx).add_event_to_update_proto(event, update, cx) + } else { + false + } + } + + fn apply_update_proto( + &self, + message: proto::update_view::Variant, + cx: &mut MutableAppContext, + ) -> Result<()> { + self.update(cx, |this, cx| this.apply_update_proto(message, cx)) + } + + fn should_unfollow_on_event(&self, event: &dyn Any, cx: &AppContext) -> bool { + if let Some(event) = event.downcast_ref() { + T::should_unfollow_on_event(event, cx) + } else { + false + } + } +} + +#[cfg(test)] +pub(crate) mod test { + use std::{any::Any, borrow::Cow, cell::Cell}; + + use anyhow::anyhow; + use gpui::{ + elements::Empty, AppContext, Element, ElementBox, Entity, ModelHandle, RenderContext, Task, + View, ViewContext, + }; + use project::{Project, ProjectEntryId, ProjectPath}; + use smallvec::SmallVec; + + use crate::{sidebar::SidebarItem, ItemNavHistory}; + + use super::{Item, ItemEvent}; + + pub struct TestItem { + pub state: String, + pub label: String, + pub save_count: usize, + pub save_as_count: usize, + pub reload_count: usize, + pub is_dirty: bool, + pub is_singleton: bool, + pub has_conflict: bool, + pub project_entry_ids: Vec, + pub project_path: Option, + pub nav_history: Option, + pub tab_descriptions: Option>, + pub tab_detail: Cell>, + } + + pub enum TestItemEvent { + Edit, + } + + impl Clone for TestItem { + fn clone(&self) -> Self { + Self { + state: self.state.clone(), + label: self.label.clone(), + save_count: self.save_count, + save_as_count: self.save_as_count, + reload_count: self.reload_count, + is_dirty: self.is_dirty, + is_singleton: self.is_singleton, + has_conflict: self.has_conflict, + project_entry_ids: self.project_entry_ids.clone(), + project_path: self.project_path.clone(), + nav_history: None, + tab_descriptions: None, + tab_detail: Default::default(), + } + } + } + + impl TestItem { + pub fn new() -> Self { + Self { + state: String::new(), + label: String::new(), + save_count: 0, + save_as_count: 0, + reload_count: 0, + is_dirty: false, + has_conflict: false, + project_entry_ids: Vec::new(), + project_path: None, + is_singleton: true, + nav_history: None, + tab_descriptions: None, + tab_detail: Default::default(), + } + } + + pub fn with_label(mut self, state: &str) -> Self { + self.label = state.to_string(); + self + } + + pub fn with_singleton(mut self, singleton: bool) -> Self { + self.is_singleton = singleton; + 
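            // (Illustrative note, not from the patch: with_label, with_singleton,
            // and with_project_entry_ids form a small builder, so the pane and
            // dock tests can construct fixtures fluently, for example:
            //     let item = TestItem::new().with_label("untitled").with_singleton(false);
            // Any names beyond TestItem's own methods here are invented.)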
self + } + + pub fn with_project_entry_ids(mut self, project_entry_ids: &[u64]) -> Self { + self.project_entry_ids.extend( + project_entry_ids + .iter() + .copied() + .map(ProjectEntryId::from_proto), + ); + self + } + + pub fn set_state(&mut self, state: String, cx: &mut ViewContext) { + self.push_to_nav_history(cx); + self.state = state; + } + + fn push_to_nav_history(&mut self, cx: &mut ViewContext) { + if let Some(history) = &mut self.nav_history { + history.push(Some(Box::new(self.state.clone())), cx); + } + } + } + + impl Entity for TestItem { + type Event = TestItemEvent; + } + + impl View for TestItem { + fn ui_name() -> &'static str { + "TestItem" + } + + fn render(&mut self, _: &mut RenderContext) -> ElementBox { + Empty::new().boxed() + } + } + + impl Item for TestItem { + fn tab_description<'a>(&'a self, detail: usize, _: &'a AppContext) -> Option> { + self.tab_descriptions.as_ref().and_then(|descriptions| { + let description = *descriptions.get(detail).or_else(|| descriptions.last())?; + Some(description.into()) + }) + } + + fn tab_content(&self, detail: Option, _: &theme::Tab, _: &AppContext) -> ElementBox { + self.tab_detail.set(detail); + Empty::new().boxed() + } + + fn project_path(&self, _: &AppContext) -> Option { + self.project_path.clone() + } + + fn project_entry_ids(&self, _: &AppContext) -> SmallVec<[ProjectEntryId; 3]> { + self.project_entry_ids.iter().copied().collect() + } + + fn is_singleton(&self, _: &AppContext) -> bool { + self.is_singleton + } + + fn set_nav_history(&mut self, history: ItemNavHistory, _: &mut ViewContext) { + self.nav_history = Some(history); + } + + fn navigate(&mut self, state: Box, _: &mut ViewContext) -> bool { + let state = *state.downcast::().unwrap_or_default(); + if state != self.state { + self.state = state; + true + } else { + false + } + } + + fn deactivated(&mut self, cx: &mut ViewContext) { + self.push_to_nav_history(cx); + } + + fn clone_on_split(&self, _: &mut ViewContext) -> Option + where + Self: Sized, + { + Some(self.clone()) + } + + fn is_dirty(&self, _: &AppContext) -> bool { + self.is_dirty + } + + fn has_conflict(&self, _: &AppContext) -> bool { + self.has_conflict + } + + fn can_save(&self, _: &AppContext) -> bool { + !self.project_entry_ids.is_empty() + } + + fn save( + &mut self, + _: ModelHandle, + _: &mut ViewContext, + ) -> Task> { + self.save_count += 1; + self.is_dirty = false; + Task::ready(Ok(())) + } + + fn save_as( + &mut self, + _: ModelHandle, + _: std::path::PathBuf, + _: &mut ViewContext, + ) -> Task> { + self.save_as_count += 1; + self.is_dirty = false; + Task::ready(Ok(())) + } + + fn reload( + &mut self, + _: ModelHandle, + _: &mut ViewContext, + ) -> Task> { + self.reload_count += 1; + self.is_dirty = false; + Task::ready(Ok(())) + } + + fn to_item_events(_: &Self::Event) -> Vec { + vec![ItemEvent::UpdateTab, ItemEvent::Edit] + } + + fn serialized_item_kind() -> Option<&'static str> { + None + } + + fn deserialize( + workspace_id: crate::persistence::model::WorkspaceId, + item_id: crate::persistence::model::ItemId, + cx: &mut ViewContext, + ) -> anyhow::Result { + Err(anyhow!("Cannot deserialize test item")) + } + } + + impl SidebarItem for TestItem {} +} diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index 644fa9481e..5db8d6feec 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -3,8 +3,9 @@ mod dragged_item_receiver; use super::{ItemHandle, SplitDirection}; use crate::{ dock::{icon_for_dock_anchor, AnchorDockBottom, AnchorDockRight, 
ExpandDock, HideDock}, + item::WeakItemHandle, toolbar::Toolbar, - Item, NewFile, NewSearch, NewTerminal, WeakItemHandle, Workspace, + Item, NewFile, NewSearch, NewTerminal, Workspace, }; use anyhow::Result; use collections::{HashMap, HashSet, VecDeque}; @@ -1634,7 +1635,7 @@ mod tests { use std::sync::Arc; use super::*; - use crate::tests::TestItem; + use crate::item::test::TestItem; use gpui::{executor::Deterministic, TestAppContext}; use project::FakeFs; diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 8a80dc5a76..164807b24f 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -4,6 +4,7 @@ pub mod model; use std::ops::Deref; use std::path::{Path, PathBuf}; +use std::sync::Arc; use anyhow::{bail, Context, Result}; use db::open_file_db; @@ -52,7 +53,9 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( parent_group_id INTEGER, -- NULL indicates that this is a root node position INTEGER, -- NULL indicates that this is a root node axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE, FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; @@ -61,7 +64,9 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( workspace_id BLOB NOT NULL, parent_group_id INTEGER, -- NULL, this is a dock pane position INTEGER, -- NULL, this is a dock pane - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE, FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; @@ -71,8 +76,11 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( pane_id INTEGER NOT NULL, kind TEXT NOT NULL, position INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE, + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) + ON DELETE CASCADE, PRIMARY KEY(item_id, workspace_id) ) STRICT; "}], @@ -96,15 +104,15 @@ impl WorkspaceDb { // Note that we re-assign the workspace_id here in case it's empty // and we've grabbed the most recent workspace - let (workspace_id, dock_anchor, dock_visible) = iife!({ + let (workspace_id, dock_position) = iife!({ if worktree_roots.len() == 0 { self.select_row(indoc! {" - SELECT workspace_id, dock_anchor, dock_visible + SELECT workspace_id, dock_visible, dock_anchor FROM workspaces ORDER BY timestamp DESC LIMIT 1"})?()? } else { self.select_row_bound(indoc! {" - SELECT workspace_id, dock_anchor, dock_visible + SELECT workspace_id, dock_visible, dock_anchor FROM workspaces WHERE workspace_id = ?"})?(&workspace_id)? 
} @@ -122,8 +130,7 @@ impl WorkspaceDb { .get_center_pane_group(&workspace_id) .context("Getting center group") .log_err()?, - dock_anchor, - dock_visible, + dock_position, }) } @@ -150,8 +157,8 @@ impl WorkspaceDb { self.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?; self.exec_bound( - "INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?, ?, ?)", - )?((&workspace_id, workspace.dock_anchor, workspace.dock_visible))?; + "INSERT INTO workspaces(workspace_id, dock_visible, dock_anchor) VALUES (?, ?, ?)", + )?((&workspace_id, workspace.dock_position))?; // Save center pane group and dock pane self.save_pane_group(&workspace_id, &workspace.center_group, None)?; @@ -172,7 +179,7 @@ impl WorkspaceDb { } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots - pub fn recent_workspaces(&self, limit: usize) -> Vec> { + pub fn recent_workspaces(&self, limit: usize) -> Vec>> { iife!({ // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html Ok::<_, anyhow::Error>( @@ -181,7 +188,7 @@ impl WorkspaceDb { )?(limit)? .into_iter() .map(|id| id.paths()) - .collect::>>(), + .collect::>>>(), ) }) .log_err() @@ -339,22 +346,19 @@ mod tests { let db = WorkspaceDb(open_memory_db("test_basic_functionality")); let workspace_1 = SerializedWorkspace { - dock_anchor: DockAnchor::Bottom, - dock_visible: true, + dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), dock_pane: Default::default(), }; let workspace_2 = SerializedWorkspace { - dock_anchor: DockAnchor::Expanded, - dock_visible: false, + dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), dock_pane: Default::default(), }; let workspace_3 = SerializedWorkspace { - dock_anchor: DockAnchor::Right, - dock_visible: true, + dock_position: crate::dock::DockPosition::Shown(DockAnchor::Right), center_group: Default::default(), dock_pane: Default::default(), }; @@ -414,8 +418,7 @@ mod tests { center_group: &SerializedPaneGroup, ) -> SerializedWorkspace { SerializedWorkspace { - dock_anchor: DockAnchor::Right, - dock_visible: false, + dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Right), center_group: center_group.clone(), dock_pane, } diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index 824f649f98..7afd186a36 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -6,18 +6,21 @@ use std::{ use anyhow::{bail, Result}; use gpui::Axis; + use settings::DockAnchor; use sqlez::{ bindable::{Bind, Column}, statement::Statement, }; +use crate::dock::DockPosition; + #[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct WorkspaceId(Vec); +pub(crate) struct WorkspaceId(Arc>); impl WorkspaceId { - pub fn paths(self) -> Vec { - self.0 + pub fn paths(self) -> Arc> { + self.0.clone() } } @@ -28,7 +31,7 @@ impl, T: IntoIterator> From for WorkspaceId { .map(|p| p.as_ref().to_path_buf()) .collect::>(); roots.sort(); - Self(roots) + Self(Arc::new(roots)) } } @@ -49,8 +52,7 @@ impl Column for WorkspaceId { #[derive(Debug, PartialEq, Eq)] pub struct SerializedWorkspace { - pub dock_anchor: DockAnchor, - pub dock_visible: bool, + pub dock_position: DockPosition, pub center_group: SerializedPaneGroup, pub dock_pane: SerializedPane, } @@ -152,12 +154,31 @@ impl SerializedItem { } } +impl Bind for DockPosition { + fn bind(&self, statement: &Statement, 
start_index: i32) -> Result { + let next_index = statement.bind(self.is_visible(), start_index)?; + statement.bind(self.anchor(), next_index) + } +} + +impl Column for DockPosition { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (visible, next_index) = bool::column(statement, start_index)?; + let (dock_anchor, next_index) = DockAnchor::column(statement, next_index)?; + let position = if visible { + DockPosition::Shown(dock_anchor) + } else { + DockPosition::Hidden(dock_anchor) + }; + Ok((position, next_index)) + } +} + #[cfg(test)] mod tests { + use settings::DockAnchor; use sqlez::connection::Connection; - use crate::persistence::model::DockAnchor; - use super::WorkspaceId; #[test] diff --git a/crates/workspace/src/searchable.rs b/crates/workspace/src/searchable.rs index cbe7364536..073e88bf6d 100644 --- a/crates/workspace/src/searchable.rs +++ b/crates/workspace/src/searchable.rs @@ -6,7 +6,7 @@ use gpui::{ }; use project::search::SearchQuery; -use crate::{Item, ItemHandle, WeakItemHandle}; +use crate::{item::WeakItemHandle, Item, ItemHandle}; #[derive(Debug)] pub enum SearchEvent { diff --git a/crates/workspace/src/shared_screen.rs b/crates/workspace/src/shared_screen.rs index 8c3f293895..d6a69490a5 100644 --- a/crates/workspace/src/shared_screen.rs +++ b/crates/workspace/src/shared_screen.rs @@ -1,4 +1,8 @@ -use crate::{Item, ItemNavHistory}; +use crate::{ + item::ItemEvent, + persistence::model::{ItemId, WorkspaceId}, + Item, ItemNavHistory, +}; use anyhow::{anyhow, Result}; use call::participant::{Frame, RemoteVideoTrack}; use client::{PeerId, User}; @@ -176,9 +180,21 @@ impl Item for SharedScreen { Task::ready(Err(anyhow!("Item::reload called on SharedScreen"))) } - fn to_item_events(event: &Self::Event) -> Vec { + fn to_item_events(event: &Self::Event) -> Vec { match event { - Event::Close => vec![crate::ItemEvent::CloseItem], + Event::Close => vec![ItemEvent::CloseItem], } } + + fn serialized_item_kind() -> Option<&'static str> { + None + } + + fn deserialize( + workspace_id: WorkspaceId, + item_id: ItemId, + cx: &mut ViewContext, + ) -> Result { + Err(anyhow!("SharedScreen can not be deserialized")) + } } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 085d9e2eb2..c51979f655 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -3,6 +3,7 @@ /// This may cause issues when you're trying to write tests that use workspace focus to add items at /// specific locations. 
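// A minimal sketch, not part of this patch, of how the DockPosition encoding
// introduced in persistence/model.rs above behaves: Bind writes the visibility
// flag and the anchor into two consecutive columns, and Column reads them back
// in the same order to rebuild Shown/Hidden. The table and test names are
// invented; the Connection API follows the round-trip test shown earlier.
#[cfg(test)]
mod dock_position_encoding_sketch {
    use settings::DockAnchor;
    use sqlez::connection::Connection;

    use crate::dock::DockPosition;

    #[test]
    fn dock_position_round_trips() {
        let db = Connection::open_memory("dock_position_round_trips");
        db.exec("CREATE TABLE dock_test(visible INTEGER, anchor TEXT)")
            .unwrap()()
            .unwrap();

        // A single DockPosition consumes two placeholders: bind() returns the
        // next free index after writing (is_visible, anchor).
        db.exec_bound("INSERT INTO dock_test(visible, anchor) VALUES (?, ?)")
            .unwrap()(DockPosition::Shown(DockAnchor::Right))
            .unwrap();

        let stored: Option<DockPosition> = db
            .select_row("SELECT visible, anchor FROM dock_test LIMIT 1")
            .unwrap()()
            .unwrap();
        assert!(matches!(
            stored,
            Some(DockPosition::Shown(DockAnchor::Right))
        ));
    }
}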
pub mod dock; +pub mod item; pub mod pane; pub mod pane_group; mod persistence; @@ -12,7 +13,15 @@ pub mod sidebar; mod status_bar; mod toolbar; -use crate::persistence::model::SerializedWorkspace; +use std::{ + any::TypeId, + borrow::Cow, + future::Future, + path::{Path, PathBuf}, + sync::Arc, + time::Duration, +}; + use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; @@ -30,56 +39,25 @@ use gpui::{ MouseButton, MutableAppContext, PathPromptOptions, PromptLevel, RenderContext, Task, View, ViewContext, ViewHandle, WeakViewHandle, }; +use item::{FollowableItem, FollowableItemHandle, Item, ItemHandle, ProjectItem}; use language::LanguageRegistry; use log::{error, warn}; pub use pane::*; pub use pane_group::*; +use persistence::model::{ItemId, WorkspaceId}; use postage::prelude::Stream; use project::{Project, ProjectEntryId, ProjectPath, ProjectStore, Worktree, WorktreeId}; -use searchable::SearchableItemHandle; use serde::Deserialize; use settings::{Autosave, DockAnchor, Settings}; use shared_screen::SharedScreen; use sidebar::{Sidebar, SidebarButtons, SidebarSide, ToggleSidebarItem}; -use smallvec::SmallVec; use status_bar::StatusBar; pub use status_bar::StatusItemView; -use std::{ - any::{Any, TypeId}, - borrow::Cow, - cell::RefCell, - fmt, - future::Future, - path::{Path, PathBuf}, - rc::Rc, - sync::{ - atomic::{AtomicBool, Ordering::SeqCst}, - Arc, - }, - time::Duration, -}; use theme::{Theme, ThemeRegistry}; pub use toolbar::{ToolbarItemLocation, ToolbarItemView}; use util::ResultExt; -type ProjectItemBuilders = HashMap< - TypeId, - fn(ModelHandle, AnyModelHandle, &mut ViewContext) -> Box, ->; - -type FollowableItemBuilder = fn( - ViewHandle, - ModelHandle, - &mut Option, - &mut MutableAppContext, -) -> Option>>>; -type FollowableItemBuilders = HashMap< - TypeId, - ( - FollowableItemBuilder, - fn(AnyViewHandle) -> Box, - ), ->; +use crate::persistence::model::SerializedWorkspace; #[derive(Clone, PartialEq)] pub struct RemoveWorktreeFromProject(pub WorktreeId); @@ -316,6 +294,10 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { client.add_view_message_handler(Workspace::handle_update_followers); } +type ProjectItemBuilders = HashMap< + TypeId, + fn(ModelHandle, AnyModelHandle, &mut ViewContext) -> Box, +>; pub fn register_project_item(cx: &mut MutableAppContext) { cx.update_default_global(|builders: &mut ProjectItemBuilders, _| { builders.insert(TypeId::of::(), |project, model, cx| { @@ -325,6 +307,19 @@ pub fn register_project_item(cx: &mut MutableAppContext) { }); } +type FollowableItemBuilder = fn( + ViewHandle, + ModelHandle, + &mut Option, + &mut MutableAppContext, +) -> Option>>>; +type FollowableItemBuilders = HashMap< + TypeId, + ( + FollowableItemBuilder, + fn(AnyViewHandle) -> Box, + ), +>; pub fn register_followable_item(cx: &mut MutableAppContext) { cx.update_default_global(|builders: &mut FollowableItemBuilders, _| { builders.insert( @@ -342,6 +337,26 @@ pub fn register_followable_item(cx: &mut MutableAppContext) { }); } +type SerializableItemBuilders = HashMap< + &'static str, + fn(WorkspaceId, ItemId, &mut ViewContext) -> Option>, +>; +pub fn register_deserializable_item(cx: &mut MutableAppContext) { + cx.update_default_global(|deserializers: &mut SerializableItemBuilders, _| { + if let Some(serialized_item_kind) = I::serialized_item_kind() { + deserializers.insert(serialized_item_kind, |workspace_id, item_id, cx| { + if let Some(v) = + cx.add_option_view(|cx| I::deserialize(workspace_id, 
item_id, cx).log_err()) + { + Some(Box::new(v)) + } else { + None + } + }); + } + }); +} + pub struct AppState { pub languages: Arc, pub themes: Arc, @@ -354,189 +369,34 @@ pub struct AppState { pub default_item_factory: DefaultItemFactory, } -#[derive(Eq, PartialEq, Hash)] -pub enum ItemEvent { - CloseItem, - UpdateTab, - UpdateBreadcrumbs, - Edit, -} +impl AppState { + #[cfg(any(test, feature = "test-support"))] + pub fn test(cx: &mut MutableAppContext) -> Arc { + use fs::HomeDir; -pub trait Item: View { - fn deactivated(&mut self, _: &mut ViewContext) {} - fn workspace_deactivated(&mut self, _: &mut ViewContext) {} - fn navigate(&mut self, _: Box, _: &mut ViewContext) -> bool { - false - } - fn tab_description<'a>(&'a self, _: usize, _: &'a AppContext) -> Option> { - None - } - fn tab_content(&self, detail: Option, style: &theme::Tab, cx: &AppContext) - -> ElementBox; - fn project_path(&self, cx: &AppContext) -> Option; - fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 3]>; - fn is_singleton(&self, cx: &AppContext) -> bool; - fn set_nav_history(&mut self, _: ItemNavHistory, _: &mut ViewContext); - fn clone_on_split(&self, _: &mut ViewContext) -> Option - where - Self: Sized, - { - None - } - fn is_dirty(&self, _: &AppContext) -> bool { - false - } - fn has_conflict(&self, _: &AppContext) -> bool { - false - } - fn can_save(&self, cx: &AppContext) -> bool; - fn save( - &mut self, - project: ModelHandle, - cx: &mut ViewContext, - ) -> Task>; - fn save_as( - &mut self, - project: ModelHandle, - abs_path: PathBuf, - cx: &mut ViewContext, - ) -> Task>; - fn reload( - &mut self, - project: ModelHandle, - cx: &mut ViewContext, - ) -> Task>; - fn git_diff_recalc( - &mut self, - _project: ModelHandle, - _cx: &mut ViewContext, - ) -> Task> { - Task::ready(Ok(())) - } - fn to_item_events(event: &Self::Event) -> Vec; - fn should_close_item_on_event(_: &Self::Event) -> bool { - false - } - fn should_update_tab_on_event(_: &Self::Event) -> bool { - false - } - fn is_edit_event(_: &Self::Event) -> bool { - false - } - fn act_as_type( - &self, - type_id: TypeId, - self_handle: &ViewHandle, - _: &AppContext, - ) -> Option { - if TypeId::of::() == type_id { - Some(self_handle.into()) - } else { - None - } - } - fn as_searchable(&self, _: &ViewHandle) -> Option> { - None - } + cx.set_global(HomeDir(Path::new("/tmp/").to_path_buf())); + let settings = Settings::test(cx); + cx.set_global(settings); - fn breadcrumb_location(&self) -> ToolbarItemLocation { - ToolbarItemLocation::Hidden - } - fn breadcrumbs(&self, _theme: &Theme, _cx: &AppContext) -> Option> { - None - } -} - -pub trait ProjectItem: Item { - type Item: project::Item; - - fn for_project_item( - project: ModelHandle, - item: ModelHandle, - cx: &mut ViewContext, - ) -> Self; -} - -pub trait FollowableItem: Item { - fn to_state_proto(&self, cx: &AppContext) -> Option; - fn from_state_proto( - pane: ViewHandle, - project: ModelHandle, - state: &mut Option, - cx: &mut MutableAppContext, - ) -> Option>>>; - fn add_event_to_update_proto( - &self, - event: &Self::Event, - update: &mut Option, - cx: &AppContext, - ) -> bool; - fn apply_update_proto( - &mut self, - message: proto::update_view::Variant, - cx: &mut ViewContext, - ) -> Result<()>; - - fn set_leader_replica_id(&mut self, leader_replica_id: Option, cx: &mut ViewContext); - fn should_unfollow_on_event(event: &Self::Event, cx: &AppContext) -> bool; -} - -pub trait FollowableItemHandle: ItemHandle { - fn set_leader_replica_id(&self, leader_replica_id: Option, cx: &mut 
MutableAppContext); - fn to_state_proto(&self, cx: &AppContext) -> Option; - fn add_event_to_update_proto( - &self, - event: &dyn Any, - update: &mut Option, - cx: &AppContext, - ) -> bool; - fn apply_update_proto( - &self, - message: proto::update_view::Variant, - cx: &mut MutableAppContext, - ) -> Result<()>; - fn should_unfollow_on_event(&self, event: &dyn Any, cx: &AppContext) -> bool; -} - -impl FollowableItemHandle for ViewHandle { - fn set_leader_replica_id(&self, leader_replica_id: Option, cx: &mut MutableAppContext) { - self.update(cx, |this, cx| { - this.set_leader_replica_id(leader_replica_id, cx) + let fs = fs::FakeFs::new(cx.background().clone()); + let languages = Arc::new(LanguageRegistry::test()); + let http_client = client::test::FakeHttpClient::with_404_response(); + let client = Client::new(http_client.clone(), cx); + let project_store = cx.add_model(|_| ProjectStore::new()); + let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx)); + let themes = ThemeRegistry::new((), cx.font_cache().clone()); + Arc::new(Self { + client, + themes, + fs, + languages, + user_store, + project_store, + initialize_workspace: |_, _, _| {}, + build_window_options: Default::default, + default_item_factory: |_, _| unimplemented!(), }) } - - fn to_state_proto(&self, cx: &AppContext) -> Option { - self.read(cx).to_state_proto(cx) - } - - fn add_event_to_update_proto( - &self, - event: &dyn Any, - update: &mut Option, - cx: &AppContext, - ) -> bool { - if let Some(event) = event.downcast_ref() { - self.read(cx).add_event_to_update_proto(event, update, cx) - } else { - false - } - } - - fn apply_update_proto( - &self, - message: proto::update_view::Variant, - cx: &mut MutableAppContext, - ) -> Result<()> { - self.update(cx, |this, cx| this.apply_update_proto(message, cx)) - } - - fn should_unfollow_on_event(&self, event: &dyn Any, cx: &AppContext) -> bool { - if let Some(event) = event.downcast_ref() { - T::should_unfollow_on_event(event, cx) - } else { - false - } - } } struct DelayedDebouncedEditAction { @@ -580,7 +440,7 @@ impl DelayedDebouncedEditAction { futures::select_biased! 
{ _ = receiver => return, - _ = timer => {} + _ = timer => {} } if let Some(project) = project.upgrade(&cx) { @@ -590,427 +450,6 @@ impl DelayedDebouncedEditAction { } } -pub trait ItemHandle: 'static + fmt::Debug { - fn subscribe_to_item_events( - &self, - cx: &mut MutableAppContext, - handler: Box, - ) -> gpui::Subscription; - fn tab_description<'a>(&self, detail: usize, cx: &'a AppContext) -> Option>; - fn tab_content(&self, detail: Option, style: &theme::Tab, cx: &AppContext) - -> ElementBox; - fn project_path(&self, cx: &AppContext) -> Option; - fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 3]>; - fn is_singleton(&self, cx: &AppContext) -> bool; - fn boxed_clone(&self) -> Box; - fn clone_on_split(&self, cx: &mut MutableAppContext) -> Option>; - fn added_to_pane( - &self, - workspace: &mut Workspace, - pane: ViewHandle, - cx: &mut ViewContext, - ); - fn deactivated(&self, cx: &mut MutableAppContext); - fn workspace_deactivated(&self, cx: &mut MutableAppContext); - fn navigate(&self, data: Box, cx: &mut MutableAppContext) -> bool; - fn id(&self) -> usize; - fn window_id(&self) -> usize; - fn to_any(&self) -> AnyViewHandle; - fn is_dirty(&self, cx: &AppContext) -> bool; - fn has_conflict(&self, cx: &AppContext) -> bool; - fn can_save(&self, cx: &AppContext) -> bool; - fn save(&self, project: ModelHandle, cx: &mut MutableAppContext) -> Task>; - fn save_as( - &self, - project: ModelHandle, - abs_path: PathBuf, - cx: &mut MutableAppContext, - ) -> Task>; - fn reload(&self, project: ModelHandle, cx: &mut MutableAppContext) - -> Task>; - fn git_diff_recalc( - &self, - project: ModelHandle, - cx: &mut MutableAppContext, - ) -> Task>; - fn act_as_type(&self, type_id: TypeId, cx: &AppContext) -> Option; - fn to_followable_item_handle(&self, cx: &AppContext) -> Option>; - fn on_release( - &self, - cx: &mut MutableAppContext, - callback: Box, - ) -> gpui::Subscription; - fn to_searchable_item_handle(&self, cx: &AppContext) -> Option>; - fn breadcrumb_location(&self, cx: &AppContext) -> ToolbarItemLocation; - fn breadcrumbs(&self, theme: &Theme, cx: &AppContext) -> Option>; -} - -pub trait WeakItemHandle { - fn id(&self) -> usize; - fn window_id(&self) -> usize; - fn upgrade(&self, cx: &AppContext) -> Option>; -} - -impl dyn ItemHandle { - pub fn downcast(&self) -> Option> { - self.to_any().downcast() - } - - pub fn act_as(&self, cx: &AppContext) -> Option> { - self.act_as_type(TypeId::of::(), cx) - .and_then(|t| t.downcast()) - } -} - -impl ItemHandle for ViewHandle { - fn subscribe_to_item_events( - &self, - cx: &mut MutableAppContext, - handler: Box, - ) -> gpui::Subscription { - cx.subscribe(self, move |_, event, cx| { - for item_event in T::to_item_events(event) { - handler(item_event, cx) - } - }) - } - - fn tab_description<'a>(&self, detail: usize, cx: &'a AppContext) -> Option> { - self.read(cx).tab_description(detail, cx) - } - - fn tab_content( - &self, - detail: Option, - style: &theme::Tab, - cx: &AppContext, - ) -> ElementBox { - self.read(cx).tab_content(detail, style, cx) - } - - fn project_path(&self, cx: &AppContext) -> Option { - self.read(cx).project_path(cx) - } - - fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 3]> { - self.read(cx).project_entry_ids(cx) - } - - fn is_singleton(&self, cx: &AppContext) -> bool { - self.read(cx).is_singleton(cx) - } - - fn boxed_clone(&self) -> Box { - Box::new(self.clone()) - } - - fn clone_on_split(&self, cx: &mut MutableAppContext) -> Option> { - self.update(cx, |item, cx| { - 
cx.add_option_view(|cx| item.clone_on_split(cx)) - }) - .map(|handle| Box::new(handle) as Box) - } - - fn added_to_pane( - &self, - workspace: &mut Workspace, - pane: ViewHandle, - cx: &mut ViewContext, - ) { - let history = pane.read(cx).nav_history_for_item(self); - self.update(cx, |this, cx| this.set_nav_history(history, cx)); - - if let Some(followed_item) = self.to_followable_item_handle(cx) { - if let Some(message) = followed_item.to_state_proto(cx) { - workspace.update_followers( - proto::update_followers::Variant::CreateView(proto::View { - id: followed_item.id() as u64, - variant: Some(message), - leader_id: workspace.leader_for_pane(&pane).map(|id| id.0), - }), - cx, - ); - } - } - - if workspace - .panes_by_item - .insert(self.id(), pane.downgrade()) - .is_none() - { - let mut pending_autosave = DelayedDebouncedEditAction::new(); - let mut pending_git_update = DelayedDebouncedEditAction::new(); - let pending_update = Rc::new(RefCell::new(None)); - let pending_update_scheduled = Rc::new(AtomicBool::new(false)); - - let mut event_subscription = - Some(cx.subscribe(self, move |workspace, item, event, cx| { - let pane = if let Some(pane) = workspace - .panes_by_item - .get(&item.id()) - .and_then(|pane| pane.upgrade(cx)) - { - pane - } else { - log::error!("unexpected item event after pane was dropped"); - return; - }; - - if let Some(item) = item.to_followable_item_handle(cx) { - let leader_id = workspace.leader_for_pane(&pane); - - if leader_id.is_some() && item.should_unfollow_on_event(event, cx) { - workspace.unfollow(&pane, cx); - } - - if item.add_event_to_update_proto( - event, - &mut *pending_update.borrow_mut(), - cx, - ) && !pending_update_scheduled.load(SeqCst) - { - pending_update_scheduled.store(true, SeqCst); - cx.after_window_update({ - let pending_update = pending_update.clone(); - let pending_update_scheduled = pending_update_scheduled.clone(); - move |this, cx| { - pending_update_scheduled.store(false, SeqCst); - this.update_followers( - proto::update_followers::Variant::UpdateView( - proto::UpdateView { - id: item.id() as u64, - variant: pending_update.borrow_mut().take(), - leader_id: leader_id.map(|id| id.0), - }, - ), - cx, - ); - } - }); - } - } - - for item_event in T::to_item_events(event).into_iter() { - match item_event { - ItemEvent::CloseItem => { - Pane::close_item(workspace, pane, item.id(), cx) - .detach_and_log_err(cx); - return; - } - - ItemEvent::UpdateTab => { - pane.update(cx, |_, cx| { - cx.emit(pane::Event::ChangeItemTitle); - cx.notify(); - }); - } - - ItemEvent::Edit => { - if let Autosave::AfterDelay { milliseconds } = - cx.global::().autosave - { - let delay = Duration::from_millis(milliseconds); - let item = item.clone(); - pending_autosave.fire_new( - delay, - workspace, - cx, - |project, mut cx| async move { - cx.update(|cx| Pane::autosave_item(&item, project, cx)) - .await - .log_err(); - }, - ); - } - - let settings = cx.global::(); - let debounce_delay = settings.git_overrides.gutter_debounce; - - let item = item.clone(); - - if let Some(delay) = debounce_delay { - const MIN_GIT_DELAY: u64 = 50; - - let delay = delay.max(MIN_GIT_DELAY); - let duration = Duration::from_millis(delay); - - pending_git_update.fire_new( - duration, - workspace, - cx, - |project, mut cx| async move { - cx.update(|cx| item.git_diff_recalc(project, cx)) - .await - .log_err(); - }, - ); - } else { - let project = workspace.project().downgrade(); - cx.spawn_weak(|_, mut cx| async move { - if let Some(project) = project.upgrade(&cx) { - cx.update(|cx| 
item.git_diff_recalc(project, cx)) - .await - .log_err(); - } - }) - .detach(); - } - } - - _ => {} - } - } - })); - - cx.observe_focus(self, move |workspace, item, focused, cx| { - if !focused && cx.global::().autosave == Autosave::OnFocusChange { - Pane::autosave_item(&item, workspace.project.clone(), cx) - .detach_and_log_err(cx); - } - }) - .detach(); - - let item_id = self.id(); - cx.observe_release(self, move |workspace, _, _| { - workspace.panes_by_item.remove(&item_id); - event_subscription.take(); - }) - .detach(); - } - } - - fn deactivated(&self, cx: &mut MutableAppContext) { - self.update(cx, |this, cx| this.deactivated(cx)); - } - - fn workspace_deactivated(&self, cx: &mut MutableAppContext) { - self.update(cx, |this, cx| this.workspace_deactivated(cx)); - } - - fn navigate(&self, data: Box, cx: &mut MutableAppContext) -> bool { - self.update(cx, |this, cx| this.navigate(data, cx)) - } - - fn id(&self) -> usize { - self.id() - } - - fn window_id(&self) -> usize { - self.window_id() - } - - fn to_any(&self) -> AnyViewHandle { - self.into() - } - - fn is_dirty(&self, cx: &AppContext) -> bool { - self.read(cx).is_dirty(cx) - } - - fn has_conflict(&self, cx: &AppContext) -> bool { - self.read(cx).has_conflict(cx) - } - - fn can_save(&self, cx: &AppContext) -> bool { - self.read(cx).can_save(cx) - } - - fn save(&self, project: ModelHandle, cx: &mut MutableAppContext) -> Task> { - self.update(cx, |item, cx| item.save(project, cx)) - } - - fn save_as( - &self, - project: ModelHandle, - abs_path: PathBuf, - cx: &mut MutableAppContext, - ) -> Task> { - self.update(cx, |item, cx| item.save_as(project, abs_path, cx)) - } - - fn reload( - &self, - project: ModelHandle, - cx: &mut MutableAppContext, - ) -> Task> { - self.update(cx, |item, cx| item.reload(project, cx)) - } - - fn git_diff_recalc( - &self, - project: ModelHandle, - cx: &mut MutableAppContext, - ) -> Task> { - self.update(cx, |item, cx| item.git_diff_recalc(project, cx)) - } - - fn act_as_type(&self, type_id: TypeId, cx: &AppContext) -> Option { - self.read(cx).act_as_type(type_id, self, cx) - } - - fn to_followable_item_handle(&self, cx: &AppContext) -> Option> { - if cx.has_global::() { - let builders = cx.global::(); - let item = self.to_any(); - Some(builders.get(&item.view_type())?.1(item)) - } else { - None - } - } - - fn on_release( - &self, - cx: &mut MutableAppContext, - callback: Box, - ) -> gpui::Subscription { - cx.observe_release(self, move |_, cx| callback(cx)) - } - - fn to_searchable_item_handle(&self, cx: &AppContext) -> Option> { - self.read(cx).as_searchable(self) - } - - fn breadcrumb_location(&self, cx: &AppContext) -> ToolbarItemLocation { - self.read(cx).breadcrumb_location() - } - - fn breadcrumbs(&self, theme: &Theme, cx: &AppContext) -> Option> { - self.read(cx).breadcrumbs(theme, cx) - } -} - -impl From> for AnyViewHandle { - fn from(val: Box) -> Self { - val.to_any() - } -} - -impl From<&Box> for AnyViewHandle { - fn from(val: &Box) -> Self { - val.to_any() - } -} - -impl Clone for Box { - fn clone(&self) -> Box { - self.boxed_clone() - } -} - -impl WeakItemHandle for WeakViewHandle { - fn id(&self) -> usize { - self.id() - } - - fn window_id(&self) -> usize { - self.window_id() - } - - fn upgrade(&self, cx: &AppContext) -> Option> { - self.upgrade(cx).map(|v| Box::new(v) as Box) - } -} - pub trait Notification: View { fn should_dismiss_notification_on_event(&self, event: &::Event) -> bool; } @@ -1036,34 +475,23 @@ impl From<&dyn NotificationHandle> for AnyViewHandle { } } -impl AppState { - 
#[cfg(any(test, feature = "test-support"))] - pub fn test(cx: &mut MutableAppContext) -> Arc { - use fs::HomeDir; +#[derive(Default)] +struct LeaderState { + followers: HashSet, +} - cx.set_global(HomeDir(Path::new("/tmp/").to_path_buf())); - let settings = Settings::test(cx); - cx.set_global(settings); +type FollowerStatesByLeader = HashMap, FollowerState>>; - let fs = fs::FakeFs::new(cx.background().clone()); - let languages = Arc::new(LanguageRegistry::test()); - let http_client = client::test::FakeHttpClient::with_404_response(); - let client = Client::new(http_client.clone(), cx); - let project_store = cx.add_model(|_| ProjectStore::new()); - let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx)); - let themes = ThemeRegistry::new((), cx.font_cache().clone()); - Arc::new(Self { - client, - themes, - fs, - languages, - user_store, - project_store, - initialize_workspace: |_, _, _| {}, - build_window_options: Default::default, - default_item_factory: |_, _| unimplemented!(), - }) - } +#[derive(Default)] +struct FollowerState { + active_view_id: Option, + items_by_leader_view_id: HashMap, +} + +#[derive(Debug)] +enum FollowerItem { + Loading(Vec), + Loaded(Box), } pub enum Event { @@ -1074,7 +502,6 @@ pub enum Event { pub struct Workspace { weak_self: WeakViewHandle, - // _db_id: WorkspaceId, client: Arc, user_store: ModelHandle, remote_entity_subscription: Option, @@ -1100,28 +527,9 @@ pub struct Workspace { _observe_current_user: Task<()>, } -#[derive(Default)] -struct LeaderState { - followers: HashSet, -} - -type FollowerStatesByLeader = HashMap, FollowerState>>; - -#[derive(Default)] -struct FollowerState { - active_view_id: Option, - items_by_leader_view_id: HashMap, -} - -#[derive(Debug)] -enum FollowerItem { - Loading(Vec), - Loaded(Box), -} - impl Workspace { pub fn new( - _serialized_workspace: Option, + serialized_workspace: Option, project: ModelHandle, dock_default_factory: DefaultItemFactory, cx: &mut ViewContext, @@ -1160,6 +568,20 @@ impl Workspace { .detach(); cx.focus(¢er_pane); cx.emit(Event::PaneAdded(center_pane.clone())); + let dock = Dock::new( + dock_default_factory, + serialized_workspace + .as_ref() + .map(|ws| ws.dock_position) + .clone(), + cx, + ); + let dock_pane = dock.pane().clone(); + + if let Some(serialized_workspace) = serialized_workspace { + + // Fill them in? 
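// (Sketch of what "fill them in" will eventually mean, not in this patch:
// walk serialized_workspace.dock_pane and the center pane group, revive
// each child through the deserializer registered for its kind string, and
// add the resulting views to the corresponding panes.)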
+ } let fs = project.read(cx).fs().clone(); let user_store = project.read(cx).user_store(); @@ -1186,9 +608,6 @@ impl Workspace { cx.emit_global(WorkspaceCreated(weak_handle.clone())); - let dock = Dock::new(dock_default_factory, cx); - let dock_pane = dock.pane().clone(); - let left_sidebar = cx.add_view(|_| Sidebar::new(SidebarSide::Left)); let right_sidebar = cx.add_view(|_| Sidebar::new(SidebarSide::Right)); let left_sidebar_buttons = cx.add_view(|cx| SidebarButtons::new(left_sidebar.clone(), cx)); @@ -1218,7 +637,6 @@ impl Workspace { let mut this = Workspace { modal: None, weak_self: weak_handle, - // _db_id: serialized_workspace.workspace_id, center: PaneGroup::new(center_pane.clone()), dock, // When removing an item, the last element remaining in this array @@ -3086,13 +2504,13 @@ fn open_new(app_state: &Arc, cx: &mut MutableAppContext) -> Task<()> { #[cfg(test)] mod tests { - use std::cell::Cell; + use std::{cell::RefCell, rc::Rc}; - use crate::sidebar::SidebarItem; + use crate::item::test::{TestItem, TestItemEvent}; use super::*; use fs::FakeFs; - use gpui::{executor::Deterministic, ModelHandle, TestAppContext, ViewContext}; + use gpui::{executor::Deterministic, TestAppContext, ViewContext}; use project::{Project, ProjectEntryId}; use serde_json::json; @@ -3697,209 +3115,4 @@ mod tests { assert!(pane.can_navigate_forward()); }); } - - pub struct TestItem { - state: String, - pub label: String, - save_count: usize, - save_as_count: usize, - reload_count: usize, - is_dirty: bool, - is_singleton: bool, - has_conflict: bool, - project_entry_ids: Vec, - project_path: Option, - nav_history: Option, - tab_descriptions: Option>, - tab_detail: Cell>, - } - - pub enum TestItemEvent { - Edit, - } - - impl Clone for TestItem { - fn clone(&self) -> Self { - Self { - state: self.state.clone(), - label: self.label.clone(), - save_count: self.save_count, - save_as_count: self.save_as_count, - reload_count: self.reload_count, - is_dirty: self.is_dirty, - is_singleton: self.is_singleton, - has_conflict: self.has_conflict, - project_entry_ids: self.project_entry_ids.clone(), - project_path: self.project_path.clone(), - nav_history: None, - tab_descriptions: None, - tab_detail: Default::default(), - } - } - } - - impl TestItem { - pub fn new() -> Self { - Self { - state: String::new(), - label: String::new(), - save_count: 0, - save_as_count: 0, - reload_count: 0, - is_dirty: false, - has_conflict: false, - project_entry_ids: Vec::new(), - project_path: None, - is_singleton: true, - nav_history: None, - tab_descriptions: None, - tab_detail: Default::default(), - } - } - - pub fn with_label(mut self, state: &str) -> Self { - self.label = state.to_string(); - self - } - - pub fn with_singleton(mut self, singleton: bool) -> Self { - self.is_singleton = singleton; - self - } - - pub fn with_project_entry_ids(mut self, project_entry_ids: &[u64]) -> Self { - self.project_entry_ids.extend( - project_entry_ids - .iter() - .copied() - .map(ProjectEntryId::from_proto), - ); - self - } - - fn set_state(&mut self, state: String, cx: &mut ViewContext) { - self.push_to_nav_history(cx); - self.state = state; - } - - fn push_to_nav_history(&mut self, cx: &mut ViewContext) { - if let Some(history) = &mut self.nav_history { - history.push(Some(Box::new(self.state.clone())), cx); - } - } - } - - impl Entity for TestItem { - type Event = TestItemEvent; - } - - impl View for TestItem { - fn ui_name() -> &'static str { - "TestItem" - } - - fn render(&mut self, _: &mut RenderContext) -> ElementBox { - Empty::new().boxed() 
- } - } - - impl Item for TestItem { - fn tab_description<'a>(&'a self, detail: usize, _: &'a AppContext) -> Option> { - self.tab_descriptions.as_ref().and_then(|descriptions| { - let description = *descriptions.get(detail).or_else(|| descriptions.last())?; - Some(description.into()) - }) - } - - fn tab_content(&self, detail: Option, _: &theme::Tab, _: &AppContext) -> ElementBox { - self.tab_detail.set(detail); - Empty::new().boxed() - } - - fn project_path(&self, _: &AppContext) -> Option { - self.project_path.clone() - } - - fn project_entry_ids(&self, _: &AppContext) -> SmallVec<[ProjectEntryId; 3]> { - self.project_entry_ids.iter().copied().collect() - } - - fn is_singleton(&self, _: &AppContext) -> bool { - self.is_singleton - } - - fn set_nav_history(&mut self, history: ItemNavHistory, _: &mut ViewContext) { - self.nav_history = Some(history); - } - - fn navigate(&mut self, state: Box, _: &mut ViewContext) -> bool { - let state = *state.downcast::().unwrap_or_default(); - if state != self.state { - self.state = state; - true - } else { - false - } - } - - fn deactivated(&mut self, cx: &mut ViewContext) { - self.push_to_nav_history(cx); - } - - fn clone_on_split(&self, _: &mut ViewContext) -> Option - where - Self: Sized, - { - Some(self.clone()) - } - - fn is_dirty(&self, _: &AppContext) -> bool { - self.is_dirty - } - - fn has_conflict(&self, _: &AppContext) -> bool { - self.has_conflict - } - - fn can_save(&self, _: &AppContext) -> bool { - !self.project_entry_ids.is_empty() - } - - fn save( - &mut self, - _: ModelHandle, - _: &mut ViewContext, - ) -> Task> { - self.save_count += 1; - self.is_dirty = false; - Task::ready(Ok(())) - } - - fn save_as( - &mut self, - _: ModelHandle, - _: std::path::PathBuf, - _: &mut ViewContext, - ) -> Task> { - self.save_as_count += 1; - self.is_dirty = false; - Task::ready(Ok(())) - } - - fn reload( - &mut self, - _: ModelHandle, - _: &mut ViewContext, - ) -> Task> { - self.reload_count += 1; - self.is_dirty = false; - Task::ready(Ok(())) - } - - fn to_item_events(_: &Self::Event) -> Vec { - vec![ItemEvent::UpdateTab, ItemEvent::Edit] - } - } - - impl SidebarItem for TestItem {} } diff --git a/crates/zed/src/feedback.rs b/crates/zed/src/feedback.rs index 03b068a019..55597312ae 100644 --- a/crates/zed/src/feedback.rs +++ b/crates/zed/src/feedback.rs @@ -5,7 +5,7 @@ use gpui::{ Element, Entity, MouseButton, RenderContext, View, }; use settings::Settings; -use workspace::StatusItemView; +use workspace::{item::ItemHandle, StatusItemView}; pub const NEW_ISSUE_URL: &str = "https://github.com/zed-industries/feedback/issues/new/choose"; @@ -43,7 +43,7 @@ impl View for FeedbackLink { impl StatusItemView for FeedbackLink { fn set_active_pane_item( &mut self, - _: Option<&dyn workspace::ItemHandle>, + _: Option<&dyn ItemHandle>, _: &mut gpui::ViewContext, ) { } diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 5f67e290b5..53273b45d8 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -38,7 +38,7 @@ use fs::RealFs; use settings::watched_json::{watch_keymap_file, watch_settings_file, WatchedJsonFile}; use theme::ThemeRegistry; use util::{channel::RELEASE_CHANNEL, paths, ResultExt, TryFutureExt}; -use workspace::{self, AppState, ItemHandle, NewFile, OpenPaths, Workspace}; +use workspace::{self, item::ItemHandle, AppState, NewFile, OpenPaths, Workspace}; use zed::{self, build_window_options, initialize_workspace, languages, menus}; fn main() { diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 6b6b65ab32..0abcbeac48 
100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -625,7 +625,8 @@ mod tests { }; use theme::ThemeRegistry; use workspace::{ - open_paths, pane, Item, ItemHandle, NewFile, Pane, SplitDirection, WorkspaceHandle, + item::{Item, ItemHandle}, + open_paths, pane, NewFile, Pane, SplitDirection, WorkspaceHandle, }; #[gpui::test] From d20d21c6a20ed208c81b3271e62d72b87fcbc5c3 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 16 Nov 2022 16:35:56 -0800 Subject: [PATCH 157/240] Dock persistence working! Co-Authored-By: Mikayla Maki --- Cargo.lock | 3 + crates/collab/src/integration_tests.rs | 2 +- crates/db/src/db.rs | 41 +- crates/db/src/kvp.rs | 28 +- crates/diagnostics/src/diagnostics.rs | 16 +- crates/editor/Cargo.toml | 2 + crates/editor/src/editor.rs | 1 + crates/editor/src/items.rs | 19 +- crates/editor/src/persistence.rs | 30 ++ crates/project/src/project.rs | 2 + crates/search/src/project_search.rs | 14 + crates/sqlez/src/bindable.rs | 15 + crates/sqlez/src/connection.rs | 12 +- crates/sqlez/src/domain.rs | 57 +-- crates/sqlez/src/migrations.rs | 166 ++++---- crates/sqlez/src/statement.rs | 5 - crates/sqlez/src/thread_safe_connection.rs | 23 +- crates/sqlez/src/typed_statements.rs | 13 - crates/terminal/src/terminal.rs | 4 + .../terminal/src/terminal_container_view.rs | 23 +- crates/theme_testbench/src/theme_testbench.rs | 21 +- crates/workspace/Cargo.toml | 1 + crates/workspace/src/dock.rs | 16 +- crates/workspace/src/item.rs | 29 +- crates/workspace/src/persistence.rs | 378 +++++++++++------- crates/workspace/src/persistence/model.rs | 85 ++-- crates/workspace/src/shared_screen.rs | 18 +- crates/workspace/src/workspace.rs | 202 ++++++++-- crates/workspace/test.db | Bin 32768 -> 32768 bytes 29 files changed, 783 insertions(+), 443 deletions(-) create mode 100644 crates/editor/src/persistence.rs diff --git a/Cargo.lock b/Cargo.lock index bad036a05d..b4df5a9ab9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1738,6 +1738,7 @@ dependencies = [ "collections", "context_menu", "ctor", + "db", "drag_and_drop", "env_logger", "futures 0.3.25", @@ -1761,6 +1762,7 @@ dependencies = [ "smallvec", "smol", "snippet", + "sqlez", "sum_tree", "text", "theme", @@ -7629,6 +7631,7 @@ dependencies = [ "context_menu", "db", "drag_and_drop", + "env_logger", "fs", "futures 0.3.25", "gpui", diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 762a5cf711..e1b242713f 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -52,7 +52,7 @@ use std::{ use theme::ThemeRegistry; use unindent::Unindent as _; use util::post_inc; -use workspace::{shared_screen::SharedScreen, item::Item, SplitDirection, ToggleFollow, Workspace}; +use workspace::{item::Item, shared_screen::SharedScreen, SplitDirection, ToggleFollow, Workspace}; #[ctor::ctor] fn init_logger() { diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 56fc79f475..9bb4286b83 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,6 +1,6 @@ pub mod kvp; -use std::fs; +use std::fs::create_dir_all; use std::path::Path; #[cfg(any(test, feature = "test-support"))] @@ -8,24 +8,29 @@ use anyhow::Result; use indoc::indoc; #[cfg(any(test, feature = "test-support"))] use sqlez::connection::Connection; -use sqlez::domain::Domain; +use sqlez::domain::{Domain, Migrator}; use sqlez::thread_safe_connection::ThreadSafeConnection; +use util::channel::RELEASE_CHANNEL_NAME; +use util::paths::DB_DIR; const INITIALIZE_QUERY: &'static str = indoc! 
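// (Why these pragmas: WAL lets readers proceed alongside a single writer;
// synchronous=NORMAL is the standard pairing with WAL; busy_timeout makes a
// contended write wait briefly instead of failing immediately; and SQLite
// keeps foreign key enforcement off unless it is enabled per connection.)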
{" PRAGMA journal_mode=WAL; PRAGMA synchronous=NORMAL; + PRAGMA busy_timeout=1; PRAGMA foreign_keys=TRUE; PRAGMA case_sensitive_like=TRUE; "}; /// Open or create a database at the given directory path. -pub fn open_file_db() -> ThreadSafeConnection { +pub fn open_file_db() -> ThreadSafeConnection { // Use 0 for now. Will implement incrementing and clearing of old db files soon TM - let current_db_dir = (*util::paths::DB_DIR).join(Path::new(&format!( - "0-{}", - *util::channel::RELEASE_CHANNEL_NAME - ))); - fs::create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); + let current_db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); + + // if *RELEASE_CHANNEL == ReleaseChannel::Dev { + // remove_dir_all(¤t_db_dir).ok(); + // } + + create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); let db_path = current_db_dir.join(Path::new("db.sqlite")); ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) @@ -44,3 +49,23 @@ pub fn write_db_to>( let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref()); conn.backup_main(&destination) } + +/// Implements a basic DB wrapper for a given domain +#[macro_export] +macro_rules! connection { + ($id:ident: $t:ident<$d:ty>) => { + pub struct $t(::sqlez::thread_safe_connection::ThreadSafeConnection<$d>); + + impl ::std::ops::Deref for $t { + type Target = ::sqlez::thread_safe_connection::ThreadSafeConnection<$d>; + + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + lazy_static! { + pub static ref $id: $t = $t(::db::open_file_db()); + } + }; +} diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 1dd1cf69b7..dd82c17615 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -1,22 +1,9 @@ use anyhow::Result; use indoc::indoc; -use sqlez::{ - connection::Connection, domain::Domain, migrations::Migration, - thread_safe_connection::ThreadSafeConnection, -}; +use sqlez::{domain::Domain, thread_safe_connection::ThreadSafeConnection}; use std::ops::Deref; -pub(crate) const KVP_MIGRATION: Migration = Migration::new( - "kvp", - &[indoc! {" - CREATE TABLE kv_store( - key TEXT PRIMARY KEY, - value TEXT NOT NULL - ) STRICT; - "}], -); - lazy_static::lazy_static! { pub static ref KEY_VALUE_STORE: KeyValueStore = KeyValueStore(crate::open_file_db()); @@ -26,8 +13,17 @@ lazy_static::lazy_static! { pub struct KeyValueStore(ThreadSafeConnection); impl Domain for KeyValueStore { - fn migrate(conn: &Connection) -> anyhow::Result<()> { - KVP_MIGRATION.run(conn) + fn name() -> &'static str { + "kvp" + } + + fn migrations() -> &'static [&'static str] { + &[indoc! 
{" + CREATE TABLE kv_store( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ) STRICT; + "}] } } diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index a3621dd30d..639a108724 100644 --- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs @@ -31,7 +31,7 @@ use std::{ use util::TryFutureExt; use workspace::{ item::{Item, ItemEvent, ItemHandle}, - ItemNavHistory, Workspace, + ItemNavHistory, Pane, Workspace, }; actions!(diagnostics, [Deploy]); @@ -613,6 +613,20 @@ impl Item for ProjectDiagnosticsEditor { fn deactivated(&mut self, cx: &mut ViewContext) { self.editor.update(cx, |editor, cx| editor.deactivated(cx)); } + + fn serialized_item_kind() -> Option<&'static str> { + Some("diagnostics") + } + + fn deserialize( + project: ModelHandle, + workspace: WeakViewHandle, + _workspace_id: workspace::WorkspaceId, + _item_id: workspace::ItemId, + cx: &mut ViewContext, + ) -> Task>> { + Task::ready(Ok(cx.add_view(|cx| Self::new(project, workspace, cx)))) + } } fn diagnostic_header_renderer(diagnostic: Diagnostic) -> RenderBlock { diff --git a/crates/editor/Cargo.toml b/crates/editor/Cargo.toml index f56ed36f75..f992ed5116 100644 --- a/crates/editor/Cargo.toml +++ b/crates/editor/Cargo.toml @@ -23,6 +23,7 @@ test-support = [ drag_and_drop = { path = "../drag_and_drop" } text = { path = "../text" } clock = { path = "../clock" } +db = { path = "../db" } collections = { path = "../collections" } context_menu = { path = "../context_menu" } fuzzy = { path = "../fuzzy" } @@ -37,6 +38,7 @@ snippet = { path = "../snippet" } sum_tree = { path = "../sum_tree" } theme = { path = "../theme" } util = { path = "../util" } +sqlez = { path = "../sqlez" } workspace = { path = "../workspace" } aho-corasick = "0.7" anyhow = "1.0" diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 5bbeed3fb5..ce810bab0c 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -9,6 +9,7 @@ mod link_go_to_definition; mod mouse_context_menu; pub mod movement; mod multi_buffer; +mod persistence; pub mod selections_collection; #[cfg(test)] diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 4f9c7d5593..ae9bbd5748 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -7,7 +7,7 @@ use anyhow::{anyhow, Result}; use futures::FutureExt; use gpui::{ elements::*, geometry::vector::vec2f, AppContext, Entity, ModelHandle, MutableAppContext, - RenderContext, Subscription, Task, View, ViewContext, ViewHandle, + RenderContext, Subscription, Task, View, ViewContext, ViewHandle, WeakViewHandle, }; use language::{Bias, Buffer, File as _, OffsetRangeExt, Point, SelectionGoal}; use project::{File, FormatTrigger, Project, ProjectEntryId, ProjectPath}; @@ -26,7 +26,7 @@ use util::TryFutureExt; use workspace::{ item::{FollowableItem, Item, ItemEvent, ItemHandle, ProjectItem}, searchable::{Direction, SearchEvent, SearchableItem, SearchableItemHandle}, - ItemNavHistory, StatusItemView, ToolbarItemLocation, + ItemId, ItemNavHistory, Pane, StatusItemView, ToolbarItemLocation, Workspace, WorkspaceId, }; pub const MAX_TAB_TITLE_LEN: usize = 24; @@ -552,6 +552,21 @@ impl Item for Editor { })); Some(breadcrumbs) } + + fn serialized_item_kind() -> Option<&'static str> { + Some("Editor") + } + + fn deserialize( + _project: ModelHandle, + _workspace: WeakViewHandle, + _workspace_id: WorkspaceId, + _item_id: ItemId, + _cx: &mut ViewContext, + ) -> Task>> { + // Look up the path with this key associated, create a 
self with that path + unimplemented!() + } } impl ProjectItem for Editor { diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs new file mode 100644 index 0000000000..4b39f94638 --- /dev/null +++ b/crates/editor/src/persistence.rs @@ -0,0 +1,30 @@ +use std::path::PathBuf; + +use db::connection; +use indoc::indoc; +use lazy_static::lazy_static; +use project::WorktreeId; +use sqlez::domain::Domain; +use workspace::{ItemId, Workspace}; + +use crate::Editor; + +connection!(DB: EditorDb<(Workspace, Editor)>); + +impl Domain for Editor { + fn name() -> &'static str { + "editor" + } + + fn migrations() -> &'static [&'static str] { + &[indoc! {" + + "}] + } +} + +impl EditorDb { + fn get_path(_item_id: ItemId, _workspace_id: WorktreeId) -> PathBuf { + unimplemented!(); + } +} diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 94558fee3e..e0cc3cdd0b 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -804,6 +804,7 @@ impl Project { &self.collaborators } + /// Collect all worktrees, including ones that don't appear in the project panel pub fn worktrees<'a>( &'a self, cx: &'a AppContext, @@ -813,6 +814,7 @@ impl Project { .filter_map(move |worktree| worktree.upgrade(cx)) } + /// Collect all user-visible worktrees, the ones that appear in the project panel pub fn visible_worktrees<'a>( &'a self, cx: &'a AppContext, diff --git a/crates/search/src/project_search.rs b/crates/search/src/project_search.rs index edd4f40ba2..322d035870 100644 --- a/crates/search/src/project_search.rs +++ b/crates/search/src/project_search.rs @@ -353,6 +353,20 @@ impl Item for ProjectSearchView { fn breadcrumbs(&self, theme: &theme::Theme, cx: &AppContext) -> Option> { self.results_editor.breadcrumbs(theme, cx) } + + fn serialized_item_kind() -> Option<&'static str> { + None + } + + fn deserialize( + _project: ModelHandle, + _workspace: WeakViewHandle, + _workspace_id: workspace::WorkspaceId, + _item_id: workspace::ItemId, + _cx: &mut ViewContext, + ) -> Task>> { + unimplemented!() + } } impl ProjectSearchView { diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index 7a3483bcea..1e4f0df33f 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -2,6 +2,7 @@ use std::{ ffi::OsStr, os::unix::prelude::OsStrExt, path::{Path, PathBuf}, + sync::Arc, }; use anyhow::Result; @@ -118,6 +119,13 @@ impl Bind for &str { } } +impl Bind for Arc { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_text(start_index, self.as_ref())?; + Ok(start_index + 1) + } +} + impl Bind for String { fn bind(&self, statement: &Statement, start_index: i32) -> Result { statement.bind_text(start_index, self)?; @@ -125,6 +133,13 @@ impl Bind for String { } } +impl Column for Arc { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let result = statement.column_text(start_index)?; + Ok((Arc::from(result), start_index + 1)) + } +} + impl Column for String { fn column<'a>(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { let result = statement.column_text(start_index)?; diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index b673167c86..8ab1e345d8 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -54,10 +54,6 @@ impl Connection { self.persistent } - pub(crate) fn last_insert_id(&self) -> i64 { - unsafe { sqlite3_last_insert_rowid(self.sqlite3) } - } - pub fn backup_main(&self, 
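
Following the Arc<str> impls above, any newtype can opt into statement binding by mirroring the same two traits; a minimal sketch, with the type itself hypothetical:

    struct Kind(String);

    impl Bind for Kind {
        fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
            statement.bind_text(start_index, &self.0)?;
            Ok(start_index + 1)
        }
    }

    impl Column for Kind {
        fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
            let text = statement.column_text(start_index)?;
            Ok((Kind(text.to_string()), start_index + 1))
        }
    }
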
destination: &Connection) -> Result<()> { unsafe { let backup = sqlite3_backup_init( @@ -126,7 +122,7 @@ mod test { let text = "Some test text"; connection - .insert_bound("INSERT INTO text (text) VALUES (?);") + .exec_bound("INSERT INTO text (text) VALUES (?);") .unwrap()(text) .unwrap(); @@ -155,7 +151,7 @@ mod test { let tuple2 = ("test2".to_string(), 32, vec![64, 32, 16, 8, 4, 2, 1, 0]); let mut insert = connection - .insert_bound::<(String, usize, Vec)>( + .exec_bound::<(String, usize, Vec)>( "INSERT INTO test (text, integer, blob) VALUES (?, ?, ?)", ) .unwrap(); @@ -185,7 +181,7 @@ mod test { .unwrap(); connection - .insert_bound("INSERT INTO bools(t, f) VALUES (?, ?);") + .exec_bound("INSERT INTO bools(t, f) VALUES (?, ?)") .unwrap()((true, false)) .unwrap(); @@ -210,7 +206,7 @@ mod test { .unwrap(); let blob = vec![0, 1, 2, 4, 8, 16, 32, 64]; connection1 - .insert_bound::>("INSERT INTO blobs (data) VALUES (?);") + .exec_bound::>("INSERT INTO blobs (data) VALUES (?);") .unwrap()(blob.clone()) .unwrap(); diff --git a/crates/sqlez/src/domain.rs b/crates/sqlez/src/domain.rs index f57e89a5c8..b7cfbaef88 100644 --- a/crates/sqlez/src/domain.rs +++ b/crates/sqlez/src/domain.rs @@ -1,39 +1,50 @@ use crate::connection::Connection; pub trait Domain { - fn migrate(conn: &Connection) -> anyhow::Result<()>; + fn name() -> &'static str; + fn migrations() -> &'static [&'static str]; } -impl Domain for (D1, D2) { - fn migrate(conn: &Connection) -> anyhow::Result<()> { - D1::migrate(conn)?; - D2::migrate(conn) +pub trait Migrator { + fn migrate(connection: &Connection) -> anyhow::Result<()>; +} + +impl Migrator for D { + fn migrate(connection: &Connection) -> anyhow::Result<()> { + connection.migrate(Self::name(), Self::migrations()) } } -impl Domain for (D1, D2, D3) { - fn migrate(conn: &Connection) -> anyhow::Result<()> { - D1::migrate(conn)?; - D2::migrate(conn)?; - D3::migrate(conn) +impl Migrator for (D1, D2) { + fn migrate(connection: &Connection) -> anyhow::Result<()> { + D1::migrate(connection)?; + D2::migrate(connection) } } -impl Domain for (D1, D2, D3, D4) { - fn migrate(conn: &Connection) -> anyhow::Result<()> { - D1::migrate(conn)?; - D2::migrate(conn)?; - D3::migrate(conn)?; - D4::migrate(conn) +impl Migrator for (D1, D2, D3) { + fn migrate(connection: &Connection) -> anyhow::Result<()> { + D1::migrate(connection)?; + D2::migrate(connection)?; + D3::migrate(connection) } } -impl Domain for (D1, D2, D3, D4, D5) { - fn migrate(conn: &Connection) -> anyhow::Result<()> { - D1::migrate(conn)?; - D2::migrate(conn)?; - D3::migrate(conn)?; - D4::migrate(conn)?; - D5::migrate(conn) +impl Migrator for (D1, D2, D3, D4) { + fn migrate(connection: &Connection) -> anyhow::Result<()> { + D1::migrate(connection)?; + D2::migrate(connection)?; + D3::migrate(connection)?; + D4::migrate(connection) + } +} + +impl Migrator for (D1, D2, D3, D4, D5) { + fn migrate(connection: &Connection) -> anyhow::Result<()> { + D1::migrate(connection)?; + D2::migrate(connection)?; + D3::migrate(connection)?; + D4::migrate(connection)?; + D5::migrate(connection) } } diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 89eaebb494..1f4b3f0f7c 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -9,53 +9,27 @@ use indoc::{formatdoc, indoc}; use crate::connection::Connection; -const MIGRATIONS_MIGRATION: Migration = Migration::new( - "migrations", - // The migrations migration must be infallable because it runs to completion - // with every call to migration run and 
is run unchecked. - &[indoc! {" - CREATE TABLE IF NOT EXISTS migrations ( - domain TEXT, - step INTEGER, - migration TEXT - ) - "}], -); - -#[derive(Debug)] -pub struct Migration { - domain: &'static str, - migrations: &'static [&'static str], -} - -impl Migration { - pub const fn new(domain: &'static str, migrations: &'static [&'static str]) -> Self { - Self { domain, migrations } - } - - fn run_unchecked(&self, connection: &Connection) -> Result<()> { - for migration in self.migrations { - connection.exec(migration)?()?; - } - - Ok(()) - } - - pub fn run(&self, connection: &Connection) -> Result<()> { +impl Connection { + pub fn migrate(&self, domain: &'static str, migrations: &[&'static str]) -> Result<()> { // Setup the migrations table unconditionally - MIGRATIONS_MIGRATION.run_unchecked(connection)?; + self.exec(indoc! {" + CREATE TABLE IF NOT EXISTS migrations ( + domain TEXT, + step INTEGER, + migration TEXT + )"})?()?; let completed_migrations = - connection.select_bound::<&str, (String, usize, String)>(indoc! {" + self.select_bound::<&str, (String, usize, String)>(indoc! {" SELECT domain, step, migration FROM migrations WHERE domain = ? ORDER BY step - "})?(self.domain)?; + "})?(domain)?; - let mut store_completed_migration = connection - .insert_bound("INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)")?; + let mut store_completed_migration = + self.exec_bound("INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)")?; - for (index, migration) in self.migrations.iter().enumerate() { + for (index, migration) in migrations.iter().enumerate() { if let Some((_, _, completed_migration)) = completed_migrations.get(index) { if completed_migration != migration { return Err(anyhow!(formatdoc! {" @@ -65,15 +39,15 @@ impl Migration { {} Proposed migration: - {}", self.domain, index, completed_migration, migration})); + {}", domain, index, completed_migration, migration})); } else { // Migration already run. Continue continue; } } - connection.exec(migration)?()?; - store_completed_migration((self.domain, index, *migration))?; + self.exec(migration)?()?; + store_completed_migration((domain, index, *migration))?; } Ok(()) @@ -84,22 +58,23 @@ impl Migration { mod test { use indoc::indoc; - use crate::{connection::Connection, migrations::Migration}; + use crate::connection::Connection; #[test] fn test_migrations_are_added_to_table() { let connection = Connection::open_memory("migrations_are_added_to_table"); // Create first migration with a single step and run it - let mut migration = Migration::new( - "test", - &[indoc! {" - CREATE TABLE test1 ( - a TEXT, - b TEXT - )"}], - ); - migration.run(&connection).unwrap(); + connection + .migrate( + "test", + &[indoc! {" + CREATE TABLE test1 ( + a TEXT, + b TEXT + )"}], + ) + .unwrap(); // Verify it got added to the migrations table assert_eq!( @@ -107,23 +82,31 @@ mod test { .select::("SELECT (migration) FROM migrations") .unwrap()() .unwrap()[..], - migration.migrations - ); - - // Add another step to the migration and run it again - migration.migrations = &[ - indoc! {" + &[indoc! {" CREATE TABLE test1 ( a TEXT, b TEXT - )"}, - indoc! {" - CREATE TABLE test2 ( - c TEXT, - d TEXT - )"}, - ]; - migration.run(&connection).unwrap(); + )"}], + ); + + // Add another step to the migration and run it again + connection + .migrate( + "test", + &[ + indoc! {" + CREATE TABLE test1 ( + a TEXT, + b TEXT + )"}, + indoc! 
{" + CREATE TABLE test2 ( + c TEXT, + d TEXT + )"}, + ], + ) + .unwrap(); // Verify it is also added to the migrations table assert_eq!( @@ -131,7 +114,18 @@ mod test { .select::("SELECT (migration) FROM migrations") .unwrap()() .unwrap()[..], - migration.migrations + &[ + indoc! {" + CREATE TABLE test1 ( + a TEXT, + b TEXT + )"}, + indoc! {" + CREATE TABLE test2 ( + c TEXT, + d TEXT + )"}, + ], ); } @@ -150,7 +144,7 @@ mod test { .unwrap(); let mut store_completed_migration = connection - .insert_bound::<(&str, usize, String)>(indoc! {" + .exec_bound::<(&str, usize, String)>(indoc! {" INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)"}) .unwrap(); @@ -171,8 +165,7 @@ mod test { fn migrations_dont_rerun() { let connection = Connection::open_memory("migrations_dont_rerun"); - // Create migration which clears a table - let migration = Migration::new("test", &["DELETE FROM test_table"]); + // Create migration which clears a tabl // Manually create the table for that migration with a row connection @@ -197,7 +190,9 @@ mod test { ); // Run the migration verifying that the row got dropped - migration.run(&connection).unwrap(); + connection + .migrate("test", &["DELETE FROM test_table"]) + .unwrap(); assert_eq!( connection .select_row::("SELECT * FROM test_table") @@ -213,7 +208,9 @@ mod test { .unwrap(); // Run the same migration again and verify that the table was left unchanged - migration.run(&connection).unwrap(); + connection + .migrate("test", &["DELETE FROM test_table"]) + .unwrap(); assert_eq!( connection .select_row::("SELECT * FROM test_table") @@ -228,22 +225,22 @@ mod test { let connection = Connection::open_memory("changed_migration_fails"); // Create a migration with two steps and run it - Migration::new( - "test migration", - &[ - indoc! {" + connection + .migrate( + "test migration", + &[ + indoc! {" CREATE TABLE test ( col INTEGER )"}, - indoc! {" - INSERT INTO test (col) VALUES (1)"}, - ], - ) - .run(&connection) - .unwrap(); + indoc! {" + INSERT INTO test (col) VALUES (1)"}, + ], + ) + .unwrap(); // Create another migration with the same domain but different steps - let second_migration_result = Migration::new( + let second_migration_result = connection.migrate( "test migration", &[ indoc! {" @@ -253,8 +250,7 @@ mod test { indoc! {" INSERT INTO test (color) VALUES (1)"}, ], - ) - .run(&connection); + ); // Verify new migration returns error when run assert!(second_migration_result.is_err()) diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index b04f5bb82f..40118dd923 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -256,11 +256,6 @@ impl<'a> Statement<'a> { } } - pub fn insert(&mut self) -> Result { - self.exec()?; - Ok(self.connection.last_insert_id()) - } - pub fn exec(&mut self) -> Result<()> { fn logic(this: &mut Statement) -> Result<()> { while this.step()? 
== StepResult::Row {} diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index b9bb1657ea..e85ba4c51a 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -3,20 +3,23 @@ use std::{marker::PhantomData, ops::Deref, sync::Arc}; use connection::Connection; use thread_local::ThreadLocal; -use crate::{connection, domain::Domain}; +use crate::{ + connection, + domain::{Domain, Migrator}, +}; -pub struct ThreadSafeConnection { +pub struct ThreadSafeConnection { uri: Arc, persistent: bool, initialize_query: Option<&'static str>, connection: Arc>, - _pd: PhantomData, + _pd: PhantomData, } -unsafe impl Send for ThreadSafeConnection {} -unsafe impl Sync for ThreadSafeConnection {} +unsafe impl Send for ThreadSafeConnection {} +unsafe impl Sync for ThreadSafeConnection {} -impl ThreadSafeConnection { +impl ThreadSafeConnection { pub fn new(uri: &str, persistent: bool) -> Self { Self { uri: Arc::from(uri), @@ -72,7 +75,11 @@ impl Clone for ThreadSafeConnection { } } -impl Deref for ThreadSafeConnection { +// TODO: +// 1. When migration or initialization fails, move the corrupted db to a holding place and create a new one +// 2. If the new db also fails, downgrade to a shared in memory db +// 3. In either case notify the user about what went wrong +impl Deref for ThreadSafeConnection { type Target = Connection; fn deref(&self) -> &Self::Target { @@ -91,7 +98,7 @@ impl Deref for ThreadSafeConnection { .unwrap(); } - D::migrate(&connection).expect("Migrations failed"); + M::migrate(&connection).expect("Migrations failed"); connection }) diff --git a/crates/sqlez/src/typed_statements.rs b/crates/sqlez/src/typed_statements.rs index f2d66a781f..98f51b970a 100644 --- a/crates/sqlez/src/typed_statements.rs +++ b/crates/sqlez/src/typed_statements.rs @@ -20,19 +20,6 @@ impl Connection { Ok(move |bindings| statement.with_bindings(bindings)?.exec()) } - pub fn insert<'a>(&'a self, query: &str) -> Result Result> { - let mut statement = Statement::prepare(&self, query)?; - Ok(move || statement.insert()) - } - - pub fn insert_bound<'a, B: Bind>( - &'a self, - query: &str, - ) -> Result Result> { - let mut statement = Statement::prepare(&self, query)?; - Ok(move |bindings| statement.with_bindings(bindings)?.insert()) - } - pub fn select<'a, C: Column>( &'a self, query: &str, diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 7e469e19fe..15b3b4e66e 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -34,7 +34,9 @@ use mappings::mouse::{ use procinfo::LocalProcessInfo; use settings::{AlternateScroll, Settings, Shell, TerminalBlink}; +use terminal_container_view::TerminalContainer; use util::ResultExt; +use workspace::register_deserializable_item; use std::{ cmp::min, @@ -67,6 +69,8 @@ use lazy_static::lazy_static; pub fn init(cx: &mut MutableAppContext) { terminal_view::init(cx); terminal_container_view::init(cx); + + register_deserializable_item::(cx); } ///Scrolling is unbearably sluggish by default. 
Alacritty supports a configurable diff --git a/crates/terminal/src/terminal_container_view.rs b/crates/terminal/src/terminal_container_view.rs index 5d5fda1206..49b6ae341f 100644 --- a/crates/terminal/src/terminal_container_view.rs +++ b/crates/terminal/src/terminal_container_view.rs @@ -5,7 +5,7 @@ use alacritty_terminal::index::Point; use dirs::home_dir; use gpui::{ actions, elements::*, AnyViewHandle, AppContext, Entity, ModelHandle, MutableAppContext, Task, - View, ViewContext, ViewHandle, + View, ViewContext, ViewHandle, WeakViewHandle, }; use util::truncate_and_trailoff; use workspace::searchable::{SearchEvent, SearchOptions, SearchableItem, SearchableItemHandle}; @@ -13,6 +13,7 @@ use workspace::{ item::{Item, ItemEvent}, ToolbarItemLocation, Workspace, }; +use workspace::{register_deserializable_item, Pane}; use project::{LocalWorktree, Project, ProjectPath}; use settings::{AlternateScroll, Settings, WorkingDirectory}; @@ -26,6 +27,8 @@ actions!(terminal, [DeployModal]); pub fn init(cx: &mut MutableAppContext) { cx.add_action(TerminalContainer::deploy); + + register_deserializable_item::(cx); } //Make terminal view an enum, that can give you views for the error and non-error states @@ -127,7 +130,7 @@ impl TerminalContainer { TerminalContainerContent::Error(view) } }; - cx.focus(content.handle()); + // cx.focus(content.handle()); TerminalContainer { content, @@ -375,6 +378,22 @@ impl Item for TerminalContainer { ) .boxed()]) } + + fn serialized_item_kind() -> Option<&'static str> { + Some("Terminal") + } + + fn deserialize( + _project: ModelHandle, + _workspace: WeakViewHandle, + _workspace_id: workspace::WorkspaceId, + _item_id: workspace::ItemId, + cx: &mut ViewContext, + ) -> Task>> { + // TODO: Pull the current working directory out of the DB. 
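// (Sketch of that TODO, names hypothetical: a terminal persistence domain
// could record the working directory against (workspace_id, item_id) when
// serializing, and this deserialize would query it and pass Some(dir)
// instead of the None below.)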
+ + Task::ready(Ok(cx.add_view(|cx| TerminalContainer::new(None, false, cx)))) + } } impl SearchableItem for TerminalContainer { diff --git a/crates/theme_testbench/src/theme_testbench.rs b/crates/theme_testbench/src/theme_testbench.rs index 9c7d6bdf49..cf9f03de45 100644 --- a/crates/theme_testbench/src/theme_testbench.rs +++ b/crates/theme_testbench/src/theme_testbench.rs @@ -6,7 +6,8 @@ use gpui::{ Padding, ParentElement, }, fonts::TextStyle, - Border, Element, Entity, MutableAppContext, Quad, RenderContext, View, ViewContext, + Border, Element, Entity, ModelHandle, MutableAppContext, Quad, RenderContext, Task, View, + ViewContext, ViewHandle, WeakViewHandle, }; use project::{Project, ProjectEntryId, ProjectPath}; use settings::Settings; @@ -14,13 +15,15 @@ use smallvec::SmallVec; use theme::{ColorScheme, Layer, Style, StyleSet}; use workspace::{ item::{Item, ItemEvent}, - Workspace, + register_deserializable_item, Pane, Workspace, }; actions!(theme, [DeployThemeTestbench]); pub fn init(cx: &mut MutableAppContext) { cx.add_action(ThemeTestbench::deploy); + + register_deserializable_item::(cx) } pub struct ThemeTestbench {} @@ -357,4 +360,18 @@ impl Item for ThemeTestbench { fn to_item_events(_: &Self::Event) -> Vec { Vec::new() } + + fn serialized_item_kind() -> Option<&'static str> { + Some("ThemeTestBench") + } + + fn deserialize( + _project: ModelHandle, + _workspace: WeakViewHandle, + _workspace_id: workspace::WorkspaceId, + _item_id: workspace::ItemId, + cx: &mut ViewContext, + ) -> Task>> { + Task::ready(Ok(cx.add_view(|_| Self {}))) + } } diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index 553479b175..822a008eed 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -37,6 +37,7 @@ bincode = "1.2.1" anyhow = "1.0.38" futures = "0.3" lazy_static = "1.4" +env_logger = "0.9.1" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" postage = { version = "0.4.1", features = ["futures-traits"] } diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index 5b08b689ab..2e4fbcad6f 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -137,13 +137,8 @@ pub struct Dock { } impl Dock { - pub fn new( - default_item_factory: DefaultItemFactory, - position: Option, - cx: &mut ViewContext, - ) -> Self { - let position = position - .unwrap_or_else(|| DockPosition::Hidden(cx.global::().default_dock_anchor)); + pub fn new(default_item_factory: DefaultItemFactory, cx: &mut ViewContext) -> Self { + let position = DockPosition::Hidden(cx.global::().default_dock_anchor); let pane = cx.add_view(|cx| Pane::new(Some(position.anchor()), cx)); pane.update(cx, |pane, cx| { @@ -175,7 +170,7 @@ impl Dock { self.position.is_visible() && self.position.anchor() == anchor } - fn set_dock_position( + pub(crate) fn set_dock_position( workspace: &mut Workspace, new_position: DockPosition, cx: &mut ViewContext, @@ -211,6 +206,7 @@ impl Dock { cx.focus(last_active_center_pane); } cx.emit(crate::Event::DockAnchorChanged); + workspace.serialize_workspace(None, cx); cx.notify(); } @@ -347,6 +343,10 @@ impl Dock { } }) } + + pub fn position(&self) -> DockPosition { + self.position + } } pub struct ToggleDockButton { diff --git a/crates/workspace/src/item.rs b/crates/workspace/src/item.rs index 215ad47e1b..d006f2fe15 100644 --- a/crates/workspace/src/item.rs +++ b/crates/workspace/src/item.rs @@ -117,15 +117,18 @@ pub trait Item: View { fn breadcrumb_location(&self) -> ToolbarItemLocation { 
ToolbarItemLocation::Hidden } + fn breadcrumbs(&self, _theme: &Theme, _cx: &AppContext) -> Option> { None } fn serialized_item_kind() -> Option<&'static str>; fn deserialize( + project: ModelHandle, + workspace: WeakViewHandle, workspace_id: WorkspaceId, item_id: ItemId, - cx: &mut ViewContext, - ) -> Result; + cx: &mut ViewContext, + ) -> Task>>; } pub trait ItemHandle: 'static + fmt::Debug { @@ -181,6 +184,7 @@ pub trait ItemHandle: 'static + fmt::Debug { fn to_searchable_item_handle(&self, cx: &AppContext) -> Option>; fn breadcrumb_location(&self, cx: &AppContext) -> ToolbarItemLocation; fn breadcrumbs(&self, theme: &Theme, cx: &AppContext) -> Option>; + fn serialized_item_kind(&self) -> Option<&'static str>; } pub trait WeakItemHandle { @@ -515,6 +519,10 @@ impl ItemHandle for ViewHandle { fn breadcrumbs(&self, theme: &Theme, cx: &AppContext) -> Option> { self.read(cx).breadcrumbs(theme, cx) } + + fn serialized_item_kind(&self) -> Option<&'static str> { + T::serialized_item_kind() + } } impl From> for AnyViewHandle { @@ -645,15 +653,14 @@ impl FollowableItemHandle for ViewHandle { pub(crate) mod test { use std::{any::Any, borrow::Cow, cell::Cell}; - use anyhow::anyhow; use gpui::{ elements::Empty, AppContext, Element, ElementBox, Entity, ModelHandle, RenderContext, Task, - View, ViewContext, + View, ViewContext, ViewHandle, WeakViewHandle, }; use project::{Project, ProjectEntryId, ProjectPath}; use smallvec::SmallVec; - use crate::{sidebar::SidebarItem, ItemNavHistory}; + use crate::{sidebar::SidebarItem, ItemId, ItemNavHistory, Pane, Workspace, WorkspaceId}; use super::{Item, ItemEvent}; @@ -864,11 +871,13 @@ pub(crate) mod test { } fn deserialize( - workspace_id: crate::persistence::model::WorkspaceId, - item_id: crate::persistence::model::ItemId, - cx: &mut ViewContext, - ) -> anyhow::Result { - Err(anyhow!("Cannot deserialize test item")) + _project: ModelHandle, + _workspace: WeakViewHandle, + _workspace_id: WorkspaceId, + _item_id: ItemId, + _cx: &mut ViewContext, + ) -> Task>> { + unreachable!("Cannot deserialize test item") } } diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 164807b24f..cc07a76596 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -2,93 +2,81 @@ pub mod model; -use std::ops::Deref; use std::path::{Path, PathBuf}; use std::sync::Arc; -use anyhow::{bail, Context, Result}; -use db::open_file_db; +use anyhow::{anyhow, bail, Result, Context}; +use db::connection; use gpui::Axis; use indoc::indoc; use lazy_static::lazy_static; -use sqlez::thread_safe_connection::ThreadSafeConnection; -use sqlez::{connection::Connection, domain::Domain, migrations::Migration}; + +use sqlez::domain::Domain; use util::{iife, unzip_option, ResultExt}; +use crate::dock::DockPosition; + use super::Workspace; use model::{ - GroupId, PaneId, SerializedItem, SerializedItemKind, SerializedPane, SerializedPaneGroup, + GroupId, PaneId, SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace, WorkspaceId, }; -lazy_static! { - pub static ref DB: WorkspaceDb = WorkspaceDb(open_file_db()); -} - -pub struct WorkspaceDb(ThreadSafeConnection); - -impl Deref for WorkspaceDb { - type Target = ThreadSafeConnection; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( - "workspace", - &[indoc! 
{" - CREATE TABLE workspaces( - workspace_id BLOB PRIMARY KEY, - dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' - dock_visible INTEGER, -- Boolean - timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL - ) STRICT; - - CREATE TABLE pane_groups( - group_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL indicates that this is a root node - position INTEGER, -- NULL indicates that this is a root node - axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE, - FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE - ) STRICT; - - CREATE TABLE panes( - pane_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL, this is a dock pane - position INTEGER, -- NULL, this is a dock pane - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE, - FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE - ) STRICT; - - CREATE TABLE items( - item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique - workspace_id BLOB NOT NULL, - pane_id INTEGER NOT NULL, - kind TEXT NOT NULL, - position INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE, - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) - ON DELETE CASCADE, - PRIMARY KEY(item_id, workspace_id) - ) STRICT; - "}], -); +connection!(DB: WorkspaceDb); impl Domain for Workspace { - fn migrate(conn: &Connection) -> anyhow::Result<()> { - WORKSPACES_MIGRATION.run(&conn) + fn name() -> &'static str { + "workspace" + } + + fn migrations() -> &'static [&'static str] { + &[indoc! 
{" + CREATE TABLE workspaces( + workspace_id BLOB PRIMARY KEY, + dock_visible INTEGER, -- Boolean + dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL + ) STRICT; + + CREATE TABLE pane_groups( + group_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + parent_group_id INTEGER, -- NULL indicates that this is a root node + position INTEGER, -- NULL indicates that this is a root node + axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE, + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + + CREATE TABLE panes( + pane_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + parent_group_id INTEGER, -- NULL, this is a dock pane + position INTEGER, -- NULL, this is a dock pane + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE, + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + + CREATE TABLE items( + item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique + workspace_id BLOB NOT NULL, + pane_id INTEGER NOT NULL, + kind TEXT NOT NULL, + position INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE, + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) + ON DELETE CASCADE, + PRIMARY KEY(item_id, workspace_id) + ) STRICT; + "}] } } @@ -104,7 +92,7 @@ impl WorkspaceDb { // Note that we re-assign the workspace_id here in case it's empty // and we've grabbed the most recent workspace - let (workspace_id, dock_position) = iife!({ + let (workspace_id, dock_position): (WorkspaceId, DockPosition) = iife!({ if worktree_roots.len() == 0 { self.select_row(indoc! {" SELECT workspace_id, dock_visible, dock_anchor @@ -122,6 +110,7 @@ impl WorkspaceDb { .flatten()?; Some(SerializedWorkspace { + workspace_id: workspace_id.clone(), dock_pane: self .get_dock_pane(&workspace_id) .context("Getting dock pane") @@ -136,43 +125,47 @@ impl WorkspaceDb { /// Saves a workspace using the worktree roots. Will garbage collect any workspaces /// that used this workspace previously - pub fn save_workspace>( + pub fn save_workspace( &self, - worktree_roots: &[P], - old_roots: Option<&[P]>, + old_id: Option, workspace: &SerializedWorkspace, ) { - let workspace_id: WorkspaceId = worktree_roots.into(); - self.with_savepoint("update_worktrees", || { - if let Some(old_roots) = old_roots { - let old_id: WorkspaceId = old_roots.into(); - - self.exec_bound("DELETE FROM WORKSPACES WHERE workspace_id = ?")?(&old_id)?; + if let Some(old_id) = old_id { + self.exec_bound(indoc! {" + DELETE FROM pane_groups WHERE workspace_id = ?"})?(&old_id)?; + + // If collision, delete + + self.exec_bound(indoc! {" + UPDATE OR REPLACE workspaces + SET workspace_id = ?, + dock_visible = ?, + dock_anchor = ?, + timestamp = CURRENT_TIMESTAMP + WHERE workspace_id = ?"})?(( + &workspace.workspace_id, + workspace.dock_position, + &old_id, + ))?; + } else { + self.exec_bound(indoc! {" + DELETE FROM pane_groups WHERE workspace_id = ?"})?(&workspace.workspace_id)?; + self.exec_bound( + "INSERT OR REPLACE INTO workspaces(workspace_id, dock_visible, dock_anchor) VALUES (?, ?, ?)", + )?((&workspace.workspace_id, workspace.dock_position))?; } - - // Delete any previous workspaces with the same roots. This cascades to all - // other tables that are based on the same roots set. 
- // Insert new workspace into workspaces table if none were found - self.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?; - - self.exec_bound( - "INSERT INTO workspaces(workspace_id, dock_visible, dock_anchor) VALUES (?, ?, ?)", - )?((&workspace_id, workspace.dock_position))?; - + // Save center pane group and dock pane - self.save_pane_group(&workspace_id, &workspace.center_group, None)?; - self.save_pane(&workspace_id, &workspace.dock_pane, None)?; + self.save_pane_group(&workspace.workspace_id, &workspace.center_group, None)?; + self.save_pane(&workspace.workspace_id, &workspace.dock_pane, None)?; Ok(()) }) .with_context(|| { format!( "Update workspace with roots {:?}", - worktree_roots - .iter() - .map(|p| p.as_ref()) - .collect::>() + workspace.workspace_id.paths() ) }) .log_err(); @@ -253,15 +246,19 @@ impl WorkspaceDb { bail!("Pane groups must have a SerializedPaneGroup::Group at the root") } - let (parent_id, position) = unzip_option(parent); - match pane_group { SerializedPaneGroup::Group { axis, children } => { - let parent_id = self.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? - ((workspace_id, parent_id, position, *axis))?; + let (parent_id, position) = unzip_option(parent); + let group_id = self.select_row_bound::<_, i64>(indoc!{" + INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) + VALUES (?, ?, ?, ?) + RETURNING group_id"})? + ((workspace_id, parent_id, position, *axis))? + .ok_or_else(|| anyhow!("Couldn't retrieve group_id from inserted pane_group"))?; + for (position, group) in children.iter().enumerate() { - self.save_pane_group(workspace_id, group, Some((parent_id, position)))? + self.save_pane_group(workspace_id, group, Some((group_id, position)))? } Ok(()) } @@ -289,10 +286,13 @@ impl WorkspaceDb { parent: Option<(GroupId, usize)>, ) -> Result<()> { let (parent_id, order) = unzip_option(parent); - - let pane_id = self.insert_bound( - "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)", - )?((workspace_id, parent_id, order))?; + + let pane_id = self.select_row_bound::<_, i64>(indoc!{" + INSERT INTO panes(workspace_id, parent_group_id, position) + VALUES (?, ?, ?) + RETURNING pane_id"}, + )?((workspace_id, parent_id, order))? + .ok_or_else(|| anyhow!("Could not retrieve inserted pane_id"))?; self.save_items(workspace_id, pane_id, &pane.children) .context("Saving items") @@ -300,15 +300,9 @@ impl WorkspaceDb { pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { Ok(self.select_bound(indoc! {" - SELECT item_id, kind FROM items + SELECT kind, item_id FROM items WHERE pane_id = ? - ORDER BY position"})?(pane_id)? - .into_iter() - .map(|(item_id, kind)| match kind { - SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, - _ => unimplemented!(), - }) - .collect()) + ORDER BY position"})?(pane_id)?) } pub(crate) fn save_items( @@ -317,15 +311,11 @@ impl WorkspaceDb { pane_id: PaneId, items: &[SerializedItem], ) -> Result<()> { - let mut delete_old = self - .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? 
AND item_id = ?") - .context("Preparing deletion")?; - let mut insert_new = self.exec_bound( - "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", + let mut insert = self.exec_bound( + "INSERT INTO items(workspace_id, pane_id, position, kind, item_id) VALUES (?, ?, ?, ?, ?)", ).context("Preparing insertion")?; for (position, item) in items.iter().enumerate() { - delete_old((workspace_id, pane_id, item.item_id()))?; - insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?; + insert((workspace_id, pane_id, position, item))?; } Ok(()) @@ -339,34 +329,102 @@ mod tests { use super::*; + #[test] + fn test_full_workspace_serialization() { + env_logger::try_init().ok(); + + let db = WorkspaceDb(open_memory_db("test_full_workspace_serialization")); + + let dock_pane = crate::persistence::model::SerializedPane { + children: vec![ + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), + SerializedItem::new("Terminal", 3), + SerializedItem::new("Terminal", 4), + + ], + }; + + // ----------------- + // | 1,2 | 5,6 | + // | - - - | | + // | 3,4 | | + // ----------------- + let center_group = SerializedPaneGroup::Group { + axis: gpui::Axis::Horizontal, + children: vec![ + SerializedPaneGroup::Group { + axis: gpui::Axis::Vertical, + children: vec![ + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::new("Terminal", 5), + SerializedItem::new("Terminal", 6), + ], + }), + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::new("Terminal", 7), + SerializedItem::new("Terminal", 8), + + ], + }), + ], + }, + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::new("Terminal", 9), + SerializedItem::new("Terminal", 10), + + ], + }), + ], + }; + + let workspace = SerializedWorkspace { + workspace_id: (["/tmp", "/tmp2"]).into(), + dock_position: DockPosition::Shown(DockAnchor::Bottom), + center_group, + dock_pane, + }; + + db.save_workspace(None, &workspace); + let round_trip_workspace = db.workspace_for_roots(&["/tmp2", "/tmp"]); + + assert_eq!(workspace, round_trip_workspace.unwrap()); + + // Test guaranteed duplicate IDs + db.save_workspace(None, &workspace); + db.save_workspace(None, &workspace); + + let round_trip_workspace = db.workspace_for_roots(&["/tmp", "/tmp2"]); + assert_eq!(workspace, round_trip_workspace.unwrap()); + + + } + #[test] fn test_workspace_assignment() { - // env_logger::try_init().ok(); + env_logger::try_init().ok(); let db = WorkspaceDb(open_memory_db("test_basic_functionality")); let workspace_1 = SerializedWorkspace { + workspace_id: (["/tmp", "/tmp2"]).into(), dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), dock_pane: Default::default(), }; - let workspace_2 = SerializedWorkspace { + let mut workspace_2 = SerializedWorkspace { + workspace_id: (["/tmp"]).into(), dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), dock_pane: Default::default(), }; - let workspace_3 = SerializedWorkspace { - dock_position: crate::dock::DockPosition::Shown(DockAnchor::Right), - center_group: Default::default(), - dock_pane: Default::default(), - }; - - db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_1); - db.save_workspace(&["/tmp"], None, &workspace_2); - - db::write_db_to(&db, "test.db").unwrap(); + db.save_workspace(None, &workspace_1); + db.save_workspace(None, &workspace_2); // Test that paths are treated as a set assert_eq!( 
@@ -383,23 +441,32 @@ mod tests { assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None); // Test 'mutate' case of updating a pre-existing id - db.save_workspace(&["/tmp", "/tmp2"], Some(&["/tmp", "/tmp2"]), &workspace_2); + workspace_2.workspace_id = (["/tmp", "/tmp2"]).into(); + db.save_workspace(Some((&["/tmp"]).into()), &workspace_2); assert_eq!( db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), workspace_2 ); // Test other mechanism for mutating - db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_3); + let mut workspace_3 = SerializedWorkspace { + workspace_id: (&["/tmp", "/tmp2"]).into(), + dock_position: DockPosition::Shown(DockAnchor::Right), + center_group: Default::default(), + dock_pane: Default::default(), + }; + + + db.save_workspace(None, &workspace_3); assert_eq!( db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), workspace_3 ); // Make sure that updating paths differently also works + workspace_3.workspace_id = (["/tmp3", "/tmp4", "/tmp2"]).into(); db.save_workspace( - &["/tmp3", "/tmp4", "/tmp2"], - Some(&["/tmp", "/tmp2"]), + Some((&["/tmp", "/tmp2"]).into()), &workspace_3, ); assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None); @@ -408,16 +475,21 @@ mod tests { .unwrap(), workspace_3 ); + + } + use crate::dock::DockPosition; use crate::persistence::model::SerializedWorkspace; use crate::persistence::model::{SerializedItem, SerializedPane, SerializedPaneGroup}; - fn default_workspace( + fn default_workspace>( + workspace_id: &[P], dock_pane: SerializedPane, center_group: &SerializedPaneGroup, ) -> SerializedWorkspace { SerializedWorkspace { + workspace_id: workspace_id.into(), dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Right), center_group: center_group.clone(), dock_pane, @@ -426,23 +498,23 @@ mod tests { #[test] fn test_basic_dock_pane() { - // env_logger::try_init().ok(); + env_logger::try_init().ok(); let db = WorkspaceDb(open_memory_db("basic_dock_pane")); let dock_pane = crate::persistence::model::SerializedPane { children: vec![ - SerializedItem::Terminal { item_id: 1 }, - SerializedItem::Terminal { item_id: 4 }, - SerializedItem::Terminal { item_id: 2 }, - SerializedItem::Terminal { item_id: 3 }, + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 2), + SerializedItem::new("Terminal", 3), ], }; - let workspace = default_workspace(dock_pane, &Default::default()); - - db.save_workspace(&["/tmp"], None, &workspace); + let workspace = default_workspace(&["/tmp"], dock_pane, &Default::default()); + db.save_workspace(None, &workspace); + let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); assert_eq!(workspace.dock_pane, new_workspace.dock_pane); @@ -467,30 +539,30 @@ mod tests { children: vec![ SerializedPaneGroup::Pane(SerializedPane { children: vec![ - SerializedItem::Terminal { item_id: 1 }, - SerializedItem::Terminal { item_id: 2 }, + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), ], }), SerializedPaneGroup::Pane(SerializedPane { children: vec![ - SerializedItem::Terminal { item_id: 4 }, - SerializedItem::Terminal { item_id: 3 }, + SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 3), ], }), ], }, SerializedPaneGroup::Pane(SerializedPane { children: vec![ - SerializedItem::Terminal { item_id: 5 }, - SerializedItem::Terminal { item_id: 6 }, + SerializedItem::new("Terminal", 5), + SerializedItem::new("Terminal", 6), ], }), ], }; - let workspace = default_workspace(Default::default(), ¢er_pane); + let 
workspace = default_workspace(&["/tmp"], Default::default(), ¢er_pane); - db.save_workspace(&["/tmp"], None, &workspace); + db.save_workspace(None, &workspace); assert_eq!(workspace.center_group, center_pane); } diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index 7afd186a36..adc6ea7c1a 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -3,7 +3,7 @@ use std::{ sync::Arc, }; -use anyhow::{bail, Result}; +use anyhow::Result; use gpui::Axis; @@ -16,10 +16,10 @@ use sqlez::{ use crate::dock::DockPosition; #[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct WorkspaceId(Arc>); +pub struct WorkspaceId(Arc>); impl WorkspaceId { - pub fn paths(self) -> Arc> { + pub fn paths(&self) -> Arc> { self.0.clone() } } @@ -52,6 +52,7 @@ impl Column for WorkspaceId { #[derive(Debug, PartialEq, Eq)] pub struct SerializedWorkspace { + pub workspace_id: WorkspaceId, pub dock_position: DockPosition, pub center_group: SerializedPaneGroup, pub dock_pane: SerializedPane, @@ -90,67 +91,33 @@ pub type GroupId = i64; pub type PaneId = i64; pub type ItemId = usize; -pub(crate) enum SerializedItemKind { - Editor, - Diagnostics, - ProjectSearch, - Terminal, -} - -impl Bind for SerializedItemKind { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - SerializedItemKind::Editor => "Editor", - SerializedItemKind::Diagnostics => "Diagnostics", - SerializedItemKind::ProjectSearch => "ProjectSearch", - SerializedItemKind::Terminal => "Terminal", - } - .bind(statement, start_index) - } -} - -impl Column for SerializedItemKind { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(kind_text, next_index)| { - Ok(( - match kind_text.as_ref() { - "Editor" => SerializedItemKind::Editor, - "Diagnostics" => SerializedItemKind::Diagnostics, - "ProjectSearch" => SerializedItemKind::ProjectSearch, - "Terminal" => SerializedItemKind::Terminal, - _ => bail!("Stored serialized item kind is incorrect"), - }, - next_index, - )) - }) - } -} - #[derive(Debug, PartialEq, Eq, Clone)] -pub enum SerializedItem { - Editor { item_id: usize, path: Arc }, - Diagnostics { item_id: usize }, - ProjectSearch { item_id: usize, query: String }, - Terminal { item_id: usize }, +pub struct SerializedItem { + pub kind: Arc, + pub item_id: ItemId, } impl SerializedItem { - pub fn item_id(&self) -> usize { - match self { - SerializedItem::Editor { item_id, .. } => *item_id, - SerializedItem::Diagnostics { item_id } => *item_id, - SerializedItem::ProjectSearch { item_id, .. } => *item_id, - SerializedItem::Terminal { item_id } => *item_id, + pub fn new(kind: impl AsRef, item_id: ItemId) -> Self { + Self { + kind: Arc::from(kind.as_ref()), + item_id, } } +} - pub(crate) fn kind(&self) -> SerializedItemKind { - match self { - SerializedItem::Editor { .. } => SerializedItemKind::Editor, - SerializedItem::Diagnostics { .. } => SerializedItemKind::Diagnostics, - SerializedItem::ProjectSearch { .. } => SerializedItemKind::ProjectSearch, - SerializedItem::Terminal { .. 
} => SerializedItemKind::Terminal, - } +impl Bind for &SerializedItem { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = statement.bind(self.kind.clone(), start_index)?; + statement.bind(self.item_id, next_index) + } +} + +impl Column for SerializedItem { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (kind, next_index) = Arc::::column(statement, start_index)?; + let (item_id, next_index) = ItemId::column(statement, next_index)?; + Ok((SerializedItem { kind, item_id }, next_index)) } } @@ -187,8 +154,8 @@ mod tests { db.exec(indoc::indoc! {" CREATE TABLE workspace_id_test( - workspace_id BLOB, - dock_anchor TEXT + workspace_id BLOB, + dock_anchor TEXT );"}) .unwrap()() .unwrap(); diff --git a/crates/workspace/src/shared_screen.rs b/crates/workspace/src/shared_screen.rs index d6a69490a5..28623950df 100644 --- a/crates/workspace/src/shared_screen.rs +++ b/crates/workspace/src/shared_screen.rs @@ -1,7 +1,7 @@ use crate::{ item::ItemEvent, persistence::model::{ItemId, WorkspaceId}, - Item, ItemNavHistory, + Item, ItemNavHistory, Pane, Workspace, }; use anyhow::{anyhow, Result}; use call::participant::{Frame, RemoteVideoTrack}; @@ -10,8 +10,10 @@ use futures::StreamExt; use gpui::{ elements::*, geometry::{rect::RectF, vector::vec2f}, - Entity, ModelHandle, MouseButton, RenderContext, Task, View, ViewContext, + Entity, ModelHandle, MouseButton, RenderContext, Task, View, ViewContext, ViewHandle, + WeakViewHandle, }; +use project::Project; use settings::Settings; use smallvec::SmallVec; use std::{ @@ -191,10 +193,12 @@ impl Item for SharedScreen { } fn deserialize( - workspace_id: WorkspaceId, - item_id: ItemId, - cx: &mut ViewContext, - ) -> Result { - Err(anyhow!("SharedScreen can not be deserialized")) + _project: ModelHandle, + _workspace: WeakViewHandle, + _workspace_id: WorkspaceId, + _item_id: ItemId, + _cx: &mut ViewContext, + ) -> Task>> { + unreachable!("Shared screen can not be deserialized") } } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index c51979f655..3d170818e2 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -44,7 +44,8 @@ use language::LanguageRegistry; use log::{error, warn}; pub use pane::*; pub use pane_group::*; -use persistence::model::{ItemId, WorkspaceId}; +use persistence::model::SerializedItem; +pub use persistence::model::{ItemId, WorkspaceId}; use postage::prelude::Stream; use project::{Project, ProjectEntryId, ProjectPath, ProjectStore, Worktree, WorktreeId}; use serde::Deserialize; @@ -57,7 +58,7 @@ use theme::{Theme, ThemeRegistry}; pub use toolbar::{ToolbarItemLocation, ToolbarItemView}; use util::ResultExt; -use crate::persistence::model::SerializedWorkspace; +use crate::persistence::model::{SerializedPane, SerializedWorkspace}; #[derive(Clone, PartialEq)] pub struct RemoveWorktreeFromProject(pub WorktreeId); @@ -337,22 +338,27 @@ pub fn register_followable_item(cx: &mut MutableAppContext) { }); } -type SerializableItemBuilders = HashMap< - &'static str, - fn(WorkspaceId, ItemId, &mut ViewContext) -> Option>, +type ItemDeserializers = HashMap< + Arc, + fn( + ModelHandle, + WeakViewHandle, + WorkspaceId, + ItemId, + &mut ViewContext, + ) -> Task>>, >; pub fn register_deserializable_item(cx: &mut MutableAppContext) { - cx.update_default_global(|deserializers: &mut SerializableItemBuilders, _| { + cx.update_default_global(|deserializers: &mut ItemDeserializers, _cx| { if let Some(serialized_item_kind) = 
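        // Item types that return `None` here never get an entry in the
        // deserializer map and are simply skipped when restoring a workspace.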
I::serialized_item_kind() { - deserializers.insert(serialized_item_kind, |workspace_id, item_id, cx| { - if let Some(v) = - cx.add_option_view(|cx| I::deserialize(workspace_id, item_id, cx).log_err()) - { - Some(Box::new(v)) - } else { - None - } - }); + deserializers.insert( + Arc::from(serialized_item_kind), + |project, workspace, workspace_id, item_id, cx| { + let task = I::deserialize(project, workspace, workspace_id, item_id, cx); + cx.foreground() + .spawn(async { Ok(Box::new(task.await?) as Box<_>) }) + }, + ); } }); } @@ -549,6 +555,8 @@ impl Workspace { } project::Event::WorktreeRemoved(_) | project::Event::WorktreeAdded => { this.update_window_title(cx); + // TODO: Cache workspace_id on workspace and read from it here + this.serialize_workspace(None, cx); } project::Event::DisconnectedFromHost => { this.update_window_edited(cx); @@ -568,21 +576,9 @@ impl Workspace { .detach(); cx.focus(¢er_pane); cx.emit(Event::PaneAdded(center_pane.clone())); - let dock = Dock::new( - dock_default_factory, - serialized_workspace - .as_ref() - .map(|ws| ws.dock_position) - .clone(), - cx, - ); + let dock = Dock::new(dock_default_factory, cx); let dock_pane = dock.pane().clone(); - if let Some(serialized_workspace) = serialized_workspace { - - // Fill them in? - } - let fs = project.read(cx).fs().clone(); let user_store = project.read(cx).user_store(); let client = project.read(cx).client(); @@ -636,13 +632,13 @@ impl Workspace { let mut this = Workspace { modal: None, - weak_self: weak_handle, + weak_self: weak_handle.clone(), center: PaneGroup::new(center_pane.clone()), dock, // When removing an item, the last element remaining in this array // is used to find where focus should fallback to. As such, the order // of these two variables is important. - panes: vec![dock_pane, center_pane.clone()], + panes: vec![dock_pane.clone(), center_pane.clone()], panes_by_item: Default::default(), active_pane: center_pane.clone(), last_active_center_pane: Some(center_pane.downgrade()), @@ -655,7 +651,7 @@ impl Workspace { fs, left_sidebar, right_sidebar, - project, + project: project.clone(), leader_state: Default::default(), follower_states_by_leader: Default::default(), last_leaders_by_pane: Default::default(), @@ -663,9 +659,15 @@ impl Workspace { active_call, _observe_current_user, }; - this.project_remote_id_changed(this.project.read(cx).remote_id(), cx); + this.project_remote_id_changed(project.read(cx).remote_id(), cx); cx.defer(|this, cx| this.update_window_title(cx)); + if let Some(serialized_workspace) = serialized_workspace { + cx.defer(move |_, cx| { + Self::load_from_serialized_workspace(weak_handle, serialized_workspace, cx) + }); + } + this } @@ -1315,6 +1317,7 @@ impl Workspace { pub fn add_item(&mut self, item: Box, cx: &mut ViewContext) { let active_pane = self.active_pane().clone(); Pane::add_item(self, &active_pane, item, true, true, None, cx); + self.serialize_workspace(None, cx); } pub fn open_path( @@ -1519,6 +1522,7 @@ impl Workspace { entry.remove(); } } + self.serialize_workspace(None, cx); } _ => {} } @@ -2250,6 +2254,140 @@ impl Workspace { _ => {} } } + + fn workspace_id(&self, cx: &AppContext) -> WorkspaceId { + self.project() + .read(cx) + .visible_worktrees(cx) + .map(|worktree| worktree.read(cx).abs_path()) + .collect::>() + .into() + } + + fn serialize_workspace(&self, old_id: Option, cx: &mut MutableAppContext) { + let dock_pane = SerializedPane { + children: self + .dock + .pane() + .read(cx) + .items() + .filter_map(|item_handle| { + Some(SerializedItem { + kind: 
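                        // The `?` makes `filter_map` silently skip items that
                        // declare no serialized kind, instead of failing the
                        // whole serialization pass.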
Arc::from(item_handle.serialized_item_kind()?), + item_id: item_handle.id(), + }) + }) + .collect::>(), + }; + + let serialized_workspace = SerializedWorkspace { + workspace_id: self.workspace_id(cx), + dock_position: self.dock.position(), + dock_pane, + center_group: Default::default(), + }; + + cx.background() + .spawn(async move { + persistence::DB.save_workspace(old_id, &serialized_workspace); + }) + .detach(); + } + + fn load_from_serialized_workspace( + workspace: WeakViewHandle, + serialized_workspace: SerializedWorkspace, + cx: &mut MutableAppContext, + ) { + // fn process_splits( + // pane_group: SerializedPaneGroup, + // parent: Option, + // workspace: ViewHandle, + // cx: &mut AsyncAppContext, + // ) { + // match pane_group { + // SerializedPaneGroup::Group { axis, children } => { + // process_splits(pane_group, parent) + // } + // SerializedPaneGroup::Pane(pane) => { + // process_pane(pane) + // }, + // } + // } + + async fn deserialize_pane( + project: ModelHandle, + pane: SerializedPane, + pane_handle: ViewHandle, + workspace_id: WorkspaceId, + workspace: &ViewHandle, + cx: &mut AsyncAppContext, + ) { + for item in pane.children { + let project = project.clone(); + let workspace_id = workspace_id.clone(); + let item_handle = pane_handle + .update(cx, |_, cx| { + if let Some(deserializer) = cx.global::().get(&item.kind) + { + deserializer( + project, + workspace.downgrade(), + workspace_id, + item.item_id, + cx, + ) + } else { + Task::ready(Err(anyhow!( + "Deserializer does not exist for item kind: {}", + item.kind + ))) + } + }) + .await + .log_err(); + + if let Some(item_handle) = item_handle { + workspace.update(cx, |workspace, cx| { + Pane::add_item( + workspace, + &pane_handle, + item_handle, + false, + false, + None, + cx, + ); + }) + } + } + } + + cx.spawn(|mut cx| async move { + if let Some(workspace) = workspace.upgrade(&cx) { + let (project, dock_pane_handle) = workspace.read_with(&cx, |workspace, _| { + (workspace.project().clone(), workspace.dock_pane().clone()) + }); + deserialize_pane( + project, + serialized_workspace.dock_pane, + dock_pane_handle, + serialized_workspace.workspace_id, + &workspace, + &mut cx, + ) + .await; + + // Traverse the splits tree and add to things + // process_splits(serialized_workspace.center_group, None, workspace, &mut cx); + + workspace.update(&mut cx, |workspace, cx| { + Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx) + }); + } + }) + .detach(); + } } impl Entity for Workspace { diff --git a/crates/workspace/test.db b/crates/workspace/test.db index 7491ccde3ad6a8c23785f69b92b679e622b1e62c..9c94aa3162f548eeb541b3b23e9ccab9d09271e2 100644 GIT binary patch delta 1224 zcmZo@U}|V!njkI6!@$76!GHif6LpNGc^LHKrFi*U7}z;-8TfnneEI!(47r*(a=F4c zHhyO3YEEZk7Z(?2Y}eiVl0%nKi%S6t{QVSMTzy)YJ^F7D*O%ab;!3 z_R7uiJoSv)Ff|IewS%N!`X~GHWlsLer!aXVpZ#P}{tv92s~I>~Zx$3-$SK$p#K_33 zTlqQc8Cm@}C5@RTPt+Hld^(JwWT;yj} zVfN#MtDWS)#x{q6|1AHU&4LCK_&N2N*ckjcIb`7q*uqkaN-~otpZ2%rieuov&cBYo zjX!R)pn@MihZ+kjgC7F}Of4TfGb@81C&F+JbtYB@KSqQACzCoONQ48Xg;$n|mw|zS zi;tOs{~Z4u{!D%|erCQ4eDnCS`ONv4Hw!8}=jAfuVq^4Ulx1Xw>k?rJNiE9F%uCE+ zHsXXxGGUQqGUNbDa%gF|p6M|_oU<6BXu)?KOU{auX<>D)1;Csb4 zi?4|95dTg7jr^VbiTsxQf|~^u!uZ(LSs4A0LXCq9EPx0p4oqx}!jl{2jllGIdHu=ZTsoXwtY9Ur#hZJ$L>MO< z^9fGA!6`D?Q^t#{xs-)nTv?g1-EgxtPd$p^flGF? 
zpn@=$V0Qu|t31ba!}9#1?BasN?2xB8`MEy77(#}Nfq{XE{}2QJA^zK&1r4_Gvq~_VFoF$aWl~}G z;{@rMvlmIvXP^gC8dc#2OYR27%3j2B-KZ MPB35tISs)D01p0r!vFvP From 75d3d46b1b7582c59637c642a0f889978e474399 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Thu, 17 Nov 2022 12:43:16 -0800 Subject: [PATCH 158/240] wip serialize editor --- crates/editor/src/editor.rs | 1 + crates/editor/src/items.rs | 34 +++++++++++++++++++++++++++------ crates/terminal/src/terminal.rs | 2 -- 3 files changed, 29 insertions(+), 8 deletions(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index ce810bab0c..d1ccc5e8f3 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -373,6 +373,7 @@ pub fn init(cx: &mut MutableAppContext) { workspace::register_project_item::(cx); workspace::register_followable_item::(cx); + workspace::register_deserializable_item::(cx); } trait InvalidationRegion { diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index ae9bbd5748..0050122948 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -1,9 +1,9 @@ use crate::{ display_map::ToDisplayPoint, link_go_to_definition::hide_link_definition, - movement::surrounding_word, Anchor, Autoscroll, Editor, Event, ExcerptId, MultiBuffer, - MultiBufferSnapshot, NavigationData, ToPoint as _, FORMAT_TIMEOUT, + movement::surrounding_word, Anchor, Autoscroll, Editor, EditorMode, Event, ExcerptId, + MultiBuffer, MultiBufferSnapshot, NavigationData, ToPoint as _, FORMAT_TIMEOUT, }; -use anyhow::{anyhow, Result}; +use anyhow::{anyhow, Context, Result}; use futures::FutureExt; use gpui::{ elements::*, geometry::vector::vec2f, AppContext, Entity, ModelHandle, MutableAppContext, @@ -558,14 +558,36 @@ impl Item for Editor { } fn deserialize( - _project: ModelHandle, + project: ModelHandle, _workspace: WeakViewHandle, _workspace_id: WorkspaceId, _item_id: ItemId, - _cx: &mut ViewContext, + cx: &mut ViewContext, ) -> Task>> { // Look up the path with this key associated, create a self with that path - unimplemented!() + let path = Path::new("."); + if let Some(project_item) = project.update(cx, |project, cx| { + let (worktree, path) = project.find_local_worktree(path, cx)?; + let project_path = ProjectPath { + worktree_id: worktree.read(cx).id(), + path: path.into(), + }; + + Some(project.open_path(project_path, cx)) + }) { + cx.spawn(|pane, mut cx| async move { + let (_, project_item) = project_item.await?; + let buffer = project_item + .downcast::() + .context("Project item at stored path was not a buffer")?; + + Ok(cx.update(|cx| { + cx.add_view(pane, |cx| Editor::for_buffer(buffer, Some(project), cx)) + })) + }) + } else { + Task::ready(Err(anyhow!("Could not load file from stored path"))) + } } } diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 15b3b4e66e..66a64903d3 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -69,8 +69,6 @@ use lazy_static::lazy_static; pub fn init(cx: &mut MutableAppContext) { terminal_view::init(cx); terminal_container_view::init(cx); - - register_deserializable_item::(cx); } ///Scrolling is unbearably sluggish by default. 
Alacritty supports a configurable From 6530658c3ec202fcc958349f3e5fb4cf4fd1f95a Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 18 Nov 2022 14:20:52 -0800 Subject: [PATCH 159/240] Added center group deserialization --- Cargo.lock | 16 +- crates/db/src/db.rs | 20 ++- crates/db/src/kvp.rs | 2 +- crates/editor/src/items.rs | 3 +- crates/sqlez/src/connection.rs | 23 +-- crates/sqlez/src/migrations.rs | 8 +- crates/sqlez/src/savepoint.rs | 2 +- crates/sqlez/src/statement.rs | 6 +- crates/sqlez/src/thread_safe_connection.rs | 15 +- crates/workspace/Cargo.toml | 1 + crates/workspace/src/pane_group.rs | 14 +- crates/workspace/src/persistence.rs | 44 ++++-- crates/workspace/src/persistence/model.rs | 88 ++++++++++- crates/workspace/src/workspace.rs | 171 ++++++++++----------- 14 files changed, 264 insertions(+), 149 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b4df5a9ab9..d53e91aa71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -327,6 +327,17 @@ dependencies = [ "syn", ] +[[package]] +name = "async-recursion" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cda8f4bcc10624c4e85bc66b3f452cca98cfa5ca002dc83a16aad2367641bea" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-stream" version = "0.3.3" @@ -943,7 +954,7 @@ name = "client" version = "0.1.0" dependencies = [ "anyhow", - "async-recursion", + "async-recursion 0.3.2", "async-tungstenite", "collections", "db", @@ -7624,6 +7635,7 @@ name = "workspace" version = "0.1.0" dependencies = [ "anyhow", + "async-recursion 1.0.0", "bincode", "call", "client", @@ -7697,7 +7709,7 @@ dependencies = [ "anyhow", "assets", "async-compression", - "async-recursion", + "async-recursion 0.3.2", "async-trait", "auto_update", "backtrace", diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 9bb4286b83..39891718fb 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,6 +1,6 @@ pub mod kvp; -use std::fs::create_dir_all; +use std::fs::{create_dir_all, remove_dir_all}; use std::path::Path; #[cfg(any(test, feature = "test-support"))] @@ -10,7 +10,7 @@ use indoc::indoc; use sqlez::connection::Connection; use sqlez::domain::{Domain, Migrator}; use sqlez::thread_safe_connection::ThreadSafeConnection; -use util::channel::RELEASE_CHANNEL_NAME; +use util::channel::{ReleaseChannel, RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; use util::paths::DB_DIR; const INITIALIZE_QUERY: &'static str = indoc! {" @@ -26,18 +26,18 @@ pub fn open_file_db() -> ThreadSafeConnection { // Use 0 for now. Will implement incrementing and clearing of old db files soon TM let current_db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); - // if *RELEASE_CHANNEL == ReleaseChannel::Dev { - // remove_dir_all(¤t_db_dir).ok(); - // } + if *RELEASE_CHANNEL == ReleaseChannel::Dev && std::env::var("WIPE_DB").is_ok() { + remove_dir_all(¤t_db_dir).ok(); + } create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); let db_path = current_db_dir.join(Path::new("db.sqlite")); - ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) + ThreadSafeConnection::new(Some(db_path.to_string_lossy().as_ref()), true) .with_initialize_query(INITIALIZE_QUERY) } -pub fn open_memory_db(db_name: &str) -> ThreadSafeConnection { +pub fn open_memory_db(db_name: Option<&str>) -> ThreadSafeConnection { ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY) } @@ -65,7 +65,11 @@ macro_rules! connection { } lazy_static! 
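        // The expanded global is lazy: the first access opens the database,
        // using a fresh in-memory connection under test (or with the
        // `test-support` feature) and the on-disk file otherwise.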
{ - pub static ref $id: $t = $t(::db::open_file_db()); + pub static ref $id: $t = $t(if cfg!(any(test, feature = "test-support")) { + ::db::open_memory_db(None) + } else { + ::db::open_file_db() + }); } }; } diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index dd82c17615..3cdcd99016 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -61,7 +61,7 @@ mod tests { #[test] fn test_kvp() -> Result<()> { - let db = KeyValueStore(crate::open_memory_db("test_kvp")); + let db = KeyValueStore(crate::open_memory_db(Some("test_kvp"))); assert_eq!(db.read_kvp("key-1").unwrap(), None); diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 0050122948..60ac8d5278 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -554,7 +554,8 @@ impl Item for Editor { } fn serialized_item_kind() -> Option<&'static str> { - Some("Editor") + // TODO: Some("Editor") + None } fn deserialize( diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 8ab1e345d8..1eaeb090e1 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -42,11 +42,16 @@ impl Connection { /// Attempts to open the database at uri. If it fails, a shared memory db will be opened /// instead. pub fn open_file(uri: &str) -> Self { - Self::open(uri, true).unwrap_or_else(|_| Self::open_memory(uri)) + Self::open(uri, true).unwrap_or_else(|_| Self::open_memory(Some(uri))) } - pub fn open_memory(uri: &str) -> Self { - let in_memory_path = format!("file:{}?mode=memory&cache=shared", uri); + pub fn open_memory(uri: Option<&str>) -> Self { + let in_memory_path = if let Some(uri) = uri { + format!("file:{}?mode=memory&cache=shared", uri) + } else { + ":memory:".to_string() + }; + Self::open(&in_memory_path, false).expect("Could not create fallback in memory db") } @@ -110,7 +115,7 @@ mod test { #[test] fn string_round_trips() -> Result<()> { - let connection = Connection::open_memory("string_round_trips"); + let connection = Connection::open_memory(Some("string_round_trips")); connection .exec(indoc! {" CREATE TABLE text ( @@ -136,7 +141,7 @@ mod test { #[test] fn tuple_round_trips() { - let connection = Connection::open_memory("tuple_round_trips"); + let connection = Connection::open_memory(Some("tuple_round_trips")); connection .exec(indoc! {" CREATE TABLE test ( @@ -170,7 +175,7 @@ mod test { #[test] fn bool_round_trips() { - let connection = Connection::open_memory("bool_round_trips"); + let connection = Connection::open_memory(Some("bool_round_trips")); connection .exec(indoc! {" CREATE TABLE bools ( @@ -196,7 +201,7 @@ mod test { #[test] fn backup_works() { - let connection1 = Connection::open_memory("backup_works"); + let connection1 = Connection::open_memory(Some("backup_works")); connection1 .exec(indoc! {" CREATE TABLE blobs ( @@ -211,7 +216,7 @@ mod test { .unwrap(); // Backup connection1 to connection2 - let connection2 = Connection::open_memory("backup_works_other"); + let connection2 = Connection::open_memory(Some("backup_works_other")); connection1.backup_main(&connection2).unwrap(); // Delete the added blob and verify its deleted on the other side @@ -224,7 +229,7 @@ mod test { #[test] fn multi_step_statement_works() { - let connection = Connection::open_memory("multi_step_statement_works"); + let connection = Connection::open_memory(Some("multi_step_statement_works")); connection .exec(indoc! 
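            // A single exec string may contain several semicolon-separated
            // statements; prepare() walks the remaining SQL and queues each
            // statement so they can be stepped in order.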
{" diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 1f4b3f0f7c..23af04bbf4 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -62,7 +62,7 @@ mod test { #[test] fn test_migrations_are_added_to_table() { - let connection = Connection::open_memory("migrations_are_added_to_table"); + let connection = Connection::open_memory(Some("migrations_are_added_to_table")); // Create first migration with a single step and run it connection @@ -131,7 +131,7 @@ mod test { #[test] fn test_migration_setup_works() { - let connection = Connection::open_memory("migration_setup_works"); + let connection = Connection::open_memory(Some("migration_setup_works")); connection .exec(indoc! {" @@ -163,7 +163,7 @@ mod test { #[test] fn migrations_dont_rerun() { - let connection = Connection::open_memory("migrations_dont_rerun"); + let connection = Connection::open_memory(Some("migrations_dont_rerun")); // Create migration which clears a tabl @@ -222,7 +222,7 @@ mod test { #[test] fn changed_migration_fails() { - let connection = Connection::open_memory("changed_migration_fails"); + let connection = Connection::open_memory(Some("changed_migration_fails")); // Create a migration with two steps and run it connection diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs index 9751aac51d..09c2e94148 100644 --- a/crates/sqlez/src/savepoint.rs +++ b/crates/sqlez/src/savepoint.rs @@ -59,7 +59,7 @@ mod tests { #[test] fn test_nested_savepoints() -> Result<()> { - let connection = Connection::open_memory("nested_savepoints"); + let connection = Connection::open_memory(Some("nested_savepoints")); connection .exec(indoc! {" diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index 40118dd923..f0afc0e020 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -352,7 +352,7 @@ mod test { #[test] fn blob_round_trips() { - let connection1 = Connection::open_memory("blob_round_trips"); + let connection1 = Connection::open_memory(Some("blob_round_trips")); connection1 .exec(indoc! {" CREATE TABLE blobs ( @@ -369,7 +369,7 @@ mod test { assert_eq!(write.step().unwrap(), StepResult::Done); // Read the blob from the - let connection2 = Connection::open_memory("blob_round_trips"); + let connection2 = Connection::open_memory(Some("blob_round_trips")); let mut read = Statement::prepare(&connection2, "SELECT * FROM blobs").unwrap(); assert_eq!(read.step().unwrap(), StepResult::Row); assert_eq!(read.column_blob(0).unwrap(), blob); @@ -383,7 +383,7 @@ mod test { #[test] pub fn maybe_returns_options() { - let connection = Connection::open_memory("maybe_returns_options"); + let connection = Connection::open_memory(Some("maybe_returns_options")); connection .exec(indoc! 
{" CREATE TABLE texts ( diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index e85ba4c51a..f415c32960 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -9,7 +9,7 @@ use crate::{ }; pub struct ThreadSafeConnection { - uri: Arc, + uri: Option>, persistent: bool, initialize_query: Option<&'static str>, connection: Arc>, @@ -20,9 +20,13 @@ unsafe impl Send for ThreadSafeConnection {} unsafe impl Sync for ThreadSafeConnection {} impl ThreadSafeConnection { - pub fn new(uri: &str, persistent: bool) -> Self { + pub fn new(uri: Option<&str>, persistent: bool) -> Self { + if persistent == true && uri == None { + // This panic is securing the unwrap in open_file(), don't remove it! + panic!("Cannot create a persistent connection without a URI") + } Self { - uri: Arc::from(uri), + uri: uri.map(|str| Arc::from(str)), persistent, initialize_query: None, connection: Default::default(), @@ -41,13 +45,14 @@ impl ThreadSafeConnection { /// called from the deref function. /// If opening fails, the connection falls back to a shared memory connection fn open_file(&self) -> Connection { - Connection::open_file(self.uri.as_ref()) + // This unwrap is secured by a panic in the constructor. Be careful if you remove it! + Connection::open_file(self.uri.as_ref().unwrap()) } /// Opens a shared memory connection using the file path as the identifier. This unwraps /// as we expect it always to succeed fn open_shared_memory(&self) -> Connection { - Connection::open_memory(self.uri.as_ref()) + Connection::open_memory(self.uri.as_ref().map(|str| str.deref())) } // Open a new connection for the given domain, leaving this diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index 822a008eed..0ce3bc220b 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -33,6 +33,7 @@ settings = { path = "../settings" } sqlez = { path = "../sqlez" } theme = { path = "../theme" } util = { path = "../util" } +async-recursion = "1.0.0" bincode = "1.2.1" anyhow = "1.0.38" futures = "0.3" diff --git a/crates/workspace/src/pane_group.rs b/crates/workspace/src/pane_group.rs index 6442429b0d..b8e73d6f6f 100644 --- a/crates/workspace/src/pane_group.rs +++ b/crates/workspace/src/pane_group.rs @@ -13,10 +13,14 @@ use theme::Theme; #[derive(Clone, Debug, Eq, PartialEq)] pub struct PaneGroup { - root: Member, + pub(crate) root: Member, } impl PaneGroup { + pub(crate) fn with_root(root: Member) -> Self { + Self { root } + } + pub fn new(pane: ViewHandle) -> Self { Self { root: Member::Pane(pane), @@ -85,7 +89,7 @@ impl PaneGroup { } #[derive(Clone, Debug, Eq, PartialEq)] -enum Member { +pub(crate) enum Member { Axis(PaneAxis), Pane(ViewHandle), } @@ -276,9 +280,9 @@ impl Member { } #[derive(Clone, Debug, Eq, PartialEq)] -struct PaneAxis { - axis: Axis, - members: Vec, +pub(crate) struct PaneAxis { + pub axis: Axis, + pub members: Vec, } impl PaneAxis { diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index cc07a76596..f7517ec8bf 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -55,8 +55,8 @@ impl Domain for Workspace { CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL, this is a dock pane - position INTEGER, -- NULL, this is a dock pane + parent_group_id INTEGER, -- NULL means that this is a dock pane + position INTEGER, -- NULL means that this is a dock pane 
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE ON UPDATE CASCADE, @@ -164,7 +164,7 @@ impl WorkspaceDb { }) .with_context(|| { format!( - "Update workspace with roots {:?}", + "Update workspace with roots {:?} failed.", workspace.workspace_id.paths() ) }) @@ -196,6 +196,17 @@ impl WorkspaceDb { .into_iter() .next() .context("No center pane group") + .map(|pane_group| { + // Rewrite the special case of the root being a leaf node + if let SerializedPaneGroup::Group { axis: Axis::Horizontal, ref children } = pane_group { + if children.len() == 1 { + if let Some(SerializedPaneGroup::Pane(pane)) = children.get(0) { + return SerializedPaneGroup::Pane(pane.clone()) + } + } + } + pane_group + }) } fn get_pane_group_children<'a>( @@ -242,9 +253,12 @@ impl WorkspaceDb { pane_group: &SerializedPaneGroup, parent: Option<(GroupId, usize)>, ) -> Result<()> { - if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { .. }) { - bail!("Pane groups must have a SerializedPaneGroup::Group at the root") - } + // Rewrite the root node to fit with the database + let pane_group = if parent.is_none() && matches!(pane_group, SerializedPaneGroup::Pane { .. }) { + SerializedPaneGroup::Group { axis: Axis::Horizontal, children: vec![pane_group.clone()] } + } else { + pane_group.clone() + }; match pane_group { SerializedPaneGroup::Group { axis, children } => { @@ -254,7 +268,7 @@ impl WorkspaceDb { INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?) RETURNING group_id"})? - ((workspace_id, parent_id, position, *axis))? + ((workspace_id, parent_id, position, axis))? .ok_or_else(|| anyhow!("Couldn't retrieve group_id from inserted pane_group"))?; for (position, group) in children.iter().enumerate() { @@ -262,7 +276,9 @@ impl WorkspaceDb { } Ok(()) } - SerializedPaneGroup::Pane(pane) => self.save_pane(workspace_id, pane, parent), + SerializedPaneGroup::Pane(pane) => { + self.save_pane(workspace_id, &pane, parent) + }, } } @@ -324,7 +340,7 @@ impl WorkspaceDb { #[cfg(test)] mod tests { - use db::open_memory_db; + use db::{open_memory_db, write_db_to}; use settings::DockAnchor; use super::*; @@ -333,7 +349,7 @@ mod tests { fn test_full_workspace_serialization() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_full_workspace_serialization")); + let db = WorkspaceDb(open_memory_db(Some("test_full_workspace_serialization"))); let dock_pane = crate::persistence::model::SerializedPane { children: vec![ @@ -407,7 +423,7 @@ mod tests { fn test_workspace_assignment() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_basic_functionality")); + let db = WorkspaceDb(open_memory_db(Some("test_basic_functionality"))); let workspace_1 = SerializedWorkspace { workspace_id: (["/tmp", "/tmp2"]).into(), @@ -500,7 +516,7 @@ mod tests { fn test_basic_dock_pane() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("basic_dock_pane")); + let db = WorkspaceDb(open_memory_db(Some("basic_dock_pane"))); let dock_pane = crate::persistence::model::SerializedPane { children: vec![ @@ -514,7 +530,7 @@ mod tests { let workspace = default_workspace(&["/tmp"], dock_pane, &Default::default()); db.save_workspace(None, &workspace); - + write_db_to(&db, "dest.db").unwrap(); let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); assert_eq!(workspace.dock_pane, new_workspace.dock_pane); @@ -524,7 +540,7 @@ mod tests { fn test_simple_split() { // env_logger::try_init().ok(); - let db = 
WorkspaceDb(open_memory_db("simple_split")); + let db = WorkspaceDb(open_memory_db(Some("simple_split"))); // ----------------- // | 1,2 | 5,6 | diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index adc6ea7c1a..b4b5db5f11 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -5,15 +5,20 @@ use std::{ use anyhow::Result; -use gpui::Axis; +use async_recursion::async_recursion; +use gpui::{AsyncAppContext, Axis, ModelHandle, Task, ViewHandle}; +use project::Project; use settings::DockAnchor; use sqlez::{ bindable::{Bind, Column}, statement::Statement, }; +use util::ResultExt; -use crate::dock::DockPosition; +use crate::{ + dock::DockPosition, item::ItemHandle, ItemDeserializers, Member, Pane, PaneAxis, Workspace, +}; #[derive(Debug, Clone, PartialEq, Eq)] pub struct WorkspaceId(Arc>); @@ -69,9 +74,42 @@ pub enum SerializedPaneGroup { impl Default for SerializedPaneGroup { fn default() -> Self { - Self::Group { - axis: Axis::Horizontal, - children: vec![Self::Pane(Default::default())], + Self::Pane(SerializedPane { + children: Vec::new(), + }) + } +} + +impl SerializedPaneGroup { + #[async_recursion(?Send)] + pub(crate) async fn deserialize( + &self, + project: &ModelHandle, + workspace_id: &WorkspaceId, + workspace: &ViewHandle, + cx: &mut AsyncAppContext, + ) -> Member { + match self { + SerializedPaneGroup::Group { axis, children } => { + let mut members = Vec::new(); + for child in children { + let new_member = child + .deserialize(project, workspace_id, workspace, cx) + .await; + members.push(new_member); + } + Member::Axis(PaneAxis { + axis: *axis, + members, + }) + } + SerializedPaneGroup::Pane(serialized_pane) => { + let pane = workspace.update(cx, |workspace, cx| workspace.add_pane(cx)); + serialized_pane + .deserialize_to(project, &pane, workspace_id, workspace, cx) + .await; + Member::Pane(pane) + } } } } @@ -85,6 +123,44 @@ impl SerializedPane { pub fn new(children: Vec) -> Self { SerializedPane { children } } + + pub async fn deserialize_to( + &self, + project: &ModelHandle, + pane_handle: &ViewHandle, + workspace_id: &WorkspaceId, + workspace: &ViewHandle, + cx: &mut AsyncAppContext, + ) { + for item in self.children.iter() { + let project = project.clone(); + let workspace_id = workspace_id.clone(); + let item_handle = pane_handle + .update(cx, |_, cx| { + if let Some(deserializer) = cx.global::().get(&item.kind) { + deserializer( + project, + workspace.downgrade(), + workspace_id, + item.item_id, + cx, + ) + } else { + Task::ready(Err(anyhow::anyhow!( + "Deserializer does not exist for item kind: {}", + item.kind + ))) + } + }) + .await + .log_err(); + if let Some(item_handle) = item_handle { + workspace.update(cx, |workspace, cx| { + Pane::add_item(workspace, &pane_handle, item_handle, false, false, None, cx); + }) + } + } + } } pub type GroupId = i64; @@ -150,7 +226,7 @@ mod tests { #[test] fn test_workspace_round_trips() { - let db = Connection::open_memory("workspace_id_round_trips"); + let db = Connection::open_memory(Some("workspace_id_round_trips")); db.exec(indoc::indoc! 
{" CREATE TABLE workspace_id_test( diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 3d170818e2..072bd80e1d 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -58,7 +58,7 @@ use theme::{Theme, ThemeRegistry}; pub use toolbar::{ToolbarItemLocation, ToolbarItemView}; use util::ResultExt; -use crate::persistence::model::{SerializedPane, SerializedWorkspace}; +use crate::persistence::model::{SerializedPane, SerializedPaneGroup, SerializedWorkspace}; #[derive(Clone, PartialEq)] pub struct RemoveWorktreeFromProject(pub WorktreeId); @@ -2264,27 +2264,62 @@ impl Workspace { .into() } - fn serialize_workspace(&self, old_id: Option, cx: &mut MutableAppContext) { - let dock_pane = SerializedPane { - children: self - .dock - .pane() - .read(cx) - .items() - .filter_map(|item_handle| { - Some(SerializedItem { - kind: Arc::from(item_handle.serialized_item_kind()?), - item_id: item_handle.id(), + fn remove_panes(&mut self, member: Member, cx: &mut ViewContext) { + match member { + Member::Axis(PaneAxis { members, .. }) => { + for child in members.iter() { + self.remove_panes(child.clone(), cx) + } + } + Member::Pane(pane) => self.remove_pane(pane.clone(), cx), + } + } + + fn serialize_workspace(&self, old_id: Option, cx: &AppContext) { + fn serialize_pane_handle( + pane_handle: &ViewHandle, + cx: &AppContext, + ) -> SerializedPane { + SerializedPane { + children: pane_handle + .read(cx) + .items() + .filter_map(|item_handle| { + Some(SerializedItem { + kind: Arc::from(item_handle.serialized_item_kind()?), + item_id: item_handle.id(), + }) }) - }) - .collect::>(), - }; + .collect::>(), + } + } + + let dock_pane = serialize_pane_handle(self.dock.pane(), cx); + + fn build_serialized_pane_group( + pane_group: &Member, + cx: &AppContext, + ) -> SerializedPaneGroup { + match pane_group { + Member::Axis(PaneAxis { axis, members }) => SerializedPaneGroup::Group { + axis: *axis, + children: members + .iter() + .map(|member| build_serialized_pane_group(member, cx)) + .collect::>(), + }, + Member::Pane(pane_handle) => { + SerializedPaneGroup::Pane(serialize_pane_handle(&pane_handle, cx)) + } + } + } + let center_group = build_serialized_pane_group(&self.center.root, cx); let serialized_workspace = SerializedWorkspace { workspace_id: self.workspace_id(cx), dock_position: self.dock.position(), dock_pane, - center_group: Default::default(), + center_group, }; cx.background() @@ -2299,87 +2334,43 @@ impl Workspace { serialized_workspace: SerializedWorkspace, cx: &mut MutableAppContext, ) { - // fn process_splits( - // pane_group: SerializedPaneGroup, - // parent: Option, - // workspace: ViewHandle, - // cx: &mut AsyncAppContext, - // ) { - // match pane_group { - // SerializedPaneGroup::Group { axis, children } => { - // process_splits(pane_group, parent) - // } - // SerializedPaneGroup::Pane(pane) => { - // process_pane(pane) - // }, - // } - // } - - async fn deserialize_pane( - project: ModelHandle, - pane: SerializedPane, - pane_handle: ViewHandle, - workspace_id: WorkspaceId, - workspace: &ViewHandle, - cx: &mut AsyncAppContext, - ) { - for item in pane.children { - let project = project.clone(); - let workspace_id = workspace_id.clone(); - let item_handle = pane_handle - .update(cx, |_, cx| { - if let Some(deserializer) = cx.global::().get(&item.kind) - { - deserializer( - project, - workspace.downgrade(), - workspace_id, - item.item_id, - cx, - ) - } else { - Task::ready(Err(anyhow!( - "Deserializer does not exist for item kind: {}", - 
item.kind - ))) - } - }) - .await - .log_err(); - - if let Some(item_handle) = item_handle { - workspace.update(cx, |workspace, cx| { - Pane::add_item( - workspace, - &pane_handle, - item_handle, - false, - false, - None, - cx, - ); - }) - } - } - } - cx.spawn(|mut cx| async move { if let Some(workspace) = workspace.upgrade(&cx) { let (project, dock_pane_handle) = workspace.read_with(&cx, |workspace, _| { (workspace.project().clone(), workspace.dock_pane().clone()) }); - deserialize_pane( - project, - serialized_workspace.dock_pane, - dock_pane_handle, - serialized_workspace.workspace_id, - &workspace, - &mut cx, - ) - .await; + + serialized_workspace + .dock_pane + .deserialize_to( + &project, + &dock_pane_handle, + &serialized_workspace.workspace_id, + &workspace, + &mut cx, + ) + .await; // Traverse the splits tree and add to things - // process_splits(serialized_workspace.center_group, None, workspace, &mut cx); + + let root = serialized_workspace + .center_group + .deserialize( + &project, + &serialized_workspace.workspace_id, + &workspace, + &mut cx, + ) + .await; + + // Remove old panes from workspace panes list + workspace.update(&mut cx, |workspace, cx| { + workspace.remove_panes(workspace.center.root.clone(), cx); + + // Swap workspace center group + workspace.center = PaneGroup::with_root(root); + cx.notify(); + }); workspace.update(&mut cx, |workspace, cx| { Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx) From a0cb6542ba87b201bd0108673098f62ff3fc0dee Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 18 Nov 2022 16:56:17 -0800 Subject: [PATCH 160/240] Polishing workspace data structures Co-authored-by: kay@zed.dev --- crates/sqlez/src/migrations.rs | 2 +- crates/sqlez/src/statement.rs | 8 +- crates/sqlez/src/thread_safe_connection.rs | 47 ++++ crates/workspace/dest.db | Bin 0 -> 36864 bytes crates/workspace/src/persistence.rs | 288 ++++++++++++++------- crates/workspace/src/persistence/model.rs | 29 ++- crates/workspace/src/workspace.rs | 39 +-- 7 files changed, 287 insertions(+), 126 deletions(-) create mode 100644 crates/workspace/dest.db diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 23af04bbf4..d77d54095b 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -58,7 +58,7 @@ impl Connection { mod test { use indoc::indoc; - use crate::connection::Connection; + use crate::{connection::Connection, thread_safe_connection::ThreadSafeConnection}; #[test] fn test_migrations_are_added_to_table() { diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index f0afc0e020..164929010b 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -59,11 +59,11 @@ impl<'a> Statement<'a> { ); remaining_sql = CStr::from_ptr(remaining_sql_ptr); statement.raw_statements.push(raw_statement); - } - connection - .last_error() - .with_context(|| format!("Prepare call failed for query:\n{}", query.as_ref()))?; + connection.last_error().with_context(|| { + format!("Prepare call failed for query:\n{}", query.as_ref()) + })?; + } } Ok(statement) diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index f415c32960..4ed1805407 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -109,3 +109,50 @@ impl Deref for ThreadSafeConnection { }) } } + +#[cfg(test)] +mod test { + use std::ops::Deref; + + use crate::domain::Domain; + + use super::ThreadSafeConnection; + + 
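    // A minimal sketch, assuming only the API shown above (`Domain`,
    // `ThreadSafeConnection::new`, `with_initialize_query`): the same harness
    // with a well-formed migration should succeed, because every FOREIGN KEY
    // target exists by the time the statement runs. This hypothetical test is
    // here for contrast with the intentionally broken schema below.
    #[test]
    fn well_formed_migration_succeeds() {
        enum TestDomain {}
        impl Domain for TestDomain {
            fn name() -> &'static str {
                "smoke_test"
            }

            fn migrations() -> &'static [&'static str] {
                &["CREATE TABLE smoke_test(id INTEGER PRIMARY KEY) STRICT;"]
            }
        }

        // Deref opens the connection and applies the migration; a valid
        // schema should not panic.
        let _ = ThreadSafeConnection::<TestDomain>::new(None, false)
            .with_initialize_query("PRAGMA FOREIGN_KEYS=true")
            .deref();
    }
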
#[test] + #[should_panic] + fn wild_zed_lost_failure() { + enum TestWorkspace {} + impl Domain for TestWorkspace { + fn name() -> &'static str { + "workspace" + } + + fn migrations() -> &'static [&'static str] { + &[" + CREATE TABLE workspaces( + workspace_id BLOB PRIMARY KEY, + dock_visible INTEGER, -- Boolean + dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' + dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, + FOREIGN KEY(dock_pane) REFERENCES panes(pane_id), + FOREIGN KEY(active_pane) REFERENCES panes(pane_id) + ) STRICT; + + CREATE TABLE panes( + pane_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + active INTEGER NOT NULL, -- Boolean + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE + ) STRICT; + "] + } + } + + let _ = ThreadSafeConnection::::new(None, false) + .with_initialize_query("PRAGMA FOREIGN_KEYS=true") + .deref(); + } +} diff --git a/crates/workspace/dest.db b/crates/workspace/dest.db new file mode 100644 index 0000000000000000000000000000000000000000..95cbdffc92f30106519872ee986be92fdb20ebfa GIT binary patch literal 36864 zcmWFz^vNtqRY=P(%1ta$FlG>7U}R))P*7lCV31;9VBlmx01%%A!DV1XV&h^;G3doh z@$$DYaIk%5;P2r*&F{~>o@)jt2is?kySOxsijRiCXb6mkz-S1Jh5!vhpgEnLU0ht8 zv0b+$F)1fCvm`aQ7))?D2e~?ixGID=I{CONfQ2-;6ciM|%=pX{1y8>aS9jMS1wa1~ z1;0=q9~}iF%fC?HZ|p>;O%LAXhinAXh(U*I)&>L@}xwunC~h@b^=2arJQxaaC}340d*O zaYdF14R8SkAyh^OEQM?>)L$qzg2ak7KxBMoiY8bKR(l|(1q6BeItE38JpqcNcu+vY z9St@?Qz1Ac$kRDQlY>p%R-BO`GcP5zqPQ?8vm`Y>v9u%~%#R0YE{-<@Q=HAbZ0zEy zs*J6%;6#y}npcuq6c2J!F*2JSC5a%*KvD`w11Lse(T3z$i0cayi&FDS;?s-rOAC+` zf>RPGNh#!}Cgy<>Q(_4`=_D!?<>!|ufXoI5P61jhlcbrn$WQ=Vh$l8sjYIJpMjS(Y zU917~yru$XQi4TYlL#v~leVyf;{)XPVlcsm5(8i%B7+_|jVNHra*4?$nPsV%SqGHK zo$~W@QWNvQp@HmWWHV9x3ztC4MG9aeum&iK6ly-=f(1*9Bn!K^vNB_PB|KQ5p$ZBK zD3cWCuAZGNUX># zMk@zFvE`aqnro$?pdOZ5RFavTn4_+sub}RcUzAyupI4HYqYjRP0f#*Dl)eEg3|^(5{nTmW|WkH5QiiTI3FJE7%3wqKRG+TEVDQ>DF>cY zz{QUfQU(DlO3X{n$S(p%4p;#sPSl<9OG@%{!4VRanVwMsX1P`rB<7{0ra&SFtR3We zSONv>2N#ZL$)Y?}AtgUgy+k1+u`E>~Q30eATudrdrj~#sxg;|;wYVfPw*c%A1s7L0 z$55XT1?SM9AXmST_z+KD*WeIG-vG3F6gi4OhJq^sjL1Q%vtaqtn3+x7T@hD}0}tTh zctZpm)TibEwR4IX!SUCq501aw%=Dr}P-R|>U@@V@A3|IK5`iiCxrv#1V6Q@wT5(Bg z0X!7J0Rqh^s%fP_E$l=Do;RbTSC>{-g(GVC7fzc2c4S~@R7!3hpL!jTBkyV~! 
zUOc=}gVIicixxwgDRBO1I}>R@hNO0;H5V635QCb_qn*4F(8-kZ2qaiRF0;3@?8UmvsFd71*Aut*OqaiRF0;3@?8UmvsFd70wF$B0kjvnp*gF;~x zkA}c#2#kinXb6mkz-S1JhQMeDjE2By2#kinXb6mkz_1O0(fR*j8?~bz8x4Wc5Eu=C z(GVC7fzc2c4S~@R7!85Z5Eu=C(GVC70eA?E_W$8QG0GSXfzc2c4S~@R7!85Z5Eu=C z(GVC7fzc2c4S~@R7!8488v>*K|6v=oqaGU#fzc2c4S~@R7!85Z5Eu=C(GVC7fzc2c z4S~@R7!3h<2#ogs;XyIV7!85Z5Eu=C(GVC7fzc2c4S~@R7!85Z5Eu=C(GVC7fngg0 wqv!t(+o&D&*k}lhhQMeDjE2By2#kinXb6mkz-S1JhQMeDjE2By2*5)C0LNwlO8@`> literal 0 HcmV?d00001 diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index f7517ec8bf..3f04e50461 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -26,6 +26,7 @@ use model::{ connection!(DB: WorkspaceDb); + impl Domain for Workspace { fn name() -> &'static str { "workspace" @@ -37,7 +38,9 @@ impl Domain for Workspace { workspace_id BLOB PRIMARY KEY, dock_visible INTEGER, -- Boolean dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' - timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL + dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, + FOREIGN KEY(dock_pane) REFERENCES panes(pane_id) ) STRICT; CREATE TABLE pane_groups( @@ -55,14 +58,21 @@ impl Domain for Workspace { CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL means that this is a dock pane - position INTEGER, -- NULL means that this is a dock pane + active INTEGER NOT NULL, -- Boolean FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - ON UPDATE CASCADE, - FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ON UPDATE CASCADE ) STRICT; + CREATE TABLE center_panes( + pane_id INTEGER PRIMARY KEY, + parent_group_id INTEGER, -- NULL means that this is a root pane + position INTEGER, -- NULL means that this is a root pane + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) + ON DELETE CASCADE, + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + CREATE TABLE items( item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique workspace_id BLOB NOT NULL, @@ -131,12 +141,13 @@ impl WorkspaceDb { workspace: &SerializedWorkspace, ) { self.with_savepoint("update_worktrees", || { + self.exec_bound(indoc! {" + UPDATE workspaces SET dock_pane = NULL WHERE workspace_id = ?1; + DELETE FROM pane_groups WHERE workspace_id = ?1; + DELETE FROM panes WHERE workspace_id = ?1;"})? + (old_id.as_ref().unwrap_or(&workspace.workspace_id)).context("Clearing old panes")?; + if let Some(old_id) = old_id { - self.exec_bound(indoc! {" - DELETE FROM pane_groups WHERE workspace_id = ?"})?(&old_id)?; - - // If collision, delete - self.exec_bound(indoc! {" UPDATE OR REPLACE workspaces SET workspace_id = ?, @@ -147,18 +158,26 @@ impl WorkspaceDb { &workspace.workspace_id, workspace.dock_position, &old_id, - ))?; + )).context("Updating workspace with new worktree roots")?; } else { - self.exec_bound(indoc! 
{" - DELETE FROM pane_groups WHERE workspace_id = ?"})?(&workspace.workspace_id)?; self.exec_bound( "INSERT OR REPLACE INTO workspaces(workspace_id, dock_visible, dock_anchor) VALUES (?, ?, ?)", - )?((&workspace.workspace_id, workspace.dock_position))?; + )?((&workspace.workspace_id, workspace.dock_position)).context("Uodating workspace")?; } // Save center pane group and dock pane - self.save_pane_group(&workspace.workspace_id, &workspace.center_group, None)?; - self.save_pane(&workspace.workspace_id, &workspace.dock_pane, None)?; + self.save_pane_group(&workspace.workspace_id, &workspace.center_group, None).context("save pane group in save workspace")?; + + let dock_id = self.save_pane(&workspace.workspace_id, &workspace.dock_pane, None, true).context("save pane in save workspace")?; + + // Complete workspace initialization + self.exec_bound(indoc! {" + UPDATE workspaces + SET dock_pane = ? + WHERE workspace_id = ?"})?(( + dock_id, + &workspace.workspace_id, + )).context("Finishing initialization with dock pane")?; Ok(()) }) @@ -196,38 +215,42 @@ impl WorkspaceDb { .into_iter() .next() .context("No center pane group") - .map(|pane_group| { - // Rewrite the special case of the root being a leaf node - if let SerializedPaneGroup::Group { axis: Axis::Horizontal, ref children } = pane_group { - if children.len() == 1 { - if let Some(SerializedPaneGroup::Pane(pane)) = children.get(0) { - return SerializedPaneGroup::Pane(pane.clone()) - } - } - } - pane_group - }) } - fn get_pane_group_children<'a>( + fn get_pane_group_children( &self, workspace_id: &WorkspaceId, group_id: Option, ) -> Result> { - self.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! {" - SELECT group_id, axis, pane_id - FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id - FROM pane_groups - UNION - SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id - FROM panes - -- Remove the dock panes from the union - WHERE parent_group_id IS NOT NULL and position IS NOT NULL) + type GroupKey<'a> = (Option, &'a WorkspaceId); + type GroupOrPane = (Option, Option, Option, Option); + self.select_bound::(indoc! {" + SELECT group_id, axis, pane_id, active + FROM (SELECT + group_id, + axis, + NULL as pane_id, + NULL as active, + position, + parent_group_id, + workspace_id + FROM pane_groups + UNION + SELECT + NULL, + NULL, + center_panes.pane_id, + panes.active as active, + position, + parent_group_id, + panes.workspace_id as workspace_id + FROM center_panes + JOIN panes ON center_panes.pane_id = panes.pane_id) WHERE parent_group_id IS ? AND workspace_id = ? ORDER BY position "})?((group_id, workspace_id))? .into_iter() - .map(|(group_id, axis, pane_id)| { + .map(|(group_id, axis, pane_id, active)| { if let Some((group_id, axis)) = group_id.zip(axis) { Ok(SerializedPaneGroup::Group { axis, @@ -236,10 +259,8 @@ impl WorkspaceDb { Some(group_id), )?, }) - } else if let Some(pane_id) = pane_id { - Ok(SerializedPaneGroup::Pane(SerializedPane { - children: self.get_items( pane_id)?, - })) + } else if let Some((pane_id, active)) = pane_id.zip(active) { + Ok(SerializedPaneGroup::Pane(SerializedPane::new(self.get_items( pane_id)?, active))) } else { bail!("Pane Group Child was neither a pane group or a pane"); } @@ -253,22 +274,15 @@ impl WorkspaceDb { pane_group: &SerializedPaneGroup, parent: Option<(GroupId, usize)>, ) -> Result<()> { - // Rewrite the root node to fit with the database - let pane_group = if parent.is_none() && matches!(pane_group, SerializedPaneGroup::Pane { .. 
}) { - SerializedPaneGroup::Group { axis: Axis::Horizontal, children: vec![pane_group.clone()] } - } else { - pane_group.clone() - }; - match pane_group { SerializedPaneGroup::Group { axis, children } => { let (parent_id, position) = unzip_option(parent); let group_id = self.select_row_bound::<_, i64>(indoc!{" - INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) - VALUES (?, ?, ?, ?) - RETURNING group_id"})? - ((workspace_id, parent_id, position, axis))? + INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) + VALUES (?, ?, ?, ?) + RETURNING group_id"})? + ((workspace_id, parent_id, position, *axis))? .ok_or_else(|| anyhow!("Couldn't retrieve group_id from inserted pane_group"))?; for (position, group) in children.iter().enumerate() { @@ -277,21 +291,24 @@ impl WorkspaceDb { Ok(()) } SerializedPaneGroup::Pane(pane) => { - self.save_pane(workspace_id, &pane, parent) + self.save_pane(workspace_id, &pane, parent, false)?; + Ok(()) }, } } pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { - let pane_id = self.select_row_bound(indoc! {" - SELECT pane_id FROM panes - WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?( + let (pane_id, active) = self.select_row_bound(indoc! {" + SELECT pane_id, active + FROM panes + WHERE pane_id = (SELECT dock_pane FROM workspaces WHERE workspace_id = ?)"})?( workspace_id, )? .context("No dock pane for workspace")?; Ok(SerializedPane::new( self.get_items(pane_id).context("Reading items")?, + active )) } @@ -299,20 +316,32 @@ impl WorkspaceDb { &self, workspace_id: &WorkspaceId, pane: &SerializedPane, - parent: Option<(GroupId, usize)>, - ) -> Result<()> { - let (parent_id, order) = unzip_option(parent); - + parent: Option<(GroupId, usize)>, // None indicates BOTH dock pane AND center_pane + dock: bool, + ) -> Result { let pane_id = self.select_row_bound::<_, i64>(indoc!{" - INSERT INTO panes(workspace_id, parent_group_id, position) - VALUES (?, ?, ?) + INSERT INTO panes(workspace_id, active) + VALUES (?, ?) RETURNING pane_id"}, - )?((workspace_id, parent_id, order))? + )?((workspace_id, pane.active))? .ok_or_else(|| anyhow!("Could not retrieve inserted pane_id"))?; + + if !dock { + let (parent_id, order) = unzip_option(parent); + self.exec_bound(indoc! {" + INSERT INTO center_panes(pane_id, parent_group_id, position) + VALUES (?, ?, ?)"})?(( + pane_id, parent_id, order + ))?; + } self.save_items(workspace_id, pane_id, &pane.children) - .context("Saving items") + .context("Saving items")?; + + Ok(pane_id) } + + pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { Ok(self.select_bound(indoc! 
{" @@ -352,6 +381,7 @@ mod tests { let db = WorkspaceDb(open_memory_db(Some("test_full_workspace_serialization"))); let dock_pane = crate::persistence::model::SerializedPane { + children: vec![ SerializedItem::new("Terminal", 1), SerializedItem::new("Terminal", 2), @@ -359,6 +389,7 @@ mod tests { SerializedItem::new("Terminal", 4), ], + active: false }; // ----------------- @@ -372,28 +403,30 @@ mod tests { SerializedPaneGroup::Group { axis: gpui::Axis::Vertical, children: vec![ - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ SerializedItem::new("Terminal", 5), SerializedItem::new("Terminal", 6), ], - }), - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ + false) + ), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ SerializedItem::new("Terminal", 7), SerializedItem::new("Terminal", 8), - ], - }), + false, + )), ], }, - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ SerializedItem::new("Terminal", 9), SerializedItem::new("Terminal", 10), ], - }), + false, + )), ], }; @@ -518,14 +551,14 @@ mod tests { let db = WorkspaceDb(open_memory_db(Some("basic_dock_pane"))); - let dock_pane = crate::persistence::model::SerializedPane { - children: vec![ + let dock_pane = crate::persistence::model::SerializedPane::new( + vec![ SerializedItem::new("Terminal", 1), SerializedItem::new("Terminal", 4), SerializedItem::new("Terminal", 2), SerializedItem::new("Terminal", 3), - ], - }; + ], false + ); let workspace = default_workspace(&["/tmp"], dock_pane, &Default::default()); @@ -538,7 +571,7 @@ mod tests { #[test] fn test_simple_split() { - // env_logger::try_init().ok(); + env_logger::try_init().ok(); let db = WorkspaceDb(open_memory_db(Some("simple_split"))); @@ -553,33 +586,96 @@ mod tests { SerializedPaneGroup::Group { axis: gpui::Axis::Vertical, children: vec![ - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), - ], - }), - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::new("Terminal", 4), - SerializedItem::new("Terminal", 3), - ], - }), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), + ], + false)), + SerializedPaneGroup::Pane(SerializedPane::new(vec![ + SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 3), + ], true)), ], }, - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ SerializedItem::new("Terminal", 5), SerializedItem::new("Terminal", 6), ], - }), + false)), ], }; let workspace = default_workspace(&["/tmp"], Default::default(), ¢er_pane); db.save_workspace(None, &workspace); + + let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); + + assert_eq!(workspace.center_group, new_workspace.center_group); + } + + #[test] + fn test_cleanup_panes() { + env_logger::try_init().ok(); + + let db = WorkspaceDb(open_memory_db(Some("test_cleanup_panes"))); + + let center_pane = SerializedPaneGroup::Group { + axis: gpui::Axis::Horizontal, + children: vec![ + SerializedPaneGroup::Group { + axis: gpui::Axis::Vertical, + children: vec![ + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), + ], + false)), + SerializedPaneGroup::Pane(SerializedPane::new(vec![ + 
SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 3), + ], true)), + ], + }, + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 5), + SerializedItem::new("Terminal", 6), + ], + false)), + ], + }; + + let id = &["/tmp"]; + + let mut workspace = default_workspace(id, Default::default(), ¢er_pane); + + db.save_workspace(None, &workspace); + + workspace.center_group = SerializedPaneGroup::Group { + axis: gpui::Axis::Vertical, + children: vec![ + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), + ], + false)), + SerializedPaneGroup::Pane(SerializedPane::new(vec![ + SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 3), + ], true)), + ], + }; + + db.save_workspace(None, &workspace); + + let new_workspace = db.workspace_for_roots(id).unwrap(); + + assert_eq!(workspace.center_group, new_workspace.center_group); - assert_eq!(workspace.center_group, center_pane); } } diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index b4b5db5f11..9eca121c21 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -76,6 +76,7 @@ impl Default for SerializedPaneGroup { fn default() -> Self { Self::Pane(SerializedPane { children: Vec::new(), + active: false, }) } } @@ -88,27 +89,35 @@ impl SerializedPaneGroup { workspace_id: &WorkspaceId, workspace: &ViewHandle, cx: &mut AsyncAppContext, - ) -> Member { + ) -> (Member, Option>) { match self { SerializedPaneGroup::Group { axis, children } => { + let mut current_active_pane = None; let mut members = Vec::new(); for child in children { - let new_member = child + let (new_member, active_pane) = child .deserialize(project, workspace_id, workspace, cx) .await; members.push(new_member); + + current_active_pane = current_active_pane.or(active_pane); } - Member::Axis(PaneAxis { - axis: *axis, - members, - }) + ( + Member::Axis(PaneAxis { + axis: *axis, + members, + }), + current_active_pane, + ) } SerializedPaneGroup::Pane(serialized_pane) => { let pane = workspace.update(cx, |workspace, cx| workspace.add_pane(cx)); + let active = serialized_pane.active; serialized_pane .deserialize_to(project, &pane, workspace_id, workspace, cx) .await; - Member::Pane(pane) + + (Member::Pane(pane.clone()), active.then(|| pane)) } } } @@ -116,12 +125,13 @@ impl SerializedPaneGroup { #[derive(Debug, PartialEq, Eq, Default, Clone)] pub struct SerializedPane { + pub(crate) active: bool, pub(crate) children: Vec, } impl SerializedPane { - pub fn new(children: Vec) -> Self { - SerializedPane { children } + pub fn new(children: Vec, active: bool) -> Self { + SerializedPane { children, active } } pub async fn deserialize_to( @@ -154,6 +164,7 @@ impl SerializedPane { }) .await .log_err(); + if let Some(item_handle) = item_handle { workspace.update(cx, |workspace, cx| { Pane::add_item(workspace, &pane_handle, item_handle, false, false, None, cx); diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 072bd80e1d..fbe21be81c 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -2280,18 +2280,22 @@ impl Workspace { pane_handle: &ViewHandle, cx: &AppContext, ) -> SerializedPane { - SerializedPane { - children: pane_handle - .read(cx) - .items() - .filter_map(|item_handle| { - Some(SerializedItem { - kind: Arc::from(item_handle.serialized_item_kind()?), - item_id: item_handle.id(), 
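+        // Read the pane once, so the serialized items and the active flag
+        // are taken from the same snapshot of the pane's state.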
+ let (items, active) = { + let pane = pane_handle.read(cx); + ( + pane.items() + .filter_map(|item_handle| { + Some(SerializedItem { + kind: Arc::from(item_handle.serialized_item_kind()?), + item_id: item_handle.id(), + }) }) - }) - .collect::>(), - } + .collect::>(), + pane.is_active(), + ) + }; + + SerializedPane::new(items, active) } let dock_pane = serialize_pane_handle(self.dock.pane(), cx); @@ -2353,7 +2357,7 @@ impl Workspace { // Traverse the splits tree and add to things - let root = serialized_workspace + let (root, active_pane) = serialized_workspace .center_group .deserialize( &project, @@ -2369,11 +2373,14 @@ impl Workspace { // Swap workspace center group workspace.center = PaneGroup::with_root(root); - cx.notify(); - }); - workspace.update(&mut cx, |workspace, cx| { - Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx) + Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx); + + if let Some(active_pane) = active_pane { + cx.focus(active_pane); + } + + cx.notify(); }); } }) From 992b94eef3e48242a6df24517b7eb8f1efeb6351 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 18 Nov 2022 17:06:33 -0800 Subject: [PATCH 161/240] Rebased to main --- crates/collab/src/integration_tests.rs | 4 ++-- crates/collab/src/main.rs | 2 +- crates/editor/src/items.rs | 4 ++-- crates/editor/src/persistence.rs | 2 +- crates/sqlez/src/migrations.rs | 2 +- crates/terminal/src/terminal.rs | 2 -- crates/workspace/src/persistence/model.rs | 4 +--- crates/workspace/src/workspace.rs | 4 ++-- 8 files changed, 10 insertions(+), 14 deletions(-) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index e1b242713f..386ccfbbff 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1,6 +1,6 @@ use crate::{ - db::{NewUserParams, ProjectId, TestDb, UserId}, - rpc::{Executor, Server, Store}, + db::{NewUserParams, ProjectId, SqliteTestDb as TestDb, UserId}, + rpc::{Executor, Server}, AppState, }; diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index d26ea1a0fa..dc98a2ee68 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -9,11 +9,11 @@ mod db_tests; #[cfg(test)] mod integration_tests; -use crate::db::{Db, PostgresDb}; use crate::rpc::ResultExt as _; use anyhow::anyhow; use axum::{routing::get, Router}; use collab::{Error, Result}; +use db::DefaultDb as Db; use serde::Deserialize; use std::{ env::args, diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 60ac8d5278..5d900cd942 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -1,7 +1,7 @@ use crate::{ display_map::ToDisplayPoint, link_go_to_definition::hide_link_definition, - movement::surrounding_word, Anchor, Autoscroll, Editor, EditorMode, Event, ExcerptId, - MultiBuffer, MultiBufferSnapshot, NavigationData, ToPoint as _, FORMAT_TIMEOUT, + movement::surrounding_word, Anchor, Autoscroll, Editor, Event, ExcerptId, MultiBuffer, + MultiBufferSnapshot, NavigationData, ToPoint as _, FORMAT_TIMEOUT, }; use anyhow::{anyhow, Context, Result}; use futures::FutureExt; diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 4b39f94638..acac2eff4c 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -24,7 +24,7 @@ impl Domain for Editor { } impl EditorDb { - fn get_path(_item_id: ItemId, _workspace_id: WorktreeId) -> PathBuf { + fn _get_path(_item_id: ItemId, _workspace_id: WorktreeId) -> 
PathBuf { unimplemented!(); } } diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index d77d54095b..23af04bbf4 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -58,7 +58,7 @@ impl Connection { mod test { use indoc::indoc; - use crate::{connection::Connection, thread_safe_connection::ThreadSafeConnection}; + use crate::connection::Connection; #[test] fn test_migrations_are_added_to_table() { diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 66a64903d3..7e469e19fe 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -34,9 +34,7 @@ use mappings::mouse::{ use procinfo::LocalProcessInfo; use settings::{AlternateScroll, Settings, Shell, TerminalBlink}; -use terminal_container_view::TerminalContainer; use util::ResultExt; -use workspace::register_deserializable_item; use std::{ cmp::min, diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index 9eca121c21..0d4aade867 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -16,9 +16,7 @@ use sqlez::{ }; use util::ResultExt; -use crate::{ - dock::DockPosition, item::ItemHandle, ItemDeserializers, Member, Pane, PaneAxis, Workspace, -}; +use crate::{dock::DockPosition, ItemDeserializers, Member, Pane, PaneAxis, Workspace}; #[derive(Debug, Clone, PartialEq, Eq)] pub struct WorkspaceId(Arc>); diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index fbe21be81c..66fdd19c70 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -677,7 +677,7 @@ impl Workspace { cx: &mut MutableAppContext, ) -> Task<( ViewHandle, - Vec, Arc>>>, + Vec, anyhow::Error>>>, )> { let project_handle = Project::local( app_state.client.clone(), @@ -740,7 +740,7 @@ impl Workspace { Some( workspace .update(&mut cx, |workspace, cx| { - workspace.open_path(project_path, true, cx) + workspace.open_path(project_path, None, true, cx) }) .await, ) From 7ceb5e815e0050a14f922bc36e33d17a8622474f Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 18 Nov 2022 17:18:23 -0800 Subject: [PATCH 162/240] workspace level integration of serialization complete! Time for item level integration.... Co-Authored-By: kay@zed.dev --- crates/workspace/src/persistence.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 3f04e50461..772e98f84b 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -211,13 +211,13 @@ impl WorkspaceDb { &self, workspace_id: &WorkspaceId, ) -> Result { - self.get_pane_group_children(workspace_id, None)? + self.get_pane_group(workspace_id, None)? .into_iter() .next() .context("No center pane group") } - fn get_pane_group_children( + fn get_pane_group( &self, workspace_id: &WorkspaceId, group_id: Option, @@ -254,7 +254,7 @@ impl WorkspaceDb { if let Some((group_id, axis)) = group_id.zip(axis) { Ok(SerializedPaneGroup::Group { axis, - children: self.get_pane_group_children( + children: self.get_pane_group( workspace_id, Some(group_id), )?, @@ -265,6 +265,14 @@ impl WorkspaceDb { bail!("Pane Group Child was neither a pane group or a pane"); } }) + // Filter out panes and pane groups which don't have any children or items + .filter(|pane_group| { + match pane_group { + Ok(SerializedPaneGroup::Group { children, .. 
}) => !children.is_empty(), + Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(), + _ => true, + } + }) .collect::>() } From 9077b058a2d4286908b833442746e59f62dcf8cd Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 18 Nov 2022 17:26:01 -0800 Subject: [PATCH 163/240] removed test file --- crates/workspace/dest.db | Bin 36864 -> 0 bytes crates/workspace/src/persistence.rs | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 crates/workspace/dest.db diff --git a/crates/workspace/dest.db b/crates/workspace/dest.db deleted file mode 100644 index 95cbdffc92f30106519872ee986be92fdb20ebfa..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 36864 zcmWFz^vNtqRY=P(%1ta$FlG>7U}R))P*7lCV31;9VBlmx01%%A!DV1XV&h^;G3doh z@$$DYaIk%5;P2r*&F{~>o@)jt2is?kySOxsijRiCXb6mkz-S1Jh5!vhpgEnLU0ht8 zv0b+$F)1fCvm`aQ7))?D2e~?ixGID=I{CONfQ2-;6ciM|%=pX{1y8>aS9jMS1wa1~ z1;0=q9~}iF%fC?HZ|p>;O%LAXhinAXh(U*I)&>L@}xwunC~h@b^=2arJQxaaC}340d*O zaYdF14R8SkAyh^OEQM?>)L$qzg2ak7KxBMoiY8bKR(l|(1q6BeItE38JpqcNcu+vY z9St@?Qz1Ac$kRDQlY>p%R-BO`GcP5zqPQ?8vm`Y>v9u%~%#R0YE{-<@Q=HAbZ0zEy zs*J6%;6#y}npcuq6c2J!F*2JSC5a%*KvD`w11Lse(T3z$i0cayi&FDS;?s-rOAC+` zf>RPGNh#!}Cgy<>Q(_4`=_D!?<>!|ufXoI5P61jhlcbrn$WQ=Vh$l8sjYIJpMjS(Y zU917~yru$XQi4TYlL#v~leVyf;{)XPVlcsm5(8i%B7+_|jVNHra*4?$nPsV%SqGHK zo$~W@QWNvQp@HmWWHV9x3ztC4MG9aeum&iK6ly-=f(1*9Bn!K^vNB_PB|KQ5p$ZBK zD3cWCuAZGNUX># zMk@zFvE`aqnro$?pdOZ5RFavTn4_+sub}RcUzAyupI4HYqYjRP0f#*Dl)eEg3|^(5{nTmW|WkH5QiiTI3FJE7%3wqKRG+TEVDQ>DF>cY zz{QUfQU(DlO3X{n$S(p%4p;#sPSl<9OG@%{!4VRanVwMsX1P`rB<7{0ra&SFtR3We zSONv>2N#ZL$)Y?}AtgUgy+k1+u`E>~Q30eATudrdrj~#sxg;|;wYVfPw*c%A1s7L0 z$55XT1?SM9AXmST_z+KD*WeIG-vG3F6gi4OhJq^sjL1Q%vtaqtn3+x7T@hD}0}tTh zctZpm)TibEwR4IX!SUCq501aw%=Dr}P-R|>U@@V@A3|IK5`iiCxrv#1V6Q@wT5(Bg z0X!7J0Rqh^s%fP_E$l=Do;RbTSC>{-g(GVC7fzc2c4S~@R7!3hpL!jTBkyV~! 
zUOc=}gVIicixxwgDRBO1I}>R@hNO0;H5V635QCb_qn*4F(8-kZ2qaiRF0;3@?8UmvsFd71*Aut*OqaiRF0;3@?8UmvsFd70wF$B0kjvnp*gF;~x zkA}c#2#kinXb6mkz-S1JhQMeDjE2By2#kinXb6mkz_1O0(fR*j8?~bz8x4Wc5Eu=C z(GVC7fzc2c4S~@R7!85Z5Eu=C(GVC70eA?E_W$8QG0GSXfzc2c4S~@R7!85Z5Eu=C z(GVC7fzc2c4S~@R7!8488v>*K|6v=oqaGU#fzc2c4S~@R7!85Z5Eu=C(GVC7fzc2c z4S~@R7!3h<2#ogs;XyIV7!85Z5Eu=C(GVC7fzc2c4S~@R7!85Z5Eu=C(GVC7fngg0 wqv!t(+o&D&*k}lhhQMeDjE2By2#kinXb6mkz-S1JhQMeDjE2By2*5)C0LNwlO8@`> diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 772e98f84b..b8beaa0e6d 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -377,7 +377,7 @@ impl WorkspaceDb { #[cfg(test)] mod tests { - use db::{open_memory_db, write_db_to}; + use db::{open_memory_db}; use settings::DockAnchor; use super::*; @@ -571,7 +571,7 @@ mod tests { let workspace = default_workspace(&["/tmp"], dock_pane, &Default::default()); db.save_workspace(None, &workspace); - write_db_to(&db, "dest.db").unwrap(); + let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); assert_eq!(workspace.dock_pane, new_workspace.dock_pane); From cb1d2cd1f2984f3c6b2e8de36ee4321785c13c11 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Fri, 18 Nov 2022 20:59:59 -0800 Subject: [PATCH 164/240] WIP serializing and deserializing editors --- crates/editor/src/editor.rs | 27 ++++++++++++++- crates/editor/src/items.rs | 25 +++++++------- crates/editor/src/persistence.rs | 31 ++++++++++++++---- crates/sqlez/src/thread_safe_connection.rs | 38 +++++++++++----------- 4 files changed, 83 insertions(+), 38 deletions(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index d1ccc5e8f3..81cf7a9211 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -83,7 +83,7 @@ use theme::{DiagnosticStyle, Theme}; use util::{post_inc, ResultExt, TryFutureExt}; use workspace::{ItemNavHistory, Workspace}; -use crate::git::diff_hunk_to_display; +use crate::{git::diff_hunk_to_display, persistence::DB}; const CURSOR_BLINK_INTERVAL: Duration = Duration::from_millis(500); const SCROLLBAR_SHOW_INTERVAL: Duration = Duration::from_secs(1); @@ -1137,6 +1137,31 @@ impl Editor { cx: &mut ViewContext, ) -> Self { let buffer = cx.add_model(|cx| MultiBuffer::singleton(buffer, cx)); + if let Some(project) = project.as_ref() { + if let Some(file) = buffer + .read(cx) + .as_singleton() + .and_then(|buffer| buffer.read(cx).file()) + .and_then(|file| file.as_local()) + { + let item_id = cx.weak_handle().id(); + let workspace_id = project + .read(cx) + .visible_worktrees(cx) + .map(|worktree| worktree.read(cx).abs_path()) + .collect::>() + .into(); + let path = file.abs_path(cx); + dbg!(&path); + + cx.background() + .spawn(async move { + DB.save_path(item_id, workspace_id, path).log_err(); + }) + .detach(); + } + } + Self::new(EditorMode::Full, buffer, project, None, cx) } diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 5d900cd942..f7dcd57f42 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -1,8 +1,3 @@ -use crate::{ - display_map::ToDisplayPoint, link_go_to_definition::hide_link_definition, - movement::surrounding_word, Anchor, Autoscroll, Editor, Event, ExcerptId, MultiBuffer, - MultiBufferSnapshot, NavigationData, ToPoint as _, FORMAT_TIMEOUT, -}; use anyhow::{anyhow, Context, Result}; use futures::FutureExt; use gpui::{ @@ -29,6 +24,12 @@ use workspace::{ ItemId, ItemNavHistory, Pane, StatusItemView, ToolbarItemLocation, Workspace, WorkspaceId, }; +use 
crate::{ + display_map::ToDisplayPoint, link_go_to_definition::hide_link_definition, + movement::surrounding_word, persistence::DB, Anchor, Autoscroll, Editor, Event, ExcerptId, + MultiBuffer, MultiBufferSnapshot, NavigationData, ToPoint as _, FORMAT_TIMEOUT, +}; + pub const MAX_TAB_TITLE_LEN: usize = 24; impl FollowableItem for Editor { @@ -554,21 +555,21 @@ impl Item for Editor { } fn serialized_item_kind() -> Option<&'static str> { - // TODO: Some("Editor") - None + Some("Editor") } fn deserialize( project: ModelHandle, _workspace: WeakViewHandle, - _workspace_id: WorkspaceId, - _item_id: ItemId, + workspace_id: WorkspaceId, + item_id: ItemId, cx: &mut ViewContext, ) -> Task>> { - // Look up the path with this key associated, create a self with that path - let path = Path::new("."); if let Some(project_item) = project.update(cx, |project, cx| { - let (worktree, path) = project.find_local_worktree(path, cx)?; + // Look up the path with this key associated, create a self with that path + let path = DB.get_path(item_id, workspace_id).ok()?; + dbg!(&path); + let (worktree, path) = project.find_local_worktree(&path, cx)?; let project_path = ProjectPath { worktree_id: worktree.read(cx).id(), path: path.into(), diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index acac2eff4c..2c190d8608 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -1,11 +1,11 @@ -use std::path::PathBuf; +use std::path::{Path, PathBuf}; +use anyhow::{Context, Result}; use db::connection; use indoc::indoc; use lazy_static::lazy_static; -use project::WorktreeId; use sqlez::domain::Domain; -use workspace::{ItemId, Workspace}; +use workspace::{ItemId, Workspace, WorkspaceId}; use crate::Editor; @@ -18,13 +18,32 @@ impl Domain for Editor { fn migrations() -> &'static [&'static str] { &[indoc! {" - + CREATE TABLE editors( + item_id INTEGER NOT NULL, + workspace_id BLOB NOT NULL, + path BLOB NOT NULL, + PRIMARY KEY(item_id, workspace_id) + ) STRICT; "}] } } impl EditorDb { - fn _get_path(_item_id: ItemId, _workspace_id: WorktreeId) -> PathBuf { - unimplemented!(); + pub fn get_path(&self, item_id: ItemId, workspace_id: WorkspaceId) -> Result { + self.select_row_bound(indoc! {" + SELECT path FROM editors + WHERE item_id = ? AND workspace_id = ?"})?((item_id, &workspace_id))? + .context("Path not found for serialized editor") + } + + pub fn save_path( + &self, + item_id: ItemId, + workspace_id: WorkspaceId, + path: PathBuf, + ) -> Result<()> { + self.exec_bound::<(ItemId, &WorkspaceId, &Path)>(indoc! 
{" + INSERT OR REPLACE INTO editors(item_id, workspace_id, path) + VALUES (?, ?, ?)"})?((item_id, &workspace_id, &path)) } } diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 4ed1805407..5a5095ad77 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -129,25 +129,25 @@ mod test { fn migrations() -> &'static [&'static str] { &[" - CREATE TABLE workspaces( - workspace_id BLOB PRIMARY KEY, - dock_visible INTEGER, -- Boolean - dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' - dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet - timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, - FOREIGN KEY(dock_pane) REFERENCES panes(pane_id), - FOREIGN KEY(active_pane) REFERENCES panes(pane_id) - ) STRICT; - - CREATE TABLE panes( - pane_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, - active INTEGER NOT NULL, -- Boolean - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE - ) STRICT; - "] + CREATE TABLE workspaces( + workspace_id BLOB PRIMARY KEY, + dock_visible INTEGER, -- Boolean + dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' + dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, + FOREIGN KEY(dock_pane) REFERENCES panes(pane_id), + FOREIGN KEY(active_pane) REFERENCES panes(pane_id) + ) STRICT; + + CREATE TABLE panes( + pane_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + active INTEGER NOT NULL, -- Boolean + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE + ) STRICT; + "] } } From a8ed95e1dcce910ec3b4bb8298885d2b67a7ea8a Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sat, 19 Nov 2022 15:14:13 -0800 Subject: [PATCH 165/240] Implementing persistence for the terminal working directory, found an issue with my current data model. :( --- Cargo.lock | 2 +- crates/db/src/db.rs | 12 ++-- crates/editor/src/persistence.rs | 7 ++- crates/terminal/Cargo.toml | 1 + crates/terminal/src/persistence.rs | 61 +++++++++++++++++++ crates/terminal/src/terminal.rs | 21 ++++++- .../terminal/src/terminal_container_view.rs | 18 ++++-- crates/workspace/Cargo.toml | 1 - crates/workspace/src/persistence.rs | 3 +- crates/workspace/src/persistence/model.rs | 8 +-- 10 files changed, 113 insertions(+), 21 deletions(-) create mode 100644 crates/terminal/src/persistence.rs diff --git a/Cargo.lock b/Cargo.lock index d53e91aa71..e887dfee66 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5889,6 +5889,7 @@ dependencies = [ "anyhow", "client", "context_menu", + "db", "dirs 4.0.0", "editor", "futures 0.3.25", @@ -7659,7 +7660,6 @@ dependencies = [ "serde_json", "settings", "smallvec", - "sqlez", "theme", "util", ] diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 39891718fb..6e4e6e0619 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,11 +1,15 @@ pub mod kvp; +// Re-export indoc and sqlez so clients only need to include us +pub use indoc::indoc; +pub use lazy_static; +pub use sqlez; + use std::fs::{create_dir_all, remove_dir_all}; use std::path::Path; #[cfg(any(test, feature = "test-support"))] use anyhow::Result; -use indoc::indoc; #[cfg(any(test, feature = "test-support"))] use sqlez::connection::Connection; use sqlez::domain::{Domain, Migrator}; @@ -54,17 +58,17 @@ pub fn write_db_to>( #[macro_export] macro_rules! 
connection { ($id:ident: $t:ident<$d:ty>) => { - pub struct $t(::sqlez::thread_safe_connection::ThreadSafeConnection<$d>); + pub struct $t(::db::sqlez::thread_safe_connection::ThreadSafeConnection<$d>); impl ::std::ops::Deref for $t { - type Target = ::sqlez::thread_safe_connection::ThreadSafeConnection<$d>; + type Target = ::db::sqlez::thread_safe_connection::ThreadSafeConnection<$d>; fn deref(&self) -> &Self::Target { &self.0 } } - lazy_static! { + ::db::lazy_static::lazy_static! { pub static ref $id: $t = $t(if cfg!(any(test, feature = "test-support")) { ::db::open_memory_db(None) } else { diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 2c190d8608..5870bc71e5 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -3,7 +3,6 @@ use std::path::{Path, PathBuf}; use anyhow::{Context, Result}; use db::connection; use indoc::indoc; -use lazy_static::lazy_static; use sqlez::domain::Domain; use workspace::{ItemId, Workspace, WorkspaceId}; @@ -22,7 +21,11 @@ impl Domain for Editor { item_id INTEGER NOT NULL, workspace_id BLOB NOT NULL, path BLOB NOT NULL, - PRIMARY KEY(item_id, workspace_id) + PRIMARY KEY(item_id, workspace_id), + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE + ) STRICT; "}] } diff --git a/crates/terminal/Cargo.toml b/crates/terminal/Cargo.toml index 785cf3365b..5593ee92d4 100644 --- a/crates/terminal/Cargo.toml +++ b/crates/terminal/Cargo.toml @@ -17,6 +17,7 @@ settings = { path = "../settings" } theme = { path = "../theme" } util = { path = "../util" } workspace = { path = "../workspace" } +db = { path = "../db" } alacritty_terminal = { git = "https://github.com/zed-industries/alacritty", rev = "a51dbe25d67e84d6ed4261e640d3954fbdd9be45" } procinfo = { git = "https://github.com/zed-industries/wezterm", rev = "5cd757e5f2eb039ed0c6bb6512223e69d5efc64d", default-features = false } smallvec = { version = "1.6", features = ["union"] } diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs new file mode 100644 index 0000000000..c7808b0dbf --- /dev/null +++ b/crates/terminal/src/persistence.rs @@ -0,0 +1,61 @@ +use std::path::{Path, PathBuf}; + +use db::{connection, indoc, sqlez::domain::Domain}; +use util::{iife, ResultExt}; +use workspace::{ItemId, Workspace, WorkspaceId}; + +use crate::Terminal; + +connection!(TERMINAL_CONNECTION: TerminalDb<(Workspace, Terminal)>); + +impl Domain for Terminal { + fn name() -> &'static str { + "terminal" + } + + fn migrations() -> &'static [&'static str] { + &[indoc! {" + CREATE TABLE terminals ( + item_id INTEGER, + workspace_id BLOB, + working_directory BLOB, + PRIMARY KEY(item_id, workspace_id), + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE + ) STRICT; + "}] + } +} + +impl TerminalDb { + pub fn save_working_directory( + &self, + item_id: ItemId, + workspace_id: &WorkspaceId, + working_directory: &Path, + ) { + iife!({ + self.exec_bound::<(ItemId, &WorkspaceId, &Path)>(indoc! {" + INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) + VALUES (?, ?, ?) + "})?((item_id, workspace_id, working_directory)) + }) + .log_err(); + } + + pub fn get_working_directory( + &self, + item_id: ItemId, + workspace_id: &WorkspaceId, + ) -> Option { + iife!({ + self.select_row_bound::<(ItemId, &WorkspaceId), PathBuf>(indoc! {" + SELECT working_directory + FROM terminals + WHERE item_id = ? 
                AND
workspace_id = ?"})?((item_id, workspace_id)) + }) + .log_err() + .flatten() + } +} diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 7e469e19fe..088729ff02 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -1,4 +1,5 @@ pub mod mappings; +mod persistence; pub mod terminal_container_view; pub mod terminal_element; pub mod terminal_view; @@ -32,9 +33,11 @@ use mappings::mouse::{ alt_scroll, grid_point, mouse_button_report, mouse_moved_report, mouse_side, scroll_report, }; +use persistence::TERMINAL_CONNECTION; use procinfo::LocalProcessInfo; use settings::{AlternateScroll, Settings, Shell, TerminalBlink}; use util::ResultExt; +use workspace::{ItemId, WorkspaceId}; use std::{ cmp::min, @@ -281,6 +284,8 @@ impl TerminalBuilder { blink_settings: Option, alternate_scroll: &AlternateScroll, window_id: usize, + item_id: ItemId, + workspace_id: WorkspaceId, ) -> Result { let pty_config = { let alac_shell = shell.clone().and_then(|shell| match shell { @@ -385,6 +390,8 @@ impl TerminalBuilder { last_mouse_position: None, next_link_id: 0, selection_phase: SelectionPhase::Ended, + workspace_id, + item_id, }; Ok(TerminalBuilder { @@ -528,6 +535,8 @@ pub struct Terminal { scroll_px: f32, next_link_id: usize, selection_phase: SelectionPhase, + item_id: ItemId, + workspace_id: WorkspaceId, } impl Terminal { @@ -567,7 +576,17 @@ impl Terminal { cx.emit(Event::Wakeup); if self.update_process_info() { - cx.emit(Event::TitleChanged) + cx.emit(Event::TitleChanged); + + if let Some(foreground_info) = self.foreground_process_info { + cx.background().spawn(async move { + TERMINAL_CONNECTION.save_working_directory( + self.item_id, + &self.workspace_id, + &foreground_info.cwd, + ); + }); + } } } AlacTermEvent::ColorRequest(idx, fun_ptr) => { diff --git a/crates/terminal/src/terminal_container_view.rs b/crates/terminal/src/terminal_container_view.rs index 49b6ae341f..2789f81676 100644 --- a/crates/terminal/src/terminal_container_view.rs +++ b/crates/terminal/src/terminal_container_view.rs @@ -1,3 +1,4 @@ +use crate::persistence::TERMINAL_CONNECTION; use crate::terminal_view::TerminalView; use crate::{Event, Terminal, TerminalBuilder, TerminalError}; @@ -13,7 +14,7 @@ use workspace::{ item::{Item, ItemEvent}, ToolbarItemLocation, Workspace, }; -use workspace::{register_deserializable_item, Pane}; +use workspace::{register_deserializable_item, ItemId, Pane, WorkspaceId}; use project::{LocalWorktree, Project, ProjectPath}; use settings::{AlternateScroll, Settings, WorkingDirectory}; @@ -89,6 +90,8 @@ impl TerminalContainer { pub fn new( working_directory: Option, modal: bool, + item_id: ItemId, + workspace_id: WorkspaceId, cx: &mut ViewContext, ) -> Self { let settings = cx.global::(); @@ -115,6 +118,8 @@ impl TerminalContainer { settings.terminal_overrides.blinking.clone(), scroll, cx.window_id(), + item_id, + workspace_id, ) { Ok(terminal) => { let terminal = cx.add_model(|cx| terminal.subscribe(cx)); @@ -386,13 +391,14 @@ impl Item for TerminalContainer { fn deserialize( _project: ModelHandle, _workspace: WeakViewHandle, - _workspace_id: workspace::WorkspaceId, - _item_id: workspace::ItemId, + workspace_id: workspace::WorkspaceId, + item_id: workspace::ItemId, cx: &mut ViewContext, ) -> Task>> { - // TODO: Pull the current working directory out of the DB. 
-
-        Task::ready(Ok(cx.add_view(|cx| TerminalContainer::new(None, false, cx))))
+        let working_directory = TERMINAL_CONNECTION.get_working_directory(item_id, &workspace_id);
+        Task::ready(Ok(cx.add_view(|cx| {
+            TerminalContainer::new(working_directory.log_err().flatten(), false, cx)
+        })))
     }
 }
 
diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml
index 0ce3bc220b..b67ccdeeb7 100644
--- a/crates/workspace/Cargo.toml
+++ b/crates/workspace/Cargo.toml
@@ -30,7 +30,6 @@ language = { path = "../language" }
 menu = { path = "../menu" }
 project = { path = "../project" }
 settings = { path = "../settings" }
-sqlez = { path = "../sqlez" }
 theme = { path = "../theme" }
 util = { path = "../util" }
 async-recursion = "1.0.0"
diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs
index b8beaa0e6d..372c4cafce 100644
--- a/crates/workspace/src/persistence.rs
+++ b/crates/workspace/src/persistence.rs
@@ -9,10 +9,9 @@ use anyhow::{anyhow, bail, Result, Context};
 use db::connection;
 use gpui::Axis;
 use indoc::indoc;
-use lazy_static::lazy_static;
 
-use sqlez::domain::Domain;
+use db::sqlez::domain::Domain;
 use util::{iife, unzip_option, ResultExt};
 
 use crate::dock::DockPosition;
diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs
index 0d4aade867..5f046d76ee 100644
--- a/crates/workspace/src/persistence/model.rs
+++ b/crates/workspace/src/persistence/model.rs
@@ -8,12 +8,12 @@ use anyhow::Result;
 use async_recursion::async_recursion;
 use gpui::{AsyncAppContext, Axis, ModelHandle, Task, ViewHandle};
 
-use project::Project;
-use settings::DockAnchor;
-use sqlez::{
+use db::sqlez::{
     bindable::{Bind, Column},
     statement::Statement,
 };
+use project::Project;
+use settings::DockAnchor;
 use util::ResultExt;
 
 use crate::{dock::DockPosition, ItemDeserializers, Member, Pane, PaneAxis, Workspace};

From e659823e6c309561c276d0ba451cb6ef331484c7 Mon Sep 17 00:00:00 2001
From: Mikayla Maki
Date: Sun, 20 Nov 2022 19:19:42 -0800
Subject: [PATCH 166/240] WIP terminal implementation.
need some way of getting the currently valid workspace ID --- crates/db/Cargo.toml | 1 - crates/sqlez/src/lib.rs | 2 + crates/sqlez/src/typed_statements.rs | 54 +++++++++++++++++++ crates/terminal/src/persistence.rs | 46 ++++++---------- crates/terminal/src/terminal.rs | 26 ++++----- .../terminal/src/terminal_container_view.rs | 10 ++-- 6 files changed, 84 insertions(+), 55 deletions(-) diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 27a11bea7b..70721c310c 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -23,7 +23,6 @@ log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" serde = { version = "1.0", features = ["derive"] } - [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } tempdir = { version = "0.3.7" } diff --git a/crates/sqlez/src/lib.rs b/crates/sqlez/src/lib.rs index ecebbd2643..c5d2658666 100644 --- a/crates/sqlez/src/lib.rs +++ b/crates/sqlez/src/lib.rs @@ -1,3 +1,5 @@ +pub use anyhow; + pub mod bindable; pub mod connection; pub mod domain; diff --git a/crates/sqlez/src/typed_statements.rs b/crates/sqlez/src/typed_statements.rs index 98f51b970a..ce289437c2 100644 --- a/crates/sqlez/src/typed_statements.rs +++ b/crates/sqlez/src/typed_statements.rs @@ -52,3 +52,57 @@ impl Connection { Ok(move |bindings| statement.with_bindings(bindings)?.maybe_row::()) } } + +#[macro_export] +macro_rules! exec_method { + ($id:ident(): $sql:literal) => { + pub fn $id(&self) -> $crate::anyhow::Result<()> { + iife!({ + self.exec($sql)?() + }) + } + }; + ($id:ident($($arg:ident: $arg_type:ty),+): $sql:literal) => { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { + iife!({ + self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+)) + }) + } + }; +} + +#[macro_export] +macro_rules! select_method { + ($id:ident() -> $return_type:ty: $sql:literal) => { + pub fn $id(&self) -> $crate::anyhow::Result> { + iife!({ + self.select::<$return_type>($sql)?(()) + }) + } + }; + ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + iife!({ + self.exec_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + }) + } + }; +} + +#[macro_export] +macro_rules! select_row_method { + ($id:ident() -> $return_type:ty: $sql:literal) => { + pub fn $id(&self) -> $crate::anyhow::Result> { + iife!({ + self.select_row::<$return_type>($sql)?(()) + }) + } + }; + ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + iife!({ + self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + }) + } + }; +} diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index c7808b0dbf..9c72105530 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -1,7 +1,10 @@ use std::path::{Path, PathBuf}; -use db::{connection, indoc, sqlez::domain::Domain}; -use util::{iife, ResultExt}; +use db::{ + connection, indoc, + sqlez::{domain::Domain, exec_method, select_row_method}, +}; +use util::iife; use workspace::{ItemId, Workspace, WorkspaceId}; use crate::Terminal; @@ -29,33 +32,16 @@ impl Domain for Terminal { } impl TerminalDb { - pub fn save_working_directory( - &self, - item_id: ItemId, - workspace_id: &WorkspaceId, - working_directory: &Path, - ) { - iife!({ - self.exec_bound::<(ItemId, &WorkspaceId, &Path)>(indoc! 
{" - INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) - VALUES (?, ?, ?) - "})?((item_id, workspace_id, working_directory)) - }) - .log_err(); - } + exec_method!( + save_working_directory(item_id: ItemId, workspace_id: &WorkspaceId, working_directory: &Path): + "INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) + VALUES (?, ?, ?)" + ); - pub fn get_working_directory( - &self, - item_id: ItemId, - workspace_id: &WorkspaceId, - ) -> Option { - iife!({ - self.select_row_bound::<(ItemId, &WorkspaceId), PathBuf>(indoc! {" - SELECT working_directory - FROM terminals - WHERE item_id = ? workspace_id = ?"})?((item_id, workspace_id)) - }) - .log_err() - .flatten() - } + select_row_method!( + get_working_directory(item_id: ItemId, workspace_id: &WorkspaceId) -> PathBuf: + "SELECT working_directory + FROM terminals + WHERE item_id = ? workspace_id = ?" + ); } diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 088729ff02..1c564afc63 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -33,11 +33,9 @@ use mappings::mouse::{ alt_scroll, grid_point, mouse_button_report, mouse_moved_report, mouse_side, scroll_report, }; -use persistence::TERMINAL_CONNECTION; use procinfo::LocalProcessInfo; use settings::{AlternateScroll, Settings, Shell, TerminalBlink}; use util::ResultExt; -use workspace::{ItemId, WorkspaceId}; use std::{ cmp::min, @@ -284,8 +282,6 @@ impl TerminalBuilder { blink_settings: Option, alternate_scroll: &AlternateScroll, window_id: usize, - item_id: ItemId, - workspace_id: WorkspaceId, ) -> Result { let pty_config = { let alac_shell = shell.clone().and_then(|shell| match shell { @@ -390,8 +386,6 @@ impl TerminalBuilder { last_mouse_position: None, next_link_id: 0, selection_phase: SelectionPhase::Ended, - workspace_id, - item_id, }; Ok(TerminalBuilder { @@ -535,8 +529,6 @@ pub struct Terminal { scroll_px: f32, next_link_id: usize, selection_phase: SelectionPhase, - item_id: ItemId, - workspace_id: WorkspaceId, } impl Terminal { @@ -578,15 +570,15 @@ impl Terminal { if self.update_process_info() { cx.emit(Event::TitleChanged); - if let Some(foreground_info) = self.foreground_process_info { - cx.background().spawn(async move { - TERMINAL_CONNECTION.save_working_directory( - self.item_id, - &self.workspace_id, - &foreground_info.cwd, - ); - }); - } + // if let Some(foreground_info) = self.foreground_process_info { + // cx.background().spawn(async move { + // TERMINAL_CONNECTION.save_working_directory( + // self.item_id, + // &self.workspace_id, + // &foreground_info.cwd, + // ); + // }); + // } } } AlacTermEvent::ColorRequest(idx, fun_ptr) => { diff --git a/crates/terminal/src/terminal_container_view.rs b/crates/terminal/src/terminal_container_view.rs index 2789f81676..88d4862bdc 100644 --- a/crates/terminal/src/terminal_container_view.rs +++ b/crates/terminal/src/terminal_container_view.rs @@ -8,13 +8,13 @@ use gpui::{ actions, elements::*, AnyViewHandle, AppContext, Entity, ModelHandle, MutableAppContext, Task, View, ViewContext, ViewHandle, WeakViewHandle, }; -use util::truncate_and_trailoff; +use util::{truncate_and_trailoff, ResultExt}; use workspace::searchable::{SearchEvent, SearchOptions, SearchableItem, SearchableItemHandle}; use workspace::{ item::{Item, ItemEvent}, ToolbarItemLocation, Workspace, }; -use workspace::{register_deserializable_item, ItemId, Pane, WorkspaceId}; +use workspace::{register_deserializable_item, Pane}; use project::{LocalWorktree, Project, 
ProjectPath}; use settings::{AlternateScroll, Settings, WorkingDirectory}; @@ -90,8 +90,6 @@ impl TerminalContainer { pub fn new( working_directory: Option, modal: bool, - item_id: ItemId, - workspace_id: WorkspaceId, cx: &mut ViewContext, ) -> Self { let settings = cx.global::(); @@ -118,8 +116,6 @@ impl TerminalContainer { settings.terminal_overrides.blinking.clone(), scroll, cx.window_id(), - item_id, - workspace_id, ) { Ok(terminal) => { let terminal = cx.add_model(|cx| terminal.subscribe(cx)); @@ -397,7 +393,7 @@ impl Item for TerminalContainer { ) -> Task>> { let working_directory = TERMINAL_CONNECTION.get_working_directory(item_id, &workspace_id); Task::ready(Ok(cx.add_view(|cx| { - TerminalContainer::new(working_directory, false, cx) + TerminalContainer::new(working_directory.log_err().flatten(), false, cx) }))) } } From a47f2ca445673b3f8896be4b554cb77f4fc39892 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sun, 20 Nov 2022 22:41:10 -0800 Subject: [PATCH 167/240] Added UUID based, stable workspace ID for caching on item startup. Completed first sketch of terminal persistence. Still need to debug it though.... --- Cargo.lock | 2 + crates/db/Cargo.toml | 1 + crates/db/src/db.rs | 141 ++++++- crates/diagnostics/src/diagnostics.rs | 6 +- crates/editor/src/editor.rs | 48 +-- crates/editor/src/items.rs | 5 +- crates/editor/src/persistence.rs | 18 +- crates/search/src/project_search.rs | 4 +- crates/sqlez/src/bindable.rs | 7 + crates/sqlez/src/typed_statements.rs | 54 --- crates/terminal/src/persistence.rs | 20 +- crates/terminal/src/terminal.rs | 35 +- .../terminal/src/terminal_container_view.rs | 40 +- crates/workspace/src/dock.rs | 2 +- crates/workspace/src/item.rs | 31 +- crates/workspace/src/persistence.rs | 345 +++++++++--------- crates/workspace/src/persistence/model.rs | 49 ++- crates/workspace/src/shared_screen.rs | 10 +- crates/workspace/src/workspace.rs | 43 ++- crates/zed/src/main.rs | 4 +- 20 files changed, 501 insertions(+), 364 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e887dfee66..f4998f235a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1572,6 +1572,7 @@ dependencies = [ "sqlez", "tempdir", "util", + "uuid 1.2.2", ] [[package]] @@ -6834,6 +6835,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" dependencies = [ "getrandom 0.2.8", + "rand 0.8.5", ] [[package]] diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 70721c310c..7e58b2e9bf 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -22,6 +22,7 @@ lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" serde = { version = "1.0", features = ["derive"] } +uuid = { version = "1.2.2", features = ["v4", "fast-rng"] } [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 6e4e6e0619..aa09dc812d 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,21 +1,26 @@ pub mod kvp; -// Re-export indoc and sqlez so clients only need to include us +// Re-export +pub use anyhow; pub use indoc::indoc; pub use lazy_static; pub use sqlez; - -use std::fs::{create_dir_all, remove_dir_all}; -use std::path::Path; +use sqlez::bindable::{Bind, Column}; #[cfg(any(test, feature = "test-support"))] use anyhow::Result; #[cfg(any(test, feature = "test-support"))] use sqlez::connection::Connection; -use sqlez::domain::{Domain, Migrator}; +#[cfg(any(test, feature = 
"test-support"))] +use sqlez::domain::Domain; + +use sqlez::domain::Migrator; use sqlez::thread_safe_connection::ThreadSafeConnection; +use std::fs::{create_dir_all, remove_dir_all}; +use std::path::Path; use util::channel::{ReleaseChannel, RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; use util::paths::DB_DIR; +use uuid::Uuid as RealUuid; const INITIALIZE_QUERY: &'static str = indoc! {" PRAGMA journal_mode=WAL; @@ -25,6 +30,47 @@ const INITIALIZE_QUERY: &'static str = indoc! {" PRAGMA case_sensitive_like=TRUE; "}; +#[derive(Debug, Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct Uuid(RealUuid); + +impl std::ops::Deref for Uuid { + type Target = RealUuid; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Bind for Uuid { + fn bind( + &self, + statement: &sqlez::statement::Statement, + start_index: i32, + ) -> anyhow::Result { + statement.bind(self.as_bytes(), start_index) + } +} + +impl Column for Uuid { + fn column( + statement: &mut sqlez::statement::Statement, + start_index: i32, + ) -> anyhow::Result<(Self, i32)> { + let blob = statement.column_blob(start_index)?; + Ok((Uuid::from_bytes(blob)?, start_index + 1)) + } +} + +impl Uuid { + pub fn new() -> Self { + Uuid(RealUuid::new_v4()) + } + + fn from_bytes(bytes: &[u8]) -> anyhow::Result { + Ok(Uuid(RealUuid::from_bytes(bytes.try_into()?))) + } +} + /// Open or create a database at the given directory path. pub fn open_file_db() -> ThreadSafeConnection { // Use 0 for now. Will implement incrementing and clearing of old db files soon TM @@ -77,3 +123,88 @@ macro_rules! connection { } }; } + +#[macro_export] +macro_rules! exec_method { + ($id:ident(): $sql:literal) => { + pub fn $id(&self) -> $crate::sqlez::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.exec($sql)?() + .context(::std::format!( + "Error in {}, exec failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + } + }; + ($id:ident($($arg:ident: $arg_type:ty),+): $sql:literal) => { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + } + }; +} + +#[macro_export] +macro_rules! select_method { + ($id:ident() -> $return_type:ty: $sql:literal) => { + pub fn $id(&self) -> $crate::sqlez::anyhow::Result> { + use $crate::anyhow::Context; + + self.select::<$return_type>($sql)?(()) + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + } + }; + ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result> { + use $crate::anyhow::Context; + + self.select_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + } + }; +} + +#[macro_export] +macro_rules! 
select_row_method { + ($id:ident() -> $return_type:ty: $sql:literal) => { + pub fn $id(&self) -> $crate::sqlez::anyhow::Result> { + use $crate::anyhow::Context; + + self.select_row::<$return_type>($sql)?(()) + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + } + }; + ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result> { + use $crate::anyhow::Context; + + self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + + } + }; +} diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index 639a108724..ef8b81ac66 100644 --- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs @@ -584,7 +584,11 @@ impl Item for ProjectDiagnosticsEditor { }); } - fn clone_on_split(&self, cx: &mut ViewContext) -> Option + fn clone_on_split( + &self, + _workspace_id: workspace::WorkspaceId, + cx: &mut ViewContext, + ) -> Option where Self: Sized, { diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 81cf7a9211..d66fc3e28c 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -83,7 +83,7 @@ use theme::{DiagnosticStyle, Theme}; use util::{post_inc, ResultExt, TryFutureExt}; use workspace::{ItemNavHistory, Workspace}; -use crate::{git::diff_hunk_to_display, persistence::DB}; +use crate::git::diff_hunk_to_display; const CURSOR_BLINK_INTERVAL: Duration = Duration::from_millis(500); const SCROLLBAR_SHOW_INTERVAL: Duration = Duration::from_secs(1); @@ -1137,30 +1137,30 @@ impl Editor { cx: &mut ViewContext, ) -> Self { let buffer = cx.add_model(|cx| MultiBuffer::singleton(buffer, cx)); - if let Some(project) = project.as_ref() { - if let Some(file) = buffer - .read(cx) - .as_singleton() - .and_then(|buffer| buffer.read(cx).file()) - .and_then(|file| file.as_local()) - { - let item_id = cx.weak_handle().id(); - let workspace_id = project - .read(cx) - .visible_worktrees(cx) - .map(|worktree| worktree.read(cx).abs_path()) - .collect::>() - .into(); - let path = file.abs_path(cx); - dbg!(&path); + // if let Some(project) = project.as_ref() { + // if let Some(file) = buffer + // .read(cx) + // .as_singleton() + // .and_then(|buffer| buffer.read(cx).file()) + // .and_then(|file| file.as_local()) + // { + // // let item_id = cx.weak_handle().id(); + // // let workspace_id = project + // // .read(cx) + // // .visible_worktrees(cx) + // // .map(|worktree| worktree.read(cx).abs_path()) + // // .collect::>() + // // .into(); + // let path = file.abs_path(cx); + // dbg!(&path); - cx.background() - .spawn(async move { - DB.save_path(item_id, workspace_id, path).log_err(); - }) - .detach(); - } - } + // // cx.background() + // // .spawn(async move { + // // DB.save_path(item_id, workspace_id, path).log_err(); + // // }) + // // .detach(); + // } + // } Self::new(EditorMode::Full, buffer, project, None, cx) } diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index f7dcd57f42..1e695d2364 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -368,7 +368,7 @@ impl Item for Editor { self.buffer.read(cx).is_singleton() } - fn clone_on_split(&self, cx: &mut ViewContext) -> Option + fn clone_on_split(&self, 
_workspace_id: WorkspaceId, cx: &mut ViewContext) -> Option where Self: Sized, { @@ -561,14 +561,13 @@ impl Item for Editor { fn deserialize( project: ModelHandle, _workspace: WeakViewHandle, - workspace_id: WorkspaceId, + workspace_id: workspace::WorkspaceId, item_id: ItemId, cx: &mut ViewContext, ) -> Task>> { if let Some(project_item) = project.update(cx, |project, cx| { // Look up the path with this key associated, create a self with that path let path = DB.get_path(item_id, workspace_id).ok()?; - dbg!(&path); let (worktree, path) = project.find_local_worktree(&path, cx)?; let project_path = ProjectPath { worktree_id: worktree.read(cx).id(), diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 5870bc71e5..4a44a32447 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -1,7 +1,7 @@ use std::path::{Path, PathBuf}; use anyhow::{Context, Result}; -use db::connection; +use db::{connection, exec_method}; use indoc::indoc; use sqlez::domain::Domain; use workspace::{ItemId, Workspace, WorkspaceId}; @@ -35,18 +35,12 @@ impl EditorDb { pub fn get_path(&self, item_id: ItemId, workspace_id: WorkspaceId) -> Result { self.select_row_bound(indoc! {" SELECT path FROM editors - WHERE item_id = ? AND workspace_id = ?"})?((item_id, &workspace_id))? + WHERE item_id = ? AND workspace_id = ?"})?((item_id, workspace_id))? .context("Path not found for serialized editor") } - pub fn save_path( - &self, - item_id: ItemId, - workspace_id: WorkspaceId, - path: PathBuf, - ) -> Result<()> { - self.exec_bound::<(ItemId, &WorkspaceId, &Path)>(indoc! {" - INSERT OR REPLACE INTO editors(item_id, workspace_id, path) - VALUES (?, ?, ?)"})?((item_id, &workspace_id, &path)) - } + exec_method!(save_path(item_id: ItemId, workspace_id: WorkspaceId, path: &Path): + "INSERT OR REPLACE INTO editors(item_id, workspace_id, path) + VALUES (?, ?, ?)" + ); } diff --git a/crates/search/src/project_search.rs b/crates/search/src/project_search.rs index 322d035870..6fa7d07d6f 100644 --- a/crates/search/src/project_search.rs +++ b/crates/search/src/project_search.rs @@ -26,7 +26,7 @@ use util::ResultExt as _; use workspace::{ item::{Item, ItemEvent, ItemHandle}, searchable::{Direction, SearchableItem, SearchableItemHandle}, - ItemNavHistory, Pane, ToolbarItemLocation, ToolbarItemView, Workspace, + ItemNavHistory, Pane, ToolbarItemLocation, ToolbarItemView, Workspace, WorkspaceId, }; actions!(project_search, [SearchInNew, ToggleFocus]); @@ -315,7 +315,7 @@ impl Item for ProjectSearchView { .update(cx, |editor, cx| editor.reload(project, cx)) } - fn clone_on_split(&self, cx: &mut ViewContext) -> Option + fn clone_on_split(&self, _workspace_id: WorkspaceId, cx: &mut ViewContext) -> Option where Self: Sized, { diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index 1e4f0df33f..18c4acedad 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -36,6 +36,13 @@ impl Bind for &[u8] { } } +impl Bind for &[u8; C] { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_blob(start_index, self.as_slice())?; + Ok(start_index + 1) + } +} + impl Bind for Vec { fn bind(&self, statement: &Statement, start_index: i32) -> Result { statement.bind_blob(start_index, self)?; diff --git a/crates/sqlez/src/typed_statements.rs b/crates/sqlez/src/typed_statements.rs index ce289437c2..98f51b970a 100644 --- a/crates/sqlez/src/typed_statements.rs +++ b/crates/sqlez/src/typed_statements.rs @@ -52,57 +52,3 @@ impl Connection 
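[Note: with this change, `EditorDb::save_path` is a single `exec_method!` line instead of a hand-written wrapper. For reference, the invocation expands to roughly the following; this is an approximate sketch of the generated method, not compiler output:

    impl EditorDb {
        // Approximate expansion of:
        //   exec_method!(save_path(item_id: ItemId, workspace_id: WorkspaceId, path: &Path):
        //       "INSERT OR REPLACE INTO editors(item_id, workspace_id, path) VALUES (?, ?, ?)");
        pub fn save_path(
            &self,
            item_id: ItemId,
            workspace_id: WorkspaceId,
            path: &Path,
        ) -> anyhow::Result<()> {
            use anyhow::Context;
            self.exec_bound::<(ItemId, WorkspaceId, &Path)>(
                "INSERT OR REPLACE INTO editors(item_id, workspace_id, path) VALUES (?, ?, ?)",
            )?((item_id, workspace_id, path))
            .context("Error in save_path, exec_bound failed to execute or parse")
        }
    }

The main payoff over the old `iife!` pattern is the `anyhow` context string naming the method and its SQL, so a failed statement is attributable from the log alone.]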
{ Ok(move |bindings| statement.with_bindings(bindings)?.maybe_row::()) } } - -#[macro_export] -macro_rules! exec_method { - ($id:ident(): $sql:literal) => { - pub fn $id(&self) -> $crate::anyhow::Result<()> { - iife!({ - self.exec($sql)?() - }) - } - }; - ($id:ident($($arg:ident: $arg_type:ty),+): $sql:literal) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { - iife!({ - self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+)) - }) - } - }; -} - -#[macro_export] -macro_rules! select_method { - ($id:ident() -> $return_type:ty: $sql:literal) => { - pub fn $id(&self) -> $crate::anyhow::Result> { - iife!({ - self.select::<$return_type>($sql)?(()) - }) - } - }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { - iife!({ - self.exec_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) - }) - } - }; -} - -#[macro_export] -macro_rules! select_row_method { - ($id:ident() -> $return_type:ty: $sql:literal) => { - pub fn $id(&self) -> $crate::anyhow::Result> { - iife!({ - self.select_row::<$return_type>($sql)?(()) - }) - } - }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { - iife!({ - self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) - }) - } - }; -} diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index 9c72105530..8928164676 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -1,10 +1,7 @@ use std::path::{Path, PathBuf}; -use db::{ - connection, indoc, - sqlez::{domain::Domain, exec_method, select_row_method}, -}; -use util::iife; +use db::{connection, exec_method, indoc, select_row_method, sqlez::domain::Domain}; + use workspace::{ItemId, Workspace, WorkspaceId}; use crate::Terminal; @@ -19,13 +16,12 @@ impl Domain for Terminal { fn migrations() -> &'static [&'static str] { &[indoc! {" CREATE TABLE terminals ( - item_id INTEGER, workspace_id BLOB, + item_id INTEGER, working_directory BLOB, - PRIMARY KEY(item_id, workspace_id), + PRIMARY KEY(workspace_id, item_id), FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - ON UPDATE CASCADE ) STRICT; "}] } @@ -33,15 +29,15 @@ impl Domain for Terminal { impl TerminalDb { exec_method!( - save_working_directory(item_id: ItemId, workspace_id: &WorkspaceId, working_directory: &Path): + save_working_directory(model_id: ItemId, workspace_id: WorkspaceId, working_directory: &Path): "INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) - VALUES (?, ?, ?)" + VALUES (?1, ?2, ?3)" ); select_row_method!( - get_working_directory(item_id: ItemId, workspace_id: &WorkspaceId) -> PathBuf: + get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> PathBuf: "SELECT working_directory FROM terminals - WHERE item_id = ? workspace_id = ?" + WHERE item_id = ? AND workspace_id = ?" 
); } diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 1c564afc63..fdf16b7825 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -33,9 +33,11 @@ use mappings::mouse::{ alt_scroll, grid_point, mouse_button_report, mouse_moved_report, mouse_side, scroll_report, }; +use persistence::TERMINAL_CONNECTION; use procinfo::LocalProcessInfo; use settings::{AlternateScroll, Settings, Shell, TerminalBlink}; use util::ResultExt; +use workspace::{ItemId, WorkspaceId}; use std::{ cmp::min, @@ -282,6 +284,8 @@ impl TerminalBuilder { blink_settings: Option, alternate_scroll: &AlternateScroll, window_id: usize, + item_id: ItemId, + workspace_id: WorkspaceId, ) -> Result { let pty_config = { let alac_shell = shell.clone().and_then(|shell| match shell { @@ -386,6 +390,8 @@ impl TerminalBuilder { last_mouse_position: None, next_link_id: 0, selection_phase: SelectionPhase::Ended, + workspace_id, + item_id, }; Ok(TerminalBuilder { @@ -529,6 +535,8 @@ pub struct Terminal { scroll_px: f32, next_link_id: usize, selection_phase: SelectionPhase, + workspace_id: WorkspaceId, + item_id: ItemId, } impl Terminal { @@ -566,20 +574,6 @@ impl Terminal { } AlacTermEvent::Wakeup => { cx.emit(Event::Wakeup); - - if self.update_process_info() { - cx.emit(Event::TitleChanged); - - // if let Some(foreground_info) = self.foreground_process_info { - // cx.background().spawn(async move { - // TERMINAL_CONNECTION.save_working_directory( - // self.item_id, - // &self.workspace_id, - // &foreground_info.cwd, - // ); - // }); - // } - } } AlacTermEvent::ColorRequest(idx, fun_ptr) => { self.events @@ -888,6 +882,19 @@ impl Terminal { if self.update_process_info() { cx.emit(Event::TitleChanged); + + if let Some(foreground_info) = &self.foreground_process_info { + let cwd = foreground_info.cwd.clone(); + let item_id = self.item_id; + let workspace_id = self.workspace_id; + cx.background() + .spawn(async move { + TERMINAL_CONNECTION + .save_working_directory(item_id, workspace_id, cwd.as_path()) + .log_err(); + }) + .detach(); + } } //Note that the ordering of events matters for event processing diff --git a/crates/terminal/src/terminal_container_view.rs b/crates/terminal/src/terminal_container_view.rs index 88d4862bdc..fdda388642 100644 --- a/crates/terminal/src/terminal_container_view.rs +++ b/crates/terminal/src/terminal_container_view.rs @@ -1,6 +1,6 @@ use crate::persistence::TERMINAL_CONNECTION; use crate::terminal_view::TerminalView; -use crate::{Event, Terminal, TerminalBuilder, TerminalError}; +use crate::{Event, TerminalBuilder, TerminalError}; use alacritty_terminal::index::Point; use dirs::home_dir; @@ -14,7 +14,7 @@ use workspace::{ item::{Item, ItemEvent}, ToolbarItemLocation, Workspace, }; -use workspace::{register_deserializable_item, Pane}; +use workspace::{register_deserializable_item, Pane, WorkspaceId}; use project::{LocalWorktree, Project, ProjectPath}; use settings::{AlternateScroll, Settings, WorkingDirectory}; @@ -82,7 +82,9 @@ impl TerminalContainer { .unwrap_or(WorkingDirectory::CurrentProjectDirectory); let working_directory = get_working_directory(workspace, cx, strategy); - let view = cx.add_view(|cx| TerminalContainer::new(working_directory, false, cx)); + let view = cx.add_view(|cx| { + TerminalContainer::new(working_directory, false, workspace.database_id(), cx) + }); workspace.add_item(Box::new(view), cx); } @@ -90,6 +92,7 @@ impl TerminalContainer { pub fn new( working_directory: Option, modal: bool, + workspace_id: WorkspaceId, cx: &mut 
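[Note on the working-directory save that lands in this hunk: everything the background task needs is copied out of `self` before the spawn. The commented-out draft from the previous commit captured `self.item_id` and `&self.workspace_id` inside the `async move` block, which cannot compile because the detached future must be `'static` and cannot borrow the terminal; that is plausibly why it was parked. The working shape, annotated:

    // Copy/clone first so the 'static future owns everything it captures.
    if let Some(foreground_info) = &self.foreground_process_info {
        let cwd = foreground_info.cwd.clone(); // PathBuf, cloned out of the borrow
        let item_id = self.item_id;            // Copy types move by value cheaply
        let workspace_id = self.workspace_id;
        cx.background()
            .spawn(async move {
                TERMINAL_CONNECTION
                    .save_working_directory(item_id, workspace_id, cwd.as_path())
                    .log_err(); // fire-and-forget: log failures, don't propagate
            })
            .detach();
    }

(The persistence method's first parameter is misnamed `model_id` at this point in the series; the next commit renames it back to `item_id`.)]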
ViewContext, ) -> Self { let settings = cx.global::(); @@ -116,10 +119,13 @@ impl TerminalContainer { settings.terminal_overrides.blinking.clone(), scroll, cx.window_id(), + cx.view_id(), + workspace_id, ) { Ok(terminal) => { let terminal = cx.add_model(|cx| terminal.subscribe(cx)); let view = cx.add_view(|cx| TerminalView::from_terminal(terminal, modal, cx)); + cx.subscribe(&view, |_this, _content, event, cx| cx.emit(*event)) .detach(); TerminalContainerContent::Connected(view) @@ -139,18 +145,6 @@ impl TerminalContainer { } } - pub fn from_terminal( - terminal: ModelHandle, - modal: bool, - cx: &mut ViewContext, - ) -> Self { - let connected_view = cx.add_view(|cx| TerminalView::from_terminal(terminal, modal, cx)); - TerminalContainer { - content: TerminalContainerContent::Connected(connected_view), - associated_directory: None, - } - } - fn connected(&self) -> Option> { match &self.content { TerminalContainerContent::Connected(vh) => Some(vh.clone()), @@ -278,13 +272,18 @@ impl Item for TerminalContainer { .boxed() } - fn clone_on_split(&self, cx: &mut ViewContext) -> Option { + fn clone_on_split( + &self, + workspace_id: WorkspaceId, + cx: &mut ViewContext, + ) -> Option { //From what I can tell, there's no way to tell the current working //Directory of the terminal from outside the shell. There might be //solutions to this, but they are non-trivial and require more IPC Some(TerminalContainer::new( self.associated_directory.clone(), false, + workspace_id, cx, )) } @@ -391,9 +390,14 @@ impl Item for TerminalContainer { item_id: workspace::ItemId, cx: &mut ViewContext, ) -> Task>> { - let working_directory = TERMINAL_CONNECTION.get_working_directory(item_id, &workspace_id); + let working_directory = TERMINAL_CONNECTION.get_working_directory(item_id, workspace_id); Task::ready(Ok(cx.add_view(|cx| { - TerminalContainer::new(working_directory.log_err().flatten(), false, cx) + TerminalContainer::new( + working_directory.log_err().flatten(), + false, + workspace_id, + cx, + ) }))) } } diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index 2e4fbcad6f..fb28571172 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -206,7 +206,7 @@ impl Dock { cx.focus(last_active_center_pane); } cx.emit(crate::Event::DockAnchorChanged); - workspace.serialize_workspace(None, cx); + workspace.serialize_workspace(cx); cx.notify(); } diff --git a/crates/workspace/src/item.rs b/crates/workspace/src/item.rs index d006f2fe15..b990ba20a2 100644 --- a/crates/workspace/src/item.rs +++ b/crates/workspace/src/item.rs @@ -22,11 +22,8 @@ use theme::Theme; use util::ResultExt; use crate::{ - pane, - persistence::model::{ItemId, WorkspaceId}, - searchable::SearchableItemHandle, - DelayedDebouncedEditAction, FollowableItemBuilders, ItemNavHistory, Pane, ToolbarItemLocation, - Workspace, + pane, persistence::model::ItemId, searchable::SearchableItemHandle, DelayedDebouncedEditAction, + FollowableItemBuilders, ItemNavHistory, Pane, ToolbarItemLocation, Workspace, WorkspaceId, }; #[derive(Eq, PartialEq, Hash)] @@ -52,7 +49,7 @@ pub trait Item: View { fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 3]>; fn is_singleton(&self, cx: &AppContext) -> bool; fn set_nav_history(&mut self, _: ItemNavHistory, _: &mut ViewContext); - fn clone_on_split(&self, _: &mut ViewContext) -> Option + fn clone_on_split(&self, _workspace_id: WorkspaceId, _: &mut ViewContext) -> Option where Self: Sized, { @@ -121,7 +118,9 @@ pub trait Item: View { fn breadcrumbs(&self, _theme: 
&Theme, _cx: &AppContext) -> Option> { None } + fn serialized_item_kind() -> Option<&'static str>; + fn deserialize( project: ModelHandle, workspace: WeakViewHandle, @@ -144,7 +143,11 @@ pub trait ItemHandle: 'static + fmt::Debug { fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 3]>; fn is_singleton(&self, cx: &AppContext) -> bool; fn boxed_clone(&self) -> Box; - fn clone_on_split(&self, cx: &mut MutableAppContext) -> Option>; + fn clone_on_split( + &self, + workspace_id: WorkspaceId, + cx: &mut MutableAppContext, + ) -> Option>; fn added_to_pane( &self, workspace: &mut Workspace, @@ -246,9 +249,13 @@ impl ItemHandle for ViewHandle { Box::new(self.clone()) } - fn clone_on_split(&self, cx: &mut MutableAppContext) -> Option> { + fn clone_on_split( + &self, + workspace_id: WorkspaceId, + cx: &mut MutableAppContext, + ) -> Option> { self.update(cx, |item, cx| { - cx.add_option_view(|cx| item.clone_on_split(cx)) + cx.add_option_view(|cx| item.clone_on_split(workspace_id, cx)) }) .map(|handle| Box::new(handle) as Box) } @@ -812,7 +819,11 @@ pub(crate) mod test { self.push_to_nav_history(cx); } - fn clone_on_split(&self, _: &mut ViewContext) -> Option + fn clone_on_split( + &self, + _workspace_id: WorkspaceId, + _: &mut ViewContext, + ) -> Option where Self: Sized, { diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 372c4cafce..bd59afd497 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -2,39 +2,38 @@ pub mod model; -use std::path::{Path, PathBuf}; -use std::sync::Arc; +use std::path::Path; -use anyhow::{anyhow, bail, Result, Context}; +use anyhow::{anyhow, bail, Context, Result}; use db::connection; use gpui::Axis; use indoc::indoc; - use db::sqlez::domain::Domain; use util::{iife, unzip_option, ResultExt}; use crate::dock::DockPosition; +use crate::WorkspaceId; use super::Workspace; use model::{ - GroupId, PaneId, SerializedItem, SerializedPane, SerializedPaneGroup, - SerializedWorkspace, WorkspaceId, + GroupId, PaneId, SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace, + WorkspaceLocation, }; connection!(DB: WorkspaceDb); - impl Domain for Workspace { fn name() -> &'static str { "workspace" } - + fn migrations() -> &'static [&'static str] { &[indoc! {" CREATE TABLE workspaces( workspace_id BLOB PRIMARY KEY, + workspace_location BLOB NOT NULL UNIQUE, dock_visible INTEGER, -- Boolean dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet @@ -97,21 +96,25 @@ impl WorkspaceDb { &self, worktree_roots: &[P], ) -> Option { - let workspace_id: WorkspaceId = worktree_roots.into(); + let workspace_location: WorkspaceLocation = worktree_roots.into(); // Note that we re-assign the workspace_id here in case it's empty // and we've grabbed the most recent workspace - let (workspace_id, dock_position): (WorkspaceId, DockPosition) = iife!({ + let (workspace_id, workspace_location, dock_position): ( + WorkspaceId, + WorkspaceLocation, + DockPosition, + ) = iife!({ if worktree_roots.len() == 0 { self.select_row(indoc! {" - SELECT workspace_id, dock_visible, dock_anchor + SELECT workspace_id, workspace_location, dock_visible, dock_anchor FROM workspaces ORDER BY timestamp DESC LIMIT 1"})?()? } else { self.select_row_bound(indoc! 
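[Note: taken together, the `Item` trait now carries the whole persistence surface: `serialized_item_kind()` names the `kind` written to the items table, and `deserialize` rebuilds a view from its `(workspace_id, item_id)` key. A sketch of what a persistable item supplies, where `MyItem`, `MY_DB`, and `get_state` are hypothetical stand-ins:

    impl Item for MyItem {
        fn serialized_item_kind() -> Option<&'static str> {
            Some("MyItem") // must match the `kind` column written at save time
        }

        fn deserialize(
            project: ModelHandle<Project>,
            _workspace: WeakViewHandle<Workspace>,
            workspace_id: WorkspaceId,
            item_id: ItemId,
            cx: &mut ViewContext<Pane>,
        ) -> Task<Result<ViewHandle<Self>>> {
            // Look up saved state by the (item_id, workspace_id) key, then
            // rebuild the view; Task::ready suffices for a synchronous lookup.
            let state = MY_DB.get_state(item_id, workspace_id);
            Task::ready(Ok(cx.add_view(|cx| MyItem::from_state(state, cx))))
        }
    }
    // Elsewhere, typically in the crate's init:
    register_deserializable_item::<MyItem>(cx);

Non-persistent items return `None` from `serialized_item_kind` and are simply skipped at save time.]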
{" - SELECT workspace_id, dock_visible, dock_anchor + SELECT workspace_id, workspace_location, dock_visible, dock_anchor FROM workspaces - WHERE workspace_id = ?"})?(&workspace_id)? + WHERE workspace_location = ?"})?(&workspace_location)? } .context("No workspaces found") }) @@ -119,13 +122,14 @@ impl WorkspaceDb { .flatten()?; Some(SerializedWorkspace { - workspace_id: workspace_id.clone(), + id: workspace_id, + location: workspace_location.clone(), dock_pane: self - .get_dock_pane(&workspace_id) + .get_dock_pane(workspace_id) .context("Getting dock pane") .log_err()?, center_group: self - .get_center_pane_group(&workspace_id) + .get_center_pane_group(workspace_id) .context("Getting center group") .log_err()?, dock_position, @@ -134,72 +138,61 @@ impl WorkspaceDb { /// Saves a workspace using the worktree roots. Will garbage collect any workspaces /// that used this workspace previously - pub fn save_workspace( - &self, - old_id: Option, - workspace: &SerializedWorkspace, - ) { + pub fn save_workspace(&self, workspace: &SerializedWorkspace) { self.with_savepoint("update_worktrees", || { + // Clear out panes and pane_groups self.exec_bound(indoc! {" UPDATE workspaces SET dock_pane = NULL WHERE workspace_id = ?1; DELETE FROM pane_groups WHERE workspace_id = ?1; - DELETE FROM panes WHERE workspace_id = ?1;"})? - (old_id.as_ref().unwrap_or(&workspace.workspace_id)).context("Clearing old panes")?; - - if let Some(old_id) = old_id { - self.exec_bound(indoc! {" - UPDATE OR REPLACE workspaces - SET workspace_id = ?, - dock_visible = ?, - dock_anchor = ?, - timestamp = CURRENT_TIMESTAMP - WHERE workspace_id = ?"})?(( - &workspace.workspace_id, - workspace.dock_position, - &old_id, - )).context("Updating workspace with new worktree roots")?; - } else { - self.exec_bound( - "INSERT OR REPLACE INTO workspaces(workspace_id, dock_visible, dock_anchor) VALUES (?, ?, ?)", - )?((&workspace.workspace_id, workspace.dock_position)).context("Uodating workspace")?; - } - + DELETE FROM panes WHERE workspace_id = ?1;"})?(workspace.id) + .context("Clearing old panes")?; + + // Update or insert + self.exec_bound(indoc! { + "INSERT OR REPLACE INTO + workspaces(workspace_id, workspace_location, dock_visible, dock_anchor, timestamp) + VALUES + (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP)" + })?((workspace.id, &workspace.location, workspace.dock_position)) + .context("Updating workspace")?; + // Save center pane group and dock pane - self.save_pane_group(&workspace.workspace_id, &workspace.center_group, None).context("save pane group in save workspace")?; - - let dock_id = self.save_pane(&workspace.workspace_id, &workspace.dock_pane, None, true).context("save pane in save workspace")?; - + self.save_pane_group(workspace.id, &workspace.center_group, None) + .context("save pane group in save workspace")?; + + let dock_id = self + .save_pane(workspace.id, &workspace.dock_pane, None, true) + .context("save pane in save workspace")?; + // Complete workspace initialization self.exec_bound(indoc! {" UPDATE workspaces SET dock_pane = ? 
- WHERE workspace_id = ?"})?(( - dock_id, - &workspace.workspace_id, - )).context("Finishing initialization with dock pane")?; + WHERE workspace_id = ?"})?((dock_id, workspace.id)) + .context("Finishing initialization with dock pane")?; Ok(()) }) .with_context(|| { format!( - "Update workspace with roots {:?} failed.", - workspace.workspace_id.paths() + "Update workspace with roots {:?} and id {:?} failed.", + workspace.location.paths(), + workspace.id ) }) .log_err(); } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots - pub fn recent_workspaces(&self, limit: usize) -> Vec>> { + pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, WorkspaceLocation)> { iife!({ // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html Ok::<_, anyhow::Error>( - self.select_bound::( - "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", + self.select_bound::( + "SELECT workspace_id, workspace_location FROM workspaces ORDER BY timestamp DESC LIMIT ?", )?(limit)? .into_iter() - .map(|id| id.paths()) - .collect::>>>(), + .collect::>(), ) }) .log_err() @@ -208,7 +201,7 @@ impl WorkspaceDb { pub(crate) fn get_center_pane_group( &self, - workspace_id: &WorkspaceId, + workspace_id: WorkspaceId, ) -> Result { self.get_pane_group(workspace_id, None)? .into_iter() @@ -218,10 +211,10 @@ impl WorkspaceDb { fn get_pane_group( &self, - workspace_id: &WorkspaceId, + workspace_id: WorkspaceId, group_id: Option, ) -> Result> { - type GroupKey<'a> = (Option, &'a WorkspaceId); + type GroupKey = (Option, WorkspaceId); type GroupOrPane = (Option, Option, Option, Option); self.select_bound::(indoc! {" SELECT group_id, axis, pane_id, active @@ -253,31 +246,29 @@ impl WorkspaceDb { if let Some((group_id, axis)) = group_id.zip(axis) { Ok(SerializedPaneGroup::Group { axis, - children: self.get_pane_group( - workspace_id, - Some(group_id), - )?, + children: self.get_pane_group(workspace_id, Some(group_id))?, }) } else if let Some((pane_id, active)) = pane_id.zip(active) { - Ok(SerializedPaneGroup::Pane(SerializedPane::new(self.get_items( pane_id)?, active))) + Ok(SerializedPaneGroup::Pane(SerializedPane::new( + self.get_items(pane_id)?, + active, + ))) } else { bail!("Pane Group Child was neither a pane group or a pane"); } }) // Filter out panes and pane groups which don't have any children or items - .filter(|pane_group| { - match pane_group { - Ok(SerializedPaneGroup::Group { children, .. }) => !children.is_empty(), - Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(), - _ => true, - } + .filter(|pane_group| match pane_group { + Ok(SerializedPaneGroup::Group { children, .. }) => !children.is_empty(), + Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(), + _ => true, }) .collect::>() } pub(crate) fn save_pane_group( &self, - workspace_id: &WorkspaceId, + workspace_id: WorkspaceId, pane_group: &SerializedPaneGroup, parent: Option<(GroupId, usize)>, ) -> Result<()> { @@ -285,26 +276,31 @@ impl WorkspaceDb { SerializedPaneGroup::Group { axis, children } => { let (parent_id, position) = unzip_option(parent); - let group_id = self.select_row_bound::<_, i64>(indoc!{" + let group_id = self.select_row_bound::<_, i64>(indoc! {" INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?) - RETURNING group_id"})? - ((workspace_id, parent_id, position, *axis))? 
- .ok_or_else(|| anyhow!("Couldn't retrieve group_id from inserted pane_group"))?; - + RETURNING group_id"})?(( + workspace_id, + parent_id, + position, + *axis, + ))? + .ok_or_else(|| anyhow!("Couldn't retrieve group_id from inserted pane_group"))?; + for (position, group) in children.iter().enumerate() { self.save_pane_group(workspace_id, group, Some((group_id, position)))? } + Ok(()) } SerializedPaneGroup::Pane(pane) => { self.save_pane(workspace_id, &pane, parent, false)?; Ok(()) - }, + } } } - pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { + pub(crate) fn get_dock_pane(&self, workspace_id: WorkspaceId) -> Result { let (pane_id, active) = self.select_row_bound(indoc! {" SELECT pane_id, active FROM panes @@ -315,40 +311,35 @@ impl WorkspaceDb { Ok(SerializedPane::new( self.get_items(pane_id).context("Reading items")?, - active + active, )) } pub(crate) fn save_pane( &self, - workspace_id: &WorkspaceId, + workspace_id: WorkspaceId, pane: &SerializedPane, parent: Option<(GroupId, usize)>, // None indicates BOTH dock pane AND center_pane dock: bool, ) -> Result { - let pane_id = self.select_row_bound::<_, i64>(indoc!{" + let pane_id = self.select_row_bound::<_, i64>(indoc! {" INSERT INTO panes(workspace_id, active) VALUES (?, ?) - RETURNING pane_id"}, - )?((workspace_id, pane.active))? + RETURNING pane_id"})?((workspace_id, pane.active))? .ok_or_else(|| anyhow!("Could not retrieve inserted pane_id"))?; - + if !dock { let (parent_id, order) = unzip_option(parent); self.exec_bound(indoc! {" INSERT INTO center_panes(pane_id, parent_group_id, position) - VALUES (?, ?, ?)"})?(( - pane_id, parent_id, order - ))?; + VALUES (?, ?, ?)"})?((pane_id, parent_id, order))?; } self.save_items(workspace_id, pane_id, &pane.children) .context("Saving items")?; - + Ok(pane_id) } - - pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { Ok(self.select_bound(indoc! 
{" @@ -359,7 +350,7 @@ impl WorkspaceDb { pub(crate) fn save_items( &self, - workspace_id: &WorkspaceId, + workspace_id: WorkspaceId, pane_id: PaneId, items: &[SerializedItem], ) -> Result<()> { @@ -376,7 +367,8 @@ impl WorkspaceDb { #[cfg(test)] mod tests { - use db::{open_memory_db}; + + use db::{open_memory_db, Uuid}; use settings::DockAnchor; use super::*; @@ -388,15 +380,13 @@ mod tests { let db = WorkspaceDb(open_memory_db(Some("test_full_workspace_serialization"))); let dock_pane = crate::persistence::model::SerializedPane { - children: vec![ SerializedItem::new("Terminal", 1), SerializedItem::new("Terminal", 2), SerializedItem::new("Terminal", 3), SerializedItem::new("Terminal", 4), - ], - active: false + active: false, }; // ----------------- @@ -415,8 +405,8 @@ mod tests { SerializedItem::new("Terminal", 5), SerializedItem::new("Terminal", 6), ], - false) - ), + false, + )), SerializedPaneGroup::Pane(SerializedPane::new( vec![ SerializedItem::new("Terminal", 7), @@ -430,7 +420,6 @@ mod tests { vec![ SerializedItem::new("Terminal", 9), SerializedItem::new("Terminal", 10), - ], false, )), @@ -438,25 +427,24 @@ mod tests { }; let workspace = SerializedWorkspace { - workspace_id: (["/tmp", "/tmp2"]).into(), - dock_position: DockPosition::Shown(DockAnchor::Bottom), + id: Uuid::new(), + location: (["/tmp", "/tmp2"]).into(), + dock_position: DockPosition::Shown(DockAnchor::Bottom), center_group, dock_pane, }; - - db.save_workspace(None, &workspace); + + db.save_workspace(&workspace); let round_trip_workspace = db.workspace_for_roots(&["/tmp2", "/tmp"]); - + assert_eq!(workspace, round_trip_workspace.unwrap()); // Test guaranteed duplicate IDs - db.save_workspace(None, &workspace); - db.save_workspace(None, &workspace); - + db.save_workspace(&workspace); + db.save_workspace(&workspace); + let round_trip_workspace = db.workspace_for_roots(&["/tmp", "/tmp2"]); assert_eq!(workspace, round_trip_workspace.unwrap()); - - } #[test] @@ -466,21 +454,23 @@ mod tests { let db = WorkspaceDb(open_memory_db(Some("test_basic_functionality"))); let workspace_1 = SerializedWorkspace { - workspace_id: (["/tmp", "/tmp2"]).into(), + id: WorkspaceId::new(), + location: (["/tmp", "/tmp2"]).into(), dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), dock_pane: Default::default(), }; let mut workspace_2 = SerializedWorkspace { - workspace_id: (["/tmp"]).into(), + id: WorkspaceId::new(), + location: (["/tmp"]).into(), dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), dock_pane: Default::default(), }; - db.save_workspace(None, &workspace_1); - db.save_workspace(None, &workspace_2); + db.save_workspace(&workspace_1); + db.save_workspace(&workspace_2); // Test that paths are treated as a set assert_eq!( @@ -497,8 +487,9 @@ mod tests { assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None); // Test 'mutate' case of updating a pre-existing id - workspace_2.workspace_id = (["/tmp", "/tmp2"]).into(); - db.save_workspace(Some((&["/tmp"]).into()), &workspace_2); + workspace_2.location = (["/tmp", "/tmp2"]).into(); + + db.save_workspace(&workspace_2); assert_eq!( db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), workspace_2 @@ -506,33 +497,28 @@ mod tests { // Test other mechanism for mutating let mut workspace_3 = SerializedWorkspace { - workspace_id: (&["/tmp", "/tmp2"]).into(), + id: WorkspaceId::new(), + location: (&["/tmp", "/tmp2"]).into(), dock_position: 
DockPosition::Shown(DockAnchor::Right), center_group: Default::default(), dock_pane: Default::default(), }; - - db.save_workspace(None, &workspace_3); + db.save_workspace(&workspace_3); assert_eq!( db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), workspace_3 ); // Make sure that updating paths differently also works - workspace_3.workspace_id = (["/tmp3", "/tmp4", "/tmp2"]).into(); - db.save_workspace( - Some((&["/tmp", "/tmp2"]).into()), - &workspace_3, - ); + workspace_3.location = (["/tmp3", "/tmp4", "/tmp2"]).into(); + db.save_workspace(&workspace_3); assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None); assert_eq!( db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]) .unwrap(), workspace_3 ); - - } use crate::dock::DockPosition; @@ -545,7 +531,8 @@ mod tests { center_group: &SerializedPaneGroup, ) -> SerializedWorkspace { SerializedWorkspace { - workspace_id: workspace_id.into(), + id: WorkspaceId::new(), + location: workspace_id.into(), dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Right), center_group: center_group.clone(), dock_pane, @@ -564,12 +551,13 @@ mod tests { SerializedItem::new("Terminal", 4), SerializedItem::new("Terminal", 2), SerializedItem::new("Terminal", 3), - ], false + ], + false, ); let workspace = default_workspace(&["/tmp"], dock_pane, &Default::default()); - db.save_workspace(None, &workspace); + db.save_workspace(&workspace); let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); @@ -593,16 +581,20 @@ mod tests { SerializedPaneGroup::Group { axis: gpui::Axis::Vertical, children: vec![ - SerializedPaneGroup::Pane(SerializedPane::new( - vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), - ], - false)), - SerializedPaneGroup::Pane(SerializedPane::new(vec![ - SerializedItem::new("Terminal", 4), - SerializedItem::new("Terminal", 3), - ], true)), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), + ], + false, + )), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 3), + ], + true, + )), ], }, SerializedPaneGroup::Pane(SerializedPane::new( @@ -610,41 +602,46 @@ mod tests { SerializedItem::new("Terminal", 5), SerializedItem::new("Terminal", 6), ], - false)), + false, + )), ], }; let workspace = default_workspace(&["/tmp"], Default::default(), ¢er_pane); - db.save_workspace(None, &workspace); - + db.save_workspace(&workspace); + let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); assert_eq!(workspace.center_group, new_workspace.center_group); } - + #[test] fn test_cleanup_panes() { env_logger::try_init().ok(); - + let db = WorkspaceDb(open_memory_db(Some("test_cleanup_panes"))); - + let center_pane = SerializedPaneGroup::Group { axis: gpui::Axis::Horizontal, children: vec![ SerializedPaneGroup::Group { axis: gpui::Axis::Vertical, children: vec![ - SerializedPaneGroup::Pane(SerializedPane::new( - vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), - ], - false)), - SerializedPaneGroup::Pane(SerializedPane::new(vec![ - SerializedItem::new("Terminal", 4), - SerializedItem::new("Terminal", 3), - ], true)), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), + ], + false, + )), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 3), + ], + true, + )), ], 
}, SerializedPaneGroup::Pane(SerializedPane::new( @@ -652,37 +649,41 @@ mod tests { SerializedItem::new("Terminal", 5), SerializedItem::new("Terminal", 6), ], - false)), + false, + )), ], }; let id = &["/tmp"]; - + let mut workspace = default_workspace(id, Default::default(), ¢er_pane); - db.save_workspace(None, &workspace); - + db.save_workspace(&workspace); + workspace.center_group = SerializedPaneGroup::Group { axis: gpui::Axis::Vertical, children: vec![ - SerializedPaneGroup::Pane(SerializedPane::new( - vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), - ], - false)), - SerializedPaneGroup::Pane(SerializedPane::new(vec![ - SerializedItem::new("Terminal", 4), - SerializedItem::new("Terminal", 3), - ], true)), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), + ], + false, + )), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 3), + ], + true, + )), ], }; - - db.save_workspace(None, &workspace); - + + db.save_workspace(&workspace); + let new_workspace = db.workspace_for_roots(id).unwrap(); assert_eq!(workspace.center_group, new_workspace.center_group); - } } diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index 5f046d76ee..ff8be51406 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -16,18 +16,20 @@ use project::Project; use settings::DockAnchor; use util::ResultExt; -use crate::{dock::DockPosition, ItemDeserializers, Member, Pane, PaneAxis, Workspace}; +use crate::{ + dock::DockPosition, ItemDeserializers, Member, Pane, PaneAxis, Workspace, WorkspaceId, +}; #[derive(Debug, Clone, PartialEq, Eq)] -pub struct WorkspaceId(Arc>); +pub struct WorkspaceLocation(Arc>); -impl WorkspaceId { +impl WorkspaceLocation { pub fn paths(&self) -> Arc> { self.0.clone() } } -impl, T: IntoIterator> From for WorkspaceId { +impl, T: IntoIterator> From for WorkspaceLocation { fn from(iterator: T) -> Self { let mut roots = iterator .into_iter() @@ -38,7 +40,7 @@ impl, T: IntoIterator> From for WorkspaceId { } } -impl Bind for &WorkspaceId { +impl Bind for &WorkspaceLocation { fn bind(&self, statement: &Statement, start_index: i32) -> Result { bincode::serialize(&self.0) .expect("Bincode serialization of paths should not fail") @@ -46,16 +48,20 @@ impl Bind for &WorkspaceId { } } -impl Column for WorkspaceId { +impl Column for WorkspaceLocation { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { let blob = statement.column_blob(start_index)?; - Ok((WorkspaceId(bincode::deserialize(blob)?), start_index + 1)) + Ok(( + WorkspaceLocation(bincode::deserialize(blob)?), + start_index + 1, + )) } } #[derive(Debug, PartialEq, Eq)] pub struct SerializedWorkspace { - pub workspace_id: WorkspaceId, + pub id: WorkspaceId, + pub location: WorkspaceLocation, pub dock_position: DockPosition, pub center_group: SerializedPaneGroup, pub dock_pane: SerializedPane, @@ -70,10 +76,11 @@ pub enum SerializedPaneGroup { Pane(SerializedPane), } +#[cfg(test)] impl Default for SerializedPaneGroup { fn default() -> Self { Self::Pane(SerializedPane { - children: Vec::new(), + children: vec![SerializedItem::default()], active: false, }) } @@ -84,7 +91,7 @@ impl SerializedPaneGroup { pub(crate) async fn deserialize( &self, project: &ModelHandle, - workspace_id: &WorkspaceId, + workspace_id: WorkspaceId, workspace: 
&ViewHandle, cx: &mut AsyncAppContext, ) -> (Member, Option>) { @@ -136,13 +143,12 @@ impl SerializedPane { &self, project: &ModelHandle, pane_handle: &ViewHandle, - workspace_id: &WorkspaceId, + workspace_id: WorkspaceId, workspace: &ViewHandle, cx: &mut AsyncAppContext, ) { for item in self.children.iter() { let project = project.clone(); - let workspace_id = workspace_id.clone(); let item_handle = pane_handle .update(cx, |_, cx| { if let Some(deserializer) = cx.global::().get(&item.kind) { @@ -191,6 +197,16 @@ impl SerializedItem { } } +#[cfg(test)] +impl Default for SerializedItem { + fn default() -> Self { + SerializedItem { + kind: Arc::from("Terminal"), + item_id: 100000, + } + } +} + impl Bind for &SerializedItem { fn bind(&self, statement: &Statement, start_index: i32) -> Result { let next_index = statement.bind(self.kind.clone(), start_index)?; @@ -231,7 +247,7 @@ mod tests { use db::sqlez::connection::Connection; use settings::DockAnchor; - use super::WorkspaceId; + use super::WorkspaceLocation; #[test] fn test_workspace_round_trips() { @@ -245,7 +261,7 @@ mod tests { .unwrap()() .unwrap(); - let workspace_id: WorkspaceId = WorkspaceId::from(&["\test2", "\test1"]); + let workspace_id: WorkspaceLocation = WorkspaceLocation::from(&["\test2", "\test1"]); db.exec_bound("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)") .unwrap()((&workspace_id, DockAnchor::Bottom)) @@ -255,7 +271,10 @@ mod tests { db.select_row("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1") .unwrap()() .unwrap(), - Some((WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom)) + Some(( + WorkspaceLocation::from(&["\test1", "\test2"]), + DockAnchor::Bottom + )) ); } } diff --git a/crates/workspace/src/shared_screen.rs b/crates/workspace/src/shared_screen.rs index 28623950df..7dee642423 100644 --- a/crates/workspace/src/shared_screen.rs +++ b/crates/workspace/src/shared_screen.rs @@ -1,7 +1,5 @@ use crate::{ - item::ItemEvent, - persistence::model::{ItemId, WorkspaceId}, - Item, ItemNavHistory, Pane, Workspace, + item::ItemEvent, persistence::model::ItemId, Item, ItemNavHistory, Pane, Workspace, WorkspaceId, }; use anyhow::{anyhow, Result}; use call::participant::{Frame, RemoteVideoTrack}; @@ -148,7 +146,11 @@ impl Item for SharedScreen { self.nav_history = Some(history); } - fn clone_on_split(&self, cx: &mut ViewContext) -> Option { + fn clone_on_split( + &self, + _workspace_id: WorkspaceId, + cx: &mut ViewContext, + ) -> Option { let track = self.track.upgrade()?; Some(Self::new(&track, self.peer_id, self.user.clone(), cx)) } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 66fdd19c70..4b02522496 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -26,6 +26,7 @@ use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; +use db::Uuid; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -45,7 +46,7 @@ use log::{error, warn}; pub use pane::*; pub use pane_group::*; use persistence::model::SerializedItem; -pub use persistence::model::{ItemId, WorkspaceId}; +pub use persistence::model::{ItemId, WorkspaceLocation}; use postage::prelude::Stream; use project::{Project, ProjectEntryId, ProjectPath, ProjectStore, Worktree, WorktreeId}; use serde::Deserialize; @@ -128,6 +129,8 @@ pub struct OpenProjectEntryInPane { project_entry: 
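[Note: `WorkspaceLocation` is the piece that survives the id redesign: the set of worktree root paths, bincode-encoded into a single BLOB. The `From` constructor canonicalizes the path list (the round-trip test below builds it from the same paths in two different orders and expects equality), so the blob doubles as a stable lookup key for `workspace_for_roots`. A usage sketch with hypothetical paths:

    // Two differently-ordered constructions compare (and bind) equal,
    // which is what lets the BLOB column serve as a UNIQUE location key.
    let a = WorkspaceLocation::from(&["/projects/zed", "/projects/docs"]);
    let b = WorkspaceLocation::from(&["/projects/docs", "/projects/zed"]);
    assert_eq!(a, b);
]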
ProjectEntryId, } +pub type WorkspaceId = Uuid; + impl_internal_actions!( workspace, [ @@ -530,6 +533,7 @@ pub struct Workspace { last_leaders_by_pane: HashMap, PeerId>, window_edited: bool, active_call: Option<(ModelHandle, Vec)>, + database_id: WorkspaceId, _observe_current_user: Task<()>, } @@ -556,7 +560,7 @@ impl Workspace { project::Event::WorktreeRemoved(_) | project::Event::WorktreeAdded => { this.update_window_title(cx); // TODO: Cache workspace_id on workspace and read from it here - this.serialize_workspace(None, cx); + this.serialize_workspace(cx); } project::Event::DisconnectedFromHost => { this.update_window_edited(cx); @@ -630,6 +634,12 @@ impl Workspace { active_call = Some((call, subscriptions)); } + let id = if let Some(id) = serialized_workspace.as_ref().map(|ws| ws.id) { + id + } else { + WorkspaceId::new() + }; + let mut this = Workspace { modal: None, weak_self: weak_handle.clone(), @@ -657,6 +667,7 @@ impl Workspace { last_leaders_by_pane: Default::default(), window_edited: false, active_call, + database_id: id, _observe_current_user, }; this.project_remote_id_changed(project.read(cx).remote_id(), cx); @@ -1317,7 +1328,7 @@ impl Workspace { pub fn add_item(&mut self, item: Box, cx: &mut ViewContext) { let active_pane = self.active_pane().clone(); Pane::add_item(self, &active_pane, item, true, true, None, cx); - self.serialize_workspace(None, cx); + self.serialize_workspace(cx); } pub fn open_path( @@ -1522,7 +1533,7 @@ impl Workspace { entry.remove(); } } - self.serialize_workspace(None, cx); + self.serialize_workspace(cx); } _ => {} } @@ -1544,7 +1555,7 @@ impl Workspace { pane.read(cx).active_item().map(|item| { let new_pane = self.add_pane(cx); - if let Some(clone) = item.clone_on_split(cx.as_mut()) { + if let Some(clone) = item.clone_on_split(self.database_id(), cx.as_mut()) { Pane::add_item(self, &new_pane, clone, true, true, None, cx); } self.center.split(&pane, &new_pane, direction).unwrap(); @@ -2255,7 +2266,11 @@ impl Workspace { } } - fn workspace_id(&self, cx: &AppContext) -> WorkspaceId { + pub fn database_id(&self) -> WorkspaceId { + self.database_id + } + + fn location(&self, cx: &AppContext) -> WorkspaceLocation { self.project() .read(cx) .visible_worktrees(cx) @@ -2275,7 +2290,7 @@ impl Workspace { } } - fn serialize_workspace(&self, old_id: Option, cx: &AppContext) { + fn serialize_workspace(&self, cx: &AppContext) { fn serialize_pane_handle( pane_handle: &ViewHandle, cx: &AppContext, @@ -2320,7 +2335,8 @@ impl Workspace { let center_group = build_serialized_pane_group(&self.center.root, cx); let serialized_workspace = SerializedWorkspace { - workspace_id: self.workspace_id(cx), + id: self.database_id, + location: self.location(cx), dock_position: self.dock.position(), dock_pane, center_group, @@ -2328,7 +2344,7 @@ impl Workspace { cx.background() .spawn(async move { - persistence::DB.save_workspace(old_id, &serialized_workspace); + persistence::DB.save_workspace(&serialized_workspace); }) .detach(); } @@ -2349,7 +2365,7 @@ impl Workspace { .deserialize_to( &project, &dock_pane_handle, - &serialized_workspace.workspace_id, + serialized_workspace.id, &workspace, &mut cx, ) @@ -2359,12 +2375,7 @@ impl Workspace { let (root, active_pane) = serialized_workspace .center_group - .deserialize( - &project, - &serialized_workspace.workspace_id, - &workspace, - &mut cx, - ) + .deserialize(&project, serialized_workspace.id, &workspace, &mut cx) .await; // Remove old panes from workspace panes list diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs 
index 53273b45d8..c95b7c4071 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -597,6 +597,8 @@ pub fn default_item_factory( let working_directory = get_working_directory(workspace, cx, strategy); - let terminal_handle = cx.add_view(|cx| TerminalContainer::new(working_directory, false, cx)); + let terminal_handle = cx.add_view(|cx| { + TerminalContainer::new(working_directory, false, workspace.database_id(), cx) + }); Box::new(terminal_handle) } From e1eff3f4cd28b335610cc6dacc8c7b73d6f1a34c Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sun, 20 Nov 2022 23:44:30 -0800 Subject: [PATCH 168/240] WIP: Some bugs switching to database provided IDs, terminal titles don't reload when restored from serialized, workspace tests are no longer passing but should be easy to fix when it isn't 11:44 --- Cargo.lock | 2 - crates/db/Cargo.toml | 1 - crates/db/src/db.rs | 45 +------------------ crates/editor/src/persistence.rs | 2 +- crates/sqlez/src/thread_safe_connection.rs | 4 +- crates/terminal/src/persistence.rs | 4 +- crates/workspace/src/persistence.rs | 50 ++++++++++++---------- crates/workspace/src/persistence/model.rs | 2 +- crates/workspace/src/workspace.rs | 7 ++- 9 files changed, 37 insertions(+), 80 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f4998f235a..e887dfee66 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1572,7 +1572,6 @@ dependencies = [ "sqlez", "tempdir", "util", - "uuid 1.2.2", ] [[package]] @@ -6835,7 +6834,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" dependencies = [ "getrandom 0.2.8", - "rand 0.8.5", ] [[package]] diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 7e58b2e9bf..70721c310c 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -22,7 +22,6 @@ lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" serde = { version = "1.0", features = ["derive"] } -uuid = { version = "1.2.2", features = ["v4", "fast-rng"] } [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index aa09dc812d..7ec4a12223 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -5,7 +5,6 @@ pub use anyhow; pub use indoc::indoc; pub use lazy_static; pub use sqlez; -use sqlez::bindable::{Bind, Column}; #[cfg(any(test, feature = "test-support"))] use anyhow::Result; @@ -20,7 +19,6 @@ use std::fs::{create_dir_all, remove_dir_all}; use std::path::Path; use util::channel::{ReleaseChannel, RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; use util::paths::DB_DIR; -use uuid::Uuid as RealUuid; const INITIALIZE_QUERY: &'static str = indoc! {" PRAGMA journal_mode=WAL; @@ -30,47 +28,6 @@ const INITIALIZE_QUERY: &'static str = indoc! 
{" PRAGMA case_sensitive_like=TRUE; "}; -#[derive(Debug, Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct Uuid(RealUuid); - -impl std::ops::Deref for Uuid { - type Target = RealUuid; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl Bind for Uuid { - fn bind( - &self, - statement: &sqlez::statement::Statement, - start_index: i32, - ) -> anyhow::Result { - statement.bind(self.as_bytes(), start_index) - } -} - -impl Column for Uuid { - fn column( - statement: &mut sqlez::statement::Statement, - start_index: i32, - ) -> anyhow::Result<(Self, i32)> { - let blob = statement.column_blob(start_index)?; - Ok((Uuid::from_bytes(blob)?, start_index + 1)) - } -} - -impl Uuid { - pub fn new() -> Self { - Uuid(RealUuid::new_v4()) - } - - fn from_bytes(bytes: &[u8]) -> anyhow::Result { - Ok(Uuid(RealUuid::from_bytes(bytes.try_into()?))) - } -} - /// Open or create a database at the given directory path. pub fn open_file_db() -> ThreadSafeConnection { // Use 0 for now. Will implement incrementing and clearing of old db files soon TM @@ -186,7 +143,7 @@ macro_rules! select_row_method { pub fn $id(&self) -> $crate::sqlez::anyhow::Result> { use $crate::anyhow::Context; - self.select_row::<$return_type>($sql)?(()) + self.select_row::<$return_type>($sql)?() .context(::std::format!( "Error in {}, select_row failed to execute or parse for: {}", ::std::stringify!($id), diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 4a44a32447..2344037384 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -19,7 +19,7 @@ impl Domain for Editor { &[indoc! {" CREATE TABLE editors( item_id INTEGER NOT NULL, - workspace_id BLOB NOT NULL, + workspace_id INTEGER NOT NULL, path BLOB NOT NULL, PRIMARY KEY(item_id, workspace_id), FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 5a5095ad77..7c5bf6388c 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -130,7 +130,7 @@ mod test { fn migrations() -> &'static [&'static str] { &[" CREATE TABLE workspaces( - workspace_id BLOB PRIMARY KEY, + workspace_id INTEGER PRIMARY KEY, dock_visible INTEGER, -- Boolean dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet @@ -141,7 +141,7 @@ mod test { CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, + workspace_id INTEGER NOT NULL, active INTEGER NOT NULL, -- Boolean FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index 8928164676..d624724e5c 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -16,7 +16,7 @@ impl Domain for Terminal { fn migrations() -> &'static [&'static str] { &[indoc! 
{" CREATE TABLE terminals ( - workspace_id BLOB, + workspace_id INTEGER, item_id INTEGER, working_directory BLOB, PRIMARY KEY(workspace_id, item_id), @@ -29,7 +29,7 @@ impl Domain for Terminal { impl TerminalDb { exec_method!( - save_working_directory(model_id: ItemId, workspace_id: WorkspaceId, working_directory: &Path): + save_working_directory(item_id: ItemId, workspace_id: WorkspaceId, working_directory: &Path): "INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) VALUES (?1, ?2, ?3)" ); diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index bd59afd497..f635744817 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -5,7 +5,7 @@ pub mod model; use std::path::Path; use anyhow::{anyhow, bail, Context, Result}; -use db::connection; +use db::{connection, select_row_method}; use gpui::Axis; use indoc::indoc; @@ -32,8 +32,8 @@ impl Domain for Workspace { fn migrations() -> &'static [&'static str] { &[indoc! {" CREATE TABLE workspaces( - workspace_id BLOB PRIMARY KEY, - workspace_location BLOB NOT NULL UNIQUE, + workspace_id INTEGER PRIMARY KEY, + workspace_location BLOB UNIQUE, dock_visible INTEGER, -- Boolean dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet @@ -43,7 +43,7 @@ impl Domain for Workspace { CREATE TABLE pane_groups( group_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, + workspace_id INTEGER NOT NULL, parent_group_id INTEGER, -- NULL indicates that this is a root node position INTEGER, -- NULL indicates that this is a root node axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' @@ -55,7 +55,7 @@ impl Domain for Workspace { CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, + workspace_id INTEGER NOT NULL, active INTEGER NOT NULL, -- Boolean FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE @@ -73,7 +73,7 @@ impl Domain for Workspace { CREATE TABLE items( item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique - workspace_id BLOB NOT NULL, + workspace_id INTEGER NOT NULL, pane_id INTEGER NOT NULL, kind TEXT NOT NULL, position INTEGER NOT NULL, @@ -149,10 +149,12 @@ impl WorkspaceDb { // Update or insert self.exec_bound(indoc! { - "INSERT OR REPLACE INTO + "INSERT INTO workspaces(workspace_id, workspace_location, dock_visible, dock_anchor, timestamp) VALUES - (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP)" + (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP) + ON CONFLICT DO UPDATE SET + workspace_location = ?2, dock_visible = ?3, dock_anchor = ?4, timestamp = CURRENT_TIMESTAMP" })?((workspace.id, &workspace.location, workspace.dock_position)) .context("Updating workspace")?; @@ -183,6 +185,11 @@ impl WorkspaceDb { .log_err(); } + select_row_method!( + next_id() -> WorkspaceId: + "INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id" + ); + /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, WorkspaceLocation)> { iife!({ @@ -199,10 +206,7 @@ impl WorkspaceDb { .unwrap_or_default() } - pub(crate) fn get_center_pane_group( - &self, - workspace_id: WorkspaceId, - ) -> Result { + fn get_center_pane_group(&self, workspace_id: WorkspaceId) -> Result { self.get_pane_group(workspace_id, None)? 
.into_iter() .next() @@ -266,7 +270,7 @@ impl WorkspaceDb { .collect::>() } - pub(crate) fn save_pane_group( + fn save_pane_group( &self, workspace_id: WorkspaceId, pane_group: &SerializedPaneGroup, @@ -300,7 +304,7 @@ impl WorkspaceDb { } } - pub(crate) fn get_dock_pane(&self, workspace_id: WorkspaceId) -> Result { + fn get_dock_pane(&self, workspace_id: WorkspaceId) -> Result { let (pane_id, active) = self.select_row_bound(indoc! {" SELECT pane_id, active FROM panes @@ -315,7 +319,7 @@ impl WorkspaceDb { )) } - pub(crate) fn save_pane( + fn save_pane( &self, workspace_id: WorkspaceId, pane: &SerializedPane, @@ -341,14 +345,14 @@ impl WorkspaceDb { Ok(pane_id) } - pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { + fn get_items(&self, pane_id: PaneId) -> Result> { Ok(self.select_bound(indoc! {" SELECT kind, item_id FROM items WHERE pane_id = ? ORDER BY position"})?(pane_id)?) } - pub(crate) fn save_items( + fn save_items( &self, workspace_id: WorkspaceId, pane_id: PaneId, @@ -368,7 +372,7 @@ impl WorkspaceDb { #[cfg(test)] mod tests { - use db::{open_memory_db, Uuid}; + use db::open_memory_db; use settings::DockAnchor; use super::*; @@ -427,7 +431,7 @@ mod tests { }; let workspace = SerializedWorkspace { - id: Uuid::new(), + id: 5, location: (["/tmp", "/tmp2"]).into(), dock_position: DockPosition::Shown(DockAnchor::Bottom), center_group, @@ -454,7 +458,7 @@ mod tests { let db = WorkspaceDb(open_memory_db(Some("test_basic_functionality"))); let workspace_1 = SerializedWorkspace { - id: WorkspaceId::new(), + id: 1, location: (["/tmp", "/tmp2"]).into(), dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), @@ -462,7 +466,7 @@ mod tests { }; let mut workspace_2 = SerializedWorkspace { - id: WorkspaceId::new(), + id: 2, location: (["/tmp"]).into(), dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), @@ -497,7 +501,7 @@ mod tests { // Test other mechanism for mutating let mut workspace_3 = SerializedWorkspace { - id: WorkspaceId::new(), + id: 3, location: (&["/tmp", "/tmp2"]).into(), dock_position: DockPosition::Shown(DockAnchor::Right), center_group: Default::default(), @@ -531,7 +535,7 @@ mod tests { center_group: &SerializedPaneGroup, ) -> SerializedWorkspace { SerializedWorkspace { - id: WorkspaceId::new(), + id: 4, location: workspace_id.into(), dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Right), center_group: center_group.clone(), diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index ff8be51406..111a6904c6 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -255,7 +255,7 @@ mod tests { db.exec(indoc::indoc! 
{" CREATE TABLE workspace_id_test( - workspace_id BLOB, + workspace_id INTEGER, dock_anchor TEXT );"}) .unwrap()() diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 4b02522496..0a4a6c8740 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -26,7 +26,6 @@ use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; -use db::Uuid; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -45,8 +44,8 @@ use language::LanguageRegistry; use log::{error, warn}; pub use pane::*; pub use pane_group::*; -use persistence::model::SerializedItem; pub use persistence::model::{ItemId, WorkspaceLocation}; +use persistence::{model::SerializedItem, DB}; use postage::prelude::Stream; use project::{Project, ProjectEntryId, ProjectPath, ProjectStore, Worktree, WorktreeId}; use serde::Deserialize; @@ -129,7 +128,7 @@ pub struct OpenProjectEntryInPane { project_entry: ProjectEntryId, } -pub type WorkspaceId = Uuid; +pub type WorkspaceId = i64; impl_internal_actions!( workspace, @@ -637,7 +636,7 @@ impl Workspace { let id = if let Some(id) = serialized_workspace.as_ref().map(|ws| ws.id) { id } else { - WorkspaceId::new() + DB.next_id().log_err().flatten().unwrap_or(0) }; let mut this = Workspace { From cf4c103660e396375b5a4aa090e9d4103b4afb09 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 21 Nov 2022 09:30:41 -0800 Subject: [PATCH 169/240] Fixed workspace tests --- crates/workspace/src/persistence.rs | 81 ++++++++++++++++++++++++++++- 1 file changed, 79 insertions(+), 2 deletions(-) diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index f635744817..88a894a922 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -147,14 +147,19 @@ impl WorkspaceDb { DELETE FROM panes WHERE workspace_id = ?1;"})?(workspace.id) .context("Clearing old panes")?; + self.exec_bound(indoc! {" + DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ? + "})?((&workspace.location, workspace.id)) + .context("clearing out old locations")?; + // Update or insert self.exec_bound(indoc! 
{ "INSERT INTO workspaces(workspace_id, workspace_location, dock_visible, dock_anchor, timestamp) VALUES (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP) - ON CONFLICT DO UPDATE SET - workspace_location = ?2, dock_visible = ?3, dock_anchor = ?4, timestamp = CURRENT_TIMESTAMP" + ON CONFLICT DO UPDATE SET + workspace_location = ?2, dock_visible = ?3, dock_anchor = ?4, timestamp = CURRENT_TIMESTAMP" })?((workspace.id, &workspace.location, workspace.dock_position)) .context("Updating workspace")?; @@ -372,11 +377,83 @@ impl WorkspaceDb { #[cfg(test)] mod tests { + use std::sync::Arc; + use db::open_memory_db; use settings::DockAnchor; use super::*; + #[test] + fn test_workspace_id_stability() { + env_logger::try_init().ok(); + + let db = WorkspaceDb(open_memory_db(Some("test_workspace_id_stability"))); + + db.migrate( + "test_table", + &["CREATE TABLE test_table( + text TEXT, + workspace_id INTEGER, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ) STRICT;"], + ) + .unwrap(); + + let mut workspace_1 = SerializedWorkspace { + id: 1, + location: (["/tmp", "/tmp2"]).into(), + dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), + center_group: Default::default(), + dock_pane: Default::default(), + }; + + let mut workspace_2 = SerializedWorkspace { + id: 2, + location: (["/tmp"]).into(), + dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), + center_group: Default::default(), + dock_pane: Default::default(), + }; + + db.save_workspace(&workspace_1); + + db.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + .unwrap()(("test-text-1", 1)) + .unwrap(); + + db.save_workspace(&workspace_2); + + db.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + .unwrap()(("test-text-2", 2)) + .unwrap(); + + workspace_1.location = (["/tmp", "/tmp3"]).into(); + db.save_workspace(&workspace_1); + db.save_workspace(&workspace_1); + + workspace_2.dock_pane.children.push(SerializedItem { + kind: Arc::from("Test"), + item_id: 10, + }); + db.save_workspace(&workspace_2); + + let test_text_1 = db + .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") + .unwrap()(2) + .unwrap() + .unwrap(); + assert_eq!(test_text_1, "test-text-2"); + + let test_text_2 = db + .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") + .unwrap()(1) + .unwrap() + .unwrap(); + assert_eq!(test_text_2, "test-text-1"); + } + #[test] fn test_full_workspace_serialization() { env_logger::try_init().ok(); From 76c42af62aa64775d81b480dec45ae2d915ee02b Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 21 Nov 2022 09:47:29 -0800 Subject: [PATCH 170/240] Finished terminal working directory restoration --- crates/terminal/src/terminal.rs | 34 ++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index fdf16b7825..b5192b6876 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -574,6 +574,23 @@ impl Terminal { } AlacTermEvent::Wakeup => { cx.emit(Event::Wakeup); + + if self.update_process_info() { + cx.emit(Event::TitleChanged); + + if let Some(foreground_info) = &self.foreground_process_info { + let cwd = foreground_info.cwd.clone(); + let item_id = self.item_id; + let workspace_id = self.workspace_id; + cx.background() + .spawn(async move { + TERMINAL_CONNECTION + .save_working_directory(item_id, workspace_id, cwd.as_path()) + .log_err(); + }) + 
.detach();
+                    }
+                }
             }
             AlacTermEvent::ColorRequest(idx, fun_ptr) => {
                 self.events
@@ -880,23 +897,6 @@ impl Terminal {
             return;
         };
 
-        if self.update_process_info() {
-            cx.emit(Event::TitleChanged);
-
-            if let Some(foreground_info) = &self.foreground_process_info {
-                let cwd = foreground_info.cwd.clone();
-                let item_id = self.item_id;
-                let workspace_id = self.workspace_id;
-                cx.background()
-                    .spawn(async move {
-                        TERMINAL_CONNECTION
-                            .save_working_directory(item_id, workspace_id, cwd.as_path())
-                            .log_err();
-                    })
-                    .detach();
-            }
-        }
-
         //Note that the ordering of events matters for event processing
         while let Some(e) = self.events.pop_front() {
             self.process_terminal_event(&e, &mut terminal, cx)

From 37174f45f0f8e403e031ee683c1e1f8e6b8c1e87 Mon Sep 17 00:00:00 2001
From: Mikayla Maki
Date: Mon, 21 Nov 2022 10:38:16 -0800
Subject: [PATCH 171/240] Touched up sql macro

---
 crates/db/src/db.rs                 | 57 ++++++++++++-----------------
 crates/editor/src/persistence.rs    | 11 +++---
 crates/terminal/src/persistence.rs  | 14 +++----
 crates/workspace/src/persistence.rs | 47 ++++++++++++++++++++----
 4 files changed, 76 insertions(+), 53 deletions(-)

diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs
index 7ec4a12223..20b2ac142a 100644
--- a/crates/db/src/db.rs
+++ b/crates/db/src/db.rs
@@ -82,36 +82,31 @@ macro_rules! connection {
 }
 
 #[macro_export]
-macro_rules! exec_method {
-    ($id:ident(): $sql:literal) => {
-        pub fn $id(&self) -> $crate::sqlez::anyhow::Result<()> {
-            use $crate::anyhow::Context;
 
-            self.exec($sql)?()
-                .context(::std::format!(
-                    "Error in {}, exec failed to execute or parse for: {}",
-                    ::std::stringify!($id),
-                    ::std::stringify!($sql),
-                ))
-        }
+macro_rules! sql_method {
+    ($id:ident() -> Result<()>: $sql:literal) => {
+        pub fn $id(&self) -> $crate::sqlez::anyhow::Result<()> {
+            use $crate::anyhow::Context;
+
+            self.exec($sql)?().context(::std::format!(
+                "Error in {}, exec failed to execute or parse for: {}",
+                ::std::stringify!($id),
+                ::std::stringify!($sql),
+            ))
+        }
     };
-    ($id:ident($($arg:ident: $arg_type:ty),+): $sql:literal) => {
-        pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<()> {
-            use $crate::anyhow::Context;
 
-            self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+))
-                .context(::std::format!(
-                    "Error in {}, exec_bound failed to execute or parse for: {}",
-                    ::std::stringify!($id),
-                    ::std::stringify!($sql),
-                ))
-        }
+    ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:literal) => {
+        pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<()> {
+            use $crate::anyhow::Context;
+
+            self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+))
+                .context(::std::format!(
+                    "Error in {}, exec_bound failed to execute or parse for: {}",
+                    ::std::stringify!($id),
+                    ::std::stringify!($sql),
+                ))
+        }
     };
-}
-
-#[macro_export]
-macro_rules! select_method {
-    ($id:ident() -> $return_type:ty: $sql:literal) => {
+    ($id:ident() -> Result<Vec<$return_type:ty>>: $sql:literal) => {
         pub fn $id(&self) -> $crate::sqlez::anyhow::Result<Vec<$return_type>> {
             use $crate::anyhow::Context;
 
@@ -123,7 +118,7 @@ macro_rules! select_method {
             ))
         }
     };
-    ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => {
+    ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<Vec<$return_type:ty>>: $sql:literal) => {
         pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<Vec<$return_type>> {
             use $crate::anyhow::Context;
 
@@ -135,11 +130,7 @@ macro_rules! select_method {
             ))
         }
     };
-}
-
-#[macro_export]
-macro_rules! select_row_method {
-    ($id:ident() -> $return_type:ty: $sql:literal) => {
+    ($id:ident() -> Result<Option<$return_type:ty>>: $sql:literal) => {
         pub fn $id(&self) -> $crate::sqlez::anyhow::Result<Option<$return_type>> {
             use $crate::anyhow::Context;
 
@@ -151,7 +142,7 @@ macro_rules! select_row_method {
             ))
         }
     };
-    ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => {
+    ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<Option<$return_type:ty>>: $sql:literal) => {
        pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<Option<$return_type>> {
             use $crate::anyhow::Context;
 
diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs
index 2344037384..b2186e2432 100644
--- a/crates/editor/src/persistence.rs
+++ b/crates/editor/src/persistence.rs
@@ -1,7 +1,7 @@
 use std::path::{Path, PathBuf};
 
 use anyhow::{Context, Result};
-use db::{connection, exec_method};
+use db::{connection, sql_method};
 use indoc::indoc;
 use sqlez::domain::Domain;
 use workspace::{ItemId, Workspace, WorkspaceId};
@@ -39,8 +39,9 @@ impl EditorDb {
             .context("Path not found for serialized editor")
     }
 
-    exec_method!(save_path(item_id: ItemId, workspace_id: WorkspaceId, path: &Path):
-        "INSERT OR REPLACE INTO editors(item_id, workspace_id, path)
-         VALUES (?, ?, ?)"
-    );
+    sql_method! {
+        save_path(item_id: ItemId, workspace_id: WorkspaceId, path: &Path) -> Result<()>:
+            "INSERT OR REPLACE INTO editors(item_id, workspace_id, path)
+             VALUES (?, ?, ?)"
+    }
 }
diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs
index d624724e5c..384dcc18e0 100644
--- a/crates/terminal/src/persistence.rs
+++ b/crates/terminal/src/persistence.rs
@@ -1,6 +1,6 @@
 use std::path::{Path, PathBuf};
 
-use db::{connection, exec_method, indoc, select_row_method, sqlez::domain::Domain};
+use db::{connection, indoc, sql_method, sqlez::domain::Domain};
 
 use workspace::{ItemId, Workspace, WorkspaceId};
 
@@ -28,16 +28,16 @@ impl Domain for Terminal {
 }
 
 impl TerminalDb {
-    exec_method!(
-        save_working_directory(item_id: ItemId, workspace_id: WorkspaceId, working_directory: &Path):
+    sql_method! {
+        save_working_directory(item_id: ItemId, workspace_id: WorkspaceId, working_directory: &Path) -> Result<()>:
         "INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory)
          VALUES (?1, ?2, ?3)"
-    );
+    }
 
-    select_row_method!(
-        get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> PathBuf:
+    sql_method! {
+        get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> Result<Option<PathBuf>>:
         "SELECT working_directory
          FROM terminals
          WHERE item_id = ? AND workspace_id = ?"
-    );
+    }
 }
diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs
index 88a894a922..a4073d27d3 100644
--- a/crates/workspace/src/persistence.rs
+++ b/crates/workspace/src/persistence.rs
@@ -5,7 +5,7 @@ pub mod model;
 use std::path::Path;
 
 use anyhow::{anyhow, bail, Context, Result};
-use db::{connection, select_row_method};
+use db::{connection, sql_method};
 use gpui::Axis;
 use indoc::indoc;
 
@@ -190,10 +190,10 @@ impl WorkspaceDb {
         .log_err();
     }
 
-    select_row_method!(
-        next_id() -> WorkspaceId:
+    sql_method!
{ + next_id() -> Result>: "INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id" - ); + } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, WorkspaceLocation)> { @@ -384,6 +384,37 @@ mod tests { use super::*; + #[test] + fn test_next_id_stability() { + env_logger::try_init().ok(); + + let db = WorkspaceDb(open_memory_db(Some("test_workspace_id_stability"))); + + db.migrate( + "test_table", + &["CREATE TABLE test_table( + text TEXT, + workspace_id INTEGER, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ) STRICT;"], + ) + .unwrap(); + + let id = db.next_id().unwrap(); + + db.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + .unwrap()(("test-text-1", id)) + .unwrap(); + + let test_text_1 = db + .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") + .unwrap()(1) + .unwrap() + .unwrap(); + assert_eq!(test_text_1, "test-text-1"); + } + #[test] fn test_workspace_id_stability() { env_logger::try_init().ok(); @@ -439,19 +470,19 @@ mod tests { }); db.save_workspace(&workspace_2); - let test_text_1 = db + let test_text_2 = db .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") .unwrap()(2) .unwrap() .unwrap(); - assert_eq!(test_text_1, "test-text-2"); + assert_eq!(test_text_2, "test-text-2"); - let test_text_2 = db + let test_text_1 = db .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") .unwrap()(1) .unwrap() .unwrap(); - assert_eq!(test_text_2, "test-text-1"); + assert_eq!(test_text_1, "test-text-1"); } #[test] From 2dc1130902e0936adff67e53d11737a102304071 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 21 Nov 2022 10:52:19 -0800 Subject: [PATCH 172/240] Added extra sql methods --- crates/db/src/db.rs | 46 +++++++++++++++++++++++++++----- crates/editor/src/items.rs | 1 + crates/editor/src/persistence.rs | 16 +++++------ 3 files changed, 49 insertions(+), 14 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 20b2ac142a..bde69fead7 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -83,7 +83,7 @@ macro_rules! connection { #[macro_export] macro_rules! sql_method { - ($id:ident() -> Result<()>: $sql:literal) => { + ($id:ident() -> Result<()>: $sql:expr) => { pub fn $id(&self) -> $crate::sqlez::anyhow::Result<()> { use $crate::anyhow::Context; @@ -94,7 +94,7 @@ macro_rules! sql_method { )) } }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:literal) => { + ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:expr) => { pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<()> { use $crate::anyhow::Context; @@ -106,7 +106,7 @@ macro_rules! sql_method { )) } }; - ($id:ident() -> Result>: $sql:literal) => { + ($id:ident() -> Result>: $sql:expr) => { pub fn $id(&self) -> $crate::sqlez::anyhow::Result> { use $crate::anyhow::Context; @@ -118,7 +118,7 @@ macro_rules! sql_method { )) } }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:literal) => { + ($id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:expr) => { pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result> { use $crate::anyhow::Context; @@ -130,7 +130,7 @@ macro_rules! 
sql_method {
-    ($id:ident() -> Result<()>: $sql:literal) => {
+    ($id:ident() -> Result<()>: $sql:expr) => {
         pub fn $id(&self) -> $crate::sqlez::anyhow::Result<()> {
             use $crate::anyhow::Context;
 
@@ -94,7 +94,7 @@ macro_rules! sql_method {
             ))
         }
     };
-    ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:literal) => {
+    ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:expr) => {
         pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<()> {
             use $crate::anyhow::Context;
 
@@ -106,7 +106,7 @@ macro_rules! sql_method {
             ))
         }
     };
-    ($id:ident() -> Result<Vec<$return_type:ty>>: $sql:literal) => {
+    ($id:ident() -> Result<Vec<$return_type:ty>>: $sql:expr) => {
         pub fn $id(&self) -> $crate::sqlez::anyhow::Result<Vec<$return_type>> {
             use $crate::anyhow::Context;
 
@@ -118,7 +118,7 @@ macro_rules! sql_method {
             ))
         }
     };
-    ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<Vec<$return_type:ty>>: $sql:literal) => {
+    ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<Vec<$return_type:ty>>: $sql:expr) => {
         pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<Vec<$return_type>> {
             use $crate::anyhow::Context;
 
@@ -130,7 +130,7 @@ macro_rules! sql_method {
             ))
         }
     };
-    ($id:ident() -> Result<Option<$return_type:ty>>: $sql:literal) => {
+    ($id:ident() -> Result<Option<$return_type:ty>>: $sql:expr) => {
         pub fn $id(&self) -> $crate::sqlez::anyhow::Result<Option<$return_type>> {
             use $crate::anyhow::Context;
 
@@ -142,7 +142,7 @@ macro_rules! sql_method {
             ))
         }
     };
-    ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<Option<$return_type:ty>>: $sql:literal) => {
+    ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<Option<$return_type:ty>>: $sql:expr) => {
         pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<Option<$return_type>> {
             use $crate::anyhow::Context;
 
@@ -155,4 +155,38 @@ macro_rules! sql_method {
 
         }
     };
+    ($id:ident() -> Result<$return_type:ty>>: $sql:expr) => {
+        pub fn $id(&self) -> $crate::sqlez::anyhow::Result<$return_type> {
+            use $crate::anyhow::Context;
+
+            self.select_row::<$return_type>($sql)?(($($arg),+))
+                .context(::std::format!(
+                    "Error in {}, select_row_bound failed to execute or parse for: {}",
+                    ::std::stringify!($id),
+                    ::std::stringify!($sql),
+                ))?
+                .context(::std::format!(
+                    "Error in {}, select_row_bound expected single row result but found none for: {}",
+                    ::std::stringify!($id),
+                    ::std::stringify!($sql),
+                ))
+        }
+    };
+    ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty>>: $sql:expr) => {
+        pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<$return_type> {
+            use $crate::anyhow::Context;
+
+            self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+))
+                .context(::std::format!(
+                    "Error in {}, select_row_bound failed to execute or parse for: {}",
+                    ::std::stringify!($id),
+                    ::std::stringify!($sql),
+                ))?
+                .context(::std::format!(
+                    "Error in {}, select_row_bound expected single row result but found none for: {}",
+                    ::std::stringify!($id),
+                    ::std::stringify!($sql),
+                ))
+        }
+    };
 }
diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs
index 1e695d2364..aea0d8b437 100644
--- a/crates/editor/src/items.rs
+++ b/crates/editor/src/items.rs
@@ -568,6 +568,7 @@ impl Item for Editor {
         if let Some(project_item) = project.update(cx, |project, cx| {
             // Look up the path with this key associated, create a self with that path
             let path = DB.get_path(item_id, workspace_id).ok()?;
+
             let (worktree, path) = project.find_local_worktree(&path, cx)?;
             let project_path = ProjectPath {
                 worktree_id: worktree.read(cx).id(),
diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs
index b2186e2432..5747558700 100644
--- a/crates/editor/src/persistence.rs
+++ b/crates/editor/src/persistence.rs
@@ -1,6 +1,5 @@
 use std::path::{Path, PathBuf};
 
-use anyhow::{Context, Result};
 use db::{connection, sql_method};
 use indoc::indoc;
 use sqlez::domain::Domain;
@@ -32,16 +31,17 @@ impl Domain for Editor {
 }
 
 impl EditorDb {
-    pub fn get_path(&self, item_id: ItemId, workspace_id: WorkspaceId) -> Result<PathBuf> {
-        self.select_row_bound(indoc! {"
-            SELECT path FROM editors
-            WHERE item_id = ? AND workspace_id = ?"})?((item_id, workspace_id))?
-            .context("Path not found for serialized editor")
+    sql_method! {
+        get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result<PathBuf>>:
+            indoc! {"
+                SELECT path FROM editors
+                WHERE item_id = ? AND workspace_id = ?"}
     }
 
     sql_method! {
         save_path(item_id: ItemId, workspace_id: WorkspaceId, path: &Path) -> Result<()>:
-            "INSERT OR REPLACE INTO editors(item_id, workspace_id, path)
-             VALUES (?, ?, ?)"
+            indoc!
{" + INSERT OR REPLACE INTO editors(item_id, workspace_id, path) + VALUES (?, ?, ?)"} } } From 3e0f9d27a7a9aa9156dda51e80cf944d09205bfb Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 21 Nov 2022 13:42:26 -0800 Subject: [PATCH 173/240] Made dev tools not break everything about the db Also improved multi statements to allow out of order parameter binding in statements Ensured that all statements are run for maybe_row and single, and that of all statements only 1 of them returns only 1 row Made bind and column calls add useful context to errors Co-authored-by: kay@zed.dev --- crates/db/src/db.rs | 33 ++-- crates/editor/src/persistence.rs | 2 +- crates/sqlez/src/bindable.rs | 49 ++++-- crates/sqlez/src/connection.rs | 6 + crates/sqlez/src/statement.rs | 185 +++++++++++++++------- crates/sqlez/src/typed_statements.rs | 10 +- crates/terminal/src/persistence.rs | 18 ++- crates/workspace/src/persistence.rs | 10 +- crates/workspace/src/persistence/model.rs | 4 +- crates/workspace/src/workspace.rs | 12 +- dest-term.db | Bin 0 -> 45056 bytes dest-workspace.db | Bin 0 -> 36864 bytes dest.db | Bin 0 -> 45056 bytes 13 files changed, 219 insertions(+), 110 deletions(-) create mode 100644 dest-term.db create mode 100644 dest-workspace.db create mode 100644 dest.db diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index bde69fead7..b3370db753 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -6,17 +6,11 @@ pub use indoc::indoc; pub use lazy_static; pub use sqlez; -#[cfg(any(test, feature = "test-support"))] -use anyhow::Result; -#[cfg(any(test, feature = "test-support"))] -use sqlez::connection::Connection; -#[cfg(any(test, feature = "test-support"))] -use sqlez::domain::Domain; - use sqlez::domain::Migrator; use sqlez::thread_safe_connection::ThreadSafeConnection; use std::fs::{create_dir_all, remove_dir_all}; use std::path::Path; +use std::sync::atomic::{AtomicBool, Ordering}; use util::channel::{ReleaseChannel, RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; use util::paths::DB_DIR; @@ -28,13 +22,21 @@ const INITIALIZE_QUERY: &'static str = indoc! {" PRAGMA case_sensitive_like=TRUE; "}; +lazy_static::lazy_static! { + static ref DB_WIPED: AtomicBool = AtomicBool::new(false); +} + /// Open or create a database at the given directory path. pub fn open_file_db() -> ThreadSafeConnection { // Use 0 for now. Will implement incrementing and clearing of old db files soon TM let current_db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); - if *RELEASE_CHANNEL == ReleaseChannel::Dev && std::env::var("WIPE_DB").is_ok() { + if *RELEASE_CHANNEL == ReleaseChannel::Dev + && std::env::var("WIPE_DB").is_ok() + && !DB_WIPED.load(Ordering::Acquire) + { remove_dir_all(¤t_db_dir).ok(); + DB_WIPED.store(true, Ordering::Relaxed); } create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); @@ -48,15 +50,6 @@ pub fn open_memory_db(db_name: Option<&str>) -> ThreadSafeConnectio ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY) } -#[cfg(any(test, feature = "test-support"))] -pub fn write_db_to>( - conn: &ThreadSafeConnection, - dest: P, -) -> Result<()> { - let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref()); - conn.backup_main(&destination) -} - /// Implements a basic DB wrapper for a given domain #[macro_export] macro_rules! connection { @@ -155,11 +148,11 @@ macro_rules! 
sql_method { } }; - ($id:ident() -> Result<$return_type:ty>>: $sql:expr) => { + ($id:ident() -> Result<$return_type:ty>: $sql:expr) => { pub fn $id(&self) -> $crate::sqlez::anyhow::Result<$return_type> { use $crate::anyhow::Context; - self.select_row::<$return_type>($sql)?(($($arg),+)) + self.select_row::<$return_type>($sql)?() .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), @@ -172,7 +165,7 @@ macro_rules! sql_method { )) } }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty>>: $sql:expr) => { + ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty>: $sql:expr) => { pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<$return_type> { use $crate::anyhow::Context; diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 5747558700..a77eec7fd1 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -32,7 +32,7 @@ impl Domain for Editor { impl EditorDb { sql_method! { - get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result>: + get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result: indoc! {" SELECT path FROM editors WHERE item_id = ? AND workspace_id = ?"} diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index 18c4acedad..51f67dd03f 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -5,7 +5,7 @@ use std::{ sync::Arc, }; -use anyhow::Result; +use anyhow::{Context, Result}; use crate::statement::{SqlType, Statement}; @@ -19,61 +19,82 @@ pub trait Column: Sized { impl Bind for bool { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - statement.bind(self.then_some(1).unwrap_or(0), start_index) + statement + .bind(self.then_some(1).unwrap_or(0), start_index) + .with_context(|| format!("Failed to bind bool at index {start_index}")) } } impl Column for bool { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - i32::column(statement, start_index).map(|(i, next_index)| (i != 0, next_index)) + i32::column(statement, start_index) + .map(|(i, next_index)| (i != 0, next_index)) + .with_context(|| format!("Failed to read bool at index {start_index}")) } } impl Bind for &[u8] { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - statement.bind_blob(start_index, self)?; + statement + .bind_blob(start_index, self) + .with_context(|| format!("Failed to bind &[u8] at index {start_index}"))?; Ok(start_index + 1) } } impl Bind for &[u8; C] { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - statement.bind_blob(start_index, self.as_slice())?; + statement + .bind_blob(start_index, self.as_slice()) + .with_context(|| format!("Failed to bind &[u8; C] at index {start_index}"))?; Ok(start_index + 1) } } impl Bind for Vec { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - statement.bind_blob(start_index, self)?; + statement + .bind_blob(start_index, self) + .with_context(|| format!("Failed to bind Vec at index {start_index}"))?; Ok(start_index + 1) } } impl Column for Vec { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let result = statement.column_blob(start_index)?; + let result = statement + .column_blob(start_index) + .with_context(|| format!("Failed to read Vec at index {start_index}"))?; + Ok((Vec::from(result), start_index + 1)) } } impl Bind for f64 { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - 
statement.bind_double(start_index, *self)?; + statement + .bind_double(start_index, *self) + .with_context(|| format!("Failed to bind f64 at index {start_index}"))?; Ok(start_index + 1) } } impl Column for f64 { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let result = statement.column_double(start_index)?; + let result = statement + .column_double(start_index) + .with_context(|| format!("Failed to parse f64 at index {start_index}"))?; + Ok((result, start_index + 1)) } } impl Bind for i32 { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - statement.bind_int(start_index, *self)?; + statement + .bind_int(start_index, *self) + .with_context(|| format!("Failed to bind i32 at index {start_index}"))?; + Ok(start_index + 1) } } @@ -87,7 +108,9 @@ impl Column for i32 { impl Bind for i64 { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - statement.bind_int64(start_index, *self)?; + statement + .bind_int64(start_index, *self) + .with_context(|| format!("Failed to bind i64 at index {start_index}"))?; Ok(start_index + 1) } } @@ -101,7 +124,9 @@ impl Column for i64 { impl Bind for usize { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - (*self as i64).bind(statement, start_index) + (*self as i64) + .bind(statement, start_index) + .with_context(|| format!("Failed to bind usize at index {start_index}")) } } diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 1eaeb090e1..5a71cefb52 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -1,6 +1,7 @@ use std::{ ffi::{CStr, CString}, marker::PhantomData, + path::Path, }; use anyhow::{anyhow, Result}; @@ -73,6 +74,11 @@ impl Connection { } } + pub fn backup_main_to(&self, destination: impl AsRef) -> Result<()> { + let destination = Self::open_file(destination.as_ref().to_string_lossy().as_ref()); + self.backup_main(&destination) + } + pub(crate) fn last_error(&self) -> Result<()> { unsafe { let code = sqlite3_errcode(self.sqlite3); diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index 164929010b..0a7305c6ed 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -19,8 +19,6 @@ pub struct Statement<'a> { pub enum StepResult { Row, Done, - Misuse, - Other(i32), } #[derive(Clone, Copy, PartialEq, Eq, Debug)] @@ -40,12 +38,14 @@ impl<'a> Statement<'a> { connection, phantom: PhantomData, }; - unsafe { - let sql = CString::new(query.as_ref())?; + let sql = CString::new(query.as_ref()).context("Error creating cstr")?; let mut remaining_sql = sql.as_c_str(); while { - let remaining_sql_str = remaining_sql.to_str()?.trim(); + let remaining_sql_str = remaining_sql + .to_str() + .context("Parsing remaining sql")? 
+ .trim(); remaining_sql_str != ";" && !remaining_sql_str.is_empty() } { let mut raw_statement = 0 as *mut sqlite3_stmt; @@ -92,116 +92,136 @@ impl<'a> Statement<'a> { } } + fn bind_index_with(&self, index: i32, bind: impl Fn(&*mut sqlite3_stmt) -> ()) -> Result<()> { + let mut any_succeed = false; + unsafe { + for raw_statement in self.raw_statements.iter() { + if index <= sqlite3_bind_parameter_count(*raw_statement) { + bind(raw_statement); + self.connection + .last_error() + .with_context(|| format!("Failed to bind value at index {index}"))?; + any_succeed = true; + } else { + continue; + } + } + } + if any_succeed { + Ok(()) + } else { + Err(anyhow!("Failed to bind parameters")) + } + } + pub fn bind_blob(&self, index: i32, blob: &[u8]) -> Result<()> { let index = index as c_int; let blob_pointer = blob.as_ptr() as *const _; let len = blob.len() as c_int; - unsafe { - for raw_statement in self.raw_statements.iter() { - sqlite3_bind_blob(*raw_statement, index, blob_pointer, len, SQLITE_TRANSIENT()); - } - } - self.connection.last_error() + + self.bind_index_with(index, |raw_statement| unsafe { + sqlite3_bind_blob(*raw_statement, index, blob_pointer, len, SQLITE_TRANSIENT()); + }) } pub fn column_blob<'b>(&'b mut self, index: i32) -> Result<&'b [u8]> { let index = index as c_int; let pointer = unsafe { sqlite3_column_blob(self.current_statement(), index) }; - self.connection.last_error()?; + self.connection + .last_error() + .with_context(|| format!("Failed to read blob at index {index}"))?; if pointer.is_null() { return Ok(&[]); } let len = unsafe { sqlite3_column_bytes(self.current_statement(), index) as usize }; - self.connection.last_error()?; + self.connection + .last_error() + .with_context(|| format!("Failed to read length of blob at index {index}"))?; + unsafe { Ok(slice::from_raw_parts(pointer as *const u8, len)) } } pub fn bind_double(&self, index: i32, double: f64) -> Result<()> { let index = index as c_int; - unsafe { - for raw_statement in self.raw_statements.iter() { - sqlite3_bind_double(*raw_statement, index, double); - } - } - self.connection.last_error() + self.bind_index_with(index, |raw_statement| unsafe { + sqlite3_bind_double(*raw_statement, index, double); + }) } pub fn column_double(&self, index: i32) -> Result { let index = index as c_int; let result = unsafe { sqlite3_column_double(self.current_statement(), index) }; - self.connection.last_error()?; + self.connection + .last_error() + .with_context(|| format!("Failed to read double at index {index}"))?; Ok(result) } pub fn bind_int(&self, index: i32, int: i32) -> Result<()> { let index = index as c_int; - - unsafe { - for raw_statement in self.raw_statements.iter() { - sqlite3_bind_int(*raw_statement, index, int); - } - }; - self.connection.last_error() + self.bind_index_with(index, |raw_statement| unsafe { + sqlite3_bind_int(*raw_statement, index, int); + }) } pub fn column_int(&self, index: i32) -> Result { let index = index as c_int; let result = unsafe { sqlite3_column_int(self.current_statement(), index) }; - self.connection.last_error()?; + self.connection + .last_error() + .with_context(|| format!("Failed to read int at index {index}"))?; Ok(result) } pub fn bind_int64(&self, index: i32, int: i64) -> Result<()> { let index = index as c_int; - unsafe { - for raw_statement in self.raw_statements.iter() { - sqlite3_bind_int64(*raw_statement, index, int); - } - } - self.connection.last_error() + self.bind_index_with(index, |raw_statement| unsafe { + sqlite3_bind_int64(*raw_statement, index, int); + }) } pub 
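// A note on the binding changes: every bind_* setter above now funnels
// through bind_index_with, which offers the value to each prepared
// statement in the multi-statement batch and errors only if no statement
// has a parameter at that index. This is the "out of order parameter
// binding" the commit message describes; the
// binding_multiple_statements_with_parameter_gaps test below exercises it.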
fn column_int64(&self, index: i32) -> Result { let index = index as c_int; let result = unsafe { sqlite3_column_int64(self.current_statement(), index) }; - self.connection.last_error()?; + self.connection + .last_error() + .with_context(|| format!("Failed to read i64 at index {index}"))?; Ok(result) } pub fn bind_null(&self, index: i32) -> Result<()> { let index = index as c_int; - unsafe { - for raw_statement in self.raw_statements.iter() { - sqlite3_bind_null(*raw_statement, index); - } - } - self.connection.last_error() + self.bind_index_with(index, |raw_statement| unsafe { + sqlite3_bind_null(*raw_statement, index); + }) } pub fn bind_text(&self, index: i32, text: &str) -> Result<()> { let index = index as c_int; let text_pointer = text.as_ptr() as *const _; let len = text.len() as c_int; - unsafe { - for raw_statement in self.raw_statements.iter() { - sqlite3_bind_text(*raw_statement, index, text_pointer, len, SQLITE_TRANSIENT()); - } - } - self.connection.last_error() + + self.bind_index_with(index, |raw_statement| unsafe { + sqlite3_bind_text(*raw_statement, index, text_pointer, len, SQLITE_TRANSIENT()); + }) } pub fn column_text<'b>(&'b mut self, index: i32) -> Result<&'b str> { let index = index as c_int; let pointer = unsafe { sqlite3_column_text(self.current_statement(), index) }; - self.connection.last_error()?; + self.connection + .last_error() + .with_context(|| format!("Failed to read text from column {index}"))?; if pointer.is_null() { return Ok(""); } let len = unsafe { sqlite3_column_bytes(self.current_statement(), index) as usize }; - self.connection.last_error()?; + self.connection + .last_error() + .with_context(|| format!("Failed to read text length at {index}"))?; let slice = unsafe { slice::from_raw_parts(pointer as *const u8, len) }; Ok(str::from_utf8(slice)?) @@ -247,11 +267,11 @@ impl<'a> Statement<'a> { self.step() } } - SQLITE_MISUSE => Ok(StepResult::Misuse), - other => self - .connection - .last_error() - .map(|_| StepResult::Other(other)), + SQLITE_MISUSE => Err(anyhow!("Statement step returned SQLITE_MISUSE")), + _other_error => { + self.connection.last_error()?; + unreachable!("Step returned error code and last error failed to catch it"); + } } } } @@ -293,11 +313,17 @@ impl<'a> Statement<'a> { callback: impl FnOnce(&mut Statement) -> Result, ) -> Result { if this.step()? != StepResult::Row { + return Err(anyhow!("single called with query that returns no rows.")); + } + let result = callback(this)?; + + if this.step()? != StepResult::Done { return Err(anyhow!( - "Single(Map) called with query that returns no rows." + "single called with a query that returns more than one row." )); } - callback(this) + + Ok(result) } let result = logic(self, callback); self.reset(); @@ -316,10 +342,21 @@ impl<'a> Statement<'a> { this: &mut Statement, callback: impl FnOnce(&mut Statement) -> Result, ) -> Result> { - if this.step()? != StepResult::Row { + if this.step().context("Failed on step call")? != StepResult::Row { return Ok(None); } - callback(this).map(|r| Some(r)) + + let result = callback(this) + .map(|r| Some(r)) + .context("Failed to parse row result")?; + + if this.step().context("Second step call")? != StepResult::Done { + return Err(anyhow!( + "maybe called with a query that returns more than one row." 
+ )); + } + + Ok(result) } let result = logic(self, callback); self.reset(); @@ -350,6 +387,38 @@ mod test { statement::{Statement, StepResult}, }; + #[test] + fn binding_multiple_statements_with_parameter_gaps() { + let connection = + Connection::open_memory(Some("binding_multiple_statements_with_parameter_gaps")); + + connection + .exec(indoc! {" + CREATE TABLE test ( + col INTEGER + )"}) + .unwrap()() + .unwrap(); + + let statement = Statement::prepare( + &connection, + indoc! {" + INSERT INTO test(col) VALUES (?3); + SELECT * FROM test WHERE col = ?1"}, + ) + .unwrap(); + + statement + .bind_int(1, 1) + .expect("Could not bind parameter to first index"); + statement + .bind_int(2, 2) + .expect("Could not bind parameter to second index"); + statement + .bind_int(3, 3) + .expect("Could not bind parameter to third index"); + } + #[test] fn blob_round_trips() { let connection1 = Connection::open_memory(Some("blob_round_trips")); diff --git a/crates/sqlez/src/typed_statements.rs b/crates/sqlez/src/typed_statements.rs index 98f51b970a..c7d8b20aa5 100644 --- a/crates/sqlez/src/typed_statements.rs +++ b/crates/sqlez/src/typed_statements.rs @@ -1,4 +1,4 @@ -use anyhow::Result; +use anyhow::{Context, Result}; use crate::{ bindable::{Bind, Column}, @@ -49,6 +49,12 @@ impl Connection { query: &str, ) -> Result Result>> { let mut statement = Statement::prepare(&self, query)?; - Ok(move |bindings| statement.with_bindings(bindings)?.maybe_row::()) + Ok(move |bindings| { + statement + .with_bindings(bindings) + .context("Bindings failed")? + .maybe_row::() + .context("Maybe row failed") + }) } } diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index 384dcc18e0..07bca0c66f 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -29,15 +29,21 @@ impl Domain for Terminal { impl TerminalDb { sql_method! { - save_working_directory(item_id: ItemId, workspace_id: WorkspaceId, working_directory: &Path) -> Result<()>: - "INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) - VALUES (?1, ?2, ?3)" + save_working_directory(item_id: ItemId, + workspace_id: WorkspaceId, + working_directory: &Path) -> Result<()>: + indoc!{" + INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) + VALUES (?1, ?2, ?3) + "} } sql_method! { get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> Result>: - "SELECT working_directory - FROM terminals - WHERE item_id = ? AND workspace_id = ?" + indoc!{" + SELECT working_directory + FROM terminals + WHERE item_id = ? AND workspace_id = ? + "} } } diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index a4073d27d3..477e5a4960 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -152,7 +152,7 @@ impl WorkspaceDb { "})?((&workspace.location, workspace.id)) .context("clearing out old locations")?; - // Update or insert + // Upsert self.exec_bound(indoc! { "INSERT INTO workspaces(workspace_id, workspace_location, dock_visible, dock_anchor, timestamp) @@ -190,8 +190,8 @@ impl WorkspaceDb { .log_err(); } - sql_method! 
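// A sketch of why the Option wrapper can be dropped from next_id below:
// INSERT ... DEFAULT VALUES RETURNING yields exactly one row, and the plain
// Result<$return_type> macro arm turns a missing row into an error rather
// than a None. A hypothetical call site:
//
//     let id: WorkspaceId = db.next_id()?;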
{ - next_id() -> Result>: + sql_method!{ + next_id() -> Result: "INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id" } @@ -402,6 +402,10 @@ mod tests { .unwrap(); let id = db.next_id().unwrap(); + // Assert the empty row got inserted + assert_eq!(Some(id), db.select_row_bound:: + ("SELECT workspace_id FROM workspaces WHERE workspace_id = ?").unwrap() + (id).unwrap()); db.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") .unwrap()(("test-text-1", id)) diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index 111a6904c6..2f0bc050d2 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -3,7 +3,7 @@ use std::{ sync::Arc, }; -use anyhow::Result; +use anyhow::{Context, Result}; use async_recursion::async_recursion; use gpui::{AsyncAppContext, Axis, ModelHandle, Task, ViewHandle}; @@ -52,7 +52,7 @@ impl Column for WorkspaceLocation { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { let blob = statement.column_blob(start_index)?; Ok(( - WorkspaceLocation(bincode::deserialize(blob)?), + WorkspaceLocation(bincode::deserialize(blob).context("Bincode failed")?), start_index + 1, )) } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 0a4a6c8740..155c95e4e8 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -633,11 +633,11 @@ impl Workspace { active_call = Some((call, subscriptions)); } - let id = if let Some(id) = serialized_workspace.as_ref().map(|ws| ws.id) { - id - } else { - DB.next_id().log_err().flatten().unwrap_or(0) - }; + let database_id = serialized_workspace + .as_ref() + .map(|ws| ws.id) + .or_else(|| DB.next_id().log_err()) + .unwrap_or(0); let mut this = Workspace { modal: None, @@ -666,7 +666,7 @@ impl Workspace { last_leaders_by_pane: Default::default(), window_edited: false, active_call, - database_id: id, + database_id, _observe_current_user, }; this.project_remote_id_changed(project.read(cx).remote_id(), cx); diff --git a/dest-term.db b/dest-term.db new file mode 100644 index 0000000000000000000000000000000000000000..d6115b0670d7ce5e17610aa1d678d320a1615e5a GIT binary patch literal 45056 zcmWFz^vNtqRY=P(%1ta$FlG>7U}9o$P*7lCU|?ZjVBlsz01zL9VIqt$1|FJ)K`&m4 zm%oL9i?xn{zlV1Tzdv^?S2o9Ow!<77SnJp(;L$rOH5vk=Aut*O!#D(*_&M3d<>eWh zb4wDFa#BlDi*hsb5_5{-3@+y&SH}=ng%C$4A6Esqu!07cf`WoVd45rLaY15oYJ6sj zf~Q}ItGjEE4p=6$BsCX95TraaFFigbvnVyWB)_Oq!O6$p38En&$kW#`C{n@OHBtkl z4rZ*50@Nr?h&ng_AXiU!KMZx63PG-Ju0gJT&aS}-cNL?Ur3rQ#$l?Be3NEfbt|6`p z&W^#(jxMfTnhL=oL7vVbhFomouJViwnRzLx6~%=)nI)<5iKQj^V17J20OAedOz!3^ zc6M=bamIFIa3F%*TMQ;RPy!Du1PM0~6Df2Q{QN@{{6c+vbQE-T6+$vHixo196-qKv z!G2dSRw&C%Emz1)(NQSQS15r>=jE3ul;&j?mZpMzg&bzEs6dztR#K3dha6F8ve}t= zDGDL35g};e1^LC9C7JnoI1Hs#e1KvJg#G;#LIYetNe~)Sn8^U@FO(Dj5-ZjKk@1-+ zc;X#m8gk-;#*Gdl5rIvBr4LOGHgQ{VT!7v+~0ASnbVCQzzU$W2Ym1Er_L z5_sxKR4B^NFHr!Q4M`hlu}qR?5fos^*%2g$hztd=h0wS~&4m#Epc;qbH;g!j z__|mF=6Ov8+*!LxoE4l;TX@0o0d^IbU_*%kun>_!PeiUuOfJbROU2ARpsencpP!SO zmhE`9+yk`FSOYIqKkO7=W0hZN_QVU}6_nRAg+E z1}6`AJQgEZ%qWQiAr46!a6WR(<4GSm`N@gkItSD~Pzd$&3=DOJcrhhEIXk{Avp6#; z2cDZXOw_ht`!A|c`2zWkoW^@ z2PJ%1QU>b>m!xQ^r#w|5B|lHSL?I)wEL9;<0i+XLrYcmXmVnbjNoH~8Nh%yzPlZ~0##N8Ef zRZsAED2_Kout9xl4p1Yvh!Gskjr!nd&dp3O0>?rzg2jXq%?NR5doU$GH!(90>{Uo~ z6_=zIz(WxnAaFfkRa~093=9lR>>>;tV(cOyH;m%Z5Eu=C(GVC7fzc2csUgr<&%`Rr z(Od`{VnC^MVS>fbIu~A#Beks{l`p6liXjM6KG?@#thxHr8Cm5y=G%i@3myqV2?Dri z@o4jszRgQ(F4O@*P%Ct_7YOb{4EtUnwz!98KyWfZ3;`?P>DnQwf{%KFI|f5xoCG}x 
zMhEK@kbA0V1Cbd0Rai*@8!Ey#SZ57R6rd3;7)C95K$ROb^?{g3^&Fxr4H~f-9Zd(# zZotBDu#Ki$bAkH*3<4vyr8nyD(GVC7fzc2c4S~@R7!85Z5Eu=C(GVC7fzc2c4S~@R z7#ShJ1@h)-{~r_zqj)p~MnhmU1V%$(Gz3ONU^E0qLtr!nMnhmU1V%$(Gz5li2#n7E z58J36_1I_#jE2By2#kinXb6mkz-S1JhQMeDjE2By2#kinXb8YVV6^`a4~kL7Xb6mk zz-S1JhQMeDjE2By2#kinXb6mkz-S1JhQMeD4BHSG?f(zks2%m#Xb6mkz-S1JhQMeD zjE2By2#kinXb6mkz-S1JhQMeDz(ZiP{|^s}QO0NpjE2By2#kinXb6mkz-S1JhQMeD zjE2By2#kinXb2435EwoGZ`eldsK-V_U^E0qLtr!nMnhmU1V%$(Gz3ONU^E0qLtr!n zMneD|0;BW)@SqrFjE2By2#kinXb6mkz-S1JhQMeDjE2By2#kinXb6mkz_1O0(fR*j s8?~bz8x4Wc5Eu=C(GVC7fzc2c4S~@R7!85Z5Eu=C(GVC70eA=i00J@7NB{r; literal 0 HcmV?d00001 diff --git a/dest-workspace.db b/dest-workspace.db new file mode 100644 index 0000000000000000000000000000000000000000..90682f86421cbe4f9180ef12caecb0620d5218ee GIT binary patch literal 36864 zcmWFz^vNtqRY=P(%1ta$FlG>7U}9o$P*7lCU|?lnVBlmx01%%A!DV1XV&h`6GU&xi z@$$DYaIhU_;P2sG!tc-B%9YJ=o9!^i23(p(#YaP6Gz3ONU^E0qLx6}7XwG717Z(?2 zY&R}ROv*{kEJ@8R1`{04L9UJ=t_mTJPCl**U?B}I1qB5#Gd?p#!P76q)!j8n!OuTL z!7tRuM@KiySN}RITfq9U?l~Kd3a>AGxJguLR=$45Uzpf$}i3=$;{8gbSzlf%|FQ1)7?+O z+ci=H*$tWsL9TADL9TwzuE7d$iDFbWU=u(A;_s*6;_BlX;;P{680_rm;)*O28sGv7 zMW~DpSPI!%sJ~Eb1c?=EfXMjF6iu)etoA@m3kdS`bqtCGdjb?q@u0AVI~r_)rb2K? zkf(EqCI_3itvDk?W?o8aMR8$HW=U#%VrfY}m>&<)TpVu*rZ}5<+1SNZRT*1j!RaD7 zHLoPKC?4ddVq`WuN-9B?fg~1?2INRZaxBF41&Kwec_s1bMfs%#ND9G;36!c7a#IuY zKq&i_zfeDA-*owfO%e10W&GV zqOM7t6`W67c){@j@_R9uU_*%kun>_!kDNwuWRS$JBPoE5z#66~Qm9#p3l=ghGA!)k%F2xG_3)5^1}rErpiEYbP=ZQ8!U`4x zaGzqNQ@G=i5-ECNLntX{=A~pNCzhmQPtJMyDUi|-cXB4ufW(T-Vzkl_6k)D;rMXrL z3hH60MJ1WZi8<;D`U>hE`9+yk`FSOYIqKkO7=W0B6a>f?qZJA$sR$`jiOM+58cgis zii(VF(%|F)kH=yJiy0+xAjBbQ1I|Z|c|7SOCqFq6T<19X_&X_t`gsP1x5_RfM`6VU!x!`yU%1qBF0kd2y3KH{D zQd1!D2i6V>BUn-f>j#&lXsM?>RUsumPrXDTBe5)1AyEOO6I`Y$RHl}I(?UsRZfbE! zVr~K0Aqp<8ZjPZoAqvi+K|!v5A@L!ezOKO`j=lkC`2aa4L56~B4U9NMs^DNb*_fG4 z++7h@^#qTH;&?*@8`P)f0JU?97{Srps1J_j+|2YMa4ZxfSWGC#iJoG8UmvsFd71*Aut*OBR2&4 z(-~RiIp*8L8!0I54!CGBq}c)Ik2Wu9+`P2r;zEfDP%C+~7YOb{4EtUnwz!98KyWfZ z8qdVjwL?+`AN2%x42Hru33?EW4%R6k_f)Al=3os@6rd3;7)C95z(cH%;vd9BF8Sf< z3N&IfI+{M1N7Jpj!2N&zCk*^gMs90o)aRoiFd71*Aut*OqaiRF0;3@?8UmvsFd71* zAut*OqaiRHLO_a<9qbKbBLgENT|+}%BSQrvLn~t=D-$Cw(ELCD9|rzEU?ro(Xb6mk zz-S1JhQMeDjE2By2#kinXb6mkz-S1JhQMeDjE2B43jt%4S~@R7!85Z z5Eu=C(GVC7fzc2c4S~@R7!85Z5EzyrFxvkgmeD%usnHM^4S~@R7!85Z5Eu=C(GVC7 zfzc2c4S~@R7!85Z5I_lm(f&V5aEx+BLtr!nMnhmU1V%$(Gz3ONU^E0qLtr!nMnhmU z1V%$(ScU-T{J+ut|HCqBM?Ezf0;3@?8UmvsFd71*Aut*OqaiRF0;3@?8UmvsFd72z G5C8y*?Izv; literal 0 HcmV?d00001 diff --git a/dest.db b/dest.db new file mode 100644 index 0000000000000000000000000000000000000000..e378341661f624e7b45f7401f74ea0980eb02e47 GIT binary patch literal 45056 zcmWFz^vNtqRY=P(%1ta$FlG>7U}9o$P*7lCU|?imVBlsz01%%A!3E1Baj`NP8T8_% zc==lxxLE5L_40T2OHy+&1VPF(^U~u}GK*4^OY(~<6`Xwhogf+lf;@d4gCZ5YT_ZI> z>R`s|C_s(Ugs5}#4|4T%_rp-9sSxDq<{ISc=jlUb4)pIBOw59Y_i10dcI&g5>+ zVrLf@7iVlY1_vU@y~SXH110dlLXdC+F_A(?!OuTL!7tRuM@K`>#Rn*sK-k|;AvC}RlmwwMg_#VX{z6FsAhBW%5E-AD zf+yY~rXeRjXx!)^5)s%0So+Z9U=z0$$CW-nnv3HN!4zjRFB`kKsw!h^EI3^xr{7Rw}QMot4D6F~unoE<@8h{#X?TL_I?)LaPh52|q}e#3}k zh_8z^V4l}hz@4?5#96`lw1pQOA7EF32{x1%01FWr^hD&k#N?99vQ*661Ip@7`T04i ziFx1vLH0GWu_#`KOQ02O3Sc9!hAD~^YF2_5dMz?6?BdGGjP3RCkbwp)C@`Q*R*X=B zNT2zvmoS37ops%3rkzbTqm7iCVn4=Dkh5?94+Gd<)4JLMRMMcIo zX>js@$73;q#f*|T5aN)u0p}yfJf8HClb@Uju5&=`1BFmO&%jVuh!<1xle6Q?GK({l za^QIi9Jx+NSqZEtF)ukIzX%+MUyshvIlc1RK<+<^VNyix|Pt+^7$Z=G@HmB5*7eBUnr*(Tos>wg*%4a}zW3z+Qz! zS8+*d0X!7J0Rqh^s%fP_E#4f_XA;vBOa>FPd4S~@R7!85Z5Eu=Cks1Px^-Qd? 
z9LD#=t=0Y711hqm(dx79S#IWxLVvBod1_UPq#1OCoo~|8|D)^`;xMMIB#!1kF zV05rf0lBA&HV}!?Uxk$vu%RM+gLT&6L;)Jnf??E>2UNL1Qy++lRL>#0(x4HW(b06! z>;^0h2is`6H5aJ=&mb^TTY97Z9u0xf5Eu=C(GVC7fzc2c4S~@R7!85Z5Eu=C(GVC7 zfsqjcTp(|b_WwblFp5V*U^E0qLtr!nMnhmU1V%$(Gz3ONU^E0qLtr!nMnhoOhQR3j z|FDhPQICy=z-S1JhQMeDjE2By2#kinXb6mkz-S1JhQMeDjD`R_1V;P+@SqrFjE2By z2#kinXb6mkz-S1JhQMeDjE2By2#kinXb6mkz_1O0(f Date: Mon, 21 Nov 2022 13:43:43 -0800 Subject: [PATCH 174/240] Removed database test files --- .gitignore | 2 +- dest-term.db | Bin 45056 -> 0 bytes dest-workspace.db | Bin 36864 -> 0 bytes dest.db | Bin 45056 -> 0 bytes 4 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 dest-term.db delete mode 100644 dest-workspace.db delete mode 100644 dest.db diff --git a/.gitignore b/.gitignore index e2d90adbb1..356f4d97cd 100644 --- a/.gitignore +++ b/.gitignore @@ -18,4 +18,4 @@ DerivedData/ .swiftpm/config/registries.json .swiftpm/xcode/package.xcworkspace/contents.xcworkspacedata .netrc -crates/db/test-db.db +**/*.db diff --git a/dest-term.db b/dest-term.db deleted file mode 100644 index d6115b0670d7ce5e17610aa1d678d320a1615e5a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 45056 zcmWFz^vNtqRY=P(%1ta$FlG>7U}9o$P*7lCU|?ZjVBlsz01zL9VIqt$1|FJ)K`&m4 zm%oL9i?xn{zlV1Tzdv^?S2o9Ow!<77SnJp(;L$rOH5vk=Aut*O!#D(*_&M3d<>eWh zb4wDFa#BlDi*hsb5_5{-3@+y&SH}=ng%C$4A6Esqu!07cf`WoVd45rLaY15oYJ6sj zf~Q}ItGjEE4p=6$BsCX95TraaFFigbvnVyWB)_Oq!O6$p38En&$kW#`C{n@OHBtkl z4rZ*50@Nr?h&ng_AXiU!KMZx63PG-Ju0gJT&aS}-cNL?Ur3rQ#$l?Be3NEfbt|6`p z&W^#(jxMfTnhL=oL7vVbhFomouJViwnRzLx6~%=)nI)<5iKQj^V17J20OAedOz!3^ zc6M=bamIFIa3F%*TMQ;RPy!Du1PM0~6Df2Q{QN@{{6c+vbQE-T6+$vHixo196-qKv z!G2dSRw&C%Emz1)(NQSQS15r>=jE3ul;&j?mZpMzg&bzEs6dztR#K3dha6F8ve}t= zDGDL35g};e1^LC9C7JnoI1Hs#e1KvJg#G;#LIYetNe~)Sn8^U@FO(Dj5-ZjKk@1-+ zc;X#m8gk-;#*Gdl5rIvBr4LOGHgQ{VT!7v+~0ASnbVCQzzU$W2Ym1Er_L z5_sxKR4B^NFHr!Q4M`hlu}qR?5fos^*%2g$hztd=h0wS~&4m#Epc;qbH;g!j z__|mF=6Ov8+*!LxoE4l;TX@0o0d^IbU_*%kun>_!PeiUuOfJbROU2ARpsencpP!SO zmhE`9+yk`FSOYIqKkO7=W0hZN_QVU}6_nRAg+E z1}6`AJQgEZ%qWQiAr46!a6WR(<4GSm`N@gkItSD~Pzd$&3=DOJcrhhEIXk{Avp6#; z2cDZXOw_ht`!A|c`2zWkoW^@ z2PJ%1QU>b>m!xQ^r#w|5B|lHSL?I)wEL9;<0i+XLrYcmXmVnbjNoH~8Nh%yzPlZ~0##N8Ef zRZsAED2_Kout9xl4p1Yvh!Gskjr!nd&dp3O0>?rzg2jXq%?NR5doU$GH!(90>{Uo~ z6_=zIz(WxnAaFfkRa~093=9lR>>>;tV(cOyH;m%Z5Eu=C(GVC7fzc2csUgr<&%`Rr z(Od`{VnC^MVS>fbIu~A#Beks{l`p6liXjM6KG?@#thxHr8Cm5y=G%i@3myqV2?Dri z@o4jszRgQ(F4O@*P%Ct_7YOb{4EtUnwz!98KyWfZ3;`?P>DnQwf{%KFI|f5xoCG}x zMhEK@kbA0V1Cbd0Rai*@8!Ey#SZ57R6rd3;7)C95K$ROb^?{g3^&Fxr4H~f-9Zd(# zZotBDu#Ki$bAkH*3<4vyr8nyD(GVC7fzc2c4S~@R7!85Z5Eu=C(GVC7fzc2c4S~@R z7#ShJ1@h)-{~r_zqj)p~MnhmU1V%$(Gz3ONU^E0qLtr!nMnhmU1V%$(Gz5li2#n7E z58J36_1I_#jE2By2#kinXb6mkz-S1JhQMeDjE2By2#kinXb8YVV6^`a4~kL7Xb6mk zz-S1JhQMeDjE2By2#kinXb6mkz-S1JhQMeD4BHSG?f(zks2%m#Xb6mkz-S1JhQMeD zjE2By2#kinXb6mkz-S1JhQMeDz(ZiP{|^s}QO0NpjE2By2#kinXb6mkz-S1JhQMeD zjE2By2#kinXb2435EwoGZ`eldsK-V_U^E0qLtr!nMnhmU1V%$(Gz3ONU^E0qLtr!n zMneD|0;BW)@SqrFjE2By2#kinXb6mkz-S1JhQMeDjE2By2#kinXb6mkz_1O0(fR*j s8?~bz8x4Wc5Eu=C(GVC7fzc2c4S~@R7!85Z5Eu=C(GVC70eA=i00J@7NB{r; diff --git a/dest-workspace.db b/dest-workspace.db deleted file mode 100644 index 90682f86421cbe4f9180ef12caecb0620d5218ee..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 36864 zcmWFz^vNtqRY=P(%1ta$FlG>7U}9o$P*7lCU|?lnVBlmx01%%A!DV1XV&h`6GU&xi z@$$DYaIhU_;P2sG!tc-B%9YJ=o9!^i23(p(#YaP6Gz3ONU^E0qLx6}7XwG717Z(?2 zY&R}ROv*{kEJ@8R1`{04L9UJ=t_mTJPCl**U?B}I1qB5#Gd?p#!P76q)!j8n!OuTL 
z!7tRuM@KiySN}RITfq9U?l~Kd3a>AGxJguLR=$45Uzpf$}i3=$;{8gbSzlf%|FQ1)7?+O z+ci=H*$tWsL9TADL9TwzuE7d$iDFbWU=u(A;_s*6;_BlX;;P{680_rm;)*O28sGv7 zMW~DpSPI!%sJ~Eb1c?=EfXMjF6iu)etoA@m3kdS`bqtCGdjb?q@u0AVI~r_)rb2K? zkf(EqCI_3itvDk?W?o8aMR8$HW=U#%VrfY}m>&<)TpVu*rZ}5<+1SNZRT*1j!RaD7 zHLoPKC?4ddVq`WuN-9B?fg~1?2INRZaxBF41&Kwec_s1bMfs%#ND9G;36!c7a#IuY zKq&i_zfeDA-*owfO%e10W&GV zqOM7t6`W67c){@j@_R9uU_*%kun>_!kDNwuWRS$JBPoE5z#66~Qm9#p3l=ghGA!)k%F2xG_3)5^1}rErpiEYbP=ZQ8!U`4x zaGzqNQ@G=i5-ECNLntX{=A~pNCzhmQPtJMyDUi|-cXB4ufW(T-Vzkl_6k)D;rMXrL z3hH60MJ1WZi8<;D`U>hE`9+yk`FSOYIqKkO7=W0B6a>f?qZJA$sR$`jiOM+58cgis zii(VF(%|F)kH=yJiy0+xAjBbQ1I|Z|c|7SOCqFq6T<19X_&X_t`gsP1x5_RfM`6VU!x!`yU%1qBF0kd2y3KH{D zQd1!D2i6V>BUn-f>j#&lXsM?>RUsumPrXDTBe5)1AyEOO6I`Y$RHl}I(?UsRZfbE! zVr~K0Aqp<8ZjPZoAqvi+K|!v5A@L!ezOKO`j=lkC`2aa4L56~B4U9NMs^DNb*_fG4 z++7h@^#qTH;&?*@8`P)f0JU?97{Srps1J_j+|2YMa4ZxfSWGC#iJoG8UmvsFd71*Aut*OBR2&4 z(-~RiIp*8L8!0I54!CGBq}c)Ik2Wu9+`P2r;zEfDP%C+~7YOb{4EtUnwz!98KyWfZ z8qdVjwL?+`AN2%x42Hru33?EW4%R6k_f)Al=3os@6rd3;7)C95z(cH%;vd9BF8Sf< z3N&IfI+{M1N7Jpj!2N&zCk*^gMs90o)aRoiFd71*Aut*OqaiRF0;3@?8UmvsFd71* zAut*OqaiRHLO_a<9qbKbBLgENT|+}%BSQrvLn~t=D-$Cw(ELCD9|rzEU?ro(Xb6mk zz-S1JhQMeDjE2By2#kinXb6mkz-S1JhQMeDjE2B43jt%4S~@R7!85Z z5Eu=C(GVC7fzc2c4S~@R7!85Z5EzyrFxvkgmeD%usnHM^4S~@R7!85Z5Eu=C(GVC7 zfzc2c4S~@R7!85Z5I_lm(f&V5aEx+BLtr!nMnhmU1V%$(Gz3ONU^E0qLtr!nMnhmU z1V%$(ScU-T{J+ut|HCqBM?Ezf0;3@?8UmvsFd71*Aut*OqaiRF0;3@?8UmvsFd72z G5C8y*?Izv; diff --git a/dest.db b/dest.db deleted file mode 100644 index e378341661f624e7b45f7401f74ea0980eb02e47..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 45056 zcmWFz^vNtqRY=P(%1ta$FlG>7U}9o$P*7lCU|?imVBlsz01%%A!3E1Baj`NP8T8_% zc==lxxLE5L_40T2OHy+&1VPF(^U~u}GK*4^OY(~<6`Xwhogf+lf;@d4gCZ5YT_ZI> z>R`s|C_s(Ugs5}#4|4T%_rp-9sSxDq<{ISc=jlUb4)pIBOw59Y_i10dcI&g5>+ zVrLf@7iVlY1_vU@y~SXH110dlLXdC+F_A(?!OuTL!7tRuM@K`>#Rn*sK-k|;AvC}RlmwwMg_#VX{z6FsAhBW%5E-AD zf+yY~rXeRjXx!)^5)s%0So+Z9U=z0$$CW-nnv3HN!4zjRFB`kKsw!h^EI3^xr{7Rw}QMot4D6F~unoE<@8h{#X?TL_I?)LaPh52|q}e#3}k zh_8z^V4l}hz@4?5#96`lw1pQOA7EF32{x1%01FWr^hD&k#N?99vQ*661Ip@7`T04i ziFx1vLH0GWu_#`KOQ02O3Sc9!hAD~^YF2_5dMz?6?BdGGjP3RCkbwp)C@`Q*R*X=B zNT2zvmoS37ops%3rkzbTqm7iCVn4=Dkh5?94+Gd<)4JLMRMMcIo zX>js@$73;q#f*|T5aN)u0p}yfJf8HClb@Uju5&=`1BFmO&%jVuh!<1xle6Q?GK({l za^QIi9Jx+NSqZEtF)ukIzX%+MUyshvIlc1RK<+<^VNyix|Pt+^7$Z=G@HmB5*7eBUnr*(Tos>wg*%4a}zW3z+Qz! zS8+*d0X!7J0Rqh^s%fP_E#4f_XA;vBOa>FPd4S~@R7!85Z5Eu=Cks1Px^-Qd? z9LD#=t=0Y711hqm(dx79S#IWxLVvBod1_UPq#1OCoo~|8|D)^`;xMMIB#!1kF zV05rf0lBA&HV}!?Uxk$vu%RM+gLT&6L;)Jnf??E>2UNL1Qy++lRL>#0(x4HW(b06! z>;^0h2is`6H5aJ=&mb^TTY97Z9u0xf5Eu=C(GVC7fzc2c4S~@R7!85Z5Eu=C(GVC7 zfsqjcTp(|b_WwblFp5V*U^E0qLtr!nMnhmU1V%$(Gz3ONU^E0qLtr!nMnhoOhQR3j z|FDhPQICy=z-S1JhQMeDjE2By2#kinXb6mkz-S1JhQMeDjD`R_1V;P+@SqrFjE2By z2#kinXb6mkz-S1JhQMeDjE2By2#kinXb6mkz_1O0(f Date: Wed, 23 Nov 2022 01:53:58 -0800 Subject: [PATCH 175/240] working serialized writes with panics on failure. 
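A note for orientation: the async sql_method! variants this commit adds all
share one shape -- the generated method moves its query into
self.write(|connection| ...), which queues the closure onto the connection's
single writer, and the caller awaits the result. A hedged usage sketch,
assuming next_id is regenerated by the new async arm:

    // Hypothetical call site; the names come from this patch series, only
    // the async-ness is new in this commit.
    let id: WorkspaceId = DB.next_id().await?;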
Everything seems to be working --- Cargo.lock | 3 + crates/collab/src/integration_tests.rs | 4 + crates/collab_ui/src/collab_ui.rs | 1 + crates/command_palette/src/command_palette.rs | 2 +- crates/db/src/db.rs | 143 ++++++++- crates/db/src/kvp.rs | 2 +- crates/db/test.db | Bin 40960 -> 0 bytes crates/diagnostics/src/diagnostics.rs | 1 + crates/editor/src/editor.rs | 29 +- crates/editor/src/items.rs | 39 ++- crates/editor/src/persistence.rs | 4 +- .../src/test/editor_lsp_test_context.rs | 1 + crates/file_finder/src/file_finder.rs | 12 +- crates/project_panel/src/project_panel.rs | 2 + crates/sqlez/Cargo.toml | 5 +- crates/sqlez/src/bindable.rs | 12 + crates/sqlez/src/connection.rs | 12 +- crates/sqlez/src/lib.rs | 5 +- crates/sqlez/src/migrations.rs | 58 ++-- crates/sqlez/src/statement.rs | 11 +- crates/sqlez/src/thread_safe_connection.rs | 133 +++++--- crates/sqlez/src/util.rs | 28 ++ crates/terminal/src/persistence.rs | 40 ++- crates/terminal/src/terminal.rs | 21 +- .../terminal/src/terminal_container_view.rs | 8 + .../src/tests/terminal_test_context.rs | 1 + crates/vim/src/test/vim_test_context.rs | 1 + crates/workspace/src/dock.rs | 2 +- crates/workspace/src/item.rs | 11 +- crates/workspace/src/pane.rs | 8 +- crates/workspace/src/persistence.rs | 295 ++++++++++-------- crates/workspace/src/persistence/model.rs | 2 +- crates/workspace/src/workspace.rs | 71 +++-- crates/zed/src/zed.rs | 14 +- 34 files changed, 669 insertions(+), 312 deletions(-) delete mode 100644 crates/db/test.db create mode 100644 crates/sqlez/src/util.rs diff --git a/Cargo.lock b/Cargo.lock index e887dfee66..150149c529 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5590,8 +5590,11 @@ name = "sqlez" version = "0.1.0" dependencies = [ "anyhow", + "futures 0.3.25", "indoc", + "lazy_static", "libsqlite3-sys", + "parking_lot 0.11.2", "thread_local", ] diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 386ccfbbff..989f0ac586 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -909,6 +909,7 @@ async fn test_host_disconnect( let (_, workspace_b) = cx_b.add_window(|cx| { Workspace::new( Default::default(), + 0, project_b.clone(), |_, _| unimplemented!(), cx, @@ -3711,6 +3712,7 @@ async fn test_collaborating_with_code_actions( let (_window_b, workspace_b) = cx_b.add_window(|cx| { Workspace::new( Default::default(), + 0, project_b.clone(), |_, _| unimplemented!(), cx, @@ -3938,6 +3940,7 @@ async fn test_collaborating_with_renames(cx_a: &mut TestAppContext, cx_b: &mut T let (_window_b, workspace_b) = cx_b.add_window(|cx| { Workspace::new( Default::default(), + 0, project_b.clone(), |_, _| unimplemented!(), cx, @@ -6075,6 +6078,7 @@ impl TestClient { cx.add_view(&root_view, |cx| { Workspace::new( Default::default(), + 0, project.clone(), |_, _| unimplemented!(), cx, diff --git a/crates/collab_ui/src/collab_ui.rs b/crates/collab_ui/src/collab_ui.rs index 3a20a2fc69..964cec0f82 100644 --- a/crates/collab_ui/src/collab_ui.rs +++ b/crates/collab_ui/src/collab_ui.rs @@ -53,6 +53,7 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { let (_, workspace) = cx.add_window((app_state.build_window_options)(), |cx| { let mut workspace = Workspace::new( Default::default(), + 0, project, app_state.default_item_factory, cx, diff --git a/crates/command_palette/src/command_palette.rs b/crates/command_palette/src/command_palette.rs index 5af23b45d7..3742e36c72 100644 --- a/crates/command_palette/src/command_palette.rs +++ 
b/crates/command_palette/src/command_palette.rs @@ -351,7 +351,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), [], cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let editor = cx.add_view(&workspace, |cx| { let mut editor = Editor::single_line(None, cx); diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index b3370db753..b42b264b56 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -42,11 +42,11 @@ pub fn open_file_db() -> ThreadSafeConnection { create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); let db_path = current_db_dir.join(Path::new("db.sqlite")); - ThreadSafeConnection::new(Some(db_path.to_string_lossy().as_ref()), true) + ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) .with_initialize_query(INITIALIZE_QUERY) } -pub fn open_memory_db(db_name: Option<&str>) -> ThreadSafeConnection { +pub fn open_memory_db(db_name: &str) -> ThreadSafeConnection { ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY) } @@ -66,7 +66,7 @@ macro_rules! connection { ::db::lazy_static::lazy_static! { pub static ref $id: $t = $t(if cfg!(any(test, feature = "test-support")) { - ::db::open_memory_db(None) + ::db::open_memory_db(stringify!($id)) } else { ::db::open_file_db() }); @@ -77,7 +77,7 @@ macro_rules! connection { #[macro_export] macro_rules! sql_method { ($id:ident() -> Result<()>: $sql:expr) => { - pub fn $id(&self) -> $crate::sqlez::anyhow::Result<()> { + pub fn $id(&self) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; self.exec($sql)?().context(::std::format!( @@ -87,8 +87,21 @@ macro_rules! sql_method { )) } }; + (async $id:ident() -> Result<()>: $sql:expr) => { + pub async fn $id(&self) -> $crate::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.write(|connection| { + connection.exec($sql)?().context(::std::format!( + "Error in {}, exec failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + }).await + } + }; ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:expr) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<()> { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+)) @@ -99,8 +112,22 @@ macro_rules! sql_method { )) } }; + (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:expr) => { + pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.write(move |connection| { + connection.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + }).await + } + }; ($id:ident() -> Result>: $sql:expr) => { - pub fn $id(&self) -> $crate::sqlez::anyhow::Result> { + pub fn $id(&self) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.select::<$return_type>($sql)?(()) @@ -111,8 +138,22 @@ macro_rules! 
sql_method { )) } }; + (async $id:ident() -> Result>: $sql:expr) => { + pub async fn $id(&self) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + self.write(|connection| { + connection.select::<$return_type>($sql)?(()) + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + }).await + } + }; ($id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:expr) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result> { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.select_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) @@ -123,8 +164,22 @@ macro_rules! sql_method { )) } }; + (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:expr) => { + pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + self.write(|connection| { + connection.select_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + }).await + } + }; ($id:ident() -> Result>: $sql:expr) => { - pub fn $id(&self) -> $crate::sqlez::anyhow::Result> { + pub fn $id(&self) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.select_row::<$return_type>($sql)?() @@ -135,8 +190,22 @@ macro_rules! sql_method { )) } }; + (async $id:ident() -> Result>: $sql:expr) => { + pub async fn $id(&self) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + self.write(|connection| { + connection.select_row::<$return_type>($sql)?() + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + }).await + } + }; ($id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:expr) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result> { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) @@ -148,8 +217,22 @@ macro_rules! sql_method { } }; + (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:expr) => { + pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + self.write(|connection| { + connection.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + }).await + } + }; ($id:ident() -> Result<$return_type:ty>: $sql:expr) => { - pub fn $id(&self) -> $crate::sqlez::anyhow::Result<$return_type> { + pub fn $id(&self) -> $crate::anyhow::Result<$return_type> { use $crate::anyhow::Context; self.select_row::<$return_type>($sql)?() @@ -165,8 +248,27 @@ macro_rules! sql_method { )) } }; + (async $id:ident() -> Result<$return_type:ty>: $sql:expr) => { + pub async fn $id(&self) -> $crate::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + self.write(|connection| { + connection.select_row::<$return_type>($sql)?() + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + ))? 
+                .context(::std::format!(
+                    "Error in {}, select_row_bound expected single row result but found none for: {}",
+                    ::std::stringify!($id),
+                    ::std::stringify!($sql),
+                ))
+            }).await
+        }
+    };
     ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty>: $sql:expr) => {
-        pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<$return_type> {
+        pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> {
             use $crate::anyhow::Context;

             self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+))
@@ -182,4 +284,23 @@ macro_rules! sql_method {
             ))
         }
     };
+    (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty>: $sql:expr) => {
+        pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> {
+            use $crate::anyhow::Context;
+
+            self.write(|connection| {
+                connection.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+))
+                    .context(::std::format!(
+                        "Error in {}, select_row_bound failed to execute or parse for: {}",
+                        ::std::stringify!($id),
+                        ::std::stringify!($sql),
+                    ))?
+                    .context(::std::format!(
+                        "Error in {}, select_row_bound expected single row result but found none for: {}",
+                        ::std::stringify!($id),
+                        ::std::stringify!($sql),
+                    ))
+            }).await
+        }
+    };
 }
diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs
index 3cdcd99016..dd82c17615 100644
--- a/crates/db/src/kvp.rs
+++ b/crates/db/src/kvp.rs
@@ -61,7 +61,7 @@ mod tests {

     #[test]
     fn test_kvp() -> Result<()> {
-        let db = KeyValueStore(crate::open_memory_db(Some("test_kvp")));
+        let db = KeyValueStore(crate::open_memory_db("test_kvp"));

         assert_eq!(db.read_kvp("key-1").unwrap(), None);

diff --git a/crates/db/test.db b/crates/db/test.db
deleted file mode 100644
index cedefe5f832586d90e62e9a50c9e8c7506cf81e7..0000000000000000000000000000000000000000
GIT binary patch
[binary patch data omitted]
diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs
--- a/crates/editor/src/editor.rs
+++ b/crates/editor/src/editor.rs
     searchable: bool,
     cursor_shape: CursorShape,
+    workspace_id: Option<WorkspaceId>,
     keymap_context_layers: BTreeMap,
     input_enabled: bool,
     leader_replica_id: Option<u16>,
@@ -1137,31 +1138,6 @@ impl Editor {
         cx: &mut ViewContext<Self>,
     ) -> Self {
         let buffer = cx.add_model(|cx| MultiBuffer::singleton(buffer, cx));
-        // if let Some(project) = project.as_ref() {
-        //     if let Some(file) = buffer
-        //         .read(cx)
-        //         .as_singleton()
-        //         .and_then(|buffer| buffer.read(cx).file())
-        //         .and_then(|file| file.as_local())
-        //     {
-        //         // let item_id = cx.weak_handle().id();
-        //         // let workspace_id = project
-        //         //     .read(cx)
-        //         //     .visible_worktrees(cx)
-        //         //     .map(|worktree| worktree.read(cx).abs_path())
-        //         //     .collect::<Vec<_>>()
-        //         //     .into();
-        //         let path = file.abs_path(cx);
-        //         dbg!(&path);
-
-        //         // cx.background()
-        //         //     .spawn(async move {
-        //         //         DB.save_path(item_id, workspace_id, path).log_err();
-        //         //     })
-        //         //     .detach();
-        //     }
-        // }
-
         Self::new(EditorMode::Full, buffer, project, None, cx)
     }
@@ -1262,6 +1238,7 @@ impl Editor {
             searchable: true,
             override_text_style: None,
             cursor_shape: Default::default(),
+            workspace_id: None,
             keymap_context_layers: Default::default(),
             input_enabled: true,
             leader_replica_id: None,
diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs
index aea0d8b437..e724156fae 100644
--- a/crates/editor/src/items.rs
+++ b/crates/editor/src/items.rs
@@ -17,7 +17,7 @@ use std::{
     path::{Path, PathBuf},
 };
 use text::Selection;
-use util::TryFutureExt;
+use util::{ResultExt, TryFutureExt};
 use workspace::{
     item::{FollowableItem, Item, ItemEvent, ItemHandle, ProjectItem},
     searchable::{Direction, SearchEvent, SearchableItem, SearchableItemHandle},
@@ -554,6 +554,43 @@ impl Item for Editor {
         Some(breadcrumbs)
     }

+    fn added_to_workspace(&mut self, workspace: &mut Workspace, cx: &mut ViewContext<Self>) {
+        let workspace_id = workspace.database_id();
+        let item_id = cx.view_id();
+
+        fn serialize(
+            buffer: ModelHandle<Buffer>,
+            workspace_id: WorkspaceId,
+            item_id: ItemId,
+            cx: &mut MutableAppContext,
+        ) {
+            if let Some(file) = buffer.read(cx).file().and_then(|file| file.as_local()) {
+                let path = file.abs_path(cx);
+
+                cx.background()
+                    .spawn(async move {
+                        DB.save_path(item_id, workspace_id, path.clone())
+                            .await
+                            .log_err()
+                    })
+                    .detach();
+            }
+        }
+
+        if let Some(buffer) = self.buffer().read(cx).as_singleton() {
+            serialize(buffer.clone(), workspace_id, item_id, cx);
+
+            cx.subscribe(&buffer, |this, buffer, event, cx| {
+                if let Some(workspace_id) = this.workspace_id {
+                    if let language::Event::FileHandleChanged = event {
+                        serialize(buffer, workspace_id, cx.view_id(), cx);
+                    }
+                }
+            })
+            .detach();
+        }
+    }
+
     fn serialized_item_kind() -> Option<&'static str> {
         Some("Editor")
     }
diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs
index a77eec7fd1..b2f76294aa 100644
--- a/crates/editor/src/persistence.rs
+++ b/crates/editor/src/persistence.rs
@@ -1,4 +1,4 @@
-use std::path::{Path, PathBuf};
+use std::path::PathBuf;

 use db::{connection, sql_method};
 use indoc::indoc;
@@ -39,7 +39,7 @@ impl EditorDb {
     }

     sql_method!
{ - save_path(item_id: ItemId, workspace_id: WorkspaceId, path: &Path) -> Result<()>: + async save_path(item_id: ItemId, workspace_id: WorkspaceId, path: PathBuf) -> Result<()>: indoc! {" INSERT OR REPLACE INTO editors(item_id, workspace_id, path) VALUES (?, ?, ?)"} diff --git a/crates/editor/src/test/editor_lsp_test_context.rs b/crates/editor/src/test/editor_lsp_test_context.rs index 9cf305ad37..b65b09cf17 100644 --- a/crates/editor/src/test/editor_lsp_test_context.rs +++ b/crates/editor/src/test/editor_lsp_test_context.rs @@ -66,6 +66,7 @@ impl<'a> EditorLspTestContext<'a> { let (window_id, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), |_, _| unimplemented!(), cx, diff --git a/crates/file_finder/src/file_finder.rs b/crates/file_finder/src/file_finder.rs index b0016002fa..5122a46c2c 100644 --- a/crates/file_finder/src/file_finder.rs +++ b/crates/file_finder/src/file_finder.rs @@ -317,7 +317,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; let (window_id, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); cx.dispatch_action(window_id, Toggle); @@ -373,7 +373,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/dir".as_ref()], cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); @@ -449,7 +449,7 @@ mod tests { ) .await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); @@ -475,7 +475,7 @@ mod tests { ) .await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); @@ -529,7 +529,7 @@ mod tests { ) .await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); @@ -569,7 +569,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index dae1f70aae..e88f3004eb 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -1396,6 +1396,7 @@ mod tests { let (_, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), |_, _| unimplemented!(), cx, @@ -1495,6 +1496,7 @@ mod tests { 
let (_, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), |_, _| unimplemented!(), cx, diff --git a/crates/sqlez/Cargo.toml b/crates/sqlez/Cargo.toml index cbb4504a04..cab1af7d6c 100644 --- a/crates/sqlez/Cargo.toml +++ b/crates/sqlez/Cargo.toml @@ -9,4 +9,7 @@ edition = "2021" anyhow = { version = "1.0.38", features = ["backtrace"] } indoc = "1.0.7" libsqlite3-sys = { version = "0.25.2", features = ["bundled"] } -thread_local = "1.1.4" \ No newline at end of file +thread_local = "1.1.4" +lazy_static = "1.4" +parking_lot = "0.11.1" +futures = "0.3" \ No newline at end of file diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index 51f67dd03f..ffef7814f9 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -322,6 +322,18 @@ impl Bind for &Path { } } +impl Bind for Arc { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + self.as_ref().bind(statement, start_index) + } +} + +impl Bind for PathBuf { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + (self.as_ref() as &Path).bind(statement, start_index) + } +} + impl Column for PathBuf { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { let blob = statement.column_blob(start_index)?; diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 5a71cefb52..4beddb4fed 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -10,16 +10,18 @@ use libsqlite3_sys::*; pub struct Connection { pub(crate) sqlite3: *mut sqlite3, persistent: bool, - phantom: PhantomData, + pub(crate) write: bool, + _sqlite: PhantomData, } unsafe impl Send for Connection {} impl Connection { - fn open(uri: &str, persistent: bool) -> Result { + pub(crate) fn open(uri: &str, persistent: bool) -> Result { let mut connection = Self { sqlite3: 0 as *mut _, persistent, - phantom: PhantomData, + write: true, + _sqlite: PhantomData, }; let flags = SQLITE_OPEN_CREATE | SQLITE_OPEN_NOMUTEX | SQLITE_OPEN_READWRITE; @@ -60,6 +62,10 @@ impl Connection { self.persistent } + pub fn can_write(&self) -> bool { + self.write + } + pub fn backup_main(&self, destination: &Connection) -> Result<()> { unsafe { let backup = sqlite3_backup_init( diff --git a/crates/sqlez/src/lib.rs b/crates/sqlez/src/lib.rs index c5d2658666..a22cfff2b3 100644 --- a/crates/sqlez/src/lib.rs +++ b/crates/sqlez/src/lib.rs @@ -1,5 +1,3 @@ -pub use anyhow; - pub mod bindable; pub mod connection; pub mod domain; @@ -8,3 +6,6 @@ pub mod savepoint; pub mod statement; pub mod thread_safe_connection; pub mod typed_statements; +mod util; + +pub use anyhow; diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 23af04bbf4..6c0aafaf20 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -11,46 +11,48 @@ use crate::connection::Connection; impl Connection { pub fn migrate(&self, domain: &'static str, migrations: &[&'static str]) -> Result<()> { - // Setup the migrations table unconditionally - self.exec(indoc! {" - CREATE TABLE IF NOT EXISTS migrations ( + self.with_savepoint("migrating", || { + // Setup the migrations table unconditionally + self.exec(indoc! {" + CREATE TABLE IF NOT EXISTS migrations ( domain TEXT, step INTEGER, migration TEXT - )"})?()?; + )"})?()?; - let completed_migrations = - self.select_bound::<&str, (String, usize, String)>(indoc! {" + let completed_migrations = + self.select_bound::<&str, (String, usize, String)>(indoc! 
{" SELECT domain, step, migration FROM migrations WHERE domain = ? ORDER BY step - "})?(domain)?; + "})?(domain)?; - let mut store_completed_migration = - self.exec_bound("INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)")?; + let mut store_completed_migration = self + .exec_bound("INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)")?; - for (index, migration) in migrations.iter().enumerate() { - if let Some((_, _, completed_migration)) = completed_migrations.get(index) { - if completed_migration != migration { - return Err(anyhow!(formatdoc! {" - Migration changed for {} at step {} - - Stored migration: - {} - - Proposed migration: - {}", domain, index, completed_migration, migration})); - } else { - // Migration already run. Continue - continue; + for (index, migration) in migrations.iter().enumerate() { + if let Some((_, _, completed_migration)) = completed_migrations.get(index) { + if completed_migration != migration { + return Err(anyhow!(formatdoc! {" + Migration changed for {} at step {} + + Stored migration: + {} + + Proposed migration: + {}", domain, index, completed_migration, migration})); + } else { + // Migration already run. Continue + continue; + } } + + self.exec(migration)?()?; + store_completed_migration((domain, index, *migration))?; } - self.exec(migration)?()?; - store_completed_migration((domain, index, *migration))?; - } - - Ok(()) + Ok(()) + }) } } diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index 0a7305c6ed..86035f5d0a 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -2,7 +2,7 @@ use std::ffi::{c_int, CStr, CString}; use std::marker::PhantomData; use std::{ptr, slice, str}; -use anyhow::{anyhow, Context, Result}; +use anyhow::{anyhow, bail, Context, Result}; use libsqlite3_sys::*; use crate::bindable::{Bind, Column}; @@ -57,12 +57,21 @@ impl<'a> Statement<'a> { &mut raw_statement, &mut remaining_sql_ptr, ); + remaining_sql = CStr::from_ptr(remaining_sql_ptr); statement.raw_statements.push(raw_statement); connection.last_error().with_context(|| { format!("Prepare call failed for query:\n{}", query.as_ref()) })?; + + if !connection.can_write() && sqlite3_stmt_readonly(raw_statement) == 0 { + let sql = CStr::from_ptr(sqlite3_sql(raw_statement)); + + bail!( + "Write statement prepared with connection that is not write capable. SQL:\n{} ", + sql.to_str()?) + } } } diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 7c5bf6388c..5402c6b5e1 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -1,36 +1,41 @@ -use std::{marker::PhantomData, ops::Deref, sync::Arc}; - -use connection::Connection; +use futures::{Future, FutureExt}; +use lazy_static::lazy_static; +use parking_lot::RwLock; +use std::{collections::HashMap, marker::PhantomData, ops::Deref, sync::Arc, thread}; use thread_local::ThreadLocal; use crate::{ - connection, + connection::Connection, domain::{Domain, Migrator}, + util::UnboundedSyncSender, }; +type QueuedWrite = Box; + +lazy_static! 
{ + static ref QUEUES: RwLock, UnboundedSyncSender>> = + Default::default(); +} + pub struct ThreadSafeConnection { - uri: Option>, + uri: Arc, persistent: bool, initialize_query: Option<&'static str>, - connection: Arc>, - _pd: PhantomData, + connections: Arc>, + _migrator: PhantomData, } unsafe impl Send for ThreadSafeConnection {} unsafe impl Sync for ThreadSafeConnection {} impl ThreadSafeConnection { - pub fn new(uri: Option<&str>, persistent: bool) -> Self { - if persistent == true && uri == None { - // This panic is securing the unwrap in open_file(), don't remove it! - panic!("Cannot create a persistent connection without a URI") - } + pub fn new(uri: &str, persistent: bool) -> Self { Self { - uri: uri.map(|str| Arc::from(str)), + uri: Arc::from(uri), persistent, initialize_query: None, - connection: Default::default(), - _pd: PhantomData, + connections: Default::default(), + _migrator: PhantomData, } } @@ -46,13 +51,13 @@ impl ThreadSafeConnection { /// If opening fails, the connection falls back to a shared memory connection fn open_file(&self) -> Connection { // This unwrap is secured by a panic in the constructor. Be careful if you remove it! - Connection::open_file(self.uri.as_ref().unwrap()) + Connection::open_file(self.uri.as_ref()) } /// Opens a shared memory connection using the file path as the identifier. This unwraps /// as we expect it always to succeed fn open_shared_memory(&self) -> Connection { - Connection::open_memory(self.uri.as_ref().map(|str| str.deref())) + Connection::open_memory(Some(self.uri.as_ref())) } // Open a new connection for the given domain, leaving this @@ -62,10 +67,74 @@ impl ThreadSafeConnection { uri: self.uri.clone(), persistent: self.persistent, initialize_query: self.initialize_query, - connection: Default::default(), - _pd: PhantomData, + connections: Default::default(), + _migrator: PhantomData, } } + + pub fn write( + &self, + callback: impl 'static + Send + FnOnce(&Connection) -> T, + ) -> impl Future { + // Startup write thread for this database if one hasn't already + // been started and insert a channel to queue work for it + if !QUEUES.read().contains_key(&self.uri) { + use std::sync::mpsc::channel; + + let (sender, reciever) = channel::(); + let mut write_connection = self.create_connection(); + // Enable writes for this connection + write_connection.write = true; + thread::spawn(move || { + while let Ok(write) = reciever.recv() { + write(&write_connection) + } + }); + + let mut queues = QUEUES.write(); + queues.insert(self.uri.clone(), UnboundedSyncSender::new(sender)); + } + + // Grab the queue for this database + let queues = QUEUES.read(); + let write_channel = queues.get(&self.uri).unwrap(); + + // Create a one shot channel for the result of the queued write + // so we can await on the result + let (sender, reciever) = futures::channel::oneshot::channel(); + write_channel + .send(Box::new(move |connection| { + sender.send(callback(connection)).ok(); + })) + .expect("Could not send write action to background thread"); + + reciever.map(|response| response.expect("Background thread unexpectedly closed")) + } + + pub(crate) fn create_connection(&self) -> Connection { + let mut connection = if self.persistent { + self.open_file() + } else { + self.open_shared_memory() + }; + + // Enable writes for the migrations and initialization queries + connection.write = true; + + if let Some(initialize_query) = self.initialize_query { + connection.exec(initialize_query).expect(&format!( + "Initialize query failed to execute: {}", + 
initialize_query + ))() + .unwrap(); + } + + M::migrate(&connection).expect("Migrations failed"); + + // Disable db writes for normal thread local connection + connection.write = false; + connection + } } impl Clone for ThreadSafeConnection { @@ -74,8 +143,8 @@ impl Clone for ThreadSafeConnection { uri: self.uri.clone(), persistent: self.persistent, initialize_query: self.initialize_query.clone(), - connection: self.connection.clone(), - _pd: PhantomData, + connections: self.connections.clone(), + _migrator: PhantomData, } } } @@ -88,25 +157,7 @@ impl Deref for ThreadSafeConnection { type Target = Connection; fn deref(&self) -> &Self::Target { - self.connection.get_or(|| { - let connection = if self.persistent { - self.open_file() - } else { - self.open_shared_memory() - }; - - if let Some(initialize_query) = self.initialize_query { - connection.exec(initialize_query).expect(&format!( - "Initialize query failed to execute: {}", - initialize_query - ))() - .unwrap(); - } - - M::migrate(&connection).expect("Migrations failed"); - - connection - }) + self.connections.get_or(|| self.create_connection()) } } @@ -151,7 +202,7 @@ mod test { } } - let _ = ThreadSafeConnection::::new(None, false) + let _ = ThreadSafeConnection::::new("wild_zed_lost_failure", false) .with_initialize_query("PRAGMA FOREIGN_KEYS=true") .deref(); } diff --git a/crates/sqlez/src/util.rs b/crates/sqlez/src/util.rs new file mode 100644 index 0000000000..b5366cffc4 --- /dev/null +++ b/crates/sqlez/src/util.rs @@ -0,0 +1,28 @@ +use std::ops::Deref; +use std::sync::mpsc::Sender; + +use parking_lot::Mutex; +use thread_local::ThreadLocal; + +pub struct UnboundedSyncSender { + clonable_sender: Mutex>, + local_senders: ThreadLocal>, +} + +impl UnboundedSyncSender { + pub fn new(sender: Sender) -> Self { + Self { + clonable_sender: Mutex::new(sender), + local_senders: ThreadLocal::new(), + } + } +} + +impl Deref for UnboundedSyncSender { + type Target = Sender; + + fn deref(&self) -> &Self::Target { + self.local_senders + .get_or(|| self.clonable_sender.lock().clone()) + } +} diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index 07bca0c66f..1e9b846f38 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -1,4 +1,4 @@ -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use db::{connection, indoc, sql_method, sqlez::domain::Domain}; @@ -17,7 +17,7 @@ impl Domain for Terminal { &[indoc! {" CREATE TABLE terminals ( workspace_id INTEGER, - item_id INTEGER, + item_id INTEGER UNIQUE, working_directory BLOB, PRIMARY KEY(workspace_id, item_id), FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) @@ -29,21 +29,35 @@ impl Domain for Terminal { impl TerminalDb { sql_method! { - save_working_directory(item_id: ItemId, - workspace_id: WorkspaceId, - working_directory: &Path) -> Result<()>: - indoc!{" - INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) - VALUES (?1, ?2, ?3) + async update_workspace_id( + new_id: WorkspaceId, + old_id: WorkspaceId, + item_id: ItemId + ) -> Result<()>: + indoc! {" + UPDATE terminals + SET workspace_id = ? + WHERE workspace_id = ? AND item_id = ? "} } + sql_method! { + async save_working_directory( + item_id: ItemId, + workspace_id: WorkspaceId, + working_directory: PathBuf) -> Result<()>: + indoc!{" + INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) + VALUES (?1, ?2, ?3) + "} + } + sql_method! 
{ get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> Result>: - indoc!{" - SELECT working_directory - FROM terminals - WHERE item_id = ? AND workspace_id = ? - "} + indoc!{" + SELECT working_directory + FROM terminals + WHERE item_id = ? AND workspace_id = ? + "} } } diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index b5192b6876..0cbb6d36b1 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -57,7 +57,8 @@ use gpui::{ geometry::vector::{vec2f, Vector2F}, keymap::Keystroke, scene::{MouseDown, MouseDrag, MouseScrollWheel, MouseUp}, - ClipboardItem, Entity, ModelContext, MouseButton, MouseMovedEvent, MutableAppContext, Task, + AppContext, ClipboardItem, Entity, ModelContext, MouseButton, MouseMovedEvent, + MutableAppContext, Task, }; use crate::mappings::{ @@ -585,7 +586,8 @@ impl Terminal { cx.background() .spawn(async move { TERMINAL_CONNECTION - .save_working_directory(item_id, workspace_id, cwd.as_path()) + .save_working_directory(item_id, workspace_id, cwd) + .await .log_err(); }) .detach(); @@ -1192,6 +1194,21 @@ impl Terminal { } } + pub fn set_workspace_id(&mut self, id: WorkspaceId, cx: &AppContext) { + let old_workspace_id = self.workspace_id; + let item_id = self.item_id; + cx.background() + .spawn(async move { + TERMINAL_CONNECTION + .update_workspace_id(id, old_workspace_id, item_id) + .await + .log_err() + }) + .detach(); + + self.workspace_id = id; + } + pub fn find_matches( &mut self, query: project::search::SearchQuery, diff --git a/crates/terminal/src/terminal_container_view.rs b/crates/terminal/src/terminal_container_view.rs index fdda388642..a6c28d4baf 100644 --- a/crates/terminal/src/terminal_container_view.rs +++ b/crates/terminal/src/terminal_container_view.rs @@ -400,6 +400,14 @@ impl Item for TerminalContainer { ) }))) } + + fn added_to_workspace(&mut self, workspace: &mut Workspace, cx: &mut ViewContext) { + if let Some(connected) = self.connected() { + let id = workspace.database_id(); + let terminal_handle = connected.read(cx).terminal().clone(); + terminal_handle.update(cx, |terminal, cx| terminal.set_workspace_id(id, cx)) + } + } } impl SearchableItem for TerminalContainer { diff --git a/crates/terminal/src/tests/terminal_test_context.rs b/crates/terminal/src/tests/terminal_test_context.rs index 352ce4a0d2..67ebb55805 100644 --- a/crates/terminal/src/tests/terminal_test_context.rs +++ b/crates/terminal/src/tests/terminal_test_context.rs @@ -31,6 +31,7 @@ impl<'a> TerminalTestContext<'a> { let (_, workspace) = self.cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), |_, _| unimplemented!(), cx, diff --git a/crates/vim/src/test/vim_test_context.rs b/crates/vim/src/test/vim_test_context.rs index 68c08f2f7a..e0d972896f 100644 --- a/crates/vim/src/test/vim_test_context.rs +++ b/crates/vim/src/test/vim_test_context.rs @@ -44,6 +44,7 @@ impl<'a> VimTestContext<'a> { let (window_id, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), |_, _| unimplemented!(), cx, diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index fb28571172..0879166bbe 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -575,7 +575,7 @@ mod tests { cx.update(|cx| init(cx)); let project = Project::test(fs, [], cx).await; let (window_id, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, default_item_factory, cx) + Workspace::new(Default::default(), 0, project, 
default_item_factory, cx) }); workspace.update(cx, |workspace, cx| { diff --git a/crates/workspace/src/item.rs b/crates/workspace/src/item.rs index b990ba20a2..e44e7ca09d 100644 --- a/crates/workspace/src/item.rs +++ b/crates/workspace/src/item.rs @@ -119,6 +119,8 @@ pub trait Item: View { None } + fn added_to_workspace(&mut self, _workspace: &mut Workspace, _cx: &mut ViewContext) {} + fn serialized_item_kind() -> Option<&'static str>; fn deserialize( @@ -267,7 +269,10 @@ impl ItemHandle for ViewHandle { cx: &mut ViewContext, ) { let history = pane.read(cx).nav_history_for_item(self); - self.update(cx, |this, cx| this.set_nav_history(history, cx)); + self.update(cx, |this, cx| { + this.set_nav_history(history, cx); + this.added_to_workspace(workspace, cx); + }); if let Some(followed_item) = self.to_followable_item_handle(cx) { if let Some(message) = followed_item.to_state_proto(cx) { @@ -426,6 +431,10 @@ impl ItemHandle for ViewHandle { }) .detach(); } + + cx.defer(|workspace, cx| { + workspace.serialize_workspace(cx); + }); } fn deactivated(&self, cx: &mut MutableAppContext) { diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index 5db8d6feec..428865ec3b 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -1647,7 +1647,7 @@ mod tests { let project = Project::test(fs, None, cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); @@ -1737,7 +1737,7 @@ mod tests { let project = Project::test(fs, None, cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); @@ -1815,7 +1815,7 @@ mod tests { let project = Project::test(fs, None, cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); @@ -1926,7 +1926,7 @@ mod tests { let project = Project::test(fs, None, cx).await; let (_, workspace) = - cx.add_window(|cx| Workspace::new(None, project, |_, _| unimplemented!(), cx)); + cx.add_window(|cx| Workspace::new(None, 0, project, |_, _| unimplemented!(), cx)); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); add_labled_item(&workspace, &pane, "A", cx); diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 477e5a4960..66b3622119 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -5,7 +5,7 @@ pub mod model; use std::path::Path; use anyhow::{anyhow, bail, Context, Result}; -use db::{connection, sql_method}; +use db::{connection, sql_method, sqlez::connection::Connection}; use gpui::Axis; use indoc::indoc; @@ -138,60 +138,71 @@ impl WorkspaceDb { /// Saves a workspace using the worktree roots. Will garbage collect any workspaces /// that used this workspace previously - pub fn save_workspace(&self, workspace: &SerializedWorkspace) { - self.with_savepoint("update_worktrees", || { - // Clear out panes and pane_groups - self.exec_bound(indoc! 
{" - UPDATE workspaces SET dock_pane = NULL WHERE workspace_id = ?1; - DELETE FROM pane_groups WHERE workspace_id = ?1; - DELETE FROM panes WHERE workspace_id = ?1;"})?(workspace.id) - .context("Clearing old panes")?; + pub async fn save_workspace(&self, workspace: SerializedWorkspace) { + self.write(move |conn| { + conn.with_savepoint("update_worktrees", || { + // Clear out panes and pane_groups + conn.exec_bound(indoc! {" + UPDATE workspaces SET dock_pane = NULL WHERE workspace_id = ?1; + DELETE FROM pane_groups WHERE workspace_id = ?1; + DELETE FROM panes WHERE workspace_id = ?1;"})?(workspace.id) + .context("Clearing old panes")?; - self.exec_bound(indoc! {" - DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ? - "})?((&workspace.location, workspace.id)) - .context("clearing out old locations")?; - - // Upsert - self.exec_bound(indoc! { - "INSERT INTO - workspaces(workspace_id, workspace_location, dock_visible, dock_anchor, timestamp) - VALUES - (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP) - ON CONFLICT DO UPDATE SET - workspace_location = ?2, dock_visible = ?3, dock_anchor = ?4, timestamp = CURRENT_TIMESTAMP" - })?((workspace.id, &workspace.location, workspace.dock_position)) - .context("Updating workspace")?; + conn.exec_bound(indoc! {" + DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ?"})?( + ( + &workspace.location, + workspace.id.clone(), + ) + ) + .context("clearing out old locations")?; - // Save center pane group and dock pane - self.save_pane_group(workspace.id, &workspace.center_group, None) - .context("save pane group in save workspace")?; + // Upsert + conn.exec_bound(indoc! {" + INSERT INTO workspaces( + workspace_id, + workspace_location, + dock_visible, + dock_anchor, + timestamp + ) + VALUES (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP) + ON CONFLICT DO + UPDATE SET + workspace_location = ?2, + dock_visible = ?3, + dock_anchor = ?4, + timestamp = CURRENT_TIMESTAMP + "})?(( + workspace.id, + &workspace.location, + workspace.dock_position, + )) + .context("Updating workspace")?; - let dock_id = self - .save_pane(workspace.id, &workspace.dock_pane, None, true) - .context("save pane in save workspace")?; + // Save center pane group and dock pane + Self::save_pane_group(conn, workspace.id, &workspace.center_group, None) + .context("save pane group in save workspace")?; - // Complete workspace initialization - self.exec_bound(indoc! {" - UPDATE workspaces - SET dock_pane = ? - WHERE workspace_id = ?"})?((dock_id, workspace.id)) - .context("Finishing initialization with dock pane")?; + let dock_id = Self::save_pane(conn, workspace.id, &workspace.dock_pane, None, true) + .context("save pane in save workspace")?; - Ok(()) + // Complete workspace initialization + conn.exec_bound(indoc! {" + UPDATE workspaces + SET dock_pane = ? + WHERE workspace_id = ?"})?((dock_id, workspace.id)) + .context("Finishing initialization with dock pane")?; + + Ok(()) + }) + .log_err(); }) - .with_context(|| { - format!( - "Update workspace with roots {:?} and id {:?} failed.", - workspace.location.paths(), - workspace.id - ) - }) - .log_err(); + .await; } - sql_method!{ - next_id() -> Result: + sql_method! 
{ + async next_id() -> Result: "INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id" } @@ -276,7 +287,7 @@ impl WorkspaceDb { } fn save_pane_group( - &self, + conn: &Connection, workspace_id: WorkspaceId, pane_group: &SerializedPaneGroup, parent: Option<(GroupId, usize)>, @@ -285,7 +296,7 @@ impl WorkspaceDb { SerializedPaneGroup::Group { axis, children } => { let (parent_id, position) = unzip_option(parent); - let group_id = self.select_row_bound::<_, i64>(indoc! {" + let group_id = conn.select_row_bound::<_, i64>(indoc! {" INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?) RETURNING group_id"})?(( @@ -297,13 +308,13 @@ impl WorkspaceDb { .ok_or_else(|| anyhow!("Couldn't retrieve group_id from inserted pane_group"))?; for (position, group) in children.iter().enumerate() { - self.save_pane_group(workspace_id, group, Some((group_id, position)))? + Self::save_pane_group(conn, workspace_id, group, Some((group_id, position)))? } Ok(()) } SerializedPaneGroup::Pane(pane) => { - self.save_pane(workspace_id, &pane, parent, false)?; + Self::save_pane(conn, workspace_id, &pane, parent, false)?; Ok(()) } } @@ -325,13 +336,13 @@ impl WorkspaceDb { } fn save_pane( - &self, + conn: &Connection, workspace_id: WorkspaceId, pane: &SerializedPane, parent: Option<(GroupId, usize)>, // None indicates BOTH dock pane AND center_pane dock: bool, ) -> Result { - let pane_id = self.select_row_bound::<_, i64>(indoc! {" + let pane_id = conn.select_row_bound::<_, i64>(indoc! {" INSERT INTO panes(workspace_id, active) VALUES (?, ?) RETURNING pane_id"})?((workspace_id, pane.active))? @@ -339,13 +350,12 @@ impl WorkspaceDb { if !dock { let (parent_id, order) = unzip_option(parent); - self.exec_bound(indoc! {" + conn.exec_bound(indoc! {" INSERT INTO center_panes(pane_id, parent_group_id, position) VALUES (?, ?, ?)"})?((pane_id, parent_id, order))?; } - self.save_items(workspace_id, pane_id, &pane.children) - .context("Saving items")?; + Self::save_items(conn, workspace_id, pane_id, &pane.children).context("Saving items")?; Ok(pane_id) } @@ -358,12 +368,12 @@ impl WorkspaceDb { } fn save_items( - &self, + conn: &Connection, workspace_id: WorkspaceId, pane_id: PaneId, items: &[SerializedItem], ) -> Result<()> { - let mut insert = self.exec_bound( + let mut insert = conn.exec_bound( "INSERT INTO items(workspace_id, pane_id, position, kind, item_id) VALUES (?, ?, ?, ?, ?)", ).context("Preparing insertion")?; for (position, item) in items.iter().enumerate() { @@ -384,32 +394,44 @@ mod tests { use super::*; - #[test] - fn test_next_id_stability() { + #[gpui::test] + async fn test_next_id_stability() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db(Some("test_workspace_id_stability"))); + let db = WorkspaceDb(open_memory_db("test_next_id_stability")); - db.migrate( - "test_table", - &["CREATE TABLE test_table( - text TEXT, - workspace_id INTEGER, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ) STRICT;"], - ) - .unwrap(); - - let id = db.next_id().unwrap(); + db.write(|conn| { + conn.migrate( + "test_table", + &[indoc! 
{" + CREATE TABLE test_table( + text TEXT, + workspace_id INTEGER, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ) STRICT;"}], + ) + .unwrap(); + }) + .await; + + let id = db.next_id().await.unwrap(); // Assert the empty row got inserted - assert_eq!(Some(id), db.select_row_bound:: - ("SELECT workspace_id FROM workspaces WHERE workspace_id = ?").unwrap() - (id).unwrap()); - - db.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") - .unwrap()(("test-text-1", id)) - .unwrap(); + assert_eq!( + Some(id), + db.select_row_bound::( + "SELECT workspace_id FROM workspaces WHERE workspace_id = ?" + ) + .unwrap()(id) + .unwrap() + ); + + db.write(move |conn| { + conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + .unwrap()(("test-text-1", id)) + .unwrap() + }) + .await; let test_text_1 = db .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") @@ -418,22 +440,27 @@ mod tests { .unwrap(); assert_eq!(test_text_1, "test-text-1"); } - - #[test] - fn test_workspace_id_stability() { + + #[gpui::test] + async fn test_workspace_id_stability() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db(Some("test_workspace_id_stability"))); + let db = WorkspaceDb(open_memory_db("test_workspace_id_stability")); - db.migrate( - "test_table", - &["CREATE TABLE test_table( - text TEXT, - workspace_id INTEGER, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ) STRICT;"], - ) + db.write(|conn| { + conn.migrate( + "test_table", + &[indoc! {" + CREATE TABLE test_table( + text TEXT, + workspace_id INTEGER, + FOREIGN KEY(workspace_id) + REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ) STRICT;"}], + ) + }) + .await .unwrap(); let mut workspace_1 = SerializedWorkspace { @@ -452,27 +479,33 @@ mod tests { dock_pane: Default::default(), }; - db.save_workspace(&workspace_1); + db.save_workspace(workspace_1.clone()).await; - db.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") - .unwrap()(("test-text-1", 1)) - .unwrap(); + db.write(|conn| { + conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + .unwrap()(("test-text-1", 1)) + .unwrap(); + }) + .await; - db.save_workspace(&workspace_2); + db.save_workspace(workspace_2.clone()).await; - db.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") - .unwrap()(("test-text-2", 2)) - .unwrap(); + db.write(|conn| { + conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + .unwrap()(("test-text-2", 2)) + .unwrap(); + }) + .await; workspace_1.location = (["/tmp", "/tmp3"]).into(); - db.save_workspace(&workspace_1); - db.save_workspace(&workspace_1); + db.save_workspace(workspace_1.clone()).await; + db.save_workspace(workspace_1).await; workspace_2.dock_pane.children.push(SerializedItem { kind: Arc::from("Test"), item_id: 10, }); - db.save_workspace(&workspace_2); + db.save_workspace(workspace_2).await; let test_text_2 = db .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") @@ -489,11 +522,11 @@ mod tests { assert_eq!(test_text_1, "test-text-1"); } - #[test] - fn test_full_workspace_serialization() { + #[gpui::test] + async fn test_full_workspace_serialization() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db(Some("test_full_workspace_serialization"))); + let db = WorkspaceDb(open_memory_db("test_full_workspace_serialization")); let dock_pane = 
crate::persistence::model::SerializedPane { children: vec![ @@ -550,24 +583,24 @@ mod tests { dock_pane, }; - db.save_workspace(&workspace); + db.save_workspace(workspace.clone()).await; let round_trip_workspace = db.workspace_for_roots(&["/tmp2", "/tmp"]); assert_eq!(workspace, round_trip_workspace.unwrap()); // Test guaranteed duplicate IDs - db.save_workspace(&workspace); - db.save_workspace(&workspace); + db.save_workspace(workspace.clone()).await; + db.save_workspace(workspace.clone()).await; let round_trip_workspace = db.workspace_for_roots(&["/tmp", "/tmp2"]); assert_eq!(workspace, round_trip_workspace.unwrap()); } - #[test] - fn test_workspace_assignment() { + #[gpui::test] + async fn test_workspace_assignment() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db(Some("test_basic_functionality"))); + let db = WorkspaceDb(open_memory_db("test_basic_functionality")); let workspace_1 = SerializedWorkspace { id: 1, @@ -585,8 +618,8 @@ mod tests { dock_pane: Default::default(), }; - db.save_workspace(&workspace_1); - db.save_workspace(&workspace_2); + db.save_workspace(workspace_1.clone()).await; + db.save_workspace(workspace_2.clone()).await; // Test that paths are treated as a set assert_eq!( @@ -605,7 +638,7 @@ mod tests { // Test 'mutate' case of updating a pre-existing id workspace_2.location = (["/tmp", "/tmp2"]).into(); - db.save_workspace(&workspace_2); + db.save_workspace(workspace_2.clone()).await; assert_eq!( db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), workspace_2 @@ -620,7 +653,7 @@ mod tests { dock_pane: Default::default(), }; - db.save_workspace(&workspace_3); + db.save_workspace(workspace_3.clone()).await; assert_eq!( db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), workspace_3 @@ -628,7 +661,7 @@ mod tests { // Make sure that updating paths differently also works workspace_3.location = (["/tmp3", "/tmp4", "/tmp2"]).into(); - db.save_workspace(&workspace_3); + db.save_workspace(workspace_3.clone()).await; assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None); assert_eq!( db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]) @@ -655,11 +688,11 @@ mod tests { } } - #[test] - fn test_basic_dock_pane() { + #[gpui::test] + async fn test_basic_dock_pane() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db(Some("basic_dock_pane"))); + let db = WorkspaceDb(open_memory_db("basic_dock_pane")); let dock_pane = crate::persistence::model::SerializedPane::new( vec![ @@ -673,18 +706,18 @@ mod tests { let workspace = default_workspace(&["/tmp"], dock_pane, &Default::default()); - db.save_workspace(&workspace); + db.save_workspace(workspace.clone()).await; let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); assert_eq!(workspace.dock_pane, new_workspace.dock_pane); } - #[test] - fn test_simple_split() { + #[gpui::test] + async fn test_simple_split() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db(Some("simple_split"))); + let db = WorkspaceDb(open_memory_db("simple_split")); // ----------------- // | 1,2 | 5,6 | @@ -725,18 +758,18 @@ mod tests { let workspace = default_workspace(&["/tmp"], Default::default(), ¢er_pane); - db.save_workspace(&workspace); + db.save_workspace(workspace.clone()).await; let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); assert_eq!(workspace.center_group, new_workspace.center_group); } - #[test] - fn test_cleanup_panes() { + #[gpui::test] + async fn test_cleanup_panes() { env_logger::try_init().ok(); - let db = 
WorkspaceDb(open_memory_db(Some("test_cleanup_panes"))); + let db = WorkspaceDb(open_memory_db("test_cleanup_panes")); let center_pane = SerializedPaneGroup::Group { axis: gpui::Axis::Horizontal, @@ -774,7 +807,7 @@ mod tests { let mut workspace = default_workspace(id, Default::default(), ¢er_pane); - db.save_workspace(&workspace); + db.save_workspace(workspace.clone()).await; workspace.center_group = SerializedPaneGroup::Group { axis: gpui::Axis::Vertical, @@ -796,7 +829,7 @@ mod tests { ], }; - db.save_workspace(&workspace); + db.save_workspace(workspace.clone()).await; let new_workspace = db.workspace_for_roots(id).unwrap(); diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index 2f0bc050d2..dc6d8ba8ee 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -58,7 +58,7 @@ impl Column for WorkspaceLocation { } } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub struct SerializedWorkspace { pub id: WorkspaceId, pub location: WorkspaceLocation, diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 155c95e4e8..9755c2c6ca 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -539,6 +539,7 @@ pub struct Workspace { impl Workspace { pub fn new( serialized_workspace: Option, + workspace_id: WorkspaceId, project: ModelHandle, dock_default_factory: DefaultItemFactory, cx: &mut ViewContext, @@ -558,7 +559,6 @@ impl Workspace { } project::Event::WorktreeRemoved(_) | project::Event::WorktreeAdded => { this.update_window_title(cx); - // TODO: Cache workspace_id on workspace and read from it here this.serialize_workspace(cx); } project::Event::DisconnectedFromHost => { @@ -633,12 +633,6 @@ impl Workspace { active_call = Some((call, subscriptions)); } - let database_id = serialized_workspace - .as_ref() - .map(|ws| ws.id) - .or_else(|| DB.next_id().log_err()) - .unwrap_or(0); - let mut this = Workspace { modal: None, weak_self: weak_handle.clone(), @@ -666,7 +660,7 @@ impl Workspace { last_leaders_by_pane: Default::default(), window_edited: false, active_call, - database_id, + database_id: workspace_id, _observe_current_user, }; this.project_remote_id_changed(project.read(cx).remote_id(), cx); @@ -699,10 +693,17 @@ impl Workspace { ); cx.spawn(|mut cx| async move { + let serialized_workspace = persistence::DB.workspace_for_roots(&abs_paths.as_slice()); + + let paths_to_open = serialized_workspace + .as_ref() + .map(|workspace| workspace.location.paths()) + .unwrap_or(Arc::new(abs_paths)); + // Get project paths for all of the abs_paths let mut worktree_roots: HashSet> = Default::default(); let mut project_paths = Vec::new(); - for path in abs_paths.iter() { + for path in paths_to_open.iter() { if let Some((worktree, project_entry)) = cx .update(|cx| { Workspace::project_path_for_path(project_handle.clone(), &path, true, cx) @@ -717,14 +718,17 @@ impl Workspace { } } - // Use the resolved worktree roots to get the serialized_db from the database - let serialized_workspace = persistence::DB - .workspace_for_roots(&Vec::from_iter(worktree_roots.into_iter())[..]); + let workspace_id = if let Some(serialized_workspace) = serialized_workspace.as_ref() { + serialized_workspace.id + } else { + DB.next_id().await.unwrap_or(0) + }; // Use the serialized workspace to construct the new window let (_, workspace) = cx.add_window((app_state.build_window_options)(), |cx| { let mut workspace = Workspace::new( 
serialized_workspace, + workspace_id, project_handle, app_state.default_item_factory, cx, @@ -735,8 +739,8 @@ impl Workspace { // Call open path for each of the project paths // (this will bring them to the front if they were in the serialized workspace) - debug_assert!(abs_paths.len() == project_paths.len()); - let tasks = abs_paths + debug_assert!(paths_to_open.len() == project_paths.len()); + let tasks = paths_to_open .iter() .cloned() .zip(project_paths.into_iter()) @@ -1327,7 +1331,6 @@ impl Workspace { pub fn add_item(&mut self, item: Box, cx: &mut ViewContext) { let active_pane = self.active_pane().clone(); Pane::add_item(self, &active_pane, item, true, true, None, cx); - self.serialize_workspace(cx); } pub fn open_path( @@ -1532,10 +1535,11 @@ impl Workspace { entry.remove(); } } - self.serialize_workspace(cx); } _ => {} } + + self.serialize_workspace(cx); } else if self.dock.visible_pane().is_none() { error!("pane {} not found", pane_id); } @@ -2342,9 +2346,7 @@ impl Workspace { }; cx.background() - .spawn(async move { - persistence::DB.save_workspace(&serialized_workspace); - }) + .spawn(persistence::DB.save_workspace(serialized_workspace)) .detach(); } @@ -2642,9 +2644,13 @@ pub fn open_paths( fn open_new(app_state: &Arc, cx: &mut MutableAppContext) -> Task<()> { let task = Workspace::new_local(Vec::new(), app_state.clone(), cx); cx.spawn(|mut cx| async move { - let (workspace, _) = task.await; + let (workspace, opened_paths) = task.await; - workspace.update(&mut cx, |_, cx| cx.dispatch_action(NewFile)) + workspace.update(&mut cx, |_, cx| { + if opened_paths.is_empty() { + cx.dispatch_action(NewFile); + } + }) }) } @@ -2677,6 +2683,7 @@ mod tests { let (_, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), default_item_factory, cx, @@ -2748,6 +2755,7 @@ mod tests { let (window_id, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), default_item_factory, cx, @@ -2851,6 +2859,7 @@ mod tests { let (window_id, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), default_item_factory, cx, @@ -2895,8 +2904,9 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, None, cx).await; - let (window_id, workspace) = cx - .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), 0, project, default_item_factory, cx) + }); let item1 = cx.add_view(&workspace, |_| { let mut item = TestItem::new(); @@ -2991,8 +3001,9 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, [], cx).await; - let (window_id, workspace) = cx - .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), 0, project, default_item_factory, cx) + }); // Create several workspace items with single project entries, and two // workspace items with multiple project entries. 
@@ -3093,8 +3104,9 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, [], cx).await; - let (window_id, workspace) = cx - .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), 0, project, default_item_factory, cx) + }); let item = cx.add_view(&workspace, |_| { let mut item = TestItem::new(); @@ -3211,8 +3223,9 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, [], cx).await; - let (_, workspace) = cx - .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), 0, project, default_item_factory, cx) + }); let item = cx.add_view(&workspace, |_| { let mut item = TestItem::new(); diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 0abcbeac48..3693a5e580 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -809,7 +809,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let entries = cx.read(|cx| workspace.file_project_paths(cx)); @@ -930,7 +930,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/dir1".as_ref()], cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); // Open a file within an existing worktree. @@ -1091,7 +1091,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; let (window_id, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); // Open a file within an existing worktree. 
@@ -1135,7 +1135,7 @@ mod tests {
         let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await;
         project.update(cx, |project, _| project.languages().add(rust_lang()));
         let (window_id, workspace) = cx.add_window(|cx| {
-            Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx)
+            Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx)
         });
         let worktree = cx.read(|cx| workspace.read(cx).worktrees(cx).next().unwrap());
@@ -1226,7 +1226,7 @@ mod tests {
         let project = Project::test(app_state.fs.clone(), [], cx).await;
         project.update(cx, |project, _| project.languages().add(rust_lang()));
         let (window_id, workspace) = cx.add_window(|cx| {
-            Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx)
+            Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx)
         });
 
         // Create a new untitled buffer
@@ -1281,7 +1281,7 @@ mod tests {
         let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await;
 
         let (window_id, workspace) = cx.add_window(|cx| {
-            Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx)
+            Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx)
         });
 
         let entries = cx.read(|cx| workspace.file_project_paths(cx));
@@ -1359,6 +1359,7 @@ mod tests {
         let (_, workspace) = cx.add_window(|cx| {
             Workspace::new(
                 Default::default(),
+                0,
                 project.clone(),
                 |_, _| unimplemented!(),
                 cx,
@@ -1630,6 +1631,7 @@ mod tests {
         let (_, workspace) = cx.add_window(|cx| {
             Workspace::new(
                 Default::default(),
+                0,
                 project.clone(),
                 |_, _| unimplemented!(),
                 cx,

From 359b8aaf47573473cce3334d7af48d01eac972df Mon Sep 17 00:00:00 2001
From: Kay Simmons
Date: Thu, 24 Nov 2022 00:02:07 -0800
Subject: [PATCH 176/240] rename sql_method to query and adjust the syntax to
 more closely match function definitions

---
 crates/auto_update/src/auto_update.rs |  11 +-
 crates/client/src/telemetry.rs        |   4 +-
 crates/db/src/db.rs                   | 152 +++++++++++++++++---------
 crates/db/src/kvp.rs                  |  76 +++++++------
 crates/editor/src/persistence.rs      |  24 ++--
 crates/terminal/src/persistence.rs    |  42 +++----
 crates/workspace/src/persistence.rs   |   7 +-
 7 files changed, 192 insertions(+), 124 deletions(-)

diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs
index 2a8d2fcf05..d3fcc36c2f 100644
--- a/crates/auto_update/src/auto_update.rs
+++ b/crates/auto_update/src/auto_update.rs
@@ -297,9 +297,16 @@ impl AutoUpdater {
     ) -> Task<Result<()>> {
         cx.background().spawn(async move {
             if should_show {
-                KEY_VALUE_STORE.write_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY, "")?;
+                KEY_VALUE_STORE
+                    .write_kvp(
+                        SHOULD_SHOW_UPDATE_NOTIFICATION_KEY.to_string(),
+                        "".to_string(),
+                    )
+                    .await?;
             } else {
-                KEY_VALUE_STORE.delete_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?;
+                KEY_VALUE_STORE
+                    .delete_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY.to_string())
+                    .await?;
             }
             Ok(())
         })
diff --git a/crates/client/src/telemetry.rs b/crates/client/src/telemetry.rs
index 0ce1a07f1b..a81f33c604 100644
--- a/crates/client/src/telemetry.rs
+++ b/crates/client/src/telemetry.rs
@@ -157,7 +157,9 @@ impl Telemetry {
             device_id
         } else {
             let device_id = Uuid::new_v4().to_string();
-            KEY_VALUE_STORE.write_kvp("device_id", &device_id)?;
+            KEY_VALUE_STORE
+                .write_kvp("device_id".to_string(), device_id.clone())
+                .await?;
             device_id
         };
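The rename is easiest to see at a call site. A hedged before/after sketch using the editor save_path query that this patch migrates (both forms appear verbatim in the persistence hunks below):

    // Before: sql_method!, with the name and arguments separated from the SQL by a colon.
    sql_method! {
        async save_path(item_id: ItemId, workspace_id: WorkspaceId, path: PathBuf) -> Result<()>:
            indoc! {"
                INSERT OR REPLACE INTO editors(item_id, workspace_id, path)
                VALUES (?, ?, ?)"}
    }

    // After: query!, which reads like an ordinary (optionally pub, optionally async)
    // function definition whose body is the SQL statement.
    query! {
        pub async fn save_path(item_id: ItemId, workspace_id: WorkspaceId, path: PathBuf) -> Result<()> {
            indoc! {"
                INSERT OR REPLACE INTO editors(item_id, workspace_id, path)
                VALUES (?, ?, ?)"}
        }
    }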
diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs
index b42b264b56..1da51ef867 100644
--- a/crates/db/src/db.rs
+++ b/crates/db/src/db.rs
@@ -75,45 +75,59 @@ macro_rules! connection {
 }
 
 #[macro_export]
-macro_rules! sql_method {
-    ($id:ident() -> Result<()>: $sql:expr) => {
-        pub fn $id(&self) -> $crate::anyhow::Result<()> {
+macro_rules! query {
+    ($vis:vis fn $id:ident() -> Result<()> { $sql:expr }) => {
+        $vis fn $id(&self) -> $crate::anyhow::Result<()> {
             use $crate::anyhow::Context;
 
             self.exec($sql)?().context(::std::format!(
                 "Error in {}, exec failed to execute or parse for: {}",
                 ::std::stringify!($id),
-                ::std::stringify!($sql),
+                $sql,
             ))
         }
     };
-    (async $id:ident() -> Result<()>: $sql:expr) => {
-        pub async fn $id(&self) -> $crate::anyhow::Result<()> {
+    ($vis:vis async fn $id:ident() -> Result<()> { $sql:expr }) => {
+        $vis async fn $id(&self) -> $crate::anyhow::Result<()> {
            use $crate::anyhow::Context;
 
            self.write(|connection| {
                connection.exec($sql)?().context(::std::format!(
                    "Error in {}, exec failed to execute or parse for: {}",
                    ::std::stringify!($id),
-                   ::std::stringify!($sql),
+                   $sql,
                ))
            }).await
        }
    };
-    ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:expr) => {
-        pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> {
+    ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $sql:expr }) => {
+        $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> {
            use $crate::anyhow::Context;
 
            self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+))
                .context(::std::format!(
                    "Error in {}, exec_bound failed to execute or parse for: {}",
                    ::std::stringify!($id),
-                   ::std::stringify!($sql),
+                   $sql,
                ))
        }
    };
-    (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:expr) => {
-        pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> {
+    ($vis:vis async fn $id:ident($arg:ident: $arg_type:ty) -> Result<()> { $sql:expr }) => {
+        $vis async fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<()> {
+            use $crate::anyhow::Context;
+
+            self.write(move |connection| {
+                connection.exec_bound::<$arg_type>($sql)?($arg)
+                    .context(::std::format!(
+                        "Error in {}, exec_bound failed to execute or parse for: {}",
+                        ::std::stringify!($id),
+                        $sql,
+                    ))
+            }).await
+        }
+    };
+    ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $sql:expr }) => {
+        $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> {
            use $crate::anyhow::Context;
 
            self.write(move |connection| {
@@ -121,24 +135,24 @@ macro_rules! sql_method {
                    .context(::std::format!(
                        "Error in {}, exec_bound failed to execute or parse for: {}",
                        ::std::stringify!($id),
-                       ::std::stringify!($sql),
+                       $sql,
                    ))
            }).await
        }
    };
-    ($id:ident() -> Result<Vec<$return_type:ty>>: $sql:expr) => {
-        pub fn $id(&self) -> $crate::anyhow::Result<Vec<$return_type>> {
+    ($vis:vis fn $id:ident() -> Result<Vec<$return_type:ty>> { $sql:expr }) => {
+        $vis fn $id(&self) -> $crate::anyhow::Result<Vec<$return_type>> {
            use $crate::anyhow::Context;
 
            self.select::<$return_type>($sql)?(())
                .context(::std::format!(
                    "Error in {}, select_row failed to execute or parse for: {}",
                    ::std::stringify!($id),
-                   ::std::stringify!($sql),
+                   $sql,
                ))
        }
    };
-    (async $id:ident() -> Result<Vec<$return_type:ty>>: $sql:expr) => {
+    ($vis:vis async fn $id:ident() -> Result<Vec<$return_type:ty>> { $sql:expr }) => {
        pub async fn $id(&self) -> $crate::anyhow::Result<Vec<$return_type>> {
            use $crate::anyhow::Context;
 
            self.write(|connection| {
@@ -147,25 +161,25 @@ macro_rules! sql_method {
                connection.select::<$return_type>($sql)?(())
                    .context(::std::format!(
                        "Error in {}, select_row failed to execute or parse for: {}",
                        ::std::stringify!($id),
-                       ::std::stringify!($sql),
+                       $sql,
                    ))
            }).await
        }
    };
-    ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<Vec<$return_type:ty>>: $sql:expr) => {
-        pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Vec<$return_type>> {
+    ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Vec<$return_type:ty>> { $sql:expr }) => {
+        $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Vec<$return_type>> {
            use $crate::anyhow::Context;
 
            self.select_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+))
                .context(::std::format!(
                    "Error in {}, exec_bound failed to execute or parse for: {}",
                    ::std::stringify!($id),
-                   ::std::stringify!($sql),
+                   $sql,
                ))
        }
    };
-    (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Vec<$return_type:ty>>: $sql:expr) => {
-        pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Vec<$return_type>> {
+    ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Vec<$return_type:ty>> { $sql:expr }) => {
+        $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Vec<$return_type>> {
            use $crate::anyhow::Context;
 
            self.write(|connection| {
                connection.select_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+))
                    .context(::std::format!(
                        "Error in {}, exec_bound failed to execute or parse for: {}",
                        ::std::stringify!($id),
-                       ::std::stringify!($sql),
+                       $sql,
                    ))
            }).await
        }
    };
-    ($id:ident() -> Result<Option<$return_type:ty>>: $sql:expr) => {
-        pub fn $id(&self) -> $crate::anyhow::Result<Option<$return_type>> {
+    ($vis:vis fn $id:ident() -> Result<Option<$return_type:ty>> { $sql:expr }) => {
+        $vis fn $id(&self) -> $crate::anyhow::Result<Option<$return_type>> {
            use $crate::anyhow::Context;
 
            self.select_row::<$return_type>($sql)?()
                .context(::std::format!(
                    "Error in {}, select_row failed to execute or parse for: {}",
                    ::std::stringify!($id),
-                   ::std::stringify!($sql),
+                   $sql,
                ))
        }
    };
-    (async $id:ident() -> Result<Option<$return_type:ty>>: $sql:expr) => {
-        pub async fn $id(&self) -> $crate::anyhow::Result<Option<$return_type>> {
+    ($vis:vis async fn $id:ident() -> Result<Option<$return_type:ty>> { $sql:expr }) => {
+        $vis async fn $id(&self) -> $crate::anyhow::Result<Option<$return_type>> {
            use $crate::anyhow::Context;
 
            self.write(|connection| {
@@ -199,57 +213,70 @@ macro_rules! sql_method {
                connection.select_row::<$return_type>($sql)?()
                    .context(::std::format!(
                        "Error in {}, select_row failed to execute or parse for: {}",
                        ::std::stringify!($id),
-                       ::std::stringify!($sql),
+                       $sql,
                    ))
            }).await
        }
    };
+    ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result<Option<$return_type:ty>> { $sql:expr }) => {
+        $vis fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<Option<$return_type>> {
+            use $crate::anyhow::Context;
+
+            self.select_row_bound::<$arg_type, $return_type>($sql)?($arg)
+                .context(::std::format!(
+                    "Error in {}, select_row_bound failed to execute or parse for: {}",
+                    ::std::stringify!($id),
+                    $sql,
+                ))
+
+        }
+    };
-    ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<Option<$return_type:ty>>: $sql:expr) => {
-        pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Option<$return_type>> {
+    ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Option<$return_type:ty>> { $sql:expr }) => {
+        $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Option<$return_type>> {
            use $crate::anyhow::Context;
 
            self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+))
                .context(::std::format!(
                    "Error in {}, select_row_bound failed to execute or parse for: {}",
                    ::std::stringify!($id),
-                   ::std::stringify!($sql),
+                   $sql,
                ))
        }
    };
-    (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Option<$return_type:ty>>: $sql:expr) => {
-        pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Option<$return_type>> {
+    ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Option<$return_type:ty>> { $sql:expr }) => {
+        $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Option<$return_type>> {
            use $crate::anyhow::Context;
 
            self.write(|connection| {
-               connection.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+))
+               connection.select_row_bound::<($($arg_type),+), $return_type>(indoc! { $sql })?(($($arg),+))
                    .context(::std::format!(
                        "Error in {}, select_row_bound failed to execute or parse for: {}",
                        ::std::stringify!($id),
-                       ::std::stringify!($sql),
+                       $sql,
                    ))
            }).await
        }
    };
-    ($id:ident() -> Result<$return_type:ty>: $sql:expr) => {
-        pub fn $id(&self) -> $crate::anyhow::Result<$return_type> {
+    ($vis:vis fn $id:ident() -> Result<$return_type:ty> { $sql:expr }) => {
+        $vis fn $id(&self) -> $crate::anyhow::Result<$return_type> {
            use $crate::anyhow::Context;
 
-           self.select_row::<$return_type>($sql)?()
+           self.select_row::<$return_type>(indoc! { $sql })?()
                .context(::std::format!(
                    "Error in {}, select_row_bound failed to execute or parse for: {}",
                    ::std::stringify!($id),
-                   ::std::stringify!($sql),
+                   $sql,
                ))?
                .context(::std::format!(
                    "Error in {}, select_row_bound expected single row result but found none for: {}",
                    ::std::stringify!($id),
-                   ::std::stringify!($sql),
+                   $sql,
                ))
        }
    };
-    (async $id:ident() -> Result<$return_type:ty>: $sql:expr) => {
-        pub async fn $id(&self) -> $crate::anyhow::Result<$return_type> {
+    ($vis:vis async fn $id:ident() -> Result<$return_type:ty> { $sql:expr }) => {
+        $vis async fn $id(&self) -> $crate::anyhow::Result<$return_type> {
            use $crate::anyhow::Context;
 
            self.write(|connection| {
@@ -257,35 +284,52 @@ macro_rules! sql_method {
                connection.select_row::<$return_type>($sql)?()
                    .context(::std::format!(
                        "Error in {}, select_row_bound failed to execute or parse for: {}",
                        ::std::stringify!($id),
-                       ::std::stringify!($sql),
+                       $sql,
                    ))?
                    .context(::std::format!(
                        "Error in {}, select_row_bound expected single row result but found none for: {}",
                        ::std::stringify!($id),
-                       ::std::stringify!($sql),
+                       $sql,
                    ))
            }).await
        }
    };
+    ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result<$return_type:ty> { $sql:expr }) => {
+        pub fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<$return_type> {
+            use $crate::anyhow::Context;
+
+            self.select_row_bound::<$arg_type, $return_type>($sql)?($arg)
+                .context(::std::format!(
+                    "Error in {}, select_row_bound failed to execute or parse for: {}",
+                    ::std::stringify!($id),
+                    $sql,
+                ))?
+                .context(::std::format!(
+                    "Error in {}, select_row_bound expected single row result but found none for: {}",
+                    ::std::stringify!($id),
+                    $sql,
+                ))
+        }
+    };
-    ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty>: $sql:expr) => {
-        pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> {
+    ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $sql:expr }) => {
+        $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> {
            use $crate::anyhow::Context;
 
            self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+))
                .context(::std::format!(
                    "Error in {}, select_row_bound failed to execute or parse for: {}",
                    ::std::stringify!($id),
-                   ::std::stringify!($sql),
+                   $sql,
                ))?
                .context(::std::format!(
                    "Error in {}, select_row_bound expected single row result but found none for: {}",
                    ::std::stringify!($id),
-                   ::std::stringify!($sql),
+                   $sql,
                ))
        }
    };
-    (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty>: $sql:expr) => {
-        pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> {
+    ($vis:vis fn async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $sql:expr }) => {
+        $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> {
            use $crate::anyhow::Context;
 
            self.write(|connection| {
@@ -293,12 +337,12 @@ macro_rules! sql_method {
                connection.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+))
                    .context(::std::format!(
                        "Error in {}, select_row_bound failed to execute or parse for: {}",
                        ::std::stringify!($id),
-                       ::std::stringify!($sql),
+                       $sql,
                    ))?
                    .context(::std::format!(
                        "Error in {}, select_row_bound expected single row result but found none for: {}",
                        ::std::stringify!($id),
-                       ::std::stringify!($sql),
+                       $sql,
                    ))
            }).await
        }
    };
}
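For orientation, a rough sketch of what one query! arm expands to, here the single-argument Option arm as used by read_kvp below (error strings abbreviated; the real macro emits the full ::std::format! context messages):

    // Approximate expansion, under the assumptions stated above.
    pub fn read_kvp(&self, key: &str) -> anyhow::Result<Option<String>> {
        use anyhow::Context;
        self.select_row_bound::<&str, String>("SELECT value FROM kv_store WHERE key = (?)")?(key)
            .context("Error in read_kvp, select_row_bound failed to execute or parse for: ...")
    }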
diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs
index dd82c17615..1763ed964c 100644
--- a/crates/db/src/kvp.rs
+++ b/crates/db/src/kvp.rs
@@ -1,16 +1,26 @@
-use anyhow::Result;
 use indoc::indoc;
 use sqlez::{domain::Domain, thread_safe_connection::ThreadSafeConnection};
-use std::ops::Deref;
 
-lazy_static::lazy_static! {
-    pub static ref KEY_VALUE_STORE: KeyValueStore =
-        KeyValueStore(crate::open_file_db());
+use crate::{open_file_db, open_memory_db, query};
+
+pub struct KeyValueStore(ThreadSafeConnection<KeyValueStore>);
+
+impl std::ops::Deref for KeyValueStore {
+    type Target = ThreadSafeConnection<KeyValueStore>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
 }
 
-#[derive(Clone)]
-pub struct KeyValueStore(ThreadSafeConnection<KeyValueStore>);
+lazy_static::lazy_static! {
+    pub static ref KEY_VALUE_STORE: KeyValueStore = KeyValueStore(if cfg!(any(test, feature = "test-support")) {
+        open_memory_db(stringify!($id))
+    } else {
+        open_file_db()
+    });
+}
 
 impl Domain for KeyValueStore {
     fn name() -> &'static str {
@@ -27,56 +37,52 @@ impl Domain for KeyValueStore {
     }
 }
 
-impl Deref for KeyValueStore {
-    type Target = ThreadSafeConnection<KeyValueStore>;
-
-    fn deref(&self) -> &Self::Target {
-        &self.0
-    }
-}
-
 impl KeyValueStore {
-    pub fn read_kvp(&self, key: &str) -> Result<Option<String>> {
-        self.select_row_bound("SELECT value FROM kv_store WHERE key = (?)")?(key)
+    query! {
+        pub fn read_kvp(key: &str) -> Result<Option<String>> {
+            "SELECT value FROM kv_store WHERE key = (?)"
+        }
     }
 
-    pub fn write_kvp(&self, key: &str, value: &str) -> Result<()> {
-        self.exec_bound("INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))")?((
-            key, value,
-        ))?;
-
-        Ok(())
+    query! {
+        pub async fn write_kvp(key: String, value: String) -> Result<()> {
+            "INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))"
+        }
     }
 
-    pub fn delete_kvp(&self, key: &str) -> Result<()> {
-        self.exec_bound("DELETE FROM kv_store WHERE key = (?)")?(key)
+    query! {
+        pub async fn delete_kvp(key: String) -> Result<()> {
+            "DELETE FROM kv_store WHERE key = (?)"
+        }
     }
 }
 
 #[cfg(test)]
 mod tests {
-    use anyhow::Result;
-
     use crate::kvp::KeyValueStore;
 
-    #[test]
-    fn test_kvp() -> Result<()> {
+    #[gpui::test]
+    async fn test_kvp() {
         let db = KeyValueStore(crate::open_memory_db("test_kvp"));
 
         assert_eq!(db.read_kvp("key-1").unwrap(), None);
 
-        db.write_kvp("key-1", "one").unwrap();
+        db.write_kvp("key-1".to_string(), "one".to_string())
+            .await
+            .unwrap();
         assert_eq!(db.read_kvp("key-1").unwrap(), Some("one".to_string()));
 
-        db.write_kvp("key-1", "one-2").unwrap();
+        db.write_kvp("key-1".to_string(), "one-2".to_string())
+            .await
+            .unwrap();
         assert_eq!(db.read_kvp("key-1").unwrap(), Some("one-2".to_string()));
 
-        db.write_kvp("key-2", "two").unwrap();
+        db.write_kvp("key-2".to_string(), "two".to_string())
+            .await
+            .unwrap();
         assert_eq!(db.read_kvp("key-2").unwrap(), Some("two".to_string()));
 
-        db.delete_kvp("key-1").unwrap();
+        db.delete_kvp("key-1".to_string()).await.unwrap();
         assert_eq!(db.read_kvp("key-1").unwrap(), None);
-
-        Ok(())
     }
 }
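Call sites stay method-like; the async writers run their statement on the connection's background writer via self.write, which is why the auto_update and telemetry hunks above gained .await. A hedged usage sketch, reusing the telemetry key:

    KEY_VALUE_STORE
        .write_kvp("device_id".to_string(), device_id.clone())
        .await?;
    let stored = KEY_VALUE_STORE.read_kvp("device_id")?;
    KEY_VALUE_STORE.delete_kvp("device_id".to_string()).await?;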
diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs
index b2f76294aa..548be88c80 100644
--- a/crates/editor/src/persistence.rs
+++ b/crates/editor/src/persistence.rs
@@ -1,6 +1,6 @@
 use std::path::PathBuf;
 
-use db::{connection, sql_method};
+use db::{connection, query};
 use indoc::indoc;
 use sqlez::domain::Domain;
 use workspace::{ItemId, Workspace, WorkspaceId};
@@ -31,17 +31,21 @@ impl Domain for Editor {
 }
 
 impl EditorDb {
-    sql_method! {
-        get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result<PathBuf>:
-            indoc! {"
-                SELECT path FROM editors
-                WHERE item_id = ? AND workspace_id = ?"}
+    query! {
+        pub fn get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result<PathBuf> {
+            indoc!{"
+                SELECT path FROM editors
+                WHERE item_id = ? AND workspace_id = ?
+            "}
+        }
     }
 
-    sql_method! {
-        async save_path(item_id: ItemId, workspace_id: WorkspaceId, path: PathBuf) -> Result<()>:
-            indoc! {"
+    query! {
+        pub async fn save_path(item_id: ItemId, workspace_id: WorkspaceId, path: PathBuf) -> Result<()> {
+            indoc!{"
                 INSERT OR REPLACE INTO editors(item_id, workspace_id, path)
-                VALUES (?, ?, ?)"}
+                VALUES (?, ?, ?)
+            "}
+        }
     }
 }
diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs
index 1e9b846f38..5fb7758bec 100644
--- a/crates/terminal/src/persistence.rs
+++ b/crates/terminal/src/persistence.rs
@@ -1,6 +1,6 @@
 use std::path::PathBuf;
 
-use db::{connection, indoc, sql_method, sqlez::domain::Domain};
+use db::{connection, indoc, query, sqlez::domain::Domain};
 
 use workspace::{ItemId, Workspace, WorkspaceId};
 
@@ -28,36 +28,40 @@ impl Domain for Terminal {
 }
 
 impl TerminalDb {
-    sql_method! {
-        async update_workspace_id(
+    query! {
+        pub async fn update_workspace_id(
             new_id: WorkspaceId,
             old_id: WorkspaceId,
             item_id: ItemId
-        ) -> Result<()>:
-            indoc! {"
-                UPDATE terminals
-                SET workspace_id = ?
-                WHERE workspace_id = ? AND item_id = ?
-            "}
+        ) -> Result<()> {
+            indoc!{"
+                UPDATE terminals
+                SET workspace_id = ?
+                WHERE workspace_id = ? AND item_id = ?
+            "}
+        }
     }
 
-    sql_method! {
-        async save_working_directory(
+    query! {
+        pub async fn save_working_directory(
             item_id: ItemId,
             workspace_id: WorkspaceId,
-            working_directory: PathBuf) -> Result<()>:
-            indoc!{"
-                INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory)
-                VALUES (?1, ?2, ?3)
-            "}
+            working_directory: PathBuf
+        ) -> Result<()> {
+            indoc!{"
+                INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory)
+                VALUES (?1, ?2, ?3)
+            "}
+        }
     }
 
-    sql_method! {
-        get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> Result<Option<PathBuf>>:
+    query! {
+        pub fn get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> Result<Option<PathBuf>> {
            indoc!{"
                SELECT working_directory
-               FROM terminals
+               FROM terminals
                WHERE item_id = ? AND workspace_id = ?
            "}
+        }
    }
 }
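Usage mirrors the editor queries. A hedged sketch (TERMINAL_DB is illustrative; the real static is whatever the connection! macro defines for this domain):

    // Persist and later restore a terminal's working directory.
    TERMINAL_DB
        .save_working_directory(item_id, workspace_id, working_directory)
        .await?;
    let restored = TERMINAL_DB.get_working_directory(item_id, workspace_id)?;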
diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs
index 66b3622119..17b0aad13f 100644
--- a/crates/workspace/src/persistence.rs
+++ b/crates/workspace/src/persistence.rs
@@ -5,7 +5,7 @@ pub mod model;
 use std::path::Path;
 
 use anyhow::{anyhow, bail, Context, Result};
-use db::{connection, sql_method, sqlez::connection::Connection};
+use db::{connection, query, sqlez::connection::Connection};
 use gpui::Axis;
 use indoc::indoc;
 
@@ -201,9 +201,10 @@ impl WorkspaceDb {
             .await;
     }
 
-    sql_method! {
-        async next_id() -> Result<WorkspaceId>:
+    query! {
+        pub async fn next_id() -> Result<WorkspaceId> {
             "INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id"
+        }
     }
 
     /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots

From 260164a711a0e36dbf3962a7d2e94c5542fa6df6 Mon Sep 17 00:00:00 2001
From: Mikayla Maki
Date: Mon, 28 Nov 2022 12:26:13 -0800
Subject: [PATCH 177/240] Added basic syntax checker to sqlez

---
 crates/sqlez/src/statement.rs | 73 +++++++++++++++++++++++++++++++++++
 1 file changed, 73 insertions(+)

diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs
index 86035f5d0a..f3970827f8 100644
--- a/crates/sqlez/src/statement.rs
+++ b/crates/sqlez/src/statement.rs
@@ -489,3 +489,76 @@ mod test {
         );
     }
 }
+
+mod syntax_check {
+    use std::{
+        ffi::{CStr, CString},
+        ptr,
+    };
+
+    use libsqlite3_sys::{
+        sqlite3_close, sqlite3_errmsg, sqlite3_error_offset, sqlite3_extended_errcode,
+        sqlite3_extended_result_codes, sqlite3_finalize, sqlite3_open_v2, sqlite3_prepare_v2,
+        sqlite3_stmt, SQLITE_OPEN_CREATE, SQLITE_OPEN_NOMUTEX, SQLITE_OPEN_READWRITE,
+    };
+
+    fn syntax_errors(sql: &str) -> Option<(String, i32)> {
+        let mut sqlite3 = 0 as *mut _;
+        let mut raw_statement = 0 as *mut sqlite3_stmt;
+
+        let flags = SQLITE_OPEN_CREATE | SQLITE_OPEN_NOMUTEX | SQLITE_OPEN_READWRITE;
+        unsafe {
+            let memory_str = CString::new(":memory:").unwrap();
+            sqlite3_open_v2(memory_str.as_ptr(), &mut sqlite3, flags, 0 as *const _);
+
+            let sql = CString::new(sql).unwrap();
+
+            // Turn on extended error codes
+            sqlite3_extended_result_codes(sqlite3, 1);
+
+            sqlite3_prepare_v2(
+                sqlite3,
+                sql.as_c_str().as_ptr(),
+                -1,
+                &mut raw_statement,
+                &mut ptr::null(),
+            );
+
+            let res = sqlite3_extended_errcode(sqlite3);
+            let offset = sqlite3_error_offset(sqlite3);
+
+            if res == 1 && offset != -1 {
+                let message = sqlite3_errmsg(sqlite3);
+                let err_msg =
+                    String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes())
+                        .into_owned();
+
+                sqlite3_finalize(*&mut raw_statement);
+                sqlite3_close(sqlite3);
+
+                return Some((err_msg, offset));
+            } else {
+                sqlite3_finalize(*&mut raw_statement);
+                sqlite3_close(sqlite3);
+
+                None
+            }
+        }
+    }
+
+    #[cfg(test)]
+    mod test {
+        use super::syntax_errors;
+
+        #[test]
+        fn test_check_syntax() {
+            assert!(syntax_errors("SELECT FROM").is_some());
+
+            assert!(syntax_errors("SELECT col FROM table_t;").is_none());
+
+            assert!(syntax_errors("CREATE TABLE t(col TEXT,) STRICT;").is_some());
+
+            assert!(syntax_errors("CREATE TABLE t(col TEXT) STRICT;").is_none());
+        }
+    }
+}

From dd9d20be25094aeff536f6149eea03566618e6eb Mon Sep 17 00:00:00 2001
From: Kay Simmons
Date: Mon, 28 Nov 2022 17:42:18 -0800
Subject: [PATCH 178/240] Added sql!
proc macro which checks syntax errors on sql code and displays them with reasonable underline locations Co-Authored-By: Mikayla Maki --- Cargo.lock | 12 ++ Cargo.toml | 2 + crates/db/Cargo.toml | 1 + crates/db/src/db.rs | 163 +++++++++++++-------- crates/db/src/kvp.rs | 21 ++- crates/editor/src/persistence.rs | 21 +-- crates/gpui_macros/Cargo.toml | 1 + crates/sqlez/src/connection.rs | 63 +++++++- crates/sqlez/src/domain.rs | 6 + crates/sqlez/src/statement.rs | 73 --------- crates/sqlez/src/thread_safe_connection.rs | 2 +- crates/sqlez_macros/Cargo.toml | 16 ++ crates/sqlez_macros/src/sqlez_macros.rs | 78 ++++++++++ crates/terminal/src/persistence.rs | 28 ++-- crates/workspace/src/persistence.rs | 66 ++++----- 15 files changed, 342 insertions(+), 211 deletions(-) create mode 100644 crates/sqlez_macros/Cargo.toml create mode 100644 crates/sqlez_macros/src/sqlez_macros.rs diff --git a/Cargo.lock b/Cargo.lock index 150149c529..9e3181575f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1570,6 +1570,7 @@ dependencies = [ "parking_lot 0.11.2", "serde", "sqlez", + "sqlez_macros", "tempdir", "util", ] @@ -5598,6 +5599,17 @@ dependencies = [ "thread_local", ] +[[package]] +name = "sqlez_macros" +version = "0.1.0" +dependencies = [ + "lazy_static", + "proc-macro2", + "quote", + "sqlez", + "syn", +] + [[package]] name = "sqlformat" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index a97f272e47..c4f54d6a90 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,6 +45,8 @@ members = [ "crates/search", "crates/settings", "crates/snippet", + "crates/sqlez", + "crates/sqlez_macros", "crates/sum_tree", "crates/terminal", "crates/text", diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 70721c310c..2d88d4ece5 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -14,6 +14,7 @@ test-support = [] collections = { path = "../collections" } gpui = { path = "../gpui" } sqlez = { path = "../sqlez" } +sqlez_macros = { path = "../sqlez_macros" } util = { path = "../util" } anyhow = "1.0.57" indoc = "1.0.4" diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 1da51ef867..adf6f5c035 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -5,6 +5,7 @@ pub use anyhow; pub use indoc::indoc; pub use lazy_static; pub use sqlez; +pub use sqlez_macros; use sqlez::domain::Migrator; use sqlez::thread_safe_connection::ThreadSafeConnection; @@ -76,273 +77,315 @@ macro_rules! connection { #[macro_export] macro_rules! 
query {
-    ($vis:vis fn $id:ident() -> Result<()> { $sql:expr }) => {
+    ($vis:vis fn $id:ident() -> Result<()> { $($sql:tt)+ }) => {
         $vis fn $id(&self) -> $crate::anyhow::Result<()> {
             use $crate::anyhow::Context;
 
-            self.exec($sql)?().context(::std::format!(
+            let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+            self.exec(sql_stmt)?().context(::std::format!(
                 "Error in {}, exec failed to execute or parse for: {}",
                 ::std::stringify!($id),
-                $sql,
+                sql_stmt,
             ))
         }
     };
-    ($vis:vis async fn $id:ident() -> Result<()> { $sql:expr }) => {
+    ($vis:vis async fn $id:ident() -> Result<()> { $($sql:tt)+ }) => {
         $vis async fn $id(&self) -> $crate::anyhow::Result<()> {
             use $crate::anyhow::Context;
 
+
             self.write(|connection| {
-                connection.exec($sql)?().context(::std::format!(
+                let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+                connection.exec(sql_stmt)?().context(::std::format!(
                     "Error in {}, exec failed to execute or parse for: {}",
                     ::std::stringify!($id),
-                    $sql,
+                    sql_stmt
                 ))
             }).await
         }
     };
-    ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $sql:expr }) => {
+    ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $($sql:tt)+ }) => {
         $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> {
             use $crate::anyhow::Context;
 
-            self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+))
+            let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+            self.exec_bound::<($($arg_type),+)>(sql_stmt)?(($($arg),+))
                 .context(::std::format!(
                     "Error in {}, exec_bound failed to execute or parse for: {}",
                     ::std::stringify!($id),
-                    $sql,
+                    sql_stmt
                 ))
         }
     };
-    ($vis:vis async fn $id:ident($arg:ident: $arg_type:ty) -> Result<()> { $sql:expr }) => {
+    ($vis:vis async fn $id:ident($arg:ident: $arg_type:ty) -> Result<()> { $($sql:tt)+ }) => {
         $vis async fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<()> {
             use $crate::anyhow::Context;
 
+
             self.write(move |connection| {
-                connection.exec_bound::<$arg_type>($sql)?($arg)
+                let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+                connection.exec_bound::<$arg_type>(sql_stmt)?($arg)
                     .context(::std::format!(
                         "Error in {}, exec_bound failed to execute or parse for: {}",
                         ::std::stringify!($id),
-                        $sql,
+                        sql_stmt
                     ))
             }).await
         }
     };
-    ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $sql:expr }) => {
+    ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $($sql:tt)+ }) => {
         $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> {
             use $crate::anyhow::Context;
 
             self.write(move |connection| {
-                connection.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+))
+                let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+                connection.exec_bound::<($($arg_type),+)>(sql_stmt)?(($($arg),+))
                     .context(::std::format!(
                         "Error in {}, exec_bound failed to execute or parse for: {}",
                         ::std::stringify!($id),
-                        $sql,
+                        sql_stmt
                     ))
             }).await
         }
     };
-    ($vis:vis fn $id:ident() -> Result<Vec<$return_type:ty>> { $sql:expr }) => {
+    ($vis:vis fn $id:ident() -> Result<Vec<$return_type:ty>> { $($sql:tt)+ }) => {
         $vis fn $id(&self) -> $crate::anyhow::Result<Vec<$return_type>> {
             use $crate::anyhow::Context;
 
-            self.select::<$return_type>($sql)?(())
+            let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+            self.select::<$return_type>(sql_stmt)?(())
                 .context(::std::format!(
                     "Error in {}, select_row failed to execute or parse for: {}",
                     ::std::stringify!($id),
-                    $sql,
+                    sql_stmt
                 ))
         }
     };
-    ($vis:vis async fn $id:ident() -> Result<Vec<$return_type:ty>> { $sql:expr }) => {
+    ($vis:vis async fn $id:ident() -> Result<Vec<$return_type:ty>> { $($sql:tt)+ }) => {
         pub async fn $id(&self) -> $crate::anyhow::Result<Vec<$return_type>> {
             use $crate::anyhow::Context;
 
             self.write(|connection| {
-                connection.select::<$return_type>($sql)?(())
+                let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+                connection.select::<$return_type>(sql_stmt)?(())
                     .context(::std::format!(
                         "Error in {}, select_row failed to execute or parse for: {}",
                         ::std::stringify!($id),
-                        $sql,
+                        sql_stmt
                     ))
             }).await
         }
     };
-    ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Vec<$return_type:ty>> { $sql:expr }) => {
+    ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Vec<$return_type:ty>> { $($sql:tt)+ }) => {
         $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Vec<$return_type>> {
             use $crate::anyhow::Context;
 
-            self.select_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+))
+            let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+            self.select_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+))
                 .context(::std::format!(
                     "Error in {}, exec_bound failed to execute or parse for: {}",
                     ::std::stringify!($id),
-                    $sql,
+                    sql_stmt
                 ))
         }
     };
-    ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Vec<$return_type:ty>> { $sql:expr }) => {
+    ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Vec<$return_type:ty>> { $($sql:tt)+ }) => {
         $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Vec<$return_type>> {
             use $crate::anyhow::Context;
 
             self.write(|connection| {
-                connection.select_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+))
+                let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+                connection.select_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+))
                     .context(::std::format!(
                         "Error in {}, exec_bound failed to execute or parse for: {}",
                         ::std::stringify!($id),
-                        $sql,
+                        sql_stmt
                     ))
             }).await
         }
     };
-    ($vis:vis fn $id:ident() -> Result<Option<$return_type:ty>> { $sql:expr }) => {
+    ($vis:vis fn $id:ident() -> Result<Option<$return_type:ty>> { $($sql:tt)+ }) => {
         $vis fn $id(&self) -> $crate::anyhow::Result<Option<$return_type>> {
             use $crate::anyhow::Context;
 
-            self.select_row::<$return_type>($sql)?()
+            let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+            self.select_row::<$return_type>(sql_stmt)?()
                 .context(::std::format!(
                     "Error in {}, select_row failed to execute or parse for: {}",
                     ::std::stringify!($id),
-                    $sql,
+                    sql_stmt
                 ))
         }
     };
-    ($vis:vis async fn $id:ident() -> Result<Option<$return_type:ty>> { $sql:expr }) => {
+    ($vis:vis async fn $id:ident() -> Result<Option<$return_type:ty>> { $($sql:tt)+ }) => {
         $vis async fn $id(&self) -> $crate::anyhow::Result<Option<$return_type>> {
             use $crate::anyhow::Context;
 
             self.write(|connection| {
-                connection.select_row::<$return_type>($sql)?()
+                let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+                connection.select_row::<$return_type>(sql_stmt)?()
                     .context(::std::format!(
                         "Error in {}, select_row failed to execute or parse for: {}",
                         ::std::stringify!($id),
-                        $sql,
+                        sql_stmt
                     ))
             }).await
         }
     };
-    ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result<Option<$return_type:ty>> { $sql:expr }) => {
+    ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result<Option<$return_type:ty>> { $($sql:tt)+ }) => {
         $vis fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<Option<$return_type>> {
             use $crate::anyhow::Context;
 
-            self.select_row_bound::<$arg_type, $return_type>($sql)?($arg)
+            let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+            self.select_row_bound::<$arg_type, $return_type>(sql_stmt)?($arg)
                 .context(::std::format!(
                     "Error in {}, select_row_bound failed to execute or parse for: {}",
                     ::std::stringify!($id),
-                    $sql,
+                    sql_stmt
                 ))
         }
     };
-    ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Option<$return_type:ty>> { $sql:expr }) => {
+    ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Option<$return_type:ty>> { $($sql:tt)+ }) => {
         $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Option<$return_type>> {
             use $crate::anyhow::Context;
 
-            self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+))
+            let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+            self.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+))
                 .context(::std::format!(
                     "Error in {}, select_row_bound failed to execute or parse for: {}",
                     ::std::stringify!($id),
-                    $sql,
+                    sql_stmt
                 ))
         }
     };
-    ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Option<$return_type:ty>> { $sql:expr }) => {
+    ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Option<$return_type:ty>> { $($sql:tt)+ }) => {
         $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Option<$return_type>> {
             use $crate::anyhow::Context;
 
+
             self.write(|connection| {
+                let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
                 connection.select_row_bound::<($($arg_type),+), $return_type>(indoc! { $sql })?(($($arg),+))
                     .context(::std::format!(
                         "Error in {}, select_row_bound failed to execute or parse for: {}",
                         ::std::stringify!($id),
-                        $sql,
+                        sql_stmt
                     ))
             }).await
         }
     };
-    ($vis:vis fn $id:ident() -> Result<$return_type:ty> { $sql:expr }) => {
+    ($vis:vis fn $id:ident() -> Result<$return_type:ty> { $($sql:tt)+ }) => {
         $vis fn $id(&self) -> $crate::anyhow::Result<$return_type> {
             use $crate::anyhow::Context;
 
+            let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
             self.select_row::<$return_type>(indoc! { $sql })?()
                 .context(::std::format!(
                     "Error in {}, select_row_bound failed to execute or parse for: {}",
                     ::std::stringify!($id),
-                    $sql,
+                    sql_stmt
                 ))?
                 .context(::std::format!(
                     "Error in {}, select_row_bound expected single row result but found none for: {}",
                     ::std::stringify!($id),
-                    $sql,
+                    sql_stmt
                 ))
         }
     };
-    ($vis:vis async fn $id:ident() -> Result<$return_type:ty> { $sql:expr }) => {
+    ($vis:vis async fn $id:ident() -> Result<$return_type:ty> { $($sql:tt)+ }) => {
         $vis async fn $id(&self) -> $crate::anyhow::Result<$return_type> {
             use $crate::anyhow::Context;
 
             self.write(|connection| {
-                connection.select_row::<$return_type>($sql)?()
+                let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+                connection.select_row::<$return_type>(sql_stmt)?()
                     .context(::std::format!(
                         "Error in {}, select_row_bound failed to execute or parse for: {}",
                         ::std::stringify!($id),
-                        $sql,
+                        sql_stmt
                     ))?
                     .context(::std::format!(
                         "Error in {}, select_row_bound expected single row result but found none for: {}",
                         ::std::stringify!($id),
-                        $sql,
+                        sql_stmt
                     ))
             }).await
         }
     };
-    ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result<$return_type:ty> { $sql:expr }) => {
+    ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result<$return_type:ty> { $($sql:tt)+ }) => {
         pub fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<$return_type> {
             use $crate::anyhow::Context;
 
-            self.select_row_bound::<$arg_type, $return_type>($sql)?($arg)
+            let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+            self.select_row_bound::<$arg_type, $return_type>(sql_stmt)?($arg)
                 .context(::std::format!(
                     "Error in {}, select_row_bound failed to execute or parse for: {}",
                     ::std::stringify!($id),
-                    $sql,
+                    sql_stmt
                 ))?
                 .context(::std::format!(
                     "Error in {}, select_row_bound expected single row result but found none for: {}",
                     ::std::stringify!($id),
-                    $sql,
+                    sql_stmt
                 ))
         }
     };
-    ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $sql:expr }) => {
+    ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $($sql:tt)+ }) => {
         $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> {
             use $crate::anyhow::Context;
 
-            self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+))
+            let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+            self.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+))
                 .context(::std::format!(
                     "Error in {}, select_row_bound failed to execute or parse for: {}",
                     ::std::stringify!($id),
-                    $sql,
+                    sql_stmt
                 ))?
                 .context(::std::format!(
                     "Error in {}, select_row_bound expected single row result but found none for: {}",
                     ::std::stringify!($id),
-                    $sql,
+                    sql_stmt
                 ))
         }
     };
-    ($vis:vis fn async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $sql:expr }) => {
+    ($vis:vis fn async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $($sql:tt)+ }) => {
         $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> {
             use $crate::anyhow::Context;
 
+
             self.write(|connection| {
-                connection.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+))
+                let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
+
+                connection.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+))
                     .context(::std::format!(
                         "Error in {}, select_row_bound failed to execute or parse for: {}",
                         ::std::stringify!($id),
-                        $sql,
+                        sql_stmt
                     ))?
                     .context(::std::format!(
                         "Error in {}, select_row_bound expected single row result but found none for: {}",
                         ::std::stringify!($id),
-                        $sql,
+                        sql_stmt
                     ))
             }).await
         }
diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs
index 1763ed964c..b3f2a716cb 100644
--- a/crates/db/src/kvp.rs
+++ b/crates/db/src/kvp.rs
@@ -1,6 +1,5 @@
-use indoc::indoc;
-
 use sqlez::{domain::Domain, thread_safe_connection::ThreadSafeConnection};
+use sqlez_macros::sql;
 
 use crate::{open_file_db, open_memory_db, query};
 
@@ -28,31 +27,31 @@ impl Domain for KeyValueStore {
     }
 
     fn migrations() -> &'static [&'static str] {
-        &[indoc! {"
-            CREATE TABLE kv_store(
-                key TEXT PRIMARY KEY,
-                value TEXT NOT NULL
-            ) STRICT;
-        "}]
+        &[sql!(
+            CREATE TABLE kv_store(
+                key TEXT PRIMARY KEY,
+                value TEXT NOT NULL
+            ) STRICT;
+        )]
     }
 }
 
 impl KeyValueStore {
     query! {
         pub fn read_kvp(key: &str) -> Result<Option<String>> {
-            "SELECT value FROM kv_store WHERE key = (?)"
+            SELECT value FROM kv_store WHERE key = (?)
         }
     }
 
     query! {
         pub async fn write_kvp(key: String, value: String) -> Result<()> {
-            "INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))"
+            INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))
         }
     }
 
     query! {
         pub async fn delete_kvp(key: String) -> Result<()> {
-            "DELETE FROM kv_store WHERE key = (?)"
+            DELETE FROM kv_store WHERE key = (?)
        }
    }
}
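Because query! now routes its body through sqlez_macros::sql!, malformed SQL is rejected when the crate compiles rather than the first time the statement runs. A hedged sketch of the two cases (constant names illustrative):

    use sqlez_macros::sql;

    // Compiles: the statement parses, so the macro emits it as a &'static str.
    const GOOD: &str = sql!(SELECT value FROM kv_store WHERE key = (?));

    // Would fail to compile: sqlite reports the syntax error at build time and the
    // proc macro points the compiler diagnostic at the offending token:
    // const BAD: &str = sql!(SELECT FROM kv_store);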
diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs
index 548be88c80..22b0f158c1 100644
--- a/crates/editor/src/persistence.rs
+++ b/crates/editor/src/persistence.rs
@@ -1,12 +1,11 @@
 use std::path::PathBuf;
 
+use crate::Editor;
+use db::sqlez_macros::sql;
 use db::{connection, query};
-use indoc::indoc;
 use sqlez::domain::Domain;
 use workspace::{ItemId, Workspace, WorkspaceId};
 
-use crate::Editor;
-
 connection!(DB: EditorDb<(Workspace, Editor)>);
 
 impl Domain for Editor {
@@ -15,7 +14,7 @@ impl Domain for Editor {
     }
 
     fn migrations() -> &'static [&'static str] {
-        &[indoc! {"
+        &[sql! (
             CREATE TABLE editors(
                 item_id INTEGER NOT NULL,
                 workspace_id INTEGER NOT NULL,
@@ -26,26 +25,22 @@ impl Domain for Editor {
                 ON UPDATE CASCADE
 
             ) STRICT;
-        "}]
+        )]
     }
 }
 
 impl EditorDb {
     query! {
         pub fn get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result<PathBuf> {
-            indoc!{"
-                SELECT path FROM editors
-                WHERE item_id = ? AND workspace_id = ?
-            "}
+            SELECT path FROM editors
+            WHERE item_id = ? AND workspace_id = ?
         }
     }
 
     query! {
         pub async fn save_path(item_id: ItemId, workspace_id: WorkspaceId, path: PathBuf) -> Result<()> {
-            indoc!{"
-                INSERT OR REPLACE INTO editors(item_id, workspace_id, path)
-                VALUES (?, ?, ?)
-            "}
+            INSERT OR REPLACE INTO editors(item_id, workspace_id, path)
+            VALUES (?, ?, ?)
         }
     }
 }
diff --git a/crates/gpui_macros/Cargo.toml b/crates/gpui_macros/Cargo.toml
index d8fc0521cc..e35e0b1d2b 100644
--- a/crates/gpui_macros/Cargo.toml
+++ b/crates/gpui_macros/Cargo.toml
@@ -12,3 +12,4 @@ doctest = false
 syn = "1.0"
 quote = "1.0"
 proc-macro2 = "1.0"
+
diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs
index 4beddb4fed..6d859be23f 100644
--- a/crates/sqlez/src/connection.rs
+++ b/crates/sqlez/src/connection.rs
@@ -2,6 +2,7 @@ use std::{
     ffi::{CStr, CString},
     marker::PhantomData,
     path::Path,
+    ptr,
 };
 
 use anyhow::{anyhow, Result};
@@ -85,6 +86,45 @@ impl Connection {
         self.backup_main(&destination)
     }
 
+    pub fn sql_has_syntax_error(&self, sql: &str) -> Option<(String, usize)> {
+        let sql = CString::new(sql).unwrap();
+        let mut remaining_sql = sql.as_c_str();
+        let sql_start = remaining_sql.as_ptr();
+
+        unsafe {
+            while {
+                let remaining_sql_str = remaining_sql.to_str().unwrap().trim();
+                remaining_sql_str != ";" && !remaining_sql_str.is_empty()
+            } {
+                let mut raw_statement = 0 as *mut sqlite3_stmt;
+                let mut remaining_sql_ptr = ptr::null();
+                sqlite3_prepare_v2(
+                    self.sqlite3,
+                    remaining_sql.as_ptr(),
+                    -1,
+                    &mut raw_statement,
+                    &mut remaining_sql_ptr,
+                );
+
+                let res = sqlite3_errcode(self.sqlite3);
+                let offset = sqlite3_error_offset(self.sqlite3);
+
+                if res == 1 && offset >= 0 {
+                    let message = sqlite3_errmsg(self.sqlite3);
+                    let err_msg =
+                        String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes())
+                            .into_owned();
+                    let sub_statement_correction =
+                        remaining_sql.as_ptr() as usize - sql_start as usize;
+
+                    return Some((err_msg, offset as usize + sub_statement_correction));
+                }
+                remaining_sql = CStr::from_ptr(remaining_sql_ptr);
+            }
+        }
+        None
+    }
+
     pub(crate) fn last_error(&self) -> Result<()> {
         unsafe {
             let code = sqlite3_errcode(self.sqlite3);
@@ -259,10 +299,31 @@ mod test {
 
         assert_eq!(
             connection
-                .select_row::<usize>("SELECt * FROM test")
+                .select_row::<usize>("SELECT * FROM test")
                 .unwrap()()
             .unwrap(),
             Some(2)
         );
     }
+
+    #[test]
+    fn test_sql_has_syntax_errors() {
+        let connection = Connection::open_memory(Some("test_sql_has_syntax_errors"));
+        let first_stmt =
+            "CREATE TABLE kv_store(key TEXT PRIMARY KEY, value TEXT NOT NULL)
STRICT ;"; + let second_stmt = "SELECT FROM"; + + let second_offset = connection.sql_has_syntax_error(second_stmt).unwrap().1; + + let res = connection + .sql_has_syntax_error(&format!("{}\n{}", first_stmt, second_stmt)) + .map(|(_, offset)| offset); + + assert_eq!( + res, + Some(first_stmt.len() + second_offset + 1) // TODO: This value is wrong! + ); + + panic!("{:?}", res) + } } diff --git a/crates/sqlez/src/domain.rs b/crates/sqlez/src/domain.rs index b7cfbaef88..3a477b2bc9 100644 --- a/crates/sqlez/src/domain.rs +++ b/crates/sqlez/src/domain.rs @@ -9,6 +9,12 @@ pub trait Migrator { fn migrate(connection: &Connection) -> anyhow::Result<()>; } +impl Migrator for () { + fn migrate(_connection: &Connection) -> anyhow::Result<()> { + Ok(()) // Do nothing + } +} + impl Migrator for D { fn migrate(connection: &Connection) -> anyhow::Result<()> { connection.migrate(Self::name(), Self::migrations()) diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index f3970827f8..86035f5d0a 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -489,76 +489,3 @@ mod test { ); } } - -mod syntax_check { - use std::{ - ffi::{CStr, CString}, - ptr, - }; - - use libsqlite3_sys::{ - sqlite3_close, sqlite3_errmsg, sqlite3_error_offset, sqlite3_extended_errcode, - sqlite3_extended_result_codes, sqlite3_finalize, sqlite3_open_v2, sqlite3_prepare_v2, - sqlite3_stmt, SQLITE_OPEN_CREATE, SQLITE_OPEN_NOMUTEX, SQLITE_OPEN_READWRITE, - }; - - fn syntax_errors(sql: &str) -> Option<(String, i32)> { - let mut sqlite3 = 0 as *mut _; - let mut raw_statement = 0 as *mut sqlite3_stmt; - - let flags = SQLITE_OPEN_CREATE | SQLITE_OPEN_NOMUTEX | SQLITE_OPEN_READWRITE; - unsafe { - let memory_str = CString::new(":memory:").unwrap(); - sqlite3_open_v2(memory_str.as_ptr(), &mut sqlite3, flags, 0 as *const _); - - let sql = CString::new(sql).unwrap(); - - // Turn on extended error codes - sqlite3_extended_result_codes(sqlite3, 1); - - sqlite3_prepare_v2( - sqlite3, - sql.as_c_str().as_ptr(), - -1, - &mut raw_statement, - &mut ptr::null(), - ); - - let res = sqlite3_extended_errcode(sqlite3); - let offset = sqlite3_error_offset(sqlite3); - - if res == 1 && offset != -1 { - let message = sqlite3_errmsg(sqlite3); - let err_msg = - String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) - .into_owned(); - - sqlite3_finalize(*&mut raw_statement); - sqlite3_close(sqlite3); - - return Some((err_msg, offset)); - } else { - sqlite3_finalize(*&mut raw_statement); - sqlite3_close(sqlite3); - - None - } - } - } - - #[cfg(test)] - mod test { - use super::syntax_errors; - - #[test] - fn test_check_syntax() { - assert!(syntax_errors("SELECT FROM").is_some()); - - assert!(syntax_errors("SELECT col FROM table_t;").is_none()); - - assert!(syntax_errors("CREATE TABLE t(col TEXT,) STRICT;").is_some()); - - assert!(syntax_errors("CREATE TABLE t(col TEXT) STRICT;").is_none()); - } - } -} diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 5402c6b5e1..88199ff0c8 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -17,7 +17,7 @@ lazy_static! 
{ Default::default(); } -pub struct ThreadSafeConnection { +pub struct ThreadSafeConnection { uri: Arc, persistent: bool, initialize_query: Option<&'static str>, diff --git a/crates/sqlez_macros/Cargo.toml b/crates/sqlez_macros/Cargo.toml new file mode 100644 index 0000000000..413a3d30f5 --- /dev/null +++ b/crates/sqlez_macros/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "sqlez_macros" +version = "0.1.0" +edition = "2021" + +[lib] +path = "src/sqlez_macros.rs" +proc-macro = true +doctest = false + +[dependencies] +syn = "1.0" +quote = "1.0" +proc-macro2 = "1.0" +lazy_static = "1.4" +sqlez = { path = "../sqlez" } \ No newline at end of file diff --git a/crates/sqlez_macros/src/sqlez_macros.rs b/crates/sqlez_macros/src/sqlez_macros.rs new file mode 100644 index 0000000000..25249b89b6 --- /dev/null +++ b/crates/sqlez_macros/src/sqlez_macros.rs @@ -0,0 +1,78 @@ +use proc_macro::{Delimiter, Span, TokenStream, TokenTree}; +use sqlez::thread_safe_connection::ThreadSafeConnection; +use syn::Error; + +lazy_static::lazy_static! { + static ref SQLITE: ThreadSafeConnection = ThreadSafeConnection::new(":memory:", false); +} + +#[proc_macro] +pub fn sql(tokens: TokenStream) -> TokenStream { + let mut sql_tokens = vec![]; + flatten_stream(tokens.clone(), &mut sql_tokens); + + // Lookup of spans by offset at the end of the token + let mut spans: Vec<(usize, Span)> = Vec::new(); + let mut sql = String::new(); + for (token_text, span) in sql_tokens { + sql.push_str(&token_text); + spans.push((sql.len(), span)); + } + + let error = SQLITE.sql_has_syntax_error(sql.trim()); + + if let Some((error, error_offset)) = error { + let error_span = spans + .into_iter() + .skip_while(|(offset, _)| offset <= &error_offset) + .map(|(_, span)| span) + .next() + .unwrap_or(Span::call_site()); + + let error_text = format!("Sql Error: {}\nFor Query: {}", error, sql); + TokenStream::from(Error::new(error_span.into(), error_text).into_compile_error()) + } else { + format!("r#\"{}\"#", &sql).parse().unwrap() + } +} + +/// This method exists to normalize the representation of groups +/// to always include spaces between tokens. This is why we don't use the usual .to_string(). +/// This allows our token search in token_at_offset to resolve +/// ambiguity of '(tokens)' vs. 
'( token )', due to sqlite requiring byte offsets
+fn flatten_stream(tokens: TokenStream, result: &mut Vec<(String, Span)>) {
+    for token_tree in tokens.into_iter() {
+        match token_tree {
+            TokenTree::Group(group) => {
+                // push open delimiter
+                result.push((open_delimiter(group.delimiter()), group.span()));
+                // recurse
+                flatten_stream(group.stream(), result);
+                // push close delimiter
+                result.push((close_delimiter(group.delimiter()), group.span()));
+            }
+            TokenTree::Ident(ident) => {
+                result.push((format!("{} ", ident.to_string()), ident.span()));
+            }
+            leaf_tree => result.push((leaf_tree.to_string(), leaf_tree.span())),
+        }
+    }
+}
+
+fn open_delimiter(delimiter: Delimiter) -> String {
+    match delimiter {
+        Delimiter::Parenthesis => "(".to_string(),
+        Delimiter::Brace => "[".to_string(),
+        Delimiter::Bracket => "{".to_string(),
+        Delimiter::None => "".to_string(),
+    }
+}
+
+fn close_delimiter(delimiter: Delimiter) -> String {
+    match delimiter {
+        Delimiter::Parenthesis => ")".to_string(),
+        Delimiter::Brace => "]".to_string(),
+        Delimiter::Bracket => "}".to_string(),
+        Delimiter::None => "".to_string(),
+    }
+}
diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs
index 5fb7758bec..f9cfb6fc01 100644
--- a/crates/terminal/src/persistence.rs
+++ b/crates/terminal/src/persistence.rs
@@ -1,6 +1,6 @@
 use std::path::PathBuf;
 
-use db::{connection, indoc, query, sqlez::domain::Domain};
+use db::{connection, query, sqlez::domain::Domain, sqlez_macros::sql};
 
 use workspace::{ItemId, Workspace, WorkspaceId};
 
@@ -14,7 +14,7 @@ impl Domain for Terminal {
     }
 
     fn migrations() -> &'static [&'static str] {
-        &[indoc! {"
+        &[sql!(
             CREATE TABLE terminals (
                 workspace_id INTEGER,
                 item_id INTEGER UNIQUE,
@@ -23,7 +23,7 @@ impl Domain for Terminal {
                 FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                 ON DELETE CASCADE
             ) STRICT;
-        "}]
+        )]
     }
 }
 
@@ -34,11 +34,9 @@ impl TerminalDb {
             old_id: WorkspaceId,
             item_id: ItemId
         ) -> Result<()> {
-            indoc!{"
-                UPDATE terminals
-                SET workspace_id = ?
-                WHERE workspace_id = ? AND item_id = ?
-            "}
+            UPDATE terminals
+            SET workspace_id = ?
+            WHERE workspace_id = ? AND item_id = ?
         }
     }
 
@@ -48,20 +46,16 @@ impl TerminalDb {
             workspace_id: WorkspaceId,
             working_directory: PathBuf
         ) -> Result<()> {
-            indoc!{"
-                INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory)
-                VALUES (?1, ?2, ?3)
-            "}
+            INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory)
+            VALUES (?, ?, ?)
         }
     }
 
     query! {
         pub fn get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> Result<Option<PathBuf>> {
-            indoc!{"
-                SELECT working_directory
-                FROM terminals
-                WHERE item_id = ? AND workspace_id = ?
-            "}
+            SELECT working_directory
+            FROM terminals
+            WHERE item_id = ? AND workspace_id = ?
         }
     }
 }
diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs
index 17b0aad13f..0d35c19d5d 100644
--- a/crates/workspace/src/persistence.rs
+++ b/crates/workspace/src/persistence.rs
@@ -5,7 +5,7 @@ pub mod model;
 use std::path::Path;
 
 use anyhow::{anyhow, bail, Context, Result};
-use db::{connection, query, sqlez::connection::Connection};
+use db::{connection, query, sqlez::connection::Connection, sqlez_macros::sql};
 use gpui::Axis;
 
 use indoc::indoc;
 
@@ -30,49 +30,49 @@ impl Domain for Workspace {
     }
 
     fn migrations() -> &'static [&'static str] {
-        &[indoc!
{" + &[sql!( CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY, workspace_location BLOB UNIQUE, - dock_visible INTEGER, -- Boolean - dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' - dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet + dock_visible INTEGER, // Boolean + dock_anchor TEXT, // Enum: 'Bottom' / 'Right' / 'Expanded' + dock_pane INTEGER, // NULL indicates that we don't have a dock pane yet timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, FOREIGN KEY(dock_pane) REFERENCES panes(pane_id) ) STRICT; - + CREATE TABLE pane_groups( group_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, - parent_group_id INTEGER, -- NULL indicates that this is a root node - position INTEGER, -- NULL indicates that this is a root node - axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE + parent_group_id INTEGER, // NULL indicates that this is a root node + position INTEGER, // NULL indicates that this is a root node + axis TEXT NOT NULL, // Enum: 'Vertical' / 'Horizontal' + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE ON UPDATE CASCADE, FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; - + CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, - active INTEGER NOT NULL, -- Boolean - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE + active INTEGER NOT NULL, // Boolean + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE ON UPDATE CASCADE ) STRICT; - + CREATE TABLE center_panes( pane_id INTEGER PRIMARY KEY, - parent_group_id INTEGER, -- NULL means that this is a root pane - position INTEGER, -- NULL means that this is a root pane - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) + parent_group_id INTEGER, // NULL means that this is a root pane + position INTEGER, // NULL means that this is a root pane + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE, FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; - + CREATE TABLE items( - item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique + item_id INTEGER NOT NULL, // This is the item's view id, so this is not unique workspace_id INTEGER NOT NULL, pane_id INTEGER NOT NULL, kind TEXT NOT NULL, @@ -84,7 +84,7 @@ impl Domain for Workspace { ON DELETE CASCADE, PRIMARY KEY(item_id, workspace_id) ) STRICT; - "}] + )] } } @@ -158,26 +158,22 @@ impl WorkspaceDb { .context("clearing out old locations")?; // Upsert - conn.exec_bound(indoc! {" + conn.exec_bound(sql!( INSERT INTO workspaces( - workspace_id, - workspace_location, - dock_visible, - dock_anchor, + workspace_id, + workspace_location, + dock_visible, + dock_anchor, timestamp - ) + ) VALUES (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP) ON CONFLICT DO - UPDATE SET + UPDATE SET workspace_location = ?2, dock_visible = ?3, dock_anchor = ?4, timestamp = CURRENT_TIMESTAMP - "})?(( - workspace.id, - &workspace.location, - workspace.dock_position, - )) + ))?((workspace.id, &workspace.location, workspace.dock_position)) .context("Updating workspace")?; // Save center pane group and dock pane @@ -203,7 +199,7 @@ impl WorkspaceDb { query! 
{
        pub async fn next_id() -> Result<WorkspaceId> {
-            "INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id"
+            INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id
        }
    }

From 9cd6894dc56ab414912009d487a81ce6e89e7fbc Mon Sep 17 00:00:00 2001
From: Mikayla Maki
Date: Tue, 29 Nov 2022 12:16:49 -0800
Subject: [PATCH 179/240] Added multi-threading problem test

---
 crates/sqlez/src/connection.rs             | 11 ++----
 crates/sqlez/src/thread_safe_connection.rs | 43 +++++++++++++++++++++-
 test.rs                                    |  0
 3 files changed, 45 insertions(+), 9 deletions(-)
 delete mode 100644 test.rs

diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs
index 6d859be23f..0456266594 100644
--- a/crates/sqlez/src/connection.rs
+++ b/crates/sqlez/src/connection.rs
@@ -108,9 +108,11 @@ impl Connection {
 
                 let res = sqlite3_errcode(self.sqlite3);
                 let offset = sqlite3_error_offset(self.sqlite3);
+                let message = sqlite3_errmsg(self.sqlite3);
+
+                sqlite3_finalize(raw_statement);
 
                 if res == 1 && offset >= 0 {
-                    let message = sqlite3_errmsg(self.sqlite3);
                     let err_msg =
                         String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes())
                             .into_owned();
                     let sub_statement_correction =
                         remaining_sql.as_ptr() as usize - sql_start as usize;
 
                     return Some((err_msg, offset as usize + sub_statement_correction));
                 }
                 remaining_sql = CStr::from_ptr(remaining_sql_ptr);
             }
         }
         None
     }
@@ -319,11 +321,6 @@ mod test {
             .sql_has_syntax_error(&format!("{}\n{}", first_stmt, second_stmt))
             .map(|(_, offset)| offset);
 
-        assert_eq!(
-            res,
-            Some(first_stmt.len() + second_offset + 1) // TODO: This value is wrong!
-        );
-
-        panic!("{:?}", res)
+        assert_eq!(res, Some(first_stmt.len() + second_offset + 1));
     }
 }
diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs
index 88199ff0c8..6c35d1e945 100644
--- a/crates/sqlez/src/thread_safe_connection.rs
+++ b/crates/sqlez/src/thread_safe_connection.rs
@@ -126,7 +126,7 @@ impl ThreadSafeConnection {
                 "Initialize query failed to execute: {}",
                 initialize_query
             ))()
-            .unwrap();
+            .unwrap()
         }
 
         M::migrate(&connection).expect("Migrations failed");
@@ -163,12 +163,51 @@ impl Deref for ThreadSafeConnection {
 
 #[cfg(test)]
 mod test {
-    use std::ops::Deref;
+    use std::{fs, ops::Deref, thread};
 
     use crate::domain::Domain;
 
     use super::ThreadSafeConnection;
 
+    #[test]
+    fn many_initialize_and_migrate_queries_at_once() {
+        let mut handles = vec![];
+
+        enum TestDomain {}
+        impl Domain for TestDomain {
+            fn name() -> &'static str {
+                "test"
+            }
+            fn migrations() -> &'static [&'static str] {
+                &["CREATE TABLE test(col1 TEXT, col2 TEXT) STRICT;"]
+            }
+        }
+
+        for _ in 0..100 {
+            handles.push(thread::spawn(|| {
+                let _ = ThreadSafeConnection::<TestDomain>::new("annoying-test.db", false)
+                    .with_initialize_query(
+                        "
+                        PRAGMA journal_mode=WAL;
+                        PRAGMA synchronous=NORMAL;
+                        PRAGMA busy_timeout=1;
+                        PRAGMA foreign_keys=TRUE;
+                        PRAGMA case_sensitive_like=TRUE;
+                        ",
+                    )
+                    .deref();
+            }));
+        }
+
+        for handle in handles {
+            let _ = handle.join();
+        }
+
+        // fs::remove_file("annoying-test.db").unwrap();
+        // fs::remove_file("annoying-test.db-shm").unwrap();
+        // fs::remove_file("annoying-test.db-wal").unwrap();
+    }
+
     #[test]
     #[should_panic]
     fn wild_zed_lost_failure() {
diff --git a/test.rs b/test.rs
deleted file mode 100644
index e69de29bb2..0000000000
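Patch 180 below addresses the race demonstrated by the test above by funneling every write for a given database uri through one background thread. A minimal sketch of that shape, under stated assumptions (channel type and names are illustrative, not the patch's actual API):

    use std::{collections::HashMap, sync::mpsc, thread};

    type Write = Box<dyn FnOnce() + Send>;

    // One sender per database uri; every connection clone for that uri pushes
    // closures onto the same queue, so writes never interleave across threads.
    fn writer_for(queues: &mut HashMap<String, mpsc::Sender<Write>>, uri: &str) -> mpsc::Sender<Write> {
        queues
            .entry(uri.to_string())
            .or_insert_with(|| {
                let (tx, rx) = mpsc::channel::<Write>();
                thread::spawn(move || {
                    for write in rx {
                        write();
                    }
                });
                tx
            })
            .clone()
    }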
crates/sqlez/src/util.rs | 4 + crates/sqlez_macros/src/sqlez_macros.rs | 2 +- crates/workspace/src/persistence.rs | 14 +- crates/workspace/src/workspace.rs | 17 +- crates/zed/src/zed.rs | 5 +- 12 files changed, 196 insertions(+), 124 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9e3181575f..fd1bb4ea0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1569,6 +1569,7 @@ dependencies = [ "log", "parking_lot 0.11.2", "serde", + "smol", "sqlez", "sqlez_macros", "tempdir", @@ -5596,6 +5597,7 @@ dependencies = [ "lazy_static", "libsqlite3-sys", "parking_lot 0.11.2", + "smol", "thread_local", ] diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 2d88d4ece5..69c90e02f9 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -23,6 +23,7 @@ lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" serde = { version = "1.0", features = ["derive"] } +smol = "1.2" [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index adf6f5c035..701aa57656 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -4,31 +4,36 @@ pub mod kvp; pub use anyhow; pub use indoc::indoc; pub use lazy_static; +pub use smol; pub use sqlez; pub use sqlez_macros; use sqlez::domain::Migrator; use sqlez::thread_safe_connection::ThreadSafeConnection; +use sqlez_macros::sql; use std::fs::{create_dir_all, remove_dir_all}; use std::path::Path; use std::sync::atomic::{AtomicBool, Ordering}; use util::channel::{ReleaseChannel, RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; use util::paths::DB_DIR; -const INITIALIZE_QUERY: &'static str = indoc! {" - PRAGMA journal_mode=WAL; +const CONNECTION_INITIALIZE_QUERY: &'static str = sql!( PRAGMA synchronous=NORMAL; PRAGMA busy_timeout=1; PRAGMA foreign_keys=TRUE; PRAGMA case_sensitive_like=TRUE; -"}; +); + +const DB_INITIALIZE_QUERY: &'static str = sql!( + PRAGMA journal_mode=WAL; +); lazy_static::lazy_static! { static ref DB_WIPED: AtomicBool = AtomicBool::new(false); } /// Open or create a database at the given directory path. -pub fn open_file_db() -> ThreadSafeConnection { +pub async fn open_file_db() -> ThreadSafeConnection { // Use 0 for now. Will implement incrementing and clearing of old db files soon TM let current_db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); @@ -43,12 +48,19 @@ pub fn open_file_db() -> ThreadSafeConnection { create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); let db_path = current_db_dir.join(Path::new("db.sqlite")); - ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) - .with_initialize_query(INITIALIZE_QUERY) + ThreadSafeConnection::::builder(db_path.to_string_lossy().as_ref(), true) + .with_db_initialization_query(DB_INITIALIZE_QUERY) + .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) + .build() + .await } -pub fn open_memory_db(db_name: &str) -> ThreadSafeConnection { - ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY) +pub async fn open_memory_db(db_name: &str) -> ThreadSafeConnection { + ThreadSafeConnection::::builder(db_name, false) + .with_db_initialization_query(DB_INITIALIZE_QUERY) + .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) + .build() + .await } /// Implements a basic DB wrapper for a given domain @@ -67,9 +79,9 @@ macro_rules! connection { ::db::lazy_static::lazy_static! 
{ pub static ref $id: $t = $t(if cfg!(any(test, feature = "test-support")) { - ::db::open_memory_db(stringify!($id)) + $crate::smol::block_on(::db::open_memory_db(stringify!($id))) } else { - ::db::open_file_db() + $crate::smol::block_on(::db::open_file_db()) }); } }; diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index b3f2a716cb..da796fa469 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -15,9 +15,9 @@ impl std::ops::Deref for KeyValueStore { lazy_static::lazy_static! { pub static ref KEY_VALUE_STORE: KeyValueStore = KeyValueStore(if cfg!(any(test, feature = "test-support")) { - open_memory_db(stringify!($id)) + smol::block_on(open_memory_db("KEY_VALUE_STORE")) } else { - open_file_db() + smol::block_on(open_file_db()) }); } @@ -62,7 +62,7 @@ mod tests { #[gpui::test] async fn test_kvp() { - let db = KeyValueStore(crate::open_memory_db("test_kvp")); + let db = KeyValueStore(crate::open_memory_db("test_kvp").await); assert_eq!(db.read_kvp("key-1").unwrap(), None); diff --git a/crates/sqlez/Cargo.toml b/crates/sqlez/Cargo.toml index cab1af7d6c..8a7f1ba415 100644 --- a/crates/sqlez/Cargo.toml +++ b/crates/sqlez/Cargo.toml @@ -9,6 +9,7 @@ edition = "2021" anyhow = { version = "1.0.38", features = ["backtrace"] } indoc = "1.0.7" libsqlite3-sys = { version = "0.25.2", features = ["bundled"] } +smol = "1.2" thread_local = "1.1.4" lazy_static = "1.4" parking_lot = "0.11.1" diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 6c0aafaf20..41c505f85b 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -15,9 +15,9 @@ impl Connection { // Setup the migrations table unconditionally self.exec(indoc! {" CREATE TABLE IF NOT EXISTS migrations ( - domain TEXT, - step INTEGER, - migration TEXT + domain TEXT, + step INTEGER, + migration TEXT )"})?()?; let completed_migrations = diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 6c35d1e945..880a58d194 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -1,4 +1,4 @@ -use futures::{Future, FutureExt}; +use futures::{channel::oneshot, Future, FutureExt}; use lazy_static::lazy_static; use parking_lot::RwLock; use std::{collections::HashMap, marker::PhantomData, ops::Deref, sync::Arc, thread}; @@ -10,17 +10,25 @@ use crate::{ util::UnboundedSyncSender, }; -type QueuedWrite = Box; +const MIGRATION_RETRIES: usize = 10; +type QueuedWrite = Box; lazy_static! { + /// List of queues of tasks by database uri. This lets us serialize writes to the database + /// and have a single worker thread per db file. This means many thread safe connections + /// (possibly with different migrations) could all be communicating with the same background + /// thread. static ref QUEUES: RwLock, UnboundedSyncSender>> = Default::default(); } +/// Thread safe connection to a given database file or in memory db. This can be cloned, shared, static, +/// whatever. It derefs to a synchronous connection by thread that is read only. 
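+///
+/// A rough usage sketch (`MyDomain` stands in for a real `Domain`/`Migrator` impl,
+/// and the table and queries are illustrative only):
+///
+/// ```ignore
+/// let connection = ThreadSafeConnection::<MyDomain>::builder("my-db.db", true)
+///     .with_db_initialization_query("PRAGMA journal_mode=WAL")
+///     .with_connection_initialize_query("PRAGMA synchronous=NORMAL")
+///     .build()
+///     .await;
+///
+/// // Reads deref to the calling thread's read-only connection:
+/// let name = connection.select_row::<String>("SELECT name FROM users LIMIT 1")?()?;
+///
+/// // Writes are queued as closures for the single background writer:
+/// connection.write(|conn| conn.exec("DELETE FROM users")?()).await?;
+/// ```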
A write capable connection
+/// may be accessed by passing a callback to the `write` function, which will queue the callback.
 pub struct ThreadSafeConnection<M: Migrator> {
     uri: Arc<str>,
     persistent: bool,
-    initialize_query: Option<&'static str>,
+    connection_initialize_query: Option<&'static str>,
     connections: Arc<ThreadLocal<Connection>>,
     _migrator: PhantomData<M>,
 }
@@ -28,87 +36,125 @@ pub struct ThreadSafeConnection<M: Migrator> {
 unsafe impl<M: Migrator> Send for ThreadSafeConnection<M> {}
 unsafe impl<M: Migrator> Sync for ThreadSafeConnection<M> {}

-impl<M: Migrator> ThreadSafeConnection<M> {
-    pub fn new(uri: &str, persistent: bool) -> Self {
-        Self {
-            uri: Arc::from(uri),
-            persistent,
-            initialize_query: None,
-            connections: Default::default(),
-            _migrator: PhantomData,
-        }
+pub struct ThreadSafeConnectionBuilder<M: Migrator> {
+    db_initialize_query: Option<&'static str>,
+    connection: ThreadSafeConnection<M>,
+}
+
+impl<M: Migrator> ThreadSafeConnectionBuilder<M> {
+    /// Sets the query to run every time a connection is opened. This must
+    /// be infallible (e.g. only use pragma statements) and must not cause writes
+    /// to the db, or it will panic.
+    pub fn with_connection_initialize_query(mut self, initialize_query: &'static str) -> Self {
+        self.connection.connection_initialize_query = Some(initialize_query);
+        self
     }

-    /// Sets the query to run every time a connection is opened. This must
-    /// be infallible (e.g. only use pragma statements)
-    pub fn with_initialize_query(mut self, initialize_query: &'static str) -> Self {
-        self.initialize_query = Some(initialize_query);
+    /// Queues an initialization query for the database file. This must be infallible,
+    /// but may cause changes to the database file, such as with `PRAGMA journal_mode`.
+    pub fn with_db_initialization_query(mut self, initialize_query: &'static str) -> Self {
+        self.db_initialize_query = Some(initialize_query);
         self
     }

+    pub async fn build(self) -> ThreadSafeConnection<M> {
+        let db_initialize_query = self.db_initialize_query;
+
+        self.connection
+            .write(move |connection| {
+                if let Some(db_initialize_query) = db_initialize_query {
+                    connection.exec(db_initialize_query).expect(&format!(
+                        "Db initialize query failed to execute: {}",
+                        db_initialize_query
+                    ))()
+                    .unwrap();
+                }
+
+                let mut failure_result = None;
+                for _ in 0..MIGRATION_RETRIES {
+                    failure_result = Some(M::migrate(connection));
+                    if failure_result.as_ref().unwrap().is_ok() {
+                        break;
+                    }
+                }
+
+                failure_result.unwrap().expect("Migration failed");
+            })
+            .await;
+
+        self.connection
+    }
+}
+
+impl<M: Migrator> ThreadSafeConnection<M> {
+    pub fn builder(uri: &str, persistent: bool) -> ThreadSafeConnectionBuilder<M> {
+        ThreadSafeConnectionBuilder::<M> {
+            db_initialize_query: None,
+            connection: Self {
+                uri: Arc::from(uri),
+                persistent,
+                connection_initialize_query: None,
+                connections: Default::default(),
+                _migrator: PhantomData,
+            },
+        }
+    }
+
     /// Opens a new db connection with the initialized file path. This is internal and only
     /// called from the deref function.
-    /// If opening fails, the connection falls back to a shared memory connection
     fn open_file(&self) -> Connection {
-        // This unwrap is secured by a panic in the constructor. Be careful if you remove it!
         Connection::open_file(self.uri.as_ref())
     }

-    /// Opens a shared memory connection using the file path as the identifier. This unwraps
-    /// as we expect it always to succeed
+    /// Opens a shared memory connection using the file path as the identifier. This is internal
+    /// and only called from the deref function.
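+    /// (`create_connection` takes this in-memory path whenever `persistent` is false,
+    /// as it is for the test databases.)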
 fn open_shared_memory(&self) -> Connection {
     Connection::open_memory(Some(self.uri.as_ref()))
 }

-    // Open a new connection for the given domain, leaving this
-    // connection intact.
-    pub fn for_domain<D: Domain>(&self) -> ThreadSafeConnection<D> {
-        ThreadSafeConnection {
-            uri: self.uri.clone(),
-            persistent: self.persistent,
-            initialize_query: self.initialize_query,
-            connections: Default::default(),
-            _migrator: PhantomData,
-        }
-    }
-
-    pub fn write<T: 'static + Send + Sync>(
-        &self,
-        callback: impl 'static + Send + FnOnce(&Connection) -> T,
-    ) -> impl Future<Output = T> {
+    fn queue_write_task(&self, callback: QueuedWrite) {
         // Startup write thread for this database if one hasn't already
         // been started and insert a channel to queue work for it
         if !QUEUES.read().contains_key(&self.uri) {
-            use std::sync::mpsc::channel;
-
-            let (sender, receiver) = channel::<QueuedWrite>();
-            let mut write_connection = self.create_connection();
-            // Enable writes for this connection
-            write_connection.write = true;
-            thread::spawn(move || {
-                while let Ok(write) = receiver.recv() {
-                    write(&write_connection)
-                }
-            });
-
-            let mut queues = QUEUES.write();
-            queues.insert(self.uri.clone(), UnboundedSyncSender::new(sender));
+            let mut queues = QUEUES.write();
+            if !queues.contains_key(&self.uri) {
+                use std::sync::mpsc::channel;
+
+                let (sender, receiver) = channel::<QueuedWrite>();
+                let mut write_connection = self.create_connection();
+                // Enable writes for this connection
+                write_connection.write = true;
+                thread::spawn(move || {
+                    while let Ok(write) = receiver.recv() {
+                        write(&write_connection)
+                    }
+                });
+
+                queues.insert(self.uri.clone(), UnboundedSyncSender::new(sender));
+            }
         }

         // Grab the queue for this database
         let queues = QUEUES.read();
         let write_channel = queues.get(&self.uri).unwrap();

+        write_channel
+            .send(callback)
+            .expect("Could not send write action to background thread");
+    }
+
+    pub fn write<T: 'static + Send + Sync>(
+        &self,
+        callback: impl 'static + Send + FnOnce(&Connection) -> T,
+    ) -> impl Future<Output = T> {
         // Create a one shot channel for the result of the queued write
         // so we can await on the result
-        let (sender, receiver) = futures::channel::oneshot::channel();
-        write_channel
-            .send(Box::new(move |connection| {
-                sender.send(callback(connection)).ok();
-            }))
-            .expect("Could not send write action to background thread");
+        let (sender, receiver) = oneshot::channel();
+        self.queue_write_task(Box::new(move |connection| {
+            sender.send(callback(connection)).ok();
+        }));

-        receiver.map(|response| response.expect("Background thread unexpectedly closed"))
+        receiver.map(|response| response.expect("Background writer thread unexpectedly closed"))
     }

     pub(crate) fn create_connection(&self) -> Connection {
@@ -118,10 +164,11 @@ impl<M: Migrator> ThreadSafeConnection<M> {
             self.open_shared_memory()
         };

-        // Enable writes for the migrations and initialization queries
-        connection.write = true;
+        // Disallow writes on the connection. The only writes allowed for thread safe connections
+        // are from the background thread that can serialize them.
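+        // With `PRAGMA journal_mode=WAL` queued as the db initialization query (as the
+        // `db` crate does), reads on these connections can proceed concurrently with
+        // the single background writer.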
+ connection.write = false; - if let Some(initialize_query) = self.initialize_query { + if let Some(initialize_query) = self.connection_initialize_query { connection.exec(initialize_query).expect(&format!( "Initialize query failed to execute: {}", initialize_query @@ -129,20 +176,34 @@ impl ThreadSafeConnection { .unwrap() } - M::migrate(&connection).expect("Migrations failed"); - - // Disable db writes for normal thread local connection - connection.write = false; connection } } +impl ThreadSafeConnection<()> { + /// Special constructor for ThreadSafeConnection which disallows db initialization and migrations. + /// This allows construction to be infallible and not write to the db. + pub fn new( + uri: &str, + persistent: bool, + connection_initialize_query: Option<&'static str>, + ) -> Self { + Self { + uri: Arc::from(uri), + persistent, + connection_initialize_query, + connections: Default::default(), + _migrator: PhantomData, + } + } +} + impl Clone for ThreadSafeConnection { fn clone(&self) -> Self { Self { uri: self.uri.clone(), persistent: self.persistent, - initialize_query: self.initialize_query.clone(), + connection_initialize_query: self.connection_initialize_query.clone(), connections: self.connections.clone(), _migrator: PhantomData, } @@ -163,11 +224,11 @@ impl Deref for ThreadSafeConnection { #[cfg(test)] mod test { - use std::{fs, ops::Deref, thread}; + use indoc::indoc; + use lazy_static::__Deref; + use std::thread; - use crate::domain::Domain; - - use super::ThreadSafeConnection; + use crate::{domain::Domain, thread_safe_connection::ThreadSafeConnection}; #[test] fn many_initialize_and_migrate_queries_at_once() { @@ -185,27 +246,22 @@ mod test { for _ in 0..100 { handles.push(thread::spawn(|| { - let _ = ThreadSafeConnection::::new("annoying-test.db", false) - .with_initialize_query( - " - PRAGMA journal_mode=WAL; - PRAGMA synchronous=NORMAL; - PRAGMA busy_timeout=1; - PRAGMA foreign_keys=TRUE; - PRAGMA case_sensitive_like=TRUE; - ", - ) - .deref(); + let builder = + ThreadSafeConnection::::builder("annoying-test.db", false) + .with_db_initialization_query("PRAGMA journal_mode=WAL") + .with_connection_initialize_query(indoc! 
{" + PRAGMA synchronous=NORMAL; + PRAGMA busy_timeout=1; + PRAGMA foreign_keys=TRUE; + PRAGMA case_sensitive_like=TRUE; + "}); + let _ = smol::block_on(builder.build()).deref(); })); } for handle in handles { let _ = handle.join(); } - - // fs::remove_file("annoying-test.db").unwrap(); - // fs::remove_file("annoying-test.db-shm").unwrap(); - // fs::remove_file("annoying-test.db-wal").unwrap(); } #[test] @@ -241,8 +297,10 @@ mod test { } } - let _ = ThreadSafeConnection::::new("wild_zed_lost_failure", false) - .with_initialize_query("PRAGMA FOREIGN_KEYS=true") - .deref(); + let builder = + ThreadSafeConnection::::builder("wild_zed_lost_failure", false) + .with_connection_initialize_query("PRAGMA FOREIGN_KEYS=true"); + + smol::block_on(builder.build()); } } diff --git a/crates/sqlez/src/util.rs b/crates/sqlez/src/util.rs index b5366cffc4..ce0353b15e 100644 --- a/crates/sqlez/src/util.rs +++ b/crates/sqlez/src/util.rs @@ -4,6 +4,10 @@ use std::sync::mpsc::Sender; use parking_lot::Mutex; use thread_local::ThreadLocal; +/// Unbounded standard library sender which is stored per thread to get around +/// the lack of sync on the standard library version while still being unbounded +/// Note: this locks on the cloneable sender, but its done once per thread, so it +/// shouldn't result in too much contention pub struct UnboundedSyncSender { clonable_sender: Mutex>, local_senders: ThreadLocal>, diff --git a/crates/sqlez_macros/src/sqlez_macros.rs b/crates/sqlez_macros/src/sqlez_macros.rs index 25249b89b6..532503a3e6 100644 --- a/crates/sqlez_macros/src/sqlez_macros.rs +++ b/crates/sqlez_macros/src/sqlez_macros.rs @@ -3,7 +3,7 @@ use sqlez::thread_safe_connection::ThreadSafeConnection; use syn::Error; lazy_static::lazy_static! { - static ref SQLITE: ThreadSafeConnection = ThreadSafeConnection::new(":memory:", false); + static ref SQLITE: ThreadSafeConnection = ThreadSafeConnection::new(":memory:", false, None); } #[proc_macro] diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 0d35c19d5d..c8b31cd254 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -395,7 +395,7 @@ mod tests { async fn test_next_id_stability() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_next_id_stability")); + let db = WorkspaceDb(open_memory_db("test_next_id_stability").await); db.write(|conn| { conn.migrate( @@ -442,7 +442,7 @@ mod tests { async fn test_workspace_id_stability() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_workspace_id_stability")); + let db = WorkspaceDb(open_memory_db("test_workspace_id_stability").await); db.write(|conn| { conn.migrate( @@ -523,7 +523,7 @@ mod tests { async fn test_full_workspace_serialization() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_full_workspace_serialization")); + let db = WorkspaceDb(open_memory_db("test_full_workspace_serialization").await); let dock_pane = crate::persistence::model::SerializedPane { children: vec![ @@ -597,7 +597,7 @@ mod tests { async fn test_workspace_assignment() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_basic_functionality")); + let db = WorkspaceDb(open_memory_db("test_basic_functionality").await); let workspace_1 = SerializedWorkspace { id: 1, @@ -689,7 +689,7 @@ mod tests { async fn test_basic_dock_pane() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("basic_dock_pane")); + let db = 
WorkspaceDb(open_memory_db("basic_dock_pane").await); let dock_pane = crate::persistence::model::SerializedPane::new( vec![ @@ -714,7 +714,7 @@ mod tests { async fn test_simple_split() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("simple_split")); + let db = WorkspaceDb(open_memory_db("simple_split").await); // ----------------- // | 1,2 | 5,6 | @@ -766,7 +766,7 @@ mod tests { async fn test_cleanup_panes() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_cleanup_panes")); + let db = WorkspaceDb(open_memory_db("test_cleanup_panes").await); let center_pane = SerializedPaneGroup::Group { axis: gpui::Axis::Horizontal, diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 9755c2c6ca..584f6392d1 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -162,11 +162,7 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { let app_state = Arc::downgrade(&app_state); move |_: &NewFile, cx: &mut MutableAppContext| { if let Some(app_state) = app_state.upgrade() { - let task = open_new(&app_state, cx); - cx.spawn(|_| async { - task.await; - }) - .detach(); + open_new(&app_state, cx).detach(); } } }); @@ -174,11 +170,7 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { let app_state = Arc::downgrade(&app_state); move |_: &NewWindow, cx: &mut MutableAppContext| { if let Some(app_state) = app_state.upgrade() { - let task = open_new(&app_state, cx); - cx.spawn(|_| async { - task.await; - }) - .detach(); + open_new(&app_state, cx).detach(); } } }); @@ -2641,13 +2633,16 @@ pub fn open_paths( }) } -fn open_new(app_state: &Arc, cx: &mut MutableAppContext) -> Task<()> { +pub fn open_new(app_state: &Arc, cx: &mut MutableAppContext) -> Task<()> { let task = Workspace::new_local(Vec::new(), app_state.clone(), cx); cx.spawn(|mut cx| async move { + eprintln!("Open new task spawned"); let (workspace, opened_paths) = task.await; + eprintln!("workspace and path items created"); workspace.update(&mut cx, |_, cx| { if opened_paths.is_empty() { + eprintln!("new file redispatched"); cx.dispatch_action(NewFile); } }) diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 3693a5e580..0a25cfb66f 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -626,7 +626,7 @@ mod tests { use theme::ThemeRegistry; use workspace::{ item::{Item, ItemHandle}, - open_paths, pane, NewFile, Pane, SplitDirection, WorkspaceHandle, + open_new, open_paths, pane, NewFile, Pane, SplitDirection, WorkspaceHandle, }; #[gpui::test] @@ -762,8 +762,7 @@ mod tests { #[gpui::test] async fn test_new_empty_workspace(cx: &mut TestAppContext) { let app_state = init(cx); - cx.dispatch_global_action(workspace::NewFile); - cx.foreground().run_until_parked(); + cx.update(|cx| open_new(&app_state, cx)).await; let window_id = *cx.window_ids().first().unwrap(); let workspace = cx.root_view::(window_id).unwrap(); From 1b225fa37c72a754b9bc9eaeb84ea5b07862b67c Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 30 Nov 2022 12:34:42 -0800 Subject: [PATCH 181/240] fix test failures --- crates/db/src/db.rs | 9 +++ crates/sqlez/src/thread_safe_connection.rs | 89 +++++++++++++--------- 2 files changed, 64 insertions(+), 34 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 701aa57656..1ac1d1604b 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -4,6 +4,7 @@ pub mod kvp; pub use anyhow; pub use indoc::indoc; pub use lazy_static; +use parking_lot::Mutex; pub use smol; pub use sqlez; pub 
use sqlez_macros; @@ -59,6 +60,14 @@ pub async fn open_memory_db(db_name: &str) -> ThreadSafeConnection< ThreadSafeConnection::::builder(db_name, false) .with_db_initialization_query(DB_INITIALIZE_QUERY) .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) + // Serialize queued writes via a mutex and run them synchronously + .with_write_queue_constructor(Box::new(|connection| { + let connection = Mutex::new(connection); + Box::new(move |queued_write| { + let connection = connection.lock(); + queued_write(&connection) + }) + })) .build() .await } diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 880a58d194..b17c87d63f 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -13,12 +13,14 @@ use crate::{ const MIGRATION_RETRIES: usize = 10; type QueuedWrite = Box; +type WriteQueueConstructor = + Box Box>; lazy_static! { /// List of queues of tasks by database uri. This lets us serialize writes to the database /// and have a single worker thread per db file. This means many thread safe connections /// (possibly with different migrations) could all be communicating with the same background /// thread. - static ref QUEUES: RwLock, UnboundedSyncSender>> = + static ref QUEUES: RwLock, Box>> = Default::default(); } @@ -38,6 +40,7 @@ unsafe impl Sync for ThreadSafeConnection {} pub struct ThreadSafeConnectionBuilder { db_initialize_query: Option<&'static str>, + write_queue_constructor: Option, connection: ThreadSafeConnection, } @@ -50,6 +53,18 @@ impl ThreadSafeConnectionBuilder { self } + /// Specifies how the thread safe connection should serialize writes. If provided + /// the connection will call the write_queue_constructor for each database file in + /// this process. The constructor is responsible for setting up a background thread or + /// async task which handles queued writes with the provided connection. + pub fn with_write_queue_constructor( + mut self, + write_queue_constructor: WriteQueueConstructor, + ) -> Self { + self.write_queue_constructor = Some(write_queue_constructor); + self + } + /// Queues an initialization query for the database file. 
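     /// (For instance, `open_file_db` in the `db` crate queues `PRAGMA journal_mode=WAL`
     /// through this method.)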
This must be infallible,
     /// but may cause changes to the database file, such as with `PRAGMA journal_mode`.
     pub fn with_db_initialization_query(mut self, initialize_query: &'static str) -> Self {
@@ -58,6 +73,38 @@ impl<M: Migrator> ThreadSafeConnectionBuilder<M> {
     }

     pub async fn build(self) -> ThreadSafeConnection<M> {
+        if !QUEUES.read().contains_key(&self.connection.uri) {
+            let mut queues = QUEUES.write();
+            if !queues.contains_key(&self.connection.uri) {
+                let mut write_connection = self.connection.create_connection();
+                // Enable writes for this connection
+                write_connection.write = true;
+                if let Some(mut write_queue_constructor) = self.write_queue_constructor {
+                    let write_channel = write_queue_constructor(write_connection);
+                    queues.insert(self.connection.uri.clone(), write_channel);
+                } else {
+                    use std::sync::mpsc::channel;
+
+                    let (sender, receiver) = channel::<QueuedWrite>();
+                    thread::spawn(move || {
+                        while let Ok(write) = receiver.recv() {
+                            write(&write_connection)
+                        }
+                    });
+
+                    let sender = UnboundedSyncSender::new(sender);
+                    queues.insert(
+                        self.connection.uri.clone(),
+                        Box::new(move |queued_write| {
+                            sender
+                                .send(queued_write)
+                                .expect("Could not send write action to background thread");
+                        }),
+                    );
+                }
+            }
+        }
+
         let db_initialize_query = self.db_initialize_query;

         self.connection
@@ -90,6 +137,7 @@ impl<M: Migrator> ThreadSafeConnection<M> {
     pub fn builder(uri: &str, persistent: bool) -> ThreadSafeConnectionBuilder<M> {
         ThreadSafeConnectionBuilder::<M> {
             db_initialize_query: None,
+            write_queue_constructor: None,
             connection: Self {
                 uri: Arc::from(uri),
                 persistent,
@@ -112,48 +160,21 @@ impl<M: Migrator> ThreadSafeConnection<M> {
         Connection::open_memory(Some(self.uri.as_ref()))
     }

-    fn queue_write_task(&self, callback: QueuedWrite) {
-        // Startup write thread for this database if one hasn't already
-        // been started and insert a channel to queue work for it
-        if !QUEUES.read().contains_key(&self.uri) {
-            let mut queues = QUEUES.write();
-            if !queues.contains_key(&self.uri) {
-                use std::sync::mpsc::channel;
-
-                let (sender, receiver) = channel::<QueuedWrite>();
-                let mut write_connection = self.create_connection();
-                // Enable writes for this connection
-                write_connection.write = true;
-                thread::spawn(move || {
-                    while let Ok(write) = receiver.recv() {
-                        write(&write_connection)
-                    }
-                });
-
-                queues.insert(self.uri.clone(), UnboundedSyncSender::new(sender));
-            }
-        }
-
-        // Grab the queue for this database
-        let queues = QUEUES.read();
-        let write_channel = queues.get(&self.uri).unwrap();
-
-        write_channel
-            .send(callback)
-            .expect("Could not send write action to background thread");
-    }
-
     pub fn write<T: 'static + Send + Sync>(
         &self,
         callback: impl 'static + Send + FnOnce(&Connection) -> T,
     ) -> impl Future<Output = T> {
+        let queues = QUEUES.read();
+        let write_channel = queues
+            .get(&self.uri)
+            .expect("Queues are inserted when build is called.
This should always succeed"); + // Create a one shot channel for the result of the queued write // so we can await on the result let (sender, reciever) = oneshot::channel(); - self.queue_write_task(Box::new(move |connection| { + write_channel(Box::new(move |connection| { sender.send(callback(connection)).ok(); })); - reciever.map(|response| response.expect("Background writer thread unexpectedly closed")) } From f68e8d4664e4322eb88add438d4ca015c0daaffc Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 30 Nov 2022 16:19:46 -0800 Subject: [PATCH 182/240] Address some issues with the sqlez_macros --- Cargo.lock | 1 + crates/db/src/db.rs | 34 +++--- crates/db/src/kvp.rs | 28 ++--- crates/editor/src/items.rs | 38 +++--- crates/editor/src/persistence.rs | 3 +- crates/sqlez/src/thread_safe_connection.rs | 87 ++++++++------ crates/sqlez_macros/Cargo.toml | 3 +- crates/sqlez_macros/src/sqlez_macros.rs | 23 ++-- crates/workspace/src/persistence.rs | 132 ++++++++++----------- crates/workspace/src/workspace.rs | 8 +- 10 files changed, 183 insertions(+), 174 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fd1bb4ea0a..4312b7e830 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5609,6 +5609,7 @@ dependencies = [ "proc-macro2", "quote", "sqlez", + "sqlformat", "syn", ] diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 1ac1d1604b..3fc069405d 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -4,7 +4,6 @@ pub mod kvp; pub use anyhow; pub use indoc::indoc; pub use lazy_static; -use parking_lot::Mutex; pub use smol; pub use sqlez; pub use sqlez_macros; @@ -34,7 +33,7 @@ lazy_static::lazy_static! { } /// Open or create a database at the given directory path. -pub async fn open_file_db() -> ThreadSafeConnection { +pub async fn open_db() -> ThreadSafeConnection { // Use 0 for now. Will implement incrementing and clearing of old db files soon TM let current_db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); @@ -56,18 +55,15 @@ pub async fn open_file_db() -> ThreadSafeConnection { .await } -pub async fn open_memory_db(db_name: &str) -> ThreadSafeConnection { +#[cfg(any(test, feature = "test-support"))] +pub async fn open_test_db(db_name: &str) -> ThreadSafeConnection { + use sqlez::thread_safe_connection::locking_queue; + ThreadSafeConnection::::builder(db_name, false) .with_db_initialization_query(DB_INITIALIZE_QUERY) .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) // Serialize queued writes via a mutex and run them synchronously - .with_write_queue_constructor(Box::new(|connection| { - let connection = Mutex::new(connection); - Box::new(move |queued_write| { - let connection = connection.lock(); - queued_write(&connection) - }) - })) + .with_write_queue_constructor(locking_queue()) .build() .await } @@ -76,22 +72,24 @@ pub async fn open_memory_db(db_name: &str) -> ThreadSafeConnection< #[macro_export] macro_rules! connection { ($id:ident: $t:ident<$d:ty>) => { - pub struct $t(::db::sqlez::thread_safe_connection::ThreadSafeConnection<$d>); + pub struct $t($crate::sqlez::thread_safe_connection::ThreadSafeConnection<$d>); impl ::std::ops::Deref for $t { - type Target = ::db::sqlez::thread_safe_connection::ThreadSafeConnection<$d>; + type Target = $crate::sqlez::thread_safe_connection::ThreadSafeConnection<$d>; fn deref(&self) -> &Self::Target { &self.0 } } - ::db::lazy_static::lazy_static! 
{ - pub static ref $id: $t = $t(if cfg!(any(test, feature = "test-support")) { - $crate::smol::block_on(::db::open_memory_db(stringify!($id))) - } else { - $crate::smol::block_on(::db::open_file_db()) - }); + #[cfg(any(test, feature = "test-support"))] + $crate::lazy_static::lazy_static! { + pub static ref $id: $t = $t($crate::smol::block_on($crate::open_test_db(stringify!($id)))); + } + + #[cfg(not(any(test, feature = "test-support")))] + $crate::lazy_static::lazy_static! { + pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db())); } }; } diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index da796fa469..70ee9f64da 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -1,25 +1,9 @@ -use sqlez::{domain::Domain, thread_safe_connection::ThreadSafeConnection}; +use sqlez::domain::Domain; use sqlez_macros::sql; -use crate::{open_file_db, open_memory_db, query}; +use crate::{connection, query}; -pub struct KeyValueStore(ThreadSafeConnection); - -impl std::ops::Deref for KeyValueStore { - type Target = ThreadSafeConnection; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -lazy_static::lazy_static! { - pub static ref KEY_VALUE_STORE: KeyValueStore = KeyValueStore(if cfg!(any(test, feature = "test-support")) { - smol::block_on(open_memory_db("KEY_VALUE_STORE")) - } else { - smol::block_on(open_file_db()) - }); -} +connection!(KEY_VALUE_STORE: KeyValueStore); impl Domain for KeyValueStore { fn name() -> &'static str { @@ -27,8 +11,10 @@ impl Domain for KeyValueStore { } fn migrations() -> &'static [&'static str] { + // Legacy migrations using rusqlite may have already created kv_store during alpha, + // migrations must be infallible so this must have 'IF NOT EXISTS' &[sql!( - CREATE TABLE kv_store( + CREATE TABLE IF NOT EXISTS kv_store( key TEXT PRIMARY KEY, value TEXT NOT NULL ) STRICT; @@ -62,7 +48,7 @@ mod tests { #[gpui::test] async fn test_kvp() { - let db = KeyValueStore(crate::open_memory_db("test_kvp").await); + let db = KeyValueStore(crate::open_test_db("test_kvp").await); assert_eq!(db.read_kvp("key-1").unwrap(), None); diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index e724156fae..afe659af61 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -602,31 +602,37 @@ impl Item for Editor { item_id: ItemId, cx: &mut ViewContext, ) -> Task>> { - if let Some(project_item) = project.update(cx, |project, cx| { + let project_item: Result<_> = project.update(cx, |project, cx| { // Look up the path with this key associated, create a self with that path - let path = DB.get_path(item_id, workspace_id).ok()?; + let path = DB + .get_path(item_id, workspace_id)? 
+ .context("No path stored for this editor")?; - let (worktree, path) = project.find_local_worktree(&path, cx)?; + let (worktree, path) = project + .find_local_worktree(&path, cx) + .with_context(|| format!("No worktree for path: {path:?}"))?; let project_path = ProjectPath { worktree_id: worktree.read(cx).id(), path: path.into(), }; - Some(project.open_path(project_path, cx)) - }) { - cx.spawn(|pane, mut cx| async move { - let (_, project_item) = project_item.await?; - let buffer = project_item - .downcast::() - .context("Project item at stored path was not a buffer")?; + Ok(project.open_path(project_path, cx)) + }); - Ok(cx.update(|cx| { - cx.add_view(pane, |cx| Editor::for_buffer(buffer, Some(project), cx)) - })) + project_item + .map(|project_item| { + cx.spawn(|pane, mut cx| async move { + let (_, project_item) = project_item.await?; + let buffer = project_item + .downcast::() + .context("Project item at stored path was not a buffer")?; + + Ok(cx.update(|cx| { + cx.add_view(pane, |cx| Editor::for_buffer(buffer, Some(project), cx)) + })) + }) }) - } else { - Task::ready(Err(anyhow!("Could not load file from stored path"))) - } + .unwrap_or_else(|error| Task::ready(Err(error))) } } diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 22b0f158c1..3416f479e7 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -23,7 +23,6 @@ impl Domain for Editor { FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE ON UPDATE CASCADE - ) STRICT; )] } @@ -31,7 +30,7 @@ impl Domain for Editor { impl EditorDb { query! { - pub fn get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result { + pub fn get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result> { SELECT path FROM editors WHERE item_id = ? AND workspace_id = ? 
} diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index b17c87d63f..82697d1f90 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -1,6 +1,6 @@ use futures::{channel::oneshot, Future, FutureExt}; use lazy_static::lazy_static; -use parking_lot::RwLock; +use parking_lot::{Mutex, RwLock}; use std::{collections::HashMap, marker::PhantomData, ops::Deref, sync::Arc, thread}; use thread_local::ThreadLocal; @@ -73,37 +73,8 @@ impl ThreadSafeConnectionBuilder { } pub async fn build(self) -> ThreadSafeConnection { - if !QUEUES.read().contains_key(&self.connection.uri) { - let mut queues = QUEUES.write(); - if !queues.contains_key(&self.connection.uri) { - let mut write_connection = self.connection.create_connection(); - // Enable writes for this connection - write_connection.write = true; - if let Some(mut write_queue_constructor) = self.write_queue_constructor { - let write_channel = write_queue_constructor(write_connection); - queues.insert(self.connection.uri.clone(), write_channel); - } else { - use std::sync::mpsc::channel; - - let (sender, reciever) = channel::(); - thread::spawn(move || { - while let Ok(write) = reciever.recv() { - write(&write_connection) - } - }); - - let sender = UnboundedSyncSender::new(sender); - queues.insert( - self.connection.uri.clone(), - Box::new(move |queued_write| { - sender - .send(queued_write) - .expect("Could not send write action to backgorund thread"); - }), - ); - } - } - } + self.connection + .initialize_queues(self.write_queue_constructor); let db_initialize_query = self.db_initialize_query; @@ -134,6 +105,40 @@ impl ThreadSafeConnectionBuilder { } impl ThreadSafeConnection { + fn initialize_queues(&self, write_queue_constructor: Option) { + if !QUEUES.read().contains_key(&self.uri) { + let mut queues = QUEUES.write(); + if !queues.contains_key(&self.uri) { + let mut write_connection = self.create_connection(); + // Enable writes for this connection + write_connection.write = true; + if let Some(mut write_queue_constructor) = write_queue_constructor { + let write_channel = write_queue_constructor(write_connection); + queues.insert(self.uri.clone(), write_channel); + } else { + use std::sync::mpsc::channel; + + let (sender, reciever) = channel::(); + thread::spawn(move || { + while let Ok(write) = reciever.recv() { + write(&write_connection) + } + }); + + let sender = UnboundedSyncSender::new(sender); + queues.insert( + self.uri.clone(), + Box::new(move |queued_write| { + sender + .send(queued_write) + .expect("Could not send write action to backgorund thread"); + }), + ); + } + } + } + } + pub fn builder(uri: &str, persistent: bool) -> ThreadSafeConnectionBuilder { ThreadSafeConnectionBuilder:: { db_initialize_query: None, @@ -208,14 +213,18 @@ impl ThreadSafeConnection<()> { uri: &str, persistent: bool, connection_initialize_query: Option<&'static str>, + write_queue_constructor: Option, ) -> Self { - Self { + let connection = Self { uri: Arc::from(uri), persistent, connection_initialize_query, connections: Default::default(), _migrator: PhantomData, - } + }; + + connection.initialize_queues(write_queue_constructor); + connection } } @@ -243,6 +252,16 @@ impl Deref for ThreadSafeConnection { } } +pub fn locking_queue() -> WriteQueueConstructor { + Box::new(|connection| { + let connection = Mutex::new(connection); + Box::new(move |queued_write| { + let connection = connection.lock(); + queued_write(&connection) + }) + }) +} + #[cfg(test)] mod test 
{ use indoc::indoc; diff --git a/crates/sqlez_macros/Cargo.toml b/crates/sqlez_macros/Cargo.toml index 413a3d30f5..423b494500 100644 --- a/crates/sqlez_macros/Cargo.toml +++ b/crates/sqlez_macros/Cargo.toml @@ -13,4 +13,5 @@ syn = "1.0" quote = "1.0" proc-macro2 = "1.0" lazy_static = "1.4" -sqlez = { path = "../sqlez" } \ No newline at end of file +sqlez = { path = "../sqlez" } +sqlformat = "0.2" \ No newline at end of file diff --git a/crates/sqlez_macros/src/sqlez_macros.rs b/crates/sqlez_macros/src/sqlez_macros.rs index 532503a3e6..c937e704ae 100644 --- a/crates/sqlez_macros/src/sqlez_macros.rs +++ b/crates/sqlez_macros/src/sqlez_macros.rs @@ -1,9 +1,11 @@ use proc_macro::{Delimiter, Span, TokenStream, TokenTree}; -use sqlez::thread_safe_connection::ThreadSafeConnection; +use sqlez::thread_safe_connection::{locking_queue, ThreadSafeConnection}; use syn::Error; lazy_static::lazy_static! { - static ref SQLITE: ThreadSafeConnection = ThreadSafeConnection::new(":memory:", false, None); + static ref SQLITE: ThreadSafeConnection = { + ThreadSafeConnection::new(":memory:", false, None, Some(locking_queue())) + }; } #[proc_macro] @@ -20,6 +22,7 @@ pub fn sql(tokens: TokenStream) -> TokenStream { } let error = SQLITE.sql_has_syntax_error(sql.trim()); + let formatted_sql = sqlformat::format(&sql, &sqlformat::QueryParams::None, Default::default()); if let Some((error, error_offset)) = error { let error_span = spans @@ -29,10 +32,10 @@ pub fn sql(tokens: TokenStream) -> TokenStream { .next() .unwrap_or(Span::call_site()); - let error_text = format!("Sql Error: {}\nFor Query: {}", error, sql); + let error_text = format!("Sql Error: {}\nFor Query: {}", error, formatted_sql); TokenStream::from(Error::new(error_span.into(), error_text).into_compile_error()) } else { - format!("r#\"{}\"#", &sql).parse().unwrap() + format!("r#\"{}\"#", &formatted_sql).parse().unwrap() } } @@ -61,18 +64,18 @@ fn flatten_stream(tokens: TokenStream, result: &mut Vec<(String, Span)>) { fn open_delimiter(delimiter: Delimiter) -> String { match delimiter { - Delimiter::Parenthesis => "(".to_string(), - Delimiter::Brace => "[".to_string(), - Delimiter::Bracket => "{".to_string(), + Delimiter::Parenthesis => "( ".to_string(), + Delimiter::Brace => "[ ".to_string(), + Delimiter::Bracket => "{ ".to_string(), Delimiter::None => "".to_string(), } } fn close_delimiter(delimiter: Delimiter) -> String { match delimiter { - Delimiter::Parenthesis => ")".to_string(), - Delimiter::Brace => "]".to_string(), - Delimiter::Bracket => "}".to_string(), + Delimiter::Parenthesis => " ) ".to_string(), + Delimiter::Brace => " ] ".to_string(), + Delimiter::Bracket => " } ".to_string(), Delimiter::None => "".to_string(), } } diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index c8b31cd254..d08c9de9a0 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -7,7 +7,6 @@ use std::path::Path; use anyhow::{anyhow, bail, Context, Result}; use db::{connection, query, sqlez::connection::Connection, sqlez_macros::sql}; use gpui::Axis; -use indoc::indoc; use db::sqlez::domain::Domain; use util::{iife, unzip_option, ResultExt}; @@ -106,15 +105,15 @@ impl WorkspaceDb { DockPosition, ) = iife!({ if worktree_roots.len() == 0 { - self.select_row(indoc! {" + self.select_row(sql!( SELECT workspace_id, workspace_location, dock_visible, dock_anchor - FROM workspaces - ORDER BY timestamp DESC LIMIT 1"})?()? + FROM workspaces + ORDER BY timestamp DESC LIMIT 1))?()? 
} else { - self.select_row_bound(indoc! {" + self.select_row_bound(sql!( SELECT workspace_id, workspace_location, dock_visible, dock_anchor FROM workspaces - WHERE workspace_location = ?"})?(&workspace_location)? + WHERE workspace_location = ?))?(&workspace_location)? } .context("No workspaces found") }) @@ -142,19 +141,15 @@ impl WorkspaceDb { self.write(move |conn| { conn.with_savepoint("update_worktrees", || { // Clear out panes and pane_groups - conn.exec_bound(indoc! {" + conn.exec_bound(sql!( UPDATE workspaces SET dock_pane = NULL WHERE workspace_id = ?1; DELETE FROM pane_groups WHERE workspace_id = ?1; - DELETE FROM panes WHERE workspace_id = ?1;"})?(workspace.id) + DELETE FROM panes WHERE workspace_id = ?1;))?(workspace.id) .context("Clearing old panes")?; - conn.exec_bound(indoc! {" - DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ?"})?( - ( - &workspace.location, - workspace.id.clone(), - ) - ) + conn.exec_bound(sql!( + DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ? + ))?((&workspace.location, workspace.id.clone())) .context("clearing out old locations")?; // Upsert @@ -184,10 +179,11 @@ impl WorkspaceDb { .context("save pane in save workspace")?; // Complete workspace initialization - conn.exec_bound(indoc! {" + conn.exec_bound(sql!( UPDATE workspaces SET dock_pane = ? - WHERE workspace_id = ?"})?((dock_id, workspace.id)) + WHERE workspace_id = ? + ))?((dock_id, workspace.id)) .context("Finishing initialization with dock pane")?; Ok(()) @@ -203,20 +199,13 @@ impl WorkspaceDb { } } - /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots - pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, WorkspaceLocation)> { - iife!({ - // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html - Ok::<_, anyhow::Error>( - self.select_bound::( - "SELECT workspace_id, workspace_location FROM workspaces ORDER BY timestamp DESC LIMIT ?", - )?(limit)? - .into_iter() - .collect::>(), - ) - }) - .log_err() - .unwrap_or_default() + query! { + pub fn recent_workspaces(limit: usize) -> Result> { + SELECT workspace_id, workspace_location + FROM workspaces + ORDER BY timestamp DESC + LIMIT ? + } } fn get_center_pane_group(&self, workspace_id: WorkspaceId) -> Result { @@ -233,7 +222,7 @@ impl WorkspaceDb { ) -> Result> { type GroupKey = (Option, WorkspaceId); type GroupOrPane = (Option, Option, Option, Option); - self.select_bound::(indoc! {" + self.select_bound::(sql!( SELECT group_id, axis, pane_id, active FROM (SELECT group_id, @@ -243,7 +232,7 @@ impl WorkspaceDb { position, parent_group_id, workspace_id - FROM pane_groups + FROM pane_groups UNION SELECT NULL, @@ -257,7 +246,7 @@ impl WorkspaceDb { JOIN panes ON center_panes.pane_id = panes.pane_id) WHERE parent_group_id IS ? AND workspace_id = ? ORDER BY position - "})?((group_id, workspace_id))? + ))?((group_id, workspace_id))? .into_iter() .map(|(group_id, axis, pane_id, active)| { if let Some((group_id, axis)) = group_id.zip(axis) { @@ -293,10 +282,11 @@ impl WorkspaceDb { SerializedPaneGroup::Group { axis, children } => { let (parent_id, position) = unzip_option(parent); - let group_id = conn.select_row_bound::<_, i64>(indoc! {" + let group_id = conn.select_row_bound::<_, i64>(sql!( INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?) 
- RETURNING group_id"})?(( + RETURNING group_id + ))?(( workspace_id, parent_id, position, @@ -318,10 +308,11 @@ impl WorkspaceDb { } fn get_dock_pane(&self, workspace_id: WorkspaceId) -> Result { - let (pane_id, active) = self.select_row_bound(indoc! {" + let (pane_id, active) = self.select_row_bound(sql!( SELECT pane_id, active FROM panes - WHERE pane_id = (SELECT dock_pane FROM workspaces WHERE workspace_id = ?)"})?( + WHERE pane_id = (SELECT dock_pane FROM workspaces WHERE workspace_id = ?) + ))?( workspace_id, )? .context("No dock pane for workspace")?; @@ -339,17 +330,19 @@ impl WorkspaceDb { parent: Option<(GroupId, usize)>, // None indicates BOTH dock pane AND center_pane dock: bool, ) -> Result { - let pane_id = conn.select_row_bound::<_, i64>(indoc! {" + let pane_id = conn.select_row_bound::<_, i64>(sql!( INSERT INTO panes(workspace_id, active) VALUES (?, ?) - RETURNING pane_id"})?((workspace_id, pane.active))? + RETURNING pane_id + ))?((workspace_id, pane.active))? .ok_or_else(|| anyhow!("Could not retrieve inserted pane_id"))?; if !dock { let (parent_id, order) = unzip_option(parent); - conn.exec_bound(indoc! {" + conn.exec_bound(sql!( INSERT INTO center_panes(pane_id, parent_group_id, position) - VALUES (?, ?, ?)"})?((pane_id, parent_id, order))?; + VALUES (?, ?, ?) + ))?((pane_id, parent_id, order))?; } Self::save_items(conn, workspace_id, pane_id, &pane.children).context("Saving items")?; @@ -358,10 +351,11 @@ impl WorkspaceDb { } fn get_items(&self, pane_id: PaneId) -> Result> { - Ok(self.select_bound(indoc! {" + Ok(self.select_bound(sql!( SELECT kind, item_id FROM items WHERE pane_id = ? - ORDER BY position"})?(pane_id)?) + ORDER BY position + ))?(pane_id)?) } fn save_items( @@ -370,10 +364,11 @@ impl WorkspaceDb { pane_id: PaneId, items: &[SerializedItem], ) -> Result<()> { - let mut insert = conn.exec_bound( - "INSERT INTO items(workspace_id, pane_id, position, kind, item_id) VALUES (?, ?, ?, ?, ?)", - ).context("Preparing insertion")?; + let mut insert = conn.exec_bound(sql!( + INSERT INTO items(workspace_id, pane_id, position, kind, item_id) VALUES (?, ?, ?, ?, ?) + )).context("Preparing insertion")?; for (position, item) in items.iter().enumerate() { + dbg!(item); insert((workspace_id, pane_id, position, item))?; } @@ -386,7 +381,7 @@ mod tests { use std::sync::Arc; - use db::open_memory_db; + use db::open_test_db; use settings::DockAnchor; use super::*; @@ -395,18 +390,19 @@ mod tests { async fn test_next_id_stability() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_next_id_stability").await); + let db = WorkspaceDb(open_test_db("test_next_id_stability").await); db.write(|conn| { conn.migrate( "test_table", - &[indoc! {" + &[sql!( CREATE TABLE test_table( text TEXT, workspace_id INTEGER, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - ) STRICT;"}], + ) STRICT; + )], ) .unwrap(); }) @@ -416,22 +412,22 @@ mod tests { // Assert the empty row got inserted assert_eq!( Some(id), - db.select_row_bound::( - "SELECT workspace_id FROM workspaces WHERE workspace_id = ?" - ) + db.select_row_bound::(sql!( + SELECT workspace_id FROM workspaces WHERE workspace_id = ? 
+ )) .unwrap()(id) .unwrap() ); db.write(move |conn| { - conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?))) .unwrap()(("test-text-1", id)) .unwrap() }) .await; let test_text_1 = db - .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") + .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?)) .unwrap()(1) .unwrap() .unwrap(); @@ -442,19 +438,19 @@ mod tests { async fn test_workspace_id_stability() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_workspace_id_stability").await); + let db = WorkspaceDb(open_test_db("test_workspace_id_stability").await); db.write(|conn| { conn.migrate( "test_table", - &[indoc! {" + &[sql!( CREATE TABLE test_table( text TEXT, workspace_id INTEGER, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - ) STRICT;"}], + ) STRICT;)], ) }) .await @@ -479,7 +475,7 @@ mod tests { db.save_workspace(workspace_1.clone()).await; db.write(|conn| { - conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?))) .unwrap()(("test-text-1", 1)) .unwrap(); }) @@ -488,7 +484,7 @@ mod tests { db.save_workspace(workspace_2.clone()).await; db.write(|conn| { - conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?))) .unwrap()(("test-text-2", 2)) .unwrap(); }) @@ -505,14 +501,14 @@ mod tests { db.save_workspace(workspace_2).await; let test_text_2 = db - .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") + .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?)) .unwrap()(2) .unwrap() .unwrap(); assert_eq!(test_text_2, "test-text-2"); let test_text_1 = db - .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") + .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?)) .unwrap()(1) .unwrap() .unwrap(); @@ -523,7 +519,7 @@ mod tests { async fn test_full_workspace_serialization() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_full_workspace_serialization").await); + let db = WorkspaceDb(open_test_db("test_full_workspace_serialization").await); let dock_pane = crate::persistence::model::SerializedPane { children: vec![ @@ -597,7 +593,7 @@ mod tests { async fn test_workspace_assignment() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_basic_functionality").await); + let db = WorkspaceDb(open_test_db("test_basic_functionality").await); let workspace_1 = SerializedWorkspace { id: 1, @@ -689,7 +685,7 @@ mod tests { async fn test_basic_dock_pane() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("basic_dock_pane").await); + let db = WorkspaceDb(open_test_db("basic_dock_pane").await); let dock_pane = crate::persistence::model::SerializedPane::new( vec![ @@ -714,7 +710,7 @@ mod tests { async fn test_simple_split() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("simple_split").await); + let db = WorkspaceDb(open_test_db("simple_split").await); // ----------------- // | 1,2 | 5,6 | @@ -766,7 +762,7 @@ mod tests { async fn test_cleanup_panes() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_cleanup_panes").await); + let db = 
WorkspaceDb(open_test_db("test_cleanup_panes").await); let center_pane = SerializedPaneGroup::Group { axis: gpui::Axis::Horizontal, diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 584f6392d1..da796b5b44 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -2365,7 +2365,6 @@ impl Workspace { .await; // Traverse the splits tree and add to things - let (root, active_pane) = serialized_workspace .center_group .deserialize(&project, serialized_workspace.id, &workspace, &mut cx) @@ -2384,6 +2383,10 @@ impl Workspace { cx.focus(active_pane); } + if workspace.items(cx).next().is_none() { + cx.dispatch_action(NewFile); + } + cx.notify(); }); } @@ -2636,13 +2639,10 @@ pub fn open_paths( pub fn open_new(app_state: &Arc, cx: &mut MutableAppContext) -> Task<()> { let task = Workspace::new_local(Vec::new(), app_state.clone(), cx); cx.spawn(|mut cx| async move { - eprintln!("Open new task spawned"); let (workspace, opened_paths) = task.await; - eprintln!("workspace and path items created"); workspace.update(&mut cx, |_, cx| { if opened_paths.is_empty() { - eprintln!("new file redispatched"); cx.dispatch_action(NewFile); } }) From 8a48567857cfd5fd77d9350ec53809ac68364076 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 30 Nov 2022 17:28:49 -0800 Subject: [PATCH 183/240] Reactivate the correct item in each pane when deserializing --- crates/db/src/db.rs | 4 +- .../terminal/src/terminal_container_view.rs | 1 - crates/workspace/src/persistence.rs | 67 ++++++++++--------- crates/workspace/src/persistence/model.rs | 31 +++++++-- crates/workspace/src/workspace.rs | 38 ++++++----- 5 files changed, 83 insertions(+), 58 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 3fc069405d..ea355a91a6 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -113,7 +113,6 @@ macro_rules! query { $vis async fn $id(&self) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; - self.write(|connection| { let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); @@ -143,7 +142,6 @@ macro_rules! query { $vis async fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; - self.write(move |connection| { let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); @@ -186,7 +184,7 @@ macro_rules! 
query { )) } }; - ($vis:vis async fn $id:ident() -> Result> { $($sql:tt)+ }) => { + ($vis:vis async fn $id:ident() -> Result> { $($sql:tt)+ }) => { pub async fn $id(&self) -> $crate::anyhow::Result> { use $crate::anyhow::Context; diff --git a/crates/terminal/src/terminal_container_view.rs b/crates/terminal/src/terminal_container_view.rs index a6c28d4baf..8f4bfeeb53 100644 --- a/crates/terminal/src/terminal_container_view.rs +++ b/crates/terminal/src/terminal_container_view.rs @@ -137,7 +137,6 @@ impl TerminalContainer { TerminalContainerContent::Error(view) } }; - // cx.focus(content.handle()); TerminalContainer { content, diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index d08c9de9a0..213033a90f 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -76,6 +76,7 @@ impl Domain for Workspace { pane_id INTEGER NOT NULL, kind TEXT NOT NULL, position INTEGER NOT NULL, + active INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE ON UPDATE CASCADE, @@ -352,7 +353,7 @@ impl WorkspaceDb { fn get_items(&self, pane_id: PaneId) -> Result> { Ok(self.select_bound(sql!( - SELECT kind, item_id FROM items + SELECT kind, item_id, active FROM items WHERE pane_id = ? ORDER BY position ))?(pane_id)?) @@ -365,10 +366,9 @@ impl WorkspaceDb { items: &[SerializedItem], ) -> Result<()> { let mut insert = conn.exec_bound(sql!( - INSERT INTO items(workspace_id, pane_id, position, kind, item_id) VALUES (?, ?, ?, ?, ?) + INSERT INTO items(workspace_id, pane_id, position, kind, item_id, active) VALUES (?, ?, ?, ?, ?, ?) )).context("Preparing insertion")?; for (position, item) in items.iter().enumerate() { - dbg!(item); insert((workspace_id, pane_id, position, item))?; } @@ -497,6 +497,7 @@ mod tests { workspace_2.dock_pane.children.push(SerializedItem { kind: Arc::from("Test"), item_id: 10, + active: true, }); db.save_workspace(workspace_2).await; @@ -523,10 +524,10 @@ mod tests { let dock_pane = crate::persistence::model::SerializedPane { children: vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), - SerializedItem::new("Terminal", 3), - SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 1, false), + SerializedItem::new("Terminal", 2, false), + SerializedItem::new("Terminal", 3, true), + SerializedItem::new("Terminal", 4, false), ], active: false, }; @@ -544,15 +545,15 @@ mod tests { children: vec![ SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 5), - SerializedItem::new("Terminal", 6), + SerializedItem::new("Terminal", 5, false), + SerializedItem::new("Terminal", 6, true), ], false, )), SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 7), - SerializedItem::new("Terminal", 8), + SerializedItem::new("Terminal", 7, true), + SerializedItem::new("Terminal", 8, false), ], false, )), @@ -560,8 +561,8 @@ mod tests { }, SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 9), - SerializedItem::new("Terminal", 10), + SerializedItem::new("Terminal", 9, false), + SerializedItem::new("Terminal", 10, true), ], false, )), @@ -689,10 +690,10 @@ mod tests { let dock_pane = crate::persistence::model::SerializedPane::new( vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 4), - SerializedItem::new("Terminal", 2), - SerializedItem::new("Terminal", 3), + SerializedItem::new("Terminal", 1, false), + 
SerializedItem::new("Terminal", 4, false), + SerializedItem::new("Terminal", 2, false), + SerializedItem::new("Terminal", 3, true), ], false, ); @@ -725,15 +726,15 @@ mod tests { children: vec![ SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), + SerializedItem::new("Terminal", 1, false), + SerializedItem::new("Terminal", 2, true), ], false, )), SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 4), - SerializedItem::new("Terminal", 3), + SerializedItem::new("Terminal", 4, false), + SerializedItem::new("Terminal", 3, true), ], true, )), @@ -741,8 +742,8 @@ mod tests { }, SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 5), - SerializedItem::new("Terminal", 6), + SerializedItem::new("Terminal", 5, true), + SerializedItem::new("Terminal", 6, false), ], false, )), @@ -772,15 +773,15 @@ mod tests { children: vec![ SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), + SerializedItem::new("Terminal", 1, false), + SerializedItem::new("Terminal", 2, true), ], false, )), SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 4), - SerializedItem::new("Terminal", 3), + SerializedItem::new("Terminal", 4, false), + SerializedItem::new("Terminal", 3, true), ], true, )), @@ -788,8 +789,8 @@ mod tests { }, SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 5), - SerializedItem::new("Terminal", 6), + SerializedItem::new("Terminal", 5, false), + SerializedItem::new("Terminal", 6, true), ], false, )), @@ -807,15 +808,15 @@ mod tests { children: vec![ SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), + SerializedItem::new("Terminal", 1, false), + SerializedItem::new("Terminal", 2, true), ], false, )), SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 4), - SerializedItem::new("Terminal", 3), + SerializedItem::new("Terminal", 4, true), + SerializedItem::new("Terminal", 3, false), ], true, )), diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index dc6d8ba8ee..c6943ab622 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -147,7 +147,8 @@ impl SerializedPane { workspace: &ViewHandle, cx: &mut AsyncAppContext, ) { - for item in self.children.iter() { + let mut active_item_index = None; + for (index, item) in self.children.iter().enumerate() { let project = project.clone(); let item_handle = pane_handle .update(cx, |_, cx| { @@ -174,6 +175,16 @@ impl SerializedPane { Pane::add_item(workspace, &pane_handle, item_handle, false, false, None, cx); }) } + + if item.active { + active_item_index = Some(index); + } + } + + if let Some(active_item_index) = active_item_index { + pane_handle.update(cx, |pane, cx| { + pane.activate_item(active_item_index, false, false, cx); + }) } } } @@ -186,13 +197,15 @@ pub type ItemId = usize; pub struct SerializedItem { pub kind: Arc, pub item_id: ItemId, + pub active: bool, } impl SerializedItem { - pub fn new(kind: impl AsRef, item_id: ItemId) -> Self { + pub fn new(kind: impl AsRef, item_id: ItemId, active: bool) -> Self { Self { kind: Arc::from(kind.as_ref()), item_id, + active, } } } @@ -203,6 +216,7 @@ impl Default for SerializedItem { SerializedItem { kind: 
Arc::from("Terminal"), item_id: 100000, + active: false, } } } @@ -210,7 +224,8 @@ impl Default for SerializedItem { impl Bind for &SerializedItem { fn bind(&self, statement: &Statement, start_index: i32) -> Result { let next_index = statement.bind(self.kind.clone(), start_index)?; - statement.bind(self.item_id, next_index) + let next_index = statement.bind(self.item_id, next_index)?; + statement.bind(self.active, next_index) } } @@ -218,7 +233,15 @@ impl Column for SerializedItem { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { let (kind, next_index) = Arc::::column(statement, start_index)?; let (item_id, next_index) = ItemId::column(statement, next_index)?; - Ok((SerializedItem { kind, item_id }, next_index)) + let (active, next_index) = bool::column(statement, next_index)?; + Ok(( + SerializedItem { + kind, + item_id, + active, + }, + next_index, + )) } } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index da796b5b44..82d95389d8 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -2292,12 +2292,14 @@ impl Workspace { ) -> SerializedPane { let (items, active) = { let pane = pane_handle.read(cx); + let active_item_id = pane.active_item().map(|item| item.id()); ( pane.items() .filter_map(|item_handle| { Some(SerializedItem { kind: Arc::from(item_handle.serialized_item_kind()?), item_id: item_handle.id(), + active: Some(item_handle.id()) == active_item_id, }) }) .collect::>(), @@ -2308,8 +2310,6 @@ impl Workspace { SerializedPane::new(items, active) } - let dock_pane = serialize_pane_handle(self.dock.pane(), cx); - fn build_serialized_pane_group( pane_group: &Member, cx: &AppContext, @@ -2327,19 +2327,25 @@ impl Workspace { } } } - let center_group = build_serialized_pane_group(&self.center.root, cx); - let serialized_workspace = SerializedWorkspace { - id: self.database_id, - location: self.location(cx), - dock_position: self.dock.position(), - dock_pane, - center_group, - }; + let location = self.location(cx); - cx.background() - .spawn(persistence::DB.save_workspace(serialized_workspace)) - .detach(); + if !location.paths().is_empty() { + let dock_pane = serialize_pane_handle(self.dock.pane(), cx); + let center_group = build_serialized_pane_group(&self.center.root, cx); + + let serialized_workspace = SerializedWorkspace { + id: self.database_id, + location: self.location(cx), + dock_position: self.dock.position(), + dock_pane, + center_group, + }; + + cx.background() + .spawn(persistence::DB.save_workspace(serialized_workspace)) + .detach(); + } } fn load_from_serialized_workspace( @@ -2380,13 +2386,11 @@ impl Workspace { Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx); if let Some(active_pane) = active_pane { + // Change the focus to the workspace first so that we retrigger focus in on the pane. 
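            // Hedged reading, beyond what the patch states: focus notifications fire
            // on focus *changes*, so if the pane is already considered focused,
            // cx.focus(active_pane) alone may be a no-op. Bouncing focus through the
            // workspace first guarantees the pane observes a fresh focus_in:
            //
            //     cx.focus_self();        // workspace takes focus
            //     cx.focus(active_pane);  // pane re-acquires it, re-firing focus_in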
+ cx.focus_self(); cx.focus(active_pane); } - if workspace.items(cx).next().is_none() { - cx.dispatch_action(NewFile); - } - cx.notify(); }); } From b8d423555ba6aa1e965ef7c73a0fbe5a1a33f40b Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 1 Dec 2022 12:02:38 -0800 Subject: [PATCH 184/240] Added side bar restoration --- crates/workspace/src/persistence.rs | 27 +++++++++++++++++------ crates/workspace/src/persistence/model.rs | 1 + crates/workspace/src/workspace.rs | 7 ++++++ 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 213033a90f..db59141087 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -36,6 +36,7 @@ impl Domain for Workspace { dock_visible INTEGER, // Boolean dock_anchor TEXT, // Enum: 'Bottom' / 'Right' / 'Expanded' dock_pane INTEGER, // NULL indicates that we don't have a dock pane yet + project_panel_open INTEGER, //Boolean timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, FOREIGN KEY(dock_pane) REFERENCES panes(pane_id) ) STRICT; @@ -100,19 +101,20 @@ impl WorkspaceDb { // Note that we re-assign the workspace_id here in case it's empty // and we've grabbed the most recent workspace - let (workspace_id, workspace_location, dock_position): ( + let (workspace_id, workspace_location, project_panel_open, dock_position): ( WorkspaceId, WorkspaceLocation, + bool, DockPosition, ) = iife!({ if worktree_roots.len() == 0 { self.select_row(sql!( - SELECT workspace_id, workspace_location, dock_visible, dock_anchor + SELECT workspace_id, workspace_location, project_panel_open, dock_visible, dock_anchor FROM workspaces ORDER BY timestamp DESC LIMIT 1))?()? } else { self.select_row_bound(sql!( - SELECT workspace_id, workspace_location, dock_visible, dock_anchor + SELECT workspace_id, workspace_location, project_panel_open, dock_visible, dock_anchor FROM workspaces WHERE workspace_location = ?))?(&workspace_location)? 
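        // Hedged aside: iife! is util's immediately-invoked-function-expression helper;
        // judging from async_iife!, added in the next patch of this series, it expands
        // to roughly (|| { $block })(), which is what lets the `?` operators above
        // short-circuit to this block's Result rather than to the enclosing function:
        //
        //     let row: anyhow::Result<_> = (|| { /* body using ? */ })();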
} @@ -133,6 +135,7 @@ impl WorkspaceDb { .context("Getting center group") .log_err()?, dock_position, + project_panel_open }) } @@ -158,18 +161,20 @@ impl WorkspaceDb { INSERT INTO workspaces( workspace_id, workspace_location, + project_panel_open, dock_visible, dock_anchor, timestamp ) - VALUES (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP) + VALUES (?1, ?2, ?3, ?4, ?5, CURRENT_TIMESTAMP) ON CONFLICT DO UPDATE SET workspace_location = ?2, - dock_visible = ?3, - dock_anchor = ?4, + project_panel_open = ?3, + dock_visible = ?4, + dock_anchor = ?5, timestamp = CURRENT_TIMESTAMP - ))?((workspace.id, &workspace.location, workspace.dock_position)) + ))?((workspace.id, &workspace.location, workspace.project_panel_open, workspace.dock_position)) .context("Updating workspace")?; // Save center pane group and dock pane @@ -273,6 +278,7 @@ impl WorkspaceDb { .collect::>() } + fn save_pane_group( conn: &Connection, workspace_id: WorkspaceId, @@ -462,6 +468,7 @@ mod tests { dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), dock_pane: Default::default(), + project_panel_open: true }; let mut workspace_2 = SerializedWorkspace { @@ -470,6 +477,7 @@ mod tests { dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), dock_pane: Default::default(), + project_panel_open: false }; db.save_workspace(workspace_1.clone()).await; @@ -575,6 +583,7 @@ mod tests { dock_position: DockPosition::Shown(DockAnchor::Bottom), center_group, dock_pane, + project_panel_open: true }; db.save_workspace(workspace.clone()).await; @@ -602,6 +611,7 @@ mod tests { dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), dock_pane: Default::default(), + project_panel_open: true, }; let mut workspace_2 = SerializedWorkspace { @@ -610,6 +620,7 @@ mod tests { dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), dock_pane: Default::default(), + project_panel_open: false, }; db.save_workspace(workspace_1.clone()).await; @@ -645,6 +656,7 @@ mod tests { dock_position: DockPosition::Shown(DockAnchor::Right), center_group: Default::default(), dock_pane: Default::default(), + project_panel_open: false }; db.save_workspace(workspace_3.clone()).await; @@ -679,6 +691,7 @@ mod tests { dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Right), center_group: center_group.clone(), dock_pane, + project_panel_open: true } } diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index c6943ab622..c57c992d7b 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -65,6 +65,7 @@ pub struct SerializedWorkspace { pub dock_position: DockPosition, pub center_group: SerializedPaneGroup, pub dock_pane: SerializedPane, + pub project_panel_open: bool, } #[derive(Debug, PartialEq, Eq, Clone)] diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 82d95389d8..66ef63f27f 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -2340,6 +2340,7 @@ impl Workspace { dock_position: self.dock.position(), dock_pane, center_group, + project_panel_open: self.left_sidebar.read(cx).is_open(), }; cx.background() @@ -2383,6 +2384,12 @@ impl Workspace { // Swap workspace center group workspace.center = PaneGroup::with_root(root); + // Note, if this is moved after 'set_dock_position' + // it causes an infinite loop. 
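            // Hedged note: the comment above records the constraint, not the mechanism.
            // Presumably both restore steps notify observers that can re-enter workspace
            // persistence, so a cycle of the shape
            //
            //     set_dock_position -> notify -> serialize -> sidebar restore -> notify -> ...
            //
            // becomes possible; restoring the sidebar first is the order that breaks it.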
+ if serialized_workspace.project_panel_open { + workspace.toggle_sidebar_item_focus(SidebarSide::Left, 0, cx) + } + Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx); if let Some(active_pane) = active_pane { From 189a820113dd0409ee7736e370087d6b7792f9d0 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 1 Dec 2022 14:16:38 -0800 Subject: [PATCH 185/240] First draft of graceful corruption restoration --- crates/db/src/db.rs | 79 ++++++++++++++++++++-- crates/sqlez/src/thread_safe_connection.rs | 43 ++++++------ crates/util/src/lib.rs | 7 ++ 3 files changed, 103 insertions(+), 26 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index ea355a91a6..6de51cb0e6 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -2,6 +2,7 @@ pub mod kvp; // Re-export pub use anyhow; +use anyhow::Context; pub use indoc::indoc; pub use lazy_static; pub use smol; @@ -14,9 +15,13 @@ use sqlez_macros::sql; use std::fs::{create_dir_all, remove_dir_all}; use std::path::Path; use std::sync::atomic::{AtomicBool, Ordering}; +use std::time::{SystemTime, UNIX_EPOCH}; +use util::{async_iife, ResultExt}; use util::channel::{ReleaseChannel, RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; use util::paths::DB_DIR; +// TODO: Add a savepoint to the thread safe connection initialization and migrations + const CONNECTION_INITIALIZE_QUERY: &'static str = sql!( PRAGMA synchronous=NORMAL; PRAGMA busy_timeout=1; @@ -28,31 +33,90 @@ const DB_INITIALIZE_QUERY: &'static str = sql!( PRAGMA journal_mode=WAL; ); +const FALLBACK_DB_NAME: &'static str = "FALLBACK_MEMORY_DB"; + lazy_static::lazy_static! { static ref DB_WIPED: AtomicBool = AtomicBool::new(false); } /// Open or create a database at the given directory path. pub async fn open_db() -> ThreadSafeConnection { - // Use 0 for now. 
Will implement incrementing and clearing of old db files soon TM - let current_db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); + let db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); + // If WIPE_DB, delete 0-{channel} if *RELEASE_CHANNEL == ReleaseChannel::Dev && std::env::var("WIPE_DB").is_ok() && !DB_WIPED.load(Ordering::Acquire) { - remove_dir_all(¤t_db_dir).ok(); - DB_WIPED.store(true, Ordering::Relaxed); + remove_dir_all(&db_dir).ok(); + DB_WIPED.store(true, Ordering::Release); } - create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); - let db_path = current_db_dir.join(Path::new("db.sqlite")); + let connection = async_iife!({ + // If no db folder, create one at 0-{channel} + create_dir_all(&db_dir).context("Could not create db directory")?; + let db_path = db_dir.join(Path::new("db.sqlite")); - ThreadSafeConnection::::builder(db_path.to_string_lossy().as_ref(), true) + // Try building a connection + if let Some(connection) = ThreadSafeConnection::::builder(db_path.to_string_lossy().as_ref(), true) + .with_db_initialization_query(DB_INITIALIZE_QUERY) + .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) + .build() + .await + .log_err() { + return Ok(connection) + } + + let backup_timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect( + "System clock is set before the unix timestamp, Zed does not support this region of spacetime" + ) + .as_millis(); + + // If failed, move 0-{channel} to {current unix timestamp}-{channel} + let backup_db_dir = (*DB_DIR).join(Path::new(&format!( + "{}{}", + backup_timestamp, + *RELEASE_CHANNEL_NAME + ))); + + std::fs::rename(&db_dir, backup_db_dir) + .context("Failed clean up corrupted database, panicking.")?; + + // TODO: Set a constant with the failed timestamp and error so we can notify the user + + // Create a new 0-{channel} + create_dir_all(&db_dir).context("Should be able to create the database directory")?; + let db_path = db_dir.join(Path::new("db.sqlite")); + + // Try again + ThreadSafeConnection::::builder(db_path.to_string_lossy().as_ref(), true) + .with_db_initialization_query(DB_INITIALIZE_QUERY) + .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) + .build() + .await + }).await.log_err(); + + if let Some(connection) = connection { + return connection; + } + + // TODO: Set another constant so that we can escalate the notification + + // If still failed, create an in memory db with a known name + open_fallback_db().await +} + +async fn open_fallback_db() -> ThreadSafeConnection { + ThreadSafeConnection::::builder(FALLBACK_DB_NAME, false) .with_db_initialization_query(DB_INITIALIZE_QUERY) .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) .build() .await + .expect( + "Fallback in memory database failed. 
Likely initialization queries or migrations have fundamental errors", + ) } #[cfg(any(test, feature = "test-support"))] @@ -66,6 +130,7 @@ pub async fn open_test_db(db_name: &str) -> ThreadSafeConnection .with_write_queue_constructor(locking_queue()) .build() .await + .unwrap() } /// Implements a basic DB wrapper for a given domain diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 82697d1f90..4849e785b5 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -1,3 +1,4 @@ +use anyhow::Context; use futures::{channel::oneshot, Future, FutureExt}; use lazy_static::lazy_static; use parking_lot::{Mutex, RwLock}; @@ -72,7 +73,7 @@ impl ThreadSafeConnectionBuilder { self } - pub async fn build(self) -> ThreadSafeConnection { + pub async fn build(self) -> anyhow::Result> { self.connection .initialize_queues(self.write_queue_constructor); @@ -81,26 +82,33 @@ impl ThreadSafeConnectionBuilder { self.connection .write(move |connection| { if let Some(db_initialize_query) = db_initialize_query { - connection.exec(db_initialize_query).expect(&format!( - "Db initialize query failed to execute: {}", - db_initialize_query - ))() - .unwrap(); + connection.exec(db_initialize_query).with_context(|| { + format!( + "Db initialize query failed to execute: {}", + db_initialize_query + ) + })?()?; } - let mut failure_result = None; + // Retry failed migrations in case they were run in parallel from different + // processes. This gives a best attempt at migrating before bailing + let mut migration_result = + anyhow::Result::<()>::Err(anyhow::anyhow!("Migration never run")); + for _ in 0..MIGRATION_RETRIES { - failure_result = Some(M::migrate(connection)); - if failure_result.as_ref().unwrap().is_ok() { + migration_result = connection + .with_savepoint("thread_safe_multi_migration", || M::migrate(connection)); + + if migration_result.is_ok() { break; } } - failure_result.unwrap().expect("Migration failed"); + migration_result }) - .await; + .await?; - self.connection + Ok(self.connection) } } @@ -240,10 +248,6 @@ impl Clone for ThreadSafeConnection { } } -// TODO: -// 1. When migration or initialization fails, move the corrupted db to a holding place and create a new one -// 2. If the new db also fails, downgrade to a shared in memory db -// 3. In either case notify the user about what went wrong impl Deref for ThreadSafeConnection { type Target = Connection; @@ -265,7 +269,7 @@ pub fn locking_queue() -> WriteQueueConstructor { #[cfg(test)] mod test { use indoc::indoc; - use lazy_static::__Deref; + use std::ops::Deref; use std::thread; use crate::{domain::Domain, thread_safe_connection::ThreadSafeConnection}; @@ -295,7 +299,8 @@ mod test { PRAGMA foreign_keys=TRUE; PRAGMA case_sensitive_like=TRUE; "}); - let _ = smol::block_on(builder.build()).deref(); + + let _ = smol::block_on(builder.build()).unwrap().deref(); })); } @@ -341,6 +346,6 @@ mod test { ThreadSafeConnection::::builder("wild_zed_lost_failure", false) .with_connection_initialize_query("PRAGMA FOREIGN_KEYS=true"); - smol::block_on(builder.build()); + smol::block_on(builder.build()).unwrap(); } } diff --git a/crates/util/src/lib.rs b/crates/util/src/lib.rs index 78536f01d0..0e83bb5f19 100644 --- a/crates/util/src/lib.rs +++ b/crates/util/src/lib.rs @@ -223,6 +223,13 @@ macro_rules! iife { }; } +#[macro_export] +macro_rules! 
async_iife { + ($block:block) => { + (|| async move { $block })() + }; +} + #[cfg(test)] mod tests { use super::*; From 5e240f98f0b80a5f2ebd902c690957e11a7d63b6 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 1 Dec 2022 18:31:05 -0800 Subject: [PATCH 186/240] Reworked thread safe connection be threadsafer,,,, again Co-Authored-By: kay@zed.dev --- crates/db/src/db.rs | 559 ++++++++------------- crates/db/src/kvp.rs | 29 +- crates/db/src/query.rs | 314 ++++++++++++ crates/editor/src/persistence.rs | 27 +- crates/sqlez/src/bindable.rs | 164 +++--- crates/sqlez/src/connection.rs | 14 +- crates/sqlez/src/domain.rs | 4 +- crates/sqlez/src/migrations.rs | 3 + crates/sqlez/src/thread_safe_connection.rs | 143 +++--- crates/terminal/src/persistence.rs | 19 +- crates/workspace/src/persistence.rs | 44 +- crates/workspace/src/workspace.rs | 5 +- 12 files changed, 741 insertions(+), 584 deletions(-) create mode 100644 crates/db/src/query.rs diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 6de51cb0e6..6c6688b0d1 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,26 +1,27 @@ pub mod kvp; +pub mod query; // Re-export pub use anyhow; use anyhow::Context; pub use indoc::indoc; pub use lazy_static; +use parking_lot::{Mutex, RwLock}; pub use smol; pub use sqlez; pub use sqlez_macros; +pub use util::channel::{RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; +pub use util::paths::DB_DIR; use sqlez::domain::Migrator; use sqlez::thread_safe_connection::ThreadSafeConnection; use sqlez_macros::sql; use std::fs::{create_dir_all, remove_dir_all}; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::{SystemTime, UNIX_EPOCH}; use util::{async_iife, ResultExt}; -use util::channel::{ReleaseChannel, RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; -use util::paths::DB_DIR; - -// TODO: Add a savepoint to the thread safe connection initialization and migrations +use util::channel::ReleaseChannel; const CONNECTION_INITIALIZE_QUERY: &'static str = sql!( PRAGMA synchronous=NORMAL; @@ -36,79 +37,117 @@ const DB_INITIALIZE_QUERY: &'static str = sql!( const FALLBACK_DB_NAME: &'static str = "FALLBACK_MEMORY_DB"; lazy_static::lazy_static! { - static ref DB_WIPED: AtomicBool = AtomicBool::new(false); + static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(()); + static ref DB_WIPED: RwLock = RwLock::new(false); + pub static ref BACKUP_DB_PATH: RwLock> = RwLock::new(None); + pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false); } /// Open or create a database at the given directory path. -pub async fn open_db() -> ThreadSafeConnection { - let db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); +/// This will retry a couple times if there are failures. If opening fails once, the db directory +/// is moved to a backup folder and a new one is created. If that fails, a shared in memory db is created. +/// In either case, static variables are set so that the user can be notified. 
 /// Open or create a database at the given directory path.
-pub async fn open_db<M: Migrator>() -> ThreadSafeConnection<M> {
-    let db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME)));
+/// This will retry a couple times if there are failures. If opening fails once, the db directory
+/// is moved to a backup folder and a new one is created. If that fails, a shared in memory db is created.
+/// In either case, static variables are set so that the user can be notified.
+pub async fn open_db<M: Migrator>(wipe_db: bool, db_dir: &Path, release_channel: &ReleaseChannel) -> ThreadSafeConnection<M> {
+    let main_db_dir = db_dir.join(Path::new(&format!("0-{}", release_channel.name())));

     // If WIPE_DB, delete 0-{channel}
-    if *RELEASE_CHANNEL == ReleaseChannel::Dev
-        && std::env::var("WIPE_DB").is_ok()
-        && !DB_WIPED.load(Ordering::Acquire)
+    if release_channel == &ReleaseChannel::Dev
+        && wipe_db
+        && !*DB_WIPED.read()
     {
-        remove_dir_all(&db_dir).ok();
-        DB_WIPED.store(true, Ordering::Release);
+        let mut db_wiped = DB_WIPED.write();
+        if !*db_wiped {
+            remove_dir_all(&main_db_dir).ok();
+
+            *db_wiped = true;
+        }
     }

     let connection = async_iife!({
+        // Note: This still has a race condition where 1 set of migrations succeeds
+        // (e.g. (Workspace, Editor)) and another fails (e.g. (Workspace, Terminal))
+        // This will cause the first connection to have the database taken out
+        // from under it. This *should* be fine though. The second database failure will
+        // cause errors in the log and so should be observed by developers while writing
+        // soon-to-be good migrations. If user databases are corrupted, we toss them out
+        // and try again from a blank. As long as running all migrations from start to end
+        // is ok, this race condition will never be triggered.
+        //
+        // Basically: Don't ever push invalid migrations to stable or everyone will have
+        // a bad time.
+
         // If no db folder, create one at 0-{channel}
-        create_dir_all(&db_dir).context("Could not create db directory")?;
-        let db_path = db_dir.join(Path::new("db.sqlite"));
+        create_dir_all(&main_db_dir).context("Could not create db directory")?;
+        let db_path = main_db_dir.join(Path::new("db.sqlite"));

+        // Optimistically open databases in parallel
+        if !DB_FILE_OPERATIONS.is_locked() {
+            // Try building a connection
+            if let Some(connection) = open_main_db(&db_path).await {
+                return Ok(connection)
+            };
+        }
+
+        // Take a lock in the failure case so that we move the db once per process instead
+        // of potentially multiple times from different threads. This shouldn't happen in the
+        // normal path
+        let _lock = DB_FILE_OPERATIONS.lock();
+        if let Some(connection) = open_main_db(&db_path).await {
+            return Ok(connection)
+        };
+
+        let backup_timestamp = SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            .expect("System clock is set before the unix timestamp, Zed does not support this region of spacetime")
+            .as_millis();
+
+        // If failed, move 0-{channel} to {current unix timestamp}-{channel}
+        let backup_db_dir = db_dir.join(Path::new(&format!(
+            "{}-{}",
+            backup_timestamp,
+            release_channel.name(),
+        )));
+
+        std::fs::rename(&main_db_dir, &backup_db_dir)
+            .context("Failed to clean up the corrupted database")?;
+
+        // Set a static ref with the failed timestamp and error so we can notify the user
+        {
+            let mut guard = BACKUP_DB_PATH.write();
+            *guard = Some(backup_db_dir);
+        }
+
+        // Create a new 0-{channel}
+        create_dir_all(&main_db_dir).context("Should be able to create the database directory")?;
+        let db_path = main_db_dir.join(Path::new("db.sqlite"));
+
+        // Try again
+        open_main_db(&db_path).await.context("Could not open newly created db")
+    }).await.log_err();

     if let Some(connection) = connection {
         return connection;
     }

-    // TODO: Set another constant so that we can escalate the notification
+    // Set another static ref so that we can escalate the notification
+    ALL_FILE_DB_FAILED.store(true, Ordering::Release);

     // If still failed, create an in memory db with a known name
     open_fallback_db().await
 }

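The `is_locked()` fast path followed by lock-and-retry above is a double-checked locking shape: the common case opens the database optimistically and in parallel, and the lock only serializes the rare recovery so the corrupted directory is moved aside once per process. A stripped-down sketch of that shape; `try_open` and `recover_then_open` are hypothetical stand-ins for opening the sqlite file and for the move-aside-and-recreate recovery:

use parking_lot::Mutex;

struct Db;

lazy_static::lazy_static! {
    static ref FILE_OPS: Mutex<()> = Mutex::new(());
}

fn try_open() -> Option<Db> {
    Some(Db) // stand-in for opening + migrating the database file
}

fn recover_then_open() -> Option<Db> {
    Some(Db) // stand-in for renaming the corrupt directory aside and recreating it
}

fn open_with_recovery() -> Option<Db> {
    // Fast path: while no thread is mid-recovery, open optimistically
    // and in parallel, without taking the lock.
    if !FILE_OPS.is_locked() {
        if let Some(db) = try_open() {
            return Some(db);
        }
    }

    // Slow path: serialize recovery, then re-check, because another thread
    // may already have moved the corrupt directory aside and rebuilt it.
    let _guard = FILE_OPS.lock();
    if let Some(db) = try_open() {
        return Some(db);
    }
    recover_then_open()
}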
+async fn open_main_db<M: Migrator>(db_path: &PathBuf) -> Option<ThreadSafeConnection<M>> {
+    println!("Opening main db");
+    ThreadSafeConnection::<M>::builder(db_path.to_string_lossy().as_ref(), true)
+        .with_db_initialization_query(DB_INITIALIZE_QUERY)
+        .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY)
+        .build()
+        .await
+        .log_err()
+}
+
 async fn open_fallback_db<M: Migrator>() -> ThreadSafeConnection<M> {
+    println!("Opening fallback db");
     ThreadSafeConnection::<M>::builder(FALLBACK_DB_NAME, false)
         .with_db_initialization_query(DB_INITIALIZE_QUERY)
         .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY)
         .build()
         .await
         .expect(
             "Fallback in memory database failed. Likely initialization queries or migrations have fundamental errors",
         )
 }

@@ -135,17 +174,27 @@ pub async fn open_test_db<M: Migrator>(db_name: &str) -> ThreadSafeConnection<M>
 /// Implements a basic DB wrapper for a given domain
 #[macro_export]
-macro_rules!
define_connection { + (pub static ref $id:ident: $t:ident<()> = $migrations:expr;) => { + pub struct $t($crate::sqlez::thread_safe_connection::ThreadSafeConnection<$t>); impl ::std::ops::Deref for $t { - type Target = $crate::sqlez::thread_safe_connection::ThreadSafeConnection<$d>; + type Target = $crate::sqlez::thread_safe_connection::ThreadSafeConnection<$t>; fn deref(&self) -> &Self::Target { &self.0 } } + + impl $crate::sqlez::domain::Domain for $t { + fn name() -> &'static str { + stringify!($t) + } + + fn migrations() -> &'static [&'static str] { + $migrations + } + } #[cfg(any(test, feature = "test-support"))] $crate::lazy_static::lazy_static! { @@ -154,322 +203,124 @@ macro_rules! connection { #[cfg(not(any(test, feature = "test-support")))] $crate::lazy_static::lazy_static! { - pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db())); + pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(std::env::var("WIPE_DB").is_ok(), &$crate::DB_DIR, &$crate::RELEASE_CHANNEL))); + } + }; + (pub static ref $id:ident: $t:ident<$($d:ty),+> = $migrations:expr;) => { + pub struct $t($crate::sqlez::thread_safe_connection::ThreadSafeConnection<( $($d),+, $t )>); + + impl ::std::ops::Deref for $t { + type Target = $crate::sqlez::thread_safe_connection::ThreadSafeConnection<($($d),+, $t)>; + + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + impl $crate::sqlez::domain::Domain for $t { + fn name() -> &'static str { + stringify!($t) + } + + fn migrations() -> &'static [&'static str] { + $migrations + } + } + + #[cfg(any(test, feature = "test-support"))] + $crate::lazy_static::lazy_static! { + pub static ref $id: $t = $t($crate::smol::block_on($crate::open_test_db(stringify!($id)))); + } + + #[cfg(not(any(test, feature = "test-support")))] + $crate::lazy_static::lazy_static! { + pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(std::env::var("WIPE_DB").is_ok(), &$crate::DB_DIR, &$crate::RELEASE_CHANNEL))); } }; } -#[macro_export] -macro_rules! 
query { - ($vis:vis fn $id:ident() -> Result<()> { $($sql:tt)+ }) => { - $vis fn $id(&self) -> $crate::anyhow::Result<()> { - use $crate::anyhow::Context; +#[cfg(test)] +mod tests { + use std::thread; - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + use sqlez::domain::Domain; + use sqlez_macros::sql; + use tempdir::TempDir; + use util::channel::ReleaseChannel; - self.exec(sql_stmt)?().context(::std::format!( - "Error in {}, exec failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt, - )) + use crate::open_db; + + enum TestDB {} + + impl Domain for TestDB { + fn name() -> &'static str { + "db_tests" } - }; - ($vis:vis async fn $id:ident() -> Result<()> { $($sql:tt)+ }) => { - $vis async fn $id(&self) -> $crate::anyhow::Result<()> { - use $crate::anyhow::Context; - self.write(|connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - connection.exec(sql_stmt)?().context(::std::format!( - "Error in {}, exec failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await + fn migrations() -> &'static [&'static str] { + &[sql!( + CREATE TABLE test(value); + )] } - }; - ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $($sql:tt)+ }) => { - $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { - use $crate::anyhow::Context; + } + + // Test that wipe_db exists and works and gives a new db + #[test] + fn test_wipe_db() { + env_logger::try_init().ok(); + + smol::block_on(async { + let tempdir = TempDir::new("DbTests").unwrap(); + + let test_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + test_db.write(|connection| + connection.exec(sql!( + INSERT INTO test(value) VALUES (10) + )).unwrap()().unwrap() + ).await; + drop(test_db); + + let mut guards = vec![]; + for _ in 0..5 { + let path = tempdir.path().to_path_buf(); + let guard = thread::spawn(move || smol::block_on(async { + let test_db = open_db::(true, &path, &ReleaseChannel::Dev).await; + + assert!(test_db.select_row::<()>(sql!(SELECT value FROM test)).unwrap()().unwrap().is_none()) + })); + + guards.push(guard); + } + + for guard in guards { + guard.join().unwrap(); + } + }) + } - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.exec_bound::<($($arg_type),+)>(sql_stmt)?(($($arg),+)) - .context(::std::format!( - "Error in {}, exec_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - } - }; - ($vis:vis async fn $id:ident($arg:ident: $arg_type:ty) -> Result<()> { $($sql:tt)+ }) => { - $vis async fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<()> { - use $crate::anyhow::Context; - - self.write(move |connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - connection.exec_bound::<$arg_type>(sql_stmt)?($arg) - .context(::std::format!( - "Error in {}, exec_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await - } - }; - ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $($sql:tt)+ }) => { - $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { - use $crate::anyhow::Context; - - self.write(move |connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - connection.exec_bound::<($($arg_type),+)>(sql_stmt)?(($($arg),+)) - .context(::std::format!( - "Error in {}, exec_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await - } - }; - ($vis:vis fn $id:ident() -> Result> { $($sql:tt)+ }) => { - $vis 
fn $id(&self) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.select::<$return_type>(sql_stmt)?(()) - .context(::std::format!( - "Error in {}, select_row failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - } - }; - ($vis:vis async fn $id:ident() -> Result> { $($sql:tt)+ }) => { - pub async fn $id(&self) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - self.write(|connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - connection.select::<$return_type>(sql_stmt)?(()) - .context(::std::format!( - "Error in {}, select_row failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await - } - }; - ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { - $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.select_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) - .context(::std::format!( - "Error in {}, exec_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - } - }; - ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { - $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - self.write(|connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - connection.select_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) - .context(::std::format!( - "Error in {}, exec_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await - } - }; - ($vis:vis fn $id:ident() -> Result> { $($sql:tt)+ }) => { - $vis fn $id(&self) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.select_row::<$return_type>(sql_stmt)?() - .context(::std::format!( - "Error in {}, select_row failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - } - }; - ($vis:vis async fn $id:ident() -> Result> { $($sql:tt)+ }) => { - $vis async fn $id(&self) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - self.write(|connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - connection.select_row::<$return_type>(sql_stmt)?() - .context(::std::format!( - "Error in {}, select_row failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await - } - }; - ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result> { $($sql:tt)+ }) => { - $vis fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.select_row_bound::<$arg_type, $return_type>(sql_stmt)?($arg) - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - - } - }; - ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { - $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - - } - }; - ($vis:vis 
async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { - $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - - self.write(|connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - connection.select_row_bound::<($($arg_type),+), $return_type>(indoc! { $sql })?(($($arg),+)) - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await - } - }; - ($vis:vis fn $id:ident() -> Result<$return_type:ty> { $($sql:tt)+ }) => { - $vis fn $id(&self) -> $crate::anyhow::Result<$return_type> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.select_row::<$return_type>(indoc! { $sql })?() - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - ))? - .context(::std::format!( - "Error in {}, select_row_bound expected single row result but found none for: {}", - ::std::stringify!($id), - sql_stmt - )) - } - }; - ($vis:vis async fn $id:ident() -> Result<$return_type:ty> { $($sql:tt)+ }) => { - $vis async fn $id(&self) -> $crate::anyhow::Result<$return_type> { - use $crate::anyhow::Context; - - self.write(|connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - connection.select_row::<$return_type>(sql_stmt)?() - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - ))? - .context(::std::format!( - "Error in {}, select_row_bound expected single row result but found none for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await - } - }; - ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result<$return_type:ty> { $($sql:tt)+ }) => { - pub fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<$return_type> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.select_row_bound::<$arg_type, $return_type>(sql_stmt)?($arg) - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - ))? - .context(::std::format!( - "Error in {}, select_row_bound expected single row result but found none for: {}", - ::std::stringify!($id), - sql_stmt - )) - } - }; - ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $($sql:tt)+ }) => { - $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - ))? 
- .context(::std::format!( - "Error in {}, select_row_bound expected single row result but found none for: {}", - ::std::stringify!($id), - sql_stmt - )) - } - }; - ($vis:vis fn async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $($sql:tt)+ }) => { - $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { - use $crate::anyhow::Context; - - - self.write(|connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - connection.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - ))? - .context(::std::format!( - "Error in {}, select_row_bound expected single row result but found none for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await - } - }; + // Test a file system failure (like in create_dir_all()) + #[test] + fn test_file_system_failure() { + + } + + // Test happy path where everything exists and opens + #[test] + fn test_open_db() { + + } + + // Test bad migration panics + #[test] + fn test_bad_migration_panics() { + + } + + /// Test that DB exists but corrupted (causing recreate) + #[test] + fn test_db_corruption() { + + + // open_db(db_dir, release_channel) + } } diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 70ee9f64da..0b0cdd9aa1 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -1,26 +1,15 @@ -use sqlez::domain::Domain; use sqlez_macros::sql; -use crate::{connection, query}; +use crate::{define_connection, query}; -connection!(KEY_VALUE_STORE: KeyValueStore); - -impl Domain for KeyValueStore { - fn name() -> &'static str { - "kvp" - } - - fn migrations() -> &'static [&'static str] { - // Legacy migrations using rusqlite may have already created kv_store during alpha, - // migrations must be infallible so this must have 'IF NOT EXISTS' - &[sql!( - CREATE TABLE IF NOT EXISTS kv_store( - key TEXT PRIMARY KEY, - value TEXT NOT NULL - ) STRICT; - )] - } -} +define_connection!(pub static ref KEY_VALUE_STORE: KeyValueStore<()> = + &[sql!( + CREATE TABLE IF NOT EXISTS kv_store( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ) STRICT; + )]; +); impl KeyValueStore { query! { diff --git a/crates/db/src/query.rs b/crates/db/src/query.rs new file mode 100644 index 0000000000..731fca15cb --- /dev/null +++ b/crates/db/src/query.rs @@ -0,0 +1,314 @@ +#[macro_export] +macro_rules! 
query { + ($vis:vis fn $id:ident() -> Result<()> { $($sql:tt)+ }) => { + $vis fn $id(&self) -> $crate::anyhow::Result<()> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.exec(sql_stmt)?().context(::std::format!( + "Error in {}, exec failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt, + )) + } + }; + ($vis:vis async fn $id:ident() -> Result<()> { $($sql:tt)+ }) => { + $vis async fn $id(&self) -> $crate::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.write(|connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.exec(sql_stmt)?().context(::std::format!( + "Error in {}, exec failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $($sql:tt)+ }) => { + $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.exec_bound::<($($arg_type),+)>(sql_stmt)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + } + }; + ($vis:vis async fn $id:ident($arg:ident: $arg_type:ty) -> Result<()> { $($sql:tt)+ }) => { + $vis async fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.write(move |connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.exec_bound::<$arg_type>(sql_stmt)?($arg) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; + ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $($sql:tt)+ }) => { + $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.write(move |connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.exec_bound::<($($arg_type),+)>(sql_stmt)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; + ($vis:vis fn $id:ident() -> Result> { $($sql:tt)+ }) => { + $vis fn $id(&self) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select::<$return_type>(sql_stmt)?(()) + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + } + }; + ($vis:vis async fn $id:ident() -> Result> { $($sql:tt)+ }) => { + pub async fn $id(&self) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + self.write(|connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select::<$return_type>(sql_stmt)?(()) + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { + $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + 
sql_stmt + )) + } + }; + ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { + $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + self.write(|connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; + ($vis:vis fn $id:ident() -> Result> { $($sql:tt)+ }) => { + $vis fn $id(&self) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row::<$return_type>(sql_stmt)?() + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + } + }; + ($vis:vis async fn $id:ident() -> Result> { $($sql:tt)+ }) => { + $vis async fn $id(&self) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + self.write(|connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select_row::<$return_type>(sql_stmt)?() + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; + ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result> { $($sql:tt)+ }) => { + $vis fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row_bound::<$arg_type, $return_type>(sql_stmt)?($arg) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + + } + }; + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { + $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + + } + }; + ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { + $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + + self.write(|connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select_row_bound::<($($arg_type),+), $return_type>(indoc! { $sql })?(($($arg),+)) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; + ($vis:vis fn $id:ident() -> Result<$return_type:ty> { $($sql:tt)+ }) => { + $vis fn $id(&self) -> $crate::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row::<$return_type>(indoc! { $sql })?() + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + ))? 
+ .context(::std::format!( + "Error in {}, select_row_bound expected single row result but found none for: {}", + ::std::stringify!($id), + sql_stmt + )) + } + }; + ($vis:vis async fn $id:ident() -> Result<$return_type:ty> { $($sql:tt)+ }) => { + $vis async fn $id(&self) -> $crate::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + self.write(|connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select_row::<$return_type>(sql_stmt)?() + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + ))? + .context(::std::format!( + "Error in {}, select_row_bound expected single row result but found none for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; + ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result<$return_type:ty> { $($sql:tt)+ }) => { + pub fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row_bound::<$arg_type, $return_type>(sql_stmt)?($arg) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + ))? + .context(::std::format!( + "Error in {}, select_row_bound expected single row result but found none for: {}", + ::std::stringify!($id), + sql_stmt + )) + } + }; + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $($sql:tt)+ }) => { + $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + ))? + .context(::std::format!( + "Error in {}, select_row_bound expected single row result but found none for: {}", + ::std::stringify!($id), + sql_stmt + )) + } + }; + ($vis:vis fn async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $($sql:tt)+ }) => { + $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + + self.write(|connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + ))? + .context(::std::format!( + "Error in {}, select_row_bound expected single row result but found none for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; +} diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 3416f479e7..31ada105af 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -1,19 +1,11 @@ use std::path::PathBuf; -use crate::Editor; use db::sqlez_macros::sql; -use db::{connection, query}; -use sqlez::domain::Domain; -use workspace::{ItemId, Workspace, WorkspaceId}; +use db::{define_connection, query}; +use workspace::{ItemId, WorkspaceDb, WorkspaceId}; -connection!(DB: EditorDb<(Workspace, Editor)>); - -impl Domain for Editor { - fn name() -> &'static str { - "editor" - } - - fn migrations() -> &'static [&'static str] { +define_connection!( + pub static ref DB: EditorDb = &[sql! 
( CREATE TABLE editors( item_id INTEGER NOT NULL, @@ -21,12 +13,11 @@ impl Domain for Editor { path BLOB NOT NULL, PRIMARY KEY(item_id, workspace_id), FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE - ) STRICT; - )] - } -} + ON DELETE CASCADE + ON UPDATE CASCADE + ) STRICT; + )]; +); impl EditorDb { query! { diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index ffef7814f9..3649037e50 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -137,13 +137,6 @@ impl Column for usize { } } -impl Bind for () { - fn bind(&self, statement: &Statement, start_index: i32) -> Result { - statement.bind_null(start_index)?; - Ok(start_index + 1) - } -} - impl Bind for &str { fn bind(&self, statement: &Statement, start_index: i32) -> Result { statement.bind_text(start_index, self)?; @@ -179,78 +172,6 @@ impl Column for String { } } -impl Bind for (T1, T2) { - fn bind(&self, statement: &Statement, start_index: i32) -> Result { - let next_index = self.0.bind(statement, start_index)?; - self.1.bind(statement, next_index) - } -} - -impl Column for (T1, T2) { - fn column<'a>(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let (first, next_index) = T1::column(statement, start_index)?; - let (second, next_index) = T2::column(statement, next_index)?; - Ok(((first, second), next_index)) - } -} - -impl Bind for (T1, T2, T3) { - fn bind(&self, statement: &Statement, start_index: i32) -> Result { - let next_index = self.0.bind(statement, start_index)?; - let next_index = self.1.bind(statement, next_index)?; - self.2.bind(statement, next_index) - } -} - -impl Column for (T1, T2, T3) { - fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let (first, next_index) = T1::column(statement, start_index)?; - let (second, next_index) = T2::column(statement, next_index)?; - let (third, next_index) = T3::column(statement, next_index)?; - Ok(((first, second, third), next_index)) - } -} - -impl Bind for (T1, T2, T3, T4) { - fn bind(&self, statement: &Statement, start_index: i32) -> Result { - let next_index = self.0.bind(statement, start_index)?; - let next_index = self.1.bind(statement, next_index)?; - let next_index = self.2.bind(statement, next_index)?; - self.3.bind(statement, next_index) - } -} - -impl Column for (T1, T2, T3, T4) { - fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let (first, next_index) = T1::column(statement, start_index)?; - let (second, next_index) = T2::column(statement, next_index)?; - let (third, next_index) = T3::column(statement, next_index)?; - let (fourth, next_index) = T4::column(statement, next_index)?; - Ok(((first, second, third, fourth), next_index)) - } -} - -impl Bind for (T1, T2, T3, T4, T5) { - fn bind(&self, statement: &Statement, start_index: i32) -> Result { - let next_index = self.0.bind(statement, start_index)?; - let next_index = self.1.bind(statement, next_index)?; - let next_index = self.2.bind(statement, next_index)?; - let next_index = self.3.bind(statement, next_index)?; - self.4.bind(statement, next_index) - } -} - -impl Column for (T1, T2, T3, T4, T5) { - fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let (first, next_index) = T1::column(statement, start_index)?; - let (second, next_index) = T2::column(statement, next_index)?; - let (third, next_index) = T3::column(statement, next_index)?; - let (fourth, next_index) = T4::column(statement, next_index)?; - 
let (fifth, next_index) = T5::column(statement, next_index)?; - Ok(((first, second, third, fourth, fifth), next_index)) - } -} - impl Bind for Option { fn bind(&self, statement: &Statement, start_index: i32) -> Result { if let Some(this) = self { @@ -344,3 +265,88 @@ impl Column for PathBuf { )) } } + +/// Unit impls do nothing. This simplifies query macros +impl Bind for () { + fn bind(&self, _statement: &Statement, start_index: i32) -> Result { + Ok(start_index) + } +} + +impl Column for () { + fn column(_statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + Ok(((), start_index)) + } +} + +impl Bind for (T1, T2) { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = self.0.bind(statement, start_index)?; + self.1.bind(statement, next_index) + } +} + +impl Column for (T1, T2) { + fn column<'a>(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + Ok(((first, second), next_index)) + } +} + +impl Bind for (T1, T2, T3) { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = self.0.bind(statement, start_index)?; + let next_index = self.1.bind(statement, next_index)?; + self.2.bind(statement, next_index) + } +} + +impl Column for (T1, T2, T3) { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + let (third, next_index) = T3::column(statement, next_index)?; + Ok(((first, second, third), next_index)) + } +} + +impl Bind for (T1, T2, T3, T4) { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = self.0.bind(statement, start_index)?; + let next_index = self.1.bind(statement, next_index)?; + let next_index = self.2.bind(statement, next_index)?; + self.3.bind(statement, next_index) + } +} + +impl Column for (T1, T2, T3, T4) { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + let (third, next_index) = T3::column(statement, next_index)?; + let (fourth, next_index) = T4::column(statement, next_index)?; + Ok(((first, second, third, fourth), next_index)) + } +} + +impl Bind for (T1, T2, T3, T4, T5) { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = self.0.bind(statement, start_index)?; + let next_index = self.1.bind(statement, next_index)?; + let next_index = self.2.bind(statement, next_index)?; + let next_index = self.3.bind(statement, next_index)?; + self.4.bind(statement, next_index) + } +} + +impl Column for (T1, T2, T3, T4, T5) { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + let (third, next_index) = T3::column(statement, next_index)?; + let (fourth, next_index) = T4::column(statement, next_index)?; + let (fifth, next_index) = T5::column(statement, next_index)?; + Ok(((first, second, third, fourth, fifth), next_index)) + } +} diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 0456266594..3342845d14 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs 
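The unit and tuple impls that bindable.rs regains above all follow one convention: bind consumes placeholder slots starting at start_index and returns the next free index, so composite values nest without manual offset bookkeeping. Below is a minimal, self-contained sketch of that index-threading pattern; the toy Statement type and its Bind trait are stand-ins invented for illustration, not sqlez's real API.

use std::cell::RefCell;

type Result<T> = std::result::Result<T, String>;

// Toy statement that just records (placeholder index, rendered value) pairs.
struct Statement {
    bound: RefCell<Vec<(i32, String)>>,
}

trait Bind {
    // Binds self starting at `start_index`; returns the next free index so
    // callers can chain bindings without tracking offsets themselves.
    fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32>;
}

impl Bind for i64 {
    fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
        statement.bound.borrow_mut().push((start_index, self.to_string()));
        Ok(start_index + 1)
    }
}

impl Bind for &str {
    fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
        statement.bound.borrow_mut().push((start_index, format!("{:?}", self)));
        Ok(start_index + 1)
    }
}

// The unit impl consumes no placeholders, which lets macro-generated code
// treat "no arguments" uniformly with real argument tuples.
impl Bind for () {
    fn bind(&self, _statement: &Statement, start_index: i32) -> Result<i32> {
        Ok(start_index)
    }
}

// Tuples delegate left to right, feeding each element the index returned by
// the previous one; the 3-, 4-, and 5-arity impls extend the same chain.
impl<T1: Bind, T2: Bind> Bind for (T1, T2) {
    fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
        let next_index = self.0.bind(statement, start_index)?;
        self.1.bind(statement, next_index)
    }
}

fn main() -> Result<()> {
    let statement = Statement { bound: RefCell::new(Vec::new()) };
    // (42, "alice") occupies placeholders 1 and 2; the returned index is 3.
    let next = (42i64, "alice").bind(&statement, 1)?;
    assert_eq!(next, 3);
    println!("{:?}", statement.bound.borrow());
    Ok(())
}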
@@ -1,4 +1,5 @@ use std::{ + cell::RefCell, ffi::{CStr, CString}, marker::PhantomData, path::Path, @@ -11,7 +12,7 @@ use libsqlite3_sys::*; pub struct Connection { pub(crate) sqlite3: *mut sqlite3, persistent: bool, - pub(crate) write: bool, + pub(crate) write: RefCell<bool>, _sqlite: PhantomData<sqlite3>, } unsafe impl Send for Connection {} @@ -21,7 +22,7 @@ impl Connection { let mut connection = Self { sqlite3: 0 as *mut _, persistent, - write: true, + write: RefCell::new(true), _sqlite: PhantomData, }; @@ -64,7 +65,7 @@ impl Connection { } pub fn can_write(&self) -> bool { - self.write + *self.write.borrow() } pub fn backup_main(&self, destination: &Connection) -> Result<()> { @@ -152,6 +153,13 @@ impl Connection { )) } } + + pub(crate) fn with_write<T>(&self, callback: impl FnOnce(&Connection) -> T) -> T { + *self.write.borrow_mut() = true; + let result = callback(self); + *self.write.borrow_mut() = false; + result + } } impl Drop for Connection { diff --git a/crates/sqlez/src/domain.rs b/crates/sqlez/src/domain.rs index 3a477b2bc9..a83f4e18d6 100644 --- a/crates/sqlez/src/domain.rs +++ b/crates/sqlez/src/domain.rs @@ -1,11 +1,11 @@ use crate::connection::Connection; -pub trait Domain { +pub trait Domain: 'static { fn name() -> &'static str; fn migrations() -> &'static [&'static str]; } -pub trait Migrator { +pub trait Migrator: 'static { fn migrate(connection: &Connection) -> anyhow::Result<()>; } diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 41c505f85b..aa8d5fe00b 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -12,6 +12,7 @@ use crate::connection::Connection; impl Connection { pub fn migrate(&self, domain: &'static str, migrations: &[&'static str]) -> Result<()> { self.with_savepoint("migrating", || { + println!("Processing domain"); // Setup the migrations table unconditionally self.exec(indoc! {" CREATE TABLE IF NOT EXISTS migrations ( @@ -43,11 +44,13 @@ impl Connection { {}", domain, index, completed_migration, migration})); } else { // Migration already run. Continue + println!("Migration already run"); continue; } } self.exec(migration)?()?; + println!("Ran migration"); store_completed_migration((domain, index, *migration))?; } diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 4849e785b5..77ba3406a2 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -5,17 +5,13 @@ use parking_lot::{Mutex, RwLock}; use std::{collections::HashMap, marker::PhantomData, ops::Deref, sync::Arc, thread}; use thread_local::ThreadLocal; -use crate::{ - connection::Connection, - domain::{Domain, Migrator}, - util::UnboundedSyncSender, -}; +use crate::{connection::Connection, domain::Migrator, util::UnboundedSyncSender}; const MIGRATION_RETRIES: usize = 10; -type QueuedWrite = Box<dyn 'static + Send + FnOnce(&Connection)>; +type QueuedWrite = Box<dyn 'static + Send + FnOnce()>; type WriteQueueConstructor = - Box<dyn 'static + Send + FnMut(Connection) -> Box<dyn 'static + Send + Sync + Fn(QueuedWrite)>>; + Box<dyn 'static + Send + FnMut() -> Box<dyn 'static + Send + Sync + Fn(QueuedWrite)>>; lazy_static! { /// List of queues of tasks by database uri. This lets us serialize writes to the database /// and have a single worker thread per db file. This means many thread safe connections @@ -28,18 +24,18 @@ lazy_static! { /// Thread safe connection to a given database file or in memory db. This can be cloned, shared, static, /// whatever. It derefs to a synchronous connection by thread that is read only.
A write capable connection /// may be accessed by passing a callback to the `write` function which will queue the callback -pub struct ThreadSafeConnection { +pub struct ThreadSafeConnection { uri: Arc, persistent: bool, connection_initialize_query: Option<&'static str>, connections: Arc>, - _migrator: PhantomData, + _migrator: PhantomData<*mut M>, } -unsafe impl Send for ThreadSafeConnection {} -unsafe impl Sync for ThreadSafeConnection {} +unsafe impl Send for ThreadSafeConnection {} +unsafe impl Sync for ThreadSafeConnection {} -pub struct ThreadSafeConnectionBuilder { +pub struct ThreadSafeConnectionBuilder { db_initialize_query: Option<&'static str>, write_queue_constructor: Option, connection: ThreadSafeConnection, @@ -54,6 +50,13 @@ impl ThreadSafeConnectionBuilder { self } + /// Queues an initialization query for the database file. This must be infallible + /// but may cause changes to the database file such as with `PRAGMA journal_mode` + pub fn with_db_initialization_query(mut self, initialize_query: &'static str) -> Self { + self.db_initialize_query = Some(initialize_query); + self + } + /// Specifies how the thread safe connection should serialize writes. If provided /// the connection will call the write_queue_constructor for each database file in /// this process. The constructor is responsible for setting up a background thread or @@ -66,13 +69,6 @@ impl ThreadSafeConnectionBuilder { self } - /// Queues an initialization query for the database file. This must be infallible - /// but may cause changes to the database file such as with `PRAGMA journal_mode` - pub fn with_db_initialization_query(mut self, initialize_query: &'static str) -> Self { - self.db_initialize_query = Some(initialize_query); - self - } - pub async fn build(self) -> anyhow::Result> { self.connection .initialize_queues(self.write_queue_constructor); @@ -100,6 +96,7 @@ impl ThreadSafeConnectionBuilder { .with_savepoint("thread_safe_multi_migration", || M::migrate(connection)); if migration_result.is_ok() { + println!("Migration succeded"); break; } } @@ -113,38 +110,17 @@ impl ThreadSafeConnectionBuilder { } impl ThreadSafeConnection { - fn initialize_queues(&self, write_queue_constructor: Option) { + fn initialize_queues(&self, write_queue_constructor: Option) -> bool { if !QUEUES.read().contains_key(&self.uri) { let mut queues = QUEUES.write(); if !queues.contains_key(&self.uri) { - let mut write_connection = self.create_connection(); - // Enable writes for this connection - write_connection.write = true; - if let Some(mut write_queue_constructor) = write_queue_constructor { - let write_channel = write_queue_constructor(write_connection); - queues.insert(self.uri.clone(), write_channel); - } else { - use std::sync::mpsc::channel; - - let (sender, reciever) = channel::(); - thread::spawn(move || { - while let Ok(write) = reciever.recv() { - write(&write_connection) - } - }); - - let sender = UnboundedSyncSender::new(sender); - queues.insert( - self.uri.clone(), - Box::new(move |queued_write| { - sender - .send(queued_write) - .expect("Could not send write action to backgorund thread"); - }), - ); - } + let mut write_queue_constructor = + write_queue_constructor.unwrap_or(background_thread_queue()); + queues.insert(self.uri.clone(), write_queue_constructor()); + return true; } } + return false; } pub fn builder(uri: &str, persistent: bool) -> ThreadSafeConnectionBuilder { @@ -163,20 +139,21 @@ impl ThreadSafeConnection { /// Opens a new db connection with the initialized file path. 
This is internal and only /// called from the deref function. - fn open_file(&self) -> Connection { - Connection::open_file(self.uri.as_ref()) + fn open_file(uri: &str) -> Connection { + Connection::open_file(uri) } /// Opens a shared memory connection using the file path as the identifier. This is internal /// and only called from the deref function. - fn open_shared_memory(&self) -> Connection { - Connection::open_memory(Some(self.uri.as_ref())) + fn open_shared_memory(uri: &str) -> Connection { + Connection::open_memory(Some(uri)) } pub fn write( &self, callback: impl 'static + Send + FnOnce(&Connection) -> T, ) -> impl Future { + // Check and invalidate queue and maybe recreate queue let queues = QUEUES.read(); let write_channel = queues .get(&self.uri) @@ -185,24 +162,32 @@ impl ThreadSafeConnection { // Create a one shot channel for the result of the queued write // so we can await on the result let (sender, reciever) = oneshot::channel(); - write_channel(Box::new(move |connection| { - sender.send(callback(connection)).ok(); + + let thread_safe_connection = (*self).clone(); + write_channel(Box::new(move || { + let connection = thread_safe_connection.deref(); + let result = connection.with_write(|connection| callback(connection)); + sender.send(result).ok(); })); reciever.map(|response| response.expect("Background writer thread unexpectedly closed")) } - pub(crate) fn create_connection(&self) -> Connection { - let mut connection = if self.persistent { - self.open_file() + pub(crate) fn create_connection( + persistent: bool, + uri: &str, + connection_initialize_query: Option<&'static str>, + ) -> Connection { + let mut connection = if persistent { + Self::open_file(uri) } else { - self.open_shared_memory() + Self::open_shared_memory(uri) }; // Disallow writes on the connection. The only writes allowed for thread safe connections // are from the background thread that can serialize them. 
- connection.write = false; + *connection.write.get_mut() = false; - if let Some(initialize_query) = self.connection_initialize_query { + if let Some(initialize_query) = connection_initialize_query { connection.exec(initialize_query).expect(&format!( "Initialize query failed to execute: {}", initialize_query @@ -236,7 +221,7 @@ impl ThreadSafeConnection<()> { } } -impl Clone for ThreadSafeConnection { +impl Clone for ThreadSafeConnection { fn clone(&self) -> Self { Self { uri: self.uri.clone(), @@ -252,16 +237,41 @@ impl Deref for ThreadSafeConnection { type Target = Connection; fn deref(&self) -> &Self::Target { - self.connections.get_or(|| self.create_connection()) + self.connections.get_or(|| { + Self::create_connection(self.persistent, &self.uri, self.connection_initialize_query) + }) } } -pub fn locking_queue() -> WriteQueueConstructor { - Box::new(|connection| { - let connection = Mutex::new(connection); +pub fn background_thread_queue() -> WriteQueueConstructor { + use std::sync::mpsc::channel; + + Box::new(|| { + let (sender, reciever) = channel::(); + + thread::spawn(move || { + while let Ok(write) = reciever.recv() { + write() + } + }); + + let sender = UnboundedSyncSender::new(sender); Box::new(move |queued_write| { - let connection = connection.lock(); - queued_write(&connection) + sender + .send(queued_write) + .expect("Could not send write action to background thread"); + }) + }) +} + +pub fn locking_queue() -> WriteQueueConstructor { + Box::new(|| { + let mutex = Mutex::new(()); + Box::new(move |queued_write| { + eprintln!("Write started"); + let _ = mutex.lock(); + queued_write(); + eprintln!("Write finished"); }) }) } @@ -269,7 +279,8 @@ pub fn locking_queue() -> WriteQueueConstructor { #[cfg(test)] mod test { use indoc::indoc; - use std::ops::Deref; + use lazy_static::__Deref; + use std::thread; use crate::{domain::Domain, thread_safe_connection::ThreadSafeConnection}; diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index f9cfb6fc01..1669a3a546 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -1,19 +1,11 @@ use std::path::PathBuf; -use db::{connection, query, sqlez::domain::Domain, sqlez_macros::sql}; +use db::{define_connection, query, sqlez_macros::sql}; -use workspace::{ItemId, Workspace, WorkspaceId}; +use workspace::{ItemId, WorkspaceDb, WorkspaceId}; -use crate::Terminal; - -connection!(TERMINAL_CONNECTION: TerminalDb<(Workspace, Terminal)>); - -impl Domain for Terminal { - fn name() -> &'static str { - "terminal" - } - - fn migrations() -> &'static [&'static str] { +define_connection! 
{ + pub static ref TERMINAL_CONNECTION: TerminalDb = &[sql!( CREATE TABLE terminals ( workspace_id INTEGER, @@ -23,8 +15,7 @@ impl Domain for Terminal { FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE ) STRICT; - )] - } + )]; } impl TerminalDb { diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index db59141087..a0cc48ca1c 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -5,30 +5,21 @@ pub mod model; use std::path::Path; use anyhow::{anyhow, bail, Context, Result}; -use db::{connection, query, sqlez::connection::Connection, sqlez_macros::sql}; +use db::{define_connection, query, sqlez::connection::Connection, sqlez_macros::sql}; use gpui::Axis; -use db::sqlez::domain::Domain; use util::{iife, unzip_option, ResultExt}; use crate::dock::DockPosition; use crate::WorkspaceId; -use super::Workspace; - use model::{ GroupId, PaneId, SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace, WorkspaceLocation, }; -connection!(DB: WorkspaceDb); - -impl Domain for Workspace { - fn name() -> &'static str { - "workspace" - } - - fn migrations() -> &'static [&'static str] { +define_connection! { + pub static ref DB: WorkspaceDb<()> = &[sql!( CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY, @@ -40,7 +31,7 @@ impl Domain for Workspace { timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, FOREIGN KEY(dock_pane) REFERENCES panes(pane_id) ) STRICT; - + CREATE TABLE pane_groups( group_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, @@ -48,29 +39,29 @@ impl Domain for Workspace { position INTEGER, // NULL indicates that this is a root node axis TEXT NOT NULL, // Enum: 'Vertical' / 'Horizontal' FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE, + ON DELETE CASCADE + ON UPDATE CASCADE, FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; - + CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, active INTEGER NOT NULL, // Boolean FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE + ON DELETE CASCADE + ON UPDATE CASCADE ) STRICT; - + CREATE TABLE center_panes( pane_id INTEGER PRIMARY KEY, parent_group_id INTEGER, // NULL means that this is a root pane position INTEGER, // NULL means that this is a root pane FOREIGN KEY(pane_id) REFERENCES panes(pane_id) - ON DELETE CASCADE, + ON DELETE CASCADE, FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; - + CREATE TABLE items( item_id INTEGER NOT NULL, // This is the item's view id, so this is not unique workspace_id INTEGER NOT NULL, @@ -79,14 +70,13 @@ impl Domain for Workspace { position INTEGER NOT NULL, active INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE, + ON DELETE CASCADE + ON UPDATE CASCADE, FOREIGN KEY(pane_id) REFERENCES panes(pane_id) - ON DELETE CASCADE, + ON DELETE CASCADE, PRIMARY KEY(item_id, workspace_id) ) STRICT; - )] - } + )]; } impl WorkspaceDb { @@ -149,7 +139,7 @@ impl WorkspaceDb { UPDATE workspaces SET dock_pane = NULL WHERE workspace_id = ?1; DELETE FROM pane_groups WHERE workspace_id = ?1; DELETE FROM panes WHERE workspace_id = ?1;))?(workspace.id) - .context("Clearing old panes")?; + .expect("Clearing old panes"); conn.exec_bound(sql!( DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ? 
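The define_connection! invocations above collapse what used to be a connection! call plus a hand-written Domain impl into a single declaration. As a hedged sketch of how another database would be declared and queried with these macros: ExampleDb, the examples table, and save_example are names invented for illustration, while define_connection!, query!, and sql! follow the TerminalDb and WorkspaceDb usage shown in this patch.

use db::{define_connection, query, sqlez_macros::sql};
use workspace::{ItemId, WorkspaceDb, WorkspaceId};

define_connection! {
    // The slice is the domain's migration list: Connection::migrate records
    // each completed entry in the migrations table and skips it on the next
    // launch, so this list is append-only.
    pub static ref EXAMPLE_CONNECTION: ExampleDb<WorkspaceDb> =
        &[sql!(
            CREATE TABLE examples(
                item_id INTEGER NOT NULL,
                workspace_id INTEGER NOT NULL,
                PRIMARY KEY(item_id, workspace_id),
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
            ) STRICT;
        )];
}

impl ExampleDb {
    query! {
        pub async fn save_example(item_id: ItemId, workspace_id: WorkspaceId) -> Result<()> {
            INSERT OR REPLACE INTO examples(item_id, workspace_id)
            VALUES (?, ?)
        }
    }
}

Async query! methods like this one route through ThreadSafeConnection::write, so every statement for a given database file funnels into the serialized write queue set up in thread_safe_connection.rs.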
diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 66ef63f27f..8e9131839d 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -44,8 +44,11 @@ use language::LanguageRegistry; use log::{error, warn}; pub use pane::*; pub use pane_group::*; -pub use persistence::model::{ItemId, WorkspaceLocation}; use persistence::{model::SerializedItem, DB}; +pub use persistence::{ + model::{ItemId, WorkspaceLocation}, + WorkspaceDb, +}; use postage::prelude::Stream; use project::{Project, ProjectEntryId, ProjectPath, ProjectStore, Worktree, WorktreeId}; use serde::Deserialize; From 5262e8c77ef2d453abb1e8922a2da6403986ff8a Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 2 Dec 2022 12:43:02 -0800 Subject: [PATCH 187/240] CHANGE LOCK TO NOT BE DROPPED INSTANTLY. DANG U RUST co-authored-by: kay@zed.dev --- crates/db/Cargo.toml | 2 +- crates/db/src/db.rs | 9 ++-- crates/sqlez/Cargo.toml | 2 +- crates/sqlez/src/thread_safe_connection.rs | 6 +-- crates/sqlez_macros/src/sqlez_macros.rs | 48 ++++++++++++++-------- crates/workspace/Cargo.toml | 1 + 6 files changed, 39 insertions(+), 29 deletions(-) diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 69c90e02f9..8e12b06027 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -27,5 +27,5 @@ smol = "1.2" [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } +env_logger = "0.9.1" tempdir = { version = "0.3.7" } -env_logger = "0.9.1" \ No newline at end of file diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 6c6688b0d1..7b214cb3be 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -40,7 +40,7 @@ lazy_static::lazy_static! { static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(()); static ref DB_WIPED: RwLock = RwLock::new(false); pub static ref BACKUP_DB_PATH: RwLock> = RwLock::new(None); - pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false); + pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false); } /// Open or create a database at the given directory path. @@ -58,7 +58,6 @@ pub async fn open_db(wipe_db: bool, db_dir: &Path, releas let mut db_wiped = DB_WIPED.write(); if !*db_wiped { remove_dir_all(&main_db_dir).ok(); - *db_wiped = true; } } @@ -71,7 +70,7 @@ pub async fn open_db(wipe_db: bool, db_dir: &Path, releas // cause errors in the log and so should be observed by developers while writing // soon-to-be good migrations. If user databases are corrupted, we toss them out // and try again from a blank. As long as running all migrations from start to end - // is ok, this race condition will never be triggered. + // on a blank database is ok, this race condition will never be triggered. // // Basically: Don't ever push invalid migrations to stable or everyone will have // a bad time. 
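The subject line of this patch is terse, so here is a self-contained illustration of the Rust footgun it fixes in locking_queue (see the thread_safe_connection.rs hunk below), using plain std types rather than the parking_lot mutex the real code uses: binding a lock guard to `_` drops it at the end of the statement, so the critical section runs unlocked, while a named binding holds the lock until it leaves scope.

use std::sync::Mutex;

fn main() {
    let mutex = Mutex::new(());

    // Bug: `_` is not a binding, so the guard returned by lock() is a
    // temporary that is dropped at the end of this statement. The mutex is
    // already unlocked again on the next line.
    let _ = mutex.lock().unwrap();
    assert!(mutex.try_lock().is_ok()); // the lock was NOT held

    // Fix (as in this patch): a named binding keeps the guard, and therefore
    // the lock, alive until it goes out of scope.
    let _lock = mutex.lock().unwrap();
    assert!(mutex.try_lock().is_err()); // the lock IS held
    drop(_lock);
}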
@@ -137,7 +136,7 @@ pub async fn open_db(wipe_db: bool, db_dir: &Path, releas } async fn open_main_db(db_path: &PathBuf) -> Option> { - println!("Opening main db"); + log::info!("Opening main db"); ThreadSafeConnection::::builder(db_path.to_string_lossy().as_ref(), true) .with_db_initialization_query(DB_INITIALIZE_QUERY) .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) @@ -147,7 +146,7 @@ async fn open_main_db(db_path: &PathBuf) -> Option() -> ThreadSafeConnection { - println!("Opening fallback db"); + log::info!("Opening fallback db"); ThreadSafeConnection::::builder(FALLBACK_DB_NAME, false) .with_db_initialization_query(DB_INITIALIZE_QUERY) .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) diff --git a/crates/sqlez/Cargo.toml b/crates/sqlez/Cargo.toml index 8a7f1ba415..c6c018b924 100644 --- a/crates/sqlez/Cargo.toml +++ b/crates/sqlez/Cargo.toml @@ -13,4 +13,4 @@ smol = "1.2" thread_local = "1.1.4" lazy_static = "1.4" parking_lot = "0.11.1" -futures = "0.3" \ No newline at end of file +futures = "0.3" diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 77ba3406a2..7b89827979 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -266,12 +266,10 @@ pub fn background_thread_queue() -> WriteQueueConstructor { pub fn locking_queue() -> WriteQueueConstructor { Box::new(|| { - let mutex = Mutex::new(()); + let write_mutex = Mutex::new(()); Box::new(move |queued_write| { - eprintln!("Write started"); - let _ = mutex.lock(); + let _lock = write_mutex.lock(); queued_write(); - eprintln!("Write finished"); }) }) } diff --git a/crates/sqlez_macros/src/sqlez_macros.rs b/crates/sqlez_macros/src/sqlez_macros.rs index c937e704ae..429f45db7e 100644 --- a/crates/sqlez_macros/src/sqlez_macros.rs +++ b/crates/sqlez_macros/src/sqlez_macros.rs @@ -10,9 +10,37 @@ lazy_static::lazy_static! 
{ #[proc_macro] pub fn sql(tokens: TokenStream) -> TokenStream { + let (spans, sql) = make_sql(tokens); + + let error = SQLITE.sql_has_syntax_error(sql.trim()); + let formatted_sql = sqlformat::format(&sql, &sqlformat::QueryParams::None, Default::default()); + + if let Some((error, error_offset)) = error { + create_error(spans, error_offset, error, &formatted_sql) + } else { + format!("r#\"{}\"#", &formatted_sql).parse().unwrap() + } +} + +fn create_error( + spans: Vec<(usize, Span)>, + error_offset: usize, + error: String, + formatted_sql: &String, +) -> TokenStream { + let error_span = spans + .into_iter() + .skip_while(|(offset, _)| offset <= &error_offset) + .map(|(_, span)| span) + .next() + .unwrap_or(Span::call_site()); + let error_text = format!("Sql Error: {}\nFor Query: {}", error, formatted_sql); + TokenStream::from(Error::new(error_span.into(), error_text).into_compile_error()) +} + +fn make_sql(tokens: TokenStream) -> (Vec<(usize, Span)>, String) { let mut sql_tokens = vec![]; flatten_stream(tokens.clone(), &mut sql_tokens); - // Lookup of spans by offset at the end of the token let mut spans: Vec<(usize, Span)> = Vec::new(); let mut sql = String::new(); @@ -20,23 +48,7 @@ pub fn sql(tokens: TokenStream) -> TokenStream { sql.push_str(&token_text); spans.push((sql.len(), span)); } - - let error = SQLITE.sql_has_syntax_error(sql.trim()); - let formatted_sql = sqlformat::format(&sql, &sqlformat::QueryParams::None, Default::default()); - - if let Some((error, error_offset)) = error { - let error_span = spans - .into_iter() - .skip_while(|(offset, _)| offset <= &error_offset) - .map(|(_, span)| span) - .next() - .unwrap_or(Span::call_site()); - - let error_text = format!("Sql Error: {}\nFor Query: {}", error, formatted_sql); - TokenStream::from(Error::new(error_span.into(), error_text).into_compile_error()) - } else { - format!("r#\"{}\"#", &formatted_sql).parse().unwrap() - } + (spans, sql) } /// This method exists to normalize the representation of groups diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index b67ccdeeb7..917f821e4a 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -54,3 +54,4 @@ gpui = { path = "../gpui", features = ["test-support"] } project = { path = "../project", features = ["test-support"] } settings = { path = "../settings", features = ["test-support"] } fs = { path = "../fs", features = ["test-support"] } +db = { path = "../db", features = ["test-support"] } \ No newline at end of file From ffcad4e4e2cfd1f8514117357b3185fa4b414e0a Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 2 Dec 2022 14:30:26 -0800 Subject: [PATCH 188/240] WIP fixing dock problems --- crates/auto_update/src/update_notification.rs | 2 +- crates/client/src/telemetry.rs | 2 +- crates/db/src/db.rs | 181 +++++++++++------- crates/sqlez/src/migrations.rs | 3 - crates/sqlez/src/thread_safe_connection.rs | 1 - crates/util/src/channel.rs | 10 +- crates/workspace/Cargo.toml | 1 - crates/workspace/src/dock.rs | 8 + crates/workspace/src/persistence.rs | 33 ++-- crates/workspace/src/persistence/model.rs | 29 ++- crates/workspace/src/workspace.rs | 99 ++++++---- crates/zed/src/zed.rs | 2 +- 12 files changed, 234 insertions(+), 137 deletions(-) diff --git a/crates/auto_update/src/update_notification.rs b/crates/auto_update/src/update_notification.rs index 9963ae65b8..5fbdf17422 100644 --- a/crates/auto_update/src/update_notification.rs +++ b/crates/auto_update/src/update_notification.rs @@ -30,7 +30,7 @@ impl View for UpdateNotification { let 
theme = cx.global::().theme.clone(); let theme = &theme.update_notification; - let app_name = cx.global::().name(); + let app_name = cx.global::().display_name(); MouseEventHandler::::new(0, cx, |state, cx| { Flex::column() diff --git a/crates/client/src/telemetry.rs b/crates/client/src/telemetry.rs index a81f33c604..ce8b713996 100644 --- a/crates/client/src/telemetry.rs +++ b/crates/client/src/telemetry.rs @@ -106,7 +106,7 @@ impl Telemetry { pub fn new(client: Arc, cx: &AppContext) -> Arc { let platform = cx.platform(); let release_channel = if cx.has_global::() { - Some(cx.global::().name()) + Some(cx.global::().display_name()) } else { None }; diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 7b214cb3be..c146336132 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -36,6 +36,8 @@ const DB_INITIALIZE_QUERY: &'static str = sql!( const FALLBACK_DB_NAME: &'static str = "FALLBACK_MEMORY_DB"; +const DB_FILE_NAME: &'static str = "db.sqlite"; + lazy_static::lazy_static! { static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(()); static ref DB_WIPED: RwLock = RwLock::new(false); @@ -48,7 +50,8 @@ lazy_static::lazy_static! { /// is moved to a backup folder and a new one is created. If that fails, a shared in memory db is created. /// In either case, static variables are set so that the user can be notified. pub async fn open_db(wipe_db: bool, db_dir: &Path, release_channel: &ReleaseChannel) -> ThreadSafeConnection { - let main_db_dir = db_dir.join(Path::new(&format!("0-{}", release_channel.name()))); + let release_channel_name = release_channel.dev_name(); + let main_db_dir = db_dir.join(Path::new(&format!("0-{}", release_channel_name))); // If WIPE_DB, delete 0-{channel} if release_channel == &ReleaseChannel::Dev @@ -77,7 +80,7 @@ pub async fn open_db(wipe_db: bool, db_dir: &Path, releas // If no db folder, create one at 0-{channel} create_dir_all(&main_db_dir).context("Could not create db directory")?; - let db_path = main_db_dir.join(Path::new("db.sqlite")); + let db_path = main_db_dir.join(Path::new(DB_FILE_NAME)); // Optimistically open databases in parallel if !DB_FILE_OPERATIONS.is_locked() { @@ -104,7 +107,7 @@ pub async fn open_db(wipe_db: bool, db_dir: &Path, releas let backup_db_dir = db_dir.join(Path::new(&format!( "{}-{}", backup_timestamp, - release_channel.name(), + release_channel_name, ))); std::fs::rename(&main_db_dir, &backup_db_dir) @@ -118,7 +121,7 @@ pub async fn open_db(wipe_db: bool, db_dir: &Path, releas // Create a new 0-{channel} create_dir_all(&main_db_dir).context("Should be able to create the database directory")?; - let db_path = main_db_dir.join(Path::new("db.sqlite")); + let db_path = main_db_dir.join(Path::new(DB_FILE_NAME)); // Try again open_main_db(&db_path).await.context("Could not newly created db") @@ -240,86 +243,130 @@ macro_rules! 
define_connection { #[cfg(test)] mod tests { - use std::thread; + use std::{thread, fs}; - use sqlez::domain::Domain; + use sqlez::{domain::Domain, connection::Connection}; use sqlez_macros::sql; use tempdir::TempDir; use util::channel::ReleaseChannel; - use crate::open_db; - - enum TestDB {} - - impl Domain for TestDB { - fn name() -> &'static str { - "db_tests" - } - - fn migrations() -> &'static [&'static str] { - &[sql!( - CREATE TABLE test(value); - )] - } - } + use crate::{open_db, DB_FILE_NAME}; // Test that wipe_db exists and works and gives a new db - #[test] - fn test_wipe_db() { - env_logger::try_init().ok(); + #[gpui::test] + async fn test_wipe_db() { + enum TestDB {} - smol::block_on(async { - let tempdir = TempDir::new("DbTests").unwrap(); + impl Domain for TestDB { + fn name() -> &'static str { + "db_tests" + } - let test_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; - test_db.write(|connection| - connection.exec(sql!( - INSERT INTO test(value) VALUES (10) - )).unwrap()().unwrap() - ).await; - drop(test_db); - - let mut guards = vec![]; - for _ in 0..5 { - let path = tempdir.path().to_path_buf(); - let guard = thread::spawn(move || smol::block_on(async { - let test_db = open_db::(true, &path, &ReleaseChannel::Dev).await; - - assert!(test_db.select_row::<()>(sql!(SELECT value FROM test)).unwrap()().unwrap().is_none()) - })); + fn migrations() -> &'static [&'static str] { + &[sql!( + CREATE TABLE test(value); + )] + } + } + + let tempdir = TempDir::new("DbTests").unwrap(); + + // Create a db and insert a marker value + let test_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + test_db.write(|connection| + connection.exec(sql!( + INSERT INTO test(value) VALUES (10) + )).unwrap()().unwrap() + ).await; + drop(test_db); + + // Opening db with wipe clears once and removes the marker value + let mut guards = vec![]; + for _ in 0..5 { + let path = tempdir.path().to_path_buf(); + let guard = thread::spawn(move || smol::block_on(async { + let test_db = open_db::(true, &path, &ReleaseChannel::Dev).await; - guards.push(guard); + assert!(test_db.select_row::<()>(sql!(SELECT value FROM test)).unwrap()().unwrap().is_none()) + })); + + guards.push(guard); + } + + for guard in guards { + guard.join().unwrap(); + } + } + + // Test bad migration panics + #[gpui::test] + #[should_panic] + async fn test_bad_migration_panics() { + enum BadDB {} + + impl Domain for BadDB { + fn name() -> &'static str { + "db_tests" } - for guard in guards { - guard.join().unwrap(); + fn migrations() -> &'static [&'static str] { + &[sql!(CREATE TABLE test(value);), + // failure because test already exists + sql!(CREATE TABLE test(value);)] } - }) - } - - // Test a file system failure (like in create_dir_all()) - #[test] - fn test_file_system_failure() { - - } - - // Test happy path where everything exists and opens - #[test] - fn test_open_db() { - - } - - // Test bad migration panics - #[test] - fn test_bad_migration_panics() { - + } + + let tempdir = TempDir::new("DbTests").unwrap(); + let _bad_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; } /// Test that DB exists but corrupted (causing recreate) - #[test] - fn test_db_corruption() { + #[gpui::test] + async fn test_db_corruption() { + enum CorruptedDB {} + impl Domain for CorruptedDB { + fn name() -> &'static str { + "db_tests" + } + + fn migrations() -> &'static [&'static str] { + &[sql!(CREATE TABLE test(value);)] + } + } - // open_db(db_dir, 
release_channel) + enum GoodDB {} + + impl Domain for GoodDB { + fn name() -> &'static str { + "db_tests" //Notice same name + } + + fn migrations() -> &'static [&'static str] { + &[sql!(CREATE TABLE test2(value);)] //But different migration + } + } + + let tempdir = TempDir::new("DbTests").unwrap(); + { + let corrupt_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + assert!(corrupt_db.persistent()); + } + + let good_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + assert!(good_db.select_row::("SELECT * FROM test2").unwrap()().unwrap().is_none()); + + let mut corrupted_backup_dir = fs::read_dir( + tempdir.path() + ).unwrap().find(|entry| { + !entry.as_ref().unwrap().file_name().to_str().unwrap().starts_with("0") + } + ).unwrap().unwrap().path(); + corrupted_backup_dir.push(DB_FILE_NAME); + + dbg!(&corrupted_backup_dir); + + let backup = Connection::open_file(&corrupted_backup_dir.to_string_lossy()); + assert!(backup.select_row::("SELECT * FROM test").unwrap()().unwrap().is_none()); } } diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index aa8d5fe00b..41c505f85b 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -12,7 +12,6 @@ use crate::connection::Connection; impl Connection { pub fn migrate(&self, domain: &'static str, migrations: &[&'static str]) -> Result<()> { self.with_savepoint("migrating", || { - println!("Processing domain"); // Setup the migrations table unconditionally self.exec(indoc! {" CREATE TABLE IF NOT EXISTS migrations ( @@ -44,13 +43,11 @@ impl Connection { {}", domain, index, completed_migration, migration})); } else { // Migration already run. Continue - println!("Migration already run"); continue; } } self.exec(migration)?()?; - println!("Ran migration"); store_completed_migration((domain, index, *migration))?; } diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 7b89827979..51d0707fd8 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -96,7 +96,6 @@ impl ThreadSafeConnectionBuilder { .with_savepoint("thread_safe_multi_migration", || M::migrate(connection)); if migration_result.is_ok() { - println!("Migration succeded"); break; } } diff --git a/crates/util/src/channel.rs b/crates/util/src/channel.rs index ab5b53b4ab..3edf26dc95 100644 --- a/crates/util/src/channel.rs +++ b/crates/util/src/channel.rs @@ -22,11 +22,19 @@ pub enum ReleaseChannel { } impl ReleaseChannel { - pub fn name(&self) -> &'static str { + pub fn display_name(&self) -> &'static str { match self { ReleaseChannel::Dev => "Zed Dev", ReleaseChannel::Preview => "Zed Preview", ReleaseChannel::Stable => "Zed", } } + + pub fn dev_name(&self) -> &'static str { + match self { + ReleaseChannel::Dev => "dev", + ReleaseChannel::Preview => "preview", + ReleaseChannel::Stable => "stable", + } + } } diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index 917f821e4a..5894a2a44e 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -46,7 +46,6 @@ serde_json = { version = "1.0", features = ["preserve_order"] } smallvec = { version = "1.6", features = ["union"] } indoc = "1.0.4" - [dev-dependencies] call = { path = "../call", features = ["test-support"] } client = { path = "../client", features = ["test-support"] } diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index 0879166bbe..9b1342ecd9 100644 --- 
a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -175,16 +175,21 @@ impl Dock { new_position: DockPosition, cx: &mut ViewContext, ) { + dbg!("starting", &new_position); workspace.dock.position = new_position; // Tell the pane about the new anchor position workspace.dock.pane.update(cx, |pane, cx| { + dbg!("setting docked"); pane.set_docked(Some(new_position.anchor()), cx) }); if workspace.dock.position.is_visible() { + dbg!("dock is visible"); // Close the right sidebar if the dock is on the right side and the right sidebar is open if workspace.dock.position.anchor() == DockAnchor::Right { + dbg!("dock anchor is right"); if workspace.right_sidebar().read(cx).is_open() { + dbg!("Toggling right sidebar"); workspace.toggle_sidebar(SidebarSide::Right, cx); } } @@ -194,8 +199,10 @@ impl Dock { if pane.read(cx).items().next().is_none() { let item_to_add = (workspace.dock.default_item_factory)(workspace, cx); // Adding the item focuses the pane by default + dbg!("Adding item to dock"); Pane::add_item(workspace, &pane, item_to_add, true, true, None, cx); } else { + dbg!("just focusing dock"); cx.focus(pane); } } else if let Some(last_active_center_pane) = workspace @@ -207,6 +214,7 @@ impl Dock { } cx.emit(crate::Event::DockAnchorChanged); workspace.serialize_workspace(cx); + dbg!("Serializing workspace after dock position changed"); cx.notify(); } diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index a0cc48ca1c..2d4ae919f9 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -27,7 +27,7 @@ define_connection! { dock_visible INTEGER, // Boolean dock_anchor TEXT, // Enum: 'Bottom' / 'Right' / 'Expanded' dock_pane INTEGER, // NULL indicates that we don't have a dock pane yet - project_panel_open INTEGER, //Boolean + left_sidebar_open INTEGER, //Boolean timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, FOREIGN KEY(dock_pane) REFERENCES panes(pane_id) ) STRICT; @@ -91,7 +91,7 @@ impl WorkspaceDb { // Note that we re-assign the workspace_id here in case it's empty // and we've grabbed the most recent workspace - let (workspace_id, workspace_location, project_panel_open, dock_position): ( + let (workspace_id, workspace_location, left_sidebar_open, dock_position): ( WorkspaceId, WorkspaceLocation, bool, @@ -99,12 +99,12 @@ impl WorkspaceDb { ) = iife!({ if worktree_roots.len() == 0 { self.select_row(sql!( - SELECT workspace_id, workspace_location, project_panel_open, dock_visible, dock_anchor + SELECT workspace_id, workspace_location, left_sidebar_open, dock_visible, dock_anchor FROM workspaces ORDER BY timestamp DESC LIMIT 1))?()? } else { self.select_row_bound(sql!( - SELECT workspace_id, workspace_location, project_panel_open, dock_visible, dock_anchor + SELECT workspace_id, workspace_location, left_sidebar_open, dock_visible, dock_anchor FROM workspaces WHERE workspace_location = ?))?(&workspace_location)? 
} @@ -125,7 +125,7 @@ impl WorkspaceDb { .context("Getting center group") .log_err()?, dock_position, - project_panel_open + left_sidebar_open }) } @@ -151,7 +151,7 @@ impl WorkspaceDb { INSERT INTO workspaces( workspace_id, workspace_location, - project_panel_open, + left_sidebar_open, dock_visible, dock_anchor, timestamp @@ -160,11 +160,11 @@ impl WorkspaceDb { ON CONFLICT DO UPDATE SET workspace_location = ?2, - project_panel_open = ?3, + left_sidebar_open = ?3, dock_visible = ?4, dock_anchor = ?5, timestamp = CURRENT_TIMESTAMP - ))?((workspace.id, &workspace.location, workspace.project_panel_open, workspace.dock_position)) + ))?((workspace.id, &workspace.location, workspace.left_sidebar_open, workspace.dock_position)) .context("Updating workspace")?; // Save center pane group and dock pane @@ -198,7 +198,8 @@ impl WorkspaceDb { query! { pub fn recent_workspaces(limit: usize) -> Result> { SELECT workspace_id, workspace_location - FROM workspaces + FROM workspaces + WHERE workspace_location IS NOT NULL ORDER BY timestamp DESC LIMIT ? } @@ -458,7 +459,7 @@ mod tests { dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), dock_pane: Default::default(), - project_panel_open: true + left_sidebar_open: true }; let mut workspace_2 = SerializedWorkspace { @@ -467,7 +468,7 @@ mod tests { dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), dock_pane: Default::default(), - project_panel_open: false + left_sidebar_open: false }; db.save_workspace(workspace_1.clone()).await; @@ -573,7 +574,7 @@ mod tests { dock_position: DockPosition::Shown(DockAnchor::Bottom), center_group, dock_pane, - project_panel_open: true + left_sidebar_open: true }; db.save_workspace(workspace.clone()).await; @@ -601,7 +602,7 @@ mod tests { dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), dock_pane: Default::default(), - project_panel_open: true, + left_sidebar_open: true, }; let mut workspace_2 = SerializedWorkspace { @@ -610,7 +611,7 @@ mod tests { dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), dock_pane: Default::default(), - project_panel_open: false, + left_sidebar_open: false, }; db.save_workspace(workspace_1.clone()).await; @@ -646,7 +647,7 @@ mod tests { dock_position: DockPosition::Shown(DockAnchor::Right), center_group: Default::default(), dock_pane: Default::default(), - project_panel_open: false + left_sidebar_open: false }; db.save_workspace(workspace_3.clone()).await; @@ -681,7 +682,7 @@ mod tests { dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Right), center_group: center_group.clone(), dock_pane, - project_panel_open: true + left_sidebar_open: true } } diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index c57c992d7b..c75488561f 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -65,7 +65,7 @@ pub struct SerializedWorkspace { pub dock_position: DockPosition, pub center_group: SerializedPaneGroup, pub dock_pane: SerializedPane, - pub project_panel_open: bool, + pub left_sidebar_open: bool, } #[derive(Debug, PartialEq, Eq, Clone)] @@ -95,26 +95,33 @@ impl SerializedPaneGroup { workspace_id: WorkspaceId, workspace: &ViewHandle, cx: &mut AsyncAppContext, - ) -> (Member, Option>) { + ) -> Option<(Member, Option>)> { match self { SerializedPaneGroup::Group { axis, children } => { let 
mut current_active_pane = None; let mut members = Vec::new(); for child in children { - let (new_member, active_pane) = child + if let Some((new_member, active_pane)) = child .deserialize(project, workspace_id, workspace, cx) - .await; - members.push(new_member); + .await + { + members.push(new_member); - current_active_pane = current_active_pane.or(active_pane); + current_active_pane = current_active_pane.or(active_pane); + } } - ( + + if members.is_empty() { + return None; + } + + Some(( Member::Axis(PaneAxis { axis: *axis, members, }), current_active_pane, - ) + )) } SerializedPaneGroup::Pane(serialized_pane) => { let pane = workspace.update(cx, |workspace, cx| workspace.add_pane(cx)); @@ -123,7 +130,11 @@ impl SerializedPaneGroup { .deserialize_to(project, &pane, workspace_id, workspace, cx) .await; - (Member::Pane(pane.clone()), active.then(|| pane)) + if pane.read_with(cx, |pane, _| pane.items().next().is_some()) { + Some((Member::Pane(pane.clone()), active.then(|| pane))) + } else { + None + } } } } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 8e9131839d..5fb804e66d 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -1244,6 +1244,8 @@ impl Workspace { Dock::hide_on_sidebar_shown(self, sidebar_side, cx); } + self.serialize_workspace(cx); + cx.focus_self(); cx.notify(); } @@ -1275,6 +1277,9 @@ impl Workspace { } else { cx.focus_self(); } + + self.serialize_workspace(cx); + cx.notify(); } @@ -1302,6 +1307,9 @@ impl Workspace { cx.focus(active_item.to_any()); } } + + self.serialize_workspace(cx); + cx.notify(); } @@ -2268,13 +2276,20 @@ impl Workspace { self.database_id } - fn location(&self, cx: &AppContext) -> WorkspaceLocation { - self.project() - .read(cx) - .visible_worktrees(cx) - .map(|worktree| worktree.read(cx).abs_path()) - .collect::>() - .into() + fn location(&self, cx: &AppContext) -> Option { + let project = self.project().read(cx); + + if project.is_local() { + Some( + project + .visible_worktrees(cx) + .map(|worktree| worktree.read(cx).abs_path()) + .collect::>() + .into(), + ) + } else { + None + } } fn remove_panes(&mut self, member: Member, cx: &mut ViewContext) { @@ -2331,24 +2346,24 @@ impl Workspace { } } - let location = self.location(cx); + if let Some(location) = self.location(cx) { + if !location.paths().is_empty() { + let dock_pane = serialize_pane_handle(self.dock.pane(), cx); + let center_group = build_serialized_pane_group(&self.center.root, cx); - if !location.paths().is_empty() { - let dock_pane = serialize_pane_handle(self.dock.pane(), cx); - let center_group = build_serialized_pane_group(&self.center.root, cx); + let serialized_workspace = SerializedWorkspace { + id: self.database_id, + location, + dock_position: self.dock.position(), + dock_pane, + center_group, + left_sidebar_open: self.left_sidebar.read(cx).is_open(), + }; - let serialized_workspace = SerializedWorkspace { - id: self.database_id, - location: self.location(cx), - dock_position: self.dock.position(), - dock_pane, - center_group, - project_panel_open: self.left_sidebar.read(cx).is_open(), - }; - - cx.background() - .spawn(persistence::DB.save_workspace(serialized_workspace)) - .detach(); + cx.background() + .spawn(persistence::DB.save_workspace(serialized_workspace)) + .detach(); + } } } @@ -2375,34 +2390,46 @@ impl Workspace { .await; // Traverse the splits tree and add to things - let (root, active_pane) = serialized_workspace + let center_group = serialized_workspace .center_group 
.deserialize(&project, serialized_workspace.id, &workspace, &mut cx) .await; // Remove old panes from workspace panes list workspace.update(&mut cx, |workspace, cx| { - workspace.remove_panes(workspace.center.root.clone(), cx); + if let Some((center_group, active_pane)) = center_group { + workspace.remove_panes(workspace.center.root.clone(), cx); - // Swap workspace center group - workspace.center = PaneGroup::with_root(root); + // Swap workspace center group + workspace.center = PaneGroup::with_root(center_group); + + // Change the focus to the workspace first so that we retrigger focus in on the pane. + cx.focus_self(); + + if let Some(active_pane) = active_pane { + cx.focus(active_pane); + } else { + cx.focus(workspace.panes.last().unwrap().clone()); + } + } else { + cx.focus_self(); + } // Note, if this is moved after 'set_dock_position' // it causes an infinite loop. - if serialized_workspace.project_panel_open { - workspace.toggle_sidebar_item_focus(SidebarSide::Left, 0, cx) + if workspace.left_sidebar().read(cx).is_open() + != serialized_workspace.left_sidebar_open + { + workspace.toggle_sidebar(SidebarSide::Left, cx); } - Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx); - - if let Some(active_pane) = active_pane { - // Change the focus to the workspace first so that we retrigger focus in on the pane. - cx.focus_self(); - cx.focus(active_pane); - } + // Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx); cx.notify(); }); + + // Serialize ourself to make sure our timestamps and any pane / item changes are replicated + workspace.read_with(&cx, |workspace, cx| workspace.serialize_workspace(cx)) } }) .detach(); diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 0a25cfb66f..d86e449ff2 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -377,7 +377,7 @@ fn quit(_: &Quit, cx: &mut gpui::MutableAppContext) { } fn about(_: &mut Workspace, _: &About, cx: &mut gpui::ViewContext) { - let app_name = cx.global::().name(); + let app_name = cx.global::().display_name(); let version = env!("CARGO_PKG_VERSION"); cx.prompt( gpui::PromptLevel::Info, From a1f273278b758bd4837eafd2517042751b7fc654 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sat, 3 Dec 2022 16:03:46 -0800 Subject: [PATCH 189/240] Added user notifications --- crates/auto_update/src/update_notification.rs | 4 +- crates/collab_ui/src/contact_notification.rs | 2 +- crates/theme/src/theme.rs | 8 + crates/workspace/src/notifications.rs | 280 ++++++++++++++++++ crates/workspace/src/workspace.rs | 116 ++++---- styles/src/styleTree/app.ts | 2 + .../styleTree/simpleMessageNotification.ts | 31 ++ 7 files changed, 375 insertions(+), 68 deletions(-) create mode 100644 crates/workspace/src/notifications.rs create mode 100644 styles/src/styleTree/simpleMessageNotification.ts diff --git a/crates/auto_update/src/update_notification.rs b/crates/auto_update/src/update_notification.rs index 5fbdf17422..d6f94c708d 100644 --- a/crates/auto_update/src/update_notification.rs +++ b/crates/auto_update/src/update_notification.rs @@ -7,7 +7,7 @@ use gpui::{ use menu::Cancel; use settings::Settings; use util::channel::ReleaseChannel; -use workspace::Notification; +use workspace::notifications::Notification; pub struct UpdateNotification { version: AppVersion, @@ -28,7 +28,7 @@ impl View for UpdateNotification { fn render(&mut self, cx: &mut gpui::RenderContext<'_, Self>) -> gpui::ElementBox { let theme = cx.global::().theme.clone(); - let theme = &theme.update_notification; + let 
theme = &theme.simple_message_notification; let app_name = cx.global::().display_name(); diff --git a/crates/collab_ui/src/contact_notification.rs b/crates/collab_ui/src/contact_notification.rs index f543a01446..6f0cfc68c7 100644 --- a/crates/collab_ui/src/contact_notification.rs +++ b/crates/collab_ui/src/contact_notification.rs @@ -6,7 +6,7 @@ use gpui::{ elements::*, impl_internal_actions, Entity, ModelHandle, MutableAppContext, RenderContext, View, ViewContext, }; -use workspace::Notification; +use workspace::notifications::Notification; impl_internal_actions!(contact_notifications, [Dismiss, RespondToContactRequest]); diff --git a/crates/theme/src/theme.rs b/crates/theme/src/theme.rs index 8d2a2df18e..bf6cb57adb 100644 --- a/crates/theme/src/theme.rs +++ b/crates/theme/src/theme.rs @@ -31,6 +31,7 @@ pub struct Theme { pub shared_screen: ContainerStyle, pub contact_notification: ContactNotification, pub update_notification: UpdateNotification, + pub simple_message_notification: MessageNotification, pub project_shared_notification: ProjectSharedNotification, pub incoming_call_notification: IncomingCallNotification, pub tooltip: TooltipStyle, @@ -478,6 +479,13 @@ pub struct UpdateNotification { pub dismiss_button: Interactive, } +#[derive(Deserialize, Default)] +pub struct MessageNotification { + pub message: ContainedText, + pub action_message: Interactive, + pub dismiss_button: Interactive, +} + #[derive(Deserialize, Default)] pub struct ProjectSharedNotification { pub window_height: f32, diff --git a/crates/workspace/src/notifications.rs b/crates/workspace/src/notifications.rs new file mode 100644 index 0000000000..91656727d0 --- /dev/null +++ b/crates/workspace/src/notifications.rs @@ -0,0 +1,280 @@ +use std::{any::TypeId, ops::DerefMut}; + +use collections::HashSet; +use gpui::{AnyViewHandle, Entity, MutableAppContext, View, ViewContext, ViewHandle}; + +use crate::Workspace; + +pub fn init(cx: &mut MutableAppContext) { + cx.set_global(NotificationTracker::new()); + simple_message_notification::init(cx); +} + +pub trait Notification: View { + fn should_dismiss_notification_on_event(&self, event: &::Event) -> bool; +} + +pub trait NotificationHandle { + fn id(&self) -> usize; + fn to_any(&self) -> AnyViewHandle; +} + +impl NotificationHandle for ViewHandle { + fn id(&self) -> usize { + self.id() + } + + fn to_any(&self) -> AnyViewHandle { + self.into() + } +} + +impl From<&dyn NotificationHandle> for AnyViewHandle { + fn from(val: &dyn NotificationHandle) -> Self { + val.to_any() + } +} + +struct NotificationTracker { + notifications_sent: HashSet, +} + +impl std::ops::Deref for NotificationTracker { + type Target = HashSet; + + fn deref(&self) -> &Self::Target { + &self.notifications_sent + } +} + +impl DerefMut for NotificationTracker { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.notifications_sent + } +} + +impl NotificationTracker { + fn new() -> Self { + Self { + notifications_sent: HashSet::default(), + } + } +} + +impl Workspace { + pub fn show_notification_once( + &mut self, + id: usize, + cx: &mut ViewContext, + build_notification: impl FnOnce(&mut ViewContext) -> ViewHandle, + ) { + if !cx + .global::() + .contains(&TypeId::of::()) + { + cx.update_global::(|tracker, _| { + tracker.insert(TypeId::of::()) + }); + + self.show_notification::(id, cx, build_notification) + } + } + + pub fn show_notification( + &mut self, + id: usize, + cx: &mut ViewContext, + build_notification: impl FnOnce(&mut ViewContext) -> ViewHandle, + ) { + let type_id = TypeId::of::(); + 
if self + .notifications + .iter() + .all(|(existing_type_id, existing_id, _)| { + (*existing_type_id, *existing_id) != (type_id, id) + }) + { + let notification = build_notification(cx); + cx.subscribe(¬ification, move |this, handle, event, cx| { + if handle.read(cx).should_dismiss_notification_on_event(event) { + this.dismiss_notification(type_id, id, cx); + } + }) + .detach(); + self.notifications + .push((type_id, id, Box::new(notification))); + cx.notify(); + } + } + + fn dismiss_notification(&mut self, type_id: TypeId, id: usize, cx: &mut ViewContext) { + self.notifications + .retain(|(existing_type_id, existing_id, _)| { + if (*existing_type_id, *existing_id) == (type_id, id) { + cx.notify(); + false + } else { + true + } + }); + } +} + +pub mod simple_message_notification { + use std::process::Command; + + use gpui::{ + actions, + elements::{Flex, MouseEventHandler, Padding, ParentElement, Svg, Text}, + impl_actions, Action, CursorStyle, Element, Entity, MouseButton, MutableAppContext, View, + ViewContext, + }; + use menu::Cancel; + use serde::Deserialize; + use settings::Settings; + + use crate::Workspace; + + use super::Notification; + + actions!(message_notifications, [CancelMessageNotification]); + + #[derive(Clone, Default, Deserialize, PartialEq)] + pub struct OsOpen(pub String); + + impl_actions!(message_notifications, [OsOpen]); + + pub fn init(cx: &mut MutableAppContext) { + cx.add_action(MessageNotification::dismiss); + cx.add_action( + |_workspace: &mut Workspace, open_action: &OsOpen, _cx: &mut ViewContext| { + #[cfg(target_os = "macos")] + { + let mut command = Command::new("open"); + command.arg(open_action.0.clone()); + + command.spawn().ok(); + } + }, + ) + } + + pub struct MessageNotification { + message: String, + click_action: Box, + click_message: String, + } + + pub enum MessageNotificationEvent { + Dismiss, + } + + impl Entity for MessageNotification { + type Event = MessageNotificationEvent; + } + + impl MessageNotification { + pub fn new, A: Action, S2: AsRef>( + message: S1, + click_action: A, + click_message: S2, + ) -> Self { + Self { + message: message.as_ref().to_string(), + click_action: Box::new(click_action) as Box, + click_message: click_message.as_ref().to_string(), + } + } + + pub fn dismiss(&mut self, _: &CancelMessageNotification, cx: &mut ViewContext) { + cx.emit(MessageNotificationEvent::Dismiss); + } + } + + impl View for MessageNotification { + fn ui_name() -> &'static str { + "MessageNotification" + } + + fn render(&mut self, cx: &mut gpui::RenderContext<'_, Self>) -> gpui::ElementBox { + let theme = cx.global::().theme.clone(); + let theme = &theme.update_notification; + + enum MessageNotificationTag {} + + let click_action = self.click_action.boxed_clone(); + let click_message = self.click_message.clone(); + let message = self.message.clone(); + + MouseEventHandler::::new(0, cx, |state, cx| { + Flex::column() + .with_child( + Flex::row() + .with_child( + Text::new(message, theme.message.text.clone()) + .contained() + .with_style(theme.message.container) + .aligned() + .top() + .left() + .flex(1., true) + .boxed(), + ) + .with_child( + MouseEventHandler::::new(0, cx, |state, _| { + let style = theme.dismiss_button.style_for(state, false); + Svg::new("icons/x_mark_8.svg") + .with_color(style.color) + .constrained() + .with_width(style.icon_width) + .aligned() + .contained() + .with_style(style.container) + .constrained() + .with_width(style.button_width) + .with_height(style.button_width) + .boxed() + }) + 
.with_padding(Padding::uniform(5.)) + .on_click(MouseButton::Left, move |_, cx| { + cx.dispatch_action(CancelMessageNotification) + }) + .aligned() + .constrained() + .with_height( + cx.font_cache().line_height(theme.message.text.font_size), + ) + .aligned() + .top() + .flex_float() + .boxed(), + ) + .boxed(), + ) + .with_child({ + let style = theme.action_message.style_for(state, false); + + Text::new(click_message, style.text.clone()) + .contained() + .with_style(style.container) + .boxed() + }) + .contained() + .boxed() + }) + .with_cursor_style(CursorStyle::PointingHand) + .on_click(MouseButton::Left, move |_, cx| { + cx.dispatch_any_action(click_action.boxed_clone()) + }) + .boxed() + } + } + + impl Notification for MessageNotification { + fn should_dismiss_notification_on_event(&self, event: &::Event) -> bool { + match event { + MessageNotificationEvent::Dismiss => true, + } + } + } +} diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 5fb804e66d..ed00e4f14d 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -4,6 +4,7 @@ /// specific locations. pub mod dock; pub mod item; +pub mod notifications; pub mod pane; pub mod pane_group; mod persistence; @@ -41,7 +42,9 @@ use gpui::{ }; use item::{FollowableItem, FollowableItemHandle, Item, ItemHandle, ProjectItem}; use language::LanguageRegistry; + use log::{error, warn}; +use notifications::NotificationHandle; pub use pane::*; pub use pane_group::*; use persistence::{model::SerializedItem, DB}; @@ -61,7 +64,10 @@ use theme::{Theme, ThemeRegistry}; pub use toolbar::{ToolbarItemLocation, ToolbarItemView}; use util::ResultExt; -use crate::persistence::model::{SerializedPane, SerializedPaneGroup, SerializedWorkspace}; +use crate::{ + notifications::simple_message_notification::{MessageNotification, OsOpen}, + persistence::model::{SerializedPane, SerializedPaneGroup, SerializedWorkspace}, +}; #[derive(Clone, PartialEq)] pub struct RemoveWorktreeFromProject(pub WorktreeId); @@ -151,6 +157,7 @@ impl_actions!(workspace, [ActivatePane]); pub fn init(app_state: Arc, cx: &mut MutableAppContext) { pane::init(cx); dock::init(cx); + notifications::init(cx); cx.add_global_action(open); cx.add_global_action({ @@ -453,31 +460,6 @@ impl DelayedDebouncedEditAction { } } -pub trait Notification: View { - fn should_dismiss_notification_on_event(&self, event: &::Event) -> bool; -} - -pub trait NotificationHandle { - fn id(&self) -> usize; - fn to_any(&self) -> AnyViewHandle; -} - -impl NotificationHandle for ViewHandle { - fn id(&self) -> usize { - self.id() - } - - fn to_any(&self) -> AnyViewHandle { - self.into() - } -} - -impl From<&dyn NotificationHandle> for AnyViewHandle { - fn from(val: &dyn NotificationHandle) -> Self { - val.to_any() - } -} - #[derive(Default)] struct LeaderState { followers: HashSet, @@ -732,6 +714,8 @@ impl Workspace { workspace }); + notify_if_database_failed(&workspace, &mut cx); + // Call open path for each of the project paths // (this will bring them to the front if they were in the serialized workspace) debug_assert!(paths_to_open.len() == project_paths.len()); @@ -1115,45 +1099,6 @@ impl Workspace { } } - pub fn show_notification( - &mut self, - id: usize, - cx: &mut ViewContext, - build_notification: impl FnOnce(&mut ViewContext) -> ViewHandle, - ) { - let type_id = TypeId::of::(); - if self - .notifications - .iter() - .all(|(existing_type_id, existing_id, _)| { - (*existing_type_id, *existing_id) != (type_id, id) - }) - { - let notification = 
build_notification(cx); - cx.subscribe(¬ification, move |this, handle, event, cx| { - if handle.read(cx).should_dismiss_notification_on_event(event) { - this.dismiss_notification(type_id, id, cx); - } - }) - .detach(); - self.notifications - .push((type_id, id, Box::new(notification))); - cx.notify(); - } - } - - fn dismiss_notification(&mut self, type_id: TypeId, id: usize, cx: &mut ViewContext) { - self.notifications - .retain(|(existing_type_id, existing_id, _)| { - if (*existing_type_id, *existing_id) == (type_id, id) { - cx.notify(); - false - } else { - true - } - }); - } - pub fn items<'a>( &'a self, cx: &'a AppContext, @@ -2436,6 +2381,47 @@ impl Workspace { } } +fn notify_if_database_failed(workspace: &ViewHandle, cx: &mut AsyncAppContext) { + if (*db::ALL_FILE_DB_FAILED).load(std::sync::atomic::Ordering::Acquire) { + workspace.update(cx, |workspace, cx| { + workspace.show_notification_once(0, cx, |cx| { + cx.add_view(|_| { + MessageNotification::new( + indoc::indoc! {" + Failed to load any database file :( + "}, + OsOpen("https://github.com/zed-industries/feedback/issues/new?assignees=&labels=defect%2Ctriage&template=2_bug_report.yml".to_string()), + "Click to let us know about this error" + ) + }) + }); + }); + } else { + let backup_path = (*db::BACKUP_DB_PATH).read(); + if let Some(backup_path) = &*backup_path { + workspace.update(cx, |workspace, cx| { + workspace.show_notification_once(0, cx, |cx| { + cx.add_view(|_| { + let backup_path = backup_path.to_string_lossy(); + MessageNotification::new( + format!( + indoc::indoc! {" + Database file was corrupted :( + Old database backed up to: + {} + "}, + backup_path + ), + OsOpen(backup_path.to_string()), + "Click to show old database in finder", + ) + }) + }); + }); + } + } +} + impl Entity for Workspace { type Event = Event; } diff --git a/styles/src/styleTree/app.ts b/styles/src/styleTree/app.ts index bd3d157168..267d830506 100644 --- a/styles/src/styleTree/app.ts +++ b/styles/src/styleTree/app.ts @@ -12,6 +12,7 @@ import sharedScreen from "./sharedScreen"; import projectDiagnostics from "./projectDiagnostics"; import contactNotification from "./contactNotification"; import updateNotification from "./updateNotification"; +import simpleMessageNotification from "./simpleMessageNotification"; import projectSharedNotification from "./projectSharedNotification"; import tooltip from "./tooltip"; import terminal from "./terminal"; @@ -47,6 +48,7 @@ export default function app(colorScheme: ColorScheme): Object { }, }, updateNotification: updateNotification(colorScheme), + simpleMessageNotification: simpleMessageNotification(colorScheme), tooltip: tooltip(colorScheme), terminal: terminal(colorScheme), colorScheme: { diff --git a/styles/src/styleTree/simpleMessageNotification.ts b/styles/src/styleTree/simpleMessageNotification.ts new file mode 100644 index 0000000000..76ff5e1ca5 --- /dev/null +++ b/styles/src/styleTree/simpleMessageNotification.ts @@ -0,0 +1,31 @@ +import { ColorScheme } from "../themes/common/colorScheme"; +import { foreground, text } from "./components"; + +const headerPadding = 8; + +export default function simpleMessageNotification(colorScheme: ColorScheme): Object { + let layer = colorScheme.middle; + return { + message: { + ...text(layer, "sans", { size: "md" }), + margin: { left: headerPadding, right: headerPadding }, + }, + actionMessage: { + ...text(layer, "sans", { size: "md" }), + margin: { left: headerPadding, top: 6, bottom: 6 }, + hover: { + color: foreground(layer, "hovered"), + }, + }, + dismissButton: { + 
color: foreground(layer), + iconWidth: 8, + iconHeight: 8, + buttonWidth: 8, + buttonHeight: 8, + hover: { + color: foreground(layer, "hovered"), + }, + }, + }; +} From 80e035cc2cc015ac2d95adffa0857e1fce4de123 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sat, 3 Dec 2022 16:12:07 -0800 Subject: [PATCH 190/240] Fixed bad rebase --- crates/zed/src/main.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index c95b7c4071..97a19b6d86 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -204,10 +204,10 @@ fn main() { } fn init_paths() { - std::fs::create_dir_all(&*zed::paths::CONFIG_DIR).expect("could not create config path"); - std::fs::create_dir_all(&*zed::paths::LANGUAGES_DIR).expect("could not create languages path"); - std::fs::create_dir_all(&*zed::paths::DB_DIR).expect("could not create database path"); - std::fs::create_dir_all(&*zed::paths::LOGS_DIR).expect("could not create logs path"); + std::fs::create_dir_all(&*util::paths::CONFIG_DIR).expect("could not create config path"); + std::fs::create_dir_all(&*util::paths::LANGUAGES_DIR).expect("could not create languages path"); + std::fs::create_dir_all(&*util::paths::DB_DIR).expect("could not create database path"); + std::fs::create_dir_all(&*util::paths::LOGS_DIR).expect("could not create logs path"); } fn init_logger() { From 4288f1087355ace3f071f6822404db56a24d111c Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sat, 3 Dec 2022 16:13:02 -0800 Subject: [PATCH 191/240] And library change --- crates/journal/src/journal.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/journal/src/journal.rs b/crates/journal/src/journal.rs index 3cde8e504e..ef1dbdc15c 100644 --- a/crates/journal/src/journal.rs +++ b/crates/journal/src/journal.rs @@ -115,7 +115,7 @@ mod tests { #[test] fn test_heading_entry_defaults_to_hour_12() { - let naive_time = NaiveTime::from_hms_milli(15, 0, 0, 0); + let naive_time = NaiveTime::from_hms_milli_opt(15, 0, 0, 0).unwrap(); let actual_heading_entry = heading_entry(naive_time, &None); let expected_heading_entry = "# 3:00 PM"; @@ -124,7 +124,7 @@ mod tests { #[test] fn test_heading_entry_is_hour_12() { - let naive_time = NaiveTime::from_hms_milli(15, 0, 0, 0); + let naive_time = NaiveTime::from_hms_milli_opt(15, 0, 0, 0).unwrap(); let actual_heading_entry = heading_entry(naive_time, &Some(HourFormat::Hour12)); let expected_heading_entry = "# 3:00 PM"; @@ -133,7 +133,7 @@ mod tests { #[test] fn test_heading_entry_is_hour_24() { - let naive_time = NaiveTime::from_hms_milli(15, 0, 0, 0); + let naive_time = NaiveTime::from_hms_milli_opt(15, 0, 0, 0).unwrap(); let actual_heading_entry = heading_entry(naive_time, &Some(HourFormat::Hour24)); let expected_heading_entry = "# 15:00"; From d609237c32ea310b08c9971a223e1014747d1f8e Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sat, 3 Dec 2022 16:26:37 -0800 Subject: [PATCH 192/240] Found db parallelism problem :( --- crates/db/src/db.rs | 165 ++++++++++++++++++++++++++++---------------- 1 file changed, 107 insertions(+), 58 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index c146336132..9712f2e375 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -16,7 +16,7 @@ pub use util::paths::DB_DIR; use sqlez::domain::Migrator; use sqlez::thread_safe_connection::ThreadSafeConnection; use sqlez_macros::sql; -use std::fs::{create_dir_all, remove_dir_all}; +use std::fs::create_dir_all; use std::path::{Path, PathBuf}; use 
std::sync::atomic::{AtomicBool, Ordering}; use std::time::{SystemTime, UNIX_EPOCH}; @@ -40,7 +40,7 @@ const DB_FILE_NAME: &'static str = "db.sqlite"; lazy_static::lazy_static! { static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(()); - static ref DB_WIPED: RwLock = RwLock::new(false); + // static ref DB_WIPED: RwLock = RwLock::new(false); pub static ref BACKUP_DB_PATH: RwLock> = RwLock::new(None); pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false); } @@ -49,21 +49,21 @@ lazy_static::lazy_static! { /// This will retry a couple times if there are failures. If opening fails once, the db directory /// is moved to a backup folder and a new one is created. If that fails, a shared in memory db is created. /// In either case, static variables are set so that the user can be notified. -pub async fn open_db(wipe_db: bool, db_dir: &Path, release_channel: &ReleaseChannel) -> ThreadSafeConnection { +pub async fn open_db(db_dir: &Path, release_channel: &ReleaseChannel) -> ThreadSafeConnection { let release_channel_name = release_channel.dev_name(); let main_db_dir = db_dir.join(Path::new(&format!("0-{}", release_channel_name))); - // If WIPE_DB, delete 0-{channel} - if release_channel == &ReleaseChannel::Dev - && wipe_db - && !*DB_WIPED.read() - { - let mut db_wiped = DB_WIPED.write(); - if !*db_wiped { - remove_dir_all(&main_db_dir).ok(); - *db_wiped = true; - } - } + // // If WIPE_DB, delete 0-{channel} + // if release_channel == &ReleaseChannel::Dev + // && wipe_db + // && !*DB_WIPED.read() + // { + // let mut db_wiped = DB_WIPED.write(); + // if !*db_wiped { + // remove_dir_all(&main_db_dir).ok(); + // *db_wiped = true; + // } + // } let connection = async_iife!({ // Note: This still has a race condition where 1 set of migrations succeeds @@ -205,7 +205,7 @@ macro_rules! define_connection { #[cfg(not(any(test, feature = "test-support")))] $crate::lazy_static::lazy_static! { - pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(std::env::var("WIPE_DB").is_ok(), &$crate::DB_DIR, &$crate::RELEASE_CHANNEL))); + pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(&$crate::DB_DIR, &$crate::RELEASE_CHANNEL))); } }; (pub static ref $id:ident: $t:ident<$($d:ty),+> = $migrations:expr;) => { @@ -236,67 +236,66 @@ macro_rules! define_connection { #[cfg(not(any(test, feature = "test-support")))] $crate::lazy_static::lazy_static! 
{ - pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(std::env::var("WIPE_DB").is_ok(), &$crate::DB_DIR, &$crate::RELEASE_CHANNEL))); + pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(&$crate::DB_DIR, &$crate::RELEASE_CHANNEL))); } }; } #[cfg(test)] mod tests { - use std::{thread, fs}; + use std::{fs, thread}; use sqlez::{domain::Domain, connection::Connection}; use sqlez_macros::sql; use tempdir::TempDir; - use util::channel::ReleaseChannel; use crate::{open_db, DB_FILE_NAME}; - // Test that wipe_db exists and works and gives a new db - #[gpui::test] - async fn test_wipe_db() { - enum TestDB {} + // // Test that wipe_db exists and works and gives a new db + // #[gpui::test] + // async fn test_wipe_db() { + // enum TestDB {} - impl Domain for TestDB { - fn name() -> &'static str { - "db_tests" - } + // impl Domain for TestDB { + // fn name() -> &'static str { + // "db_tests" + // } - fn migrations() -> &'static [&'static str] { - &[sql!( - CREATE TABLE test(value); - )] - } - } + // fn migrations() -> &'static [&'static str] { + // &[sql!( + // CREATE TABLE test(value); + // )] + // } + // } - let tempdir = TempDir::new("DbTests").unwrap(); + // let tempdir = TempDir::new("DbTests").unwrap(); - // Create a db and insert a marker value - let test_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; - test_db.write(|connection| - connection.exec(sql!( - INSERT INTO test(value) VALUES (10) - )).unwrap()().unwrap() - ).await; - drop(test_db); + // // Create a db and insert a marker value + // let test_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + // test_db.write(|connection| + // connection.exec(sql!( + // INSERT INTO test(value) VALUES (10) + // )).unwrap()().unwrap() + // ).await; + // drop(test_db); - // Opening db with wipe clears once and removes the marker value - let mut guards = vec![]; - for _ in 0..5 { - let path = tempdir.path().to_path_buf(); - let guard = thread::spawn(move || smol::block_on(async { - let test_db = open_db::(true, &path, &ReleaseChannel::Dev).await; + // // Opening db with wipe clears once and removes the marker value + // let mut guards = vec![]; + // for _ in 0..5 { + // let path = tempdir.path().to_path_buf(); + // let guard = thread::spawn(move || smol::block_on(async { + // let test_db = open_db::(true, &path, &ReleaseChannel::Dev).await; - assert!(test_db.select_row::<()>(sql!(SELECT value FROM test)).unwrap()().unwrap().is_none()) - })); + // assert!(test_db.select_row::<()>(sql!(SELECT value FROM test)).unwrap()().unwrap().is_none()) + // })); - guards.push(guard); - } + // guards.push(guard); + // } - for guard in guards { - guard.join().unwrap(); - } - } + // for guard in guards { + // guard.join().unwrap(); + // } + // } // Test bad migration panics #[gpui::test] @@ -317,7 +316,7 @@ mod tests { } let tempdir = TempDir::new("DbTests").unwrap(); - let _bad_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + let _bad_db = open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; } /// Test that DB exists but corrupted (causing recreate) @@ -349,11 +348,11 @@ mod tests { let tempdir = TempDir::new("DbTests").unwrap(); { - let corrupt_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + let corrupt_db = open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; assert!(corrupt_db.persistent()); } - let good_db = open_db::(false, tempdir.path(), 
&util::channel::ReleaseChannel::Dev).await; + let good_db = open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; assert!(good_db.select_row::("SELECT * FROM test2").unwrap()().unwrap().is_none()); let mut corrupted_backup_dir = fs::read_dir( @@ -369,4 +368,54 @@ mod tests { let backup = Connection::open_file(&corrupted_backup_dir.to_string_lossy()); assert!(backup.select_row::("SELECT * FROM test").unwrap()().unwrap().is_none()); } + + /// Test that DB exists but corrupted (causing recreate) + #[gpui::test] + async fn test_simultaneous_db_corruption() { + enum CorruptedDB {} + + impl Domain for CorruptedDB { + fn name() -> &'static str { + "db_tests" + } + + fn migrations() -> &'static [&'static str] { + &[sql!(CREATE TABLE test(value);)] + } + } + + enum GoodDB {} + + impl Domain for GoodDB { + fn name() -> &'static str { + "db_tests" //Notice same name + } + + fn migrations() -> &'static [&'static str] { + &[sql!(CREATE TABLE test2(value);)] //But different migration + } + } + + let tempdir = TempDir::new("DbTests").unwrap(); + { + let corrupt_db = open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + assert!(corrupt_db.persistent()); + } + + let mut guards = vec![]; + for _ in 0..10 { + let tmp_path = tempdir.path().to_path_buf(); + let guard = thread::spawn(move || { + let good_db = smol::block_on(open_db::(tmp_path.as_path(), &util::channel::ReleaseChannel::Dev)); + assert!(good_db.select_row::("SELECT * FROM test2").unwrap()().unwrap().is_none()); + }); + + guards.push(guard); + + } + + for guard in guards.into_iter() { + assert!(guard.join().is_ok()); + } + } } From 1ce08631580d8897de92a1357342284159e2b46e Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sat, 3 Dec 2022 16:27:45 -0800 Subject: [PATCH 193/240] Removed old code --- crates/db/src/db.rs | 59 --------------------------------------------- 1 file changed, 59 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 9712f2e375..878d2430e2 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -40,7 +40,6 @@ const DB_FILE_NAME: &'static str = "db.sqlite"; lazy_static::lazy_static! { static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(()); - // static ref DB_WIPED: RwLock = RwLock::new(false); pub static ref BACKUP_DB_PATH: RwLock> = RwLock::new(None); pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false); } @@ -53,18 +52,6 @@ pub async fn open_db(db_dir: &Path, release_channel: &Rel let release_channel_name = release_channel.dev_name(); let main_db_dir = db_dir.join(Path::new(&format!("0-{}", release_channel_name))); - // // If WIPE_DB, delete 0-{channel} - // if release_channel == &ReleaseChannel::Dev - // && wipe_db - // && !*DB_WIPED.read() - // { - // let mut db_wiped = DB_WIPED.write(); - // if !*db_wiped { - // remove_dir_all(&main_db_dir).ok(); - // *db_wiped = true; - // } - // } - let connection = async_iife!({ // Note: This still has a race condition where 1 set of migrations succeeds // (e.g. (Workspace, Editor)) and another fails (e.g. 
(Workspace, Terminal))
@@ -250,52 +237,6 @@ mod tests {
     use tempdir::TempDir;
 
     use crate::{open_db, DB_FILE_NAME};
-
-    // // Test that wipe_db exists and works and gives a new db
-    // #[gpui::test]
-    // async fn test_wipe_db() {
-    //     enum TestDB {}
-
-    //     impl Domain for TestDB {
-    //         fn name() -> &'static str {
-    //             "db_tests"
-    //         }
-
-    //         fn migrations() -> &'static [&'static str] {
-    //             &[sql!(
-    //                 CREATE TABLE test(value);
-    //             )]
-    //         }
-    //     }
-
-    //     let tempdir = TempDir::new("DbTests").unwrap();
-
-    //     // Create a db and insert a marker value
-    //     let test_db = open_db::<TestDB>(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await;
-    //     test_db.write(|connection|
-    //         connection.exec(sql!(
-    //             INSERT INTO test(value) VALUES (10)
-    //         )).unwrap()().unwrap()
-    //     ).await;
-    //     drop(test_db);
-
-    //     // Opening db with wipe clears once and removes the marker value
-    //     let mut guards = vec![];
-    //     for _ in 0..5 {
-    //         let path = tempdir.path().to_path_buf();
-    //         let guard = thread::spawn(move || smol::block_on(async {
-    //             let test_db = open_db::<TestDB>(true, &path, &ReleaseChannel::Dev).await;
-
-    //             assert!(test_db.select_row::<()>(sql!(SELECT value FROM test)).unwrap()().unwrap().is_none())
-    //         }));
-
-    //         guards.push(guard);
-    //     }
-
-    //     for guard in guards {
-    //         guard.join().unwrap();
-    //     }
-    // }
 
     // Test bad migration panics
     #[gpui::test]

From 55eb0a37424e4e756f10b25a9fbae4f33f4fa638 Mon Sep 17 00:00:00 2001
From: Mikayla Maki
Date: Sat, 3 Dec 2022 16:46:35 -0800
Subject: [PATCH 194/240] Fixed an error message and properly initialized the
 DB

---
 crates/db/src/db.rs                        | 9 ++++++---
 crates/sqlez/src/thread_safe_connection.rs | 2 +-
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs
index 878d2430e2..704ac4049d 100644
--- a/crates/db/src/db.rs
+++ b/crates/db/src/db.rs
@@ -24,14 +24,14 @@ use util::{async_iife, ResultExt};
 use util::channel::ReleaseChannel;
 
 const CONNECTION_INITIALIZE_QUERY: &'static str = sql!(
-    PRAGMA synchronous=NORMAL;
-    PRAGMA busy_timeout=1;
     PRAGMA foreign_keys=TRUE;
-    PRAGMA case_sensitive_like=TRUE;
 );
 
 const DB_INITIALIZE_QUERY: &'static str = sql!(
     PRAGMA journal_mode=WAL;
+    PRAGMA busy_timeout=1;
+    PRAGMA case_sensitive_like=TRUE;
+    PRAGMA synchronous=NORMAL;
 );
 
 const FALLBACK_DB_NAME: &'static str = "FALLBACK_MEMORY_DB";
@@ -293,6 +293,7 @@ mod tests {
             assert!(corrupt_db.persistent());
         }
+
         let good_db = open_db::<GoodDB>(tempdir.path(), &util::channel::ReleaseChannel::Dev).await;
         assert!(good_db.select_row::("SELECT * FROM test2").unwrap()().unwrap().is_none());
 
@@ -339,10 +340,12 @@ mod tests {
         let tempdir = TempDir::new("DbTests").unwrap();
         {
+            // Setup the bad database
             let corrupt_db = open_db::<CorruptedDB>(tempdir.path(), &util::channel::ReleaseChannel::Dev).await;
             assert!(corrupt_db.persistent());
         }
 
+        // Try to connect to it a bunch of times at once
         let mut guards = vec![];
         for _ in 0..10 {
             let tmp_path = tempdir.path().to_path_buf();

diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs
index 51d0707fd8..2c51b776ed 100644
--- a/crates/sqlez/src/thread_safe_connection.rs
+++ b/crates/sqlez/src/thread_safe_connection.rs
@@ -168,7 +168,7 @@ impl ThreadSafeConnection {
             let result = connection.with_write(|connection| callback(connection));
             sender.send(result).ok();
         }));
-        reciever.map(|response| response.expect("Background writer thread unexpectedly closed"))
+        reciever.map(|response| response.expect("Write queue unexpectedly closed"))
     }
 
     pub(crate) fn create_connection(
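The `write` change above is part of sqlez's write queue: callers enqueue a callback for a single background writer thread and get back a future for its result, which is why the panic message now names the queue rather than the thread. A minimal self-contained sketch of that pattern, assuming only std's mpsc channel plus the futures crate's oneshot channel (the names and the doubling "work" are illustrative, not sqlez's actual internals):

    use std::sync::mpsc;
    use std::thread;

    use futures::channel::oneshot;
    use futures::executor::block_on;

    fn main() {
        // Work queue consumed by a single writer thread; each job carries a
        // oneshot sender for its result.
        let (work_tx, work_rx) = mpsc::channel::<(i64, oneshot::Sender<i64>)>();

        let writer = thread::spawn(move || {
            for (value, reply) in work_rx {
                // Stand-in for connection.with_write(|connection| callback(connection)).
                reply.send(value * 2).ok();
            }
        });

        // Caller side: enqueue a job, then await the oneshot receiver. The
        // expect can only fire if the writer loop exits before replying.
        let (reply_tx, reply_rx) = oneshot::channel();
        work_tx.send((21, reply_tx)).unwrap();
        let result = block_on(reply_rx).expect("Write queue unexpectedly closed");
        assert_eq!(result, 42);

        drop(work_tx);
        writer.join().unwrap();
    }
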
From 0ed731780a113934f37f9ab0a5f428dd288692b0 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 5 Dec 2022 09:46:03 +0100 Subject: [PATCH 195/240] Remove duplication between `transaction` and `room_transaction` --- crates/collab/src/db.rs | 57 +++++++++++++++++++---------------------- 1 file changed, 27 insertions(+), 30 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index e667930cad..3066260bc4 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -2133,21 +2133,7 @@ impl Database { { let body = async { loop { - let tx = self.pool.begin().await?; - - // In Postgres, serializable transactions are opt-in - if let DatabaseBackend::Postgres = self.pool.get_database_backend() { - tx.execute(Statement::from_string( - DatabaseBackend::Postgres, - "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;".into(), - )) - .await?; - } - - let mut tx = Arc::new(Some(tx)); - let result = f(TransactionHandle(tx.clone())).await; - let tx = Arc::get_mut(&mut tx).unwrap().take().unwrap(); - + let (tx, result) = self.with_transaction(&f).await?; match result { Ok(result) => { tx.commit().await?; @@ -2196,21 +2182,7 @@ impl Database { { let body = async { loop { - let tx = self.pool.begin().await?; - - // In Postgres, serializable transactions are opt-in - if let DatabaseBackend::Postgres = self.pool.get_database_backend() { - tx.execute(Statement::from_string( - DatabaseBackend::Postgres, - "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;".into(), - )) - .await?; - } - - let mut tx = Arc::new(Some(tx)); - let result = f(TransactionHandle(tx.clone())).await; - let tx = Arc::get_mut(&mut tx).unwrap().take().unwrap(); - + let (tx, result) = self.with_transaction(&f).await?; match result { Ok((room_id, data)) => { let lock = self.rooms.entry(room_id).or_default().clone(); @@ -2257,6 +2229,31 @@ impl Database { body.await } } + + async fn with_transaction(&self, f: &F) -> Result<(DatabaseTransaction, Result)> + where + F: Send + Fn(TransactionHandle) -> Fut, + Fut: Send + Future>, + { + let tx = self.pool.begin().await?; + + // In Postgres, serializable transactions are opt-in + if let DatabaseBackend::Postgres = self.pool.get_database_backend() { + tx.execute(Statement::from_string( + DatabaseBackend::Postgres, + "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;".into(), + )) + .await?; + } + + let mut tx = Arc::new(Some(tx)); + let result = f(TransactionHandle(tx.clone())).await; + let Some(tx) = Arc::get_mut(&mut tx).and_then(|tx| tx.take()) else { + return Err(anyhow!("couldn't complete transaction because it's still in use"))?; + }; + + Ok((tx, result)) + } } struct TransactionHandle(Arc>); From d97a8364adc2340ff4388ad21333ef52961e4426 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 5 Dec 2022 10:49:53 +0100 Subject: [PATCH 196/240] Retry transactions if there's a serialization failure during commit --- crates/collab/src/db.rs | 163 +++++++++++++++++++++------------------- 1 file changed, 87 insertions(+), 76 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 3066260bc4..bc074e30df 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -2131,47 +2131,30 @@ impl Database { F: Send + Fn(TransactionHandle) -> Fut, Fut: Send + Future>, { - let body = async { - loop { - let (tx, result) = self.with_transaction(&f).await?; - match result { - Ok(result) => { - tx.commit().await?; - return Ok(result); - } - Err(error) => { - tx.rollback().await?; - match error { - Error::Database( - 
DbErr::Exec(sea_orm::RuntimeErr::SqlxError(error)) - | DbErr::Query(sea_orm::RuntimeErr::SqlxError(error)), - ) if error - .as_database_error() - .and_then(|error| error.code()) - .as_deref() - == Some("40001") => - { + loop { + let (tx, result) = self.run(self.with_transaction(&f)).await?; + match result { + Ok(result) => { + match self.run(async move { Ok(tx.commit().await?) }).await { + Ok(()) => return Ok(result), + Err(error) => { + if is_serialization_error(&error) { // Retry (don't break the loop) + } else { + return Err(error); } - error @ _ => return Err(error), } } } + Err(error) => { + self.run(tx.rollback()).await?; + if is_serialization_error(&error) { + // Retry (don't break the loop) + } else { + return Err(error); + } + } } - }; - - #[cfg(test)] - { - if let Some(background) = self.background.as_ref() { - background.simulate_random_delay().await; - } - - self.runtime.as_ref().unwrap().block_on(body) - } - - #[cfg(not(test))] - { - body.await } } @@ -2180,53 +2163,38 @@ impl Database { F: Send + Fn(TransactionHandle) -> Fut, Fut: Send + Future>, { - let body = async { - loop { - let (tx, result) = self.with_transaction(&f).await?; - match result { - Ok((room_id, data)) => { - let lock = self.rooms.entry(room_id).or_default().clone(); - let _guard = lock.lock_owned().await; - tx.commit().await?; - return Ok(RoomGuard { - data, - _guard, - _not_send: PhantomData, - }); - } - Err(error) => { - tx.rollback().await?; - match error { - Error::Database( - DbErr::Exec(sea_orm::RuntimeErr::SqlxError(error)) - | DbErr::Query(sea_orm::RuntimeErr::SqlxError(error)), - ) if error - .as_database_error() - .and_then(|error| error.code()) - .as_deref() - == Some("40001") => - { + loop { + let (tx, result) = self.run(self.with_transaction(&f)).await?; + match result { + Ok((room_id, data)) => { + let lock = self.rooms.entry(room_id).or_default().clone(); + let _guard = lock.lock_owned().await; + match self.run(async move { Ok(tx.commit().await?) 
}).await { + Ok(()) => { + return Ok(RoomGuard { + data, + _guard, + _not_send: PhantomData, + }); + } + Err(error) => { + if is_serialization_error(&error) { // Retry (don't break the loop) + } else { + return Err(error); } - error @ _ => return Err(error), } } } + Err(error) => { + self.run(tx.rollback()).await?; + if is_serialization_error(&error) { + // Retry (don't break the loop) + } else { + return Err(error); + } + } } - }; - - #[cfg(test)] - { - if let Some(background) = self.background.as_ref() { - background.simulate_random_delay().await; - } - - self.runtime.as_ref().unwrap().block_on(body) - } - - #[cfg(not(test))] - { - body.await } } @@ -2254,6 +2222,49 @@ impl Database { Ok((tx, result)) } + + async fn run(&self, future: F) -> T + where + F: Future, + { + #[cfg(test)] + { + if let Some(background) = self.background.as_ref() { + background.simulate_random_delay().await; + } + + let result = self.runtime.as_ref().unwrap().block_on(future); + + if let Some(background) = self.background.as_ref() { + background.simulate_random_delay().await; + } + + result + } + + #[cfg(not(test))] + { + future.await + } + } +} + +fn is_serialization_error(error: &Error) -> bool { + const SERIALIZATION_FAILURE_CODE: &'static str = "40001"; + match error { + Error::Database( + DbErr::Exec(sea_orm::RuntimeErr::SqlxError(error)) + | DbErr::Query(sea_orm::RuntimeErr::SqlxError(error)), + ) if error + .as_database_error() + .and_then(|error| error.code()) + .as_deref() + == Some(SERIALIZATION_FAILURE_CODE) => + { + true + } + _ => false, + } } struct TransactionHandle(Arc>); From d3c411677ababde3c562c005def58978eb6a944c Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 5 Dec 2022 12:03:45 +0100 Subject: [PATCH 197/240] Remove random pauses to prevent the database from deadlocking --- crates/collab/src/db.rs | 108 +++++++++++++------------ crates/collab/src/integration_tests.rs | 8 +- 2 files changed, 62 insertions(+), 54 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index bc074e30df..dfd1d7e65a 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -2131,31 +2131,35 @@ impl Database { F: Send + Fn(TransactionHandle) -> Fut, Fut: Send + Future>, { - loop { - let (tx, result) = self.run(self.with_transaction(&f)).await?; - match result { - Ok(result) => { - match self.run(async move { Ok(tx.commit().await?) 
}).await { - Ok(()) => return Ok(result), - Err(error) => { - if is_serialization_error(&error) { - // Retry (don't break the loop) - } else { - return Err(error); + let body = async { + loop { + let (tx, result) = self.with_transaction(&f).await?; + match result { + Ok(result) => { + match tx.commit().await.map_err(Into::into) { + Ok(()) => return Ok(result), + Err(error) => { + if is_serialization_error(&error) { + // Retry (don't break the loop) + } else { + return Err(error); + } } } } - } - Err(error) => { - self.run(tx.rollback()).await?; - if is_serialization_error(&error) { - // Retry (don't break the loop) - } else { - return Err(error); + Err(error) => { + tx.rollback().await?; + if is_serialization_error(&error) { + // Retry (don't break the loop) + } else { + return Err(error); + } } } } - } + }; + + self.run(body).await } async fn room_transaction(&self, f: F) -> Result> @@ -2163,39 +2167,43 @@ impl Database { F: Send + Fn(TransactionHandle) -> Fut, Fut: Send + Future>, { - loop { - let (tx, result) = self.run(self.with_transaction(&f)).await?; - match result { - Ok((room_id, data)) => { - let lock = self.rooms.entry(room_id).or_default().clone(); - let _guard = lock.lock_owned().await; - match self.run(async move { Ok(tx.commit().await?) }).await { - Ok(()) => { - return Ok(RoomGuard { - data, - _guard, - _not_send: PhantomData, - }); - } - Err(error) => { - if is_serialization_error(&error) { - // Retry (don't break the loop) - } else { - return Err(error); + let body = async { + loop { + let (tx, result) = self.with_transaction(&f).await?; + match result { + Ok((room_id, data)) => { + let lock = self.rooms.entry(room_id).or_default().clone(); + let _guard = lock.lock_owned().await; + match tx.commit().await.map_err(Into::into) { + Ok(()) => { + return Ok(RoomGuard { + data, + _guard, + _not_send: PhantomData, + }); + } + Err(error) => { + if is_serialization_error(&error) { + // Retry (don't break the loop) + } else { + return Err(error); + } } } } - } - Err(error) => { - self.run(tx.rollback()).await?; - if is_serialization_error(&error) { - // Retry (don't break the loop) - } else { - return Err(error); + Err(error) => { + tx.rollback().await?; + if is_serialization_error(&error) { + // Retry (don't break the loop) + } else { + return Err(error); + } } } } - } + }; + + self.run(body).await } async fn with_transaction(&self, f: &F) -> Result<(DatabaseTransaction, Result)> @@ -2233,13 +2241,7 @@ impl Database { background.simulate_random_delay().await; } - let result = self.runtime.as_ref().unwrap().block_on(future); - - if let Some(background) = self.background.as_ref() { - background.simulate_random_delay().await; - } - - result + self.runtime.as_ref().unwrap().block_on(future) } #[cfg(not(test))] diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 73f450b833..4ff372efbe 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -5672,7 +5672,13 @@ impl TestServer { async fn start(background: Arc) -> Self { static NEXT_LIVE_KIT_SERVER_ID: AtomicUsize = AtomicUsize::new(0); - let test_db = TestDb::sqlite(background.clone()); + let use_postgres = env::var("USE_POSTGRES").ok(); + let use_postgres = use_postgres.as_deref(); + let test_db = if use_postgres == Some("true") || use_postgres == Some("1") { + TestDb::postgres(background.clone()) + } else { + TestDb::sqlite(background.clone()) + }; let live_kit_server_id = NEXT_LIVE_KIT_SERVER_ID.fetch_add(1, SeqCst); let live_kit_server = 
live_kit_client::TestServer::create( format!("http://livekit.{}.test", live_kit_server_id), From eec3df09be3825e730b9357b061a9a525f385cb6 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 5 Dec 2022 14:56:01 +0100 Subject: [PATCH 198/240] Upgrade sea-orm --- Cargo.lock | 12 ++++++------ crates/collab/src/db.rs | 20 +++++++------------- 2 files changed, 13 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d1b8a488f2..a75ca972e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3204,9 +3204,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.25.1" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f0455f2c1bc9a7caa792907026e469c1d91761fb0ea37cbb16427c77280cf35" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" dependencies = [ "cc", "pkg-config", @@ -5328,9 +5328,9 @@ dependencies = [ [[package]] name = "sea-orm" -version = "0.10.4" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3120bc435b8640963ffda698f877610e07e077157e216eb99408d819c344034d" +checksum = "28fc9dad132e450d6320bd5953e70fb88b42785080b591e9be804da69bd8a170" dependencies = [ "async-stream", "async-trait", @@ -5356,9 +5356,9 @@ dependencies = [ [[package]] name = "sea-orm-macros" -version = "0.10.4" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c54bacfeb842813c16821e21f9456c358861a448294075184ea1d6307e386d08" +checksum = "66af5d33e04e56dafb2c700f9b1201a39e6c2c77b53ed9ee93244f21f8de6041" dependencies = [ "bae", "heck 0.3.3", diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index dfd1d7e65a..8250a8354f 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -23,9 +23,9 @@ use hyper::StatusCode; use rpc::{proto, ConnectionId}; pub use sea_orm::ConnectOptions; use sea_orm::{ - entity::prelude::*, ActiveValue, ConnectionTrait, DatabaseBackend, DatabaseConnection, - DatabaseTransaction, DbErr, FromQueryResult, IntoActiveModel, JoinType, QueryOrder, - QuerySelect, Statement, TransactionTrait, + entity::prelude::*, ActiveValue, ConnectionTrait, DatabaseConnection, DatabaseTransaction, + DbErr, FromQueryResult, IntoActiveModel, IsolationLevel, JoinType, QueryOrder, QuerySelect, + Statement, TransactionTrait, }; use sea_query::{Alias, Expr, OnConflict, Query}; use serde::{Deserialize, Serialize}; @@ -2211,16 +2211,10 @@ impl Database { F: Send + Fn(TransactionHandle) -> Fut, Fut: Send + Future>, { - let tx = self.pool.begin().await?; - - // In Postgres, serializable transactions are opt-in - if let DatabaseBackend::Postgres = self.pool.get_database_backend() { - tx.execute(Statement::from_string( - DatabaseBackend::Postgres, - "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;".into(), - )) + let tx = self + .pool + .begin_with_config(Some(IsolationLevel::Serializable), None) .await?; - } let mut tx = Arc::new(Some(tx)); let result = f(TransactionHandle(tx.clone())).await; @@ -2584,7 +2578,7 @@ mod test { impl Drop for TestDb { fn drop(&mut self) { let db = self.db.take().unwrap(); - if let DatabaseBackend::Postgres = db.pool.get_database_backend() { + if let sea_orm::DatabaseBackend::Postgres = db.pool.get_database_backend() { db.runtime.as_ref().unwrap().block_on(async { use util::ResultExt; let query = " From b97c35a4686f27054e4df92616d79afd86c15e21 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 5 Dec 2022 15:16:06 +0100 Subject: [PATCH 199/240] Remove project_id foreign key 
from `room_participants` --- .../collab/migrations.sqlite/20221109000000_test_schema.sql | 4 ++-- .../collab/migrations/20221111092550_reconnection_support.sql | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 347db6a71a..90fd8ace12 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -118,8 +118,8 @@ CREATE TABLE "room_participants" ( "answering_connection_id" INTEGER, "answering_connection_epoch" TEXT, "location_kind" INTEGER, - "location_project_id" INTEGER REFERENCES projects (id), - "initial_project_id" INTEGER REFERENCES projects (id), + "location_project_id" INTEGER, + "initial_project_id" INTEGER, "calling_user_id" INTEGER NOT NULL REFERENCES users (id), "calling_connection_id" INTEGER NOT NULL, "calling_connection_epoch" TEXT NOT NULL diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index 6278fa7a59..5e8bada2f9 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -80,8 +80,8 @@ CREATE TABLE "room_participants" ( "answering_connection_id" INTEGER, "answering_connection_epoch" UUID, "location_kind" INTEGER, - "location_project_id" INTEGER REFERENCES projects (id), - "initial_project_id" INTEGER REFERENCES projects (id), + "location_project_id" INTEGER, + "initial_project_id" INTEGER, "calling_user_id" INTEGER NOT NULL REFERENCES users (id), "calling_connection_id" INTEGER NOT NULL, "calling_connection_epoch" UUID NOT NULL From be3fb1e9856e11416963716f367ddfda1ca44163 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 5 Dec 2022 17:57:10 +0100 Subject: [PATCH 200/240] Update sea-orm to fix bug on failure to commit transactions Co-Authored-By: Nathan Sobo --- Cargo.lock | 6 ++---- crates/collab/Cargo.toml | 5 +++-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a75ca972e2..30c5054576 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5329,8 +5329,7 @@ dependencies = [ [[package]] name = "sea-orm" version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28fc9dad132e450d6320bd5953e70fb88b42785080b591e9be804da69bd8a170" +source = "git+https://github.com/zed-industries/sea-orm?rev=18f4c691085712ad014a51792af75a9044bacee6#18f4c691085712ad014a51792af75a9044bacee6" dependencies = [ "async-stream", "async-trait", @@ -5357,8 +5356,7 @@ dependencies = [ [[package]] name = "sea-orm-macros" version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66af5d33e04e56dafb2c700f9b1201a39e6c2c77b53ed9ee93244f21f8de6041" +source = "git+https://github.com/zed-industries/sea-orm?rev=18f4c691085712ad014a51792af75a9044bacee6#18f4c691085712ad014a51792af75a9044bacee6" dependencies = [ "bae", "heck 0.3.3", diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 2238be2257..8725642ae5 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -36,7 +36,8 @@ prometheus = "0.13" rand = "0.8" reqwest = { version = "0.11", features = ["json"], optional = true } scrypt = "0.7" -sea-orm = { version = "0.10", features = ["sqlx-postgres", "postgres-array", "runtime-tokio-rustls"] } +# Remove fork dependency when a version with 
https://github.com/SeaQL/sea-orm/pull/1283 is released. +sea-orm = { git = "https://github.com/zed-industries/sea-orm", rev = "18f4c691085712ad014a51792af75a9044bacee6", features = ["sqlx-postgres", "postgres-array", "runtime-tokio-rustls"] } sea-query = "0.27" serde = { version = "1.0", features = ["derive", "rc"] } serde_json = "1.0" @@ -74,7 +75,7 @@ env_logger = "0.9" log = { version = "0.4.16", features = ["kv_unstable_serde"] } util = { path = "../util" } lazy_static = "1.4" -sea-orm = { version = "0.10", features = ["sqlx-sqlite"] } +sea-orm = { git = "https://github.com/zed-industries/sea-orm", rev = "18f4c691085712ad014a51792af75a9044bacee6", features = ["sqlx-sqlite"] } serde_json = { version = "1.0", features = ["preserve_order"] } sqlx = { version = "0.6", features = ["sqlite"] } unindent = "0.1" From 5443d9cffe17a8faa1299d4852cd4d4c2ff4aa8c Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 5 Dec 2022 18:37:01 +0100 Subject: [PATCH 201/240] Return project collaborators and connection IDs in a `RoomGuard` --- crates/collab/src/db.rs | 20 +++++++--- crates/collab/src/rpc.rs | 81 +++++++++++++++++++++------------------- 2 files changed, 57 insertions(+), 44 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 8250a8354f..915acb00eb 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1981,8 +1981,12 @@ impl Database { &self, project_id: ProjectId, connection_id: ConnectionId, - ) -> Result> { - self.transaction(|tx| async move { + ) -> Result>> { + self.room_transaction(|tx| async move { + let project = project::Entity::find_by_id(project_id) + .one(&*tx) + .await? + .ok_or_else(|| anyhow!("no such project"))?; let collaborators = project_collaborator::Entity::find() .filter(project_collaborator::Column::ProjectId.eq(project_id)) .all(&*tx) @@ -1992,7 +1996,7 @@ impl Database { .iter() .any(|collaborator| collaborator.connection_id == connection_id.0 as i32) { - Ok(collaborators) + Ok((project.room_id, collaborators)) } else { Err(anyhow!("no such project"))? } @@ -2004,13 +2008,17 @@ impl Database { &self, project_id: ProjectId, connection_id: ConnectionId, - ) -> Result> { - self.transaction(|tx| async move { + ) -> Result>> { + self.room_transaction(|tx| async move { #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] enum QueryAs { ConnectionId, } + let project = project::Entity::find_by_id(project_id) + .one(&*tx) + .await? + .ok_or_else(|| anyhow!("no such project"))?; let mut db_connection_ids = project_collaborator::Entity::find() .select_only() .column_as( @@ -2028,7 +2036,7 @@ impl Database { } if connection_ids.contains(&connection_id) { - Ok(connection_ids) + Ok((project.room_id, connection_ids)) } else { Err(anyhow!("no such project"))? 
} diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 7f404feffe..79544de6fb 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1245,7 +1245,7 @@ async fn update_language_server( .await?; broadcast( session.connection_id, - project_connection_ids, + project_connection_ids.iter().copied(), |connection_id| { session .peer @@ -1264,23 +1264,24 @@ where T: EntityMessage + RequestMessage, { let project_id = ProjectId::from_proto(request.remote_entity_id()); - let collaborators = session - .db() - .await - .project_collaborators(project_id, session.connection_id) - .await?; - let host = collaborators - .iter() - .find(|collaborator| collaborator.is_host) - .ok_or_else(|| anyhow!("host not found"))?; + let host_connection_id = { + let collaborators = session + .db() + .await + .project_collaborators(project_id, session.connection_id) + .await?; + ConnectionId( + collaborators + .iter() + .find(|collaborator| collaborator.is_host) + .ok_or_else(|| anyhow!("host not found"))? + .connection_id as u32, + ) + }; let payload = session .peer - .forward_request( - session.connection_id, - ConnectionId(host.connection_id as u32), - request, - ) + .forward_request(session.connection_id, host_connection_id, request) .await?; response.send(payload)?; @@ -1293,16 +1294,18 @@ async fn save_buffer( session: Session, ) -> Result<()> { let project_id = ProjectId::from_proto(request.project_id); - let collaborators = session - .db() - .await - .project_collaborators(project_id, session.connection_id) - .await?; - let host = collaborators - .into_iter() - .find(|collaborator| collaborator.is_host) - .ok_or_else(|| anyhow!("host not found"))?; - let host_connection_id = ConnectionId(host.connection_id as u32); + let host_connection_id = { + let collaborators = session + .db() + .await + .project_collaborators(project_id, session.connection_id) + .await?; + let host = collaborators + .iter() + .find(|collaborator| collaborator.is_host) + .ok_or_else(|| anyhow!("host not found"))?; + ConnectionId(host.connection_id as u32) + }; let response_payload = session .peer .forward_request(session.connection_id, host_connection_id, request.clone()) @@ -1316,7 +1319,7 @@ async fn save_buffer( collaborators .retain(|collaborator| collaborator.connection_id != session.connection_id.0 as i32); let project_connection_ids = collaborators - .into_iter() + .iter() .map(|collaborator| ConnectionId(collaborator.connection_id as u32)); broadcast(host_connection_id, project_connection_ids, |conn_id| { session @@ -1353,7 +1356,7 @@ async fn update_buffer( broadcast( session.connection_id, - project_connection_ids, + project_connection_ids.iter().copied(), |connection_id| { session .peer @@ -1374,7 +1377,7 @@ async fn update_buffer_file(request: proto::UpdateBufferFile, session: Session) broadcast( session.connection_id, - project_connection_ids, + project_connection_ids.iter().copied(), |connection_id| { session .peer @@ -1393,7 +1396,7 @@ async fn buffer_reloaded(request: proto::BufferReloaded, session: Session) -> Re .await?; broadcast( session.connection_id, - project_connection_ids, + project_connection_ids.iter().copied(), |connection_id| { session .peer @@ -1412,7 +1415,7 @@ async fn buffer_saved(request: proto::BufferSaved, session: Session) -> Result<( .await?; broadcast( session.connection_id, - project_connection_ids, + project_connection_ids.iter().copied(), |connection_id| { session .peer @@ -1430,14 +1433,16 @@ async fn follow( let project_id = ProjectId::from_proto(request.project_id); 
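+    // The check below is scoped in a block so the RoomGuard returned by
+    // project_connection_ids (which holds the room's lock while it is alive)
+    // is dropped before the follow request is forwarded to the leader.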
let leader_id = ConnectionId(request.leader_id); let follower_id = session.connection_id; - let project_connection_ids = session - .db() - .await - .project_connection_ids(project_id, session.connection_id) - .await?; + { + let project_connection_ids = session + .db() + .await + .project_connection_ids(project_id, session.connection_id) + .await?; - if !project_connection_ids.contains(&leader_id) { - Err(anyhow!("no such peer"))?; + if !project_connection_ids.contains(&leader_id) { + Err(anyhow!("no such peer"))?; + } } let mut response_payload = session @@ -1691,7 +1696,7 @@ async fn update_diff_base(request: proto::UpdateDiffBase, session: Session) -> R .await?; broadcast( session.connection_id, - project_connection_ids, + project_connection_ids.iter().copied(), |connection_id| { session .peer From 7bbd97cfb96ca176d345831beb490fc6a7b2c76a Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 5 Dec 2022 19:07:06 +0100 Subject: [PATCH 202/240] Send diagnostic summaries synchronously --- crates/collab/src/rpc.rs | 10 +++---- crates/project/src/worktree.rs | 48 ++++++++++++++++------------------ crates/rpc/src/proto.rs | 1 - 3 files changed, 26 insertions(+), 33 deletions(-) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 79544de6fb..0136a5fec6 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -201,7 +201,7 @@ impl Server { .add_request_handler(update_worktree) .add_message_handler(start_language_server) .add_message_handler(update_language_server) - .add_request_handler(update_diagnostic_summary) + .add_message_handler(update_diagnostic_summary) .add_request_handler(forward_project_request::) .add_request_handler(forward_project_request::) .add_request_handler(forward_project_request::) @@ -1187,14 +1187,13 @@ async fn update_worktree( } async fn update_diagnostic_summary( - request: proto::UpdateDiagnosticSummary, - response: Response, + message: proto::UpdateDiagnosticSummary, session: Session, ) -> Result<()> { let guest_connection_ids = session .db() .await - .update_diagnostic_summary(&request, session.connection_id) + .update_diagnostic_summary(&message, session.connection_id) .await?; broadcast( @@ -1203,11 +1202,10 @@ async fn update_diagnostic_summary( |connection_id| { session .peer - .forward_send(session.connection_id, connection_id, request.clone()) + .forward_send(session.connection_id, connection_id, message.clone()) }, ); - response.send(proto::Ack {})?; Ok(()) } diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 409f65f786..4781e17541 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -168,9 +168,7 @@ enum ScanState { struct ShareState { project_id: u64, snapshots_tx: watch::Sender, - diagnostic_summaries_tx: mpsc::UnboundedSender<(Arc, DiagnosticSummary)>, _maintain_remote_snapshot: Task>, - _maintain_remote_diagnostic_summaries: Task<()>, } pub enum Event { @@ -532,9 +530,18 @@ impl LocalWorktree { let updated = !old_summary.is_empty() || !new_summary.is_empty(); if updated { if let Some(share) = self.share.as_ref() { - let _ = share - .diagnostic_summaries_tx - .unbounded_send((worktree_path.clone(), new_summary)); + self.client + .send(proto::UpdateDiagnosticSummary { + project_id: share.project_id, + worktree_id: self.id().to_proto(), + summary: Some(proto::DiagnosticSummary { + path: worktree_path.to_string_lossy().to_string(), + language_server_id: language_server_id as u64, + error_count: new_summary.error_count as u32, + warning_count: 
new_summary.warning_count as u32, + }), + }) + .log_err(); } } @@ -968,6 +975,16 @@ impl LocalWorktree { let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot()); let worktree_id = cx.model_id() as u64; + for (path, summary) in self.diagnostic_summaries.iter() { + if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary { + project_id, + worktree_id, + summary: Some(summary.to_proto(&path.0)), + }) { + return Task::ready(Err(e)); + } + } + let maintain_remote_snapshot = cx.background().spawn({ let rpc = self.client.clone(); async move { @@ -1017,31 +1034,10 @@ impl LocalWorktree { .log_err() }); - let (diagnostic_summaries_tx, mut diagnostic_summaries_rx) = mpsc::unbounded(); - for (path, summary) in self.diagnostic_summaries.iter() { - let _ = diagnostic_summaries_tx.unbounded_send((path.0.clone(), summary.clone())); - } - let maintain_remote_diagnostic_summaries = cx.background().spawn({ - let rpc = self.client.clone(); - async move { - while let Some((path, summary)) = diagnostic_summaries_rx.next().await { - rpc.request(proto::UpdateDiagnosticSummary { - project_id, - worktree_id, - summary: Some(summary.to_proto(&path)), - }) - .await - .log_err(); - } - } - }); - self.share = Some(ShareState { project_id, snapshots_tx, - diagnostic_summaries_tx, _maintain_remote_snapshot: maintain_remote_snapshot, - _maintain_remote_diagnostic_summaries: maintain_remote_diagnostic_summaries, }); } diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index 50f3c57f2a..6d9bc9a0aa 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -228,7 +228,6 @@ request_messages!( (ShareProject, ShareProjectResponse), (Test, Test), (UpdateBuffer, Ack), - (UpdateDiagnosticSummary, Ack), (UpdateParticipantLocation, Ack), (UpdateProject, Ack), (UpdateWorktree, Ack), From cd08d289aa8e9790d7ed4b1acf55e59c600ddc01 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 5 Dec 2022 19:45:56 +0100 Subject: [PATCH 203/240] Fix warnings --- crates/workspace/src/workspace.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 45de94b603..a0c353b3f8 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -1031,8 +1031,10 @@ impl Workspace { RemoveWorktreeFromProject(worktree_id): &RemoveWorktreeFromProject, cx: &mut ViewContext, ) { - self.project + let future = self + .project .update(cx, |project, cx| project.remove_worktree(*worktree_id, cx)); + cx.foreground().spawn(future).detach(); } fn project_path_for_path( @@ -2862,9 +2864,9 @@ mod tests { ); // Remove a project folder - project.update(cx, |project, cx| { - project.remove_worktree(worktree_id, cx); - }); + project + .update(cx, |project, cx| project.remove_worktree(worktree_id, cx)) + .await; assert_eq!( cx.current_window_title(window_id).as_deref(), Some("one.txt — root2") From 30872d399203643d86cbac3f750e95b8ac90437b Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sat, 5 Nov 2022 16:18:03 -0700 Subject: [PATCH 204/240] Added experimental support for scheme, racket, and commonlisp --- Cargo.lock | 30 +++ crates/zed/Cargo.toml | 3 + crates/zed/src/languages.rs | 4 + .../zed/src/languages/commonlisp/config.toml | 9 + crates/zed/src/languages/commonlisp/folds.scm | 3 + .../src/languages/commonlisp/highlights.scm | 189 ++++++++++++++++++ crates/zed/src/languages/installation.rs | 21 ++ crates/zed/src/languages/racket/config.toml | 9 + crates/zed/src/languages/racket/folds.scm | 3 + 
.../zed/src/languages/racket/highlights.scm | 140 +++++++++++++ .../zed/src/languages/racket/injections.scm | 4 + crates/zed/src/languages/ruby/brackets.scm | 14 -- crates/zed/src/languages/scheme/config.toml | 9 + crates/zed/src/languages/scheme/folds.scm | 3 + .../zed/src/languages/scheme/highlights.scm | 183 +++++++++++++++++ .../zed/src/languages/scheme/injections.scm | 3 + 16 files changed, 613 insertions(+), 14 deletions(-) create mode 100644 crates/zed/src/languages/commonlisp/config.toml create mode 100644 crates/zed/src/languages/commonlisp/folds.scm create mode 100644 crates/zed/src/languages/commonlisp/highlights.scm create mode 100644 crates/zed/src/languages/racket/config.toml create mode 100644 crates/zed/src/languages/racket/folds.scm create mode 100644 crates/zed/src/languages/racket/highlights.scm create mode 100644 crates/zed/src/languages/racket/injections.scm create mode 100644 crates/zed/src/languages/scheme/config.toml create mode 100644 crates/zed/src/languages/scheme/folds.scm create mode 100644 crates/zed/src/languages/scheme/highlights.scm create mode 100644 crates/zed/src/languages/scheme/injections.scm diff --git a/Cargo.lock b/Cargo.lock index 4312b7e830..82fcd2edf4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6461,6 +6461,15 @@ dependencies = [ "tree-sitter", ] +[[package]] +name = "tree-sitter-commonlisp" +version = "0.3.0" +source = "git+https://github.com/theHamsta/tree-sitter-commonlisp?rev=c7e814975ab0d0d04333d1f32391c41180c58919#c7e814975ab0d0d04333d1f32391c41180c58919" +dependencies = [ + "cc", + "tree-sitter", +] + [[package]] name = "tree-sitter-cpp" version = "0.20.0" @@ -6566,6 +6575,15 @@ dependencies = [ "tree-sitter", ] +[[package]] +name = "tree-sitter-racket" +version = "0.0.1" +source = "git+https://github.com/6cdh/tree-sitter-racket?rev=69ca563af3bcf9d67220532e0814786f2dc34db1#69ca563af3bcf9d67220532e0814786f2dc34db1" +dependencies = [ + "cc", + "tree-sitter", +] + [[package]] name = "tree-sitter-ruby" version = "0.20.0" @@ -6586,6 +6604,15 @@ dependencies = [ "tree-sitter", ] +[[package]] +name = "tree-sitter-scheme" +version = "0.2.0" +source = "git+https://github.com/6cdh/tree-sitter-scheme?rev=af0fd1fa452cb2562dc7b5c8a8c55551c39273b9#af0fd1fa452cb2562dc7b5c8a8c55551c39273b9" +dependencies = [ + "cc", + "tree-sitter", +] + [[package]] name = "tree-sitter-toml" version = "0.5.1" @@ -7796,6 +7823,7 @@ dependencies = [ "toml", "tree-sitter", "tree-sitter-c", + "tree-sitter-commonlisp", "tree-sitter-cpp", "tree-sitter-css", "tree-sitter-elixir", @@ -7805,8 +7833,10 @@ dependencies = [ "tree-sitter-json 0.20.0", "tree-sitter-markdown", "tree-sitter-python", + "tree-sitter-racket", "tree-sitter-ruby", "tree-sitter-rust", + "tree-sitter-scheme", "tree-sitter-toml", "tree-sitter-typescript", "unindent", diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index 7fef0aafcf..755d876bec 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -104,6 +104,9 @@ tree-sitter-toml = { git = "https://github.com/tree-sitter/tree-sitter-toml", re tree-sitter-typescript = "0.20.1" tree-sitter-ruby = "0.20.0" tree-sitter-html = "0.19.0" +tree-sitter-scheme = { git = "https://github.com/6cdh/tree-sitter-scheme", rev = "af0fd1fa452cb2562dc7b5c8a8c55551c39273b9"} +tree-sitter-racket = { git = "https://github.com/6cdh/tree-sitter-racket", rev = "69ca563af3bcf9d67220532e0814786f2dc34db1"} +tree-sitter-commonlisp = { git = "https://github.com/theHamsta/tree-sitter-commonlisp", rev = "c7e814975ab0d0d04333d1f32391c41180c58919" } url = "2.2" 
[dev-dependencies]
diff --git a/crates/zed/src/languages.rs b/crates/zed/src/languages.rs
index 4c33e7329f..03df3fbab4 100644
--- a/crates/zed/src/languages.rs
+++ b/crates/zed/src/languages.rs
@@ -14,6 +14,7 @@ mod language_plugin;
 mod python;
 mod ruby;
 mod rust;
+
 mod typescript;
 
 // 1. Add tree-sitter-{language} parser to zed crate
@@ -127,6 +128,9 @@ pub async fn init(languages: Arc<LanguageRegistry>, _executor: Arc<Background>)
             tree_sitter_embedded_template::language(),
             Some(CachedLspAdapter::new(ruby::RubyLanguageServer).await),
         ),
+        ("scheme", tree_sitter_scheme::language(), None),
+        // ("racket", tree_sitter_racket::language(), None),
+        ("commonlisp", tree_sitter_commonlisp::language(), None),
     ] {
         languages.add(language(name, grammar, lsp_adapter));
     }
diff --git a/crates/zed/src/languages/commonlisp/config.toml b/crates/zed/src/languages/commonlisp/config.toml
new file mode 100644
index 0000000000..a8200f5d32
--- /dev/null
+++ b/crates/zed/src/languages/commonlisp/config.toml
@@ -0,0 +1,9 @@
+name = "Common Lisp"
+path_suffixes = ["lisp", "lsp", "l", "cl"]
+line_comment = "; "
+autoclose_before = "])"
+brackets = [
+  { start = "[", end = "]", close = true, newline = true },
+  { start = "(", end = ")", close = true, newline = true },
+  { start = "'", end = "'", close = false, newline = false },
+]
diff --git a/crates/zed/src/languages/commonlisp/folds.scm b/crates/zed/src/languages/commonlisp/folds.scm
new file mode 100644
index 0000000000..c140fa39d8
--- /dev/null
+++ b/crates/zed/src/languages/commonlisp/folds.scm
@@ -0,0 +1,3 @@
+;; Copied from nvim: https://github.com/nvim-treesitter/nvim-treesitter/blob/master/queries/commonlisp/folds.scm
+
+(source (list_lit) @fold)
\ No newline at end of file
diff --git a/crates/zed/src/languages/commonlisp/highlights.scm b/crates/zed/src/languages/commonlisp/highlights.scm
new file mode 100644
index 0000000000..74b1b8399e
--- /dev/null
+++ b/crates/zed/src/languages/commonlisp/highlights.scm
@@ -0,0 +1,189 @@
+;; Copied from nvim: https://raw.githubusercontent.com/nvim-treesitter/nvim-treesitter/master/queries/commonlisp/highlights.scm
+
+(sym_lit) @variable
+
+;; A highlighting for functions/macros in the cl namespace is available in theHamsta/nvim-treesitter-commonlisp
+;(list_lit . (sym_lit) @function.builtin (#cl-standard-function? @function.builtin))
+;(list_lit . (sym_lit) @function.builtin (#cl-standard-macro? @function.macro))
+
+(dis_expr) @comment
+
+(defun_keyword) @function.macro
+(defun_header
+  function_name: (_) @function)
+(defun_header
+  lambda_list: (list_lit (sym_lit) @parameter))
+(defun_header
+  keyword: (defun_keyword "defmethod")
+  lambda_list: (list_lit (list_lit . (sym_lit) . (sym_lit) @symbol)))
+(defun_header
+  lambda_list: (list_lit (list_lit . (sym_lit) @parameter . (_))))
+(defun_header
+  specifier: (sym_lit) @symbol)
+
+[":" "::" "."] @punctuation.special
+
+[
+ (accumulation_verb)
+ (for_clause_word)
+ "for"
+ "and"
+ "finally"
+ "thereis"
+ "always"
+ "when"
+ "if"
+ "unless"
+ "else"
+ "do"
+ "loop"
+ "below"
+ "in"
+ "from"
+ "across"
+ "repeat"
+ "being"
+ "into"
+ "with"
+ "as"
+ "while"
+ "until"
+ "return"
+ "initially"
+] @function.macro
+"=" @operator
+
+(include_reader_macro) @symbol
+["#C" "#c"] @number
+
+[(kwd_lit) (self_referential_reader_macro)] @symbol
+
+(package_lit
+  package: (_) @namespace)
+"cl" @namespace
+
+(str_lit) @string
+
+(num_lit) @number
+
+((sym_lit) @boolean (#match? @boolean "^(t|T)$"))
+
+(nil_lit) @constant.builtin
+
+(comment) @comment
+
+;; dynamic variables
+((sym_lit) @variable.builtin
+ (#match?
@variable.builtin "^[*].+[*]$"))
+
+;; quote
+"'" @string.escape
+(format_specifier) @string.escape
+(quoting_lit) @string.escape
+
+;; syntax quote
+"`" @string.escape
+"," @string.escape
+",@" @string.escape
+(syn_quoting_lit) @string.escape
+(unquoting_lit) @none
+(unquote_splicing_lit) @none
+
+
+["(" ")"] @punctuation.bracket
+
+(block_comment) @comment
+
+
+(with_clause
+  type: (_) @type)
+(for_clause
+  type: (_) @type)
+
+;; defun-like things
+(list_lit
+ .
+ (sym_lit) @function.macro
+ .
+ (sym_lit) @function
+ (#eq? @function.macro "deftest"))
+
+;;; Macros and Special Operators
+(list_lit
+ .
+ (sym_lit) @function.macro
+ ;; For a complete and more efficient version install theHamsta/nvim-treesitter-commonlisp
+ (#any-of? @function.macro
+  "let"
+  "function"
+  "the"
+  "unwind-protect"
+  "labels"
+  "flet"
+  "tagbody"
+  "go"
+  "symbol-macrolet"
+  "symbol-macrolet"
+  "progn"
+  "prog1"
+  "error"
+  "or"
+  "and"
+  "defvar"
+  "defparameter"
+  "in-package"
+  "defpackage"
+  "case"
+  "ecase"
+  "typecase"
+  "etypecase"
+  "defstruct"
+  "defclass"
+  "if"
+  "when"
+  "unless"
+  "cond"
+  "switch"
+  "declaim"
+  "optimize"))
+
+;; constant
+((sym_lit) @constant
+ (#match? @constant "^[+].+[+]$"))
+
+(var_quoting_lit
+  marker: "#'" @symbol
+  value: (_) @symbol)
+
+["#" "#p" "#P"] @symbol
+
+(list_lit
+ .
+ (sym_lit) @function.builtin
+ ;; For a complete and more efficient version install theHamsta/nvim-treesitter-commonlisp
+ (#any-of? @function.builtin
+  "mapcar"
+  "reduce"
+  "remove-if-not"
+  "cons"
+  "car"
+  "last"
+  "nth"
+  "equal"
+  "cdr"
+  "first"
+  "rest"
+  "format"))
+
+(list_lit
+ .
+ (sym_lit) @operator
+ (#match? @operator "^([+*-+=<>]|<=|>=|/=)$"))
+
+
+((sym_lit) @symbol
+(#match? @symbol "^[&]"))
+
+[(array_dimension) "#0A" "#0a"] @number
+
+(char_lit) @character
diff --git a/crates/zed/src/languages/installation.rs b/crates/zed/src/languages/installation.rs
index 40edbb88d7..2e5705d5a0 100644
--- a/crates/zed/src/languages/installation.rs
+++ b/crates/zed/src/languages/installation.rs
@@ -35,6 +35,18 @@ pub(crate) struct GithubReleaseAsset {
     pub browser_download_url: String,
 }
 
+#[derive(Deserialize)]
+pub(crate) struct CodebergReleaseAsset {
+    pub name: String,
+    pub assets: Vec<CodebergRelease>,
+}
+
+#[derive(Deserialize)]
+pub(crate) struct CodebergRelease {
+    pub name: String,
+    pub browser_download_url: String,
+}
+
 pub async fn npm_package_latest_version(name: &str) -> Result<String> {
     let output = smol::process::Command::new("npm")
         .args(["info", name, "--json"])
@@ -103,3 +115,12 @@ pub(crate) async fn latest_github_release(
         serde_json::from_slice(body.as_slice()).context("error deserializing latest release")?;
     Ok(release)
 }
+
+// pub(crate) async fn latest_codeberg_release(
+//     repo_name_with_owner: &str,
+//     http: Arc<dyn HttpClient>,
+// ) -> anyhow::Result<CodebergReleaseAsset> {
+//     let mut response = http.get(uri, body, follow_redirects);
+
+//     bail!("unimplemented :(");
+// }
diff --git a/crates/zed/src/languages/racket/config.toml b/crates/zed/src/languages/racket/config.toml
new file mode 100644
index 0000000000..33dd539c41
--- /dev/null
+++ b/crates/zed/src/languages/racket/config.toml
@@ -0,0 +1,9 @@
+name = "Racket"
+path_suffixes = ["rkt"]
+line_comment = "; "
+autoclose_before = "])"
+brackets = [
+  { start = "[", end = "]", close = true, newline = true },
+  { start = "(", end = ")", close = true, newline = true },
+  { start = "'", end = "'", close = false, newline = false },
+]
diff --git a/crates/zed/src/languages/racket/folds.scm b/crates/zed/src/languages/racket/folds.scm
new file mode 100644
index
0000000000..d85ef45cfb --- /dev/null +++ b/crates/zed/src/languages/racket/folds.scm @@ -0,0 +1,3 @@ +; Copied from nvim: https://github.com/nvim-treesitter/nvim-treesitter/blob/master/queries/racket/folds.scm + +(program (list) @fold) \ No newline at end of file diff --git a/crates/zed/src/languages/racket/highlights.scm b/crates/zed/src/languages/racket/highlights.scm new file mode 100644 index 0000000000..b410479529 --- /dev/null +++ b/crates/zed/src/languages/racket/highlights.scm @@ -0,0 +1,140 @@ +;; Copied from nvim: https://raw.githubusercontent.com/nvim-treesitter/nvim-treesitter/master/queries/racket/highlights.scm + +;; A highlight query can override the highlights queries before it. +;; So the order is important. +;; We should highlight general rules, then highlight special forms. + +;;------------------------------------------------------------------;; +;; Basic highlights ;; +;;------------------------------------------------------------------;; + +(ERROR) @error + +;; basic ;; + +(number) @number +(character) @character +(boolean) @boolean +(keyword) @symbol + +;; string ;; + +[(string) + (here_string) + (byte_string)] @string + +(escape_sequence) @string.escape + +(regex) @string.regex + +;; comment ;; + +[(comment) + (block_comment) + (sexp_comment)] @comment + +;; symbol ;; + +(symbol) @variable + +((symbol) @comment + (#match? @comment "^#[cC][iIsS]$")) + +;; extension ;; + +(extension) @keyword +(lang_name) @variable.builtin + +;; quote ;; + +(quote) @symbol + +;; list ;; + +["(" ")" "[" "]" "{" "}"] @punctuation.bracket + +;; procedure ;; + +(list + . + (symbol) @function) + +;;------------------------------------------------------------------;; +;; Builtin highlights ;; +;;------------------------------------------------------------------;; + +;; The following lists are generated by a racket script: +;; https://gist.github.com/6cdh/65619e761753eb4166d15185a6236040 +;; Don't edit them directly. + +;; keyword ;; + +(list + . + (symbol) @keyword + (#any-of? @keyword + "#%app" "#%datum" "#%declare" "#%expression" "#%module-begin" "#%plain-app" "#%plain-lambda" "#%plain-module-begin" "#%printing-module-begin" "#%provide" "#%require" "#%stratified-body" "#%top" "#%top-interaction" "#%variable-reference" "->" "->*" "->*m" "->d" "->dm" "->i" "->m" "..." 
":do-in" "==" "=>" "_" "absent" "abstract" "all-defined-out" "all-from-out" "and" "any" "augment" "augment*" "augment-final" "augment-final*" "augride" "augride*" "begin" "begin-for-syntax" "begin0" "case" "case->" "case->m" "case-lambda" "class" "class*" "class-field-accessor" "class-field-mutator" "class/c" "class/derived" "combine-in" "combine-out" "command-line" "compound-unit" "compound-unit/infer" "cond" "cons/dc" "contract" "contract-out" "contract-pos/neg-doubling" "contract-struct" "contracted" "current-contract-region" "define" "define-compound-unit" "define-compound-unit/infer" "define-contract-struct" "define-custom-hash-types" "define-custom-set-types" "define-for-syntax" "define-local-member-name" "define-logger" "define-match-expander" "define-member-name" "define-module-boundary-contract" "define-namespace-anchor" "define-opt/c" "define-sequence-syntax" "define-serializable-class" "define-serializable-class*" "define-signature" "define-signature-form" "define-splicing-for-clause-syntax" "define-struct" "define-struct/contract" "define-struct/derived" "define-syntax" "define-syntax-rule" "define-syntaxes" "define-unit" "define-unit-binding" "define-unit-from-context" "define-unit/contract" "define-unit/new-import-export" "define-unit/s" "define-values" "define-values-for-export" "define-values-for-syntax" "define-values/invoke-unit" "define-values/invoke-unit/infer" "define/augment" "define/augment-final" "define/augride" "define/contract" "define/final-prop" "define/match" "define/overment" "define/override" "define/override-final" "define/private" "define/public" "define/public-final" "define/pubment" "define/subexpression-pos-prop" "define/subexpression-pos-prop/name" "delay" "delay/idle" "delay/name" "delay/strict" "delay/sync" "delay/thread" "do" "else" "except" "except-in" "except-out" "export" "extends" "failure-cont" "field" "field-bound?" 
"file" "flat-murec-contract" "flat-rec-contract" "for" "for*" "for*/and" "for*/async" "for*/first" "for*/fold" "for*/fold/derived" "for*/foldr" "for*/foldr/derived" "for*/hash" "for*/hasheq" "for*/hasheqv" "for*/last" "for*/list" "for*/lists" "for*/mutable-set" "for*/mutable-seteq" "for*/mutable-seteqv" "for*/or" "for*/product" "for*/set" "for*/seteq" "for*/seteqv" "for*/stream" "for*/sum" "for*/vector" "for*/weak-set" "for*/weak-seteq" "for*/weak-seteqv" "for-label" "for-meta" "for-space" "for-syntax" "for-template" "for/and" "for/async" "for/first" "for/fold" "for/fold/derived" "for/foldr" "for/foldr/derived" "for/hash" "for/hasheq" "for/hasheqv" "for/last" "for/list" "for/lists" "for/mutable-set" "for/mutable-seteq" "for/mutable-seteqv" "for/or" "for/product" "for/set" "for/seteq" "for/seteqv" "for/stream" "for/sum" "for/vector" "for/weak-set" "for/weak-seteq" "for/weak-seteqv" "gen:custom-write" "gen:dict" "gen:equal+hash" "gen:set" "gen:stream" "generic" "get-field" "hash/dc" "if" "implies" "import" "include" "include-at/relative-to" "include-at/relative-to/reader" "include/reader" "inherit" "inherit-field" "inherit/inner" "inherit/super" "init" "init-depend" "init-field" "init-rest" "inner" "inspect" "instantiate" "interface" "interface*" "invariant-assertion" "invoke-unit" "invoke-unit/infer" "lambda" "lazy" "let" "let*" "let*-values" "let-syntax" "let-syntaxes" "let-values" "let/cc" "let/ec" "letrec" "letrec-syntax" "letrec-syntaxes" "letrec-syntaxes+values" "letrec-values" "lib" "link" "local" "local-require" "log-debug" "log-error" "log-fatal" "log-info" "log-warning" "match" "match*" "match*/derived" "match-define" "match-define-values" "match-lambda" "match-lambda*" "match-lambda**" "match-let" "match-let*" "match-let*-values" "match-let-values" "match-letrec" "match-letrec-values" "match/derived" "match/values" "member-name-key" "mixin" "module" "module*" "module+" "nand" "new" "nor" "object-contract" "object/c" "only" "only-in" "only-meta-in" "only-space-in" "open" "opt/c" "or" "overment" "overment*" "override" "override*" "override-final" "override-final*" "parameterize" "parameterize*" "parameterize-break" "parametric->/c" "place" "place*" "place/context" "planet" "prefix" "prefix-in" "prefix-out" "private" "private*" "prompt-tag/c" "prop:dict/contract" "protect-out" "provide" "provide-signature-elements" "provide/contract" "public" "public*" "public-final" "public-final*" "pubment" "pubment*" "quasiquote" "quasisyntax" "quasisyntax/loc" "quote" "quote-syntax" "quote-syntax/prune" "recontract-out" "recursive-contract" "relative-in" "rename" "rename-in" "rename-inner" "rename-out" "rename-super" "require" "send" "send*" "send+" "send-generic" "send/apply" "send/keyword-apply" "set!" "set!-values" "set-field!" "shared" "stream" "stream*" "stream-cons" "stream-lazy" "struct" "struct*" "struct-copy" "struct-field-index" "struct-guard/c" "struct-out" "struct/c" "struct/contract" "struct/ctc" "struct/dc" "struct/derived" "submod" "super" "super-instantiate" "super-make-object" "super-new" "syntax" "syntax-case" "syntax-case*" "syntax-id-rules" "syntax-rules" "syntax/loc" "tag" "this" "this%" "thunk" "thunk*" "time" "unconstrained-domain->" "unit" "unit-from-context" "unit/c" "unit/new-import-export" "unit/s" "unless" "unquote" "unquote-splicing" "unsyntax" "unsyntax-splicing" "values/drop" "when" "with-continuation-mark" "with-contract" "with-contract-continuation-mark" "with-handlers" "with-handlers*" "with-method" "with-syntax" "~?" 
"~@" "λ" + )) + +;; builtin procedures + +((symbol) @function.builtin + (#any-of? @function.builtin + "*" "*list/c" "+" "-" "/" "<" "" ">/c" ">=" ">=/c" "abort-current-continuation" "abs" "absolute-path?" "acos" "add-between" "add1" "alarm-evt" "and/c" "andmap" "angle" "any/c" "append" "append*" "append-map" "apply" "argmax" "argmin" "arithmetic-shift" "arity-at-least" "arity-at-least-value" "arity-at-least?" "arity-checking-wrapper" "arity-includes?" "arity=?" "arrow-contract-info" "arrow-contract-info-accepts-arglist" "arrow-contract-info-chaperone-procedure" "arrow-contract-info-check-first-order" "arrow-contract-info?" "asin" "assert-unreachable" "assf" "assoc" "assq" "assv" "atan" "bad-number-of-results" "banner" "base->-doms/c" "base->-rngs/c" "base->?" "between/c" "bitwise-and" "bitwise-bit-field" "bitwise-bit-set?" "bitwise-ior" "bitwise-not" "bitwise-xor" "blame-add-car-context" "blame-add-cdr-context" "blame-add-context" "blame-add-missing-party" "blame-add-nth-arg-context" "blame-add-range-context" "blame-add-unknown-context" "blame-context" "blame-contract" "blame-fmt->-string" "blame-missing-party?" "blame-negative" "blame-original?" "blame-positive" "blame-replace-negative" "blame-replaced-negative?" "blame-source" "blame-swap" "blame-swapped?" "blame-update" "blame-value" "blame?" "boolean=?" "boolean?" "bound-identifier=?" "box" "box-cas!" "box-immutable" "box-immutable/c" "box/c" "box?" "break-enabled" "break-parameterization?" "break-thread" "build-chaperone-contract-property" "build-compound-type-name" "build-contract-property" "build-flat-contract-property" "build-list" "build-path" "build-path/convention-type" "build-string" "build-vector" "byte-pregexp" "byte-pregexp?" "byte-ready?" "byte-regexp" "byte-regexp?" "byte?" "bytes" "bytes->immutable-bytes" "bytes->list" "bytes->path" "bytes->path-element" "bytes->string/latin-1" "bytes->string/locale" "bytes->string/utf-8" "bytes-append" "bytes-append*" "bytes-close-converter" "bytes-convert" "bytes-convert-end" "bytes-converter?" "bytes-copy" "bytes-copy!" "bytes-environment-variable-name?" "bytes-fill!" "bytes-join" "bytes-length" "bytes-no-nuls?" "bytes-open-converter" "bytes-ref" "bytes-set!" "bytes-utf-8-index" "bytes-utf-8-length" "bytes-utf-8-ref" "bytes?" "bytes?" "caaaar" "caaadr" "caaar" "caadar" "caaddr" "caadr" "caar" "cadaar" "cadadr" "cadar" "caddar" "cadddr" "caddr" "cadr" "call-in-continuation" "call-in-nested-thread" "call-with-atomic-output-file" "call-with-break-parameterization" "call-with-composable-continuation" "call-with-continuation-barrier" "call-with-continuation-prompt" "call-with-current-continuation" "call-with-default-reading-parameterization" "call-with-escape-continuation" "call-with-exception-handler" "call-with-file-lock/timeout" "call-with-immediate-continuation-mark" "call-with-input-bytes" "call-with-input-file" "call-with-input-file*" "call-with-input-string" "call-with-output-bytes" "call-with-output-file" "call-with-output-file*" "call-with-output-string" "call-with-parameterization" "call-with-semaphore" "call-with-semaphore/enable-break" "call-with-values" "call/cc" "call/ec" "car" "cartesian-product" "cdaaar" "cdaadr" "cdaar" "cdadar" "cdaddr" "cdadr" "cdar" "cddaar" "cddadr" "cddar" "cdddar" "cddddr" "cdddr" "cddr" "cdr" "ceiling" "channel-get" "channel-put" "channel-put-evt" "channel-put-evt?" "channel-try-get" "channel/c" "channel?" "chaperone-box" "chaperone-channel" "chaperone-continuation-mark-key" "chaperone-contract-property?" "chaperone-contract?" 
"chaperone-evt" "chaperone-hash" "chaperone-hash-set" "chaperone-of?" "chaperone-procedure" "chaperone-procedure*" "chaperone-prompt-tag" "chaperone-struct" "chaperone-struct-type" "chaperone-vector" "chaperone-vector*" "chaperone?" "char->integer" "char-alphabetic?" "char-blank?" "char-ci<=?" "char-ci=?" "char-ci>?" "char-downcase" "char-foldcase" "char-general-category" "char-graphic?" "char-in" "char-in/c" "char-iso-control?" "char-lower-case?" "char-numeric?" "char-punctuation?" "char-ready?" "char-symbolic?" "char-title-case?" "char-titlecase" "char-upcase" "char-upper-case?" "char-utf-8-length" "char-whitespace?" "char<=?" "char=?" "char>?" "char?" "check-duplicate-identifier" "check-duplicates" "checked-procedure-check-and-extract" "choice-evt" "class->interface" "class-info" "class-seal" "class-unseal" "class?" "cleanse-path" "close-input-port" "close-output-port" "coerce-chaperone-contract" "coerce-chaperone-contracts" "coerce-contract" "coerce-contract/f" "coerce-contracts" "coerce-flat-contract" "coerce-flat-contracts" "collect-garbage" "collection-file-path" "collection-path" "combinations" "combine-output" "compile" "compile-allow-set!-undefined" "compile-context-preservation-enabled" "compile-enforce-module-constants" "compile-syntax" "compile-target-machine?" "compiled-expression-recompile" "compiled-expression?" "compiled-module-expression?" "complete-path?" "complex?" "compose" "compose1" "conjoin" "conjugate" "cons" "cons/c" "cons?" "const" "continuation-mark-key/c" "continuation-mark-key?" "continuation-mark-set->context" "continuation-mark-set->iterator" "continuation-mark-set->list" "continuation-mark-set->list*" "continuation-mark-set-first" "continuation-mark-set?" "continuation-marks" "continuation-prompt-available?" "continuation-prompt-tag?" "continuation?" "contract-custom-write-property-proc" "contract-equivalent?" "contract-exercise" "contract-first-order" "contract-first-order-passes?" "contract-late-neg-projection" "contract-name" "contract-proc" "contract-projection" "contract-property?" "contract-random-generate" "contract-random-generate-env?" "contract-random-generate-fail?" "contract-random-generate-get-current-environment" "contract-random-generate-stash" "contract-random-generate/choose" "contract-stronger?" "contract-struct-exercise" "contract-struct-generate" "contract-struct-late-neg-projection" "contract-struct-list-contract?" "contract-val-first-projection" "contract?" 
"convert-stream" "copy-directory/files" "copy-file" "copy-port" "cos" "cosh" "count" "current-blame-format" "current-break-parameterization" "current-code-inspector" "current-command-line-arguments" "current-compile" "current-compile-realm" "current-compile-target-machine" "current-compiled-file-roots" "current-continuation-marks" "current-custodian" "current-directory" "current-directory-for-user" "current-drive" "current-environment-variables" "current-error-message-adjuster" "current-error-port" "current-eval" "current-evt-pseudo-random-generator" "current-force-delete-permissions" "current-future" "current-gc-milliseconds" "current-get-interaction-evt" "current-get-interaction-input-port" "current-inexact-milliseconds" "current-inexact-monotonic-milliseconds" "current-input-port" "current-inspector" "current-library-collection-links" "current-library-collection-paths" "current-load" "current-load-extension" "current-load-relative-directory" "current-load/use-compiled" "current-locale" "current-logger" "current-memory-use" "current-milliseconds" "current-module-declare-name" "current-module-declare-source" "current-module-name-resolver" "current-module-path-for-load" "current-namespace" "current-output-port" "current-parameterization" "current-plumber" "current-preserved-thread-cell-values" "current-print" "current-process-milliseconds" "current-prompt-read" "current-pseudo-random-generator" "current-read-interaction" "current-reader-guard" "current-readtable" "current-seconds" "current-security-guard" "current-subprocess-custodian-mode" "current-subprocess-keep-file-descriptors" "current-thread" "current-thread-group" "current-thread-initial-stack-size" "current-write-relative-directory" "curry" "curryr" "custodian-box-value" "custodian-box?" "custodian-limit-memory" "custodian-managed-list" "custodian-memory-accounting-available?" "custodian-require-memory" "custodian-shut-down?" "custodian-shutdown-all" "custodian?" "custom-print-quotable-accessor" "custom-print-quotable?" "custom-write-accessor" "custom-write-property-proc" "custom-write?" "date" "date*" "date*-nanosecond" "date*-time-zone-name" "date*?" "date-day" "date-dst?" "date-hour" "date-minute" "date-month" "date-second" "date-time-zone-offset" "date-week-day" "date-year" "date-year-day" "date?" "datum->syntax" "datum-intern-literal" "default-continuation-prompt-tag" "degrees->radians" "delete-directory" "delete-directory/files" "delete-file" "denominator" "dict->list" "dict-can-functional-set?" "dict-can-remove-keys?" "dict-clear" "dict-clear!" "dict-copy" "dict-count" "dict-empty?" "dict-for-each" "dict-has-key?" "dict-implements/c" "dict-implements?" "dict-iter-contract" "dict-iterate-first" "dict-iterate-key" "dict-iterate-next" "dict-iterate-value" "dict-key-contract" "dict-keys" "dict-map" "dict-mutable?" "dict-ref" "dict-ref!" "dict-remove" "dict-remove!" "dict-set" "dict-set!" "dict-set*" "dict-set*!" "dict-update" "dict-update!" "dict-value-contract" "dict-values" "dict?" "directory-exists?" "directory-list" "disjoin" "display" "display-lines" "display-lines-to-file" "display-to-file" "displayln" "double-flonum?" "drop" "drop-common-prefix" "drop-right" "dropf" "dropf-right" "dump-memory-stats" "dup-input-port" "dup-output-port" "dynamic->*" "dynamic-get-field" "dynamic-object/c" "dynamic-place" "dynamic-place*" "dynamic-require" "dynamic-require-for-syntax" "dynamic-send" "dynamic-set-field!" "dynamic-wind" "eighth" "empty?" 
"environment-variables-copy" "environment-variables-names" "environment-variables-ref" "environment-variables-set!" "environment-variables?" "eof-evt" "eof-object?" "ephemeron-value" "ephemeron?" "eprintf" "eq-contract-val" "eq-contract?" "eq-hash-code" "eq?" "equal-contract-val" "equal-contract?" "equal-hash-code" "equal-secondary-hash-code" "equal?" "equal?/recur" "eqv-hash-code" "eqv?" "error" "error-contract->adjusted-string" "error-display-handler" "error-escape-handler" "error-message->adjusted-string" "error-print-context-length" "error-print-source-location" "error-print-width" "error-syntax->string-handler" "error-value->string-handler" "eval" "eval-jit-enabled" "eval-syntax" "even?" "evt/c" "evt?" "exact->inexact" "exact-ceiling" "exact-floor" "exact-integer?" "exact-nonnegative-integer?" "exact-positive-integer?" "exact-round" "exact-truncate" "exact?" "executable-yield-handler" "exit" "exit-handler" "exn" "exn-continuation-marks" "exn-message" "exn:break" "exn:break-continuation" "exn:break:hang-up" "exn:break:hang-up?" "exn:break:terminate" "exn:break:terminate?" "exn:break?" "exn:fail" "exn:fail:contract" "exn:fail:contract:arity" "exn:fail:contract:arity?" "exn:fail:contract:blame" "exn:fail:contract:blame-object" "exn:fail:contract:blame?" "exn:fail:contract:continuation" "exn:fail:contract:continuation?" "exn:fail:contract:divide-by-zero" "exn:fail:contract:divide-by-zero?" "exn:fail:contract:non-fixnum-result" "exn:fail:contract:non-fixnum-result?" "exn:fail:contract:variable" "exn:fail:contract:variable-id" "exn:fail:contract:variable?" "exn:fail:contract?" "exn:fail:filesystem" "exn:fail:filesystem:errno" "exn:fail:filesystem:errno-errno" "exn:fail:filesystem:errno?" "exn:fail:filesystem:exists" "exn:fail:filesystem:exists?" "exn:fail:filesystem:missing-module" "exn:fail:filesystem:missing-module-path" "exn:fail:filesystem:missing-module?" "exn:fail:filesystem:version" "exn:fail:filesystem:version?" "exn:fail:filesystem?" "exn:fail:network" "exn:fail:network:errno" "exn:fail:network:errno-errno" "exn:fail:network:errno?" "exn:fail:network?" "exn:fail:object" "exn:fail:object?" "exn:fail:out-of-memory" "exn:fail:out-of-memory?" "exn:fail:read" "exn:fail:read-srclocs" "exn:fail:read:eof" "exn:fail:read:eof?" "exn:fail:read:non-char" "exn:fail:read:non-char?" "exn:fail:read?" "exn:fail:syntax" "exn:fail:syntax-exprs" "exn:fail:syntax:missing-module" "exn:fail:syntax:missing-module-path" "exn:fail:syntax:missing-module?" "exn:fail:syntax:unbound" "exn:fail:syntax:unbound?" "exn:fail:syntax?" "exn:fail:unsupported" "exn:fail:unsupported?" "exn:fail:user" "exn:fail:user?" "exn:fail?" "exn:misc:match?" "exn:missing-module-accessor" "exn:missing-module?" "exn:srclocs-accessor" "exn:srclocs?" "exn?" "exp" "expand" "expand-once" "expand-syntax" "expand-syntax-once" "expand-syntax-to-top-form" "expand-to-top-form" "expand-user-path" "explode-path" "expt" "false?" "field-names" "fifth" "file->bytes" "file->bytes-lines" "file->lines" "file->list" "file->string" "file->value" "file-exists?" "file-name-from-path" "file-or-directory-identity" "file-or-directory-modify-seconds" "file-or-directory-permissions" "file-or-directory-stat" "file-or-directory-type" "file-position" "file-position*" "file-size" "file-stream-buffer-mode" "file-stream-port?" "file-truncate" "filename-extension" "filesystem-change-evt" "filesystem-change-evt-cancel" "filesystem-change-evt?" 
"filesystem-root-list" "filter" "filter-map" "filter-not" "filter-read-input-port" "find-compiled-file-roots" "find-executable-path" "find-files" "find-library-collection-links" "find-library-collection-paths" "find-relative-path" "find-system-path" "findf" "first" "first-or/c" "fixnum?" "flat-contract" "flat-contract-predicate" "flat-contract-property?" "flat-contract-with-explanation" "flat-contract?" "flat-named-contract" "flatten" "floating-point-bytes->real" "flonum?" "floor" "flush-output" "fold-files" "foldl" "foldr" "for-each" "force" "format" "fourth" "fprintf" "free-identifier=?" "free-label-identifier=?" "free-template-identifier=?" "free-transformer-identifier=?" "fsemaphore-count" "fsemaphore-post" "fsemaphore-try-wait?" "fsemaphore-wait" "fsemaphore?" "future" "future?" "futures-enabled?" "gcd" "generate-member-key" "generate-temporaries" "generic-set?" "generic?" "gensym" "get-output-bytes" "get-output-string" "get-preference" "get/build-late-neg-projection" "get/build-val-first-projection" "getenv" "global-port-print-handler" "group-by" "guard-evt" "handle-evt" "handle-evt?" "has-blame?" "has-contract?" "hash" "hash->list" "hash-clear" "hash-clear!" "hash-copy" "hash-copy-clear" "hash-count" "hash-empty?" "hash-ephemeron?" "hash-eq?" "hash-equal?" "hash-eqv?" "hash-for-each" "hash-has-key?" "hash-iterate-first" "hash-iterate-key" "hash-iterate-key+value" "hash-iterate-next" "hash-iterate-pair" "hash-iterate-value" "hash-keys" "hash-keys-subset?" "hash-map" "hash-placeholder?" "hash-ref" "hash-ref!" "hash-ref-key" "hash-remove" "hash-remove!" "hash-set" "hash-set!" "hash-set*" "hash-set*!" "hash-strong?" "hash-update" "hash-update!" "hash-values" "hash-weak?" "hash/c" "hash?" "hasheq" "hasheqv" "identifier-binding" "identifier-binding-portal-syntax" "identifier-binding-symbol" "identifier-distinct-binding" "identifier-label-binding" "identifier-prune-lexical-context" "identifier-prune-to-source-module" "identifier-remove-from-definition-context" "identifier-template-binding" "identifier-transformer-binding" "identifier?" "identity" "if/c" "imag-part" "immutable?" "impersonate-box" "impersonate-channel" "impersonate-continuation-mark-key" "impersonate-hash" "impersonate-hash-set" "impersonate-procedure" "impersonate-procedure*" "impersonate-prompt-tag" "impersonate-struct" "impersonate-vector" "impersonate-vector*" "impersonator-contract?" "impersonator-ephemeron" "impersonator-of?" "impersonator-property-accessor-procedure?" "impersonator-property?" "impersonator?" "implementation?" 
"implementation?/c" "in-bytes" "in-bytes-lines" "in-combinations" "in-cycle" "in-dict" "in-dict-keys" "in-dict-pairs" "in-dict-values" "in-directory" "in-ephemeron-hash" "in-ephemeron-hash-keys" "in-ephemeron-hash-pairs" "in-ephemeron-hash-values" "in-hash" "in-hash-keys" "in-hash-pairs" "in-hash-values" "in-immutable-hash" "in-immutable-hash-keys" "in-immutable-hash-pairs" "in-immutable-hash-values" "in-immutable-set" "in-inclusive-range" "in-indexed" "in-input-port-bytes" "in-input-port-chars" "in-lines" "in-list" "in-mlist" "in-mutable-hash" "in-mutable-hash-keys" "in-mutable-hash-pairs" "in-mutable-hash-values" "in-mutable-set" "in-naturals" "in-parallel" "in-permutations" "in-port" "in-producer" "in-range" "in-sequences" "in-set" "in-slice" "in-stream" "in-string" "in-syntax" "in-value" "in-values*-sequence" "in-values-sequence" "in-vector" "in-weak-hash" "in-weak-hash-keys" "in-weak-hash-pairs" "in-weak-hash-values" "in-weak-set" "inclusive-range" "index-of" "index-where" "indexes-of" "indexes-where" "inexact->exact" "inexact-real?" "inexact?" "infinite?" "input-port-append" "input-port?" "inspector-superior?" "inspector?" "instanceof/c" "integer->char" "integer->integer-bytes" "integer-bytes->integer" "integer-in" "integer-length" "integer-sqrt" "integer-sqrt/remainder" "integer?" "interface->method-names" "interface-extension?" "interface?" "internal-definition-context-add-scopes" "internal-definition-context-binding-identifiers" "internal-definition-context-introduce" "internal-definition-context-seal" "internal-definition-context-splice-binding-identifier" "internal-definition-context?" "is-a?" "is-a?/c" "keyword->string" "keyword-apply" "keyword-apply/dict" "keywordbytes" "list->mutable-set" "list->mutable-seteq" "list->mutable-seteqv" "list->set" "list->seteq" "list->seteqv" "list->string" "list->vector" "list->weak-set" "list->weak-seteq" "list->weak-seteqv" "list-contract?" "list-prefix?" "list-ref" "list-set" "list-tail" "list-update" "list/c" "list?" "listen-port-number?" "listof" "load" "load-extension" "load-on-demand-enabled" "load-relative" "load-relative-extension" "load/cd" "load/use-compiled" "local-expand" "local-expand/capture-lifts" "local-transformer-expand" "local-transformer-expand/capture-lifts" "locale-string-encoding" "log" "log-all-levels" "log-level-evt" "log-level?" "log-max-level" "log-message" "log-receiver?" "logger-name" "logger?" 
"magnitude" "make-arity-at-least" "make-base-empty-namespace" "make-base-namespace" "make-bytes" "make-channel" "make-chaperone-contract" "make-continuation-mark-key" "make-continuation-prompt-tag" "make-contract" "make-custodian" "make-custodian-box" "make-custom-hash" "make-custom-hash-types" "make-custom-set" "make-custom-set-types" "make-date" "make-date*" "make-derived-parameter" "make-directory" "make-directory*" "make-do-sequence" "make-empty-namespace" "make-environment-variables" "make-ephemeron" "make-ephemeron-hash" "make-ephemeron-hasheq" "make-ephemeron-hasheqv" "make-exn" "make-exn:break" "make-exn:break:hang-up" "make-exn:break:terminate" "make-exn:fail" "make-exn:fail:contract" "make-exn:fail:contract:arity" "make-exn:fail:contract:blame" "make-exn:fail:contract:continuation" "make-exn:fail:contract:divide-by-zero" "make-exn:fail:contract:non-fixnum-result" "make-exn:fail:contract:variable" "make-exn:fail:filesystem" "make-exn:fail:filesystem:errno" "make-exn:fail:filesystem:exists" "make-exn:fail:filesystem:missing-module" "make-exn:fail:filesystem:version" "make-exn:fail:network" "make-exn:fail:network:errno" "make-exn:fail:object" "make-exn:fail:out-of-memory" "make-exn:fail:read" "make-exn:fail:read:eof" "make-exn:fail:read:non-char" "make-exn:fail:syntax" "make-exn:fail:syntax:missing-module" "make-exn:fail:syntax:unbound" "make-exn:fail:unsupported" "make-exn:fail:user" "make-file-or-directory-link" "make-flat-contract" "make-fsemaphore" "make-generic" "make-handle-get-preference-locked" "make-hash" "make-hash-placeholder" "make-hasheq" "make-hasheq-placeholder" "make-hasheqv" "make-hasheqv-placeholder" "make-immutable-custom-hash" "make-immutable-hash" "make-immutable-hasheq" "make-immutable-hasheqv" "make-impersonator-property" "make-input-port" "make-input-port/read-to-peek" "make-inspector" "make-interned-syntax-introducer" "make-keyword-procedure" "make-known-char-range-list" "make-limited-input-port" "make-list" "make-lock-file-name" "make-log-receiver" "make-logger" "make-mixin-contract" "make-mutable-custom-set" "make-none/c" "make-object" "make-output-port" "make-parameter" "make-parent-directory*" "make-phantom-bytes" "make-pipe" "make-pipe-with-specials" "make-placeholder" "make-plumber" "make-polar" "make-portal-syntax" "make-prefab-struct" "make-primitive-class" "make-proj-contract" "make-pseudo-random-generator" "make-reader-graph" "make-readtable" "make-rectangular" "make-rename-transformer" "make-resolved-module-path" "make-security-guard" "make-semaphore" "make-set!-transformer" "make-shared-bytes" "make-sibling-inspector" "make-special-comment" "make-srcloc" "make-string" "make-struct-field-accessor" "make-struct-field-mutator" "make-struct-type" "make-struct-type-property" "make-syntax-delta-introducer" "make-syntax-introducer" "make-temporary-directory" "make-temporary-directory*" "make-temporary-file" "make-temporary-file*" "make-tentative-pretty-print-output-port" "make-thread-cell" "make-thread-group" "make-vector" "make-weak-box" "make-weak-custom-hash" "make-weak-custom-set" "make-weak-hash" "make-weak-hasheq" "make-weak-hasheqv" "make-will-executor" "map" "match-equality-test" "matches-arity-exactly?" "max" "mcar" "mcdr" "mcons" "member" "member-name-key-hash-code" "member-name-key=?" "member-name-key?" "memf" "memory-order-acquire" "memory-order-release" "memq" "memv" "merge-input" "method-in-interface?" 
"min" "module->exports" "module->imports" "module->indirect-exports" "module->language-info" "module->namespace" "module->realm" "module-compiled-cross-phase-persistent?" "module-compiled-exports" "module-compiled-imports" "module-compiled-indirect-exports" "module-compiled-language-info" "module-compiled-name" "module-compiled-realm" "module-compiled-submodules" "module-declared?" "module-path-index-join" "module-path-index-resolve" "module-path-index-split" "module-path-index-submodule" "module-path-index?" "module-path?" "module-predefined?" "module-provide-protected?" "modulo" "mpair?" "mutable-set" "mutable-seteq" "mutable-seteqv" "n->th" "nack-guard-evt" "namespace-anchor->empty-namespace" "namespace-anchor->namespace" "namespace-anchor?" "namespace-attach-module" "namespace-attach-module-declaration" "namespace-base-phase" "namespace-call-with-registry-lock" "namespace-mapped-symbols" "namespace-module-identifier" "namespace-module-registry" "namespace-require" "namespace-require/constant" "namespace-require/copy" "namespace-require/expansion-time" "namespace-set-variable-value!" "namespace-symbol->identifier" "namespace-syntax-introduce" "namespace-undefine-variable!" "namespace-unprotect-module" "namespace-variable-value" "namespace?" "nan?" "natural-number/c" "natural?" "negate" "negative-integer?" "negative?" "new-∀/c" "new-∃/c" "newline" "ninth" "non-empty-listof" "non-empty-string?" "none/c" "nonnegative-integer?" "nonpositive-integer?" "normal-case-path" "normalize-arity" "normalize-path" "normalized-arity?" "not" "not/c" "null?" "number->string" "number?" "numerator" "object->vector" "object-info" "object-interface" "object-method-arity-includes?" "object-name" "object-or-false=?" "object=-hash-code" "object=?" "object?" "odd?" "one-of/c" "open-input-bytes" "open-input-file" "open-input-output-file" "open-input-string" "open-output-bytes" "open-output-file" "open-output-nowhere" "open-output-string" "or/c" "order-of-magnitude" "ormap" "output-port?" "pair?" "parameter-procedure=?" "parameter/c" "parameter?" "parameterization?" "parse-command-line" "partition" "path->bytes" "path->complete-path" "path->directory-path" "path->string" "path-add-extension" "path-add-suffix" "path-convention-type" "path-element->bytes" "path-element->string" "path-element?" "path-for-some-system?" "path-get-extension" "path-has-extension?" "path-list-string->path-list" "path-only" "path-replace-extension" "path-replace-suffix" "path-string?" "pathbytes" "port->bytes-lines" "port->lines" "port->list" "port->string" "port-closed-evt" "port-closed?" "port-commit-peeked" "port-count-lines!" "port-count-lines-enabled" "port-counts-lines?" "port-display-handler" "port-file-identity" "port-file-unlock" "port-next-location" "port-number?" "port-print-handler" "port-progress-evt" "port-provides-progress-evts?" "port-read-handler" "port-try-file-lock?" "port-waiting-peer?" "port-write-handler" "port-writes-atomic?" "port-writes-special?" "port?" "portal-syntax-content" "portal-syntax?" "positive-integer?" "positive?" "prefab-key->struct-type" "prefab-key?" "prefab-struct-key" "preferences-lock-file-mode" "pregexp" "pregexp?" 
"pretty-display" "pretty-format" "pretty-print" "pretty-print-.-symbol-without-bars" "pretty-print-abbreviate-read-macros" "pretty-print-columns" "pretty-print-current-style-table" "pretty-print-depth" "pretty-print-exact-as-decimal" "pretty-print-extend-style-table" "pretty-print-handler" "pretty-print-newline" "pretty-print-post-print-hook" "pretty-print-pre-print-hook" "pretty-print-print-hook" "pretty-print-print-line" "pretty-print-remap-stylable" "pretty-print-show-inexactness" "pretty-print-size-hook" "pretty-print-style-table?" "pretty-printing" "pretty-write" "primitive-closure?" "primitive-result-arity" "primitive?" "print" "print-as-expression" "print-boolean-long-form" "print-box" "print-graph" "print-hash-table" "print-mpair-curly-braces" "print-pair-curly-braces" "print-reader-abbreviations" "print-struct" "print-syntax-width" "print-unreadable" "print-value-columns" "print-vector-length" "printable/c" "printf" "println" "procedure->method" "procedure-arity" "procedure-arity-includes/c" "procedure-arity-includes?" "procedure-arity-mask" "procedure-arity?" "procedure-closure-contents-eq?" "procedure-extract-target" "procedure-impersonator*?" "procedure-keywords" "procedure-realm" "procedure-reduce-arity" "procedure-reduce-arity-mask" "procedure-reduce-keyword-arity" "procedure-reduce-keyword-arity-mask" "procedure-rename" "procedure-result-arity" "procedure-specialize" "procedure-struct-type?" "procedure?" "process" "process*" "process*/ports" "process/ports" "processor-count" "progress-evt?" "promise-forced?" "promise-running?" "promise/c" "promise/name?" "promise?" "prop:arrow-contract-get-info" "prop:arrow-contract?" "prop:orc-contract-get-subcontracts" "prop:orc-contract?" "prop:recursive-contract-unroll" "prop:recursive-contract?" "proper-subset?" "property/c" "pseudo-random-generator->vector" "pseudo-random-generator-vector?" "pseudo-random-generator?" "put-preferences" "putenv" "quotient" "quotient/remainder" "radians->degrees" "raise" "raise-argument-error" "raise-argument-error*" "raise-arguments-error" "raise-arguments-error*" "raise-arity-error" "raise-arity-error*" "raise-arity-mask-error" "raise-arity-mask-error*" "raise-blame-error" "raise-contract-error" "raise-mismatch-error" "raise-not-cons-blame-error" "raise-range-error" "raise-range-error*" "raise-result-arity-error" "raise-result-arity-error*" "raise-result-error" "raise-result-error*" "raise-syntax-error" "raise-type-error" "raise-user-error" "random" "random-seed" "range" "rational?" "rationalize" "read" "read-accept-bar-quote" "read-accept-box" "read-accept-compiled" "read-accept-dot" "read-accept-graph" "read-accept-infix-dot" "read-accept-lang" "read-accept-quasiquote" "read-accept-reader" "read-byte" "read-byte-or-special" "read-bytes" "read-bytes!" "read-bytes!-evt" "read-bytes-avail!" "read-bytes-avail!*" "read-bytes-avail!-evt" "read-bytes-avail!/enable-break" "read-bytes-evt" "read-bytes-line" "read-bytes-line-evt" "read-case-sensitive" "read-cdot" "read-char" "read-char-or-special" "read-curly-brace-as-paren" "read-curly-brace-with-tag" "read-decimal-as-inexact" "read-eval-print-loop" "read-installation-configuration-table" "read-language" "read-line" "read-line-evt" "read-on-demand-source" "read-single-flonum" "read-square-bracket-as-paren" "read-square-bracket-with-tag" "read-string" "read-string!" "read-string!-evt" "read-string-evt" "read-syntax" "read-syntax-accept-graph" "read-syntax/recursive" "read/recursive" "readtable-mapping" "readtable?" 
"real->decimal-string" "real->double-flonum" "real->floating-point-bytes" "real->single-flonum" "real-in" "real-part" "real?" "reencode-input-port" "reencode-output-port" "regexp" "regexp-match" "regexp-match*" "regexp-match-evt" "regexp-match-exact?" "regexp-match-peek" "regexp-match-peek-immediate" "regexp-match-peek-positions" "regexp-match-peek-positions*" "regexp-match-peek-positions-immediate" "regexp-match-peek-positions-immediate/end" "regexp-match-peek-positions/end" "regexp-match-positions" "regexp-match-positions*" "regexp-match-positions/end" "regexp-match/end" "regexp-match?" "regexp-max-lookbehind" "regexp-quote" "regexp-replace" "regexp-replace*" "regexp-replace-quote" "regexp-replaces" "regexp-split" "regexp-try-match" "regexp?" "relative-path?" "relocate-input-port" "relocate-output-port" "remainder" "remf" "remf*" "remove" "remove*" "remove-duplicates" "remq" "remq*" "remv" "remv*" "rename-contract" "rename-file-or-directory" "rename-transformer-target" "rename-transformer?" "replace-evt" "reroot-path" "resolve-path" "resolved-module-path-name" "resolved-module-path?" "rest" "reverse" "round" "second" "seconds->date" "security-guard?" "semaphore-peek-evt" "semaphore-peek-evt?" "semaphore-post" "semaphore-try-wait?" "semaphore-wait" "semaphore-wait/enable-break" "semaphore?" "sequence->list" "sequence->stream" "sequence-add-between" "sequence-andmap" "sequence-append" "sequence-count" "sequence-filter" "sequence-fold" "sequence-for-each" "sequence-generate" "sequence-generate*" "sequence-length" "sequence-map" "sequence-ormap" "sequence-ref" "sequence-tail" "sequence/c" "sequence?" "set" "set!-transformer-procedure" "set!-transformer?" "set->list" "set->stream" "set-add" "set-add!" "set-box!" "set-box*!" "set-clear" "set-clear!" "set-copy" "set-copy-clear" "set-count" "set-empty?" "set-eq?" "set-equal?" "set-eqv?" "set-first" "set-for-each" "set-implements/c" "set-implements?" "set-intersect" "set-intersect!" "set-map" "set-mcar!" "set-mcdr!" "set-member?" "set-mutable?" "set-phantom-bytes!" "set-port-next-location!" "set-remove" "set-remove!" "set-rest" "set-subtract" "set-subtract!" "set-symmetric-difference" "set-symmetric-difference!" "set-union" "set-union!" "set-weak?" "set/c" "set=?" "set?" "seteq" "seteqv" "seventh" "sgn" "sha1-bytes" "sha224-bytes" "sha256-bytes" "shared-bytes" "shell-execute" "shrink-path-wrt" "shuffle" "simple-form-path" "simplify-path" "sin" "single-flonum-available?" "single-flonum?" "sinh" "sixth" "skip-projection-wrapper?" "sleep" "some-system-path->string" "sort" "special-comment-value" "special-comment?" "special-filter-input-port" "split-at" "split-at-right" "split-common-prefix" "split-path" "splitf-at" "splitf-at-right" "sqr" "sqrt" "srcloc" "srcloc->string" "srcloc-column" "srcloc-line" "srcloc-position" "srcloc-source" "srcloc-span" "srcloc?" "stop-after" "stop-before" "stream->list" "stream-add-between" "stream-andmap" "stream-append" "stream-count" "stream-empty?" "stream-filter" "stream-first" "stream-fold" "stream-for-each" "stream-force" "stream-length" "stream-map" "stream-ormap" "stream-ref" "stream-rest" "stream-tail" "stream-take" "stream/c" "stream?" "string" "string->bytes/latin-1" "string->bytes/locale" "string->bytes/utf-8" "string->immutable-string" "string->keyword" "string->list" "string->number" "string->path" "string->path-element" "string->some-system-path" "string->symbol" "string->uninterned-symbol" "string->unreadable-symbol" "string-append" "string-append*" "string-append-immutable" "string-ci<=?" "string-ci=?" 
"string-ci>?" "string-contains?" "string-copy" "string-copy!" "string-downcase" "string-environment-variable-name?" "string-fill!" "string-foldcase" "string-join" "string-len/c" "string-length" "string-locale-ci?" "string-locale-downcase" "string-locale-upcase" "string-locale?" "string-no-nuls?" "string-normalize-nfc" "string-normalize-nfd" "string-normalize-nfkc" "string-normalize-nfkd" "string-normalize-spaces" "string-port?" "string-prefix?" "string-ref" "string-replace" "string-set!" "string-split" "string-suffix?" "string-titlecase" "string-trim" "string-upcase" "string-utf-8-length" "string<=?" "string=?" "string>?" "string?" "struct->vector" "struct-accessor-procedure?" "struct-constructor-procedure?" "struct-info" "struct-mutator-procedure?" "struct-predicate-procedure?" "struct-type-authentic?" "struct-type-info" "struct-type-make-constructor" "struct-type-make-predicate" "struct-type-property-accessor-procedure?" "struct-type-property-predicate-procedure?" "struct-type-property/c" "struct-type-property?" "struct-type-sealed?" "struct-type?" "struct?" "sub1" "subbytes" "subclass?" "subclass?/c" "subprocess" "subprocess-group-enabled" "subprocess-kill" "subprocess-pid" "subprocess-status" "subprocess-wait" "subprocess?" "subset?" "substring" "suggest/c" "symbol->string" "symbol-interned?" "symbol-unreadable?" "symboldatum" "syntax->list" "syntax-arm" "syntax-binding-set" "syntax-binding-set->syntax" "syntax-binding-set-extend" "syntax-binding-set?" "syntax-column" "syntax-debug-info" "syntax-deserialize" "syntax-disarm" "syntax-e" "syntax-line" "syntax-local-apply-transformer" "syntax-local-bind-syntaxes" "syntax-local-certifier" "syntax-local-context" "syntax-local-expand-expression" "syntax-local-get-shadower" "syntax-local-identifier-as-binding" "syntax-local-introduce" "syntax-local-lift-context" "syntax-local-lift-expression" "syntax-local-lift-module" "syntax-local-lift-module-end-declaration" "syntax-local-lift-provide" "syntax-local-lift-require" "syntax-local-lift-values-expression" "syntax-local-make-definition-context" "syntax-local-make-delta-introducer" "syntax-local-module-defined-identifiers" "syntax-local-module-exports" "syntax-local-module-interned-scope-symbols" "syntax-local-module-required-identifiers" "syntax-local-name" "syntax-local-phase-level" "syntax-local-submodules" "syntax-local-transforming-module-provides?" "syntax-local-value" "syntax-local-value/immediate" "syntax-original?" "syntax-position" "syntax-property" "syntax-property-preserved?" "syntax-property-remove" "syntax-property-symbol-keys" "syntax-protect" "syntax-rearm" "syntax-recertify" "syntax-serialize" "syntax-shift-phase-level" "syntax-source" "syntax-source-module" "syntax-span" "syntax-taint" "syntax-tainted?" "syntax-track-origin" "syntax-transforming-module-expression?" "syntax-transforming-with-lifts?" "syntax-transforming?" "syntax/c" "syntax?" "system" "system*" "system*/exit-code" "system-big-endian?" "system-idle-evt" "system-language+country" "system-library-subpath" "system-path-convention-type" "system-type" "system/exit-code" "tail-marks-match?" "take" "take-common-prefix" "take-right" "takef" "takef-right" "tan" "tanh" "tcp-abandon-port" "tcp-accept" "tcp-accept-evt" "tcp-accept-ready?" "tcp-accept/enable-break" "tcp-addresses" "tcp-close" "tcp-connect" "tcp-connect/enable-break" "tcp-listen" "tcp-listener?" "tcp-port?" "tentative-pretty-print-port-cancel" "tentative-pretty-print-port-transfer" "tenth" "terminal-port?" "third" "thread" "thread-cell-ref" "thread-cell-set!" 
"thread-cell-values?" "thread-cell?" "thread-dead-evt" "thread-dead?" "thread-group?" "thread-receive" "thread-receive-evt" "thread-resume" "thread-resume-evt" "thread-rewind-receive" "thread-running?" "thread-send" "thread-suspend" "thread-suspend-evt" "thread-try-receive" "thread-wait" "thread/suspend-to-kill" "thread?" "time-apply" "touch" "transplant-input-port" "transplant-output-port" "truncate" "udp-addresses" "udp-bind!" "udp-bound?" "udp-close" "udp-connect!" "udp-connected?" "udp-multicast-interface" "udp-multicast-join-group!" "udp-multicast-leave-group!" "udp-multicast-loopback?" "udp-multicast-set-interface!" "udp-multicast-set-loopback!" "udp-multicast-set-ttl!" "udp-multicast-ttl" "udp-open-socket" "udp-receive!" "udp-receive!*" "udp-receive!-evt" "udp-receive!/enable-break" "udp-receive-ready-evt" "udp-send" "udp-send*" "udp-send-evt" "udp-send-ready-evt" "udp-send-to" "udp-send-to*" "udp-send-to-evt" "udp-send-to/enable-break" "udp-send/enable-break" "udp-set-receive-buffer-size!" "udp-set-ttl!" "udp-ttl" "udp?" "unbox" "unbox*" "uncaught-exception-handler" "unit?" "unquoted-printing-string" "unquoted-printing-string-value" "unquoted-printing-string?" "unsupplied-arg?" "use-collection-link-paths" "use-compiled-file-check" "use-compiled-file-paths" "use-user-specific-search-paths" "value-blame" "value-contract" "values" "variable-reference->empty-namespace" "variable-reference->module-base-phase" "variable-reference->module-declaration-inspector" "variable-reference->module-path-index" "variable-reference->module-source" "variable-reference->namespace" "variable-reference->phase" "variable-reference->resolved-module-path" "variable-reference-constant?" "variable-reference-from-unsafe?" "variable-reference?" "vector" "vector*-length" "vector*-ref" "vector*-set!" "vector->immutable-vector" "vector->list" "vector->pseudo-random-generator" "vector->pseudo-random-generator!" "vector->values" "vector-append" "vector-argmax" "vector-argmin" "vector-cas!" "vector-copy" "vector-copy!" "vector-count" "vector-drop" "vector-drop-right" "vector-empty?" "vector-fill!" "vector-filter" "vector-filter-not" "vector-immutable" "vector-immutable/c" "vector-immutableof" "vector-length" "vector-map" "vector-map!" "vector-member" "vector-memq" "vector-memv" "vector-ref" "vector-set!" "vector-set*!" "vector-set-performance-stats!" "vector-sort" "vector-sort!" "vector-split-at" "vector-split-at-right" "vector-take" "vector-take-right" "vector/c" "vector?" "vectorof" "version" "void" "void?" "weak-box-value" "weak-box?" "weak-set" "weak-seteq" "weak-seteqv" "will-execute" "will-executor?" "will-register" "will-try-execute" "with-input-from-bytes" "with-input-from-file" "with-input-from-string" "with-output-to-bytes" "with-output-to-file" "with-output-to-string" "would-be-future" "wrap-evt" "write" "write-byte" "write-bytes" "write-bytes-avail" "write-bytes-avail*" "write-bytes-avail-evt" "write-bytes-avail/enable-break" "write-char" "write-special" "write-special-avail*" "write-special-evt" "write-string" "write-to-file" "writeln" "xor" "zero?" "~.a" "~.s" "~.v" "~a" "~e" "~r" "~s" "~v" + )) + +;; operators ;; + +((symbol) @operator + (#any-of? @operator + "+" "-" "*" "/" "=" "<=" ">=" "<" ">")) + +;; builtin variables ;; + +((symbol) @variable.builtin + (#any-of? 
@variable.builtin + "always-evt" "block-device-type-bits" "character-device-type-bits" "check-tail-contract" "contract-continuation-mark-key" "contract-random-generate-fail" "directory-type-bits" "empty" "empty-sequence" "empty-stream" "eof" "equal<%>" "error-message-adjuster-key" "externalizable<%>" "failure-result/c" "false" "false/c" "fifo-type-bits" "file-type-bits" "for-clause-syntax-protect" "group-execute-bit" "group-permission-bits" "group-read-bit" "group-write-bit" "impersonator-prop:application-mark" "impersonator-prop:blame" "impersonator-prop:contracted" "legacy-match-expander?" "match-...-nesting" "match-expander?" "mixin-contract" "never-evt" "null" "object%" "other-execute-bit" "other-permission-bits" "other-read-bit" "other-write-bit" "pi" "pi.f" "predicate/c" "printable<%>" "prop:arity-string" "prop:arrow-contract" "prop:authentic" "prop:blame" "prop:chaperone-contract" "prop:checked-procedure" "prop:contract" "prop:contracted" "prop:custom-print-quotable" "prop:custom-write" "prop:dict" "prop:equal+hash" "prop:evt" "prop:exn:missing-module" "prop:exn:srclocs" "prop:expansion-contexts" "prop:flat-contract" "prop:impersonator-of" "prop:input-port" "prop:legacy-match-expander" "prop:liberal-define-context" "prop:match-expander" "prop:object-name" "prop:orc-contract" "prop:output-port" "prop:place-location" "prop:procedure" "prop:recursive-contract" "prop:rename-transformer" "prop:sealed" "prop:sequence" "prop:set!-transformer" "prop:stream" "regular-file-type-bits" "set-group-id-bit" "set-user-id-bit" "socket-type-bits" "sticky-bit" "struct:arity-at-least" "struct:arrow-contract-info" "struct:date" "struct:date*" "struct:exn" "struct:exn:break" "struct:exn:break:hang-up" "struct:exn:break:terminate" "struct:exn:fail" "struct:exn:fail:contract" "struct:exn:fail:contract:arity" "struct:exn:fail:contract:blame" "struct:exn:fail:contract:continuation" "struct:exn:fail:contract:divide-by-zero" "struct:exn:fail:contract:non-fixnum-result" "struct:exn:fail:contract:variable" "struct:exn:fail:filesystem" "struct:exn:fail:filesystem:errno" "struct:exn:fail:filesystem:exists" "struct:exn:fail:filesystem:missing-module" "struct:exn:fail:filesystem:version" "struct:exn:fail:network" "struct:exn:fail:network:errno" "struct:exn:fail:object" "struct:exn:fail:out-of-memory" "struct:exn:fail:read" "struct:exn:fail:read:eof" "struct:exn:fail:read:non-char" "struct:exn:fail:syntax" "struct:exn:fail:syntax:missing-module" "struct:exn:fail:syntax:unbound" "struct:exn:fail:unsupported" "struct:exn:fail:user" "struct:srcloc" "symbolic-link-type-bits" "syntax-local-match-introduce" "syntax-pattern-variable?" "the-unsupplied-arg" "true" "unspecified-dom" "user-execute-bit" "user-permission-bits" "user-read-bit" "user-write-bit" "writable<%>" + )) + +(dot) @variable.builtin + +;;------------------------------------------------------------------;; +;; Special cases ;; +;;------------------------------------------------------------------;; + +(list + "[" + (symbol) @variable + "]") + +(list + . + (symbol) @_p + . + (list + (symbol) @variable) + (#any-of? @_p + "lambda" "λ" "define-values" "define-syntaxes" "define-values-for-export" + "define-values-for-syntax" + )) + +;;------------------------------------------------------------------;; +;; Solve conflicts ;; +;;------------------------------------------------------------------;; + +;; See `:h treesitter`, and search `priority` + +(list + . + (symbol) @include + (#eq? @include "require") + (#set! "priority" 101)) + +(quote + . + (symbol) + (#set! 
"priority" 105)) @symbol + +((sexp_comment) @comment + (#set! "priority" 110)) + diff --git a/crates/zed/src/languages/racket/injections.scm b/crates/zed/src/languages/racket/injections.scm new file mode 100644 index 0000000000..9bfa09db91 --- /dev/null +++ b/crates/zed/src/languages/racket/injections.scm @@ -0,0 +1,4 @@ +; Copied from nvim: https://github.com/nvim-treesitter/nvim-treesitter/blob/master/queries/racket/injections.scm + +[(comment) + (block_comment)] @comment \ No newline at end of file diff --git a/crates/zed/src/languages/ruby/brackets.scm b/crates/zed/src/languages/ruby/brackets.scm index f5129f8f31..e69de29bb2 100644 --- a/crates/zed/src/languages/ruby/brackets.scm +++ b/crates/zed/src/languages/ruby/brackets.scm @@ -1,14 +0,0 @@ -("[" @open "]" @close) -("{" @open "}" @close) -("\"" @open "\"" @close) -("do" @open "end" @close) - -(block_parameters "|" @open "|" @close) -(interpolation "#{" @open "}" @close) - -(if "if" @open "end" @close) -(unless "unless" @open "end" @close) -(begin "begin" @open "end" @close) -(module "module" @open "end" @close) -(_ . "def" @open "end" @close) -(_ . "class" @open "end" @close) diff --git a/crates/zed/src/languages/scheme/config.toml b/crates/zed/src/languages/scheme/config.toml new file mode 100644 index 0000000000..c23099a551 --- /dev/null +++ b/crates/zed/src/languages/scheme/config.toml @@ -0,0 +1,9 @@ +name = "Scheme" +path_suffixes = ["scm", "ss", "mjs"] +line_comment = "; " +autoclose_before = "])" +brackets = [ + { start = "[", end = "]", close = true, newline = true }, + { start = "(", end = ")", close = true, newline = true }, + { start = "'", end = "'", close = false, newline = false }, +] diff --git a/crates/zed/src/languages/scheme/folds.scm b/crates/zed/src/languages/scheme/folds.scm new file mode 100644 index 0000000000..e8ba2f269c --- /dev/null +++ b/crates/zed/src/languages/scheme/folds.scm @@ -0,0 +1,3 @@ +; Copied from nvim: https://github.com/nvim-treesitter/nvim-treesitter/blob/master/queries/scheme/folds.scm + +(program (list) @fold) \ No newline at end of file diff --git a/crates/zed/src/languages/scheme/highlights.scm b/crates/zed/src/languages/scheme/highlights.scm new file mode 100644 index 0000000000..222675d3fc --- /dev/null +++ b/crates/zed/src/languages/scheme/highlights.scm @@ -0,0 +1,183 @@ +;; Copied from nvim: https://github.com/nvim-treesitter/nvim-treesitter/blob/master/queries/scheme/highlights.scm + +;; A highlight query can override the highlights queries before it. +;; So the order is important. +;; We should highlight general rules, then highlight special forms. + +(number) @number +(character) @character +(boolean) @boolean +(string) @string +[(comment) + (block_comment)] @comment + +;; highlight for datum comment +;; copied from ../clojure/highlights.scm +([(comment) (directive)] @comment + (#set! "priority" 105)) + +(escape_sequence) @string.escape + +["(" ")" "[" "]" "{" "}"] @punctuation.bracket + +;; variables + +(symbol) @variable +((symbol) @variable.builtin + (#any-of? @variable.builtin "..." ".")) + +;; procedure + +(list + . + (symbol) @function) + +;; special forms + +(list + "[" + (symbol)+ @variable + "]") + +(list + . + (symbol) @_f + . + (list + (symbol) @variable) + (#any-of? @_f "lambda" "λ")) + +(list + . + (symbol) @_f + . + (list + (list + (symbol) @variable)) + (#any-of? @_f + "let" "let*" "let-syntax" "let-values" "let*-values" "letrec" "letrec*" "letrec-syntax")) + +;; operators + +((symbol) @operator + (#any-of? 
@operator
+ "+" "-" "*" "/" "=" "<=" ">=" "<" ">"))
+
+;; keyword
+
+((symbol) @keyword
+ (#any-of? @keyword
+ "define" "lambda" "λ" "begin" "do" "define-syntax"
+ "and" "or"
+ "if" "cond" "case" "when" "unless" "else" "=>"
+ "let" "let*" "let-syntax" "let-values" "let*-values" "letrec" "letrec*" "letrec-syntax"
+ "set!"
+ "syntax-rules" "identifier-syntax"
+ "quote" "unquote" "quote-splicing" "quasiquote" "unquote-splicing"
+ "delay"
+ "assert"
+ "library" "export" "import" "rename" "only" "except" "prefix"))
+
+((symbol) @conditional
+ (#any-of? @conditional "if" "cond" "case" "when" "unless"))
+
+;; quote
+
+(abbreviation
+ "'"
+ (symbol)) @symbol
+
+(list
+ .
+ (symbol) @_f
+ (#eq? @_f "quote")) @symbol
+
+;; library
+
+(list
+ .
+ (symbol) @_lib
+ .
+ (symbol) @namespace
+
+ (#eq? @_lib "library"))
+
+;; builtin procedures
+;; procedures in R5RS and R6RS but not in R6RS-lib
+
+((symbol) @function.builtin
+ (#any-of? @function.builtin
+ ;; eq
+ "eqv?" "eq?" "equal?"
+ ;; number
+ "number?" "complex?" "real?" "rational?" "integer?"
+ "exact?" "inexact?"
+ "zero?" "positive?" "negative?" "odd?" "even?" "finite?" "infinite?" "nan?"
+ "max" "min"
+ "abs" "quotient" "remainder" "modulo"
+ "div" "div0" "mod" "mod0" "div-and-mod" "div0-and-mod0"
+ "gcd" "lcm" "numerator" "denominator"
+ "floor" "ceiling" "truncate" "round"
+ "rationalize"
+ "exp" "log" "sin" "cos" "tan" "asin" "acos" "atan"
+ "sqrt" "expt"
+ "exact-integer-sqrt"
+ "make-rectangular" "make-polar" "real-part" "imag-part" "magnitude" "angle"
+ "real-valued" "rational-valued?" "integer-valued?"
+ "exact" "inexact" "exact->inexact" "inexact->exact"
+ "number->string" "string->number"
+ ;; boolean
+ "boolean?" "not" "boolean=?"
+ ;; pair
+ "pair?" "cons"
+ "car" "cdr"
+ "caar" "cadr" "cdar" "cddr"
+ "caaar" "caadr" "cadar" "caddr" "cdaar" "cdadr" "cddar" "cdddr"
+ "caaaar" "caaadr" "caadar" "caaddr" "cadaar" "cadadr" "caddar" "cadddr"
+ "cdaaar" "cdaadr" "cdadar" "cdaddr" "cddaar" "cddadr" "cdddar" "cddddr"
+ "set-car!" "set-cdr!"
+ ;; list
+ "null?" "list?"
+ "list" "length" "append" "reverse" "list-tail" "list-ref"
+ "map" "for-each"
+ "memq" "memv" "member" "assq" "assv" "assoc"
+ ;; symbol
+ "symbol?" "symbol->string" "string->symbol" "symbol=?"
+ ;; char
+ "char?" "char=?" "char?" "char<=?" "char>=?"
+ "char-ci=?" "char-ci?" "char-ci<=?" "char-ci>=?"
+ "char-alphabetic?" "char-numeric?" "char-whitespace?" "char-upper-case?" "char-lower-case?"
+ "char->integer" "integer->char"
+ "char-upcase" "char-downcase"
+ ;; string
+ "string?" "make-string" "string" "string-length" "string-ref" "string-set!"
+ "string=?" "string-ci=?" "string?" "string<=?" "string>=?"
+ "string-ci?" "string-ci<=?" "string-ci>=?"
+ "substring" "string-append" "string->list" "list->string"
+ "string-for-each"
+ "string-copy" "string-fill!"
+ "string-upcase" "string-downcase"
+ ;; vector
+ "vector?" "make-vector" "vector" "vector-length" "vector-ref" "vector-set!"
+ "vector->list" "list->vector" "vector-fill!" "vector-map" "vector-for-each"
+ ;; bytevector
+ "bytevector?" "native-endianness"
+ "make-bytevector" "bytevector-length" "bytevector=?" "bytevector-fill!"
+ "bytevector-copy!" "bytevector-copy"
+ ;; error
+ "error" "assertion-violation"
+ ;; control
+ "procedure?" "apply" "force"
+ "call-with-current-continuation" "call/cc"
+ "values" "call-with-values" "dynamic-wind"
+ "eval" "scheme-report-environment" "null-environment" "interaction-environment"
+ ;; IO
+ "call-with-input-file" "call-with-output-file" "input-port?" "output-port?"
+ "current-input-port" "current-output-port" "with-input-from-file" "with-output-to-file"
+ "open-input-file" "open-output-file" "close-input-port" "close-output-port"
+ ;; input
+ "read" "read-char" "peek-char" "eof-object?" "char-ready?"
+ ;; output
+ "write" "display" "newline" "write-char"
+ ;; system
+ "load" "transcript-on" "transcript-off"))
\ No newline at end of file
diff --git a/crates/zed/src/languages/scheme/injections.scm b/crates/zed/src/languages/scheme/injections.scm
new file mode 100644
index 0000000000..5a76034eed
--- /dev/null
+++ b/crates/zed/src/languages/scheme/injections.scm
@@ -0,0 +1,3 @@
+; Copied from nvim: https://github.com/nvim-treesitter/nvim-treesitter/blob/master/queries/scheme/injections.scm
+
+(comment) @comment
\ No newline at end of file
From 0921178b4224e1420c07d311bded6695f95a7fdf Mon Sep 17 00:00:00 2001
From: Mikayla Maki
Date: Mon, 5 Dec 2022 11:29:52 -0800
Subject: [PATCH 205/240] Got tree sitter integration to a shippable place

---
 Cargo.lock | 12 +-
 crates/zed/Cargo.toml | 3 +-
 crates/zed/src/languages.rs | 3 +-
 .../zed/src/languages/commonlisp/config.toml | 9 -
 crates/zed/src/languages/commonlisp/folds.scm | 3 -
 .../src/languages/commonlisp/highlights.scm | 189 ------------------
 crates/zed/src/languages/installation.rs | 21 --
 crates/zed/src/languages/racket/brackets.scm | 3 +
 crates/zed/src/languages/racket/config.toml | 6 +-
 crates/zed/src/languages/racket/folds.scm | 3 -
 .../zed/src/languages/racket/highlights.scm | 132 ++----------
 crates/zed/src/languages/racket/indents.scm | 3 +
 .../zed/src/languages/racket/injections.scm | 4 -
 crates/zed/src/languages/racket/outline.scm | 10 +
 crates/zed/src/languages/ruby/brackets.scm | 14 ++
 crates/zed/src/languages/scheme/brackets.scm | 3 +
 crates/zed/src/languages/scheme/config.toml | 8 +-
 crates/zed/src/languages/scheme/folds.scm | 3 -
 .../zed/src/languages/scheme/highlights.scm | 187 ++---------------
 crates/zed/src/languages/scheme/indents.scm | 3 +
 .../zed/src/languages/scheme/injections.scm | 3 -
 crates/zed/src/languages/scheme/outline.scm | 10 +
 22 files changed, 88 insertions(+), 544 deletions(-)
 delete mode 100644 crates/zed/src/languages/commonlisp/config.toml
 delete mode 100644 crates/zed/src/languages/commonlisp/folds.scm
 delete mode 100644 crates/zed/src/languages/commonlisp/highlights.scm
 create mode 100644 crates/zed/src/languages/racket/brackets.scm
 delete mode 100644 crates/zed/src/languages/racket/folds.scm
 create mode 100644 crates/zed/src/languages/racket/indents.scm
 delete mode 100644 crates/zed/src/languages/racket/injections.scm
 create mode 100644 crates/zed/src/languages/racket/outline.scm
 create mode 100644 crates/zed/src/languages/scheme/brackets.scm
 delete mode 100644 crates/zed/src/languages/scheme/folds.scm
 create mode 100644 crates/zed/src/languages/scheme/indents.scm
 delete mode 100644 crates/zed/src/languages/scheme/injections.scm
 create mode 100644 crates/zed/src/languages/scheme/outline.scm

diff --git a/Cargo.lock b/Cargo.lock
index 82fcd2edf4..d3cc50f991 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6461,15 +6461,6 @@ dependencies = [
 "tree-sitter",
 ]
-[[package]]
-name = "tree-sitter-commonlisp"
-version = "0.3.0"
-source = "git+https://github.com/theHamsta/tree-sitter-commonlisp?rev=c7e814975ab0d0d04333d1f32391c41180c58919#c7e814975ab0d0d04333d1f32391c41180c58919"
-dependencies = [
- "cc",
- "tree-sitter",
-]
-
 [[package]]
 name = "tree-sitter-cpp"
 version = "0.20.0"
@@ -6578,7 +6569,7 @@ dependencies = [
 [[package]]
 name = "tree-sitter-racket"
 version = "0.0.1"
-source = "git+https://github.com/6cdh/tree-sitter-racket?rev=69ca563af3bcf9d67220532e0814786f2dc34db1#69ca563af3bcf9d67220532e0814786f2dc34db1"
+source = "git+https://github.com/zed-industries/tree-sitter-racket?rev=eb010cf2c674c6fd9a6316a84e28ef90190fe51a#eb010cf2c674c6fd9a6316a84e28ef90190fe51a"
 dependencies = [
 "cc",
 "tree-sitter",
@@ -7823,7 +7814,6 @@ dependencies = [
 "toml",
 "tree-sitter",
 "tree-sitter-c",
- "tree-sitter-commonlisp",
 "tree-sitter-cpp",
 "tree-sitter-css",
 "tree-sitter-elixir",
diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml
index 755d876bec..cc81f3bf23 100644
--- a/crates/zed/Cargo.toml
+++ b/crates/zed/Cargo.toml
@@ -105,8 +105,7 @@ tree-sitter-typescript = "0.20.1"
 tree-sitter-ruby = "0.20.0"
 tree-sitter-html = "0.19.0"
 tree-sitter-scheme = { git = "https://github.com/6cdh/tree-sitter-scheme", rev = "af0fd1fa452cb2562dc7b5c8a8c55551c39273b9"}
-tree-sitter-racket = { git = "https://github.com/6cdh/tree-sitter-racket", rev = "69ca563af3bcf9d67220532e0814786f2dc34db1"}
-tree-sitter-commonlisp = { git = "https://github.com/theHamsta/tree-sitter-commonlisp", rev = "c7e814975ab0d0d04333d1f32391c41180c58919" }
+tree-sitter-racket = { git = "https://github.com/zed-industries/tree-sitter-racket", rev = "eb010cf2c674c6fd9a6316a84e28ef90190fe51a"}
 url = "2.2"

 [dev-dependencies]
diff --git a/crates/zed/src/languages.rs b/crates/zed/src/languages.rs
index 03df3fbab4..0250c53684 100644
--- a/crates/zed/src/languages.rs
+++ b/crates/zed/src/languages.rs
@@ -129,8 +129,7 @@ pub async fn init(languages: Arc, _executor: Arc)
 Some(CachedLspAdapter::new(ruby::RubyLanguageServer).await),
 ),
 ("scheme", tree_sitter_scheme::language(), None),
- // ("racket", tree_sitter_racket::language(), None),
- ("commonlisp", tree_sitter_commonlisp::language(), None),
+ ("racket", tree_sitter_racket::language(), None),
 ] {
 languages.add(language(name, grammar, lsp_adapter));
 }
diff --git a/crates/zed/src/languages/commonlisp/config.toml b/crates/zed/src/languages/commonlisp/config.toml
deleted file mode 100644
index a8200f5d32..0000000000
--- a/crates/zed/src/languages/commonlisp/config.toml
+++ /dev/null
@@ -1,9 +0,0 @@
-name = "Racket"
-path_suffixes = ["lisp", "lsp", "l", "cl"]
-line_comment = "; "
-autoclose_before = "])"
-brackets = [
- { start = "[", end = "]", close = true, newline = true },
- { start = "(", end = ")", close = true, newline = true },
- { start = "'", end = "'", close = false, newline = false },
-]
diff --git a/crates/zed/src/languages/commonlisp/folds.scm b/crates/zed/src/languages/commonlisp/folds.scm
deleted file mode 100644
index c140fa39d8..0000000000
--- a/crates/zed/src/languages/commonlisp/folds.scm
+++ /dev/null
@@ -1,3 +0,0 @@
-;; Copied from nvim: https://github.com/nvim-treesitter/nvim-treesitter/blob/master/queries/commonlisp/folds.scm
-
-(source (list_lit) @fold)
\ No newline at end of file
diff --git a/crates/zed/src/languages/commonlisp/highlights.scm b/crates/zed/src/languages/commonlisp/highlights.scm
deleted file mode 100644
index 74b1b8399e..0000000000
--- a/crates/zed/src/languages/commonlisp/highlights.scm
+++ /dev/null
@@ -1,189 +0,0 @@
-;; Copied from nvim: https://raw.githubusercontent.com/nvim-treesitter/nvim-treesitter/master/queries/commonlisp/highlights.scm
-
-(sym_lit) @variable
-
-;; A highlighting for functions/macros in th cl namespace is available in theHamsta/nvim-treesitter-commonlisp
-;(list_lit . (sym_lit) @function.builtin (#cl-standard-function? @function.builtin))
-;(list_lit . (sym_lit) @function.builtin (#cl-standard-macro?
@function.macro))
-
-(dis_expr) @comment
-
-(defun_keyword) @function.macro
-(defun_header
- function_name: (_) @function)
-(defun_header
- lambda_list: (list_lit (sym_lit) @parameter))
-(defun_header
- keyword: (defun_keyword "defmethod")
- lambda_list: (list_lit (list_lit . (sym_lit) . (sym_lit) @symbol)))
-(defun_header
- lambda_list: (list_lit (list_lit . (sym_lit) @parameter . (_))))
-(defun_header
- specifier: (sym_lit) @symbol)
-
-[":" "::" "."] @punctuation.special
-
-[
- (accumulation_verb)
- (for_clause_word)
- "for"
- "and"
- "finally"
- "thereis"
- "always"
- "when"
- "if"
- "unless"
- "else"
- "do"
- "loop"
- "below"
- "in"
- "from"
- "across"
- "repeat"
- "being"
- "into"
- "with"
- "as"
- "while"
- "until"
- "return"
- "initially"
-] @function.macro
-"=" @operator
-
-(include_reader_macro) @symbol
-["#C" "#c"] @number
-
-[(kwd_lit) (self_referential_reader_macro)] @symbol
-
-(package_lit
- package: (_) @namespace)
-"cl" @namespace
-
-(str_lit) @string
-
-(num_lit) @number
-
-((sym_lit) @boolean (#match? @boolean "^(t|T)$"))
-
-(nil_lit) @constant.builtin
-
-(comment) @comment
-
-;; dynamic variables
-((sym_lit) @variable.builtin
- (#match? @variable.builtin "^[*].+[*]$"))
-
-;; quote
-"'" @string.escape
-(format_specifier) @string.escape
-(quoting_lit) @string.escape
-
-;; syntax quote
-"`" @string.escape
-"," @string.escape
-",@" @string.escape
-(syn_quoting_lit) @string.escape
-(unquoting_lit) @none
-(unquote_splicing_lit) @none
-
-
-["(" ")"] @punctuation.bracket
-
-(block_comment) @comment
-
-
-(with_clause
- type: (_) @type)
-(for_clause
- type: (_) @type)
-
-;; defun-like things
-(list_lit
- .
- (sym_lit) @function.macro
- .
- (sym_lit) @function
- (#eq? @function.macro "deftest"))
-
-;;; Macros and Special Operators
-(list_lit
- .
- (sym_lit) @function.macro
- ;; For a complete and more efficient version install theHamsta/nvim-treesitter-commonlisp
- (#any-of? @function.macro
- "let"
- "function"
- "the"
- "unwind-protect"
- "labels"
- "flet"
- "tagbody"
- "go"
- "symbol-macrolet"
- "symbol-macrolet"
- "progn"
- "prog1"
- "error"
- "or"
- "and"
- "defvar"
- "defparameter"
- "in-package"
- "defpackage"
- "case"
- "ecase"
- "typecase"
- "etypecase"
- "defstruct"
- "defclass"
- "if"
- "when"
- "unless"
- "cond"
- "switch"
- "declaim"
- "optimize"))
-
-;; constant
-((sym_lit) @constant
- (#match? @constant "^[+].+[+]$"))
-
-(var_quoting_lit
- marker: "#'" @symbol
- value: (_) @symbol)
-
-["#" "#p" "#P"] @symbol
-
-(list_lit
- .
- (sym_lit) @function.builtin
- ;; For a complete and more efficient version install theHamsta/nvim-treesitter-commonlisp
- (#any-of? @function.builtin
- "mapcar"
- "reduce"
- "remove-if-not"
- "cons"
- "car"
- "last"
- "nth"
- "equal"
- "cdr"
- "first"
- "rest"
- "format"))
-
-(list_lit
- .
- (sym_lit) @operator
- (#match? @operator "^([+*-+=<>]|<=|>=|/=)$"))
-
-
-((sym_lit) @symbol
-(#match?
@symbol "^[&]"))
-
-[(array_dimension) "#0A" "#0a"] @number
-
-(char_lit) @character
diff --git a/crates/zed/src/languages/installation.rs b/crates/zed/src/languages/installation.rs
index 2e5705d5a0..40edbb88d7 100644
--- a/crates/zed/src/languages/installation.rs
+++ b/crates/zed/src/languages/installation.rs
@@ -35,18 +35,6 @@ pub(crate) struct GithubReleaseAsset {
 pub browser_download_url: String,
 }
-#[derive(Deserialize)]
-pub(crate) struct CodebergReleaseAsset {
- pub name: String,
- pub assets: Vec,
-}
-
-#[derive(Deserialize)]
-pub(crate) struct CodebergRelease {
- pub name: String,
- pub browser_download_url: String,
-}
-
 pub async fn npm_package_latest_version(name: &str) -> Result {
 let output = smol::process::Command::new("npm")
 .args(["info", name, "--json"])
@@ -115,12 +103,3 @@ pub(crate) async fn latest_github_release(
 serde_json::from_slice(body.as_slice()).context("error deserializing latest release")?;
 Ok(release)
 }
-
-// pub(crate) async fn latest_codeberg_release(
-// repo_name_with_owner: &str,
-// http: Arc,
-// ) -> anyhow::Result {
-// let mut response = http.get(uri, body, follow_redirects);
-
-// bail!("unimplemented :(");
-// }
diff --git a/crates/zed/src/languages/racket/brackets.scm b/crates/zed/src/languages/racket/brackets.scm
new file mode 100644
index 0000000000..191fd9c084
--- /dev/null
+++ b/crates/zed/src/languages/racket/brackets.scm
@@ -0,0 +1,3 @@
+("(" @open ")" @close)
+("[" @open "]" @close)
+("{" @open "}" @close)
diff --git a/crates/zed/src/languages/racket/config.toml b/crates/zed/src/languages/racket/config.toml
index 33dd539c41..0177e6ef6d 100644
--- a/crates/zed/src/languages/racket/config.toml
+++ b/crates/zed/src/languages/racket/config.toml
@@ -3,7 +3,7 @@ path_suffixes = ["rkt"]
 line_comment = "; "
 autoclose_before = "])"
 brackets = [
- { start = "[", end = "]", close = true, newline = true },
- { start = "(", end = ")", close = true, newline = true },
- { start = "'", end = "'", close = false, newline = false },
+ { start = "[", end = "]", close = true, newline = false },
+ { start = "(", end = ")", close = true, newline = false },
+ { start = "\"", end = "\"", close = true, newline = false },
 ]
diff --git a/crates/zed/src/languages/racket/folds.scm b/crates/zed/src/languages/racket/folds.scm
deleted file mode 100644
index d85ef45cfb..0000000000
--- a/crates/zed/src/languages/racket/folds.scm
+++ /dev/null
@@ -1,3 +0,0 @@
-; Copied from nvim: https://github.com/nvim-treesitter/nvim-treesitter/blob/master/queries/racket/folds.scm
-
-(program (list) @fold)
\ No newline at end of file
diff --git a/crates/zed/src/languages/racket/highlights.scm b/crates/zed/src/languages/racket/highlights.scm
index b410479529..9eeeb1d4ea 100644
--- a/crates/zed/src/languages/racket/highlights.scm
+++ b/crates/zed/src/languages/racket/highlights.scm
@@ -1,140 +1,40 @@
-;; Copied from nvim: https://raw.githubusercontent.com/nvim-treesitter/nvim-treesitter/master/queries/racket/highlights.scm
-
-;; A highlight query can override the highlights queries before it.
-;; So the order is important.
-;; We should highlight general rules, then highlight special forms.
- -;;------------------------------------------------------------------;; -;; Basic highlights ;; -;;------------------------------------------------------------------;; - -(ERROR) @error - -;; basic ;; - -(number) @number -(character) @character -(boolean) @boolean -(keyword) @symbol - -;; string ;; +["(" ")" "[" "]" "{" "}"] @punctuation.bracket [(string) (here_string) (byte_string)] @string - -(escape_sequence) @string.escape - -(regex) @string.regex - -;; comment ;; +(regex) @string.special +(escape_sequence) @escape [(comment) (block_comment) (sexp_comment)] @comment -;; symbol ;; - (symbol) @variable -((symbol) @comment - (#match? @comment "^#[cC][iIsS]$")) - -;; extension ;; +(number) @number +(character) @constant.builtin +(boolean) @constant.builtin +(keyword) @constant +(quote . (symbol)) @constant (extension) @keyword (lang_name) @variable.builtin -;; quote ;; - -(quote) @symbol - -;; list ;; - -["(" ")" "[" "]" "{" "}"] @punctuation.bracket - -;; procedure ;; +((symbol) @operator + (#match? @operator "^(\\+|-|\\*|/|=|>|<|>=|<=)$")) (list - . - (symbol) @function) - -;;------------------------------------------------------------------;; -;; Builtin highlights ;; -;;------------------------------------------------------------------;; - -;; The following lists are generated by a racket script: -;; https://gist.github.com/6cdh/65619e761753eb4166d15185a6236040 -;; Don't edit them directly. - -;; keyword ;; + . + (symbol) @function) (list . (symbol) @keyword - (#any-of? @keyword - "#%app" "#%datum" "#%declare" "#%expression" "#%module-begin" "#%plain-app" "#%plain-lambda" "#%plain-module-begin" "#%printing-module-begin" "#%provide" "#%require" "#%stratified-body" "#%top" "#%top-interaction" "#%variable-reference" "->" "->*" "->*m" "->d" "->dm" "->i" "->m" "..." 
":do-in" "==" "=>" "_" "absent" "abstract" "all-defined-out" "all-from-out" "and" "any" "augment" "augment*" "augment-final" "augment-final*" "augride" "augride*" "begin" "begin-for-syntax" "begin0" "case" "case->" "case->m" "case-lambda" "class" "class*" "class-field-accessor" "class-field-mutator" "class/c" "class/derived" "combine-in" "combine-out" "command-line" "compound-unit" "compound-unit/infer" "cond" "cons/dc" "contract" "contract-out" "contract-pos/neg-doubling" "contract-struct" "contracted" "current-contract-region" "define" "define-compound-unit" "define-compound-unit/infer" "define-contract-struct" "define-custom-hash-types" "define-custom-set-types" "define-for-syntax" "define-local-member-name" "define-logger" "define-match-expander" "define-member-name" "define-module-boundary-contract" "define-namespace-anchor" "define-opt/c" "define-sequence-syntax" "define-serializable-class" "define-serializable-class*" "define-signature" "define-signature-form" "define-splicing-for-clause-syntax" "define-struct" "define-struct/contract" "define-struct/derived" "define-syntax" "define-syntax-rule" "define-syntaxes" "define-unit" "define-unit-binding" "define-unit-from-context" "define-unit/contract" "define-unit/new-import-export" "define-unit/s" "define-values" "define-values-for-export" "define-values-for-syntax" "define-values/invoke-unit" "define-values/invoke-unit/infer" "define/augment" "define/augment-final" "define/augride" "define/contract" "define/final-prop" "define/match" "define/overment" "define/override" "define/override-final" "define/private" "define/public" "define/public-final" "define/pubment" "define/subexpression-pos-prop" "define/subexpression-pos-prop/name" "delay" "delay/idle" "delay/name" "delay/strict" "delay/sync" "delay/thread" "do" "else" "except" "except-in" "except-out" "export" "extends" "failure-cont" "field" "field-bound?" 
"file" "flat-murec-contract" "flat-rec-contract" "for" "for*" "for*/and" "for*/async" "for*/first" "for*/fold" "for*/fold/derived" "for*/foldr" "for*/foldr/derived" "for*/hash" "for*/hasheq" "for*/hasheqv" "for*/last" "for*/list" "for*/lists" "for*/mutable-set" "for*/mutable-seteq" "for*/mutable-seteqv" "for*/or" "for*/product" "for*/set" "for*/seteq" "for*/seteqv" "for*/stream" "for*/sum" "for*/vector" "for*/weak-set" "for*/weak-seteq" "for*/weak-seteqv" "for-label" "for-meta" "for-space" "for-syntax" "for-template" "for/and" "for/async" "for/first" "for/fold" "for/fold/derived" "for/foldr" "for/foldr/derived" "for/hash" "for/hasheq" "for/hasheqv" "for/last" "for/list" "for/lists" "for/mutable-set" "for/mutable-seteq" "for/mutable-seteqv" "for/or" "for/product" "for/set" "for/seteq" "for/seteqv" "for/stream" "for/sum" "for/vector" "for/weak-set" "for/weak-seteq" "for/weak-seteqv" "gen:custom-write" "gen:dict" "gen:equal+hash" "gen:set" "gen:stream" "generic" "get-field" "hash/dc" "if" "implies" "import" "include" "include-at/relative-to" "include-at/relative-to/reader" "include/reader" "inherit" "inherit-field" "inherit/inner" "inherit/super" "init" "init-depend" "init-field" "init-rest" "inner" "inspect" "instantiate" "interface" "interface*" "invariant-assertion" "invoke-unit" "invoke-unit/infer" "lambda" "lazy" "let" "let*" "let*-values" "let-syntax" "let-syntaxes" "let-values" "let/cc" "let/ec" "letrec" "letrec-syntax" "letrec-syntaxes" "letrec-syntaxes+values" "letrec-values" "lib" "link" "local" "local-require" "log-debug" "log-error" "log-fatal" "log-info" "log-warning" "match" "match*" "match*/derived" "match-define" "match-define-values" "match-lambda" "match-lambda*" "match-lambda**" "match-let" "match-let*" "match-let*-values" "match-let-values" "match-letrec" "match-letrec-values" "match/derived" "match/values" "member-name-key" "mixin" "module" "module*" "module+" "nand" "new" "nor" "object-contract" "object/c" "only" "only-in" "only-meta-in" "only-space-in" "open" "opt/c" "or" "overment" "overment*" "override" "override*" "override-final" "override-final*" "parameterize" "parameterize*" "parameterize-break" "parametric->/c" "place" "place*" "place/context" "planet" "prefix" "prefix-in" "prefix-out" "private" "private*" "prompt-tag/c" "prop:dict/contract" "protect-out" "provide" "provide-signature-elements" "provide/contract" "public" "public*" "public-final" "public-final*" "pubment" "pubment*" "quasiquote" "quasisyntax" "quasisyntax/loc" "quote" "quote-syntax" "quote-syntax/prune" "recontract-out" "recursive-contract" "relative-in" "rename" "rename-in" "rename-inner" "rename-out" "rename-super" "require" "send" "send*" "send+" "send-generic" "send/apply" "send/keyword-apply" "set!" "set!-values" "set-field!" "shared" "stream" "stream*" "stream-cons" "stream-lazy" "struct" "struct*" "struct-copy" "struct-field-index" "struct-guard/c" "struct-out" "struct/c" "struct/contract" "struct/ctc" "struct/dc" "struct/derived" "submod" "super" "super-instantiate" "super-make-object" "super-new" "syntax" "syntax-case" "syntax-case*" "syntax-id-rules" "syntax-rules" "syntax/loc" "tag" "this" "this%" "thunk" "thunk*" "time" "unconstrained-domain->" "unit" "unit-from-context" "unit/c" "unit/new-import-export" "unit/s" "unless" "unquote" "unquote-splicing" "unsyntax" "unsyntax-splicing" "values/drop" "when" "with-continuation-mark" "with-contract" "with-contract-continuation-mark" "with-handlers" "with-handlers*" "with-method" "with-syntax" "~?" "~@" "λ" + (#match? 
@keyword + "^(unit-from-context|for/last|syntax-case|match-let\\*-values|define-for-syntax|define/subexpression-pos-prop|set-field!|class-field-accessor|invoke-unit|#%stratified-body|for\\*/and|for\\*/weak-set|flat-rec-contract|for\\*/stream|planet|for/mutable-seteqv|log-error|delay|#%declare|prop:dict/contract|->d|lib|override\\*|define-local-member-name|send-generic|for\\*/hasheq|define-syntax|submod|except|include-at/relative-to/reader|public\\*|define-member-name|define/public|let\\*|for/and|for\\*/first|for|delay/strict|define-values-for-export|==|match-define-values|for/weak-seteq|for\\*/async|for/stream|for/weak-seteqv|set!-values|lambda|for\\*/product|augment-final\\*|pubment\\*|command-line|contract|case|struct-field-index|contract-struct|unless|for/hasheq|for/seteqv|with-method|define-values-for-syntax|for-template|pubment|for\\*/list|syntax-case\\*|init-field|define-serializable-class|=>|for/foldr/derived|letrec-syntaxes|overment\\*|unquote-splicing|_|inherit-field|for\\*|stream-lazy|match-lambda\\*|contract-pos/neg-doubling|unit/c|match-define|for\\*/set|unit/s|nor|#%expression|class/c|this%|place/context|super-make-object|when|set!|parametric->/c|syntax-id-rules|include/reader|compound-unit|override-final|get-field|gen:dict|for\\*/seteqv|for\\*/hash|#%provide|combine-out|link|with-contract-continuation-mark|define-struct/derived|stream\\*|λ|rename-out|define-serializable-class\\*|augment|define/augment|let|define-signature-form|letrec-syntax|abstract|define-namespace-anchor|#%module-begin|#%top-interaction|for\\*/weak-seteqv|do|define/subexpression-pos-prop/name|absent|send/apply|with-handlers\\*|all-from-out|provide-signature-elements|gen:stream|define/override-final|for\\*/mutable-seteqv|rename|quasisyntax/loc|instantiate|for/list|extends|include-at/relative-to|mixin|define/pubment|#%plain-lambda|except-out|#%plain-module-begin|init|for\\*/last|relative-in|define-unit/new-import-export|->dm|member-name-key|nand|interface\\*|struct|define/override|else|define/augment-final|failure-cont|open|log-info|define/final-prop|all-defined-out|for/sum|for\\*/sum|recursive-contract|define|define-logger|match\\*|log-debug|rename-inner|->|struct/derived|unit|class\\*|prefix-out|any|define/overment|define-signature|match-letrec-values|let-syntaxes|for/mutable-set|define/match|cond|super-instantiate|define-contract-struct|import|hash/dc|define-custom-set-types|public-final|for/vector|for-label|prefix-in|for\\*/foldr/derived|define-unit-binding|object-contract|syntax-rules|augride|for\\*/mutable-seteq|quasisyntax|inner|for-syntax|overment|send/keyword-apply|generic|let\\*-values|->m|define-values|struct-copy|init-depend|struct/ctc|match-lambda|#%printing-module-begin|match\\*/derived|case->m|this|file|stream-cons|inspect|field|for/weak-set|struct\\*|gen:custom-write|thunk\\*|combine-in|unquote|for/lists|define/private|for\\*/foldr|define-unit/s|with-continuation-mark|begin|prefix|quote-syntax/prune|object/c|interface|match/derived|for/hasheqv|current-contract-region|define-compound-unit|override|define/public-final|recontract-out|let/cc|augride\\*|inherit|send|define-values/invoke-unit|for/mutable-seteq|#%datum|for/first|match-let\\*|invoke-unit/infer|define/contract|syntax/loc|for\\*/hasheqv|define-sequence-syntax|let/ec|for/product|for\\*/fold/derived|define-syntax-rule|lazy|unconstrained-domain->|augment-final|private|class|define-splicing-for-clause-syntax|for\\*/fold|prompt-tag/c|contract-out|match/values|public-final\\*|case-lambda|for/fold|unsyntax|for/set|begin0|#%require|time|public|d
efine-struct|include|define-values/invoke-unit/infer|only-space-in|struct/c|only-meta-in|unit/new-import-export|place|begin-for-syntax|shared|inherit/super|quote|for/or|struct/contract|export|inherit/inner|struct-out|let-syntax|augment\\*|for\\*/vector|rename-in|match-let|define-unit|:do-in|~@|for\\*/weak-seteq|private\\*|and|except-in|log-fatal|gen:equal\\+hash|provide|require|thunk|invariant-assertion|define-match-expander|init-rest|->\\*|class/derived|super-new|for/fold/derived|for\\*/mutable-set|match-lambda\\*\\*|only|with-contract|~\\?|opt/c|let-values|delay/thread|->i|for/foldr|for-meta|only-in|send\\+|\\.\\.\\.|struct-guard/c|->\\*m|gen:set|struct/dc|define-syntaxes|if|parameterize|module\\*|module|send\\*|#%variable-reference|compound-unit/infer|#%plain-app|for/hash|contracted|case->|match|for\\*/lists|#%app|letrec-values|log-warning|super|define/augride|local-require|provide/contract|define-struct/contract|match-let-values|quote-syntax|for\\*/seteq|define-compound-unit/infer|parameterize\\*|values/drop|for/seteq|tag|stream|delay/idle|module\\+|define-custom-hash-types|cons/dc|define-module-boundary-contract|or|protect-out|define-opt/c|implies|letrec-syntaxes\\+values|for\\*/or|unsyntax-splicing|override-final\\*|for/async|parameterize-break|syntax|place\\*|for-space|quasiquote|with-handlers|delay/sync|define-unit-from-context|match-letrec|#%top|define-unit/contract|delay/name|new|field-bound\\?|letrec|class-field-mutator|with-syntax|flat-murec-contract|rename-super|local)$" )) -;; builtin procedures - -((symbol) @function.builtin - (#any-of? @function.builtin - "*" "*list/c" "+" "-" "/" "<" "" ">/c" ">=" ">=/c" "abort-current-continuation" "abs" "absolute-path?" "acos" "add-between" "add1" "alarm-evt" "and/c" "andmap" "angle" "any/c" "append" "append*" "append-map" "apply" "argmax" "argmin" "arithmetic-shift" "arity-at-least" "arity-at-least-value" "arity-at-least?" "arity-checking-wrapper" "arity-includes?" "arity=?" "arrow-contract-info" "arrow-contract-info-accepts-arglist" "arrow-contract-info-chaperone-procedure" "arrow-contract-info-check-first-order" "arrow-contract-info?" "asin" "assert-unreachable" "assf" "assoc" "assq" "assv" "atan" "bad-number-of-results" "banner" "base->-doms/c" "base->-rngs/c" "base->?" "between/c" "bitwise-and" "bitwise-bit-field" "bitwise-bit-set?" "bitwise-ior" "bitwise-not" "bitwise-xor" "blame-add-car-context" "blame-add-cdr-context" "blame-add-context" "blame-add-missing-party" "blame-add-nth-arg-context" "blame-add-range-context" "blame-add-unknown-context" "blame-context" "blame-contract" "blame-fmt->-string" "blame-missing-party?" "blame-negative" "blame-original?" "blame-positive" "blame-replace-negative" "blame-replaced-negative?" "blame-source" "blame-swap" "blame-swapped?" "blame-update" "blame-value" "blame?" "boolean=?" "boolean?" "bound-identifier=?" "box" "box-cas!" "box-immutable" "box-immutable/c" "box/c" "box?" "break-enabled" "break-parameterization?" "break-thread" "build-chaperone-contract-property" "build-compound-type-name" "build-contract-property" "build-flat-contract-property" "build-list" "build-path" "build-path/convention-type" "build-string" "build-vector" "byte-pregexp" "byte-pregexp?" "byte-ready?" "byte-regexp" "byte-regexp?" "byte?" "bytes" "bytes->immutable-bytes" "bytes->list" "bytes->path" "bytes->path-element" "bytes->string/latin-1" "bytes->string/locale" "bytes->string/utf-8" "bytes-append" "bytes-append*" "bytes-close-converter" "bytes-convert" "bytes-convert-end" "bytes-converter?" 
"bytes-copy" "bytes-copy!" "bytes-environment-variable-name?" "bytes-fill!" "bytes-join" "bytes-length" "bytes-no-nuls?" "bytes-open-converter" "bytes-ref" "bytes-set!" "bytes-utf-8-index" "bytes-utf-8-length" "bytes-utf-8-ref" "bytes?" "bytes?" "caaaar" "caaadr" "caaar" "caadar" "caaddr" "caadr" "caar" "cadaar" "cadadr" "cadar" "caddar" "cadddr" "caddr" "cadr" "call-in-continuation" "call-in-nested-thread" "call-with-atomic-output-file" "call-with-break-parameterization" "call-with-composable-continuation" "call-with-continuation-barrier" "call-with-continuation-prompt" "call-with-current-continuation" "call-with-default-reading-parameterization" "call-with-escape-continuation" "call-with-exception-handler" "call-with-file-lock/timeout" "call-with-immediate-continuation-mark" "call-with-input-bytes" "call-with-input-file" "call-with-input-file*" "call-with-input-string" "call-with-output-bytes" "call-with-output-file" "call-with-output-file*" "call-with-output-string" "call-with-parameterization" "call-with-semaphore" "call-with-semaphore/enable-break" "call-with-values" "call/cc" "call/ec" "car" "cartesian-product" "cdaaar" "cdaadr" "cdaar" "cdadar" "cdaddr" "cdadr" "cdar" "cddaar" "cddadr" "cddar" "cdddar" "cddddr" "cdddr" "cddr" "cdr" "ceiling" "channel-get" "channel-put" "channel-put-evt" "channel-put-evt?" "channel-try-get" "channel/c" "channel?" "chaperone-box" "chaperone-channel" "chaperone-continuation-mark-key" "chaperone-contract-property?" "chaperone-contract?" "chaperone-evt" "chaperone-hash" "chaperone-hash-set" "chaperone-of?" "chaperone-procedure" "chaperone-procedure*" "chaperone-prompt-tag" "chaperone-struct" "chaperone-struct-type" "chaperone-vector" "chaperone-vector*" "chaperone?" "char->integer" "char-alphabetic?" "char-blank?" "char-ci<=?" "char-ci=?" "char-ci>?" "char-downcase" "char-foldcase" "char-general-category" "char-graphic?" "char-in" "char-in/c" "char-iso-control?" "char-lower-case?" "char-numeric?" "char-punctuation?" "char-ready?" "char-symbolic?" "char-title-case?" "char-titlecase" "char-upcase" "char-upper-case?" "char-utf-8-length" "char-whitespace?" "char<=?" "char=?" "char>?" "char?" "check-duplicate-identifier" "check-duplicates" "checked-procedure-check-and-extract" "choice-evt" "class->interface" "class-info" "class-seal" "class-unseal" "class?" "cleanse-path" "close-input-port" "close-output-port" "coerce-chaperone-contract" "coerce-chaperone-contracts" "coerce-contract" "coerce-contract/f" "coerce-contracts" "coerce-flat-contract" "coerce-flat-contracts" "collect-garbage" "collection-file-path" "collection-path" "combinations" "combine-output" "compile" "compile-allow-set!-undefined" "compile-context-preservation-enabled" "compile-enforce-module-constants" "compile-syntax" "compile-target-machine?" "compiled-expression-recompile" "compiled-expression?" "compiled-module-expression?" "complete-path?" "complex?" "compose" "compose1" "conjoin" "conjugate" "cons" "cons/c" "cons?" "const" "continuation-mark-key/c" "continuation-mark-key?" "continuation-mark-set->context" "continuation-mark-set->iterator" "continuation-mark-set->list" "continuation-mark-set->list*" "continuation-mark-set-first" "continuation-mark-set?" "continuation-marks" "continuation-prompt-available?" "continuation-prompt-tag?" "continuation?" "contract-custom-write-property-proc" "contract-equivalent?" "contract-exercise" "contract-first-order" "contract-first-order-passes?" "contract-late-neg-projection" "contract-name" "contract-proc" "contract-projection" "contract-property?" 
"contract-random-generate" "contract-random-generate-env?" "contract-random-generate-fail?" "contract-random-generate-get-current-environment" "contract-random-generate-stash" "contract-random-generate/choose" "contract-stronger?" "contract-struct-exercise" "contract-struct-generate" "contract-struct-late-neg-projection" "contract-struct-list-contract?" "contract-val-first-projection" "contract?" "convert-stream" "copy-directory/files" "copy-file" "copy-port" "cos" "cosh" "count" "current-blame-format" "current-break-parameterization" "current-code-inspector" "current-command-line-arguments" "current-compile" "current-compile-realm" "current-compile-target-machine" "current-compiled-file-roots" "current-continuation-marks" "current-custodian" "current-directory" "current-directory-for-user" "current-drive" "current-environment-variables" "current-error-message-adjuster" "current-error-port" "current-eval" "current-evt-pseudo-random-generator" "current-force-delete-permissions" "current-future" "current-gc-milliseconds" "current-get-interaction-evt" "current-get-interaction-input-port" "current-inexact-milliseconds" "current-inexact-monotonic-milliseconds" "current-input-port" "current-inspector" "current-library-collection-links" "current-library-collection-paths" "current-load" "current-load-extension" "current-load-relative-directory" "current-load/use-compiled" "current-locale" "current-logger" "current-memory-use" "current-milliseconds" "current-module-declare-name" "current-module-declare-source" "current-module-name-resolver" "current-module-path-for-load" "current-namespace" "current-output-port" "current-parameterization" "current-plumber" "current-preserved-thread-cell-values" "current-print" "current-process-milliseconds" "current-prompt-read" "current-pseudo-random-generator" "current-read-interaction" "current-reader-guard" "current-readtable" "current-seconds" "current-security-guard" "current-subprocess-custodian-mode" "current-subprocess-keep-file-descriptors" "current-thread" "current-thread-group" "current-thread-initial-stack-size" "current-write-relative-directory" "curry" "curryr" "custodian-box-value" "custodian-box?" "custodian-limit-memory" "custodian-managed-list" "custodian-memory-accounting-available?" "custodian-require-memory" "custodian-shut-down?" "custodian-shutdown-all" "custodian?" "custom-print-quotable-accessor" "custom-print-quotable?" "custom-write-accessor" "custom-write-property-proc" "custom-write?" "date" "date*" "date*-nanosecond" "date*-time-zone-name" "date*?" "date-day" "date-dst?" "date-hour" "date-minute" "date-month" "date-second" "date-time-zone-offset" "date-week-day" "date-year" "date-year-day" "date?" "datum->syntax" "datum-intern-literal" "default-continuation-prompt-tag" "degrees->radians" "delete-directory" "delete-directory/files" "delete-file" "denominator" "dict->list" "dict-can-functional-set?" "dict-can-remove-keys?" "dict-clear" "dict-clear!" "dict-copy" "dict-count" "dict-empty?" "dict-for-each" "dict-has-key?" "dict-implements/c" "dict-implements?" "dict-iter-contract" "dict-iterate-first" "dict-iterate-key" "dict-iterate-next" "dict-iterate-value" "dict-key-contract" "dict-keys" "dict-map" "dict-mutable?" "dict-ref" "dict-ref!" "dict-remove" "dict-remove!" "dict-set" "dict-set!" "dict-set*" "dict-set*!" "dict-update" "dict-update!" "dict-value-contract" "dict-values" "dict?" "directory-exists?" "directory-list" "disjoin" "display" "display-lines" "display-lines-to-file" "display-to-file" "displayln" "double-flonum?" 
"drop" "drop-common-prefix" "drop-right" "dropf" "dropf-right" "dump-memory-stats" "dup-input-port" "dup-output-port" "dynamic->*" "dynamic-get-field" "dynamic-object/c" "dynamic-place" "dynamic-place*" "dynamic-require" "dynamic-require-for-syntax" "dynamic-send" "dynamic-set-field!" "dynamic-wind" "eighth" "empty?" "environment-variables-copy" "environment-variables-names" "environment-variables-ref" "environment-variables-set!" "environment-variables?" "eof-evt" "eof-object?" "ephemeron-value" "ephemeron?" "eprintf" "eq-contract-val" "eq-contract?" "eq-hash-code" "eq?" "equal-contract-val" "equal-contract?" "equal-hash-code" "equal-secondary-hash-code" "equal?" "equal?/recur" "eqv-hash-code" "eqv?" "error" "error-contract->adjusted-string" "error-display-handler" "error-escape-handler" "error-message->adjusted-string" "error-print-context-length" "error-print-source-location" "error-print-width" "error-syntax->string-handler" "error-value->string-handler" "eval" "eval-jit-enabled" "eval-syntax" "even?" "evt/c" "evt?" "exact->inexact" "exact-ceiling" "exact-floor" "exact-integer?" "exact-nonnegative-integer?" "exact-positive-integer?" "exact-round" "exact-truncate" "exact?" "executable-yield-handler" "exit" "exit-handler" "exn" "exn-continuation-marks" "exn-message" "exn:break" "exn:break-continuation" "exn:break:hang-up" "exn:break:hang-up?" "exn:break:terminate" "exn:break:terminate?" "exn:break?" "exn:fail" "exn:fail:contract" "exn:fail:contract:arity" "exn:fail:contract:arity?" "exn:fail:contract:blame" "exn:fail:contract:blame-object" "exn:fail:contract:blame?" "exn:fail:contract:continuation" "exn:fail:contract:continuation?" "exn:fail:contract:divide-by-zero" "exn:fail:contract:divide-by-zero?" "exn:fail:contract:non-fixnum-result" "exn:fail:contract:non-fixnum-result?" "exn:fail:contract:variable" "exn:fail:contract:variable-id" "exn:fail:contract:variable?" "exn:fail:contract?" "exn:fail:filesystem" "exn:fail:filesystem:errno" "exn:fail:filesystem:errno-errno" "exn:fail:filesystem:errno?" "exn:fail:filesystem:exists" "exn:fail:filesystem:exists?" "exn:fail:filesystem:missing-module" "exn:fail:filesystem:missing-module-path" "exn:fail:filesystem:missing-module?" "exn:fail:filesystem:version" "exn:fail:filesystem:version?" "exn:fail:filesystem?" "exn:fail:network" "exn:fail:network:errno" "exn:fail:network:errno-errno" "exn:fail:network:errno?" "exn:fail:network?" "exn:fail:object" "exn:fail:object?" "exn:fail:out-of-memory" "exn:fail:out-of-memory?" "exn:fail:read" "exn:fail:read-srclocs" "exn:fail:read:eof" "exn:fail:read:eof?" "exn:fail:read:non-char" "exn:fail:read:non-char?" "exn:fail:read?" "exn:fail:syntax" "exn:fail:syntax-exprs" "exn:fail:syntax:missing-module" "exn:fail:syntax:missing-module-path" "exn:fail:syntax:missing-module?" "exn:fail:syntax:unbound" "exn:fail:syntax:unbound?" "exn:fail:syntax?" "exn:fail:unsupported" "exn:fail:unsupported?" "exn:fail:user" "exn:fail:user?" "exn:fail?" "exn:misc:match?" "exn:missing-module-accessor" "exn:missing-module?" "exn:srclocs-accessor" "exn:srclocs?" "exn?" "exp" "expand" "expand-once" "expand-syntax" "expand-syntax-once" "expand-syntax-to-top-form" "expand-to-top-form" "expand-user-path" "explode-path" "expt" "false?" "field-names" "fifth" "file->bytes" "file->bytes-lines" "file->lines" "file->list" "file->string" "file->value" "file-exists?" 
"file-name-from-path" "file-or-directory-identity" "file-or-directory-modify-seconds" "file-or-directory-permissions" "file-or-directory-stat" "file-or-directory-type" "file-position" "file-position*" "file-size" "file-stream-buffer-mode" "file-stream-port?" "file-truncate" "filename-extension" "filesystem-change-evt" "filesystem-change-evt-cancel" "filesystem-change-evt?" "filesystem-root-list" "filter" "filter-map" "filter-not" "filter-read-input-port" "find-compiled-file-roots" "find-executable-path" "find-files" "find-library-collection-links" "find-library-collection-paths" "find-relative-path" "find-system-path" "findf" "first" "first-or/c" "fixnum?" "flat-contract" "flat-contract-predicate" "flat-contract-property?" "flat-contract-with-explanation" "flat-contract?" "flat-named-contract" "flatten" "floating-point-bytes->real" "flonum?" "floor" "flush-output" "fold-files" "foldl" "foldr" "for-each" "force" "format" "fourth" "fprintf" "free-identifier=?" "free-label-identifier=?" "free-template-identifier=?" "free-transformer-identifier=?" "fsemaphore-count" "fsemaphore-post" "fsemaphore-try-wait?" "fsemaphore-wait" "fsemaphore?" "future" "future?" "futures-enabled?" "gcd" "generate-member-key" "generate-temporaries" "generic-set?" "generic?" "gensym" "get-output-bytes" "get-output-string" "get-preference" "get/build-late-neg-projection" "get/build-val-first-projection" "getenv" "global-port-print-handler" "group-by" "guard-evt" "handle-evt" "handle-evt?" "has-blame?" "has-contract?" "hash" "hash->list" "hash-clear" "hash-clear!" "hash-copy" "hash-copy-clear" "hash-count" "hash-empty?" "hash-ephemeron?" "hash-eq?" "hash-equal?" "hash-eqv?" "hash-for-each" "hash-has-key?" "hash-iterate-first" "hash-iterate-key" "hash-iterate-key+value" "hash-iterate-next" "hash-iterate-pair" "hash-iterate-value" "hash-keys" "hash-keys-subset?" "hash-map" "hash-placeholder?" "hash-ref" "hash-ref!" "hash-ref-key" "hash-remove" "hash-remove!" "hash-set" "hash-set!" "hash-set*" "hash-set*!" "hash-strong?" "hash-update" "hash-update!" "hash-values" "hash-weak?" "hash/c" "hash?" "hasheq" "hasheqv" "identifier-binding" "identifier-binding-portal-syntax" "identifier-binding-symbol" "identifier-distinct-binding" "identifier-label-binding" "identifier-prune-lexical-context" "identifier-prune-to-source-module" "identifier-remove-from-definition-context" "identifier-template-binding" "identifier-transformer-binding" "identifier?" "identity" "if/c" "imag-part" "immutable?" "impersonate-box" "impersonate-channel" "impersonate-continuation-mark-key" "impersonate-hash" "impersonate-hash-set" "impersonate-procedure" "impersonate-procedure*" "impersonate-prompt-tag" "impersonate-struct" "impersonate-vector" "impersonate-vector*" "impersonator-contract?" "impersonator-ephemeron" "impersonator-of?" "impersonator-property-accessor-procedure?" "impersonator-property?" "impersonator?" "implementation?" 
"implementation?/c" "in-bytes" "in-bytes-lines" "in-combinations" "in-cycle" "in-dict" "in-dict-keys" "in-dict-pairs" "in-dict-values" "in-directory" "in-ephemeron-hash" "in-ephemeron-hash-keys" "in-ephemeron-hash-pairs" "in-ephemeron-hash-values" "in-hash" "in-hash-keys" "in-hash-pairs" "in-hash-values" "in-immutable-hash" "in-immutable-hash-keys" "in-immutable-hash-pairs" "in-immutable-hash-values" "in-immutable-set" "in-inclusive-range" "in-indexed" "in-input-port-bytes" "in-input-port-chars" "in-lines" "in-list" "in-mlist" "in-mutable-hash" "in-mutable-hash-keys" "in-mutable-hash-pairs" "in-mutable-hash-values" "in-mutable-set" "in-naturals" "in-parallel" "in-permutations" "in-port" "in-producer" "in-range" "in-sequences" "in-set" "in-slice" "in-stream" "in-string" "in-syntax" "in-value" "in-values*-sequence" "in-values-sequence" "in-vector" "in-weak-hash" "in-weak-hash-keys" "in-weak-hash-pairs" "in-weak-hash-values" "in-weak-set" "inclusive-range" "index-of" "index-where" "indexes-of" "indexes-where" "inexact->exact" "inexact-real?" "inexact?" "infinite?" "input-port-append" "input-port?" "inspector-superior?" "inspector?" "instanceof/c" "integer->char" "integer->integer-bytes" "integer-bytes->integer" "integer-in" "integer-length" "integer-sqrt" "integer-sqrt/remainder" "integer?" "interface->method-names" "interface-extension?" "interface?" "internal-definition-context-add-scopes" "internal-definition-context-binding-identifiers" "internal-definition-context-introduce" "internal-definition-context-seal" "internal-definition-context-splice-binding-identifier" "internal-definition-context?" "is-a?" "is-a?/c" "keyword->string" "keyword-apply" "keyword-apply/dict" "keywordbytes" "list->mutable-set" "list->mutable-seteq" "list->mutable-seteqv" "list->set" "list->seteq" "list->seteqv" "list->string" "list->vector" "list->weak-set" "list->weak-seteq" "list->weak-seteqv" "list-contract?" "list-prefix?" "list-ref" "list-set" "list-tail" "list-update" "list/c" "list?" "listen-port-number?" "listof" "load" "load-extension" "load-on-demand-enabled" "load-relative" "load-relative-extension" "load/cd" "load/use-compiled" "local-expand" "local-expand/capture-lifts" "local-transformer-expand" "local-transformer-expand/capture-lifts" "locale-string-encoding" "log" "log-all-levels" "log-level-evt" "log-level?" "log-max-level" "log-message" "log-receiver?" "logger-name" "logger?" 
"magnitude" "make-arity-at-least" "make-base-empty-namespace" "make-base-namespace" "make-bytes" "make-channel" "make-chaperone-contract" "make-continuation-mark-key" "make-continuation-prompt-tag" "make-contract" "make-custodian" "make-custodian-box" "make-custom-hash" "make-custom-hash-types" "make-custom-set" "make-custom-set-types" "make-date" "make-date*" "make-derived-parameter" "make-directory" "make-directory*" "make-do-sequence" "make-empty-namespace" "make-environment-variables" "make-ephemeron" "make-ephemeron-hash" "make-ephemeron-hasheq" "make-ephemeron-hasheqv" "make-exn" "make-exn:break" "make-exn:break:hang-up" "make-exn:break:terminate" "make-exn:fail" "make-exn:fail:contract" "make-exn:fail:contract:arity" "make-exn:fail:contract:blame" "make-exn:fail:contract:continuation" "make-exn:fail:contract:divide-by-zero" "make-exn:fail:contract:non-fixnum-result" "make-exn:fail:contract:variable" "make-exn:fail:filesystem" "make-exn:fail:filesystem:errno" "make-exn:fail:filesystem:exists" "make-exn:fail:filesystem:missing-module" "make-exn:fail:filesystem:version" "make-exn:fail:network" "make-exn:fail:network:errno" "make-exn:fail:object" "make-exn:fail:out-of-memory" "make-exn:fail:read" "make-exn:fail:read:eof" "make-exn:fail:read:non-char" "make-exn:fail:syntax" "make-exn:fail:syntax:missing-module" "make-exn:fail:syntax:unbound" "make-exn:fail:unsupported" "make-exn:fail:user" "make-file-or-directory-link" "make-flat-contract" "make-fsemaphore" "make-generic" "make-handle-get-preference-locked" "make-hash" "make-hash-placeholder" "make-hasheq" "make-hasheq-placeholder" "make-hasheqv" "make-hasheqv-placeholder" "make-immutable-custom-hash" "make-immutable-hash" "make-immutable-hasheq" "make-immutable-hasheqv" "make-impersonator-property" "make-input-port" "make-input-port/read-to-peek" "make-inspector" "make-interned-syntax-introducer" "make-keyword-procedure" "make-known-char-range-list" "make-limited-input-port" "make-list" "make-lock-file-name" "make-log-receiver" "make-logger" "make-mixin-contract" "make-mutable-custom-set" "make-none/c" "make-object" "make-output-port" "make-parameter" "make-parent-directory*" "make-phantom-bytes" "make-pipe" "make-pipe-with-specials" "make-placeholder" "make-plumber" "make-polar" "make-portal-syntax" "make-prefab-struct" "make-primitive-class" "make-proj-contract" "make-pseudo-random-generator" "make-reader-graph" "make-readtable" "make-rectangular" "make-rename-transformer" "make-resolved-module-path" "make-security-guard" "make-semaphore" "make-set!-transformer" "make-shared-bytes" "make-sibling-inspector" "make-special-comment" "make-srcloc" "make-string" "make-struct-field-accessor" "make-struct-field-mutator" "make-struct-type" "make-struct-type-property" "make-syntax-delta-introducer" "make-syntax-introducer" "make-temporary-directory" "make-temporary-directory*" "make-temporary-file" "make-temporary-file*" "make-tentative-pretty-print-output-port" "make-thread-cell" "make-thread-group" "make-vector" "make-weak-box" "make-weak-custom-hash" "make-weak-custom-set" "make-weak-hash" "make-weak-hasheq" "make-weak-hasheqv" "make-will-executor" "map" "match-equality-test" "matches-arity-exactly?" "max" "mcar" "mcdr" "mcons" "member" "member-name-key-hash-code" "member-name-key=?" "member-name-key?" "memf" "memory-order-acquire" "memory-order-release" "memq" "memv" "merge-input" "method-in-interface?" 
"min" "module->exports" "module->imports" "module->indirect-exports" "module->language-info" "module->namespace" "module->realm" "module-compiled-cross-phase-persistent?" "module-compiled-exports" "module-compiled-imports" "module-compiled-indirect-exports" "module-compiled-language-info" "module-compiled-name" "module-compiled-realm" "module-compiled-submodules" "module-declared?" "module-path-index-join" "module-path-index-resolve" "module-path-index-split" "module-path-index-submodule" "module-path-index?" "module-path?" "module-predefined?" "module-provide-protected?" "modulo" "mpair?" "mutable-set" "mutable-seteq" "mutable-seteqv" "n->th" "nack-guard-evt" "namespace-anchor->empty-namespace" "namespace-anchor->namespace" "namespace-anchor?" "namespace-attach-module" "namespace-attach-module-declaration" "namespace-base-phase" "namespace-call-with-registry-lock" "namespace-mapped-symbols" "namespace-module-identifier" "namespace-module-registry" "namespace-require" "namespace-require/constant" "namespace-require/copy" "namespace-require/expansion-time" "namespace-set-variable-value!" "namespace-symbol->identifier" "namespace-syntax-introduce" "namespace-undefine-variable!" "namespace-unprotect-module" "namespace-variable-value" "namespace?" "nan?" "natural-number/c" "natural?" "negate" "negative-integer?" "negative?" "new-∀/c" "new-∃/c" "newline" "ninth" "non-empty-listof" "non-empty-string?" "none/c" "nonnegative-integer?" "nonpositive-integer?" "normal-case-path" "normalize-arity" "normalize-path" "normalized-arity?" "not" "not/c" "null?" "number->string" "number?" "numerator" "object->vector" "object-info" "object-interface" "object-method-arity-includes?" "object-name" "object-or-false=?" "object=-hash-code" "object=?" "object?" "odd?" "one-of/c" "open-input-bytes" "open-input-file" "open-input-output-file" "open-input-string" "open-output-bytes" "open-output-file" "open-output-nowhere" "open-output-string" "or/c" "order-of-magnitude" "ormap" "output-port?" "pair?" "parameter-procedure=?" "parameter/c" "parameter?" "parameterization?" "parse-command-line" "partition" "path->bytes" "path->complete-path" "path->directory-path" "path->string" "path-add-extension" "path-add-suffix" "path-convention-type" "path-element->bytes" "path-element->string" "path-element?" "path-for-some-system?" "path-get-extension" "path-has-extension?" "path-list-string->path-list" "path-only" "path-replace-extension" "path-replace-suffix" "path-string?" "pathbytes" "port->bytes-lines" "port->lines" "port->list" "port->string" "port-closed-evt" "port-closed?" "port-commit-peeked" "port-count-lines!" "port-count-lines-enabled" "port-counts-lines?" "port-display-handler" "port-file-identity" "port-file-unlock" "port-next-location" "port-number?" "port-print-handler" "port-progress-evt" "port-provides-progress-evts?" "port-read-handler" "port-try-file-lock?" "port-waiting-peer?" "port-write-handler" "port-writes-atomic?" "port-writes-special?" "port?" "portal-syntax-content" "portal-syntax?" "positive-integer?" "positive?" "prefab-key->struct-type" "prefab-key?" "prefab-struct-key" "preferences-lock-file-mode" "pregexp" "pregexp?" 
"pretty-display" "pretty-format" "pretty-print" "pretty-print-.-symbol-without-bars" "pretty-print-abbreviate-read-macros" "pretty-print-columns" "pretty-print-current-style-table" "pretty-print-depth" "pretty-print-exact-as-decimal" "pretty-print-extend-style-table" "pretty-print-handler" "pretty-print-newline" "pretty-print-post-print-hook" "pretty-print-pre-print-hook" "pretty-print-print-hook" "pretty-print-print-line" "pretty-print-remap-stylable" "pretty-print-show-inexactness" "pretty-print-size-hook" "pretty-print-style-table?" "pretty-printing" "pretty-write" "primitive-closure?" "primitive-result-arity" "primitive?" "print" "print-as-expression" "print-boolean-long-form" "print-box" "print-graph" "print-hash-table" "print-mpair-curly-braces" "print-pair-curly-braces" "print-reader-abbreviations" "print-struct" "print-syntax-width" "print-unreadable" "print-value-columns" "print-vector-length" "printable/c" "printf" "println" "procedure->method" "procedure-arity" "procedure-arity-includes/c" "procedure-arity-includes?" "procedure-arity-mask" "procedure-arity?" "procedure-closure-contents-eq?" "procedure-extract-target" "procedure-impersonator*?" "procedure-keywords" "procedure-realm" "procedure-reduce-arity" "procedure-reduce-arity-mask" "procedure-reduce-keyword-arity" "procedure-reduce-keyword-arity-mask" "procedure-rename" "procedure-result-arity" "procedure-specialize" "procedure-struct-type?" "procedure?" "process" "process*" "process*/ports" "process/ports" "processor-count" "progress-evt?" "promise-forced?" "promise-running?" "promise/c" "promise/name?" "promise?" "prop:arrow-contract-get-info" "prop:arrow-contract?" "prop:orc-contract-get-subcontracts" "prop:orc-contract?" "prop:recursive-contract-unroll" "prop:recursive-contract?" "proper-subset?" "property/c" "pseudo-random-generator->vector" "pseudo-random-generator-vector?" "pseudo-random-generator?" "put-preferences" "putenv" "quotient" "quotient/remainder" "radians->degrees" "raise" "raise-argument-error" "raise-argument-error*" "raise-arguments-error" "raise-arguments-error*" "raise-arity-error" "raise-arity-error*" "raise-arity-mask-error" "raise-arity-mask-error*" "raise-blame-error" "raise-contract-error" "raise-mismatch-error" "raise-not-cons-blame-error" "raise-range-error" "raise-range-error*" "raise-result-arity-error" "raise-result-arity-error*" "raise-result-error" "raise-result-error*" "raise-syntax-error" "raise-type-error" "raise-user-error" "random" "random-seed" "range" "rational?" "rationalize" "read" "read-accept-bar-quote" "read-accept-box" "read-accept-compiled" "read-accept-dot" "read-accept-graph" "read-accept-infix-dot" "read-accept-lang" "read-accept-quasiquote" "read-accept-reader" "read-byte" "read-byte-or-special" "read-bytes" "read-bytes!" "read-bytes!-evt" "read-bytes-avail!" "read-bytes-avail!*" "read-bytes-avail!-evt" "read-bytes-avail!/enable-break" "read-bytes-evt" "read-bytes-line" "read-bytes-line-evt" "read-case-sensitive" "read-cdot" "read-char" "read-char-or-special" "read-curly-brace-as-paren" "read-curly-brace-with-tag" "read-decimal-as-inexact" "read-eval-print-loop" "read-installation-configuration-table" "read-language" "read-line" "read-line-evt" "read-on-demand-source" "read-single-flonum" "read-square-bracket-as-paren" "read-square-bracket-with-tag" "read-string" "read-string!" "read-string!-evt" "read-string-evt" "read-syntax" "read-syntax-accept-graph" "read-syntax/recursive" "read/recursive" "readtable-mapping" "readtable?" 
"real->decimal-string" "real->double-flonum" "real->floating-point-bytes" "real->single-flonum" "real-in" "real-part" "real?" "reencode-input-port" "reencode-output-port" "regexp" "regexp-match" "regexp-match*" "regexp-match-evt" "regexp-match-exact?" "regexp-match-peek" "regexp-match-peek-immediate" "regexp-match-peek-positions" "regexp-match-peek-positions*" "regexp-match-peek-positions-immediate" "regexp-match-peek-positions-immediate/end" "regexp-match-peek-positions/end" "regexp-match-positions" "regexp-match-positions*" "regexp-match-positions/end" "regexp-match/end" "regexp-match?" "regexp-max-lookbehind" "regexp-quote" "regexp-replace" "regexp-replace*" "regexp-replace-quote" "regexp-replaces" "regexp-split" "regexp-try-match" "regexp?" "relative-path?" "relocate-input-port" "relocate-output-port" "remainder" "remf" "remf*" "remove" "remove*" "remove-duplicates" "remq" "remq*" "remv" "remv*" "rename-contract" "rename-file-or-directory" "rename-transformer-target" "rename-transformer?" "replace-evt" "reroot-path" "resolve-path" "resolved-module-path-name" "resolved-module-path?" "rest" "reverse" "round" "second" "seconds->date" "security-guard?" "semaphore-peek-evt" "semaphore-peek-evt?" "semaphore-post" "semaphore-try-wait?" "semaphore-wait" "semaphore-wait/enable-break" "semaphore?" "sequence->list" "sequence->stream" "sequence-add-between" "sequence-andmap" "sequence-append" "sequence-count" "sequence-filter" "sequence-fold" "sequence-for-each" "sequence-generate" "sequence-generate*" "sequence-length" "sequence-map" "sequence-ormap" "sequence-ref" "sequence-tail" "sequence/c" "sequence?" "set" "set!-transformer-procedure" "set!-transformer?" "set->list" "set->stream" "set-add" "set-add!" "set-box!" "set-box*!" "set-clear" "set-clear!" "set-copy" "set-copy-clear" "set-count" "set-empty?" "set-eq?" "set-equal?" "set-eqv?" "set-first" "set-for-each" "set-implements/c" "set-implements?" "set-intersect" "set-intersect!" "set-map" "set-mcar!" "set-mcdr!" "set-member?" "set-mutable?" "set-phantom-bytes!" "set-port-next-location!" "set-remove" "set-remove!" "set-rest" "set-subtract" "set-subtract!" "set-symmetric-difference" "set-symmetric-difference!" "set-union" "set-union!" "set-weak?" "set/c" "set=?" "set?" "seteq" "seteqv" "seventh" "sgn" "sha1-bytes" "sha224-bytes" "sha256-bytes" "shared-bytes" "shell-execute" "shrink-path-wrt" "shuffle" "simple-form-path" "simplify-path" "sin" "single-flonum-available?" "single-flonum?" "sinh" "sixth" "skip-projection-wrapper?" "sleep" "some-system-path->string" "sort" "special-comment-value" "special-comment?" "special-filter-input-port" "split-at" "split-at-right" "split-common-prefix" "split-path" "splitf-at" "splitf-at-right" "sqr" "sqrt" "srcloc" "srcloc->string" "srcloc-column" "srcloc-line" "srcloc-position" "srcloc-source" "srcloc-span" "srcloc?" "stop-after" "stop-before" "stream->list" "stream-add-between" "stream-andmap" "stream-append" "stream-count" "stream-empty?" "stream-filter" "stream-first" "stream-fold" "stream-for-each" "stream-force" "stream-length" "stream-map" "stream-ormap" "stream-ref" "stream-rest" "stream-tail" "stream-take" "stream/c" "stream?" "string" "string->bytes/latin-1" "string->bytes/locale" "string->bytes/utf-8" "string->immutable-string" "string->keyword" "string->list" "string->number" "string->path" "string->path-element" "string->some-system-path" "string->symbol" "string->uninterned-symbol" "string->unreadable-symbol" "string-append" "string-append*" "string-append-immutable" "string-ci<=?" "string-ci=?" 
"string-ci>?" "string-contains?" "string-copy" "string-copy!" "string-downcase" "string-environment-variable-name?" "string-fill!" "string-foldcase" "string-join" "string-len/c" "string-length" "string-locale-ci?" "string-locale-downcase" "string-locale-upcase" "string-locale?" "string-no-nuls?" "string-normalize-nfc" "string-normalize-nfd" "string-normalize-nfkc" "string-normalize-nfkd" "string-normalize-spaces" "string-port?" "string-prefix?" "string-ref" "string-replace" "string-set!" "string-split" "string-suffix?" "string-titlecase" "string-trim" "string-upcase" "string-utf-8-length" "string<=?" "string=?" "string>?" "string?" "struct->vector" "struct-accessor-procedure?" "struct-constructor-procedure?" "struct-info" "struct-mutator-procedure?" "struct-predicate-procedure?" "struct-type-authentic?" "struct-type-info" "struct-type-make-constructor" "struct-type-make-predicate" "struct-type-property-accessor-procedure?" "struct-type-property-predicate-procedure?" "struct-type-property/c" "struct-type-property?" "struct-type-sealed?" "struct-type?" "struct?" "sub1" "subbytes" "subclass?" "subclass?/c" "subprocess" "subprocess-group-enabled" "subprocess-kill" "subprocess-pid" "subprocess-status" "subprocess-wait" "subprocess?" "subset?" "substring" "suggest/c" "symbol->string" "symbol-interned?" "symbol-unreadable?" "symboldatum" "syntax->list" "syntax-arm" "syntax-binding-set" "syntax-binding-set->syntax" "syntax-binding-set-extend" "syntax-binding-set?" "syntax-column" "syntax-debug-info" "syntax-deserialize" "syntax-disarm" "syntax-e" "syntax-line" "syntax-local-apply-transformer" "syntax-local-bind-syntaxes" "syntax-local-certifier" "syntax-local-context" "syntax-local-expand-expression" "syntax-local-get-shadower" "syntax-local-identifier-as-binding" "syntax-local-introduce" "syntax-local-lift-context" "syntax-local-lift-expression" "syntax-local-lift-module" "syntax-local-lift-module-end-declaration" "syntax-local-lift-provide" "syntax-local-lift-require" "syntax-local-lift-values-expression" "syntax-local-make-definition-context" "syntax-local-make-delta-introducer" "syntax-local-module-defined-identifiers" "syntax-local-module-exports" "syntax-local-module-interned-scope-symbols" "syntax-local-module-required-identifiers" "syntax-local-name" "syntax-local-phase-level" "syntax-local-submodules" "syntax-local-transforming-module-provides?" "syntax-local-value" "syntax-local-value/immediate" "syntax-original?" "syntax-position" "syntax-property" "syntax-property-preserved?" "syntax-property-remove" "syntax-property-symbol-keys" "syntax-protect" "syntax-rearm" "syntax-recertify" "syntax-serialize" "syntax-shift-phase-level" "syntax-source" "syntax-source-module" "syntax-span" "syntax-taint" "syntax-tainted?" "syntax-track-origin" "syntax-transforming-module-expression?" "syntax-transforming-with-lifts?" "syntax-transforming?" "syntax/c" "syntax?" "system" "system*" "system*/exit-code" "system-big-endian?" "system-idle-evt" "system-language+country" "system-library-subpath" "system-path-convention-type" "system-type" "system/exit-code" "tail-marks-match?" "take" "take-common-prefix" "take-right" "takef" "takef-right" "tan" "tanh" "tcp-abandon-port" "tcp-accept" "tcp-accept-evt" "tcp-accept-ready?" "tcp-accept/enable-break" "tcp-addresses" "tcp-close" "tcp-connect" "tcp-connect/enable-break" "tcp-listen" "tcp-listener?" "tcp-port?" "tentative-pretty-print-port-cancel" "tentative-pretty-print-port-transfer" "tenth" "terminal-port?" "third" "thread" "thread-cell-ref" "thread-cell-set!" 
"thread-cell-values?" "thread-cell?" "thread-dead-evt" "thread-dead?" "thread-group?" "thread-receive" "thread-receive-evt" "thread-resume" "thread-resume-evt" "thread-rewind-receive" "thread-running?" "thread-send" "thread-suspend" "thread-suspend-evt" "thread-try-receive" "thread-wait" "thread/suspend-to-kill" "thread?" "time-apply" "touch" "transplant-input-port" "transplant-output-port" "truncate" "udp-addresses" "udp-bind!" "udp-bound?" "udp-close" "udp-connect!" "udp-connected?" "udp-multicast-interface" "udp-multicast-join-group!" "udp-multicast-leave-group!" "udp-multicast-loopback?" "udp-multicast-set-interface!" "udp-multicast-set-loopback!" "udp-multicast-set-ttl!" "udp-multicast-ttl" "udp-open-socket" "udp-receive!" "udp-receive!*" "udp-receive!-evt" "udp-receive!/enable-break" "udp-receive-ready-evt" "udp-send" "udp-send*" "udp-send-evt" "udp-send-ready-evt" "udp-send-to" "udp-send-to*" "udp-send-to-evt" "udp-send-to/enable-break" "udp-send/enable-break" "udp-set-receive-buffer-size!" "udp-set-ttl!" "udp-ttl" "udp?" "unbox" "unbox*" "uncaught-exception-handler" "unit?" "unquoted-printing-string" "unquoted-printing-string-value" "unquoted-printing-string?" "unsupplied-arg?" "use-collection-link-paths" "use-compiled-file-check" "use-compiled-file-paths" "use-user-specific-search-paths" "value-blame" "value-contract" "values" "variable-reference->empty-namespace" "variable-reference->module-base-phase" "variable-reference->module-declaration-inspector" "variable-reference->module-path-index" "variable-reference->module-source" "variable-reference->namespace" "variable-reference->phase" "variable-reference->resolved-module-path" "variable-reference-constant?" "variable-reference-from-unsafe?" "variable-reference?" "vector" "vector*-length" "vector*-ref" "vector*-set!" "vector->immutable-vector" "vector->list" "vector->pseudo-random-generator" "vector->pseudo-random-generator!" "vector->values" "vector-append" "vector-argmax" "vector-argmin" "vector-cas!" "vector-copy" "vector-copy!" "vector-count" "vector-drop" "vector-drop-right" "vector-empty?" "vector-fill!" "vector-filter" "vector-filter-not" "vector-immutable" "vector-immutable/c" "vector-immutableof" "vector-length" "vector-map" "vector-map!" "vector-member" "vector-memq" "vector-memv" "vector-ref" "vector-set!" "vector-set*!" "vector-set-performance-stats!" "vector-sort" "vector-sort!" "vector-split-at" "vector-split-at-right" "vector-take" "vector-take-right" "vector/c" "vector?" "vectorof" "version" "void" "void?" "weak-box-value" "weak-box?" "weak-set" "weak-seteq" "weak-seteqv" "will-execute" "will-executor?" "will-register" "will-try-execute" "with-input-from-bytes" "with-input-from-file" "with-input-from-string" "with-output-to-bytes" "with-output-to-file" "with-output-to-string" "would-be-future" "wrap-evt" "write" "write-byte" "write-bytes" "write-bytes-avail" "write-bytes-avail*" "write-bytes-avail-evt" "write-bytes-avail/enable-break" "write-char" "write-special" "write-special-avail*" "write-special-evt" "write-string" "write-to-file" "writeln" "xor" "zero?" "~.a" "~.s" "~.v" "~a" "~e" "~r" "~s" "~v" - )) - -;; operators ;; - -((symbol) @operator - (#any-of? @operator - "+" "-" "*" "/" "=" "<=" ">=" "<" ">")) - -;; builtin variables ;; - -((symbol) @variable.builtin - (#any-of? 
@variable.builtin - "always-evt" "block-device-type-bits" "character-device-type-bits" "check-tail-contract" "contract-continuation-mark-key" "contract-random-generate-fail" "directory-type-bits" "empty" "empty-sequence" "empty-stream" "eof" "equal<%>" "error-message-adjuster-key" "externalizable<%>" "failure-result/c" "false" "false/c" "fifo-type-bits" "file-type-bits" "for-clause-syntax-protect" "group-execute-bit" "group-permission-bits" "group-read-bit" "group-write-bit" "impersonator-prop:application-mark" "impersonator-prop:blame" "impersonator-prop:contracted" "legacy-match-expander?" "match-...-nesting" "match-expander?" "mixin-contract" "never-evt" "null" "object%" "other-execute-bit" "other-permission-bits" "other-read-bit" "other-write-bit" "pi" "pi.f" "predicate/c" "printable<%>" "prop:arity-string" "prop:arrow-contract" "prop:authentic" "prop:blame" "prop:chaperone-contract" "prop:checked-procedure" "prop:contract" "prop:contracted" "prop:custom-print-quotable" "prop:custom-write" "prop:dict" "prop:equal+hash" "prop:evt" "prop:exn:missing-module" "prop:exn:srclocs" "prop:expansion-contexts" "prop:flat-contract" "prop:impersonator-of" "prop:input-port" "prop:legacy-match-expander" "prop:liberal-define-context" "prop:match-expander" "prop:object-name" "prop:orc-contract" "prop:output-port" "prop:place-location" "prop:procedure" "prop:recursive-contract" "prop:rename-transformer" "prop:sealed" "prop:sequence" "prop:set!-transformer" "prop:stream" "regular-file-type-bits" "set-group-id-bit" "set-user-id-bit" "socket-type-bits" "sticky-bit" "struct:arity-at-least" "struct:arrow-contract-info" "struct:date" "struct:date*" "struct:exn" "struct:exn:break" "struct:exn:break:hang-up" "struct:exn:break:terminate" "struct:exn:fail" "struct:exn:fail:contract" "struct:exn:fail:contract:arity" "struct:exn:fail:contract:blame" "struct:exn:fail:contract:continuation" "struct:exn:fail:contract:divide-by-zero" "struct:exn:fail:contract:non-fixnum-result" "struct:exn:fail:contract:variable" "struct:exn:fail:filesystem" "struct:exn:fail:filesystem:errno" "struct:exn:fail:filesystem:exists" "struct:exn:fail:filesystem:missing-module" "struct:exn:fail:filesystem:version" "struct:exn:fail:network" "struct:exn:fail:network:errno" "struct:exn:fail:object" "struct:exn:fail:out-of-memory" "struct:exn:fail:read" "struct:exn:fail:read:eof" "struct:exn:fail:read:non-char" "struct:exn:fail:syntax" "struct:exn:fail:syntax:missing-module" "struct:exn:fail:syntax:unbound" "struct:exn:fail:unsupported" "struct:exn:fail:user" "struct:srcloc" "symbolic-link-type-bits" "syntax-local-match-introduce" "syntax-pattern-variable?" "the-unsupplied-arg" "true" "unspecified-dom" "user-execute-bit" "user-permission-bits" "user-read-bit" "user-write-bit" "writable<%>" - )) - -(dot) @variable.builtin - -;;------------------------------------------------------------------;; -;; Special cases ;; -;;------------------------------------------------------------------;; - -(list - "[" - (symbol) @variable - "]") - -(list - . - (symbol) @_p - . - (list - (symbol) @variable) - (#any-of? @_p - "lambda" "λ" "define-values" "define-syntaxes" "define-values-for-export" - "define-values-for-syntax" - )) - -;;------------------------------------------------------------------;; -;; Solve conflicts ;; -;;------------------------------------------------------------------;; - -;; See `:h treesitter`, and search `priority` - -(list - . - (symbol) @include - (#eq? @include "require") - (#set! "priority" 101)) - -(quote - . - (symbol) - (#set! 
"priority" 105)) @symbol - -((sexp_comment) @comment - (#set! "priority" 110)) +((symbol) @comment + (#match? @comment "^#[cC][iIsS]$")) diff --git a/crates/zed/src/languages/racket/indents.scm b/crates/zed/src/languages/racket/indents.scm new file mode 100644 index 0000000000..9a1cbad161 --- /dev/null +++ b/crates/zed/src/languages/racket/indents.scm @@ -0,0 +1,3 @@ +(_ "[" "]") @indent +(_ "{" "}") @indent +(_ "(" ")") @indent diff --git a/crates/zed/src/languages/racket/injections.scm b/crates/zed/src/languages/racket/injections.scm deleted file mode 100644 index 9bfa09db91..0000000000 --- a/crates/zed/src/languages/racket/injections.scm +++ /dev/null @@ -1,4 +0,0 @@ -; Copied from nvim: https://github.com/nvim-treesitter/nvim-treesitter/blob/master/queries/racket/injections.scm - -[(comment) - (block_comment)] @comment \ No newline at end of file diff --git a/crates/zed/src/languages/racket/outline.scm b/crates/zed/src/languages/racket/outline.scm new file mode 100644 index 0000000000..604e052a63 --- /dev/null +++ b/crates/zed/src/languages/racket/outline.scm @@ -0,0 +1,10 @@ +(list + . + (symbol) @start-symbol @context + . + [ + (symbol) @name + (list . (symbol) @name) + ] + (#match? @start-symbol "^define") +) @item \ No newline at end of file diff --git a/crates/zed/src/languages/ruby/brackets.scm b/crates/zed/src/languages/ruby/brackets.scm index e69de29bb2..957b20ecdb 100644 --- a/crates/zed/src/languages/ruby/brackets.scm +++ b/crates/zed/src/languages/ruby/brackets.scm @@ -0,0 +1,14 @@ +("[" @open "]" @close) +("{" @open "}" @close) +("\"" @open "\"" @close) +("do" @open "end" @close) + +(block_parameters "|" @open "|" @close) +(interpolation "#{" @open "}" @close) + +(if "if" @open "end" @close) +(unless "unless" @open "end" @close) +(begin "begin" @open "end" @close) +(module "module" @open "end" @close) +(_ . "def" @open "end" @close) +(_ . 
"class" @open "end" @close) \ No newline at end of file diff --git a/crates/zed/src/languages/scheme/brackets.scm b/crates/zed/src/languages/scheme/brackets.scm new file mode 100644 index 0000000000..191fd9c084 --- /dev/null +++ b/crates/zed/src/languages/scheme/brackets.scm @@ -0,0 +1,3 @@ +("(" @open ")" @close) +("[" @open "]" @close) +("{" @open "}" @close) diff --git a/crates/zed/src/languages/scheme/config.toml b/crates/zed/src/languages/scheme/config.toml index c23099a551..7e63673834 100644 --- a/crates/zed/src/languages/scheme/config.toml +++ b/crates/zed/src/languages/scheme/config.toml @@ -1,9 +1,9 @@ name = "Scheme" -path_suffixes = ["scm", "ss", "mjs"] +path_suffixes = ["scm", "ss"] line_comment = "; " autoclose_before = "])" brackets = [ - { start = "[", end = "]", close = true, newline = true }, - { start = "(", end = ")", close = true, newline = true }, - { start = "'", end = "'", close = false, newline = false }, + { start = "[", end = "]", close = true, newline = false }, + { start = "(", end = ")", close = true, newline = false }, + { start = "\"", end = "\"", close = true, newline = false }, ] diff --git a/crates/zed/src/languages/scheme/folds.scm b/crates/zed/src/languages/scheme/folds.scm deleted file mode 100644 index e8ba2f269c..0000000000 --- a/crates/zed/src/languages/scheme/folds.scm +++ /dev/null @@ -1,3 +0,0 @@ -; Copied from nvim: https://github.com/nvim-treesitter/nvim-treesitter/blob/master/queries/scheme/folds.scm - -(program (list) @fold) \ No newline at end of file diff --git a/crates/zed/src/languages/scheme/highlights.scm b/crates/zed/src/languages/scheme/highlights.scm index 222675d3fc..40ba61cd05 100644 --- a/crates/zed/src/languages/scheme/highlights.scm +++ b/crates/zed/src/languages/scheme/highlights.scm @@ -1,183 +1,28 @@ -;; Copied from nvim: https://github.com/nvim-treesitter/nvim-treesitter/blob/master/queries/scheme/highlights.scm - -;; A highlight query can override the highlights queries before it. -;; So the order is important. -;; We should highlight general rules, then highlight special forms. - -(number) @number -(character) @character -(boolean) @boolean -(string) @string -[(comment) - (block_comment)] @comment - -;; highlight for datum comment -;; copied from ../clojure/highlights.scm -([(comment) (directive)] @comment - (#set! "priority" 105)) - -(escape_sequence) @string.escape - ["(" ")" "[" "]" "{" "}"] @punctuation.bracket -;; variables +(number) @number +(character) @constant.builtin +(boolean) @constant.builtin (symbol) @variable -((symbol) @variable.builtin - (#any-of? @variable.builtin "..." ".")) +(string) @string -;; procedure +(escape_sequence) @escape -(list - . - (symbol) @function) - -;; special forms - -(list - "[" - (symbol)+ @variable - "]") - -(list - . - (symbol) @_f - . - (list - (symbol) @variable) - (#any-of? @_f "lambda" "λ")) - -(list - . - (symbol) @_f - . - (list - (list - (symbol) @variable)) - (#any-of? @_f - "let" "let*" "let-syntax" "let-values" "let*-values" "letrec" "letrec*" "letrec-syntax")) - -;; operators +[(comment) + (block_comment) + (directive)] @comment ((symbol) @operator - (#any-of? @operator - "+" "-" "*" "/" "=" "<=" ">=" "<" ">")) - -;; keyword - -((symbol) @keyword - (#any-of? @keyword - "define" "lambda" "λ" "begin" "do" "define-syntax" - "and" "or" - "if" "cond" "case" "when" "unless" "else" "=>" - "let" "let*" "let-syntax" "let-values" "let*-values" "letrec" "letrec*" "letrec-syntax" - "set!" 
- "syntax-rules" "identifier-syntax" - "quote" "unquote" "quote-splicing" "quasiquote" "unquote-splicing" - "delay" - "assert" - "library" "export" "import" "rename" "only" "except" "prefix")) - -((symbol) @conditional - (#any-of? @conditional "if" "cond" "case" "when" "unless")) - -;; quote - -(abbreviation - "'" - (symbol)) @symbol + (#match? @operator "^(\\+|-|\\*|/|=|>|<|>=|<=)$")) (list - . - (symbol) @_f - (#eq? @_f "quote")) @symbol - -;; library + . + (symbol) @function) (list - . - (symbol) @_lib - . - (symbol) @namespace - - (#eq? @_lib "library")) - -;; builtin procedures -;; procedures in R5RS and R6RS but not in R6RS-lib - -((symbol) @function.builtin - (#any-of? @function.builtin - ;; eq - "eqv?" "eq?" "equal?" - ;; number - "number?" "complex?" "real?" "rational?" "integer?" - "exact?" "inexact?" - "zero?" "positive?" "negative?" "odd?" "even?" "finite?" "infinite?" "nan?" - "max" "min" - "abs" "quotient" "remainder" "modulo" - "div" "div0" "mod" "mod0" "div-and-mod" "div0-and-mod0" - "gcd" "lcm" "numerator" "denominator" - "floor" "ceiling" "truncate" "round" - "rationalize" - "exp" "log" "sin" "cos" "tan" "asin" "acos" "atan" - "sqrt" "expt" - "exact-integer-sqrt" - "make-rectangular" "make-polar" "real-part" "imag-part" "magnitude" "angle" - "real-valued" "rational-valued?" "integer-valued?" - "exact" "inexact" "exact->inexact" "inexact->exact" - "number->string" "string->number" - ;; boolean - "boolean?" "not" "boolean=?" - ;; pair - "pair?" "cons" - "car" "cdr" - "caar" "cadr" "cdar" "cddr" - "caaar" "caadr" "cadar" "caddr" "cdaar" "cdadr" "cddar" "cdddr" - "caaaar" "caaadr" "caadar" "caaddr" "cadaar" "cadadr" "caddar" "cadddr" - "cdaaar" "cdaadr" "cdadar" "cdaddr" "cddaar" "cddadr" "cdddar" "cddddr" - "set-car!" "set-cdr!" - ;; list - "null?" "list?" - "list" "length" "append" "reverse" "list-tail" "list-ref" - "map" "for-each" - "memq" "memv" "member" "assq" "assv" "assoc" - ;; symbol - "symbol?" "symbol->string" "string->symbol" "symbol=?" - ;; char - "char?" "char=?" "char?" "char<=?" "char>=?" - "char-ci=?" "char-ci?" "char-ci<=?" "char-ci>=?" - "char-alphabetic?" "char-numeric?" "char-whitespace?" "char-upper-case?" "char-lower-case?" - "char->integer" "integer->char" - "char-upcase" "char-downcase" - ;; string - "string?" "make-string" "string" "string-length" "string-ref" "string-set!" - "string=?" "string-ci=?" "string?" "string<=?" "string>=?" - "string-ci?" "string-ci<=?" "string-ci>=?" - "substring" "string-append" "string->list" "list->string" - "string-for-each" - "string-copy" "string-fill!" - "string-upcase" "string-downcase" - ;; vector - "vector?" "make-vector" "vector" "vector-length" "vector-ref" "vector-set!" - "vector->list" "list->vector" "vector-fill!" "vector-map" "vector-for-each" - ;; bytevector - "bytevector?" "native-endianness" - "make-bytevector" "bytevector-length" "bytevector=?" "bytevector-fill!" - "bytevector-copy!" "bytevector-copy" - ;; error - "error" "assertion-violation" - ;; control - "procedure?" "apply" "force" - "call-with-current-continuation" "call/cc" - "values" "call-with-values" "dynamic-wind" - "eval" "scheme-report-environment" "null-environment" "interaction-environment" - ;; IO - "call-with-input-file" "call-with-output-file" "input-port?" "output-port?" - "current-input-port" "current-output-port" "with-input-from-file" "with-output-to-file" - "open-input-file" "open-output-file" "close-input-port" "close-output-port" - ;; input - "read" "read-char" "peek-char" "eof-object?" "char-ready?" 
- ;; output - "write" "display" "newline" "write-char" - ;; system - "load" "transcript-on" "transcript-off")) \ No newline at end of file + . + (symbol) @keyword + (#match? @keyword + "^(define-syntax|let\\*|lambda|λ|case|=>|quote-splicing|unquote-splicing|set!|let|letrec|letrec-syntax|let-values|let\\*-values|do|else|define|cond|syntax-rules|unquote|begin|quote|let-syntax|and|if|quasiquote|letrec|delay|or|when|unless|identifier-syntax|assert|library|export|import|rename|only|except|prefix)$" + )) diff --git a/crates/zed/src/languages/scheme/indents.scm b/crates/zed/src/languages/scheme/indents.scm new file mode 100644 index 0000000000..9a1cbad161 --- /dev/null +++ b/crates/zed/src/languages/scheme/indents.scm @@ -0,0 +1,3 @@ +(_ "[" "]") @indent +(_ "{" "}") @indent +(_ "(" ")") @indent diff --git a/crates/zed/src/languages/scheme/injections.scm b/crates/zed/src/languages/scheme/injections.scm deleted file mode 100644 index 5a76034eed..0000000000 --- a/crates/zed/src/languages/scheme/injections.scm +++ /dev/null @@ -1,3 +0,0 @@ -; Copied from nvim: https://github.com/nvim-treesitter/nvim-treesitter/blob/master/queries/scheme/injections.scm - -(comment) @comment \ No newline at end of file diff --git a/crates/zed/src/languages/scheme/outline.scm b/crates/zed/src/languages/scheme/outline.scm new file mode 100644 index 0000000000..604e052a63 --- /dev/null +++ b/crates/zed/src/languages/scheme/outline.scm @@ -0,0 +1,10 @@ +(list + . + (symbol) @start-symbol @context + . + [ + (symbol) @name + (list . (symbol) @name) + ] + (#match? @start-symbol "^define") +) @item \ No newline at end of file From 855f17c378875ea919a8b3a1d935d68fd0953923 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 5 Dec 2022 13:55:55 -0800 Subject: [PATCH 206/240] Include outline items for c/c++ functions returning pointers-to-pointers, references Co-authored-by: Julia Risley --- crates/zed/src/languages/c/outline.scm | 64 +++++++++++++++++++----- crates/zed/src/languages/cpp/outline.scm | 48 ++++++++++++++++++ 2 files changed, 100 insertions(+), 12 deletions(-) diff --git a/crates/zed/src/languages/c/outline.scm b/crates/zed/src/languages/c/outline.scm index 11de5ab9a7..ef80b7af8c 100644 --- a/crates/zed/src/languages/c/outline.scm +++ b/crates/zed/src/languages/c/outline.scm @@ -14,17 +14,57 @@ declarator: (_) @name) @item (declaration - type: (_) @context - declarator: (function_declarator - declarator: (_) @name - parameters: (parameter_list - "(" @context - ")" @context))) @item + (type_qualifier)? @context + type: (_)? @context + declarator: [ + (function_declarator + declarator: (_) @name + parameters: (parameter_list + "(" @context + ")" @context)) + (pointer_declarator + "*" @context + declarator: (function_declarator + declarator: (_) @name + parameters: (parameter_list + "(" @context + ")" @context))) + (pointer_declarator + "*" @context + declarator: (pointer_declarator + "*" @context + declarator: (function_declarator + declarator: (_) @name + parameters: (parameter_list + "(" @context + ")" @context)))) + ] +) @item (function_definition - type: (_) @context - declarator: (function_declarator - declarator: (_) @name - parameters: (parameter_list - "(" @context - ")" @context))) @item + (type_qualifier)? @context + type: (_)? 
@context + declarator: [ + (function_declarator + declarator: (_) @name + parameters: (parameter_list + "(" @context + ")" @context)) + (pointer_declarator + "*" @context + declarator: (function_declarator + declarator: (_) @name + parameters: (parameter_list + "(" @context + ")" @context))) + (pointer_declarator + "*" @context + declarator: (pointer_declarator + "*" @context + declarator: (function_declarator + declarator: (_) @name + parameters: (parameter_list + "(" @context + ")" @context)))) + ] +) @item diff --git a/crates/zed/src/languages/cpp/outline.scm b/crates/zed/src/languages/cpp/outline.scm index cefbac314d..38e75f193f 100644 --- a/crates/zed/src/languages/cpp/outline.scm +++ b/crates/zed/src/languages/cpp/outline.scm @@ -51,6 +51,22 @@ parameters: (parameter_list "(" @context ")" @context))) + (pointer_declarator + "*" @context + declarator: (pointer_declarator + "*" @context + declarator: (function_declarator + declarator: (_) @name + parameters: (parameter_list + "(" @context + ")" @context)))) + (reference_declarator + ["&" "&&"] @context + (function_declarator + declarator: (_) @name + parameters: (parameter_list + "(" @context + ")" @context))) ] (type_qualifier)? @context) @item @@ -74,6 +90,22 @@ parameters: (parameter_list "(" @context ")" @context))) + (pointer_declarator + "*" @context + declarator: (pointer_declarator + "*" @context + declarator: (function_declarator + declarator: (_) @name + parameters: (parameter_list + "(" @context + ")" @context)))) + (reference_declarator + ["&" "&&"] @context + (function_declarator + declarator: (_) @name + parameters: (parameter_list + "(" @context + ")" @context))) ] (type_qualifier)? @context) @item @@ -97,5 +129,21 @@ parameters: (parameter_list "(" @context ")" @context))) + (pointer_declarator + "*" @context + declarator: (pointer_declarator + "*" @context + declarator: (function_declarator + declarator: (_) @name + parameters: (parameter_list + "(" @context + ")" @context)))) + (reference_declarator + ["&" "&&"] @context + (function_declarator + declarator: (_) @name + parameters: (parameter_list + "(" @context + ")" @context))) ] (type_qualifier)? @context) @item From 744714b478701e5475bfc28deaf0b94276ae9ff4 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 6 Dec 2022 09:07:25 +0100 Subject: [PATCH 207/240] Remove unused `UserId` import from seed script --- crates/collab/src/bin/seed.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/collab/src/bin/seed.rs b/crates/collab/src/bin/seed.rs index 5ddacf6d64..dfd2ae3a21 100644 --- a/crates/collab/src/bin/seed.rs +++ b/crates/collab/src/bin/seed.rs @@ -1,5 +1,5 @@ use collab::db; -use db::{ConnectOptions, Database, UserId}; +use db::{ConnectOptions, Database}; use serde::{de::DeserializeOwned, Deserialize}; use std::fmt::Write; From fc7b01b74ea1927ff4f5cb1fa4a8875fc367780b Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 6 Dec 2022 10:19:34 +0100 Subject: [PATCH 208/240] Fix busy status when accepting a contact request Previously, we would send a contact update when accepting a request using the same `busy` status for both the requester and the responder. This was obviously wrong and caused the requester to see their own busy status as the newly-added responder contact's status.
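[Note] The fix below amounts to computing a separate busy flag for each side of the new contact edge. As a minimal, self-contained sketch of the bug and the fix (illustrative only: `is_user_busy` is the one name taken from this patch; everything else stands in for the server's contact-update plumbing):

```rust
// Sketch only: stand-ins for the collab server's contact-update types.
#[derive(Debug, PartialEq)]
struct ContactEntry {
    user_id: u64,
    busy: bool,
}

// Stand-in for the database lookup; pretend only user 1 is in a call.
fn is_user_busy(user_id: u64) -> bool {
    user_id == 1
}

fn main() {
    let (requester_id, responder_id) = (1, 2);

    // Before: one flag, computed for the requester, reused in both updates.
    let busy = is_user_busy(requester_id);
    let buggy_entry_for_requester = ContactEntry { user_id: responder_id, busy };
    assert!(buggy_entry_for_requester.busy); // requester sees their own status

    // After: each user's update reflects the *other* user's status.
    let requester_busy = is_user_busy(requester_id);
    let responder_busy = is_user_busy(responder_id);
    let entry_for_requester = ContactEntry { user_id: responder_id, busy: responder_busy };
    let entry_for_responder = ContactEntry { user_id: requester_id, busy: requester_busy };
    assert!(!entry_for_requester.busy);
    assert!(entry_for_responder.busy);
}
```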
--- crates/collab/src/integration_tests.rs | 89 +++++++++++++++++++++++--- crates/collab/src/rpc.rs | 7 +- 2 files changed, 85 insertions(+), 11 deletions(-) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index c1610d71cd..3639afd47c 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -4188,18 +4188,21 @@ async fn test_contacts( cx_a: &mut TestAppContext, cx_b: &mut TestAppContext, cx_c: &mut TestAppContext, + cx_d: &mut TestAppContext, ) { cx_a.foreground().forbid_parking(); let mut server = TestServer::start(cx_a.background()).await; let client_a = server.create_client(cx_a, "user_a").await; let client_b = server.create_client(cx_b, "user_b").await; let client_c = server.create_client(cx_c, "user_c").await; + let client_d = server.create_client(cx_d, "user_d").await; server .make_contacts(&mut [(&client_a, cx_a), (&client_b, cx_b), (&client_c, cx_c)]) .await; let active_call_a = cx_a.read(ActiveCall::global); let active_call_b = cx_b.read(ActiveCall::global); let active_call_c = cx_c.read(ActiveCall::global); + let _active_call_d = cx_d.read(ActiveCall::global); deterministic.run_until_parked(); assert_eq!( @@ -4223,6 +4226,7 @@ async fn test_contacts( ("user_b".to_string(), "online", "free") ] ); + assert_eq!(contacts(&client_d, cx_d), []); server.disconnect_client(client_c.peer_id().unwrap()); server.forbid_connections(); @@ -4242,6 +4246,7 @@ async fn test_contacts( ] ); assert_eq!(contacts(&client_c, cx_c), []); + assert_eq!(contacts(&client_d, cx_d), []); server.allow_connections(); client_c @@ -4271,6 +4276,7 @@ async fn test_contacts( ("user_b".to_string(), "online", "free") ] ); + assert_eq!(contacts(&client_d, cx_d), []); active_call_a .update(cx_a, |call, cx| { @@ -4300,6 +4306,39 @@ async fn test_contacts( ("user_b".to_string(), "online", "busy") ] ); + assert_eq!(contacts(&client_d, cx_d), []); + + // Client B and client D become contacts while client B is being called. 
+ server + .make_contacts(&mut [(&client_b, cx_b), (&client_d, cx_d)]) + .await; + deterministic.run_until_parked(); + assert_eq!( + contacts(&client_a, cx_a), + [ + ("user_b".to_string(), "online", "busy"), + ("user_c".to_string(), "online", "free") + ] + ); + assert_eq!( + contacts(&client_b, cx_b), + [ + ("user_a".to_string(), "online", "busy"), + ("user_c".to_string(), "online", "free"), + ("user_d".to_string(), "online", "free"), + ] + ); + assert_eq!( + contacts(&client_c, cx_c), + [ + ("user_a".to_string(), "online", "busy"), + ("user_b".to_string(), "online", "busy") + ] + ); + assert_eq!( + contacts(&client_d, cx_d), + [("user_b".to_string(), "online", "busy")] + ); active_call_b.update(cx_b, |call, _| call.decline_incoming().unwrap()); deterministic.run_until_parked(); @@ -4314,7 +4353,8 @@ async fn test_contacts( contacts(&client_b, cx_b), [ ("user_a".to_string(), "online", "free"), - ("user_c".to_string(), "online", "free") + ("user_c".to_string(), "online", "free"), + ("user_d".to_string(), "online", "free") ] ); assert_eq!( @@ -4324,6 +4364,10 @@ async fn test_contacts( ("user_b".to_string(), "online", "free") ] ); + assert_eq!( + contacts(&client_d, cx_d), + [("user_b".to_string(), "online", "free")] + ); active_call_c .update(cx_c, |call, cx| { @@ -4343,7 +4387,8 @@ async fn test_contacts( contacts(&client_b, cx_b), [ ("user_a".to_string(), "online", "busy"), - ("user_c".to_string(), "online", "busy") + ("user_c".to_string(), "online", "busy"), + ("user_d".to_string(), "online", "free") ] ); assert_eq!( @@ -4353,6 +4398,10 @@ async fn test_contacts( ("user_b".to_string(), "online", "free") ] ); + assert_eq!( + contacts(&client_d, cx_d), + [("user_b".to_string(), "online", "free")] + ); active_call_a .update(cx_a, |call, cx| call.accept_incoming(cx)) @@ -4370,7 +4419,8 @@ async fn test_contacts( contacts(&client_b, cx_b), [ ("user_a".to_string(), "online", "busy"), - ("user_c".to_string(), "online", "busy") + ("user_c".to_string(), "online", "busy"), + ("user_d".to_string(), "online", "free") ] ); assert_eq!( @@ -4380,6 +4430,10 @@ async fn test_contacts( ("user_b".to_string(), "online", "free") ] ); + assert_eq!( + contacts(&client_d, cx_d), + [("user_b".to_string(), "online", "free")] + ); active_call_a .update(cx_a, |call, cx| { @@ -4399,7 +4453,8 @@ async fn test_contacts( contacts(&client_b, cx_b), [ ("user_a".to_string(), "online", "busy"), - ("user_c".to_string(), "online", "busy") + ("user_c".to_string(), "online", "busy"), + ("user_d".to_string(), "online", "free") ] ); assert_eq!( @@ -4409,6 +4464,10 @@ async fn test_contacts( ("user_b".to_string(), "online", "busy") ] ); + assert_eq!( + contacts(&client_d, cx_d), + [("user_b".to_string(), "online", "busy")] + ); active_call_a.update(cx_a, |call, cx| call.hang_up(cx).unwrap()); deterministic.run_until_parked(); @@ -4423,7 +4482,8 @@ async fn test_contacts( contacts(&client_b, cx_b), [ ("user_a".to_string(), "online", "free"), - ("user_c".to_string(), "online", "free") + ("user_c".to_string(), "online", "free"), + ("user_d".to_string(), "online", "free") ] ); assert_eq!( @@ -4433,6 +4493,10 @@ async fn test_contacts( ("user_b".to_string(), "online", "free") ] ); + assert_eq!( + contacts(&client_d, cx_d), + [("user_b".to_string(), "online", "free")] + ); active_call_a .update(cx_a, |call, cx| { @@ -4452,7 +4516,8 @@ async fn test_contacts( contacts(&client_b, cx_b), [ ("user_a".to_string(), "online", "busy"), - ("user_c".to_string(), "online", "free") + ("user_c".to_string(), "online", "free"), + 
("user_d".to_string(), "online", "free") ] ); assert_eq!( @@ -4462,6 +4527,10 @@ async fn test_contacts( ("user_b".to_string(), "online", "busy") ] ); + assert_eq!( + contacts(&client_d, cx_d), + [("user_b".to_string(), "online", "busy")] + ); server.forbid_connections(); server.disconnect_client(client_a.peer_id().unwrap()); @@ -4471,7 +4540,8 @@ async fn test_contacts( contacts(&client_b, cx_b), [ ("user_a".to_string(), "offline", "free"), - ("user_c".to_string(), "online", "free") + ("user_c".to_string(), "online", "free"), + ("user_d".to_string(), "online", "free") ] ); assert_eq!( @@ -4481,8 +4551,11 @@ async fn test_contacts( ("user_b".to_string(), "online", "free") ] ); + assert_eq!( + contacts(&client_d, cx_d), + [("user_b".to_string(), "online", "free")] + ); - #[allow(clippy::type_complexity)] fn contacts( client: &TestClient, cx: &TestAppContext, diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 0136a5fec6..736f5eb31b 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1616,7 +1616,8 @@ async fn respond_to_contact_request( db.respond_to_contact_request(responder_id, requester_id, accept) .await?; - let busy = db.is_user_busy(requester_id).await?; + let requester_busy = db.is_user_busy(requester_id).await?; + let responder_busy = db.is_user_busy(responder_id).await?; let pool = session.connection_pool().await; // Update responder with new contact @@ -1624,7 +1625,7 @@ async fn respond_to_contact_request( if accept { update .contacts - .push(contact_for_user(requester_id, false, busy, &pool)); + .push(contact_for_user(requester_id, false, requester_busy, &pool)); } update .remove_incoming_requests @@ -1638,7 +1639,7 @@ async fn respond_to_contact_request( if accept { update .contacts - .push(contact_for_user(responder_id, true, busy, &pool)); + .push(contact_for_user(responder_id, true, responder_busy, &pool)); } update .remove_outgoing_requests From 3b5b48c043fcc005fdecfab840afc43032b1d3d1 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 6 Dec 2022 14:49:36 +0100 Subject: [PATCH 209/240] Query project count as i64 instead of i32 when gathering metrics Using the latter will cause a type mismatch when performing the query. --- crates/collab/src/db.rs | 2 +- crates/collab/src/db/tests.rs | 65 +++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 1 deletion(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 915acb00eb..1cda33c00c 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1472,7 +1472,7 @@ impl Database { .into_values::<_, QueryAs>() .one(&*tx) .await? 
- .unwrap_or(0) as usize) + .unwrap_or(0i64) as usize) }) .await } diff --git a/crates/collab/src/db/tests.rs b/crates/collab/src/db/tests.rs index 9e70ae4b05..baa3f87060 100644 --- a/crates/collab/src/db/tests.rs +++ b/crates/collab/src/db/tests.rs @@ -402,6 +402,71 @@ test_both_dbs!(test_metrics_id_postgres, test_metrics_id_sqlite, db, { assert_ne!(metrics_id1, metrics_id2); }); +test_both_dbs!( + test_project_count_postgres, + test_project_count_sqlite, + db, + { + let user1 = db + .create_user( + &format!("admin@example.com"), + true, + NewUserParams { + github_login: "admin".into(), + github_user_id: 0, + invite_count: 0, + }, + ) + .await + .unwrap(); + let user2 = db + .create_user( + &format!("user@example.com"), + false, + NewUserParams { + github_login: "user".into(), + github_user_id: 1, + invite_count: 0, + }, + ) + .await + .unwrap(); + + let room_id = RoomId::from_proto( + db.create_room(user1.user_id, ConnectionId(0), "") + .await + .unwrap() + .id, + ); + db.call(room_id, user1.user_id, ConnectionId(0), user2.user_id, None) + .await + .unwrap(); + db.join_room(room_id, user2.user_id, ConnectionId(1)) + .await + .unwrap(); + assert_eq!(db.project_count_excluding_admins().await.unwrap(), 0); + + db.share_project(room_id, ConnectionId(1), &[]) + .await + .unwrap(); + assert_eq!(db.project_count_excluding_admins().await.unwrap(), 1); + + db.share_project(room_id, ConnectionId(1), &[]) + .await + .unwrap(); + assert_eq!(db.project_count_excluding_admins().await.unwrap(), 2); + + // Projects shared by admins aren't counted. + db.share_project(room_id, ConnectionId(0), &[]) + .await + .unwrap(); + assert_eq!(db.project_count_excluding_admins().await.unwrap(), 2); + + db.leave_room(ConnectionId(1)).await.unwrap(); + assert_eq!(db.project_count_excluding_admins().await.unwrap(), 0); + } +); + #[test] fn test_fuzzy_like_string() { assert_eq!(Database::fuzzy_like_string("abcd"), "%a%b%c%d%"); From 694840cdd6378cffb0eef83f0ef3658425445a46 Mon Sep 17 00:00:00 2001 From: Joseph Lyons Date: Tue, 6 Dec 2022 17:12:12 -0500 Subject: [PATCH 210/240] Allow overwriting signup data if a user signs up more than once with the same email address --- Cargo.lock | 34 +++++++++++++++ crates/collab/Cargo.toml | 1 + crates/collab/src/db.rs | 25 +++++++++++- crates/collab/src/db/signup.rs | 2 +- crates/collab/src/db/tests.rs | 75 ++++++++++++++++++++++++++++++++++ 5 files changed, 135 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9a4d3a78d1..21966a9673 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1160,6 +1160,7 @@ dependencies = [ "lsp", "nanoid", "parking_lot 0.11.2", + "pretty_assertions", "project", "prometheus", "rand 0.8.5", @@ -1730,6 +1731,12 @@ dependencies = [ "workspace", ] +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + [[package]] name = "digest" version = "0.9.0" @@ -4005,6 +4012,15 @@ dependencies = [ "workspace", ] +[[package]] +name = "output_vt100" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "overload" version = "0.1.1" @@ -4346,6 +4362,18 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = 
"pretty_assertions" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755" +dependencies = [ + "ctor", + "diff", + "output_vt100", + "yansi", +] + [[package]] name = "proc-macro-crate" version = "0.1.5" @@ -8065,6 +8093,12 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "zed" version = "0.67.0" diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 8725642ae5..c741341d48 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -64,6 +64,7 @@ fs = { path = "../fs", features = ["test-support"] } git = { path = "../git", features = ["test-support"] } live_kit_client = { path = "../live_kit_client", features = ["test-support"] } lsp = { path = "../lsp", features = ["test-support"] } +pretty_assertions = "1.3.0" project = { path = "../project", features = ["test-support"] } rpc = { path = "../rpc", features = ["test-support"] } settings = { path = "../settings", features = ["test-support"] } diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 1cda33c00c..d90c138886 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -669,7 +669,15 @@ impl Database { }) .on_conflict( OnConflict::column(signup::Column::EmailAddress) - .update_column(signup::Column::EmailAddress) + .update_columns([ + signup::Column::PlatformMac, + signup::Column::PlatformWindows, + signup::Column::PlatformLinux, + signup::Column::EditorFeatures, + signup::Column::ProgrammingLanguages, + signup::Column::DeviceId, + signup::Column::AddedToMailingList, + ]) .to_owned(), ) .exec(&*tx) @@ -679,6 +687,21 @@ impl Database { .await } + pub async fn get_signup(&self, email_address: &str) -> Result { + self.transaction(|tx| async move { + let signup = signup::Entity::find() + .filter(signup::Column::EmailAddress.eq(email_address)) + .one(&*tx) + .await? + .ok_or_else(|| { + anyhow!("signup with email address {} doesn't exist", email_address) + })?; + + Ok(signup) + }) + .await + } + pub async fn get_waitlist_summary(&self) -> Result { self.transaction(|tx| async move { let query = " diff --git a/crates/collab/src/db/signup.rs b/crates/collab/src/db/signup.rs index ca219736a8..5d5a9a1b61 100644 --- a/crates/collab/src/db/signup.rs +++ b/crates/collab/src/db/signup.rs @@ -34,7 +34,7 @@ pub struct Invite { pub email_confirmation_code: String, } -#[derive(Clone, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct NewSignup { pub email_address: String, pub platform_mac: bool, diff --git a/crates/collab/src/db/tests.rs b/crates/collab/src/db/tests.rs index baa3f87060..298176adf2 100644 --- a/crates/collab/src/db/tests.rs +++ b/crates/collab/src/db/tests.rs @@ -2,6 +2,9 @@ use super::*; use gpui::executor::{Background, Deterministic}; use std::sync::Arc; +#[cfg(test)] +use pretty_assertions::{assert_eq, assert_ne}; + macro_rules! 
test_both_dbs { ($postgres_test_name:ident, $sqlite_test_name:ident, $db:ident, $body:block) => { #[gpui::test] @@ -727,6 +730,78 @@ async fn test_invite_codes() { assert_eq!(invite_count, 1); } +#[gpui::test] +async fn test_multiple_signup_overwrite() { + let test_db = TestDb::postgres(build_background_executor()); + let db = test_db.db(); + + let email_address = "user_1@example.com".to_string(); + + let signup = NewSignup { + email_address: email_address.clone(), + platform_mac: false, + platform_linux: true, + platform_windows: false, + editor_features: vec!["speed".into()], + programming_languages: vec!["rust".into(), "c".into()], + device_id: Some(format!("device_id")), + added_to_mailing_list: false, + }; + + db.create_signup(&signup).await.unwrap(); + + // TODO: Remove this method and just have create_signup return an instance? + let signup_from_db = db.get_signup(&signup.email_address).await.unwrap(); + + assert_eq!( + signup_from_db.clone(), + signup::Model { + email_address: signup.email_address, + platform_mac: signup.platform_mac, + platform_linux: signup.platform_linux, + platform_windows: signup.platform_windows, + editor_features: Some(signup.editor_features), + programming_languages: Some(signup.programming_languages), + added_to_mailing_list: signup.added_to_mailing_list, + ..signup_from_db + } + ); + + let signup_overwrite = NewSignup { + email_address, + platform_mac: true, + platform_linux: false, + platform_windows: true, + editor_features: vec!["git integration".into(), "clean design".into()], + programming_languages: vec!["d".into(), "elm".into()], + device_id: Some(format!("different_device_id")), + added_to_mailing_list: true, + }; + + db.create_signup(&signup_overwrite).await.unwrap(); + + let signup_overwrite_from_db = db + .get_signup(&signup_overwrite.email_address) + .await + .unwrap(); + + assert_eq!( + signup_overwrite_from_db.clone(), + signup::Model { + platform_mac: signup_overwrite.platform_mac, + platform_linux: signup_overwrite.platform_linux, + platform_windows: signup_overwrite.platform_windows, + editor_features: Some(signup_overwrite.editor_features), + programming_languages: Some(signup_overwrite.programming_languages), + device_id: signup_overwrite.device_id, + added_to_mailing_list: signup_overwrite.added_to_mailing_list, + // shouldn't overwrite their creation Datetime - user shouldn't lose their spot in line + created_at: signup_from_db.created_at, + ..signup_overwrite_from_db + } + ); +} + #[gpui::test] async fn test_signups() { let test_db = TestDb::postgres(build_background_executor()); From 97989b04a0de265f8dd31f8c53238583415479c0 Mon Sep 17 00:00:00 2001 From: Joseph Lyons Date: Tue, 6 Dec 2022 17:18:54 -0500 Subject: [PATCH 211/240] Remove comment --- crates/collab/src/db/tests.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/collab/src/db/tests.rs b/crates/collab/src/db/tests.rs index 298176adf2..46919d1467 100644 --- a/crates/collab/src/db/tests.rs +++ b/crates/collab/src/db/tests.rs @@ -750,7 +750,6 @@ async fn test_multiple_signup_overwrite() { db.create_signup(&signup).await.unwrap(); - // TODO: Remove this method and just have create_signup return an instance? 
let signup_from_db = db.get_signup(&signup.email_address).await.unwrap(); assert_eq!( From 5f319071270643f040ad4c7e329151e06dca8c59 Mon Sep 17 00:00:00 2001 From: Joseph Lyons Date: Wed, 7 Dec 2022 07:12:27 -0500 Subject: [PATCH 212/240] Clean up test --- crates/collab/src/db/tests.rs | 55 +++++++++++++++++------------------ 1 file changed, 26 insertions(+), 29 deletions(-) diff --git a/crates/collab/src/db/tests.rs b/crates/collab/src/db/tests.rs index 46919d1467..84cf422976 100644 --- a/crates/collab/src/db/tests.rs +++ b/crates/collab/src/db/tests.rs @@ -737,7 +737,7 @@ async fn test_multiple_signup_overwrite() { let email_address = "user_1@example.com".to_string(); - let signup = NewSignup { + let initial_signup = NewSignup { email_address: email_address.clone(), platform_mac: false, platform_linux: true, @@ -748,26 +748,26 @@ async fn test_multiple_signup_overwrite() { added_to_mailing_list: false, }; - db.create_signup(&signup).await.unwrap(); + db.create_signup(&initial_signup).await.unwrap(); - let signup_from_db = db.get_signup(&signup.email_address).await.unwrap(); + let initial_signup_from_db = db.get_signup(&email_address).await.unwrap(); assert_eq!( - signup_from_db.clone(), + initial_signup_from_db.clone(), signup::Model { - email_address: signup.email_address, - platform_mac: signup.platform_mac, - platform_linux: signup.platform_linux, - platform_windows: signup.platform_windows, - editor_features: Some(signup.editor_features), - programming_languages: Some(signup.programming_languages), - added_to_mailing_list: signup.added_to_mailing_list, - ..signup_from_db + email_address: initial_signup.email_address, + platform_mac: initial_signup.platform_mac, + platform_linux: initial_signup.platform_linux, + platform_windows: initial_signup.platform_windows, + editor_features: Some(initial_signup.editor_features), + programming_languages: Some(initial_signup.programming_languages), + added_to_mailing_list: initial_signup.added_to_mailing_list, + ..initial_signup_from_db } ); - let signup_overwrite = NewSignup { - email_address, + let subsequent_signup = NewSignup { + email_address: email_address.clone(), platform_mac: true, platform_linux: false, platform_windows: true, @@ -777,26 +777,23 @@ async fn test_multiple_signup_overwrite() { added_to_mailing_list: true, }; - db.create_signup(&signup_overwrite).await.unwrap(); + db.create_signup(&subsequent_signup).await.unwrap(); - let signup_overwrite_from_db = db - .get_signup(&signup_overwrite.email_address) - .await - .unwrap(); + let subsequent_signup_from_db = db.get_signup(&email_address).await.unwrap(); assert_eq!( - signup_overwrite_from_db.clone(), + subsequent_signup_from_db.clone(), signup::Model { - platform_mac: signup_overwrite.platform_mac, - platform_linux: signup_overwrite.platform_linux, - platform_windows: signup_overwrite.platform_windows, - editor_features: Some(signup_overwrite.editor_features), - programming_languages: Some(signup_overwrite.programming_languages), - device_id: signup_overwrite.device_id, - added_to_mailing_list: signup_overwrite.added_to_mailing_list, + platform_mac: subsequent_signup.platform_mac, + platform_linux: subsequent_signup.platform_linux, + platform_windows: subsequent_signup.platform_windows, + editor_features: Some(subsequent_signup.editor_features), + programming_languages: Some(subsequent_signup.programming_languages), + device_id: subsequent_signup.device_id, + added_to_mailing_list: subsequent_signup.added_to_mailing_list, // shouldn't overwrite their creation Datetime - user 
shouldn't lose their spot in line - created_at: signup_from_db.created_at, - ..signup_overwrite_from_db + created_at: initial_signup_from_db.created_at, + ..subsequent_signup_from_db } ); } From 1b8f23eeedea0127cbba3f691a036193df096c7e Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 7 Dec 2022 14:01:30 +0100 Subject: [PATCH 213/240] Add failing test showcasing inviting existing user via different email --- crates/collab/src/db/tests.rs | 77 +++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/crates/collab/src/db/tests.rs b/crates/collab/src/db/tests.rs index baa3f87060..e1c9e04812 100644 --- a/crates/collab/src/db/tests.rs +++ b/crates/collab/src/db/tests.rs @@ -595,6 +595,8 @@ async fn test_invite_codes() { busy: false, }] ); + assert!(db.has_contact(user1, user2).await.unwrap()); + assert!(db.has_contact(user2, user1).await.unwrap()); assert_eq!( db.get_invite_code_for_user(user2).await.unwrap().unwrap().1, 7 @@ -649,6 +651,8 @@ async fn test_invite_codes() { busy: false, }] ); + assert!(db.has_contact(user1, user3).await.unwrap()); + assert!(db.has_contact(user3, user1).await.unwrap()); assert_eq!( db.get_invite_code_for_user(user3).await.unwrap().unwrap().1, 3 @@ -714,6 +718,8 @@ async fn test_invite_codes() { busy: false, }] ); + assert!(db.has_contact(user1, user4).await.unwrap()); + assert!(db.has_contact(user4, user1).await.unwrap()); assert_eq!( db.get_invite_code_for_user(user4).await.unwrap().unwrap().1, 5 @@ -725,6 +731,77 @@ async fn test_invite_codes() { .unwrap_err(); let (_, invite_count) = db.get_invite_code_for_user(user1).await.unwrap().unwrap(); assert_eq!(invite_count, 1); + + // A newer user can invite an existing one via a different email address + // than the one they used to sign up. 
+ let user5 = db + .create_user( + "user5@example.com", + false, + NewUserParams { + github_login: "user5".into(), + github_user_id: 5, + invite_count: 0, + }, + ) + .await + .unwrap() + .user_id; + db.set_invite_count_for_user(user5, 5).await.unwrap(); + let (user5_invite_code, _) = db.get_invite_code_for_user(user5).await.unwrap().unwrap(); + let user5_invite_to_user1 = db + .create_invite_from_code(&user5_invite_code, "user1@different.com", None) + .await + .unwrap(); + let user1_2 = db + .create_user_from_invite( + &user5_invite_to_user1, + NewUserParams { + github_login: "user1".into(), + github_user_id: 1, + invite_count: 5, + }, + ) + .await + .unwrap() + .unwrap() + .user_id; + assert_eq!(user1_2, user1); + assert_eq!( + db.get_contacts(user1).await.unwrap(), + [ + Contact::Accepted { + user_id: user2, + should_notify: true, + busy: false, + }, + Contact::Accepted { + user_id: user3, + should_notify: true, + busy: false, + }, + Contact::Accepted { + user_id: user4, + should_notify: true, + busy: false, + }, + Contact::Accepted { + user_id: user5, + should_notify: false, + busy: false, + } + ] + ); + assert_eq!( + db.get_contacts(user5).await.unwrap(), + [Contact::Accepted { + user_id: user1, + should_notify: true, + busy: false, + }] + ); + assert!(db.has_contact(user1, user5).await.unwrap()); + assert!(db.has_contact(user5, user1).await.unwrap()); } #[gpui::test] From 665219fb00826adf532124c5ba17e5bb88e60af2 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 7 Dec 2022 14:01:49 +0100 Subject: [PATCH 214/240] Fix inviting user that had already signed up via a different email --- crates/collab/src/db.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 1cda33c00c..0e9227fd07 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -878,10 +878,16 @@ impl Database { let signup = signup.update(&*tx).await?; if let Some(inviting_user_id) = signup.inviting_user_id { + let (user_id_a, user_id_b, a_to_b) = if inviting_user_id < user.id { + (inviting_user_id, user.id, true) + } else { + (user.id, inviting_user_id, false) + }; + contact::Entity::insert(contact::ActiveModel { - user_id_a: ActiveValue::set(inviting_user_id), - user_id_b: ActiveValue::set(user.id), - a_to_b: ActiveValue::set(true), + user_id_a: ActiveValue::set(user_id_a), + user_id_b: ActiveValue::set(user_id_b), + a_to_b: ActiveValue::set(a_to_b), should_notify: ActiveValue::set(true), accepted: ActiveValue::set(true), ..Default::default() From d71d543337abb108bd3ea4b1f5b24729d78ac5e7 Mon Sep 17 00:00:00 2001 From: Joseph Lyons Date: Wed, 7 Dec 2022 08:15:01 -0500 Subject: [PATCH 215/240] Ensure that subsequent signup happens after initial We can't rely on the test running slowly enough that the two `created_at` timestamps come out different; a fast run can produce identical timestamps. Pinning the timestamps explicitly ensures the subsequent signup happens after the initial one, and lets the test verify that the database doesn't overwrite the initial signup's `created_at`.
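[Note] The mechanism in isolation: the test pins each signup's `created_at` instead of letting the database default it to "now". A minimal sketch, assuming the `DateTime` alias used in the patch below resolves to chrono's `NaiveDateTime` (the offsets mirror the test):

```rust
use chrono::NaiveDateTime;

fn main() {
    // Initial signup pinned at t = 0 ms.
    let initial = NaiveDateTime::from_timestamp_millis(0).unwrap();
    // Subsequent signup pinned to one day later, so it is strictly later
    // regardless of how quickly the test body executes.
    let subsequent = NaiveDateTime::from_timestamp_millis(1000 * 60 * 60 * 24).unwrap();
    assert!(subsequent > initial);
}
```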
--- crates/collab/src/db/signup.rs | 1 + crates/collab/src/db/tests.rs | 13 +++++++++++++ 2 files changed, 14 insertions(+) diff --git a/crates/collab/src/db/signup.rs b/crates/collab/src/db/signup.rs index 5d5a9a1b61..6368482de9 100644 --- a/crates/collab/src/db/signup.rs +++ b/crates/collab/src/db/signup.rs @@ -44,6 +44,7 @@ pub struct NewSignup { pub programming_languages: Vec, pub device_id: Option, pub added_to_mailing_list: bool, + pub created_at: Option, } #[derive(Clone, Debug, PartialEq, Deserialize, Serialize, FromQueryResult)] diff --git a/crates/collab/src/db/tests.rs b/crates/collab/src/db/tests.rs index 84cf422976..e3819116a0 100644 --- a/crates/collab/src/db/tests.rs +++ b/crates/collab/src/db/tests.rs @@ -737,6 +737,8 @@ async fn test_multiple_signup_overwrite() { let email_address = "user_1@example.com".to_string(); + let initial_signup_created_at_milliseconds = 0; + let initial_signup = NewSignup { email_address: email_address.clone(), platform_mac: false, @@ -746,6 +748,9 @@ async fn test_multiple_signup_overwrite() { programming_languages: vec!["rust".into(), "c".into()], device_id: Some(format!("device_id")), added_to_mailing_list: false, + created_at: Some( + DateTime::from_timestamp_millis(initial_signup_created_at_milliseconds).unwrap(), + ), }; db.create_signup(&initial_signup).await.unwrap(); @@ -775,6 +780,13 @@ async fn test_multiple_signup_overwrite() { programming_languages: vec!["d".into(), "elm".into()], device_id: Some(format!("different_device_id")), added_to_mailing_list: true, + // subsequent signup happens next day + created_at: Some( + DateTime::from_timestamp_millis( + initial_signup_created_at_milliseconds + (1000 * 60 * 60 * 24), + ) + .unwrap(), + ), }; db.create_signup(&subsequent_signup).await.unwrap(); @@ -817,6 +829,7 @@ async fn test_signups() { programming_languages: vec!["rust".into(), "c".into()], device_id: Some(format!("device_id_{i}")), added_to_mailing_list: i != 0, // One user failed to subscribe + created_at: Some(DateTime::from_timestamp_millis(i as i64).unwrap()), // Signups are consecutive }) .collect::>(); From cffb064c16fd9e363ed9b33e27acf91bd5d3b714 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 7 Dec 2022 16:39:32 -0800 Subject: [PATCH 216/240] Refactor editor scrolling and implement scroll commands from vim mode --- assets/keymaps/vim.json | 80 ++- crates/diagnostics/src/diagnostics.rs | 5 +- crates/editor/src/editor.rs | 614 ++---------------- crates/editor/src/editor_tests.rs | 21 +- crates/editor/src/element.rs | 15 +- crates/editor/src/items.rs | 65 +- crates/editor/src/scroll.rs | 339 ++++++++++ crates/editor/src/scroll/actions.rs | 159 +++++ crates/editor/src/scroll/autoscroll.rs | 246 +++++++ crates/editor/src/scroll/scroll_amount.rs | 48 ++ crates/editor/src/selections_collection.rs | 2 +- crates/go_to_line/src/go_to_line.rs | 2 +- crates/gpui/src/app.rs | 173 ++++- crates/gpui/src/keymap.rs | 15 + crates/journal/src/journal.rs | 2 +- crates/outline/src/outline.rs | 4 +- crates/project_symbols/src/project_symbols.rs | 3 +- crates/search/src/project_search.rs | 4 +- crates/util/src/lib.rs | 4 + crates/vim/src/editor_events.rs | 17 +- crates/vim/src/insert.rs | 2 +- crates/vim/src/normal.rs | 61 +- crates/vim/src/normal/change.rs | 3 +- crates/vim/src/normal/delete.rs | 2 +- crates/vim/src/state.rs | 2 + crates/vim/src/vim.rs | 22 + crates/vim/src/visual.rs | 4 +- crates/workspace/src/dock.rs | 8 - crates/zed/src/zed.rs | 5 +- 29 files changed, 1244 insertions(+), 683 deletions(-) create mode 100644 
crates/editor/src/scroll.rs create mode 100644 crates/editor/src/scroll/actions.rs create mode 100644 crates/editor/src/scroll/autoscroll.rs create mode 100644 crates/editor/src/scroll/scroll_amount.rs diff --git a/assets/keymaps/vim.json b/assets/keymaps/vim.json index 94729af21f..5cdd4fc7d4 100644 --- a/assets/keymaps/vim.json +++ b/assets/keymaps/vim.json @@ -38,22 +38,6 @@ ], "%": "vim::Matching", "escape": "editor::Cancel", - "i": [ - "vim::PushOperator", - { - "Object": { - "around": false - } - } - ], - "a": [ - "vim::PushOperator", - { - "Object": { - "around": true - } - } - ], "0": "vim::StartOfLine", // When no number operator present, use start of line motion "1": [ "vim::Number", @@ -93,6 +77,28 @@ ] } }, + { + //Operators + "context": "Editor && VimControl && vim_operator == none", + "bindings": { + "i": [ + "vim::PushOperator", + { + "Object": { + "around": false + } + } + ], + "a": [ + "vim::PushOperator", + { + "Object": { + "around": true + } + } + ] + } + }, { "context": "Editor && vim_mode == normal && vim_operator == none", "bindings": { @@ -110,6 +116,12 @@ "vim::PushOperator", "Yank" ], + "z": [ + "vim::PushOperator", + { + "Namespace": "Z" + } + ], "i": [ "vim::SwitchMode", "Insert" @@ -147,6 +159,30 @@ { "focus": true } + ], + "ctrl-f": [ + "vim::Scroll", + "PageDown" + ], + "ctrl-b": [ + "vim::Scroll", + "PageUp" + ], + "ctrl-d": [ + "vim::Scroll", + "HalfPageDown" + ], + "ctrl-u": [ + "vim::Scroll", + "HalfPageUp" + ], + "ctrl-e": [ + "vim::Scroll", + "LineDown" + ], + "ctrl-y": [ + "vim::Scroll", + "LineUp" ] } }, @@ -188,6 +224,18 @@ "y": "vim::CurrentLine" } }, + { + "context": "Editor && vim_operator == z", + "bindings": { + "t": "editor::ScrollCursorTop", + "z": "editor::ScrollCursorCenter", + "b": "editor::ScrollCursorBottom", + "escape": [ + "vim::SwitchMode", + "Normal" + ] + } + }, { "context": "Editor && VimObject", "bindings": { diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index f1c612a58d..9122706ad3 100644 --- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs @@ -5,8 +5,9 @@ use collections::{BTreeMap, HashSet}; use editor::{ diagnostic_block_renderer, display_map::{BlockDisposition, BlockId, BlockProperties, BlockStyle, RenderBlock}, - highlight_diagnostic_message, Autoscroll, Editor, ExcerptId, ExcerptRange, MultiBuffer, - ToOffset, + highlight_diagnostic_message, + scroll::autoscroll::Autoscroll, + Editor, ExcerptId, ExcerptRange, MultiBuffer, ToOffset, }; use gpui::{ actions, elements::*, fonts::TextStyle, impl_internal_actions, serde_json, AnyViewHandle, diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 63db71edae..1aee1e246d 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -10,6 +10,7 @@ mod mouse_context_menu; pub mod movement; mod multi_buffer; mod persistence; +pub mod scroll; pub mod selections_collection; #[cfg(test)] @@ -33,13 +34,13 @@ use gpui::{ elements::*, executor, fonts::{self, HighlightStyle, TextStyle}, - geometry::vector::{vec2f, Vector2F}, + geometry::vector::Vector2F, impl_actions, impl_internal_actions, platform::CursorStyle, serde_json::json, - text_layout, AnyViewHandle, AppContext, AsyncAppContext, Axis, ClipboardItem, Element, - ElementBox, Entity, ModelHandle, MouseButton, MutableAppContext, RenderContext, Subscription, - Task, View, ViewContext, ViewHandle, WeakViewHandle, + AnyViewHandle, AppContext, AsyncAppContext, ClipboardItem, Element, ElementBox, Entity, + ModelHandle, 
MouseButton, MutableAppContext, RenderContext, Subscription, Task, View, + ViewContext, ViewHandle, WeakViewHandle, }; use highlight_matching_bracket::refresh_matching_bracket_highlights; use hover_popover::{hide_hover, HoverState}; @@ -61,11 +62,13 @@ pub use multi_buffer::{ use multi_buffer::{MultiBufferChunks, ToOffsetUtf16}; use ordered_float::OrderedFloat; use project::{FormatTrigger, LocationLink, Project, ProjectPath, ProjectTransaction}; +use scroll::{ + autoscroll::Autoscroll, OngoingScroll, ScrollAnchor, ScrollManager, ScrollbarAutoHide, +}; use selections_collection::{resolve_multiple, MutableSelectionsCollection, SelectionsCollection}; use serde::{Deserialize, Serialize}; use settings::Settings; use smallvec::SmallVec; -use smol::Timer; use snippet::Snippet; use std::{ any::TypeId, @@ -86,11 +89,9 @@ use workspace::{ItemNavHistory, Workspace, WorkspaceId}; use crate::git::diff_hunk_to_display; const CURSOR_BLINK_INTERVAL: Duration = Duration::from_millis(500); -const SCROLLBAR_SHOW_INTERVAL: Duration = Duration::from_secs(1); const MAX_LINE_LEN: usize = 1024; const MIN_NAVIGATION_HISTORY_ROW_DELTA: i64 = 10; const MAX_SELECTION_HISTORY_LEN: usize = 1024; -pub const SCROLL_EVENT_SEPARATION: Duration = Duration::from_millis(28); pub const FORMAT_TIMEOUT: Duration = Duration::from_secs(2); @@ -100,12 +101,6 @@ pub struct SelectNext { pub replace_newest: bool, } -#[derive(Clone, PartialEq)] -pub struct Scroll { - pub scroll_position: Vector2F, - pub axis: Option, -} - #[derive(Clone, PartialEq)] pub struct Select(pub SelectPhase); @@ -258,7 +253,7 @@ impl_actions!( ] ); -impl_internal_actions!(editor, [Scroll, Select, Jump]); +impl_internal_actions!(editor, [Select, Jump]); enum DocumentHighlightRead {} enum DocumentHighlightWrite {} @@ -270,12 +265,8 @@ pub enum Direction { Next, } -#[derive(Default)] -struct ScrollbarAutoHide(bool); - pub fn init(cx: &mut MutableAppContext) { cx.add_action(Editor::new_file); - cx.add_action(Editor::scroll); cx.add_action(Editor::select); cx.add_action(Editor::cancel); cx.add_action(Editor::newline); @@ -305,12 +296,9 @@ pub fn init(cx: &mut MutableAppContext) { cx.add_action(Editor::redo); cx.add_action(Editor::move_up); cx.add_action(Editor::move_page_up); - cx.add_action(Editor::page_up); cx.add_action(Editor::move_down); cx.add_action(Editor::move_page_down); - cx.add_action(Editor::page_down); cx.add_action(Editor::next_screen); - cx.add_action(Editor::move_left); cx.add_action(Editor::move_right); cx.add_action(Editor::move_to_previous_word_start); @@ -370,6 +358,7 @@ pub fn init(cx: &mut MutableAppContext) { hover_popover::init(cx); link_go_to_definition::init(cx); mouse_context_menu::init(cx); + scroll::actions::init(cx); workspace::register_project_item::(cx); workspace::register_followable_item::(cx); @@ -411,46 +400,6 @@ pub enum SelectMode { All, } -#[derive(PartialEq, Eq)] -pub enum Autoscroll { - Next, - Strategy(AutoscrollStrategy), -} - -impl Autoscroll { - pub fn fit() -> Self { - Self::Strategy(AutoscrollStrategy::Fit) - } - - pub fn newest() -> Self { - Self::Strategy(AutoscrollStrategy::Newest) - } - - pub fn center() -> Self { - Self::Strategy(AutoscrollStrategy::Center) - } -} - -#[derive(PartialEq, Eq, Default)] -pub enum AutoscrollStrategy { - Fit, - Newest, - #[default] - Center, - Top, - Bottom, -} - -impl AutoscrollStrategy { - fn next(&self) -> Self { - match self { - AutoscrollStrategy::Center => AutoscrollStrategy::Top, - AutoscrollStrategy::Top => AutoscrollStrategy::Bottom, - _ => AutoscrollStrategy::Center, - } - } 
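// The `AutoscrollStrategy::next` cycle removed here is re-homed in
// crates/editor/src/scroll/autoscroll.rs later in this patch. A standalone
// sketch of that cycling behavior (the enum is renamed `Strategy` to keep
// the example self-contained; illustrative only): repeated `Autoscroll::Next`
// requests rotate the cursor line through Center -> Top -> Bottom and back
// to Center.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Strategy {
    Fit,
    Newest,
    Center,
    Top,
    Bottom,
}

impl Strategy {
    fn next(self) -> Self {
        match self {
            Strategy::Center => Strategy::Top,
            Strategy::Top => Strategy::Bottom,
            // Fit, Newest, and Bottom all restart the cycle at Center.
            _ => Strategy::Center,
        }
    }
}

fn main() {
    let mut strategy = Strategy::Center;
    let mut seen = Vec::new();
    for _ in 0..4 {
        seen.push(strategy);
        strategy = strategy.next();
    }
    assert_eq!(
        seen,
        [Strategy::Center, Strategy::Top, Strategy::Bottom, Strategy::Center]
    );
}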
-} - #[derive(Copy, Clone, PartialEq, Eq)] pub enum EditorMode { SingleLine, @@ -477,74 +426,12 @@ type CompletionId = usize; type GetFieldEditorTheme = dyn Fn(&theme::Theme) -> theme::FieldEditor; type OverrideTextStyle = dyn Fn(&EditorStyle) -> Option; -#[derive(Clone, Copy)] -pub struct OngoingScroll { - last_timestamp: Instant, - axis: Option, -} - -impl OngoingScroll { - fn initial() -> OngoingScroll { - OngoingScroll { - last_timestamp: Instant::now() - SCROLL_EVENT_SEPARATION, - axis: None, - } - } - - fn update(&mut self, axis: Option) { - self.last_timestamp = Instant::now(); - self.axis = axis; - } - - pub fn filter(&self, delta: &mut Vector2F) -> Option { - const UNLOCK_PERCENT: f32 = 1.9; - const UNLOCK_LOWER_BOUND: f32 = 6.; - let mut axis = self.axis; - - let x = delta.x().abs(); - let y = delta.y().abs(); - let duration = Instant::now().duration_since(self.last_timestamp); - if duration > SCROLL_EVENT_SEPARATION { - //New ongoing scroll will start, determine axis - axis = if x <= y { - Some(Axis::Vertical) - } else { - Some(Axis::Horizontal) - }; - } else if x.max(y) >= UNLOCK_LOWER_BOUND { - //Check if the current ongoing will need to unlock - match axis { - Some(Axis::Vertical) => { - if x > y && x >= y * UNLOCK_PERCENT { - axis = None; - } - } - - Some(Axis::Horizontal) => { - if y > x && y >= x * UNLOCK_PERCENT { - axis = None; - } - } - - None => {} - } - } - - match axis { - Some(Axis::Vertical) => *delta = vec2f(0., delta.y()), - Some(Axis::Horizontal) => *delta = vec2f(delta.x(), 0.), - None => {} - } - - axis - } -} - pub struct Editor { handle: WeakViewHandle, buffer: ModelHandle, display_map: ModelHandle, pub selections: SelectionsCollection, + pub scroll_manager: ScrollManager, columnar_selection_tail: Option, add_selections_state: Option, select_next_state: Option, @@ -554,10 +441,6 @@ pub struct Editor { select_larger_syntax_node_stack: Vec]>>, ime_transaction: Option, active_diagnostics: Option, - ongoing_scroll: OngoingScroll, - scroll_position: Vector2F, - scroll_top_anchor: Anchor, - autoscroll_request: Option<(Autoscroll, bool)>, soft_wrap_mode_override: Option, get_field_editor_theme: Option>, override_text_style: Option>, @@ -565,10 +448,7 @@ pub struct Editor { focused: bool, blink_manager: ModelHandle, show_local_selections: bool, - show_scrollbars: bool, - hide_scrollbar_task: Option>, mode: EditorMode, - vertical_scroll_margin: f32, placeholder_text: Option>, highlighted_rows: Option>, #[allow(clippy::type_complexity)] @@ -590,8 +470,6 @@ pub struct Editor { leader_replica_id: Option, hover_state: HoverState, link_go_to_definition_state: LinkGoToDefinitionState, - visible_line_count: Option, - last_autoscroll: Option<(Vector2F, f32, f32, AutoscrollStrategy)>, _subscriptions: Vec, } @@ -600,9 +478,8 @@ pub struct EditorSnapshot { pub display_snapshot: DisplaySnapshot, pub placeholder_text: Option>, is_focused: bool, + scroll_anchor: ScrollAnchor, ongoing_scroll: OngoingScroll, - scroll_position: Vector2F, - scroll_top_anchor: Anchor, } #[derive(Clone, Debug)] @@ -1090,12 +967,9 @@ pub struct ClipboardSelection { #[derive(Debug)] pub struct NavigationData { - // Matching offsets for anchor and scroll_top_anchor allows us to recreate the anchor if the buffer - // has since been closed cursor_anchor: Anchor, cursor_position: Point, - scroll_position: Vector2F, - scroll_top_anchor: Anchor, + scroll_anchor: ScrollAnchor, scroll_top_row: u32, } @@ -1163,9 +1037,8 @@ impl Editor { display_map.set_state(&snapshot, cx); }); }); - 
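// The `OngoingScroll::filter` logic removed above (and re-homed in
// crates/editor/src/scroll.rs by this patch) implements trackpad axis
// locking: shortly after a scroll gesture starts, the delta is clamped to
// the dominant axis, and the lock releases only when the other axis clearly
// wins. A self-contained sketch of the same decision rule, with the
// thresholds copied from the code above; `locked_axis` is an illustrative
// name, and the real code detects a new gesture via a 28 ms event gap
// rather than the `new_gesture` flag assumed here:
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Axis {
    Horizontal,
    Vertical,
}

/// Given the absolute per-axis deltas of an in-progress scroll, decide which
/// axis (if any) the event should stay locked to.
fn locked_axis(prev: Option<Axis>, x: f32, y: f32, new_gesture: bool) -> Option<Axis> {
    const UNLOCK_PERCENT: f32 = 1.9;
    const UNLOCK_LOWER_BOUND: f32 = 6.0;

    if new_gesture {
        // A fresh gesture picks the dominant axis, ties going to vertical.
        return if x <= y {
            Some(Axis::Vertical)
        } else {
            Some(Axis::Horizontal)
        };
    }
    match prev {
        // Unlock only when cross-axis movement is both large in absolute
        // terms and roughly twice the locked axis's movement.
        Some(Axis::Vertical)
            if x.max(y) >= UNLOCK_LOWER_BOUND && x > y && x >= y * UNLOCK_PERCENT =>
        {
            None
        }
        Some(Axis::Horizontal)
            if x.max(y) >= UNLOCK_LOWER_BOUND && y > x && y >= x * UNLOCK_PERCENT =>
        {
            None
        }
        other => other,
    }
}

fn main() {
    // A mostly-vertical start locks the vertical axis...
    assert_eq!(locked_axis(None, 1.0, 8.0, true), Some(Axis::Vertical));
    // ...and a later, strongly horizontal swipe unlocks it.
    assert_eq!(locked_axis(Some(Axis::Vertical), 14.0, 2.0, false), None);
}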
clone.selections.set_state(&self.selections); - clone.scroll_position = self.scroll_position; - clone.scroll_top_anchor = self.scroll_top_anchor; + clone.selections.clone_state(&self.selections); + clone.scroll_manager.clone_state(&self.scroll_manager); clone.searchable = self.searchable; clone } @@ -1200,6 +1073,7 @@ impl Editor { buffer: buffer.clone(), display_map: display_map.clone(), selections, + scroll_manager: ScrollManager::new(), columnar_selection_tail: None, add_selections_state: None, select_next_state: None, @@ -1212,17 +1086,10 @@ impl Editor { soft_wrap_mode_override: None, get_field_editor_theme, project, - ongoing_scroll: OngoingScroll::initial(), - scroll_position: Vector2F::zero(), - scroll_top_anchor: Anchor::min(), - autoscroll_request: None, focused: false, blink_manager: blink_manager.clone(), show_local_selections: true, - show_scrollbars: true, - hide_scrollbar_task: None, mode, - vertical_scroll_margin: 3.0, placeholder_text: None, highlighted_rows: None, background_highlights: Default::default(), @@ -1244,8 +1111,6 @@ impl Editor { leader_replica_id: None, hover_state: Default::default(), link_go_to_definition_state: Default::default(), - visible_line_count: None, - last_autoscroll: None, _subscriptions: vec![ cx.observe(&buffer, Self::on_buffer_changed), cx.subscribe(&buffer, Self::on_buffer_event), @@ -1254,7 +1119,7 @@ impl Editor { ], }; this.end_selection(cx); - this.make_scrollbar_visible(cx); + this.scroll_manager.show_scrollbar(cx); let editor_created_event = EditorCreated(cx.handle()); cx.emit_global(editor_created_event); @@ -1307,9 +1172,8 @@ impl Editor { EditorSnapshot { mode: self.mode, display_snapshot: self.display_map.update(cx, |map, cx| map.snapshot(cx)), - ongoing_scroll: self.ongoing_scroll, - scroll_position: self.scroll_position, - scroll_top_anchor: self.scroll_top_anchor, + scroll_anchor: self.scroll_manager.anchor(), + ongoing_scroll: self.scroll_manager.ongoing_scroll(), placeholder_text: self.placeholder_text.clone(), is_focused: self .handle @@ -1348,64 +1212,6 @@ impl Editor { cx.notify(); } - pub fn set_vertical_scroll_margin(&mut self, margin_rows: usize, cx: &mut ViewContext) { - self.vertical_scroll_margin = margin_rows as f32; - cx.notify(); - } - - pub fn set_scroll_position(&mut self, scroll_position: Vector2F, cx: &mut ViewContext) { - self.set_scroll_position_internal(scroll_position, true, cx); - } - - fn set_scroll_position_internal( - &mut self, - scroll_position: Vector2F, - local: bool, - cx: &mut ViewContext, - ) { - let map = self.display_map.update(cx, |map, cx| map.snapshot(cx)); - - if scroll_position.y() <= 0. 
{ - self.scroll_top_anchor = Anchor::min(); - self.scroll_position = scroll_position.max(vec2f(0., 0.)); - } else { - let scroll_top_buffer_offset = - DisplayPoint::new(scroll_position.y() as u32, 0).to_offset(&map, Bias::Right); - let anchor = map - .buffer_snapshot - .anchor_at(scroll_top_buffer_offset, Bias::Right); - self.scroll_position = vec2f( - scroll_position.x(), - scroll_position.y() - anchor.to_display_point(&map).row() as f32, - ); - self.scroll_top_anchor = anchor; - } - - self.make_scrollbar_visible(cx); - self.autoscroll_request.take(); - hide_hover(self, cx); - - cx.emit(Event::ScrollPositionChanged { local }); - cx.notify(); - } - - fn set_visible_line_count(&mut self, lines: f32) { - self.visible_line_count = Some(lines) - } - - fn set_scroll_top_anchor( - &mut self, - anchor: Anchor, - position: Vector2F, - cx: &mut ViewContext, - ) { - self.scroll_top_anchor = anchor; - self.scroll_position = position; - self.make_scrollbar_visible(cx); - cx.emit(Event::ScrollPositionChanged { local: false }); - cx.notify(); - } - pub fn set_cursor_shape(&mut self, cursor_shape: CursorShape, cx: &mut ViewContext) { self.cursor_shape = cursor_shape; cx.notify(); @@ -1431,199 +1237,6 @@ impl Editor { self.input_enabled = input_enabled; } - pub fn scroll_position(&self, cx: &mut ViewContext) -> Vector2F { - let display_map = self.display_map.update(cx, |map, cx| map.snapshot(cx)); - compute_scroll_position(&display_map, self.scroll_position, &self.scroll_top_anchor) - } - - pub fn clamp_scroll_left(&mut self, max: f32) -> bool { - if max < self.scroll_position.x() { - self.scroll_position.set_x(max); - true - } else { - false - } - } - - pub fn autoscroll_vertically( - &mut self, - viewport_height: f32, - line_height: f32, - cx: &mut ViewContext, - ) -> bool { - let visible_lines = viewport_height / line_height; - let display_map = self.display_map.update(cx, |map, cx| map.snapshot(cx)); - let mut scroll_position = - compute_scroll_position(&display_map, self.scroll_position, &self.scroll_top_anchor); - let max_scroll_top = if matches!(self.mode, EditorMode::AutoHeight { .. }) { - (display_map.max_point().row() as f32 - visible_lines + 1.).max(0.) - } else { - display_map.max_point().row() as f32 - }; - if scroll_position.y() > max_scroll_top { - scroll_position.set_y(max_scroll_top); - self.set_scroll_position(scroll_position, cx); - } - - let (autoscroll, local) = if let Some(autoscroll) = self.autoscroll_request.take() { - autoscroll - } else { - return false; - }; - - let first_cursor_top; - let last_cursor_bottom; - if let Some(highlighted_rows) = &self.highlighted_rows { - first_cursor_top = highlighted_rows.start as f32; - last_cursor_bottom = first_cursor_top + 1.; - } else if autoscroll == Autoscroll::newest() { - let newest_selection = self.selections.newest::(cx); - first_cursor_top = newest_selection.head().to_display_point(&display_map).row() as f32; - last_cursor_bottom = first_cursor_top + 1.; - } else { - let selections = self.selections.all::(cx); - first_cursor_top = selections - .first() - .unwrap() - .head() - .to_display_point(&display_map) - .row() as f32; - last_cursor_bottom = selections - .last() - .unwrap() - .head() - .to_display_point(&display_map) - .row() as f32 - + 1.0; - } - - let margin = if matches!(self.mode, EditorMode::AutoHeight { .. }) { - 0. 
- } else { - ((visible_lines - (last_cursor_bottom - first_cursor_top)) / 2.0).floor() - }; - if margin < 0.0 { - return false; - } - - let strategy = match autoscroll { - Autoscroll::Strategy(strategy) => strategy, - Autoscroll::Next => { - let last_autoscroll = &self.last_autoscroll; - if let Some(last_autoscroll) = last_autoscroll { - if self.scroll_position == last_autoscroll.0 - && first_cursor_top == last_autoscroll.1 - && last_cursor_bottom == last_autoscroll.2 - { - last_autoscroll.3.next() - } else { - AutoscrollStrategy::default() - } - } else { - AutoscrollStrategy::default() - } - } - }; - - match strategy { - AutoscrollStrategy::Fit | AutoscrollStrategy::Newest => { - let margin = margin.min(self.vertical_scroll_margin); - let target_top = (first_cursor_top - margin).max(0.0); - let target_bottom = last_cursor_bottom + margin; - let start_row = scroll_position.y(); - let end_row = start_row + visible_lines; - - if target_top < start_row { - scroll_position.set_y(target_top); - self.set_scroll_position_internal(scroll_position, local, cx); - } else if target_bottom >= end_row { - scroll_position.set_y(target_bottom - visible_lines); - self.set_scroll_position_internal(scroll_position, local, cx); - } - } - AutoscrollStrategy::Center => { - scroll_position.set_y((first_cursor_top - margin).max(0.0)); - self.set_scroll_position_internal(scroll_position, local, cx); - } - AutoscrollStrategy::Top => { - scroll_position.set_y((first_cursor_top).max(0.0)); - self.set_scroll_position_internal(scroll_position, local, cx); - } - AutoscrollStrategy::Bottom => { - scroll_position.set_y((last_cursor_bottom - visible_lines).max(0.0)); - self.set_scroll_position_internal(scroll_position, local, cx); - } - } - - self.last_autoscroll = Some(( - self.scroll_position, - first_cursor_top, - last_cursor_bottom, - strategy, - )); - - true - } - - pub fn autoscroll_horizontally( - &mut self, - start_row: u32, - viewport_width: f32, - scroll_width: f32, - max_glyph_width: f32, - layouts: &[text_layout::Line], - cx: &mut ViewContext, - ) -> bool { - let display_map = self.display_map.update(cx, |map, cx| map.snapshot(cx)); - let selections = self.selections.all::(cx); - - let mut target_left; - let mut target_right; - - if self.highlighted_rows.is_some() { - target_left = 0.0_f32; - target_right = 0.0_f32; - } else { - target_left = std::f32::INFINITY; - target_right = 0.0_f32; - for selection in selections { - let head = selection.head().to_display_point(&display_map); - if head.row() >= start_row && head.row() < start_row + layouts.len() as u32 { - let start_column = head.column().saturating_sub(3); - let end_column = cmp::min(display_map.line_len(head.row()), head.column() + 3); - target_left = target_left.min( - layouts[(head.row() - start_row) as usize] - .x_for_index(start_column as usize), - ); - target_right = target_right.max( - layouts[(head.row() - start_row) as usize].x_for_index(end_column as usize) - + max_glyph_width, - ); - } - } - } - - target_right = target_right.min(scroll_width); - - if target_right - target_left > viewport_width { - return false; - } - - let scroll_left = self.scroll_position.x() * max_glyph_width; - let scroll_right = scroll_left + viewport_width; - - if target_left < scroll_left { - self.scroll_position.set_x(target_left / max_glyph_width); - true - } else if target_right > scroll_right { - self.scroll_position - .set_x((target_right - viewport_width) / max_glyph_width); - true - } else { - false - } - } - fn selections_did_change( &mut self, local: bool, @@ 
-1746,11 +1359,6 @@ impl Editor { }); } - fn scroll(&mut self, action: &Scroll, cx: &mut ViewContext) { - self.ongoing_scroll.update(action.axis); - self.set_scroll_position(action.scroll_position, cx); - } - fn select(&mut self, Select(phase): &Select, cx: &mut ViewContext) { self.hide_context_menu(cx); @@ -4073,23 +3681,6 @@ impl Editor { }) } - pub fn next_screen(&mut self, _: &NextScreen, cx: &mut ViewContext) { - if self.take_rename(true, cx).is_some() { - return; - } - - if let Some(_) = self.context_menu.as_mut() { - return; - } - - if matches!(self.mode, EditorMode::SingleLine) { - cx.propagate_action(); - return; - } - - self.request_autoscroll(Autoscroll::Next, cx); - } - pub fn move_up(&mut self, _: &MoveUp, cx: &mut ViewContext) { if self.take_rename(true, cx).is_some() { return; @@ -4118,26 +3709,18 @@ impl Editor { }) } - pub fn move_page_up(&mut self, action: &MovePageUp, cx: &mut ViewContext) { - if self.take_rename(true, cx).is_some() { - return; - } - - if let Some(context_menu) = self.context_menu.as_mut() { - if context_menu.select_first(cx) { - return; - } + pub fn move_page_up(&mut self, action: &MovePageUp, cx: &mut ViewContext) -> Option<()> { + self.take_rename(true, cx)?; + if self.context_menu.as_mut()?.select_first(cx) { + return None; } if matches!(self.mode, EditorMode::SingleLine) { cx.propagate_action(); - return; + return None; } - let row_count = match self.visible_line_count { - Some(row_count) => row_count as u32 - 1, - None => return, - }; + let row_count = self.visible_line_count()? as u32 - 1; let autoscroll = if action.center_cursor { Autoscroll::center() @@ -4156,32 +3739,8 @@ impl Editor { selection.collapse_to(cursor, goal); }); }); - } - pub fn page_up(&mut self, _: &PageUp, cx: &mut ViewContext) { - if self.take_rename(true, cx).is_some() { - return; - } - - if let Some(context_menu) = self.context_menu.as_mut() { - if context_menu.select_first(cx) { - return; - } - } - - if matches!(self.mode, EditorMode::SingleLine) { - cx.propagate_action(); - return; - } - - let lines = match self.visible_line_count { - Some(lines) => lines, - None => return, - }; - - let cur_position = self.scroll_position(cx); - let new_pos = cur_position - vec2f(0., lines + 1.); - self.set_scroll_position(new_pos, cx); + Some(()) } pub fn select_up(&mut self, _: &SelectUp, cx: &mut ViewContext) { @@ -4216,26 +3775,25 @@ impl Editor { }); } - pub fn move_page_down(&mut self, action: &MovePageDown, cx: &mut ViewContext) { + pub fn move_page_down( + &mut self, + action: &MovePageDown, + cx: &mut ViewContext, + ) -> Option<()> { if self.take_rename(true, cx).is_some() { - return; + return None; } - if let Some(context_menu) = self.context_menu.as_mut() { - if context_menu.select_last(cx) { - return; - } + if self.context_menu.as_mut()?.select_last(cx) { + return None; } if matches!(self.mode, EditorMode::SingleLine) { cx.propagate_action(); - return; + return None; } - let row_count = match self.visible_line_count { - Some(row_count) => row_count as u32 - 1, - None => return, - }; + let row_count = self.visible_line_count()? 
as u32 - 1; let autoscroll = if action.center_cursor { Autoscroll::center() @@ -4254,32 +3812,8 @@ impl Editor { selection.collapse_to(cursor, goal); }); }); - } - pub fn page_down(&mut self, _: &PageDown, cx: &mut ViewContext) { - if self.take_rename(true, cx).is_some() { - return; - } - - if let Some(context_menu) = self.context_menu.as_mut() { - if context_menu.select_last(cx) { - return; - } - } - - if matches!(self.mode, EditorMode::SingleLine) { - cx.propagate_action(); - return; - } - - let lines = match self.visible_line_count { - Some(lines) => lines, - None => return, - }; - - let cur_position = self.scroll_position(cx); - let new_pos = cur_position + vec2f(0., lines - 1.); - self.set_scroll_position(new_pos, cx); + Some(()) } pub fn select_down(&mut self, _: &SelectDown, cx: &mut ViewContext) { @@ -4602,18 +4136,19 @@ impl Editor { fn push_to_nav_history( &self, - position: Anchor, + cursor_anchor: Anchor, new_position: Option, cx: &mut ViewContext, ) { if let Some(nav_history) = &self.nav_history { let buffer = self.buffer.read(cx).read(cx); - let point = position.to_point(&buffer); - let scroll_top_row = self.scroll_top_anchor.to_point(&buffer).row; + let cursor_position = cursor_anchor.to_point(&buffer); + let scroll_state = self.scroll_manager.anchor(); + let scroll_top_row = scroll_state.top_row(&buffer); drop(buffer); if let Some(new_position) = new_position { - let row_delta = (new_position.row as i64 - point.row as i64).abs(); + let row_delta = (new_position.row as i64 - cursor_position.row as i64).abs(); if row_delta < MIN_NAVIGATION_HISTORY_ROW_DELTA { return; } @@ -4621,10 +4156,9 @@ impl Editor { nav_history.push( Some(NavigationData { - cursor_anchor: position, - cursor_position: point, - scroll_position: self.scroll_position, - scroll_top_anchor: self.scroll_top_anchor, + cursor_anchor, + cursor_position, + scroll_anchor: scroll_state, scroll_top_row, }), cx, @@ -5922,16 +5456,6 @@ impl Editor { }); } - pub fn request_autoscroll(&mut self, autoscroll: Autoscroll, cx: &mut ViewContext) { - self.autoscroll_request = Some((autoscroll, true)); - cx.notify(); - } - - fn request_autoscroll_remotely(&mut self, autoscroll: Autoscroll, cx: &mut ViewContext) { - self.autoscroll_request = Some((autoscroll, false)); - cx.notify(); - } - pub fn transact( &mut self, cx: &mut ViewContext, @@ -6340,31 +5864,6 @@ impl Editor { self.blink_manager.read(cx).visible() && self.focused } - pub fn show_scrollbars(&self) -> bool { - self.show_scrollbars - } - - fn make_scrollbar_visible(&mut self, cx: &mut ViewContext) { - if !self.show_scrollbars { - self.show_scrollbars = true; - cx.notify(); - } - - if cx.default_global::().0 { - self.hide_scrollbar_task = Some(cx.spawn_weak(|this, mut cx| async move { - Timer::after(SCROLLBAR_SHOW_INTERVAL).await; - if let Some(this) = this.upgrade(&cx) { - this.update(&mut cx, |this, cx| { - this.show_scrollbars = false; - cx.notify(); - }); - } - })); - } else { - self.hide_scrollbar_task = None; - } - } - fn on_buffer_changed(&mut self, _: ModelHandle, cx: &mut ViewContext) { cx.notify(); } @@ -6561,11 +6060,7 @@ impl EditorSnapshot { } pub fn scroll_position(&self) -> Vector2F { - compute_scroll_position( - &self.display_snapshot, - self.scroll_position, - &self.scroll_top_anchor, - ) + self.scroll_anchor.scroll_position(&self.display_snapshot) } } @@ -6577,20 +6072,6 @@ impl Deref for EditorSnapshot { } } -fn compute_scroll_position( - snapshot: &DisplaySnapshot, - mut scroll_position: Vector2F, - scroll_top_anchor: &Anchor, -) -> Vector2F { - if 
*scroll_top_anchor != Anchor::min() { - let scroll_top = scroll_top_anchor.to_display_point(snapshot).row() as f32; - scroll_position.set_y(scroll_top + scroll_position.y()); - } else { - scroll_position.set_y(0.); - } - scroll_position -} - #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum Event { BufferEdited, @@ -6603,7 +6084,6 @@ pub enum Event { SelectionsChanged { local: bool }, ScrollPositionChanged { local: bool }, Closed, - IgnoredInput, } pub struct EditorFocused(pub ViewHandle); @@ -6789,7 +6269,6 @@ impl View for Editor { cx: &mut ViewContext, ) { if !self.input_enabled { - cx.emit(Event::IgnoredInput); return; } @@ -6826,7 +6305,6 @@ impl View for Editor { cx: &mut ViewContext, ) { if !self.input_enabled { - cx.emit(Event::IgnoredInput); return; } diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index ca66ae7dc9..9a6cd23453 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -12,7 +12,7 @@ use crate::test::{ }; use gpui::{ executor::Deterministic, - geometry::rect::RectF, + geometry::{rect::RectF, vector::vec2f}, platform::{WindowBounds, WindowOptions}, }; use language::{FakeLspAdapter, LanguageConfig, LanguageRegistry, Point}; @@ -544,31 +544,30 @@ fn test_navigation_history(cx: &mut gpui::MutableAppContext) { // Set scroll position to check later editor.set_scroll_position(Vector2F::new(5.5, 5.5), cx); - let original_scroll_position = editor.scroll_position; - let original_scroll_top_anchor = editor.scroll_top_anchor; + let original_scroll_position = editor.scroll_manager.anchor(); // Jump to the end of the document and adjust scroll editor.move_to_end(&MoveToEnd, cx); editor.set_scroll_position(Vector2F::new(-2.5, -0.5), cx); - assert_ne!(editor.scroll_position, original_scroll_position); - assert_ne!(editor.scroll_top_anchor, original_scroll_top_anchor); + assert_ne!(editor.scroll_manager.anchor(), original_scroll_position); let nav_entry = pop_history(&mut editor, cx).unwrap(); editor.navigate(nav_entry.data.unwrap(), cx); - assert_eq!(editor.scroll_position, original_scroll_position); - assert_eq!(editor.scroll_top_anchor, original_scroll_top_anchor); + assert_eq!(editor.scroll_manager.anchor(), original_scroll_position); // Ensure we don't panic when navigation data contains invalid anchors *and* points. 
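// For the refactor in the hunks above: the free function
// `compute_scroll_position` becomes `ScrollAnchor::scroll_position`, but the
// invariant is the same either way. The vertical position is reconstructed
// as `anchor row + fractional offset`, and an unset anchor (`Anchor::min()`)
// pins the view to the top. A reduced sketch of the vertical half of that
// math (the horizontal offset passes through unchanged in the real code;
// `scroll_y` and the `Option<u32>` stand-in for a resolved anchor are
// assumptions of this example):
fn scroll_y(top_anchor_row: Option<u32>, offset_y: f32) -> f32 {
    match top_anchor_row {
        // Anchored: the stored offset is relative to the anchor's display row.
        Some(row) => row as f32 + offset_y,
        // No anchor: clamp to the very top of the buffer.
        None => 0.0,
    }
}

fn main() {
    assert_eq!(scroll_y(Some(40), 0.5), 40.5);
    assert_eq!(scroll_y(None, 3.0), 0.0);
}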
- let mut invalid_anchor = editor.scroll_top_anchor; + let mut invalid_anchor = editor.scroll_manager.anchor().top_anchor; invalid_anchor.text_anchor.buffer_id = Some(999); let invalid_point = Point::new(9999, 0); editor.navigate( Box::new(NavigationData { cursor_anchor: invalid_anchor, cursor_position: invalid_point, - scroll_top_anchor: invalid_anchor, + scroll_anchor: ScrollAnchor { + top_anchor: invalid_anchor, + offset: Default::default(), + }, scroll_top_row: invalid_point.row, - scroll_position: Default::default(), }), cx, ); @@ -5034,7 +5033,7 @@ fn test_following(cx: &mut gpui::MutableAppContext) { .apply_update_proto(pending_update.borrow_mut().take().unwrap(), cx) .unwrap(); assert_eq!(follower.scroll_position(cx), initial_scroll_position); - assert!(follower.autoscroll_request.is_some()); + assert!(follower.scroll_manager.has_autoscroll_request()); }); assert_eq!(follower.read(cx).selections.ranges(cx), vec![0..0]); diff --git a/crates/editor/src/element.rs b/crates/editor/src/element.rs index 8409786637..7d69d3833c 100644 --- a/crates/editor/src/element.rs +++ b/crates/editor/src/element.rs @@ -1,7 +1,7 @@ use super::{ display_map::{BlockContext, ToDisplayPoint}, - Anchor, DisplayPoint, Editor, EditorMode, EditorSnapshot, Scroll, Select, SelectPhase, - SoftWrap, ToPoint, MAX_LINE_LEN, + Anchor, DisplayPoint, Editor, EditorMode, EditorSnapshot, Select, SelectPhase, SoftWrap, + ToPoint, MAX_LINE_LEN, }; use crate::{ display_map::{BlockStyle, DisplaySnapshot, TransformBlock}, @@ -13,6 +13,7 @@ use crate::{ GoToFetchedDefinition, GoToFetchedTypeDefinition, UpdateGoToDefinitionLink, }, mouse_context_menu::DeployMouseContextMenu, + scroll::actions::Scroll, EditorStyle, }; use clock::ReplicaId; @@ -955,7 +956,7 @@ impl EditorElement { move |_, cx| { if let Some(view) = view.upgrade(cx.deref_mut()) { view.update(cx.deref_mut(), |view, cx| { - view.make_scrollbar_visible(cx); + view.scroll_manager.show_scrollbar(cx); }); } } @@ -977,7 +978,7 @@ impl EditorElement { position.set_y(top_row as f32); view.set_scroll_position(position, cx); } else { - view.make_scrollbar_visible(cx); + view.scroll_manager.show_scrollbar(cx); } }); } @@ -1298,7 +1299,7 @@ impl EditorElement { }; let tooltip_style = cx.global::().theme.tooltip.clone(); - let scroll_x = snapshot.scroll_position.x(); + let scroll_x = snapshot.scroll_anchor.offset.x(); let (fixed_blocks, non_fixed_blocks) = snapshot .blocks_in_range(rows.clone()) .partition::, _>(|(_, block)| match block { @@ -1670,7 +1671,7 @@ impl Element for EditorElement { )); } - show_scrollbars = view.show_scrollbars(); + show_scrollbars = view.scroll_manager.scrollbars_visible(); include_root = view .project .as_ref() @@ -1725,7 +1726,7 @@ impl Element for EditorElement { ); self.update_view(cx.app, |view, cx| { - let clamped = view.clamp_scroll_left(scroll_max.x()); + let clamped = view.scroll_manager.clamp_scroll_left(scroll_max.x()); let autoscrolled = if autoscroll_horizontally { view.autoscroll_horizontally( diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index afe659af61..4779fe73b8 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -26,8 +26,9 @@ use workspace::{ use crate::{ display_map::ToDisplayPoint, link_go_to_definition::hide_link_definition, - movement::surrounding_word, persistence::DB, Anchor, Autoscroll, Editor, Event, ExcerptId, - MultiBuffer, MultiBufferSnapshot, NavigationData, ToPoint as _, FORMAT_TIMEOUT, + movement::surrounding_word, persistence::DB, scroll::ScrollAnchor, Anchor, 
Autoscroll, Editor, + Event, ExcerptId, MultiBuffer, MultiBufferSnapshot, NavigationData, ToPoint as _, + FORMAT_TIMEOUT, }; pub const MAX_TAB_TITLE_LEN: usize = 24; @@ -87,14 +88,16 @@ impl FollowableItem for Editor { } if let Some(anchor) = state.scroll_top_anchor { - editor.set_scroll_top_anchor( - Anchor { - buffer_id: Some(state.buffer_id as usize), - excerpt_id, - text_anchor: language::proto::deserialize_anchor(anchor) - .ok_or_else(|| anyhow!("invalid scroll top"))?, + editor.set_scroll_anchor( + ScrollAnchor { + top_anchor: Anchor { + buffer_id: Some(state.buffer_id as usize), + excerpt_id, + text_anchor: language::proto::deserialize_anchor(anchor) + .ok_or_else(|| anyhow!("invalid scroll top"))?, + }, + offset: vec2f(state.scroll_x, state.scroll_y), }, - vec2f(state.scroll_x, state.scroll_y), cx, ); } @@ -132,13 +135,14 @@ impl FollowableItem for Editor { fn to_state_proto(&self, cx: &AppContext) -> Option { let buffer_id = self.buffer.read(cx).as_singleton()?.read(cx).remote_id(); + let scroll_anchor = self.scroll_manager.anchor(); Some(proto::view::Variant::Editor(proto::view::Editor { buffer_id, scroll_top_anchor: Some(language::proto::serialize_anchor( - &self.scroll_top_anchor.text_anchor, + &scroll_anchor.top_anchor.text_anchor, )), - scroll_x: self.scroll_position.x(), - scroll_y: self.scroll_position.y(), + scroll_x: scroll_anchor.offset.x(), + scroll_y: scroll_anchor.offset.y(), selections: self .selections .disjoint_anchors() @@ -160,11 +164,12 @@ impl FollowableItem for Editor { match update { proto::update_view::Variant::Editor(update) => match event { Event::ScrollPositionChanged { .. } => { + let scroll_anchor = self.scroll_manager.anchor(); update.scroll_top_anchor = Some(language::proto::serialize_anchor( - &self.scroll_top_anchor.text_anchor, + &scroll_anchor.top_anchor.text_anchor, )); - update.scroll_x = self.scroll_position.x(); - update.scroll_y = self.scroll_position.y(); + update.scroll_x = scroll_anchor.offset.x(); + update.scroll_y = scroll_anchor.offset.y(); true } Event::SelectionsChanged { .. 
} => { @@ -207,14 +212,16 @@ impl FollowableItem for Editor { self.set_selections_from_remote(selections, cx); self.request_autoscroll_remotely(Autoscroll::newest(), cx); } else if let Some(anchor) = message.scroll_top_anchor { - self.set_scroll_top_anchor( - Anchor { - buffer_id: Some(buffer_id), - excerpt_id, - text_anchor: language::proto::deserialize_anchor(anchor) - .ok_or_else(|| anyhow!("invalid scroll top"))?, + self.set_scroll_anchor( + ScrollAnchor { + top_anchor: Anchor { + buffer_id: Some(buffer_id), + excerpt_id, + text_anchor: language::proto::deserialize_anchor(anchor) + .ok_or_else(|| anyhow!("invalid scroll top"))?, + }, + offset: vec2f(message.scroll_x, message.scroll_y), }, - vec2f(message.scroll_x, message.scroll_y), cx, ); } @@ -279,13 +286,12 @@ impl Item for Editor { buffer.clip_point(data.cursor_position, Bias::Left) }; - let scroll_top_anchor = if buffer.can_resolve(&data.scroll_top_anchor) { - data.scroll_top_anchor - } else { - buffer.anchor_before( + let mut scroll_anchor = data.scroll_anchor; + if !buffer.can_resolve(&scroll_anchor.top_anchor) { + scroll_anchor.top_anchor = buffer.anchor_before( buffer.clip_point(Point::new(data.scroll_top_row, 0), Bias::Left), - ) - }; + ); + } drop(buffer); @@ -293,8 +299,7 @@ impl Item for Editor { false } else { let nav_history = self.nav_history.take(); - self.scroll_position = data.scroll_position; - self.scroll_top_anchor = scroll_top_anchor; + self.set_scroll_anchor(data.scroll_anchor, cx); self.change_selections(Some(Autoscroll::fit()), cx, |s| { s.select_ranges([offset..offset]) }); diff --git a/crates/editor/src/scroll.rs b/crates/editor/src/scroll.rs new file mode 100644 index 0000000000..78bc3685c1 --- /dev/null +++ b/crates/editor/src/scroll.rs @@ -0,0 +1,339 @@ +pub mod actions; +pub mod autoscroll; +pub mod scroll_amount; + +use std::{ + cmp::Ordering, + time::{Duration, Instant}, +}; + +use gpui::{ + geometry::vector::{vec2f, Vector2F}, + Axis, MutableAppContext, Task, ViewContext, +}; +use language::Bias; + +use crate::{ + display_map::{DisplaySnapshot, ToDisplayPoint}, + hover_popover::hide_hover, + Anchor, DisplayPoint, Editor, EditorMode, Event, MultiBufferSnapshot, ToPoint, +}; + +use self::{ + autoscroll::{Autoscroll, AutoscrollStrategy}, + scroll_amount::ScrollAmount, +}; + +pub const SCROLL_EVENT_SEPARATION: Duration = Duration::from_millis(28); +const SCROLLBAR_SHOW_INTERVAL: Duration = Duration::from_secs(1); + +#[derive(Default)] +pub struct ScrollbarAutoHide(pub bool); + +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct ScrollAnchor { + pub offset: Vector2F, + pub top_anchor: Anchor, +} + +impl ScrollAnchor { + fn new() -> Self { + Self { + offset: Vector2F::zero(), + top_anchor: Anchor::min(), + } + } + + pub fn scroll_position(&self, snapshot: &DisplaySnapshot) -> Vector2F { + let mut scroll_position = self.offset; + if self.top_anchor != Anchor::min() { + let scroll_top = self.top_anchor.to_display_point(snapshot).row() as f32; + scroll_position.set_y(scroll_top + scroll_position.y()); + } else { + scroll_position.set_y(0.); + } + scroll_position + } + + pub fn top_row(&self, buffer: &MultiBufferSnapshot) -> u32 { + self.top_anchor.to_point(buffer).row + } +} + +#[derive(Clone, Copy, Debug)] +pub struct OngoingScroll { + last_event: Instant, + axis: Option, +} + +impl OngoingScroll { + fn new() -> Self { + Self { + last_event: Instant::now() - SCROLL_EVENT_SEPARATION, + axis: None, + } + } + + pub fn filter(&self, delta: &mut Vector2F) -> Option { + const UNLOCK_PERCENT: f32 = 1.9; + const 
UNLOCK_LOWER_BOUND: f32 = 6.; + let mut axis = self.axis; + + let x = delta.x().abs(); + let y = delta.y().abs(); + let duration = Instant::now().duration_since(self.last_event); + if duration > SCROLL_EVENT_SEPARATION { + //New ongoing scroll will start, determine axis + axis = if x <= y { + Some(Axis::Vertical) + } else { + Some(Axis::Horizontal) + }; + } else if x.max(y) >= UNLOCK_LOWER_BOUND { + //Check if the current ongoing will need to unlock + match axis { + Some(Axis::Vertical) => { + if x > y && x >= y * UNLOCK_PERCENT { + axis = None; + } + } + + Some(Axis::Horizontal) => { + if y > x && y >= x * UNLOCK_PERCENT { + axis = None; + } + } + + None => {} + } + } + + match axis { + Some(Axis::Vertical) => *delta = vec2f(0., delta.y()), + Some(Axis::Horizontal) => *delta = vec2f(delta.x(), 0.), + None => {} + } + + axis + } +} + +pub struct ScrollManager { + vertical_scroll_margin: f32, + anchor: ScrollAnchor, + ongoing: OngoingScroll, + autoscroll_request: Option<(Autoscroll, bool)>, + last_autoscroll: Option<(Vector2F, f32, f32, AutoscrollStrategy)>, + show_scrollbars: bool, + hide_scrollbar_task: Option>, + visible_line_count: Option, +} + +impl ScrollManager { + pub fn new() -> Self { + ScrollManager { + vertical_scroll_margin: 3.0, + anchor: ScrollAnchor::new(), + ongoing: OngoingScroll::new(), + autoscroll_request: None, + show_scrollbars: true, + hide_scrollbar_task: None, + last_autoscroll: None, + visible_line_count: None, + } + } + + pub fn clone_state(&mut self, other: &Self) { + self.anchor = other.anchor; + self.ongoing = other.ongoing; + } + + pub fn anchor(&self) -> ScrollAnchor { + self.anchor + } + + pub fn ongoing_scroll(&self) -> OngoingScroll { + self.ongoing + } + + pub fn update_ongoing_scroll(&mut self, axis: Option) { + self.ongoing.last_event = Instant::now(); + self.ongoing.axis = axis; + } + + pub fn scroll_position(&self, snapshot: &DisplaySnapshot) -> Vector2F { + self.anchor.scroll_position(snapshot) + } + + fn set_scroll_position( + &mut self, + scroll_position: Vector2F, + map: &DisplaySnapshot, + local: bool, + cx: &mut ViewContext, + ) { + let new_anchor = if scroll_position.y() <= 0. 
{ + ScrollAnchor { + top_anchor: Anchor::min(), + offset: scroll_position.max(vec2f(0., 0.)), + } + } else { + let scroll_top_buffer_offset = + DisplayPoint::new(scroll_position.y() as u32, 0).to_offset(&map, Bias::Right); + let top_anchor = map + .buffer_snapshot + .anchor_at(scroll_top_buffer_offset, Bias::Right); + + ScrollAnchor { + top_anchor, + offset: vec2f( + scroll_position.x(), + scroll_position.y() - top_anchor.to_display_point(&map).row() as f32, + ), + } + }; + + self.set_anchor(new_anchor, local, cx); + } + + fn set_anchor(&mut self, anchor: ScrollAnchor, local: bool, cx: &mut ViewContext) { + self.anchor = anchor; + cx.emit(Event::ScrollPositionChanged { local }); + self.show_scrollbar(cx); + self.autoscroll_request.take(); + cx.notify(); + } + + pub fn show_scrollbar(&mut self, cx: &mut ViewContext) { + if !self.show_scrollbars { + self.show_scrollbars = true; + cx.notify(); + } + + if cx.default_global::().0 { + self.hide_scrollbar_task = Some(cx.spawn_weak(|editor, mut cx| async move { + cx.background().timer(SCROLLBAR_SHOW_INTERVAL).await; + if let Some(editor) = editor.upgrade(&cx) { + editor.update(&mut cx, |editor, cx| { + editor.scroll_manager.show_scrollbars = false; + cx.notify(); + }); + } + })); + } else { + self.hide_scrollbar_task = None; + } + } + + pub fn scrollbars_visible(&self) -> bool { + self.show_scrollbars + } + + pub fn has_autoscroll_request(&self) -> bool { + self.autoscroll_request.is_some() + } + + pub fn clamp_scroll_left(&mut self, max: f32) -> bool { + if max < self.anchor.offset.x() { + self.anchor.offset.set_x(max); + true + } else { + false + } + } +} + +impl Editor { + pub fn vertical_scroll_margin(&mut self) -> usize { + self.scroll_manager.vertical_scroll_margin as usize + } + + pub fn set_vertical_scroll_margin(&mut self, margin_rows: usize, cx: &mut ViewContext) { + self.scroll_manager.vertical_scroll_margin = margin_rows as f32; + cx.notify(); + } + + pub fn visible_line_count(&self) -> Option { + self.scroll_manager.visible_line_count + } + + pub(crate) fn set_visible_line_count(&mut self, lines: f32) { + self.scroll_manager.visible_line_count = Some(lines) + } + + pub fn set_scroll_position(&mut self, scroll_position: Vector2F, cx: &mut ViewContext) { + self.set_scroll_position_internal(scroll_position, true, cx); + } + + pub(crate) fn set_scroll_position_internal( + &mut self, + scroll_position: Vector2F, + local: bool, + cx: &mut ViewContext, + ) { + let map = self.display_map.update(cx, |map, cx| map.snapshot(cx)); + + hide_hover(self, cx); + self.scroll_manager + .set_scroll_position(scroll_position, &map, local, cx); + } + + pub fn scroll_position(&self, cx: &mut ViewContext) -> Vector2F { + let display_map = self.display_map.update(cx, |map, cx| map.snapshot(cx)); + self.scroll_manager.anchor.scroll_position(&display_map) + } + + pub fn set_scroll_anchor(&mut self, scroll_anchor: ScrollAnchor, cx: &mut ViewContext) { + hide_hover(self, cx); + self.scroll_manager.set_anchor(scroll_anchor, true, cx); + } + + pub fn scroll_screen(&mut self, amount: &ScrollAmount, cx: &mut ViewContext) { + if matches!(self.mode, EditorMode::SingleLine) { + cx.propagate_action(); + return; + } + + if self.take_rename(true, cx).is_some() { + return; + } + + if amount.move_context_menu_selection(self, cx) { + return; + } + + let cur_position = self.scroll_position(cx); + let new_pos = cur_position + vec2f(0., amount.lines(self) - 1.); + self.set_scroll_position(new_pos, cx); + } + + /// Returns an ordering. 
The newest selection is: + /// Ordering::Equal => on screen + /// Ordering::Less => above the screen + /// Ordering::Greater => below the screen + pub fn newest_selection_on_screen(&self, cx: &mut MutableAppContext) -> Ordering { + let snapshot = self.display_map.update(cx, |map, cx| map.snapshot(cx)); + let newest_head = self + .selections + .newest_anchor() + .head() + .to_display_point(&snapshot); + let screen_top = self + .scroll_manager + .anchor + .top_anchor + .to_display_point(&snapshot); + + if screen_top > newest_head { + return Ordering::Less; + } + + if let Some(visible_lines) = self.visible_line_count() { + if newest_head.row() < screen_top.row() + visible_lines as u32 { + return Ordering::Equal; + } + } + + Ordering::Greater + } +} diff --git a/crates/editor/src/scroll/actions.rs b/crates/editor/src/scroll/actions.rs new file mode 100644 index 0000000000..8e57402532 --- /dev/null +++ b/crates/editor/src/scroll/actions.rs @@ -0,0 +1,159 @@ +use gpui::{ + actions, geometry::vector::Vector2F, impl_internal_actions, Axis, MutableAppContext, + ViewContext, +}; +use language::Bias; + +use crate::{Editor, EditorMode}; + +use super::{autoscroll::Autoscroll, scroll_amount::ScrollAmount, ScrollAnchor}; + +actions!( + editor, + [ + LineDown, + LineUp, + HalfPageDown, + HalfPageUp, + PageDown, + PageUp, + NextScreen, + ScrollCursorTop, + ScrollCursorCenter, + ScrollCursorBottom, + ] +); + +#[derive(Clone, PartialEq)] +pub struct Scroll { + pub scroll_position: Vector2F, + pub axis: Option, +} + +impl_internal_actions!(editor, [Scroll]); + +pub fn init(cx: &mut MutableAppContext) { + cx.add_action(Editor::next_screen); + cx.add_action(Editor::scroll); + cx.add_action(Editor::scroll_cursor_top); + cx.add_action(Editor::scroll_cursor_center); + cx.add_action(Editor::scroll_cursor_bottom); + cx.add_action(|this: &mut Editor, _: &LineDown, cx| { + this.scroll_screen(&ScrollAmount::LineDown, cx) + }); + cx.add_action(|this: &mut Editor, _: &LineUp, cx| { + this.scroll_screen(&ScrollAmount::LineUp, cx) + }); + cx.add_action(|this: &mut Editor, _: &HalfPageDown, cx| { + this.scroll_screen(&ScrollAmount::HalfPageDown, cx) + }); + cx.add_action(|this: &mut Editor, _: &HalfPageUp, cx| { + this.scroll_screen(&ScrollAmount::HalfPageUp, cx) + }); + cx.add_action(|this: &mut Editor, _: &PageDown, cx| { + this.scroll_screen(&ScrollAmount::PageDown, cx) + }); + cx.add_action(|this: &mut Editor, _: &PageUp, cx| { + this.scroll_screen(&ScrollAmount::PageUp, cx) + }); +} + +impl Editor { + pub fn next_screen(&mut self, _: &NextScreen, cx: &mut ViewContext) -> Option<()> { + if self.take_rename(true, cx).is_some() { + return None; + } + + self.context_menu.as_mut()?; + + if matches!(self.mode, EditorMode::SingleLine) { + cx.propagate_action(); + return None; + } + + self.request_autoscroll(Autoscroll::Next, cx); + + Some(()) + } + + fn scroll(&mut self, action: &Scroll, cx: &mut ViewContext) { + self.scroll_manager.update_ongoing_scroll(action.axis); + self.set_scroll_position(action.scroll_position, cx); + } + + fn scroll_cursor_top(editor: &mut Editor, _: &ScrollCursorTop, cx: &mut ViewContext) { + let snapshot = editor.snapshot(cx).display_snapshot; + let scroll_margin_rows = editor.vertical_scroll_margin() as u32; + + let mut new_screen_top = editor.selections.newest_display(cx).head(); + *new_screen_top.row_mut() = new_screen_top.row().saturating_sub(scroll_margin_rows); + *new_screen_top.column_mut() = 0; + let new_screen_top = new_screen_top.to_offset(&snapshot, Bias::Left); + let new_anchor = 
snapshot.buffer_snapshot.anchor_before(new_screen_top); + + editor.set_scroll_anchor( + ScrollAnchor { + top_anchor: new_anchor, + offset: Default::default(), + }, + cx, + ) + } + + fn scroll_cursor_center( + editor: &mut Editor, + _: &ScrollCursorCenter, + cx: &mut ViewContext, + ) { + let snapshot = editor.snapshot(cx).display_snapshot; + let visible_rows = if let Some(visible_rows) = editor.visible_line_count() { + visible_rows as u32 + } else { + return; + }; + + let mut new_screen_top = editor.selections.newest_display(cx).head(); + *new_screen_top.row_mut() = new_screen_top.row().saturating_sub(visible_rows / 2); + *new_screen_top.column_mut() = 0; + let new_screen_top = new_screen_top.to_offset(&snapshot, Bias::Left); + let new_anchor = snapshot.buffer_snapshot.anchor_before(new_screen_top); + + editor.set_scroll_anchor( + ScrollAnchor { + top_anchor: new_anchor, + offset: Default::default(), + }, + cx, + ) + } + + fn scroll_cursor_bottom( + editor: &mut Editor, + _: &ScrollCursorBottom, + cx: &mut ViewContext, + ) { + let snapshot = editor.snapshot(cx).display_snapshot; + let scroll_margin_rows = editor.vertical_scroll_margin() as u32; + let visible_rows = if let Some(visible_rows) = editor.visible_line_count() { + visible_rows as u32 + } else { + return; + }; + + let mut new_screen_top = editor.selections.newest_display(cx).head(); + *new_screen_top.row_mut() = new_screen_top + .row() + .saturating_sub(visible_rows.saturating_sub(scroll_margin_rows)); + *new_screen_top.column_mut() = 0; + let new_screen_top = new_screen_top.to_offset(&snapshot, Bias::Left); + let new_anchor = snapshot.buffer_snapshot.anchor_before(new_screen_top); + + editor.set_scroll_anchor( + ScrollAnchor { + top_anchor: new_anchor, + offset: Default::default(), + }, + cx, + ) + } +} diff --git a/crates/editor/src/scroll/autoscroll.rs b/crates/editor/src/scroll/autoscroll.rs new file mode 100644 index 0000000000..63ee7c56ca --- /dev/null +++ b/crates/editor/src/scroll/autoscroll.rs @@ -0,0 +1,246 @@ +use std::cmp; + +use gpui::{text_layout, ViewContext}; +use language::Point; + +use crate::{display_map::ToDisplayPoint, Editor, EditorMode}; + +#[derive(PartialEq, Eq)] +pub enum Autoscroll { + Next, + Strategy(AutoscrollStrategy), +} + +impl Autoscroll { + pub fn fit() -> Self { + Self::Strategy(AutoscrollStrategy::Fit) + } + + pub fn newest() -> Self { + Self::Strategy(AutoscrollStrategy::Newest) + } + + pub fn center() -> Self { + Self::Strategy(AutoscrollStrategy::Center) + } +} + +#[derive(PartialEq, Eq, Default)] +pub enum AutoscrollStrategy { + Fit, + Newest, + #[default] + Center, + Top, + Bottom, +} + +impl AutoscrollStrategy { + fn next(&self) -> Self { + match self { + AutoscrollStrategy::Center => AutoscrollStrategy::Top, + AutoscrollStrategy::Top => AutoscrollStrategy::Bottom, + _ => AutoscrollStrategy::Center, + } + } +} + +impl Editor { + pub fn autoscroll_vertically( + &mut self, + viewport_height: f32, + line_height: f32, + cx: &mut ViewContext, + ) -> bool { + let visible_lines = viewport_height / line_height; + let display_map = self.display_map.update(cx, |map, cx| map.snapshot(cx)); + let mut scroll_position = self.scroll_manager.scroll_position(&display_map); + let max_scroll_top = if matches!(self.mode, EditorMode::AutoHeight { .. }) { + (display_map.max_point().row() as f32 - visible_lines + 1.).max(0.) 
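// (The `else` branch of the expression above continues below.) A minimal
// sketch of the max-scroll-top clamp being computed at this point in
// `autoscroll_vertically`, assuming `max_row` is the last display row and
// `visible_lines` the viewport height in lines; `max_scroll_top` is an
// illustrative helper, not the patch's API. Auto-height editors stop
// scrolling once the last line reaches the bottom edge, while fixed-height
// editors allow the last line to reach the top.
fn max_scroll_top(max_row: f32, visible_lines: f32, auto_height: bool) -> f32 {
    if auto_height {
        (max_row - visible_lines + 1.0).max(0.0)
    } else {
        max_row
    }
}

fn main() {
    // A 10-row auto-height editor showing 4 lines can scroll at most 7 rows...
    assert_eq!(max_scroll_top(10.0, 4.0, true), 7.0);
    // ...while a fixed-height editor can put row 10 at the very top.
    assert_eq!(max_scroll_top(10.0, 4.0, false), 10.0);
}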
+ } else { + display_map.max_point().row() as f32 + }; + if scroll_position.y() > max_scroll_top { + scroll_position.set_y(max_scroll_top); + self.set_scroll_position(scroll_position, cx); + } + + let (autoscroll, local) = + if let Some(autoscroll) = self.scroll_manager.autoscroll_request.take() { + autoscroll + } else { + return false; + }; + + let first_cursor_top; + let last_cursor_bottom; + if let Some(highlighted_rows) = &self.highlighted_rows { + first_cursor_top = highlighted_rows.start as f32; + last_cursor_bottom = first_cursor_top + 1.; + } else if autoscroll == Autoscroll::newest() { + let newest_selection = self.selections.newest::(cx); + first_cursor_top = newest_selection.head().to_display_point(&display_map).row() as f32; + last_cursor_bottom = first_cursor_top + 1.; + } else { + let selections = self.selections.all::(cx); + first_cursor_top = selections + .first() + .unwrap() + .head() + .to_display_point(&display_map) + .row() as f32; + last_cursor_bottom = selections + .last() + .unwrap() + .head() + .to_display_point(&display_map) + .row() as f32 + + 1.0; + } + + let margin = if matches!(self.mode, EditorMode::AutoHeight { .. }) { + 0. + } else { + ((visible_lines - (last_cursor_bottom - first_cursor_top)) / 2.0).floor() + }; + if margin < 0.0 { + return false; + } + + let strategy = match autoscroll { + Autoscroll::Strategy(strategy) => strategy, + Autoscroll::Next => { + let last_autoscroll = &self.scroll_manager.last_autoscroll; + if let Some(last_autoscroll) = last_autoscroll { + if self.scroll_manager.anchor.offset == last_autoscroll.0 + && first_cursor_top == last_autoscroll.1 + && last_cursor_bottom == last_autoscroll.2 + { + last_autoscroll.3.next() + } else { + AutoscrollStrategy::default() + } + } else { + AutoscrollStrategy::default() + } + } + }; + + match strategy { + AutoscrollStrategy::Fit | AutoscrollStrategy::Newest => { + let margin = margin.min(self.scroll_manager.vertical_scroll_margin); + let target_top = (first_cursor_top - margin).max(0.0); + let target_bottom = last_cursor_bottom + margin; + let start_row = scroll_position.y(); + let end_row = start_row + visible_lines; + + if target_top < start_row { + scroll_position.set_y(target_top); + self.set_scroll_position_internal(scroll_position, local, cx); + } else if target_bottom >= end_row { + scroll_position.set_y(target_bottom - visible_lines); + self.set_scroll_position_internal(scroll_position, local, cx); + } + } + AutoscrollStrategy::Center => { + scroll_position.set_y((first_cursor_top - margin).max(0.0)); + self.set_scroll_position_internal(scroll_position, local, cx); + } + AutoscrollStrategy::Top => { + scroll_position.set_y((first_cursor_top).max(0.0)); + self.set_scroll_position_internal(scroll_position, local, cx); + } + AutoscrollStrategy::Bottom => { + scroll_position.set_y((last_cursor_bottom - visible_lines).max(0.0)); + self.set_scroll_position_internal(scroll_position, local, cx); + } + } + + self.scroll_manager.last_autoscroll = Some(( + self.scroll_manager.anchor.offset, + first_cursor_top, + last_cursor_bottom, + strategy, + )); + + true + } + + pub fn autoscroll_horizontally( + &mut self, + start_row: u32, + viewport_width: f32, + scroll_width: f32, + max_glyph_width: f32, + layouts: &[text_layout::Line], + cx: &mut ViewContext, + ) -> bool { + let display_map = self.display_map.update(cx, |map, cx| map.snapshot(cx)); + let selections = self.selections.all::(cx); + + let mut target_left; + let mut target_right; + + if self.highlighted_rows.is_some() { + target_left = 0.0_f32; 
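// (`target_right` is assigned on the next line.) A reduced sketch of the
// horizontal autoscroll decision that follows in this function: given the
// pixel extents of the cursors (`target_left..target_right`) and the current
// viewport (`scroll_left..scroll_left + viewport_width`), scroll just far
// enough to reveal them, and give up when they cannot all fit. The real code
// additionally converts between pixels and columns via `max_glyph_width`;
// `autoscroll_x` is an illustrative name:
fn autoscroll_x(
    target_left: f32,
    target_right: f32,
    scroll_left: f32,
    viewport_width: f32,
) -> Option<f32> {
    if target_right - target_left > viewport_width {
        // The cursors span more than one screen; leave the scroll alone.
        return None;
    }
    let scroll_right = scroll_left + viewport_width;
    if target_left < scroll_left {
        Some(target_left)
    } else if target_right > scroll_right {
        Some(target_right - viewport_width)
    } else {
        None // Already fully visible.
    }
}

fn main() {
    // A cursor ending at x=480 in a 400px viewport at x=0 scrolls to 80.
    assert_eq!(autoscroll_x(440.0, 480.0, 0.0, 400.0), Some(80.0));
    assert_eq!(autoscroll_x(10.0, 20.0, 0.0, 400.0), None);
}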
+ target_right = 0.0_f32; + } else { + target_left = std::f32::INFINITY; + target_right = 0.0_f32; + for selection in selections { + let head = selection.head().to_display_point(&display_map); + if head.row() >= start_row && head.row() < start_row + layouts.len() as u32 { + let start_column = head.column().saturating_sub(3); + let end_column = cmp::min(display_map.line_len(head.row()), head.column() + 3); + target_left = target_left.min( + layouts[(head.row() - start_row) as usize] + .x_for_index(start_column as usize), + ); + target_right = target_right.max( + layouts[(head.row() - start_row) as usize].x_for_index(end_column as usize) + + max_glyph_width, + ); + } + } + } + + target_right = target_right.min(scroll_width); + + if target_right - target_left > viewport_width { + return false; + } + + let scroll_left = self.scroll_manager.anchor.offset.x() * max_glyph_width; + let scroll_right = scroll_left + viewport_width; + + if target_left < scroll_left { + self.scroll_manager + .anchor + .offset + .set_x(target_left / max_glyph_width); + true + } else if target_right > scroll_right { + self.scroll_manager + .anchor + .offset + .set_x((target_right - viewport_width) / max_glyph_width); + true + } else { + false + } + } + + pub fn request_autoscroll(&mut self, autoscroll: Autoscroll, cx: &mut ViewContext) { + self.scroll_manager.autoscroll_request = Some((autoscroll, true)); + cx.notify(); + } + + pub(crate) fn request_autoscroll_remotely( + &mut self, + autoscroll: Autoscroll, + cx: &mut ViewContext, + ) { + self.scroll_manager.autoscroll_request = Some((autoscroll, false)); + cx.notify(); + } +} diff --git a/crates/editor/src/scroll/scroll_amount.rs b/crates/editor/src/scroll/scroll_amount.rs new file mode 100644 index 0000000000..6f6c21f0d4 --- /dev/null +++ b/crates/editor/src/scroll/scroll_amount.rs @@ -0,0 +1,48 @@ +use gpui::ViewContext; +use serde::Deserialize; +use util::iife; + +use crate::Editor; + +#[derive(Clone, PartialEq, Deserialize)] +pub enum ScrollAmount { + LineUp, + LineDown, + HalfPageUp, + HalfPageDown, + PageUp, + PageDown, +} + +impl ScrollAmount { + pub fn move_context_menu_selection( + &self, + editor: &mut Editor, + cx: &mut ViewContext, + ) -> bool { + iife!({ + let context_menu = editor.context_menu.as_mut()?; + + match self { + Self::LineDown | Self::HalfPageDown => context_menu.select_next(cx), + Self::LineUp | Self::HalfPageUp => context_menu.select_prev(cx), + Self::PageDown => context_menu.select_last(cx), + Self::PageUp => context_menu.select_first(cx), + } + .then_some(()) + }) + .is_some() + } + + pub fn lines(&self, editor: &mut Editor) -> f32 { + match self { + Self::LineDown => 1., + Self::LineUp => -1., + Self::HalfPageDown => editor.visible_line_count().map(|l| l / 2.).unwrap_or(1.), + Self::HalfPageUp => -editor.visible_line_count().map(|l| l / 2.).unwrap_or(1.), + // Minus 1. here so that there is a pivot line that stays on the screen + Self::PageDown => editor.visible_line_count().unwrap_or(1.) - 1., + Self::PageUp => -editor.visible_line_count().unwrap_or(1.) 
- 1., + } + } +} diff --git a/crates/editor/src/selections_collection.rs b/crates/editor/src/selections_collection.rs index facc1b0491..f1c19bca8a 100644 --- a/crates/editor/src/selections_collection.rs +++ b/crates/editor/src/selections_collection.rs @@ -61,7 +61,7 @@ impl SelectionsCollection { self.buffer.read(cx).read(cx) } - pub fn set_state(&mut self, other: &SelectionsCollection) { + pub fn clone_state(&mut self, other: &SelectionsCollection) { self.next_selection_id = other.next_selection_id; self.line_mode = other.line_mode; self.disjoint = other.disjoint.clone(); diff --git a/crates/go_to_line/src/go_to_line.rs b/crates/go_to_line/src/go_to_line.rs index 4db3d1310b..32c7d3c810 100644 --- a/crates/go_to_line/src/go_to_line.rs +++ b/crates/go_to_line/src/go_to_line.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use editor::{display_map::ToDisplayPoint, Autoscroll, DisplayPoint, Editor}; +use editor::{display_map::ToDisplayPoint, scroll::autoscroll::Autoscroll, DisplayPoint, Editor}; use gpui::{ actions, elements::*, geometry::vector::Vector2F, AnyViewHandle, Axis, Entity, MutableAppContext, RenderContext, View, ViewContext, ViewHandle, diff --git a/crates/gpui/src/app.rs b/crates/gpui/src/app.rs index c62305f572..bf78399914 100644 --- a/crates/gpui/src/app.rs +++ b/crates/gpui/src/app.rs @@ -594,6 +594,9 @@ type ReleaseObservationCallback = Box; type WindowActivationCallback = Box bool>; type WindowFullscreenCallback = Box bool>; +type KeystrokeCallback = Box< + dyn FnMut(&Keystroke, &MatchResult, Option<&Box>, &mut MutableAppContext) -> bool, +>; type DeserializeActionCallback = fn(json: &str) -> anyhow::Result>; type WindowShouldCloseSubscriptionCallback = Box bool>; @@ -619,6 +622,7 @@ pub struct MutableAppContext { observations: CallbackCollection, window_activation_observations: CallbackCollection, window_fullscreen_observations: CallbackCollection, + keystroke_observations: CallbackCollection, release_observations: Arc>>>, action_dispatch_observations: Arc>>, @@ -678,6 +682,7 @@ impl MutableAppContext { global_observations: Default::default(), window_activation_observations: Default::default(), window_fullscreen_observations: Default::default(), + keystroke_observations: Default::default(), action_dispatch_observations: Default::default(), presenters_and_platform_windows: Default::default(), foreground, @@ -763,11 +768,11 @@ impl MutableAppContext { .with_context(|| format!("invalid data for action {}", name)) } - pub fn add_action(&mut self, handler: F) + pub fn add_action(&mut self, handler: F) where A: Action, V: View, - F: 'static + FnMut(&mut V, &A, &mut ViewContext), + F: 'static + FnMut(&mut V, &A, &mut ViewContext) -> R, { self.add_action_internal(handler, false) } @@ -781,11 +786,11 @@ impl MutableAppContext { self.add_action_internal(handler, true) } - fn add_action_internal(&mut self, mut handler: F, capture: bool) + fn add_action_internal(&mut self, mut handler: F, capture: bool) where A: Action, V: View, - F: 'static + FnMut(&mut V, &A, &mut ViewContext), + F: 'static + FnMut(&mut V, &A, &mut ViewContext) -> R, { let handler = Box::new( move |view: &mut dyn AnyView, @@ -1255,6 +1260,27 @@ impl MutableAppContext { } } + pub fn observe_keystrokes(&mut self, window_id: usize, callback: F) -> Subscription + where + F: 'static + + FnMut( + &Keystroke, + &MatchResult, + Option<&Box>, + &mut MutableAppContext, + ) -> bool, + { + let subscription_id = post_inc(&mut self.next_subscription_id); + self.keystroke_observations + .add_callback(window_id, subscription_id, 
Box::new(callback)); + + Subscription::KeystrokeObservation { + id: subscription_id, + window_id, + observations: Some(self.keystroke_observations.downgrade()), + } + } + pub fn defer(&mut self, callback: impl 'static + FnOnce(&mut MutableAppContext)) { self.pending_effects.push_back(Effect::Deferred { callback: Box::new(callback), @@ -1538,27 +1564,39 @@ impl MutableAppContext { }) .collect(); - match self + let match_result = self .keystroke_matcher - .push_keystroke(keystroke.clone(), dispatch_path) - { + .push_keystroke(keystroke.clone(), dispatch_path); + let mut handled_by = None; + + let keystroke_handled = match &match_result { MatchResult::None => false, MatchResult::Pending => true, MatchResult::Matches(matches) => { for (view_id, action) in matches { if self.handle_dispatch_action_from_effect( window_id, - Some(view_id), + Some(*view_id), action.as_ref(), ) { self.keystroke_matcher.clear_pending(); - return true; + handled_by = Some(action.boxed_clone()); + break; } } - false + handled_by.is_some() } - } + }; + + self.keystroke( + window_id, + keystroke.clone(), + handled_by, + match_result.clone(), + ); + keystroke_handled } else { + self.keystroke(window_id, keystroke.clone(), None, MatchResult::None); false } } @@ -2110,6 +2148,12 @@ impl MutableAppContext { } => { self.handle_window_should_close_subscription_effect(window_id, callback) } + Effect::Keystroke { + window_id, + keystroke, + handled_by, + result, + } => self.handle_keystroke_effect(window_id, keystroke, handled_by, result), } self.pending_notifications.clear(); self.remove_dropped_entities(); @@ -2188,6 +2232,21 @@ impl MutableAppContext { }); } + fn keystroke( + &mut self, + window_id: usize, + keystroke: Keystroke, + handled_by: Option>, + result: MatchResult, + ) { + self.pending_effects.push_back(Effect::Keystroke { + window_id, + keystroke, + handled_by, + result, + }); + } + pub fn refresh_windows(&mut self) { self.pending_effects.push_back(Effect::RefreshWindows); } @@ -2299,6 +2358,21 @@ impl MutableAppContext { }); } + fn handle_keystroke_effect( + &mut self, + window_id: usize, + keystroke: Keystroke, + handled_by: Option>, + result: MatchResult, + ) { + self.update(|this| { + let mut observations = this.keystroke_observations.clone(); + observations.emit_and_cleanup(window_id, this, { + move |callback, this| callback(&keystroke, &result, handled_by.as_ref(), this) + }); + }); + } + fn handle_window_activation_effect(&mut self, window_id: usize, active: bool) { //Short circuit evaluation if we're already g2g if self @@ -2852,6 +2926,12 @@ pub enum Effect { subscription_id: usize, callback: WindowFullscreenCallback, }, + Keystroke { + window_id: usize, + keystroke: Keystroke, + handled_by: Option>, + result: MatchResult, + }, RefreshWindows, DispatchActionFrom { window_id: usize, @@ -2995,6 +3075,21 @@ impl Debug for Effect { .debug_struct("Effect::WindowShouldCloseSubscription") .field("window_id", window_id) .finish(), + Effect::Keystroke { + window_id, + keystroke, + handled_by, + result, + } => f + .debug_struct("Effect::Keystroke") + .field("window_id", window_id) + .field("keystroke", keystroke) + .field( + "keystroke", + &handled_by.as_ref().map(|handled_by| handled_by.name()), + ) + .field("result", result) + .finish(), } } } @@ -3826,6 +3921,33 @@ impl<'a, T: View> ViewContext<'a, T> { }) } + pub fn observe_keystroke(&mut self, mut callback: F) -> Subscription + where + F: 'static + + FnMut( + &mut T, + &Keystroke, + Option<&Box>, + &MatchResult, + &mut ViewContext, + ) -> bool, + { + let 
observer = self.weak_handle(); + self.app.observe_keystrokes( + self.window_id(), + move |keystroke, result, handled_by, cx| { + if let Some(observer) = observer.upgrade(cx) { + observer.update(cx, |observer, cx| { + callback(observer, keystroke, handled_by, result, cx); + }); + true + } else { + false + } + }, + ) + } + pub fn emit(&mut self, payload: T::Event) { self.app.pending_effects.push_back(Effect::Event { entity_id: self.view_id, @@ -5018,6 +5140,11 @@ pub enum Subscription { window_id: usize, observations: Option>>, }, + KeystrokeObservation { + id: usize, + window_id: usize, + observations: Option>>, + }, ReleaseObservation { id: usize, @@ -5056,6 +5183,9 @@ impl Subscription { Subscription::ActionObservation { observations, .. } => { observations.take(); } + Subscription::KeystrokeObservation { observations, .. } => { + observations.take(); + } Subscription::WindowActivationObservation { observations, .. } => { observations.take(); } @@ -5175,6 +5305,27 @@ impl Drop for Subscription { observations.lock().remove(id); } } + Subscription::KeystrokeObservation { + id, + window_id, + observations, + } => { + if let Some(observations) = observations.as_ref().and_then(Weak::upgrade) { + match observations + .lock() + .entry(*window_id) + .or_default() + .entry(*id) + { + btree_map::Entry::Vacant(entry) => { + entry.insert(None); + } + btree_map::Entry::Occupied(entry) => { + entry.remove(); + } + } + } + } Subscription::WindowActivationObservation { id, window_id, diff --git a/crates/gpui/src/keymap.rs b/crates/gpui/src/keymap.rs index fc97f69624..e9bc228757 100644 --- a/crates/gpui/src/keymap.rs +++ b/crates/gpui/src/keymap.rs @@ -112,6 +112,21 @@ impl PartialEq for MatchResult { impl Eq for MatchResult {} +impl Clone for MatchResult { + fn clone(&self) -> Self { + match self { + MatchResult::None => MatchResult::None, + MatchResult::Pending => MatchResult::Pending, + MatchResult::Matches(matches) => MatchResult::Matches( + matches + .iter() + .map(|(view_id, action)| (*view_id, Action::boxed_clone(action.as_ref()))) + .collect(), + ), + } + } +} + impl Matcher { pub fn new(keymap: Keymap) -> Self { Self { diff --git a/crates/journal/src/journal.rs b/crates/journal/src/journal.rs index ef1dbdc15c..76a56af93d 100644 --- a/crates/journal/src/journal.rs +++ b/crates/journal/src/journal.rs @@ -1,5 +1,5 @@ use chrono::{Datelike, Local, NaiveTime, Timelike}; -use editor::{Autoscroll, Editor}; +use editor::{scroll::autoscroll::Autoscroll, Editor}; use gpui::{actions, MutableAppContext}; use settings::{HourFormat, Settings}; use std::{ diff --git a/crates/outline/src/outline.rs b/crates/outline/src/outline.rs index abb5e8d3df..f6698e23be 100644 --- a/crates/outline/src/outline.rs +++ b/crates/outline/src/outline.rs @@ -1,6 +1,6 @@ use editor::{ - combine_syntax_and_fuzzy_match_highlights, display_map::ToDisplayPoint, Anchor, AnchorRangeExt, - Autoscroll, DisplayPoint, Editor, ToPoint, + combine_syntax_and_fuzzy_match_highlights, display_map::ToDisplayPoint, + scroll::autoscroll::Autoscroll, Anchor, AnchorRangeExt, DisplayPoint, Editor, ToPoint, }; use fuzzy::StringMatch; use gpui::{ diff --git a/crates/project_symbols/src/project_symbols.rs b/crates/project_symbols/src/project_symbols.rs index 273230fe26..957292f035 100644 --- a/crates/project_symbols/src/project_symbols.rs +++ b/crates/project_symbols/src/project_symbols.rs @@ -1,5 +1,6 @@ use editor::{ - combine_syntax_and_fuzzy_match_highlights, styled_runs_for_code_label, Autoscroll, Bias, Editor, + 
combine_syntax_and_fuzzy_match_highlights, scroll::autoscroll::Autoscroll,
+    styled_runs_for_code_label, Bias, Editor,
 };
 use fuzzy::{StringMatch, StringMatchCandidate};
 use gpui::{
diff --git a/crates/search/src/project_search.rs b/crates/search/src/project_search.rs
index 6fa7d07d6f..13b754a417 100644
--- a/crates/search/src/project_search.rs
+++ b/crates/search/src/project_search.rs
@@ -4,8 +4,8 @@ use crate::{
 };
 use collections::HashMap;
 use editor::{
-    items::active_match_index, Anchor, Autoscroll, Editor, MultiBuffer, SelectAll,
-    MAX_TAB_TITLE_LEN,
+    items::active_match_index, scroll::autoscroll::Autoscroll, Anchor, Editor, MultiBuffer,
+    SelectAll, MAX_TAB_TITLE_LEN,
 };
 use gpui::{
     actions, elements::*, platform::CursorStyle, Action, AnyViewHandle, AppContext, ElementBox,
diff --git a/crates/util/src/lib.rs b/crates/util/src/lib.rs
index 0e83bb5f19..d9015ca6c0 100644
--- a/crates/util/src/lib.rs
+++ b/crates/util/src/lib.rs
@@ -216,6 +216,8 @@ pub fn unzip_option<T, U>(option: Option<(T, U)>) -> (Option<T>, Option<U>) {
     }
 }

+/// Immediately invoked function expression. Good for using the ? operator
+/// in functions which do not return an Option or Result.
 #[macro_export]
 macro_rules! iife {
     ($block:block) => {
@@ -223,6 +225,8 @@ macro_rules! iife {
     };
 }

+/// Async immediately invoked function expression. Good for using the ? operator
+/// in functions which do not return an Option or Result. Async version of the above.
 #[macro_export]
 macro_rules! async_iife {
     ($block:block) => {
diff --git a/crates/vim/src/editor_events.rs b/crates/vim/src/editor_events.rs
index 68f36e8fc6..7b777a50ed 100644
--- a/crates/vim/src/editor_events.rs
+++ b/crates/vim/src/editor_events.rs
@@ -22,20 +22,9 @@ fn editor_focused(EditorFocused(editor): &EditorFocused, cx: &mut MutableAppCont
     vim.active_editor = Some(editor.downgrade());
     vim.selection_subscription = Some(cx.subscribe(editor, |editor, event, cx| {
         if editor.read(cx).leader_replica_id().is_none() {
-            match event {
-                editor::Event::SelectionsChanged { local: true } => {
-                    let newest_empty =
-                        editor.read(cx).selections.newest::<usize>(cx).is_empty();
-                    editor_local_selections_changed(newest_empty, cx);
-                }
-                editor::Event::IgnoredInput => {
-                    Vim::update(cx, |vim, cx| {
-                        if vim.active_operator().is_some() {
-                            vim.clear_operator(cx);
-                        }
-                    });
-                }
-                _ => (),
+            if let editor::Event::SelectionsChanged { local: true } = event {
+                let newest_empty = editor.read(cx).selections.newest::<usize>(cx).is_empty();
+                editor_local_selections_changed(newest_empty, cx);
             }
         }
     }));
diff --git a/crates/vim/src/insert.rs b/crates/vim/src/insert.rs
index 8bfb8952d5..d8aea4aa33 100644
--- a/crates/vim/src/insert.rs
+++ b/crates/vim/src/insert.rs
@@ -1,5 +1,5 @@
 use crate::{state::Mode, Vim};
-use editor::{Autoscroll, Bias};
+use editor::{scroll::autoscroll::Autoscroll, Bias};
 use gpui::{actions, MutableAppContext, ViewContext};
 use language::SelectionGoal;
 use workspace::Workspace;
diff --git a/crates/vim/src/normal.rs b/crates/vim/src/normal.rs
index e4a2749d75..bc65fbd09e 100644
--- a/crates/vim/src/normal.rs
+++ b/crates/vim/src/normal.rs
@@ -2,7 +2,7 @@ mod change;
 mod delete;
 mod yank;

-use std::borrow::Cow;
+use std::{borrow::Cow, cmp::Ordering};

 use crate::{
     motion::Motion,
@@ -12,10 +12,13 @@ use crate::{
 };
 use collections::{HashMap, HashSet};
 use editor::{
-    display_map::ToDisplayPoint, Anchor, Autoscroll, Bias, ClipboardSelection, DisplayPoint,
+    display_map::ToDisplayPoint,
+    scroll::{autoscroll::Autoscroll, scroll_amount::ScrollAmount},
+    Anchor, Bias, ClipboardSelection,
DisplayPoint, Editor, }; -use gpui::{actions, MutableAppContext, ViewContext}; +use gpui::{actions, impl_actions, MutableAppContext, ViewContext}; use language::{AutoindentMode, Point, SelectionGoal}; +use serde::Deserialize; use workspace::Workspace; use self::{ @@ -24,6 +27,9 @@ use self::{ yank::{yank_motion, yank_object}, }; +#[derive(Clone, PartialEq, Deserialize)] +struct Scroll(ScrollAmount); + actions!( vim, [ @@ -41,6 +47,8 @@ actions!( ] ); +impl_actions!(vim, [Scroll]); + pub fn init(cx: &mut MutableAppContext) { cx.add_action(insert_after); cx.add_action(insert_first_non_whitespace); @@ -72,6 +80,13 @@ pub fn init(cx: &mut MutableAppContext) { }) }); cx.add_action(paste); + cx.add_action(|_: &mut Workspace, Scroll(amount): &Scroll, cx| { + Vim::update(cx, |vim, cx| { + vim.update_active_editor(cx, |editor, cx| { + scroll(editor, amount, cx); + }) + }) + }); } pub fn normal_motion( @@ -367,6 +382,46 @@ fn paste(_: &mut Workspace, _: &Paste, cx: &mut ViewContext) { }); } +fn scroll(editor: &mut Editor, amount: &ScrollAmount, cx: &mut ViewContext) { + let should_move_cursor = editor.newest_selection_on_screen(cx).is_eq(); + editor.scroll_screen(amount, cx); + if should_move_cursor { + let selection_ordering = editor.newest_selection_on_screen(cx); + if selection_ordering.is_eq() { + return; + } + + let visible_rows = if let Some(visible_rows) = editor.visible_line_count() { + visible_rows as u32 + } else { + return; + }; + + let scroll_margin_rows = editor.vertical_scroll_margin() as u32; + let top_anchor = editor.scroll_manager.anchor().top_anchor; + + editor.change_selections(None, cx, |s| { + s.replace_cursors_with(|snapshot| { + let mut new_point = top_anchor.to_display_point(&snapshot); + + match selection_ordering { + Ordering::Less => { + *new_point.row_mut() += scroll_margin_rows; + new_point = snapshot.clip_point(new_point, Bias::Right); + } + Ordering::Greater => { + *new_point.row_mut() += visible_rows - scroll_margin_rows as u32; + new_point = snapshot.clip_point(new_point, Bias::Left); + } + Ordering::Equal => unreachable!(), + } + + vec![new_point] + }) + }); + } +} + #[cfg(test)] mod test { use indoc::indoc; diff --git a/crates/vim/src/normal/change.rs b/crates/vim/src/normal/change.rs index 59c0a654a4..a32888f59e 100644 --- a/crates/vim/src/normal/change.rs +++ b/crates/vim/src/normal/change.rs @@ -1,6 +1,7 @@ use crate::{motion::Motion, object::Object, state::Mode, utils::copy_selections_content, Vim}; use editor::{ - char_kind, display_map::DisplaySnapshot, movement, Autoscroll, CharKind, DisplayPoint, + char_kind, display_map::DisplaySnapshot, movement, scroll::autoscroll::Autoscroll, CharKind, + DisplayPoint, }; use gpui::MutableAppContext; use language::Selection; diff --git a/crates/vim/src/normal/delete.rs b/crates/vim/src/normal/delete.rs index 6b6349578f..b22579438f 100644 --- a/crates/vim/src/normal/delete.rs +++ b/crates/vim/src/normal/delete.rs @@ -1,6 +1,6 @@ use crate::{motion::Motion, object::Object, utils::copy_selections_content, Vim}; use collections::{HashMap, HashSet}; -use editor::{display_map::ToDisplayPoint, Autoscroll, Bias}; +use editor::{display_map::ToDisplayPoint, scroll::autoscroll::Autoscroll, Bias}; use gpui::MutableAppContext; pub fn delete_motion(vim: &mut Vim, motion: Motion, times: usize, cx: &mut MutableAppContext) { diff --git a/crates/vim/src/state.rs b/crates/vim/src/state.rs index b5acb50e7c..6bbab1ae42 100644 --- a/crates/vim/src/state.rs +++ b/crates/vim/src/state.rs @@ -18,6 +18,7 @@ impl Default for Mode { #[derive(Copy, 
Clone, Debug, PartialEq, Eq, Deserialize)] pub enum Namespace { G, + Z, } #[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize)] @@ -95,6 +96,7 @@ impl Operator { let operator_context = match operator { Some(Operator::Number(_)) => "n", Some(Operator::Namespace(Namespace::G)) => "g", + Some(Operator::Namespace(Namespace::Z)) => "z", Some(Operator::Object { around: false }) => "i", Some(Operator::Object { around: true }) => "a", Some(Operator::Change) => "c", diff --git a/crates/vim/src/vim.rs b/crates/vim/src/vim.rs index ce3a7e2366..4121d6f4bb 100644 --- a/crates/vim/src/vim.rs +++ b/crates/vim/src/vim.rs @@ -81,6 +81,28 @@ pub fn init(cx: &mut MutableAppContext) { .detach(); } +// Any keystrokes not mapped to vim should clar the active operator +pub fn observe_keypresses(window_id: usize, cx: &mut MutableAppContext) { + cx.observe_keystrokes(window_id, |_keystroke, _result, handled_by, cx| { + dbg!(_keystroke); + dbg!(_result); + if let Some(handled_by) = handled_by { + dbg!(handled_by.name()); + if handled_by.namespace() == "vim" { + return true; + } + } + + Vim::update(cx, |vim, cx| { + if vim.active_operator().is_some() { + vim.clear_operator(cx); + } + }); + true + }) + .detach() +} + #[derive(Default)] pub struct Vim { editors: HashMap>, diff --git a/crates/vim/src/visual.rs b/crates/vim/src/visual.rs index 95f6c3d8b4..ef5bb6ddd8 100644 --- a/crates/vim/src/visual.rs +++ b/crates/vim/src/visual.rs @@ -1,7 +1,9 @@ use std::borrow::Cow; use collections::HashMap; -use editor::{display_map::ToDisplayPoint, Autoscroll, Bias, ClipboardSelection}; +use editor::{ + display_map::ToDisplayPoint, scroll::autoscroll::Autoscroll, Bias, ClipboardSelection, +}; use gpui::{actions, MutableAppContext, ViewContext}; use language::{AutoindentMode, SelectionGoal}; use workspace::Workspace; diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index 9b1342ecd9..0879166bbe 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -175,21 +175,16 @@ impl Dock { new_position: DockPosition, cx: &mut ViewContext, ) { - dbg!("starting", &new_position); workspace.dock.position = new_position; // Tell the pane about the new anchor position workspace.dock.pane.update(cx, |pane, cx| { - dbg!("setting docked"); pane.set_docked(Some(new_position.anchor()), cx) }); if workspace.dock.position.is_visible() { - dbg!("dock is visible"); // Close the right sidebar if the dock is on the right side and the right sidebar is open if workspace.dock.position.anchor() == DockAnchor::Right { - dbg!("dock anchor is right"); if workspace.right_sidebar().read(cx).is_open() { - dbg!("Toggling right sidebar"); workspace.toggle_sidebar(SidebarSide::Right, cx); } } @@ -199,10 +194,8 @@ impl Dock { if pane.read(cx).items().next().is_none() { let item_to_add = (workspace.dock.default_item_factory)(workspace, cx); // Adding the item focuses the pane by default - dbg!("Adding item to dock"); Pane::add_item(workspace, &pane, item_to_add, true, true, None, cx); } else { - dbg!("just focusing dock"); cx.focus(pane); } } else if let Some(last_active_center_pane) = workspace @@ -214,7 +207,6 @@ impl Dock { } cx.emit(crate::Event::DockAnchorChanged); workspace.serialize_workspace(cx); - dbg!("Serializing workspace after dock position changed"); cx.notify(); } diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index d86e449ff2..9a827da8b7 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -324,6 +324,9 @@ pub fn initialize_workspace( 
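For reference, the contract of the observer registered above: the callback fires for every keystroke dispatched in the window, receives the keymap match result plus the action (if any) that handled it, and returns whether the subscription should stay alive. A minimal sketch of another consumer, assuming the callback signature added in this patch (with the generic parameter elided above restored as Box<dyn Action>) and the log crate for output:

// Sketch only: logs keystrokes that no binding handled. Assumes
// gpui::{keymap::MatchResult, Keystroke, MutableAppContext} as in this patch.
pub fn log_unhandled_keystrokes(window_id: usize, cx: &mut MutableAppContext) {
    cx.observe_keystrokes(window_id, |keystroke, result, handled_by, _cx| {
        // `handled_by` is Some(action) only when a binding matched and ran.
        if handled_by.is_none() && matches!(result, MatchResult::None) {
            log::debug!("unhandled keystroke: {:?}", keystroke);
        }
        true // returning false would drop this observation
    })
    .detach();
}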
auto_update::notify_of_any_new_update(cx.weak_handle(), cx); + let window_id = cx.window_id(); + vim::observe_keypresses(window_id, cx); + cx.on_window_should_close(|workspace, cx| { if let Some(task) = workspace.close(&Default::default(), cx) { task.detach_and_log_err(cx); @@ -613,7 +616,7 @@ fn schema_file_match(path: &Path) -> &Path { mod tests { use super::*; use assets::Assets; - use editor::{Autoscroll, DisplayPoint, Editor}; + use editor::{scroll::autoscroll::Autoscroll, DisplayPoint, Editor}; use gpui::{ executor::Deterministic, AssetSource, MutableAppContext, TestAppContext, ViewHandle, }; From f6f41510d25f78984332fb812745220036d4ee36 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 7 Dec 2022 17:25:48 -0800 Subject: [PATCH 217/240] fix failing tests from incorrect follow behavior --- crates/editor/src/items.rs | 3 ++- crates/editor/src/scroll.rs | 11 ++++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 4779fe73b8..73008ca720 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -88,7 +88,7 @@ impl FollowableItem for Editor { } if let Some(anchor) = state.scroll_top_anchor { - editor.set_scroll_anchor( + editor.set_scroll_anchor_internal( ScrollAnchor { top_anchor: Anchor { buffer_id: Some(state.buffer_id as usize), @@ -98,6 +98,7 @@ impl FollowableItem for Editor { }, offset: vec2f(state.scroll_x, state.scroll_y), }, + false, cx, ); } diff --git a/crates/editor/src/scroll.rs b/crates/editor/src/scroll.rs index 78bc3685c1..5cb58e21e9 100644 --- a/crates/editor/src/scroll.rs +++ b/crates/editor/src/scroll.rs @@ -284,8 +284,17 @@ impl Editor { } pub fn set_scroll_anchor(&mut self, scroll_anchor: ScrollAnchor, cx: &mut ViewContext) { + self.set_scroll_anchor_internal(scroll_anchor, true, cx); + } + + pub(crate) fn set_scroll_anchor_internal( + &mut self, + scroll_anchor: ScrollAnchor, + local: bool, + cx: &mut ViewContext, + ) { hide_hover(self, cx); - self.scroll_manager.set_anchor(scroll_anchor, true, cx); + self.scroll_manager.set_anchor(scroll_anchor, local, cx); } pub fn scroll_screen(&mut self, amount: &ScrollAmount, cx: &mut ViewContext) { From 36bc90b2b89e239da3f642ec145373891a682901 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 7 Dec 2022 17:46:00 -0800 Subject: [PATCH 218/240] Add deadzones to drag and drop --- crates/drag_and_drop/src/drag_and_drop.rs | 44 +++++++++++++++++++++-- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/crates/drag_and_drop/src/drag_and_drop.rs b/crates/drag_and_drop/src/drag_and_drop.rs index 6884de7e20..a34fa83a4c 100644 --- a/crates/drag_and_drop/src/drag_and_drop.rs +++ b/crates/drag_and_drop/src/drag_and_drop.rs @@ -9,11 +9,17 @@ use gpui::{ View, WeakViewHandle, }; +const DEAD_ZONE: f32 = 4.; + enum State { Down { region_offset: Vector2F, region: RectF, }, + DeadZone { + region_offset: Vector2F, + region: RectF, + }, Dragging { window_id: usize, position: Vector2F, @@ -35,6 +41,13 @@ impl Clone for State { region_offset, region, }, + &State::DeadZone { + region_offset, + region, + } => State::DeadZone { + region_offset, + region, + }, State::Dragging { window_id, position, @@ -101,7 +114,7 @@ impl DragAndDrop { pub fn drag_started(event: MouseDown, cx: &mut EventContext) { cx.update_global(|this: &mut Self, _| { this.currently_dragged = Some(State::Down { - region_offset: event.region.origin() - event.position, + region_offset: event.position - event.region.origin(), region: event.region, }); }) @@ -122,7 +135,31 
@@ impl DragAndDrop { region_offset, region, }) - | Some(&State::Dragging { + | Some(&State::DeadZone { + region_offset, + region, + }) => { + if (dbg!(event.position) - (dbg!(region.origin() + region_offset))).length() + > DEAD_ZONE + { + this.currently_dragged = Some(State::Dragging { + window_id, + region_offset, + region, + position: event.position, + payload, + render: Rc::new(move |payload, cx| { + render(payload.downcast_ref::().unwrap(), cx) + }), + }); + } else { + this.currently_dragged = Some(State::DeadZone { + region_offset, + region, + }) + } + } + Some(&State::Dragging { region_offset, region, .. @@ -151,6 +188,7 @@ impl DragAndDrop { .and_then(|state| { match state { State::Down { .. } => None, + State::DeadZone { .. } => None, State::Dragging { window_id, region_offset, @@ -163,7 +201,7 @@ impl DragAndDrop { return None; } - let position = position + region_offset; + let position = position - region_offset; Some( Overlay::new( MouseEventHandler::::new(0, cx, |_, cx| { From 10aecc310ef20f197e418e0669adc1209b991b91 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Thu, 8 Dec 2022 11:26:46 -0800 Subject: [PATCH 219/240] notify views when hover finishes in tooltip wrapper --- crates/gpui/src/elements/tooltip.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/gpui/src/elements/tooltip.rs b/crates/gpui/src/elements/tooltip.rs index f81b4af701..dbcecf9c24 100644 --- a/crates/gpui/src/elements/tooltip.rs +++ b/crates/gpui/src/elements/tooltip.rs @@ -115,6 +115,7 @@ impl Tooltip { } else { state.visible.set(false); state.debounce.take(); + cx.notify(); } } }) From ab978ff1a3c7f803c88749b9a0942bcc27f42c67 Mon Sep 17 00:00:00 2001 From: Joseph Lyons Date: Thu, 8 Dec 2022 16:35:13 -0500 Subject: [PATCH 220/240] collab 0.3.0 --- Cargo.lock | 2 +- crates/collab/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 21966a9673..57f2bdbdc4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1130,7 +1130,7 @@ dependencies = [ [[package]] name = "collab" -version = "0.2.5" +version = "0.3.0" dependencies = [ "anyhow", "async-tungstenite", diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index c741341d48..a4ccabf099 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] default-run = "collab" edition = "2021" name = "collab" -version = "0.2.5" +version = "0.3.0" [[bin]] name = "collab" From 2cf48c03f9977dd1c292957c7538d7bc12b40944 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Thu, 8 Dec 2022 14:39:48 -0800 Subject: [PATCH 221/240] fix final failing tests --- assets/keymaps/vim.json | 38 ++++++++---------- crates/editor/src/editor.rs | 53 +++++++++++++++---------- crates/editor/src/items.rs | 2 +- crates/vim/src/normal/change.rs | 1 - crates/vim/src/test/vim_test_context.rs | 3 +- crates/vim/src/vim.rs | 6 +-- 6 files changed, 55 insertions(+), 48 deletions(-) diff --git a/assets/keymaps/vim.json b/assets/keymaps/vim.json index 5cdd4fc7d4..99c94798db 100644 --- a/assets/keymaps/vim.json +++ b/assets/keymaps/vim.json @@ -8,6 +8,22 @@ "Namespace": "G" } ], + "i": [ + "vim::PushOperator", + { + "Object": { + "around": false + } + } + ], + "a": [ + "vim::PushOperator", + { + "Object": { + "around": true + } + } + ], "h": "vim::Left", "backspace": "vim::Backspace", "j": "vim::Down", @@ -77,28 +93,6 @@ ] } }, - { - //Operators - "context": "Editor && VimControl && vim_operator == none", - "bindings": { - "i": [ - "vim::PushOperator", - { - "Object": { - "around": false 
- } - } - ], - "a": [ - "vim::PushOperator", - { - "Object": { - "around": true - } - } - ] - } - }, { "context": "Editor && vim_mode == normal && vim_operator == none", "bindings": { diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 1aee1e246d..417b60bc5b 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -3709,18 +3709,30 @@ impl Editor { }) } - pub fn move_page_up(&mut self, action: &MovePageUp, cx: &mut ViewContext) -> Option<()> { - self.take_rename(true, cx)?; - if self.context_menu.as_mut()?.select_first(cx) { - return None; + pub fn move_page_up(&mut self, action: &MovePageUp, cx: &mut ViewContext) { + if self.take_rename(true, cx).is_some() { + return; + } + + if self + .context_menu + .as_mut() + .map(|menu| menu.select_first(cx)) + .unwrap_or(false) + { + return; } if matches!(self.mode, EditorMode::SingleLine) { cx.propagate_action(); - return None; + return; } - let row_count = self.visible_line_count()? as u32 - 1; + let row_count = if let Some(row_count) = self.visible_line_count() { + row_count as u32 - 1 + } else { + return; + }; let autoscroll = if action.center_cursor { Autoscroll::center() @@ -3739,8 +3751,6 @@ impl Editor { selection.collapse_to(cursor, goal); }); }); - - Some(()) } pub fn select_up(&mut self, _: &SelectUp, cx: &mut ViewContext) { @@ -3775,25 +3785,30 @@ impl Editor { }); } - pub fn move_page_down( - &mut self, - action: &MovePageDown, - cx: &mut ViewContext, - ) -> Option<()> { + pub fn move_page_down(&mut self, action: &MovePageDown, cx: &mut ViewContext) { if self.take_rename(true, cx).is_some() { - return None; + return; } - if self.context_menu.as_mut()?.select_last(cx) { - return None; + if self + .context_menu + .as_mut() + .map(|menu| menu.select_last(cx)) + .unwrap_or(false) + { + return; } if matches!(self.mode, EditorMode::SingleLine) { cx.propagate_action(); - return None; + return; } - let row_count = self.visible_line_count()? as u32 - 1; + let row_count = if let Some(row_count) = self.visible_line_count() { + row_count as u32 - 1 + } else { + return; + }; let autoscroll = if action.center_cursor { Autoscroll::center() @@ -3812,8 +3827,6 @@ impl Editor { selection.collapse_to(cursor, goal); }); }); - - Some(()) } pub fn select_down(&mut self, _: &SelectDown, cx: &mut ViewContext) { diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 73008ca720..0efce57d5f 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -300,7 +300,7 @@ impl Item for Editor { false } else { let nav_history = self.nav_history.take(); - self.set_scroll_anchor(data.scroll_anchor, cx); + self.set_scroll_anchor(scroll_anchor, cx); self.change_selections(Some(Autoscroll::fit()), cx, |s| { s.select_ranges([offset..offset]) }); diff --git a/crates/vim/src/normal/change.rs b/crates/vim/src/normal/change.rs index a32888f59e..ca372801c7 100644 --- a/crates/vim/src/normal/change.rs +++ b/crates/vim/src/normal/change.rs @@ -200,7 +200,6 @@ mod test { Test test ˇtest"}) .await; - println!("Marker"); cx.assert(indoc! 
{" Test test ˇ diff --git a/crates/vim/src/test/vim_test_context.rs b/crates/vim/src/test/vim_test_context.rs index e0d972896f..723dac0581 100644 --- a/crates/vim/src/test/vim_test_context.rs +++ b/crates/vim/src/test/vim_test_context.rs @@ -51,8 +51,9 @@ impl<'a> VimTestContext<'a> { ) }); - // Setup search toolbars + // Setup search toolbars and keypress hook workspace.update(cx, |workspace, cx| { + observe_keypresses(window_id, cx); workspace.active_pane().update(cx, |pane, cx| { pane.toolbar().update(cx, |toolbar, cx| { let buffer_search_bar = cx.add_view(BufferSearchBar::new); diff --git a/crates/vim/src/vim.rs b/crates/vim/src/vim.rs index 4121d6f4bb..898886714e 100644 --- a/crates/vim/src/vim.rs +++ b/crates/vim/src/vim.rs @@ -81,20 +81,20 @@ pub fn init(cx: &mut MutableAppContext) { .detach(); } -// Any keystrokes not mapped to vim should clar the active operator +// Any keystrokes not mapped to vim should clear the active operator pub fn observe_keypresses(window_id: usize, cx: &mut MutableAppContext) { cx.observe_keystrokes(window_id, |_keystroke, _result, handled_by, cx| { dbg!(_keystroke); - dbg!(_result); if let Some(handled_by) = handled_by { - dbg!(handled_by.name()); if handled_by.namespace() == "vim" { + println!("Vim action. Don't clear"); return true; } } Vim::update(cx, |vim, cx| { if vim.active_operator().is_some() { + println!("Clearing operator"); vim.clear_operator(cx); } }); From e61a38b3a969b52f98efc141ba2f4a56b0452028 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Thu, 8 Dec 2022 14:45:22 -0800 Subject: [PATCH 222/240] remove printline --- crates/vim/src/vim.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/crates/vim/src/vim.rs b/crates/vim/src/vim.rs index 898886714e..40cc414778 100644 --- a/crates/vim/src/vim.rs +++ b/crates/vim/src/vim.rs @@ -84,17 +84,14 @@ pub fn init(cx: &mut MutableAppContext) { // Any keystrokes not mapped to vim should clear the active operator pub fn observe_keypresses(window_id: usize, cx: &mut MutableAppContext) { cx.observe_keystrokes(window_id, |_keystroke, _result, handled_by, cx| { - dbg!(_keystroke); if let Some(handled_by) = handled_by { if handled_by.namespace() == "vim" { - println!("Vim action. Don't clear"); return true; } } Vim::update(cx, |vim, cx| { if vim.active_operator().is_some() { - println!("Clearing operator"); vim.clear_operator(cx); } }); From 7dde54b052b1c8b544ea6fdaa435641713f80967 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 8 Dec 2022 15:33:02 -0800 Subject: [PATCH 223/240] v0.68.x dev --- Cargo.lock | 2 +- crates/zed/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 57f2bdbdc4..7d7dc42bea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8101,7 +8101,7 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zed" -version = "0.67.0" +version = "0.68.0" dependencies = [ "activity_indicator", "anyhow", diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index cc81f3bf23..6dfb739f3a 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] description = "The fast, collaborative code editor." 
edition = "2021" name = "zed" -version = "0.67.0" +version = "0.68.0" [lib] name = "zed" From 1b8763d0cfeadb60791c2cfe7e5d84619f4c8d50 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 6 Dec 2022 11:28:56 -0800 Subject: [PATCH 224/240] WIP - move terminal to project as pre-prep for collaboration --- Cargo.lock | 28 +++++++++++- crates/project/Cargo.toml | 1 + crates/project/src/project.rs | 12 +++++ crates/terminal/Cargo.toml | 20 ++------- crates/terminal/src/persistence.rs | 14 +++--- crates/terminal/src/terminal.rs | 4 -- crates/terminal_view/Cargo.toml | 44 +++++++++++++++++++ crates/{terminal => terminal_view}/README.md | 0 .../scripts/print256color.sh | 0 .../scripts/truecolor.sh | 0 .../src/terminal_container_view.rs | 0 .../src/terminal_element.rs | 0 .../src/terminal_view.rs | 0 crates/zed/Cargo.toml | 2 +- 14 files changed, 95 insertions(+), 30 deletions(-) create mode 100644 crates/terminal_view/Cargo.toml rename crates/{terminal => terminal_view}/README.md (100%) rename crates/{terminal => terminal_view}/scripts/print256color.sh (100%) rename crates/{terminal => terminal_view}/scripts/truecolor.sh (100%) rename crates/{terminal => terminal_view}/src/terminal_container_view.rs (100%) rename crates/{terminal => terminal_view}/src/terminal_element.rs (100%) rename crates/{terminal => terminal_view}/src/terminal_view.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 7d7dc42bea..1868959a09 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4463,6 +4463,7 @@ dependencies = [ "smol", "sum_tree", "tempdir", + "terminal", "text", "thiserror", "toml", @@ -6257,6 +6258,31 @@ dependencies = [ [[package]] name = "terminal" version = "0.1.0" +dependencies = [ + "alacritty_terminal", + "anyhow", + "db", + "dirs 4.0.0", + "futures 0.3.25", + "gpui", + "itertools", + "lazy_static", + "libc", + "mio-extras", + "ordered-float", + "procinfo", + "serde", + "settings", + "shellexpand", + "smallvec", + "smol", + "theme", + "thiserror", +] + +[[package]] +name = "terminal_view" +version = "0.1.0" dependencies = [ "alacritty_terminal", "anyhow", @@ -8166,7 +8192,7 @@ dependencies = [ "smol", "sum_tree", "tempdir", - "terminal", + "terminal_view", "text", "theme", "theme_selector", diff --git a/crates/project/Cargo.toml b/crates/project/Cargo.toml index 76c60f9556..dd4d2be5b6 100644 --- a/crates/project/Cargo.toml +++ b/crates/project/Cargo.toml @@ -32,6 +32,7 @@ lsp = { path = "../lsp" } rpc = { path = "../rpc" } settings = { path = "../settings" } sum_tree = { path = "../sum_tree" } +terminal = { path = "../terminal" } util = { path = "../util" } aho-corasick = "0.7" anyhow = "1.0.57" diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 512ac702d0..e61f0fe0b7 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -60,6 +60,7 @@ use std::{ atomic::{AtomicUsize, Ordering::SeqCst}, Arc, }, + thread::panicking, time::Instant, }; use thiserror::Error; @@ -1193,6 +1194,17 @@ impl Project { !self.is_local() } + pub fn create_terminal_connection( + &mut self, + cx: &mut ModelContext, + ) -> Result> { + if self.is_remote() { + return Err(anyhow!( + "creating terminals as a guest is not supported yet" + )); + } + } + pub fn create_buffer( &mut self, text: &str, diff --git a/crates/terminal/Cargo.toml b/crates/terminal/Cargo.toml index 5593ee92d4..2948eaec69 100644 --- a/crates/terminal/Cargo.toml +++ b/crates/terminal/Cargo.toml @@ -7,17 +7,12 @@ edition = "2021" path = "src/terminal.rs" doctest = false + [dependencies] -context_menu = { path = 
"../context_menu" } -editor = { path = "../editor" } -language = { path = "../language" } gpui = { path = "../gpui" } -project = { path = "../project" } settings = { path = "../settings" } -theme = { path = "../theme" } -util = { path = "../util" } -workspace = { path = "../workspace" } db = { path = "../db" } +theme = { path = "../theme" } alacritty_terminal = { git = "https://github.com/zed-industries/alacritty", rev = "a51dbe25d67e84d6ed4261e640d3954fbdd9be45" } procinfo = { git = "https://github.com/zed-industries/wezterm", rev = "5cd757e5f2eb039ed0c6bb6512223e69d5efc64d", default-features = false } smallvec = { version = "1.6", features = ["union"] } @@ -32,13 +27,4 @@ libc = "0.2" anyhow = "1" thiserror = "1.0" lazy_static = "1.4.0" -serde = { version = "1.0", features = ["derive"] } - - - -[dev-dependencies] -gpui = { path = "../gpui", features = ["test-support"] } -client = { path = "../client", features = ["test-support"]} -project = { path = "../project", features = ["test-support"]} -workspace = { path = "../workspace", features = ["test-support"] } -rand = "0.8.5" +serde = { version = "1.0", features = ["derive"] } \ No newline at end of file diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index 1669a3a546..333911ee6d 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -2,16 +2,16 @@ use std::path::PathBuf; use db::{define_connection, query, sqlez_macros::sql}; -use workspace::{ItemId, WorkspaceDb, WorkspaceId}; +type ModelId = usize; define_connection! { - pub static ref TERMINAL_CONNECTION: TerminalDb = + pub static ref TERMINAL_CONNECTION: TerminalDb<()> = &[sql!( CREATE TABLE terminals ( workspace_id INTEGER, - item_id INTEGER UNIQUE, + model_id INTEGER UNIQUE, working_directory BLOB, - PRIMARY KEY(workspace_id, item_id), + PRIMARY KEY(workspace_id, model_id), FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE ) STRICT; @@ -23,7 +23,7 @@ impl TerminalDb { pub async fn update_workspace_id( new_id: WorkspaceId, old_id: WorkspaceId, - item_id: ItemId + item_id: ModelId ) -> Result<()> { UPDATE terminals SET workspace_id = ? @@ -33,7 +33,7 @@ impl TerminalDb { query! { pub async fn save_working_directory( - item_id: ItemId, + item_id: ModelId, workspace_id: WorkspaceId, working_directory: PathBuf ) -> Result<()> { @@ -43,7 +43,7 @@ impl TerminalDb { } query! { - pub fn get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> Result> { + pub fn get_working_directory(item_id: ModelId, workspace_id: WorkspaceId) -> Result> { SELECT working_directory FROM terminals WHERE item_id = ? AND workspace_id = ? 
diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 0cbb6d36b1..62df8aca82 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -1,8 +1,5 @@ pub mod mappings; mod persistence; -pub mod terminal_container_view; -pub mod terminal_element; -pub mod terminal_view; use alacritty_terminal::{ ansi::{ClearMode, Handler}, @@ -37,7 +34,6 @@ use persistence::TERMINAL_CONNECTION; use procinfo::LocalProcessInfo; use settings::{AlternateScroll, Settings, Shell, TerminalBlink}; use util::ResultExt; -use workspace::{ItemId, WorkspaceId}; use std::{ cmp::min, diff --git a/crates/terminal_view/Cargo.toml b/crates/terminal_view/Cargo.toml new file mode 100644 index 0000000000..181ed606e0 --- /dev/null +++ b/crates/terminal_view/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "terminal_view" +version = "0.1.0" +edition = "2021" + +[lib] +path = "src/terminal_container_view.rs" +doctest = false + +[dependencies] +context_menu = { path = "../context_menu" } +editor = { path = "../editor" } +language = { path = "../language" } +gpui = { path = "../gpui" } +project = { path = "../project" } +settings = { path = "../settings" } +theme = { path = "../theme" } +util = { path = "../util" } +workspace = { path = "../workspace" } +db = { path = "../db" } +alacritty_terminal = { git = "https://github.com/zed-industries/alacritty", rev = "a51dbe25d67e84d6ed4261e640d3954fbdd9be45" } +procinfo = { git = "https://github.com/zed-industries/wezterm", rev = "5cd757e5f2eb039ed0c6bb6512223e69d5efc64d", default-features = false } +smallvec = { version = "1.6", features = ["union"] } +smol = "1.2.5" +mio-extras = "2.0.6" +futures = "0.3" +ordered-float = "2.1.1" +itertools = "0.10" +dirs = "4.0.0" +shellexpand = "2.1.0" +libc = "0.2" +anyhow = "1" +thiserror = "1.0" +lazy_static = "1.4.0" +serde = { version = "1.0", features = ["derive"] } + + + +[dev-dependencies] +gpui = { path = "../gpui", features = ["test-support"] } +client = { path = "../client", features = ["test-support"]} +project = { path = "../project", features = ["test-support"]} +workspace = { path = "../workspace", features = ["test-support"] } +rand = "0.8.5" diff --git a/crates/terminal/README.md b/crates/terminal_view/README.md similarity index 100% rename from crates/terminal/README.md rename to crates/terminal_view/README.md diff --git a/crates/terminal/scripts/print256color.sh b/crates/terminal_view/scripts/print256color.sh similarity index 100% rename from crates/terminal/scripts/print256color.sh rename to crates/terminal_view/scripts/print256color.sh diff --git a/crates/terminal/scripts/truecolor.sh b/crates/terminal_view/scripts/truecolor.sh similarity index 100% rename from crates/terminal/scripts/truecolor.sh rename to crates/terminal_view/scripts/truecolor.sh diff --git a/crates/terminal/src/terminal_container_view.rs b/crates/terminal_view/src/terminal_container_view.rs similarity index 100% rename from crates/terminal/src/terminal_container_view.rs rename to crates/terminal_view/src/terminal_container_view.rs diff --git a/crates/terminal/src/terminal_element.rs b/crates/terminal_view/src/terminal_element.rs similarity index 100% rename from crates/terminal/src/terminal_element.rs rename to crates/terminal_view/src/terminal_element.rs diff --git a/crates/terminal/src/terminal_view.rs b/crates/terminal_view/src/terminal_view.rs similarity index 100% rename from crates/terminal/src/terminal_view.rs rename to crates/terminal_view/src/terminal_view.rs diff --git a/crates/zed/Cargo.toml 
b/crates/zed/Cargo.toml index 6dfb739f3a..a07c0c899c 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -48,7 +48,7 @@ rpc = { path = "../rpc" } settings = { path = "../settings" } sum_tree = { path = "../sum_tree" } text = { path = "../text" } -terminal = { path = "../terminal" } +terminal_view = { path = "../terminal_view" } theme = { path = "../theme" } theme_selector = { path = "../theme_selector" } theme_testbench = { path = "../theme_testbench" } From 83aefffa38a630651104cce729efef77737cebab Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 8 Dec 2022 10:48:28 -0800 Subject: [PATCH 225/240] Rearrange the terminal code to not have a cyclic dependency with the project --- Cargo.lock | 4 +- crates/editor/src/editor.rs | 2 +- crates/project/src/project.rs | 8 +- crates/terminal/Cargo.toml | 6 +- crates/terminal/src/terminal.rs | 153 +++++++--------- .../src/tests/terminal_test_context.rs | 143 --------------- crates/terminal_view/Cargo.toml | 2 +- .../src/persistence.rs | 5 +- .../src/terminal_container_view.rs | 166 ++++++++++++++---- crates/terminal_view/src/terminal_element.rs | 31 ++-- crates/terminal_view/src/terminal_view.rs | 46 ++++- crates/zed/src/main.rs | 4 +- 12 files changed, 270 insertions(+), 300 deletions(-) delete mode 100644 crates/terminal/src/tests/terminal_test_context.rs rename crates/{terminal => terminal_view}/src/persistence.rs (91%) diff --git a/Cargo.lock b/Cargo.lock index 1868959a09..ddd7a0f7fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6271,6 +6271,7 @@ dependencies = [ "mio-extras", "ordered-float", "procinfo", + "rand 0.8.5", "serde", "settings", "shellexpand", @@ -6278,13 +6279,13 @@ dependencies = [ "smol", "theme", "thiserror", + "util", ] [[package]] name = "terminal_view" version = "0.1.0" dependencies = [ - "alacritty_terminal", "anyhow", "client", "context_menu", @@ -6307,6 +6308,7 @@ dependencies = [ "shellexpand", "smallvec", "smol", + "terminal", "theme", "thiserror", "util", diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 417b60bc5b..ad21622fd9 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -2422,7 +2422,7 @@ impl Editor { let all_edits_within_excerpt = buffer.read_with(&cx, |buffer, _| { let excerpt_range = excerpt_range.to_offset(buffer); buffer - .edited_ranges_for_transaction(transaction) + .edited_ranges_for_transaction::(transaction) .all(|range| { excerpt_range.start <= range.start && excerpt_range.end >= range.end diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index e61f0fe0b7..40f1c93e51 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -60,9 +60,9 @@ use std::{ atomic::{AtomicUsize, Ordering::SeqCst}, Arc, }, - thread::panicking, time::Instant, }; +use terminal::Terminal; use thiserror::Error; use util::{defer, post_inc, ResultExt, TryFutureExt as _}; @@ -1196,12 +1196,14 @@ impl Project { pub fn create_terminal_connection( &mut self, - cx: &mut ModelContext, - ) -> Result> { + _cx: &mut ModelContext, + ) -> Result> { if self.is_remote() { return Err(anyhow!( "creating terminals as a guest is not supported yet" )); + } else { + unimplemented!() } } diff --git a/crates/terminal/Cargo.toml b/crates/terminal/Cargo.toml index 2948eaec69..0dea7bfbcf 100644 --- a/crates/terminal/Cargo.toml +++ b/crates/terminal/Cargo.toml @@ -13,6 +13,7 @@ gpui = { path = "../gpui" } settings = { path = "../settings" } db = { path = "../db" } theme = { path = "../theme" } +util = { path = "../util" } alacritty_terminal 
= { git = "https://github.com/zed-industries/alacritty", rev = "a51dbe25d67e84d6ed4261e640d3954fbdd9be45" } procinfo = { git = "https://github.com/zed-industries/wezterm", rev = "5cd757e5f2eb039ed0c6bb6512223e69d5efc64d", default-features = false } smallvec = { version = "1.6", features = ["union"] } @@ -27,4 +28,7 @@ libc = "0.2" anyhow = "1" thiserror = "1.0" lazy_static = "1.4.0" -serde = { version = "1.0", features = ["derive"] } \ No newline at end of file +serde = { version = "1.0", features = ["derive"] } + +[dev-dependencies] +rand = "0.8.5" diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 62df8aca82..937678df0b 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -1,5 +1,5 @@ pub mod mappings; -mod persistence; +pub use alacritty_terminal; use alacritty_terminal::{ ansi::{ClearMode, Handler}, @@ -30,7 +30,6 @@ use mappings::mouse::{ alt_scroll, grid_point, mouse_button_report, mouse_moved_report, mouse_side, scroll_report, }; -use persistence::TERMINAL_CONNECTION; use procinfo::LocalProcessInfo; use settings::{AlternateScroll, Settings, Shell, TerminalBlink}; use util::ResultExt; @@ -53,8 +52,7 @@ use gpui::{ geometry::vector::{vec2f, Vector2F}, keymap::Keystroke, scene::{MouseDown, MouseDrag, MouseScrollWheel, MouseUp}, - AppContext, ClipboardItem, Entity, ModelContext, MouseButton, MouseMovedEvent, - MutableAppContext, Task, + ClipboardItem, Entity, ModelContext, MouseButton, MouseMovedEvent, Task, }; use crate::mappings::{ @@ -63,12 +61,6 @@ use crate::mappings::{ }; use lazy_static::lazy_static; -///Initialize and register all of our action handlers -pub fn init(cx: &mut MutableAppContext) { - terminal_view::init(cx); - terminal_container_view::init(cx); -} - ///Scrolling is unbearably sluggish by default. Alacritty supports a configurable ///Scroll multiplier that is set to 3 by default. This will be removed when I ///Implement scroll bars. 
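The hunk that follows makes TerminalSize's cell_width, line_height, height, and width fields public, so code outside the crate can do the pixel-to-grid arithmetic itself. A rough sketch of that mapping, in the spirit of the content_index_for_mouse tests later in this commit; the clamping here is illustrative, and the real helper indexes into TerminalContent's flattened cells buffer rather than returning coordinates:

// Sketch: map a pixel position to a (row, column) cell, clamped to the grid.
// Uses the accessor methods TerminalSize already exposes.
fn grid_cell(pos: Vector2F, size: &TerminalSize) -> (usize, usize) {
    let max_col = (size.width() / size.cell_width()) as usize - 1;
    let max_row = (size.height() / size.line_height()) as usize - 1;
    let col = ((pos.x() / size.cell_width()).max(0.) as usize).min(max_col);
    let row = ((pos.y() / size.line_height()).max(0.) as usize).min(max_row);
    (row, col)
}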
@@ -124,10 +116,10 @@ impl EventListener for ZedListener { #[derive(Clone, Copy, Debug)] pub struct TerminalSize { - cell_width: f32, - line_height: f32, - height: f32, - width: f32, + pub cell_width: f32, + pub line_height: f32, + pub height: f32, + pub width: f32, } impl TerminalSize { @@ -281,8 +273,6 @@ impl TerminalBuilder { blink_settings: Option, alternate_scroll: &AlternateScroll, window_id: usize, - item_id: ItemId, - workspace_id: WorkspaceId, ) -> Result { let pty_config = { let alac_shell = shell.clone().and_then(|shell| match shell { @@ -387,8 +377,6 @@ impl TerminalBuilder { last_mouse_position: None, next_link_id: 0, selection_phase: SelectionPhase::Ended, - workspace_id, - item_id, }; Ok(TerminalBuilder { @@ -460,9 +448,9 @@ impl TerminalBuilder { } #[derive(Debug, Clone)] -struct IndexedCell { - point: Point, - cell: Cell, +pub struct IndexedCell { + pub point: Point, + pub cell: Cell, } impl Deref for IndexedCell { @@ -474,17 +462,18 @@ impl Deref for IndexedCell { } } +// TODO: Un-pub #[derive(Clone)] pub struct TerminalContent { - cells: Vec, - mode: TermMode, - display_offset: usize, - selection_text: Option, - selection: Option, - cursor: RenderableCursor, - cursor_char: char, - size: TerminalSize, - last_hovered_hyperlink: Option<(String, RangeInclusive, usize)>, + pub cells: Vec, + pub mode: TermMode, + pub display_offset: usize, + pub selection_text: Option, + pub selection: Option, + pub cursor: RenderableCursor, + pub cursor_char: char, + pub size: TerminalSize, + pub last_hovered_hyperlink: Option<(String, RangeInclusive, usize)>, } impl Default for TerminalContent { @@ -521,19 +510,17 @@ pub struct Terminal { /// This is only used for terminal hyperlink checking last_mouse_position: Option, pub matches: Vec>, - last_content: TerminalContent, + pub last_content: TerminalContent, last_synced: Instant, sync_task: Option>, - selection_head: Option, - breadcrumb_text: String, + pub selection_head: Option, + pub breadcrumb_text: String, shell_pid: u32, shell_fd: u32, - foreground_process_info: Option, + pub foreground_process_info: Option, scroll_px: f32, next_link_id: usize, selection_phase: SelectionPhase, - workspace_id: WorkspaceId, - item_id: ItemId, } impl Terminal { @@ -574,20 +561,6 @@ impl Terminal { if self.update_process_info() { cx.emit(Event::TitleChanged); - - if let Some(foreground_info) = &self.foreground_process_info { - let cwd = foreground_info.cwd.clone(); - let item_id = self.item_id; - let workspace_id = self.workspace_id; - cx.background() - .spawn(async move { - TERMINAL_CONNECTION - .save_working_directory(item_id, workspace_id, cwd) - .await - .log_err(); - }) - .detach(); - } } } AlacTermEvent::ColorRequest(idx, fun_ptr) => { @@ -1190,42 +1163,13 @@ impl Terminal { } } - pub fn set_workspace_id(&mut self, id: WorkspaceId, cx: &AppContext) { - let old_workspace_id = self.workspace_id; - let item_id = self.item_id; - cx.background() - .spawn(async move { - TERMINAL_CONNECTION - .update_workspace_id(id, old_workspace_id, item_id) - .await - .log_err() - }) - .detach(); - - self.workspace_id = id; - } - pub fn find_matches( &mut self, - query: project::search::SearchQuery, + searcher: RegexSearch, cx: &mut ModelContext, ) -> Task>> { let term = self.term.clone(); cx.background().spawn(async move { - let searcher = match query { - project::search::SearchQuery::Text { query, .. } => { - RegexSearch::new(query.as_ref()) - } - project::search::SearchQuery::Regex { query, .. 
} => { - RegexSearch::new(query.as_ref()) - } - }; - - if searcher.is_err() { - return Vec::new(); - } - let searcher = searcher.unwrap(); - let term = term.lock(); all_search_matches(&term, &searcher).collect() @@ -1322,14 +1266,14 @@ fn open_uri(uri: &str) -> Result<(), std::io::Error> { #[cfg(test)] mod tests { + use alacritty_terminal::{ + index::{Column, Line, Point}, + term::cell::Cell, + }; use gpui::geometry::vector::vec2f; - use rand::{thread_rng, Rng}; + use rand::{rngs::ThreadRng, thread_rng, Rng}; - use crate::content_index_for_mouse; - - use self::terminal_test_context::TerminalTestContext; - - pub mod terminal_test_context; + use crate::{content_index_for_mouse, IndexedCell, TerminalContent, TerminalSize}; #[test] fn test_mouse_to_cell() { @@ -1346,7 +1290,7 @@ mod tests { width: cell_size * (viewport_cells as f32), }; - let (content, cells) = TerminalTestContext::create_terminal_content(size, &mut rng); + let (content, cells) = create_terminal_content(size, &mut rng); for i in 0..(viewport_cells - 1) { let i = i as usize; @@ -1382,7 +1326,7 @@ mod tests { width: 100., }; - let (content, cells) = TerminalTestContext::create_terminal_content(size, &mut rng); + let (content, cells) = create_terminal_content(size, &mut rng); assert_eq!( content.cells[content_index_for_mouse(vec2f(-10., -10.), &content)].c, @@ -1393,4 +1337,37 @@ mod tests { cells[9][9] ); } + + fn create_terminal_content( + size: TerminalSize, + rng: &mut ThreadRng, + ) -> (TerminalContent, Vec>) { + let mut ic = Vec::new(); + let mut cells = Vec::new(); + + for row in 0..((size.height() / size.line_height()) as usize) { + let mut row_vec = Vec::new(); + for col in 0..((size.width() / size.cell_width()) as usize) { + let cell_char = rng.gen(); + ic.push(IndexedCell { + point: Point::new(Line(row as i32), Column(col)), + cell: Cell { + c: cell_char, + ..Default::default() + }, + }); + row_vec.push(cell_char) + } + cells.push(row_vec) + } + + ( + TerminalContent { + cells: ic, + size, + ..Default::default() + }, + cells, + ) + } } diff --git a/crates/terminal/src/tests/terminal_test_context.rs b/crates/terminal/src/tests/terminal_test_context.rs deleted file mode 100644 index 67ebb55805..0000000000 --- a/crates/terminal/src/tests/terminal_test_context.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::{path::Path, time::Duration}; - -use alacritty_terminal::{ - index::{Column, Line, Point}, - term::cell::Cell, -}; -use gpui::{ModelHandle, TestAppContext, ViewHandle}; - -use project::{Entry, Project, ProjectPath, Worktree}; -use rand::{rngs::ThreadRng, Rng}; -use workspace::{AppState, Workspace}; - -use crate::{IndexedCell, TerminalContent, TerminalSize}; - -pub struct TerminalTestContext<'a> { - pub cx: &'a mut TestAppContext, -} - -impl<'a> TerminalTestContext<'a> { - pub fn new(cx: &'a mut TestAppContext) -> Self { - cx.set_condition_duration(Some(Duration::from_secs(5))); - - TerminalTestContext { cx } - } - - ///Creates a worktree with 1 file: /root.txt - pub async fn blank_workspace(&mut self) -> (ModelHandle, ViewHandle) { - let params = self.cx.update(AppState::test); - - let project = Project::test(params.fs.clone(), [], self.cx).await; - let (_, workspace) = self.cx.add_window(|cx| { - Workspace::new( - Default::default(), - 0, - project.clone(), - |_, _| unimplemented!(), - cx, - ) - }); - - (project, workspace) - } - - ///Creates a worktree with 1 folder: /root{suffix}/ - pub async fn create_folder_wt( - &mut self, - project: ModelHandle, - path: impl AsRef, - ) -> (ModelHandle, Entry) { - 
self.create_wt(project, true, path).await - } - - ///Creates a worktree with 1 file: /root{suffix}.txt - pub async fn create_file_wt( - &mut self, - project: ModelHandle, - path: impl AsRef, - ) -> (ModelHandle, Entry) { - self.create_wt(project, false, path).await - } - - async fn create_wt( - &mut self, - project: ModelHandle, - is_dir: bool, - path: impl AsRef, - ) -> (ModelHandle, Entry) { - let (wt, _) = project - .update(self.cx, |project, cx| { - project.find_or_create_local_worktree(path, true, cx) - }) - .await - .unwrap(); - - let entry = self - .cx - .update(|cx| { - wt.update(cx, |wt, cx| { - wt.as_local() - .unwrap() - .create_entry(Path::new(""), is_dir, cx) - }) - }) - .await - .unwrap(); - - (wt, entry) - } - - pub fn insert_active_entry_for( - &mut self, - wt: ModelHandle, - entry: Entry, - project: ModelHandle, - ) { - self.cx.update(|cx| { - let p = ProjectPath { - worktree_id: wt.read(cx).id(), - path: entry.path, - }; - project.update(cx, |project, cx| project.set_active_path(Some(p), cx)); - }); - } - - pub fn create_terminal_content( - size: TerminalSize, - rng: &mut ThreadRng, - ) -> (TerminalContent, Vec>) { - let mut ic = Vec::new(); - let mut cells = Vec::new(); - - for row in 0..((size.height() / size.line_height()) as usize) { - let mut row_vec = Vec::new(); - for col in 0..((size.width() / size.cell_width()) as usize) { - let cell_char = rng.gen(); - ic.push(IndexedCell { - point: Point::new(Line(row as i32), Column(col)), - cell: Cell { - c: cell_char, - ..Default::default() - }, - }); - row_vec.push(cell_char) - } - cells.push(row_vec) - } - - ( - TerminalContent { - cells: ic, - size, - ..Default::default() - }, - cells, - ) - } -} - -impl<'a> Drop for TerminalTestContext<'a> { - fn drop(&mut self) { - self.cx.set_condition_duration(None); - } -} diff --git a/crates/terminal_view/Cargo.toml b/crates/terminal_view/Cargo.toml index 181ed606e0..fae60a943d 100644 --- a/crates/terminal_view/Cargo.toml +++ b/crates/terminal_view/Cargo.toml @@ -18,8 +18,8 @@ theme = { path = "../theme" } util = { path = "../util" } workspace = { path = "../workspace" } db = { path = "../db" } -alacritty_terminal = { git = "https://github.com/zed-industries/alacritty", rev = "a51dbe25d67e84d6ed4261e640d3954fbdd9be45" } procinfo = { git = "https://github.com/zed-industries/wezterm", rev = "5cd757e5f2eb039ed0c6bb6512223e69d5efc64d", default-features = false } +terminal = { path = "../terminal" } smallvec = { version = "1.6", features = ["union"] } smol = "1.2.5" mio-extras = "2.0.6" diff --git a/crates/terminal/src/persistence.rs b/crates/terminal_view/src/persistence.rs similarity index 91% rename from crates/terminal/src/persistence.rs rename to crates/terminal_view/src/persistence.rs index 333911ee6d..db715aeef7 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal_view/src/persistence.rs @@ -1,11 +1,12 @@ use std::path::PathBuf; use db::{define_connection, query, sqlez_macros::sql}; +use workspace::{WorkspaceDb, WorkspaceId}; type ModelId = usize; define_connection! { - pub static ref TERMINAL_CONNECTION: TerminalDb<()> = + pub static ref TERMINAL_DB: TerminalDb = &[sql!( CREATE TABLE terminals ( workspace_id INTEGER, @@ -34,7 +35,7 @@ impl TerminalDb { query! 
{ pub async fn save_working_directory( item_id: ModelId, - workspace_id: WorkspaceId, + workspace_id: i64, working_directory: PathBuf ) -> Result<()> { INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) diff --git a/crates/terminal_view/src/terminal_container_view.rs b/crates/terminal_view/src/terminal_container_view.rs index 8f4bfeeb53..bf1e7bbddb 100644 --- a/crates/terminal_view/src/terminal_container_view.rs +++ b/crates/terminal_view/src/terminal_container_view.rs @@ -1,13 +1,18 @@ -use crate::persistence::TERMINAL_CONNECTION; -use crate::terminal_view::TerminalView; -use crate::{Event, TerminalBuilder, TerminalError}; +mod persistence; +pub mod terminal_element; +pub mod terminal_view; + +use crate::persistence::TERMINAL_DB; +use crate::terminal_view::TerminalView; +use terminal::alacritty_terminal::index::Point; +use terminal::{Event, TerminalBuilder, TerminalError}; -use alacritty_terminal::index::Point; use dirs::home_dir; use gpui::{ actions, elements::*, AnyViewHandle, AppContext, Entity, ModelHandle, MutableAppContext, Task, View, ViewContext, ViewHandle, WeakViewHandle, }; +use terminal_view::regex_search_for_query; use util::{truncate_and_trailoff, ResultExt}; use workspace::searchable::{SearchEvent, SearchOptions, SearchableItem, SearchableItemHandle}; use workspace::{ @@ -30,6 +35,8 @@ pub fn init(cx: &mut MutableAppContext) { cx.add_action(TerminalContainer::deploy); register_deserializable_item::(cx); + + terminal_view::init(cx); } //Make terminal view an enum, that can give you views for the error and non-error states @@ -92,7 +99,7 @@ impl TerminalContainer { pub fn new( working_directory: Option, modal: bool, - workspace_id: WorkspaceId, + _workspace_id: WorkspaceId, cx: &mut ViewContext, ) -> Self { let settings = cx.global::(); @@ -119,8 +126,6 @@ impl TerminalContainer { settings.terminal_overrides.blinking.clone(), scroll, cx.window_id(), - cx.view_id(), - workspace_id, ) { Ok(terminal) => { let terminal = cx.add_model(|cx| terminal.subscribe(cx)); @@ -389,7 +394,7 @@ impl Item for TerminalContainer { item_id: workspace::ItemId, cx: &mut ViewContext, ) -> Task>> { - let working_directory = TERMINAL_CONNECTION.get_working_directory(item_id, workspace_id); + let working_directory = TERMINAL_DB.get_working_directory(item_id, workspace_id); Task::ready(Ok(cx.add_view(|cx| { TerminalContainer::new( working_directory.log_err().flatten(), @@ -400,11 +405,14 @@ impl Item for TerminalContainer { }))) } - fn added_to_workspace(&mut self, workspace: &mut Workspace, cx: &mut ViewContext) { - if let Some(connected) = self.connected() { - let id = workspace.database_id(); - let terminal_handle = connected.read(cx).terminal().clone(); - terminal_handle.update(cx, |terminal, cx| terminal.set_workspace_id(id, cx)) + fn added_to_workspace(&mut self, _workspace: &mut Workspace, cx: &mut ViewContext) { + if let Some(_connected) = self.connected() { + // let id = workspace.database_id(); + // let terminal_handle = connected.read(cx).terminal().clone(); + //TODO + cx.background() + .spawn(TERMINAL_DB.update_workspace_id(0, 0, 0)) + .detach(); } } } @@ -477,7 +485,11 @@ impl SearchableItem for TerminalContainer { ) -> Task> { if let TerminalContainerContent::Connected(connected) = &self.content { let terminal = connected.read(cx).terminal().clone(); - terminal.update(cx, |term, cx| term.find_matches(query, cx)) + if let Some(searcher) = regex_search_for_query(query) { + terminal.update(cx, |term, cx| term.find_matches(searcher, cx)) + } else { + 
cx.background().spawn(async { Vec::new() }) + } } else { Task::ready(Vec::new()) } @@ -585,21 +597,20 @@ mod tests { use super::*; use gpui::TestAppContext; + use project::{Entry, Worktree}; + use workspace::AppState; use std::path::Path; - use crate::tests::terminal_test_context::TerminalTestContext; - ///Working directory calculation tests ///No Worktrees in project -> home_dir() #[gpui::test] async fn no_worktree(cx: &mut TestAppContext) { //Setup variables - let mut cx = TerminalTestContext::new(cx); - let (project, workspace) = cx.blank_workspace().await; + let (project, workspace) = blank_workspace(cx).await; //Test - cx.cx.read(|cx| { + cx.read(|cx| { let workspace = workspace.read(cx); let active_entry = project.read(cx).active_entry(); @@ -619,11 +630,10 @@ mod tests { async fn no_active_entry_worktree_is_file(cx: &mut TestAppContext) { //Setup variables - let mut cx = TerminalTestContext::new(cx); - let (project, workspace) = cx.blank_workspace().await; - cx.create_file_wt(project.clone(), "/root.txt").await; + let (project, workspace) = blank_workspace(cx).await; + create_file_wt(project.clone(), "/root.txt", cx).await; - cx.cx.read(|cx| { + cx.read(|cx| { let workspace = workspace.read(cx); let active_entry = project.read(cx).active_entry(); @@ -642,12 +652,11 @@ mod tests { #[gpui::test] async fn no_active_entry_worktree_is_dir(cx: &mut TestAppContext) { //Setup variables - let mut cx = TerminalTestContext::new(cx); - let (project, workspace) = cx.blank_workspace().await; - let (_wt, _entry) = cx.create_folder_wt(project.clone(), "/root/").await; + let (project, workspace) = blank_workspace(cx).await; + let (_wt, _entry) = create_folder_wt(project.clone(), "/root/", cx).await; //Test - cx.cx.update(|cx| { + cx.update(|cx| { let workspace = workspace.read(cx); let active_entry = project.read(cx).active_entry(); @@ -665,14 +674,14 @@ mod tests { #[gpui::test] async fn active_entry_worktree_is_file(cx: &mut TestAppContext) { //Setup variables - let mut cx = TerminalTestContext::new(cx); - let (project, workspace) = cx.blank_workspace().await; - let (_wt, _entry) = cx.create_folder_wt(project.clone(), "/root1/").await; - let (wt2, entry2) = cx.create_file_wt(project.clone(), "/root2.txt").await; - cx.insert_active_entry_for(wt2, entry2, project.clone()); + + let (project, workspace) = blank_workspace(cx).await; + let (_wt, _entry) = create_folder_wt(project.clone(), "/root1/", cx).await; + let (wt2, entry2) = create_file_wt(project.clone(), "/root2.txt", cx).await; + insert_active_entry_for(wt2, entry2, project.clone(), cx); //Test - cx.cx.update(|cx| { + cx.update(|cx| { let workspace = workspace.read(cx); let active_entry = project.read(cx).active_entry(); @@ -689,14 +698,13 @@ mod tests { #[gpui::test] async fn active_entry_worktree_is_dir(cx: &mut TestAppContext) { //Setup variables - let mut cx = TerminalTestContext::new(cx); - let (project, workspace) = cx.blank_workspace().await; - let (_wt, _entry) = cx.create_folder_wt(project.clone(), "/root1/").await; - let (wt2, entry2) = cx.create_folder_wt(project.clone(), "/root2/").await; - cx.insert_active_entry_for(wt2, entry2, project.clone()); + let (project, workspace) = blank_workspace(cx).await; + let (_wt, _entry) = create_folder_wt(project.clone(), "/root1/", cx).await; + let (wt2, entry2) = create_folder_wt(project.clone(), "/root2/", cx).await; + insert_active_entry_for(wt2, entry2, project.clone(), cx); //Test - cx.cx.update(|cx| { + cx.update(|cx| { let workspace = workspace.read(cx); let active_entry = 
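Both search paths now hand back a task resolving to an empty Vec when the query cannot be turned into a searcher, rather than erroring. A rough standalone analogue of that fallback using smol (already in terminal_view's dependency list) in place of gpui's cx.background(); in the real code the synchronous branch can also use gpui's Task::ready:

    use smol::Task;

    fn find_matches(needle: Option<String>, haystack: &'static str) -> Task<Vec<usize>> {
        match needle {
            Some(needle) => smol::spawn(async move {
                haystack.match_indices(&needle).map(|(ix, _)| ix).collect()
            }),
            // No usable searcher (e.g. the pattern failed to compile):
            // still return a task, just one that is effectively already done.
            None => smol::spawn(async { Vec::new() }),
        }
    }

    fn main() {
        smol::block_on(async {
            assert_eq!(find_matches(Some("b".into()), "abcb").await, vec![1, 3]);
            assert!(find_matches(None, "abcb").await.is_empty());
        });
    }
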
project.read(cx).active_entry(); @@ -708,4 +716,84 @@ mod tests { assert_eq!(res, Some((Path::new("/root1/")).to_path_buf())); }); } + + ///Creates a worktree with 1 file: /root.txt + pub async fn blank_workspace( + cx: &mut TestAppContext, + ) -> (ModelHandle, ViewHandle) { + let params = cx.update(AppState::test); + + let project = Project::test(params.fs.clone(), [], cx).await; + let (_, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + 0, + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); + + (project, workspace) + } + + ///Creates a worktree with 1 folder: /root{suffix}/ + async fn create_folder_wt( + project: ModelHandle, + path: impl AsRef, + cx: &mut TestAppContext, + ) -> (ModelHandle, Entry) { + create_wt(project, true, path, cx).await + } + + ///Creates a worktree with 1 file: /root{suffix}.txt + async fn create_file_wt( + project: ModelHandle, + path: impl AsRef, + cx: &mut TestAppContext, + ) -> (ModelHandle, Entry) { + create_wt(project, false, path, cx).await + } + + async fn create_wt( + project: ModelHandle, + is_dir: bool, + path: impl AsRef, + cx: &mut TestAppContext, + ) -> (ModelHandle, Entry) { + let (wt, _) = project + .update(cx, |project, cx| { + project.find_or_create_local_worktree(path, true, cx) + }) + .await + .unwrap(); + + let entry = cx + .update(|cx| { + wt.update(cx, |wt, cx| { + wt.as_local() + .unwrap() + .create_entry(Path::new(""), is_dir, cx) + }) + }) + .await + .unwrap(); + + (wt, entry) + } + + pub fn insert_active_entry_for( + wt: ModelHandle, + entry: Entry, + project: ModelHandle, + cx: &mut TestAppContext, + ) { + cx.update(|cx| { + let p = ProjectPath { + worktree_id: wt.read(cx).id(), + path: entry.path, + }; + project.update(cx, |project, cx| project.set_active_path(Some(p), cx)); + }); + } } diff --git a/crates/terminal_view/src/terminal_element.rs b/crates/terminal_view/src/terminal_element.rs index adfcb47024..53a38ec20a 100644 --- a/crates/terminal_view/src/terminal_element.rs +++ b/crates/terminal_view/src/terminal_element.rs @@ -1,9 +1,3 @@ -use alacritty_terminal::{ - ansi::{Color as AnsiColor, Color::Named, CursorShape as AlacCursorShape, NamedColor}, - grid::Dimensions, - index::Point, - term::{cell::Flags, TermMode}, -}; use editor::{Cursor, HighlightedRange, HighlightedRangeLine}; use gpui::{ color::Color, @@ -22,17 +16,23 @@ use itertools::Itertools; use language::CursorShape; use ordered_float::OrderedFloat; use settings::Settings; +use terminal::{ + alacritty_terminal::{ + ansi::{Color as AnsiColor, CursorShape as AlacCursorShape, NamedColor}, + grid::Dimensions, + index::Point, + term::{cell::Flags, TermMode}, + }, + mappings::colors::convert_color, + IndexedCell, Terminal, TerminalContent, TerminalSize, +}; use theme::TerminalStyle; use util::ResultExt; use std::{fmt::Debug, ops::RangeInclusive}; use std::{mem, ops::Range}; -use crate::{ - mappings::colors::convert_color, - terminal_view::{DeployContextMenu, TerminalView}, - IndexedCell, Terminal, TerminalContent, TerminalSize, -}; +use crate::terminal_view::{DeployContextMenu, TerminalView}; ///The information generated during layout that is nescessary for painting pub struct LayoutState { @@ -198,7 +198,10 @@ impl TerminalElement { //Expand background rect range { - if matches!(bg, Named(NamedColor::Background)) { + if matches!( + bg, + terminal::alacritty_terminal::ansi::Color::Named(NamedColor::Background) + ) { //Continue to next cell, resetting variables if nescessary cur_alac_color = None; if let Some(rect) = cur_rect { @@ -299,7 
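The matches! change above sits in the loop that batches cell backgrounds: consecutive cells in a row sharing the same non-default background color are merged into a single rectangle, while default-background cells reset the run and get no rectangle at all. A minimal self-contained model of that merging, with invented Color/Rect types standing in for gpui's:

    #[derive(PartialEq, Clone, Copy, Debug)]
    struct Color(u8);

    #[derive(Debug, PartialEq)]
    struct Rect { row: usize, start_col: usize, width: usize, color: Color }

    fn batch_row(row: usize, cells: &[Color], default_bg: Color) -> Vec<Rect> {
        let mut rects = Vec::new();
        let mut cur: Option<Rect> = None;
        for (col, &bg) in cells.iter().enumerate() {
            match cur.as_mut() {
                // Extend the current run while the color matches.
                Some(rect) if rect.color == bg => rect.width += 1,
                _ => {
                    if let Some(rect) = cur.take() {
                        rects.push(rect);
                    }
                    // Cells showing the default background need no rect.
                    if bg != default_bg {
                        cur = Some(Rect { row, start_col: col, width: 1, color: bg });
                    }
                }
            }
        }
        rects.extend(cur);
        rects
    }

    fn main() {
        let (d, r) = (Color(0), Color(1));
        let rects = batch_row(0, &[d, r, r, d, r], d);
        assert_eq!(rects.len(), 2);
        assert_eq!(rects[0], Rect { row: 0, start_col: 1, width: 2, color: r });
    }
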
+302,7 @@ impl TerminalElement { ///Convert the Alacritty cell styles to GPUI text styles and background color fn cell_style( indexed: &IndexedCell, - fg: AnsiColor, + fg: terminal::alacritty_terminal::ansi::Color, style: &TerminalStyle, text_style: &TextStyle, font_cache: &FontCache, @@ -636,7 +639,7 @@ impl Element for TerminalElement { //Layout cursor. Rectangle is used for IME, so we should lay it out even //if we don't end up showing it. - let cursor = if let AlacCursorShape::Hidden = cursor.shape { + let cursor = if let terminal::alacritty_terminal::ansi::CursorShape::Hidden = cursor.shape { None } else { let cursor_point = DisplayCursor::from(cursor.point, *display_offset); diff --git a/crates/terminal_view/src/terminal_view.rs b/crates/terminal_view/src/terminal_view.rs index 21e055319a..ad0538e2ea 100644 --- a/crates/terminal_view/src/terminal_view.rs +++ b/crates/terminal_view/src/terminal_view.rs @@ -1,6 +1,5 @@ -use std::{ops::RangeInclusive, time::Duration}; +use std::{ops::RangeInclusive, path::PathBuf, time::Duration}; -use alacritty_terminal::{index::Point, term::TermMode}; use context_menu::{ContextMenu, ContextMenuItem}; use gpui::{ actions, @@ -14,10 +13,17 @@ use gpui::{ use serde::Deserialize; use settings::{Settings, TerminalBlink}; use smol::Timer; +use terminal::{ + alacritty_terminal::{ + index::Point, + term::{search::RegexSearch, TermMode}, + }, + Terminal, +}; use util::ResultExt; use workspace::pane; -use crate::{terminal_element::TerminalElement, Event, Terminal}; +use crate::{persistence::TERMINAL_DB, terminal_element::TerminalElement, Event}; const CURSOR_BLINK_INTERVAL: Duration = Duration::from_millis(500); @@ -95,6 +101,22 @@ impl TerminalView { cx.emit(Event::Wakeup); } Event::BlinkChanged => this.blinking_on = !this.blinking_on, + Event::TitleChanged => { + // if let Some(foreground_info) = &terminal.read(cx).foreground_process_info { + // let cwd = foreground_info.cwd.clone(); + //TODO + // let item_id = self.item_id; + // let workspace_id = self.workspace_id; + cx.background() + .spawn(async move { + TERMINAL_DB + .save_working_directory(0, 0, PathBuf::new()) + .await + .log_err(); + }) + .detach(); + // } + } _ => cx.emit(*event), }) .detach(); @@ -246,8 +268,14 @@ impl TerminalView { query: project::search::SearchQuery, cx: &mut ViewContext, ) -> Task>> { - self.terminal - .update(cx, |term, cx| term.find_matches(query, cx)) + let searcher = regex_search_for_query(query); + + if let Some(searcher) = searcher { + self.terminal + .update(cx, |term, cx| term.find_matches(searcher, cx)) + } else { + cx.background().spawn(async { Vec::new() }) + } } pub fn terminal(&self) -> &ModelHandle { @@ -302,6 +330,14 @@ impl TerminalView { } } +pub fn regex_search_for_query(query: project::search::SearchQuery) -> Option { + let searcher = match query { + project::search::SearchQuery::Text { query, .. } => RegexSearch::new(&query), + project::search::SearchQuery::Regex { query, .. 
} => RegexSearch::new(&query), + }; + searcher.ok() +} + impl View for TerminalView { fn ui_name() -> &'static str { "Terminal" diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 4163841d45..1b41613937 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -32,7 +32,7 @@ use settings::{ use smol::process::Command; use std::fs::OpenOptions; use std::{env, ffi::OsStr, panic, path::PathBuf, sync::Arc, thread, time::Duration}; -use terminal::terminal_container_view::{get_working_directory, TerminalContainer}; +use terminal_view::{get_working_directory, TerminalContainer}; use fs::RealFs; use settings::watched_json::{watch_keymap_file, watch_settings_file, WatchedJsonFile}; @@ -119,7 +119,7 @@ fn main() { diagnostics::init(cx); search::init(cx); vim::init(cx); - terminal::init(cx); + terminal_view::init(cx); theme_testbench::init(cx); cx.spawn(|cx| watch_themes(fs.clone(), themes.clone(), cx)) From 2733f91d8caa2d18ed5e63aa9487d0add3d6afd0 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 8 Dec 2022 15:18:24 -0800 Subject: [PATCH 226/240] Fix bugs resulting from refactoring the terminal into project and workspace halves --- crates/db/src/query.rs | 4 +- crates/settings/src/settings.rs | 28 ++++++++- crates/terminal/src/terminal.rs | 7 +-- crates/terminal_view/src/persistence.rs | 22 +++---- .../src/terminal_container_view.rs | 62 ++++++++----------- crates/terminal_view/src/terminal_element.rs | 9 +-- crates/terminal_view/src/terminal_view.rs | 45 +++++++++----- 7 files changed, 101 insertions(+), 76 deletions(-) diff --git a/crates/db/src/query.rs b/crates/db/src/query.rs index 731fca15cb..01132d383c 100644 --- a/crates/db/src/query.rs +++ b/crates/db/src/query.rs @@ -199,10 +199,10 @@ macro_rules! query { use $crate::anyhow::Context; - self.write(|connection| { + self.write(move |connection| { let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - connection.select_row_bound::<($($arg_type),+), $return_type>(indoc! 
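The new regex_search_for_query funnels both plain-text and regex queries into one regex compilation and silently drops patterns that fail to compile. A standalone model, with the regex crate standing in for alacritty's RegexSearch:

    use regex::Regex;

    enum SearchQuery {
        Text { query: String },
        Regex { query: String },
    }

    fn regex_search_for_query(query: SearchQuery) -> Option<Regex> {
        let pattern = match query {
            SearchQuery::Text { query } | SearchQuery::Regex { query } => query,
        };
        // A pattern that fails to compile yields None; callers treat that
        // as "no matches" rather than surfacing an error.
        Regex::new(&pattern).ok()
    }

    fn main() {
        assert!(regex_search_for_query(SearchQuery::Regex { query: "fo+".into() }).is_some());
        assert!(regex_search_for_query(SearchQuery::Text { query: "fo+(".into() }).is_none());
    }

Worth noting: a Text query is compiled verbatim as a regex, so metacharacters in an ordinary search string alter or invalidate the pattern unless they are escaped first (regex::escape would do that in this model).
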
{ $sql })?(($($arg),+)) + connection.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), diff --git a/crates/settings/src/settings.rs b/crates/settings/src/settings.rs index 5137751579..dd23f80abd 100644 --- a/crates/settings/src/settings.rs +++ b/crates/settings/src/settings.rs @@ -199,7 +199,7 @@ impl Default for Shell { } } -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, JsonSchema)] +#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, JsonSchema)] #[serde(rename_all = "snake_case")] pub enum AlternateScroll { On, @@ -473,6 +473,32 @@ impl Settings { }) } + pub fn terminal_scroll(&self) -> AlternateScroll { + *self.terminal_overrides.alternate_scroll.as_ref().unwrap_or( + self.terminal_defaults + .alternate_scroll + .as_ref() + .unwrap_or_else(|| &AlternateScroll::On), + ) + } + + pub fn terminal_shell(&self) -> Option { + self.terminal_overrides + .shell + .as_ref() + .or(self.terminal_defaults.shell.as_ref()) + .cloned() + } + + pub fn terminal_env(&self) -> HashMap { + self.terminal_overrides.env.clone().unwrap_or_else(|| { + self.terminal_defaults + .env + .clone() + .unwrap_or_else(|| HashMap::default()) + }) + } + #[cfg(any(test, feature = "test-support"))] pub fn test(cx: &gpui::AppContext) -> Settings { Settings { diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 937678df0b..4b69de0bf2 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -269,9 +269,9 @@ impl TerminalBuilder { pub fn new( working_directory: Option, shell: Option, - env: Option>, + mut env: HashMap, blink_settings: Option, - alternate_scroll: &AlternateScroll, + alternate_scroll: AlternateScroll, window_id: usize, ) -> Result { let pty_config = { @@ -288,10 +288,9 @@ impl TerminalBuilder { } }; - let mut env = env.unwrap_or_default(); - //TODO: Properly set the current locale, env.insert("LC_ALL".to_string(), "en_US.UTF-8".to_string()); + env.insert("ZED_TERM".to_string(), true.to_string()); let alac_scrolling = Scrolling::default(); // alac_scrolling.set_history((BACK_BUFFER_SIZE * 2) as u32); diff --git a/crates/terminal_view/src/persistence.rs b/crates/terminal_view/src/persistence.rs index db715aeef7..f090b384a4 100644 --- a/crates/terminal_view/src/persistence.rs +++ b/crates/terminal_view/src/persistence.rs @@ -1,20 +1,18 @@ use std::path::PathBuf; use db::{define_connection, query, sqlez_macros::sql}; -use workspace::{WorkspaceDb, WorkspaceId}; - -type ModelId = usize; +use workspace::{ItemId, WorkspaceDb, WorkspaceId}; define_connection! { pub static ref TERMINAL_DB: TerminalDb = &[sql!( CREATE TABLE terminals ( workspace_id INTEGER, - model_id INTEGER UNIQUE, + item_id INTEGER UNIQUE, working_directory BLOB, - PRIMARY KEY(workspace_id, model_id), + PRIMARY KEY(workspace_id, item_id), FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE + ON DELETE CASCADE ) STRICT; )]; } @@ -24,7 +22,7 @@ impl TerminalDb { pub async fn update_workspace_id( new_id: WorkspaceId, old_id: WorkspaceId, - item_id: ModelId + item_id: ItemId ) -> Result<()> { UPDATE terminals SET workspace_id = ? @@ -34,8 +32,8 @@ impl TerminalDb { query! 
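TerminalBuilder::new now takes the env map by value and layers the always-set entries (LC_ALL, plus the new ZED_TERM marker) over whatever the settings supplied. A loose, Unix-only analogue with std::process::Command standing in for the PTY setup:

    use std::collections::HashMap;
    use std::process::{Child, Command};

    fn spawn_shell(mut env: HashMap<String, String>) -> std::io::Result<Child> {
        // Entries the builder always sets, mirroring the patch.
        env.insert("LC_ALL".into(), "en_US.UTF-8".into());
        env.insert("ZED_TERM".into(), true.to_string());

        // /bin/sh is a Unix-only stand-in; the check below proves the
        // child actually sees the injected variable.
        let mut cmd = Command::new("/bin/sh");
        cmd.arg("-c").arg(r#"test "$ZED_TERM" = "true""#);
        cmd.envs(&env);
        cmd.spawn()
    }

    fn main() -> std::io::Result<()> {
        assert!(spawn_shell(HashMap::new())?.wait()?.success());
        Ok(())
    }
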
{ pub async fn save_working_directory( - item_id: ModelId, - workspace_id: i64, + item_id: ItemId, + workspace_id: WorkspaceId, working_directory: PathBuf ) -> Result<()> { INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) @@ -44,10 +42,10 @@ impl TerminalDb { } query! { - pub fn get_working_directory(item_id: ModelId, workspace_id: WorkspaceId) -> Result> { - SELECT working_directory - FROM terminals + pub async fn take_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> Result> { + DELETE FROM terminals WHERE item_id = ? AND workspace_id = ? + RETURNING working_directory } } } diff --git a/crates/terminal_view/src/terminal_container_view.rs b/crates/terminal_view/src/terminal_container_view.rs index bf1e7bbddb..9d8b79cd39 100644 --- a/crates/terminal_view/src/terminal_container_view.rs +++ b/crates/terminal_view/src/terminal_container_view.rs @@ -22,7 +22,7 @@ use workspace::{ use workspace::{register_deserializable_item, Pane, WorkspaceId}; use project::{LocalWorktree, Project, ProjectPath}; -use settings::{AlternateScroll, Settings, WorkingDirectory}; +use settings::{Settings, WorkingDirectory}; use smallvec::SmallVec; use std::ops::RangeInclusive; use std::path::{Path, PathBuf}; @@ -99,25 +99,13 @@ impl TerminalContainer { pub fn new( working_directory: Option, modal: bool, - _workspace_id: WorkspaceId, + workspace_id: WorkspaceId, cx: &mut ViewContext, ) -> Self { let settings = cx.global::(); - let shell = settings.terminal_overrides.shell.clone(); - let envs = settings.terminal_overrides.env.clone(); //Should be short and cheap. - - //TODO: move this pattern to settings - let scroll = settings - .terminal_overrides - .alternate_scroll - .as_ref() - .unwrap_or( - settings - .terminal_defaults - .alternate_scroll - .as_ref() - .unwrap_or_else(|| &AlternateScroll::On), - ); + let shell = settings.terminal_shell(); + let envs = settings.terminal_env(); + let scroll = settings.terminal_scroll(); let content = match TerminalBuilder::new( working_directory.clone(), @@ -129,7 +117,10 @@ impl TerminalContainer { ) { Ok(terminal) => { let terminal = cx.add_model(|cx| terminal.subscribe(cx)); - let view = cx.add_view(|cx| TerminalView::from_terminal(terminal, modal, cx)); + let item_id = cx.view_id(); + let view = cx.add_view(|cx| { + TerminalView::from_terminal(terminal, modal, workspace_id, item_id, cx) + }); cx.subscribe(&view, |_this, _content, event, cx| cx.emit(*event)) .detach(); @@ -394,25 +385,26 @@ impl Item for TerminalContainer { item_id: workspace::ItemId, cx: &mut ViewContext, ) -> Task>> { - let working_directory = TERMINAL_DB.get_working_directory(item_id, workspace_id); - Task::ready(Ok(cx.add_view(|cx| { - TerminalContainer::new( - working_directory.log_err().flatten(), - false, - workspace_id, - cx, - ) - }))) + cx.spawn(|pane, mut cx| async move { + let cwd = TERMINAL_DB + .take_working_directory(item_id, workspace_id) + .await + .log_err() + .flatten(); + + cx.update(|cx| { + Ok(cx.add_view(pane, |cx| { + TerminalContainer::new(cwd, false, workspace_id, cx) + })) + }) + }) } - fn added_to_workspace(&mut self, _workspace: &mut Workspace, cx: &mut ViewContext) { - if let Some(_connected) = self.connected() { - // let id = workspace.database_id(); - // let terminal_handle = connected.read(cx).terminal().clone(); - //TODO - cx.background() - .spawn(TERMINAL_DB.update_workspace_id(0, 0, 0)) - .detach(); + fn added_to_workspace(&mut self, workspace: &mut Workspace, cx: &mut ViewContext) { + if let Some(connected) = self.connected() { + 
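Two query shapes are in play here: save_working_directory is an INSERT OR REPLACE upsert keyed on the (workspace_id, item_id) primary key, and the new take_working_directory reads and deletes in one statement via DELETE ... RETURNING (SQLite 3.35+), so a restored terminal's directory is consumed exactly once. A standalone sketch of both, using rusqlite with its bundled SQLite as a stand-in for the sqlez macros:

    use rusqlite::{params, Connection, OptionalExtension, Result};

    fn main() -> Result<()> {
        let conn = Connection::open_in_memory()?;
        conn.execute_batch(
            "CREATE TABLE terminals (
                workspace_id INTEGER,
                item_id INTEGER UNIQUE,
                working_directory BLOB,
                PRIMARY KEY(workspace_id, item_id)
            ) STRICT;",
        )?;

        // Upsert: saving twice for the same (item, workspace) overwrites the row.
        for cwd in ["/home/alice/project", "/home/alice/project/src"] {
            conn.execute(
                "INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory)
                 VALUES (?, ?, ?)",
                params![1, 42, cwd.as_bytes()],
            )?;
        }

        // "Take": the value comes back and the row disappears atomically.
        let take = |conn: &Connection| -> Result<Option<Vec<u8>>> {
            conn.query_row(
                "DELETE FROM terminals WHERE item_id = ? AND workspace_id = ?
                 RETURNING working_directory",
                params![1, 42],
                |row| row.get(0),
            )
            .optional()
        };
        assert_eq!(take(&conn)?, Some(b"/home/alice/project/src".to_vec()));
        assert_eq!(take(&conn)?, None); // already consumed
        Ok(())
    }
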
connected.update(cx, |connected_view, cx| { + connected_view.added_to_workspace(workspace.database_id(), cx); + }) } } } diff --git a/crates/terminal_view/src/terminal_element.rs b/crates/terminal_view/src/terminal_element.rs index 53a38ec20a..506dd1423d 100644 --- a/crates/terminal_view/src/terminal_element.rs +++ b/crates/terminal_view/src/terminal_element.rs @@ -18,7 +18,7 @@ use ordered_float::OrderedFloat; use settings::Settings; use terminal::{ alacritty_terminal::{ - ansi::{Color as AnsiColor, CursorShape as AlacCursorShape, NamedColor}, + ansi::{Color as AnsiColor, Color::Named, CursorShape as AlacCursorShape, NamedColor}, grid::Dimensions, index::Point, term::{cell::Flags, TermMode}, @@ -198,10 +198,7 @@ impl TerminalElement { //Expand background rect range { - if matches!( - bg, - terminal::alacritty_terminal::ansi::Color::Named(NamedColor::Background) - ) { + if matches!(bg, Named(NamedColor::Background)) { //Continue to next cell, resetting variables if nescessary cur_alac_color = None; if let Some(rect) = cur_rect { @@ -639,7 +636,7 @@ impl Element for TerminalElement { //Layout cursor. Rectangle is used for IME, so we should lay it out even //if we don't end up showing it. - let cursor = if let terminal::alacritty_terminal::ansi::CursorShape::Hidden = cursor.shape { + let cursor = if let AlacCursorShape::Hidden = cursor.shape { None } else { let cursor_point = DisplayCursor::from(cursor.point, *display_offset); diff --git a/crates/terminal_view/src/terminal_view.rs b/crates/terminal_view/src/terminal_view.rs index ad0538e2ea..c2f5c5c114 100644 --- a/crates/terminal_view/src/terminal_view.rs +++ b/crates/terminal_view/src/terminal_view.rs @@ -1,4 +1,4 @@ -use std::{ops::RangeInclusive, path::PathBuf, time::Duration}; +use std::{ops::RangeInclusive, time::Duration}; use context_menu::{ContextMenu, ContextMenuItem}; use gpui::{ @@ -21,7 +21,7 @@ use terminal::{ Terminal, }; use util::ResultExt; -use workspace::pane; +use workspace::{pane, ItemId, WorkspaceId}; use crate::{persistence::TERMINAL_DB, terminal_element::TerminalElement, Event}; @@ -75,6 +75,8 @@ pub struct TerminalView { blinking_on: bool, blinking_paused: bool, blink_epoch: usize, + workspace_id: WorkspaceId, + item_id: ItemId, } impl Entity for TerminalView { @@ -85,6 +87,8 @@ impl TerminalView { pub fn from_terminal( terminal: ModelHandle, modal: bool, + workspace_id: WorkspaceId, + item_id: ItemId, cx: &mut ViewContext, ) -> Self { cx.observe(&terminal, |_, _, cx| cx.notify()).detach(); @@ -102,20 +106,20 @@ impl TerminalView { } Event::BlinkChanged => this.blinking_on = !this.blinking_on, Event::TitleChanged => { - // if let Some(foreground_info) = &terminal.read(cx).foreground_process_info { - // let cwd = foreground_info.cwd.clone(); - //TODO - // let item_id = self.item_id; - // let workspace_id = self.workspace_id; - cx.background() - .spawn(async move { - TERMINAL_DB - .save_working_directory(0, 0, PathBuf::new()) - .await - .log_err(); - }) - .detach(); - // } + if let Some(foreground_info) = &this.terminal().read(cx).foreground_process_info { + let cwd = foreground_info.cwd.clone(); + + let item_id = this.item_id; + let workspace_id = this.workspace_id; + cx.background() + .spawn(async move { + TERMINAL_DB + .save_working_directory(item_id, workspace_id, cwd) + .await + .log_err(); + }) + .detach(); + } } _ => cx.emit(*event), }) @@ -131,6 +135,8 @@ impl TerminalView { blinking_on: false, blinking_paused: false, blink_epoch: 0, + workspace_id, + item_id, } } @@ -282,6 +288,13 @@ impl TerminalView { 
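The restored TitleChanged arm works because it copies item_id, workspace_id, and the foreground cwd out of the view before spawning, so the async task owns its data instead of borrowing self (the blocker behind the earlier TODO). The same ownership shape in miniature, with a thread standing in for cx.background().spawn:

    use std::path::PathBuf;
    use std::thread::{self, JoinHandle};

    fn on_title_changed(item_id: u64, workspace_id: u64, cwd: Option<PathBuf>) -> Option<JoinHandle<()>> {
        // Copy everything out of the view first; the spawned task must not
        // borrow the struct it was created from.
        cwd.map(|cwd| {
            thread::spawn(move || {
                println!("persist {cwd:?} for item {item_id} in workspace {workspace_id}");
            })
        })
    }

    fn main() {
        if let Some(handle) = on_title_changed(7, 42, Some(PathBuf::from("/tmp"))) {
            handle.join().unwrap();
        }
    }
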
&self.terminal } + pub fn added_to_workspace(&mut self, new_id: WorkspaceId, cx: &mut ViewContext) { + cx.background() + .spawn(TERMINAL_DB.update_workspace_id(new_id, self.workspace_id, self.item_id)) + .detach(); + self.workspace_id = new_id; + } + fn next_blink_epoch(&mut self) -> usize { self.blink_epoch += 1; self.blink_epoch From c42da5c9b9185dbefb70de5e144f3c70d9d7528b Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 8 Dec 2022 16:10:22 -0800 Subject: [PATCH 227/240] WIP --- crates/project/src/project.rs | 21 ++++++- crates/settings/src/settings.rs | 44 +++++++------- crates/terminal/src/terminal.rs | 28 ++++----- crates/terminal_view/Cargo.toml | 2 +- .../src/terminal_container_view.rs | 57 ++++++------------- crates/terminal_view/src/terminal_view.rs | 5 ++ 6 files changed, 77 insertions(+), 80 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 40f1c93e51..545570da89 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -62,7 +62,7 @@ use std::{ }, time::Instant, }; -use terminal::Terminal; +use terminal::{Terminal, TerminalBuilder}; use thiserror::Error; use util::{defer, post_inc, ResultExt, TryFutureExt as _}; @@ -1196,14 +1196,29 @@ impl Project { pub fn create_terminal_connection( &mut self, - _cx: &mut ModelContext, + working_directory: Option, + window_id: usize, + cx: &mut ModelContext, ) -> Result> { if self.is_remote() { return Err(anyhow!( "creating terminals as a guest is not supported yet" )); } else { - unimplemented!() + let settings = cx.global::(); + let shell = settings.terminal_shell(); + let envs = settings.terminal_env(); + let scroll = settings.terminal_scroll(); + + TerminalBuilder::new( + working_directory.clone(), + shell, + envs, + settings.terminal_overrides.blinking.clone(), + scroll, + window_id, + ) + .map(|builder| cx.add_model(|cx| builder.subscribe(cx))) } } diff --git a/crates/settings/src/settings.rs b/crates/settings/src/settings.rs index dd23f80abd..f0c64a1bb9 100644 --- a/crates/settings/src/settings.rs +++ b/crates/settings/src/settings.rs @@ -221,6 +221,12 @@ pub enum WorkingDirectory { Always { directory: String }, } +impl Default for WorkingDirectory { + fn default() -> Self { + Self::CurrentProjectDirectory + } +} + #[derive(PartialEq, Eq, Debug, Default, Copy, Clone, Hash, Serialize, Deserialize, JsonSchema)] #[serde(rename_all = "snake_case")] pub enum DockAnchor { @@ -473,30 +479,30 @@ impl Settings { }) } - pub fn terminal_scroll(&self) -> AlternateScroll { - *self.terminal_overrides.alternate_scroll.as_ref().unwrap_or( - self.terminal_defaults - .alternate_scroll - .as_ref() - .unwrap_or_else(|| &AlternateScroll::On), - ) + fn terminal_setting(&self, f: F) -> R + where + F: Fn(&TerminalSettings) -> Option<&R>, + { + f(&self.terminal_overrides) + .or_else(|| f(&self.terminal_defaults)) + .cloned() + .unwrap_or_else(|| R::default()) } - pub fn terminal_shell(&self) -> Option { - self.terminal_overrides - .shell - .as_ref() - .or(self.terminal_defaults.shell.as_ref()) - .cloned() + pub fn terminal_scroll(&self) -> AlternateScroll { + self.terminal_setting(|terminal_setting| terminal_setting.alternate_scroll.as_ref()) + } + + pub fn terminal_shell(&self) -> Shell { + self.terminal_setting(|terminal_setting| terminal_setting.shell.as_ref()) } pub fn terminal_env(&self) -> HashMap { - self.terminal_overrides.env.clone().unwrap_or_else(|| { - self.terminal_defaults - .env - .clone() - .unwrap_or_else(|| HashMap::default()) - }) + 
self.terminal_setting(|terminal_setting| terminal_setting.env.as_ref()) + } + + pub fn terminal_strategy(&self) -> WorkingDirectory { + self.terminal_setting(|terminal_setting| terminal_setting.working_directory.as_ref()) } #[cfg(any(test, feature = "test-support"))] diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 4b69de0bf2..7cdac33cda 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -198,7 +198,7 @@ impl Dimensions for TerminalSize { #[derive(Error, Debug)] pub struct TerminalError { pub directory: Option, - pub shell: Option, + pub shell: Shell, pub source: std::io::Error, } @@ -226,24 +226,20 @@ impl TerminalError { }) } - pub fn shell_to_string(&self) -> Option { - self.shell.as_ref().map(|shell| match shell { + pub fn shell_to_string(&self) -> String { + match &self.shell { Shell::System => "".to_string(), Shell::Program(p) => p.to_string(), Shell::WithArguments { program, args } => format!("{} {}", program, args.join(" ")), - }) + } } pub fn fmt_shell(&self) -> String { - self.shell - .clone() - .map(|shell| match shell { - Shell::System => "".to_string(), - - Shell::Program(s) => s, - Shell::WithArguments { program, args } => format!("{} {}", program, args.join(" ")), - }) - .unwrap_or_else(|| "".to_string()) + match &self.shell { + Shell::System => "".to_string(), + Shell::Program(s) => s.to_string(), + Shell::WithArguments { program, args } => format!("{} {}", program, args.join(" ")), + } } } @@ -268,18 +264,18 @@ pub struct TerminalBuilder { impl TerminalBuilder { pub fn new( working_directory: Option, - shell: Option, + shell: Shell, mut env: HashMap, blink_settings: Option, alternate_scroll: AlternateScroll, window_id: usize, ) -> Result { let pty_config = { - let alac_shell = shell.clone().and_then(|shell| match shell { + let alac_shell = match shell.clone() { Shell::System => None, Shell::Program(program) => Some(Program::Just(program)), Shell::WithArguments { program, args } => Some(Program::WithArgs { program, args }), - }); + }; PtyConfig { shell: alac_shell, diff --git a/crates/terminal_view/Cargo.toml b/crates/terminal_view/Cargo.toml index fae60a943d..05fda2c75f 100644 --- a/crates/terminal_view/Cargo.toml +++ b/crates/terminal_view/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2021" [lib] -path = "src/terminal_container_view.rs" +path = "src/terminal_view.rs" doctest = false [dependencies] diff --git a/crates/terminal_view/src/terminal_container_view.rs b/crates/terminal_view/src/terminal_container_view.rs index 9d8b79cd39..322bf5ab52 100644 --- a/crates/terminal_view/src/terminal_container_view.rs +++ b/crates/terminal_view/src/terminal_container_view.rs @@ -1,18 +1,14 @@ -mod persistence; -pub mod terminal_element; -pub mod terminal_view; - use crate::persistence::TERMINAL_DB; -use crate::terminal_view::TerminalView; +use crate::TerminalView; use terminal::alacritty_terminal::index::Point; -use terminal::{Event, TerminalBuilder, TerminalError}; +use terminal::{Event, Terminal, TerminalError}; +use crate::regex_search_for_query; use dirs::home_dir; use gpui::{ actions, elements::*, AnyViewHandle, AppContext, Entity, ModelHandle, MutableAppContext, Task, View, ViewContext, ViewHandle, WeakViewHandle, }; -use terminal_view::regex_search_for_query; use util::{truncate_and_trailoff, ResultExt}; use workspace::searchable::{SearchEvent, SearchOptions, SearchableItem, SearchableItemHandle}; use workspace::{ @@ -36,7 +32,7 @@ pub fn init(cx: &mut MutableAppContext) { 
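This commit collapses the hand-written override-then-default chains into one generic terminal_setting helper, with each accessor just selecting a field. A compilable miniature with invented fields (the real TerminalSettings has many more), which also shows why the patch adds Default impls such as the one for WorkingDirectory:

    use std::collections::HashMap;

    #[derive(Clone, Debug, Default, PartialEq)]
    enum AlternateScroll {
        #[default]
        On,
        Off,
    }

    #[derive(Default)]
    struct TerminalSettings {
        alternate_scroll: Option<AlternateScroll>,
        env: Option<HashMap<String, String>>,
    }

    struct Settings {
        terminal_overrides: TerminalSettings,
        terminal_defaults: TerminalSettings,
    }

    impl Settings {
        // Override wins, then the default profile, then R::default().
        fn terminal_setting<R, F>(&self, f: F) -> R
        where
            R: Default + Clone,
            F: Fn(&TerminalSettings) -> Option<&R>,
        {
            f(&self.terminal_overrides)
                .or_else(|| f(&self.terminal_defaults))
                .cloned()
                .unwrap_or_default()
        }

        fn terminal_scroll(&self) -> AlternateScroll {
            self.terminal_setting(|t| t.alternate_scroll.as_ref())
        }

        fn terminal_env(&self) -> HashMap<String, String> {
            self.terminal_setting(|t| t.env.as_ref())
        }
    }

    fn main() {
        let settings = Settings {
            terminal_overrides: TerminalSettings::default(),
            terminal_defaults: TerminalSettings {
                alternate_scroll: Some(AlternateScroll::Off),
                env: None,
            },
        };
        assert_eq!(settings.terminal_scroll(), AlternateScroll::Off); // from defaults
        assert!(settings.terminal_env().is_empty()); // R::default() fallback
    }
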
register_deserializable_item::(cx); - terminal_view::init(cx); + // terminal_view::init(cx); } //Make terminal view an enum, that can give you views for the error and non-error states @@ -81,47 +77,31 @@ impl TerminalContainer { _: &workspace::NewTerminal, cx: &mut ViewContext, ) { - let strategy = cx - .global::() - .terminal_overrides - .working_directory - .clone() - .unwrap_or(WorkingDirectory::CurrentProjectDirectory); + let strategy = cx.global::().terminal_strategy(); let working_directory = get_working_directory(workspace, cx, strategy); - let view = cx.add_view(|cx| { - TerminalContainer::new(working_directory, false, workspace.database_id(), cx) + + let window_id = cx.window_id(); + let terminal = workspace.project().update(cx, |project, cx| { + project.create_terminal_connection(working_directory, window_id, cx) }); + + let view = cx.add_view(|cx| TerminalContainer::new(terminal, workspace.database_id(), cx)); workspace.add_item(Box::new(view), cx); } ///Create a new Terminal view. This spawns a task, a thread, and opens the TTY devices pub fn new( - working_directory: Option, - modal: bool, + model: anyhow::Result>, workspace_id: WorkspaceId, cx: &mut ViewContext, ) -> Self { - let settings = cx.global::(); - let shell = settings.terminal_shell(); - let envs = settings.terminal_env(); - let scroll = settings.terminal_scroll(); - - let content = match TerminalBuilder::new( - working_directory.clone(), - shell, - envs, - settings.terminal_overrides.blinking.clone(), - scroll, - cx.window_id(), - ) { + let content = match model { Ok(terminal) => { - let terminal = cx.add_model(|cx| terminal.subscribe(cx)); let item_id = cx.view_id(); let view = cx.add_view(|cx| { - TerminalView::from_terminal(terminal, modal, workspace_id, item_id, cx) + TerminalView::from_terminal(terminal, false, workspace_id, item_id, cx) }); - cx.subscribe(&view, |_this, _content, event, cx| cx.emit(*event)) .detach(); TerminalContainerContent::Connected(view) @@ -136,7 +116,7 @@ impl TerminalContainer { TerminalContainer { content, - associated_directory: working_directory, + associated_directory: None, //working_directory, } } @@ -183,12 +163,7 @@ impl View for ErrorView { //We want to be able to select the text //Want to be able to scroll if the error message is massive somehow (resiliency) - let program_text = { - match self.error.shell_to_string() { - Some(shell_txt) => format!("Shell Program: `{}`", shell_txt), - None => "No program specified".to_string(), - } - }; + let program_text = format!("Shell Program: `{}`", self.error.shell_to_string()); let directory_text = { match self.error.directory.as_ref() { diff --git a/crates/terminal_view/src/terminal_view.rs b/crates/terminal_view/src/terminal_view.rs index c2f5c5c114..dbe861b781 100644 --- a/crates/terminal_view/src/terminal_view.rs +++ b/crates/terminal_view/src/terminal_view.rs @@ -1,3 +1,7 @@ +mod persistence; +pub mod terminal_container_view; +pub mod terminal_element; + use std::{ops::RangeInclusive, time::Duration}; use context_menu::{ContextMenu, ContextMenuItem}; @@ -52,6 +56,7 @@ impl_actions!(terminal, [SendText, SendKeystroke]); impl_internal_actions!(project_panel, [DeployContextMenu]); pub fn init(cx: &mut MutableAppContext) { + terminal_container_view::init(cx); //Useful terminal views cx.add_action(TerminalView::send_text); cx.add_action(TerminalView::send_keystroke); From da100a09fb1a6e0f9778dc49da7bfd4f3d0aa614 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 8 Dec 2022 19:05:26 -0800 Subject: [PATCH 228/240] WIP --- 
crates/project/src/project.rs | 2 +- .../src/terminal_container_view.rs | 21 ++++++++++++------- crates/terminal_view/src/terminal_element.rs | 2 +- crates/terminal_view/src/terminal_view.rs | 4 ++-- crates/zed/src/main.rs | 8 +++++-- 5 files changed, 23 insertions(+), 14 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 545570da89..9b4a163af4 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1194,7 +1194,7 @@ impl Project { !self.is_local() } - pub fn create_terminal_connection( + pub fn create_terminal( &mut self, working_directory: Option, window_id: usize, diff --git a/crates/terminal_view/src/terminal_container_view.rs b/crates/terminal_view/src/terminal_container_view.rs index 322bf5ab52..4a0d47794a 100644 --- a/crates/terminal_view/src/terminal_container_view.rs +++ b/crates/terminal_view/src/terminal_container_view.rs @@ -82,21 +82,22 @@ impl TerminalContainer { let working_directory = get_working_directory(workspace, cx, strategy); let window_id = cx.window_id(); + let project = workspace.project().clone(); let terminal = workspace.project().update(cx, |project, cx| { - project.create_terminal_connection(working_directory, window_id, cx) + project.create_terminal(working_directory, window_id, cx) }); let view = cx.add_view(|cx| TerminalContainer::new(terminal, workspace.database_id(), cx)); workspace.add_item(Box::new(view), cx); } - ///Create a new Terminal view. This spawns a task, a thread, and opens the TTY devices + ///Create a new Terminal view. pub fn new( - model: anyhow::Result>, + maybe_terminal: anyhow::Result>, workspace_id: WorkspaceId, cx: &mut ViewContext, ) -> Self { - let content = match model { + let content = match maybe_terminal { Ok(terminal) => { let item_id = cx.view_id(); let view = cx.add_view(|cx| { @@ -251,8 +252,7 @@ impl Item for TerminalContainer { //Directory of the terminal from outside the shell. 
There might be //solutions to this, but they are non-trivial and require more IPC Some(TerminalContainer::new( - self.associated_directory.clone(), - false, + Err(anyhow::anyhow!("failed to instantiate terminal")), workspace_id, cx, )) @@ -354,12 +354,13 @@ impl Item for TerminalContainer { } fn deserialize( - _project: ModelHandle, + project: ModelHandle, _workspace: WeakViewHandle, workspace_id: workspace::WorkspaceId, item_id: workspace::ItemId, cx: &mut ViewContext, ) -> Task>> { + let window_id = cx.window_id(); cx.spawn(|pane, mut cx| async move { let cwd = TERMINAL_DB .take_working_directory(item_id, workspace_id) @@ -368,8 +369,12 @@ impl Item for TerminalContainer { .flatten(); cx.update(|cx| { + let terminal = project.update(cx, |project, cx| { + project.create_terminal(cwd, window_id, cx) + }); + Ok(cx.add_view(pane, |cx| { - TerminalContainer::new(cwd, false, workspace_id, cx) + TerminalContainer::new(terminal, workspace_id, cx) })) }) }) diff --git a/crates/terminal_view/src/terminal_element.rs b/crates/terminal_view/src/terminal_element.rs index 506dd1423d..08ed3ecc2d 100644 --- a/crates/terminal_view/src/terminal_element.rs +++ b/crates/terminal_view/src/terminal_element.rs @@ -32,7 +32,7 @@ use util::ResultExt; use std::{fmt::Debug, ops::RangeInclusive}; use std::{mem, ops::Range}; -use crate::terminal_view::{DeployContextMenu, TerminalView}; +use crate::{DeployContextMenu, TerminalView}; ///The information generated during layout that is nescessary for painting pub struct LayoutState { diff --git a/crates/terminal_view/src/terminal_view.rs b/crates/terminal_view/src/terminal_view.rs index dbe861b781..c2747e3ef2 100644 --- a/crates/terminal_view/src/terminal_view.rs +++ b/crates/terminal_view/src/terminal_view.rs @@ -22,12 +22,12 @@ use terminal::{ index::Point, term::{search::RegexSearch, TermMode}, }, - Terminal, + Event, Terminal, }; use util::ResultExt; use workspace::{pane, ItemId, WorkspaceId}; -use crate::{persistence::TERMINAL_DB, terminal_element::TerminalElement, Event}; +use crate::{persistence::TERMINAL_DB, terminal_element::TerminalElement}; const CURSOR_BLINK_INTERVAL: Duration = Duration::from_millis(500); diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 1b41613937..2396af6465 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -32,7 +32,7 @@ use settings::{ use smol::process::Command; use std::fs::OpenOptions; use std::{env, ffi::OsStr, panic, path::PathBuf, sync::Arc, thread, time::Duration}; -use terminal_view::{get_working_directory, TerminalContainer}; +use terminal_view::terminal_container_view::{get_working_directory, TerminalContainer}; use fs::RealFs; use settings::watched_json::{watch_keymap_file, watch_settings_file, WatchedJsonFile}; @@ -595,7 +595,11 @@ pub fn default_item_factory( let working_directory = get_working_directory(workspace, cx, strategy); let terminal_handle = cx.add_view(|cx| { - TerminalContainer::new(working_directory, false, workspace.database_id(), cx) + TerminalContainer::new( + Err(anyhow!("Don't have a project to open a terminal")), + workspace.database_id(), + cx, + ) }); Box::new(terminal_handle) } From 925c9e13bbf24435e5fe51b6e8abc5cff581c218 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 8 Dec 2022 20:14:43 -0800 Subject: [PATCH 229/240] Remove terminal container view, switch to notify errors --- crates/collab/src/integration_tests.rs | 2 +- crates/collab_ui/src/collab_ui.rs | 2 +- .../src/terminal_container_view.rs | 771 ------------------ 
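The comment retained above says a terminal's working directory cannot be seen "from outside the shell"; more precisely, a foreground process's cwd is readable on Linux through procfs, and the procinfo dependency appears to supply the macOS equivalent, which is presumably how foreground_process_info carries a cwd at all. A Linux-only illustration:

    use std::path::PathBuf;

    // Linux exposes a process's current directory as the symlink
    // /proc/<pid>/cwd; macOS needs libproc instead.
    fn process_cwd(pid: u32) -> std::io::Result<PathBuf> {
        std::fs::read_link(format!("/proc/{pid}/cwd"))
    }

    fn main() -> std::io::Result<()> {
        let pid = std::process::id();
        println!("cwd of {pid}: {}", process_cwd(pid)?.display());
        Ok(())
    }
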
crates/terminal_view/src/terminal_view.rs | 620 +++++++++++++- crates/workspace/src/dock.rs | 23 +- crates/workspace/src/notifications.rs | 80 +- crates/workspace/src/workspace.rs | 12 +- crates/zed/src/main.rs | 31 +- 8 files changed, 700 insertions(+), 841 deletions(-) delete mode 100644 crates/terminal_view/src/terminal_container_view.rs diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 3639afd47c..a77ae4925d 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -6022,7 +6022,7 @@ impl TestServer { fs: fs.clone(), build_window_options: Default::default, initialize_workspace: |_, _, _| unimplemented!(), - default_item_factory: |_, _| unimplemented!(), + dock_default_item_factory: |_, _| unimplemented!(), }); Project::init(&client); diff --git a/crates/collab_ui/src/collab_ui.rs b/crates/collab_ui/src/collab_ui.rs index abc62605f9..1b851c3f75 100644 --- a/crates/collab_ui/src/collab_ui.rs +++ b/crates/collab_ui/src/collab_ui.rs @@ -54,7 +54,7 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { Default::default(), 0, project, - app_state.default_item_factory, + app_state.dock_default_item_factory, cx, ); (app_state.initialize_workspace)(&mut workspace, &app_state, cx); diff --git a/crates/terminal_view/src/terminal_container_view.rs b/crates/terminal_view/src/terminal_container_view.rs deleted file mode 100644 index 4a0d47794a..0000000000 --- a/crates/terminal_view/src/terminal_container_view.rs +++ /dev/null @@ -1,771 +0,0 @@ -use crate::persistence::TERMINAL_DB; -use crate::TerminalView; -use terminal::alacritty_terminal::index::Point; -use terminal::{Event, Terminal, TerminalError}; - -use crate::regex_search_for_query; -use dirs::home_dir; -use gpui::{ - actions, elements::*, AnyViewHandle, AppContext, Entity, ModelHandle, MutableAppContext, Task, - View, ViewContext, ViewHandle, WeakViewHandle, -}; -use util::{truncate_and_trailoff, ResultExt}; -use workspace::searchable::{SearchEvent, SearchOptions, SearchableItem, SearchableItemHandle}; -use workspace::{ - item::{Item, ItemEvent}, - ToolbarItemLocation, Workspace, -}; -use workspace::{register_deserializable_item, Pane, WorkspaceId}; - -use project::{LocalWorktree, Project, ProjectPath}; -use settings::{Settings, WorkingDirectory}; -use smallvec::SmallVec; -use std::ops::RangeInclusive; -use std::path::{Path, PathBuf}; - -use crate::terminal_element::TerminalElement; - -actions!(terminal, [DeployModal]); - -pub fn init(cx: &mut MutableAppContext) { - cx.add_action(TerminalContainer::deploy); - - register_deserializable_item::(cx); - - // terminal_view::init(cx); -} - -//Make terminal view an enum, that can give you views for the error and non-error states -//Take away all the result unwrapping in the current TerminalView by making it 'infallible' -//Bubble up to deploy(_modal)() calls - -pub enum TerminalContainerContent { - Connected(ViewHandle), - Error(ViewHandle), -} - -impl TerminalContainerContent { - fn handle(&self) -> AnyViewHandle { - match self { - Self::Connected(handle) => handle.into(), - Self::Error(handle) => handle.into(), - } - } -} - -pub struct TerminalContainer { - pub content: TerminalContainerContent, - associated_directory: Option, -} - -pub struct ErrorView { - error: TerminalError, -} - -impl Entity for TerminalContainer { - type Event = Event; -} - -impl Entity for ErrorView { - type Event = Event; -} - -impl TerminalContainer { - ///Create a new Terminal in the current working directory or the user's 
home directory - pub fn deploy( - workspace: &mut Workspace, - _: &workspace::NewTerminal, - cx: &mut ViewContext, - ) { - let strategy = cx.global::().terminal_strategy(); - - let working_directory = get_working_directory(workspace, cx, strategy); - - let window_id = cx.window_id(); - let project = workspace.project().clone(); - let terminal = workspace.project().update(cx, |project, cx| { - project.create_terminal(working_directory, window_id, cx) - }); - - let view = cx.add_view(|cx| TerminalContainer::new(terminal, workspace.database_id(), cx)); - workspace.add_item(Box::new(view), cx); - } - - ///Create a new Terminal view. - pub fn new( - maybe_terminal: anyhow::Result>, - workspace_id: WorkspaceId, - cx: &mut ViewContext, - ) -> Self { - let content = match maybe_terminal { - Ok(terminal) => { - let item_id = cx.view_id(); - let view = cx.add_view(|cx| { - TerminalView::from_terminal(terminal, false, workspace_id, item_id, cx) - }); - cx.subscribe(&view, |_this, _content, event, cx| cx.emit(*event)) - .detach(); - TerminalContainerContent::Connected(view) - } - Err(error) => { - let view = cx.add_view(|_| ErrorView { - error: error.downcast::().unwrap(), - }); - TerminalContainerContent::Error(view) - } - }; - - TerminalContainer { - content, - associated_directory: None, //working_directory, - } - } - - fn connected(&self) -> Option> { - match &self.content { - TerminalContainerContent::Connected(vh) => Some(vh.clone()), - TerminalContainerContent::Error(_) => None, - } - } -} - -impl View for TerminalContainer { - fn ui_name() -> &'static str { - "Terminal" - } - - fn render(&mut self, cx: &mut gpui::RenderContext<'_, Self>) -> ElementBox { - match &self.content { - TerminalContainerContent::Connected(connected) => ChildView::new(connected, cx), - TerminalContainerContent::Error(error) => ChildView::new(error, cx), - } - .boxed() - } - - fn focus_in(&mut self, _: AnyViewHandle, cx: &mut ViewContext) { - if cx.is_self_focused() { - cx.focus(self.content.handle()); - } - } -} - -impl View for ErrorView { - fn ui_name() -> &'static str { - "Terminal Error" - } - - fn render(&mut self, cx: &mut gpui::RenderContext<'_, Self>) -> ElementBox { - let settings = cx.global::(); - let style = TerminalElement::make_text_style(cx.font_cache(), settings); - - //TODO: - //We want markdown style highlighting so we can format the program and working directory with `` - //We want a max-width of 75% with word-wrap - //We want to be able to select the text - //Want to be able to scroll if the error message is massive somehow (resiliency) - - let program_text = format!("Shell Program: `{}`", self.error.shell_to_string()); - - let directory_text = { - match self.error.directory.as_ref() { - Some(path) => format!("Working directory: `{}`", path.to_string_lossy()), - None => "No working directory specified".to_string(), - } - }; - - let error_text = self.error.source.to_string(); - - Flex::column() - .with_child( - Text::new("Failed to open the terminal.".to_string(), style.clone()) - .contained() - .boxed(), - ) - .with_child(Text::new(program_text, style.clone()).contained().boxed()) - .with_child(Text::new(directory_text, style.clone()).contained().boxed()) - .with_child(Text::new(error_text, style).contained().boxed()) - .aligned() - .boxed() - } -} - -impl Item for TerminalContainer { - fn tab_content( - &self, - _detail: Option, - tab_theme: &theme::Tab, - cx: &gpui::AppContext, - ) -> ElementBox { - let title = match &self.content { - TerminalContainerContent::Connected(connected) => connected - 
.read(cx) - .handle() - .read(cx) - .foreground_process_info - .as_ref() - .map(|fpi| { - format!( - "{} — {}", - truncate_and_trailoff( - &fpi.cwd - .file_name() - .map(|name| name.to_string_lossy().to_string()) - .unwrap_or_default(), - 25 - ), - truncate_and_trailoff( - &{ - format!( - "{}{}", - fpi.name, - if fpi.argv.len() >= 1 { - format!(" {}", (&fpi.argv[1..]).join(" ")) - } else { - "".to_string() - } - ) - }, - 25 - ) - ) - }) - .unwrap_or_else(|| "Terminal".to_string()), - TerminalContainerContent::Error(_) => "Terminal".to_string(), - }; - - Flex::row() - .with_child( - Label::new(title, tab_theme.label.clone()) - .aligned() - .contained() - .boxed(), - ) - .boxed() - } - - fn clone_on_split( - &self, - workspace_id: WorkspaceId, - cx: &mut ViewContext, - ) -> Option { - //From what I can tell, there's no way to tell the current working - //Directory of the terminal from outside the shell. There might be - //solutions to this, but they are non-trivial and require more IPC - Some(TerminalContainer::new( - Err(anyhow::anyhow!("failed to instantiate terminal")), - workspace_id, - cx, - )) - } - - fn project_path(&self, _cx: &gpui::AppContext) -> Option { - None - } - - fn project_entry_ids(&self, _cx: &gpui::AppContext) -> SmallVec<[project::ProjectEntryId; 3]> { - SmallVec::new() - } - - fn is_singleton(&self, _cx: &gpui::AppContext) -> bool { - false - } - - fn set_nav_history(&mut self, _: workspace::ItemNavHistory, _: &mut ViewContext) {} - - fn can_save(&self, _cx: &gpui::AppContext) -> bool { - false - } - - fn save( - &mut self, - _project: gpui::ModelHandle, - _cx: &mut ViewContext, - ) -> gpui::Task> { - unreachable!("save should not have been called"); - } - - fn save_as( - &mut self, - _project: gpui::ModelHandle, - _abs_path: std::path::PathBuf, - _cx: &mut ViewContext, - ) -> gpui::Task> { - unreachable!("save_as should not have been called"); - } - - fn reload( - &mut self, - _project: gpui::ModelHandle, - _cx: &mut ViewContext, - ) -> gpui::Task> { - gpui::Task::ready(Ok(())) - } - - fn is_dirty(&self, cx: &gpui::AppContext) -> bool { - if let TerminalContainerContent::Connected(connected) = &self.content { - connected.read(cx).has_bell() - } else { - false - } - } - - fn has_conflict(&self, _cx: &AppContext) -> bool { - false - } - - fn as_searchable(&self, handle: &ViewHandle) -> Option> { - Some(Box::new(handle.clone())) - } - - fn to_item_events(event: &Self::Event) -> Vec { - match event { - Event::BreadcrumbsChanged => vec![ItemEvent::UpdateBreadcrumbs], - Event::TitleChanged | Event::Wakeup => vec![ItemEvent::UpdateTab], - Event::CloseTerminal => vec![ItemEvent::CloseItem], - _ => vec![], - } - } - - fn breadcrumb_location(&self) -> ToolbarItemLocation { - if self.connected().is_some() { - ToolbarItemLocation::PrimaryLeft { flex: None } - } else { - ToolbarItemLocation::Hidden - } - } - - fn breadcrumbs(&self, theme: &theme::Theme, cx: &AppContext) -> Option> { - let connected = self.connected()?; - - Some(vec![Text::new( - connected - .read(cx) - .terminal() - .read(cx) - .breadcrumb_text - .to_string(), - theme.breadcrumbs.text.clone(), - ) - .boxed()]) - } - - fn serialized_item_kind() -> Option<&'static str> { - Some("Terminal") - } - - fn deserialize( - project: ModelHandle, - _workspace: WeakViewHandle, - workspace_id: workspace::WorkspaceId, - item_id: workspace::ItemId, - cx: &mut ViewContext, - ) -> Task>> { - let window_id = cx.window_id(); - cx.spawn(|pane, mut cx| async move { - let cwd = TERMINAL_DB - .take_working_directory(item_id, 
workspace_id) - .await - .log_err() - .flatten(); - - cx.update(|cx| { - let terminal = project.update(cx, |project, cx| { - project.create_terminal(cwd, window_id, cx) - }); - - Ok(cx.add_view(pane, |cx| { - TerminalContainer::new(terminal, workspace_id, cx) - })) - }) - }) - } - - fn added_to_workspace(&mut self, workspace: &mut Workspace, cx: &mut ViewContext) { - if let Some(connected) = self.connected() { - connected.update(cx, |connected_view, cx| { - connected_view.added_to_workspace(workspace.database_id(), cx); - }) - } - } -} - -impl SearchableItem for TerminalContainer { - type Match = RangeInclusive; - - fn supported_options() -> SearchOptions { - SearchOptions { - case: false, - word: false, - regex: false, - } - } - - /// Convert events raised by this item into search-relevant events (if applicable) - fn to_search_event(event: &Self::Event) -> Option { - match event { - Event::Wakeup => Some(SearchEvent::MatchesInvalidated), - Event::SelectionsChanged => Some(SearchEvent::ActiveMatchChanged), - _ => None, - } - } - - /// Clear stored matches - fn clear_matches(&mut self, cx: &mut ViewContext) { - if let TerminalContainerContent::Connected(connected) = &self.content { - let terminal = connected.read(cx).terminal().clone(); - terminal.update(cx, |term, _| term.matches.clear()) - } - } - - /// Store matches returned from find_matches somewhere for rendering - fn update_matches(&mut self, matches: Vec, cx: &mut ViewContext) { - if let TerminalContainerContent::Connected(connected) = &self.content { - let terminal = connected.read(cx).terminal().clone(); - terminal.update(cx, |term, _| term.matches = matches) - } - } - - /// Return the selection content to pre-load into this search - fn query_suggestion(&mut self, cx: &mut ViewContext) -> String { - if let TerminalContainerContent::Connected(connected) = &self.content { - let terminal = connected.read(cx).terminal().clone(); - terminal - .read(cx) - .last_content - .selection_text - .clone() - .unwrap_or_default() - } else { - Default::default() - } - } - - /// Focus match at given index into the Vec of matches - fn activate_match(&mut self, index: usize, _: Vec, cx: &mut ViewContext) { - if let TerminalContainerContent::Connected(connected) = &self.content { - let terminal = connected.read(cx).terminal().clone(); - terminal.update(cx, |term, _| term.activate_match(index)); - cx.notify(); - } - } - - /// Get all of the matches for this query, should be done on the background - fn find_matches( - &mut self, - query: project::search::SearchQuery, - cx: &mut ViewContext, - ) -> Task> { - if let TerminalContainerContent::Connected(connected) = &self.content { - let terminal = connected.read(cx).terminal().clone(); - if let Some(searcher) = regex_search_for_query(query) { - terminal.update(cx, |term, cx| term.find_matches(searcher, cx)) - } else { - cx.background().spawn(async { Vec::new() }) - } - } else { - Task::ready(Vec::new()) - } - } - - /// Reports back to the search toolbar what the active match should be (the selection) - fn active_match_index( - &mut self, - matches: Vec, - cx: &mut ViewContext, - ) -> Option { - let connected = self.connected(); - // Selection head might have a value if there's a selection that isn't - // associated with a match. 
Therefore, if there are no matches, we should - // report None, no matter the state of the terminal - let res = if matches.len() > 0 && connected.is_some() { - if let Some(selection_head) = connected - .unwrap() - .read(cx) - .terminal() - .read(cx) - .selection_head - { - // If selection head is contained in a match. Return that match - if let Some(ix) = matches - .iter() - .enumerate() - .find(|(_, search_match)| { - search_match.contains(&selection_head) - || search_match.start() > &selection_head - }) - .map(|(ix, _)| ix) - { - Some(ix) - } else { - // If no selection after selection head, return the last match - Some(matches.len().saturating_sub(1)) - } - } else { - // Matches found but no active selection, return the first last one (closest to cursor) - Some(matches.len().saturating_sub(1)) - } - } else { - None - }; - - res - } -} - -///Get's the working directory for the given workspace, respecting the user's settings. -pub fn get_working_directory( - workspace: &Workspace, - cx: &AppContext, - strategy: WorkingDirectory, -) -> Option { - let res = match strategy { - WorkingDirectory::CurrentProjectDirectory => current_project_directory(workspace, cx) - .or_else(|| first_project_directory(workspace, cx)), - WorkingDirectory::FirstProjectDirectory => first_project_directory(workspace, cx), - WorkingDirectory::AlwaysHome => None, - WorkingDirectory::Always { directory } => { - shellexpand::full(&directory) //TODO handle this better - .ok() - .map(|dir| Path::new(&dir.to_string()).to_path_buf()) - .filter(|dir| dir.is_dir()) - } - }; - res.or_else(home_dir) -} - -///Get's the first project's home directory, or the home directory -fn first_project_directory(workspace: &Workspace, cx: &AppContext) -> Option { - workspace - .worktrees(cx) - .next() - .and_then(|worktree_handle| worktree_handle.read(cx).as_local()) - .and_then(get_path_from_wt) -} - -///Gets the intuitively correct working directory from the given workspace -///If there is an active entry for this project, returns that entry's worktree root. -///If there's no active entry but there is a worktree, returns that worktrees root. 
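get_working_directory resolves the WorkingDirectory setting against project state, expanding and validating the Always { directory } case with shellexpand, and every branch bottoms out at the home directory. A condensed, dependency-light model of that resolution (shellexpand assumed as a dependency, as in the deleted code):

    use std::path::PathBuf;

    enum WorkingDirectory {
        CurrentProjectDirectory,
        FirstProjectDirectory,
        AlwaysHome,
        Always { directory: String },
    }

    fn resolve(
        strategy: WorkingDirectory,
        current_project_dir: Option<PathBuf>,
        first_project_dir: Option<PathBuf>,
        home: PathBuf,
    ) -> PathBuf {
        let picked = match strategy {
            WorkingDirectory::CurrentProjectDirectory => current_project_dir.or(first_project_dir),
            WorkingDirectory::FirstProjectDirectory => first_project_dir,
            WorkingDirectory::AlwaysHome => None,
            // Expand ~ and $VARS, then keep the path only if it is a real directory.
            WorkingDirectory::Always { directory } => shellexpand::full(&directory)
                .ok()
                .map(|dir| PathBuf::from(dir.into_owned()))
                .filter(|dir| dir.is_dir()),
        };
        picked.unwrap_or(home) // every branch falls back to $HOME
    }

    fn main() {
        let home = PathBuf::from("/home/user");
        let got = resolve(WorkingDirectory::AlwaysHome, Some("/proj".into()), None, home.clone());
        assert_eq!(got, home);
    }
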
-///If either of these roots are files, or if there are any other query failures, -/// returns the user's home directory -fn current_project_directory(workspace: &Workspace, cx: &AppContext) -> Option { - let project = workspace.project().read(cx); - - project - .active_entry() - .and_then(|entry_id| project.worktree_for_entry(entry_id, cx)) - .or_else(|| workspace.worktrees(cx).next()) - .and_then(|worktree_handle| worktree_handle.read(cx).as_local()) - .and_then(get_path_from_wt) -} - -fn get_path_from_wt(wt: &LocalWorktree) -> Option { - wt.root_entry() - .filter(|re| re.is_dir()) - .map(|_| wt.abs_path().to_path_buf()) -} - -#[cfg(test)] -mod tests { - - use super::*; - use gpui::TestAppContext; - use project::{Entry, Worktree}; - use workspace::AppState; - - use std::path::Path; - - ///Working directory calculation tests - - ///No Worktrees in project -> home_dir() - #[gpui::test] - async fn no_worktree(cx: &mut TestAppContext) { - //Setup variables - let (project, workspace) = blank_workspace(cx).await; - //Test - cx.read(|cx| { - let workspace = workspace.read(cx); - let active_entry = project.read(cx).active_entry(); - - //Make sure enviroment is as expeted - assert!(active_entry.is_none()); - assert!(workspace.worktrees(cx).next().is_none()); - - let res = current_project_directory(workspace, cx); - assert_eq!(res, None); - let res = first_project_directory(workspace, cx); - assert_eq!(res, None); - }); - } - - ///No active entry, but a worktree, worktree is a file -> home_dir() - #[gpui::test] - async fn no_active_entry_worktree_is_file(cx: &mut TestAppContext) { - //Setup variables - - let (project, workspace) = blank_workspace(cx).await; - create_file_wt(project.clone(), "/root.txt", cx).await; - - cx.read(|cx| { - let workspace = workspace.read(cx); - let active_entry = project.read(cx).active_entry(); - - //Make sure enviroment is as expeted - assert!(active_entry.is_none()); - assert!(workspace.worktrees(cx).next().is_some()); - - let res = current_project_directory(workspace, cx); - assert_eq!(res, None); - let res = first_project_directory(workspace, cx); - assert_eq!(res, None); - }); - } - - //No active entry, but a worktree, worktree is a folder -> worktree_folder - #[gpui::test] - async fn no_active_entry_worktree_is_dir(cx: &mut TestAppContext) { - //Setup variables - let (project, workspace) = blank_workspace(cx).await; - let (_wt, _entry) = create_folder_wt(project.clone(), "/root/", cx).await; - - //Test - cx.update(|cx| { - let workspace = workspace.read(cx); - let active_entry = project.read(cx).active_entry(); - - assert!(active_entry.is_none()); - assert!(workspace.worktrees(cx).next().is_some()); - - let res = current_project_directory(workspace, cx); - assert_eq!(res, Some((Path::new("/root/")).to_path_buf())); - let res = first_project_directory(workspace, cx); - assert_eq!(res, Some((Path::new("/root/")).to_path_buf())); - }); - } - - //Active entry with a work tree, worktree is a file -> home_dir() - #[gpui::test] - async fn active_entry_worktree_is_file(cx: &mut TestAppContext) { - //Setup variables - - let (project, workspace) = blank_workspace(cx).await; - let (_wt, _entry) = create_folder_wt(project.clone(), "/root1/", cx).await; - let (wt2, entry2) = create_file_wt(project.clone(), "/root2.txt", cx).await; - insert_active_entry_for(wt2, entry2, project.clone(), cx); - - //Test - cx.update(|cx| { - let workspace = workspace.read(cx); - let active_entry = project.read(cx).active_entry(); - - assert!(active_entry.is_some()); - - let res = 
current_project_directory(workspace, cx); - assert_eq!(res, None); - let res = first_project_directory(workspace, cx); - assert_eq!(res, Some((Path::new("/root1/")).to_path_buf())); - }); - } - - //Active entry, with a worktree, worktree is a folder -> worktree_folder - #[gpui::test] - async fn active_entry_worktree_is_dir(cx: &mut TestAppContext) { - //Setup variables - let (project, workspace) = blank_workspace(cx).await; - let (_wt, _entry) = create_folder_wt(project.clone(), "/root1/", cx).await; - let (wt2, entry2) = create_folder_wt(project.clone(), "/root2/", cx).await; - insert_active_entry_for(wt2, entry2, project.clone(), cx); - - //Test - cx.update(|cx| { - let workspace = workspace.read(cx); - let active_entry = project.read(cx).active_entry(); - - assert!(active_entry.is_some()); - - let res = current_project_directory(workspace, cx); - assert_eq!(res, Some((Path::new("/root2/")).to_path_buf())); - let res = first_project_directory(workspace, cx); - assert_eq!(res, Some((Path::new("/root1/")).to_path_buf())); - }); - } - - ///Creates a worktree with 1 file: /root.txt - pub async fn blank_workspace( - cx: &mut TestAppContext, - ) -> (ModelHandle, ViewHandle) { - let params = cx.update(AppState::test); - - let project = Project::test(params.fs.clone(), [], cx).await; - let (_, workspace) = cx.add_window(|cx| { - Workspace::new( - Default::default(), - 0, - project.clone(), - |_, _| unimplemented!(), - cx, - ) - }); - - (project, workspace) - } - - ///Creates a worktree with 1 folder: /root{suffix}/ - async fn create_folder_wt( - project: ModelHandle, - path: impl AsRef, - cx: &mut TestAppContext, - ) -> (ModelHandle, Entry) { - create_wt(project, true, path, cx).await - } - - ///Creates a worktree with 1 file: /root{suffix}.txt - async fn create_file_wt( - project: ModelHandle, - path: impl AsRef, - cx: &mut TestAppContext, - ) -> (ModelHandle, Entry) { - create_wt(project, false, path, cx).await - } - - async fn create_wt( - project: ModelHandle, - is_dir: bool, - path: impl AsRef, - cx: &mut TestAppContext, - ) -> (ModelHandle, Entry) { - let (wt, _) = project - .update(cx, |project, cx| { - project.find_or_create_local_worktree(path, true, cx) - }) - .await - .unwrap(); - - let entry = cx - .update(|cx| { - wt.update(cx, |wt, cx| { - wt.as_local() - .unwrap() - .create_entry(Path::new(""), is_dir, cx) - }) - }) - .await - .unwrap(); - - (wt, entry) - } - - pub fn insert_active_entry_for( - wt: ModelHandle, - entry: Entry, - project: ModelHandle, - cx: &mut TestAppContext, - ) { - cx.update(|cx| { - let p = ProjectPath { - worktree_id: wt.read(cx).id(), - path: entry.path, - }; - project.update(cx, |project, cx| project.set_active_path(Some(p), cx)); - }); - } -} diff --git a/crates/terminal_view/src/terminal_view.rs b/crates/terminal_view/src/terminal_view.rs index c2747e3ef2..7602a3db22 100644 --- a/crates/terminal_view/src/terminal_view.rs +++ b/crates/terminal_view/src/terminal_view.rs @@ -1,21 +1,27 @@ mod persistence; -pub mod terminal_container_view; pub mod terminal_element; -use std::{ops::RangeInclusive, time::Duration}; +use std::{ + ops::RangeInclusive, + path::{Path, PathBuf}, + time::Duration, +}; use context_menu::{ContextMenu, ContextMenuItem}; +use dirs::home_dir; use gpui::{ actions, - elements::{AnchorCorner, ChildView, ParentElement, Stack}, + elements::{AnchorCorner, ChildView, Flex, Label, ParentElement, Stack, Text}, geometry::vector::Vector2F, impl_actions, impl_internal_actions, keymap::Keystroke, AnyViewHandle, AppContext, Element, ElementBox, Entity, 
ModelHandle, MutableAppContext, Task, - View, ViewContext, ViewHandle, + View, ViewContext, ViewHandle, WeakViewHandle, }; +use project::{LocalWorktree, Project, ProjectPath}; use serde::Deserialize; -use settings::{Settings, TerminalBlink}; +use settings::{Settings, TerminalBlink, WorkingDirectory}; +use smallvec::SmallVec; use smol::Timer; use terminal::{ alacritty_terminal::{ @@ -24,8 +30,14 @@ use terminal::{ }, Event, Terminal, }; -use util::ResultExt; -use workspace::{pane, ItemId, WorkspaceId}; +use util::{truncate_and_trailoff, ResultExt}; +use workspace::{ + item::{Item, ItemEvent}, + notifications::NotifyResultExt, + pane, register_deserializable_item, + searchable::{SearchEvent, SearchOptions, SearchableItem, SearchableItemHandle}, + Pane, ToolbarItemLocation, Workspace, WorkspaceId, +}; use crate::{persistence::TERMINAL_DB, terminal_element::TerminalElement}; @@ -56,7 +68,10 @@ impl_actions!(terminal, [SendText, SendKeystroke]); impl_internal_actions!(project_panel, [DeployContextMenu]); pub fn init(cx: &mut MutableAppContext) { - terminal_container_view::init(cx); + cx.add_action(TerminalView::deploy); + + register_deserializable_item::(cx); + //Useful terminal views cx.add_action(TerminalView::send_text); cx.add_action(TerminalView::send_keystroke); @@ -73,15 +88,12 @@ pub struct TerminalView { has_new_content: bool, //Currently using iTerm bell, show bell emoji in tab until input is received has_bell: bool, - // Only for styling purposes. Doesn't effect behavior - modal: bool, context_menu: ViewHandle, blink_state: bool, blinking_on: bool, blinking_paused: bool, blink_epoch: usize, workspace_id: WorkspaceId, - item_id: ItemId, } impl Entity for TerminalView { @@ -89,11 +101,33 @@ impl Entity for TerminalView { } impl TerminalView { - pub fn from_terminal( + ///Create a new Terminal in the current working directory or the user's home directory + pub fn deploy( + workspace: &mut Workspace, + _: &workspace::NewTerminal, + cx: &mut ViewContext, + ) { + let strategy = cx.global::().terminal_strategy(); + + let working_directory = get_working_directory(workspace, cx, strategy); + + let window_id = cx.window_id(); + let terminal = workspace + .project() + .update(cx, |project, cx| { + project.create_terminal(working_directory, window_id, cx) + }) + .notify_err(workspace, cx); + + if let Some(terminal) = terminal { + let view = cx.add_view(|cx| TerminalView::new(terminal, workspace.database_id(), cx)); + workspace.add_item(Box::new(view), cx) + } + } + + pub fn new( terminal: ModelHandle, - modal: bool, workspace_id: WorkspaceId, - item_id: ItemId, cx: &mut ViewContext, ) -> Self { cx.observe(&terminal, |_, _, cx| cx.notify()).detach(); @@ -114,7 +148,7 @@ impl TerminalView { if let Some(foreground_info) = &this.terminal().read(cx).foreground_process_info { let cwd = foreground_info.cwd.clone(); - let item_id = this.item_id; + let item_id = cx.view_id(); let workspace_id = this.workspace_id; cx.background() .spawn(async move { @@ -134,14 +168,12 @@ impl TerminalView { terminal, has_new_content: true, has_bell: false, - modal, context_menu: cx.add_view(ContextMenu::new), blink_state: true, blinking_on: false, blinking_paused: false, blink_epoch: 0, workspace_id, - item_id, } } @@ -293,13 +325,6 @@ impl TerminalView { &self.terminal } - pub fn added_to_workspace(&mut self, new_id: WorkspaceId, cx: &mut ViewContext) { - cx.background() - .spawn(TERMINAL_DB.update_workspace_id(new_id, self.workspace_id, self.item_id)) - .detach(); - self.workspace_id = new_id; - } - fn 
next_blink_epoch(&mut self) -> usize { self.blink_epoch += 1; self.blink_epoch @@ -442,9 +467,7 @@ impl View for TerminalView { fn keymap_context(&self, cx: &gpui::AppContext) -> gpui::keymap::Context { let mut context = Self::default_keymap_context(); - if self.modal { - context.set.insert("ModalTerminal".into()); - } + let mode = self.terminal.read(cx).last_content.mode; context.map.insert( "screen".to_string(), @@ -523,3 +546,546 @@ impl View for TerminalView { context } } + +impl Item for TerminalView { + fn tab_content( + &self, + _detail: Option, + tab_theme: &theme::Tab, + cx: &gpui::AppContext, + ) -> ElementBox { + let title = self + .terminal() + .read(cx) + .foreground_process_info + .as_ref() + .map(|fpi| { + format!( + "{} — {}", + truncate_and_trailoff( + &fpi.cwd + .file_name() + .map(|name| name.to_string_lossy().to_string()) + .unwrap_or_default(), + 25 + ), + truncate_and_trailoff( + &{ + format!( + "{}{}", + fpi.name, + if fpi.argv.len() >= 1 { + format!(" {}", (&fpi.argv[1..]).join(" ")) + } else { + "".to_string() + } + ) + }, + 25 + ) + ) + }) + .unwrap_or_else(|| "Terminal".to_string()); + + Flex::row() + .with_child( + Label::new(title, tab_theme.label.clone()) + .aligned() + .contained() + .boxed(), + ) + .boxed() + } + + fn clone_on_split( + &self, + _workspace_id: WorkspaceId, + _cx: &mut ViewContext, + ) -> Option { + //From what I can tell, there's no way to tell the current working + //Directory of the terminal from outside the shell. There might be + //solutions to this, but they are non-trivial and require more IPC + + // Some(TerminalContainer::new( + // Err(anyhow::anyhow!("failed to instantiate terminal")), + // workspace_id, + // cx, + // )) + + // TODO + None + } + + fn project_path(&self, _cx: &gpui::AppContext) -> Option { + None + } + + fn project_entry_ids(&self, _cx: &gpui::AppContext) -> SmallVec<[project::ProjectEntryId; 3]> { + SmallVec::new() + } + + fn is_singleton(&self, _cx: &gpui::AppContext) -> bool { + false + } + + fn set_nav_history(&mut self, _: workspace::ItemNavHistory, _: &mut ViewContext) {} + + fn can_save(&self, _cx: &gpui::AppContext) -> bool { + false + } + + fn save( + &mut self, + _project: gpui::ModelHandle, + _cx: &mut ViewContext, + ) -> gpui::Task> { + unreachable!("save should not have been called"); + } + + fn save_as( + &mut self, + _project: gpui::ModelHandle, + _abs_path: std::path::PathBuf, + _cx: &mut ViewContext, + ) -> gpui::Task> { + unreachable!("save_as should not have been called"); + } + + fn reload( + &mut self, + _project: gpui::ModelHandle, + _cx: &mut ViewContext, + ) -> gpui::Task> { + gpui::Task::ready(Ok(())) + } + + fn is_dirty(&self, _cx: &gpui::AppContext) -> bool { + self.has_bell() + } + + fn has_conflict(&self, _cx: &AppContext) -> bool { + false + } + + fn as_searchable(&self, handle: &ViewHandle) -> Option> { + Some(Box::new(handle.clone())) + } + + fn to_item_events(event: &Self::Event) -> Vec { + match event { + Event::BreadcrumbsChanged => vec![ItemEvent::UpdateBreadcrumbs], + Event::TitleChanged | Event::Wakeup => vec![ItemEvent::UpdateTab], + Event::CloseTerminal => vec![ItemEvent::CloseItem], + _ => vec![], + } + } + + fn breadcrumb_location(&self) -> ToolbarItemLocation { + ToolbarItemLocation::PrimaryLeft { flex: None } + } + + fn breadcrumbs(&self, theme: &theme::Theme, cx: &AppContext) -> Option> { + Some(vec![Text::new( + self.terminal().read(cx).breadcrumb_text.to_string(), + theme.breadcrumbs.text.clone(), + ) + .boxed()]) + } + + fn serialized_item_kind() -> Option<&'static str> 
{ + Some("Terminal") + } + + fn deserialize( + project: ModelHandle, + _workspace: WeakViewHandle, + workspace_id: workspace::WorkspaceId, + item_id: workspace::ItemId, + cx: &mut ViewContext, + ) -> Task>> { + let window_id = cx.window_id(); + cx.spawn(|pane, mut cx| async move { + let cwd = TERMINAL_DB + .take_working_directory(item_id, workspace_id) + .await + .log_err() + .flatten(); + + cx.update(|cx| { + let terminal = project.update(cx, |project, cx| { + project.create_terminal(cwd, window_id, cx) + })?; + + Ok(cx.add_view(pane, |cx| TerminalView::new(terminal, workspace_id, cx))) + }) + }) + } + + fn added_to_workspace(&mut self, workspace: &mut Workspace, cx: &mut ViewContext) { + cx.background() + .spawn(TERMINAL_DB.update_workspace_id( + workspace.database_id(), + self.workspace_id, + cx.view_id(), + )) + .detach(); + self.workspace_id = workspace.database_id(); + } +} + +impl SearchableItem for TerminalView { + type Match = RangeInclusive; + + fn supported_options() -> SearchOptions { + SearchOptions { + case: false, + word: false, + regex: false, + } + } + + /// Convert events raised by this item into search-relevant events (if applicable) + fn to_search_event(event: &Self::Event) -> Option { + match event { + Event::Wakeup => Some(SearchEvent::MatchesInvalidated), + Event::SelectionsChanged => Some(SearchEvent::ActiveMatchChanged), + _ => None, + } + } + + /// Clear stored matches + fn clear_matches(&mut self, cx: &mut ViewContext) { + self.terminal().update(cx, |term, _| term.matches.clear()) + } + + /// Store matches returned from find_matches somewhere for rendering + fn update_matches(&mut self, matches: Vec, cx: &mut ViewContext) { + self.terminal().update(cx, |term, _| term.matches = matches) + } + + /// Return the selection content to pre-load into this search + fn query_suggestion(&mut self, cx: &mut ViewContext) -> String { + self.terminal() + .read(cx) + .last_content + .selection_text + .clone() + .unwrap_or_default() + } + + /// Focus match at given index into the Vec of matches + fn activate_match(&mut self, index: usize, _: Vec, cx: &mut ViewContext) { + self.terminal() + .update(cx, |term, _| term.activate_match(index)); + cx.notify(); + } + + /// Get all of the matches for this query, should be done on the background + fn find_matches( + &mut self, + query: project::search::SearchQuery, + cx: &mut ViewContext, + ) -> Task> { + if let Some(searcher) = regex_search_for_query(query) { + self.terminal() + .update(cx, |term, cx| term.find_matches(searcher, cx)) + } else { + Task::ready(vec![]) + } + } + + /// Reports back to the search toolbar what the active match should be (the selection) + fn active_match_index( + &mut self, + matches: Vec, + cx: &mut ViewContext, + ) -> Option { + // Selection head might have a value if there's a selection that isn't + // associated with a match. Therefore, if there are no matches, we should + // report None, no matter the state of the terminal + let res = if matches.len() > 0 { + if let Some(selection_head) = self.terminal().read(cx).selection_head { + // If selection head is contained in a match. 
Return that match
+                if let Some(ix) = matches
+                    .iter()
+                    .enumerate()
+                    .find(|(_, search_match)| {
+                        search_match.contains(&selection_head)
+                            || search_match.start() > &selection_head
+                    })
+                    .map(|(ix, _)| ix)
+                {
+                    Some(ix)
+                } else {
+                    // If no match after the selection head, return the last match
+                    Some(matches.len().saturating_sub(1))
+                }
+            } else {
+                // Matches found but no active selection; return the last one (closest to the cursor)
+                Some(matches.len().saturating_sub(1))
+            }
+        } else {
+            None
+        };
+
+        res
+    }
+}
+
+///Gets the working directory for the given workspace, respecting the user's settings.
+pub fn get_working_directory(
+    workspace: &Workspace,
+    cx: &AppContext,
+    strategy: WorkingDirectory,
+) -> Option<PathBuf> {
+    let res = match strategy {
+        WorkingDirectory::CurrentProjectDirectory => current_project_directory(workspace, cx)
+            .or_else(|| first_project_directory(workspace, cx)),
+        WorkingDirectory::FirstProjectDirectory => first_project_directory(workspace, cx),
+        WorkingDirectory::AlwaysHome => None,
+        WorkingDirectory::Always { directory } => {
+            shellexpand::full(&directory) //TODO handle this better
+                .ok()
+                .map(|dir| Path::new(&dir.to_string()).to_path_buf())
+                .filter(|dir| dir.is_dir())
+        }
+    };
+    res.or_else(home_dir)
+}
+
+///Gets the first project's directory, if one exists
+fn first_project_directory(workspace: &Workspace, cx: &AppContext) -> Option<PathBuf> {
+    workspace
+        .worktrees(cx)
+        .next()
+        .and_then(|worktree_handle| worktree_handle.read(cx).as_local())
+        .and_then(get_path_from_wt)
+}
+
+///Gets the intuitively correct working directory from the given workspace
+///If there is an active entry for this project, returns that entry's worktree root.
+///If there's no active entry but there is a worktree, returns that worktree's root.
+///If either of these roots is a file, or if any other query fails,
+///returns None, and the caller falls back to the user's home directory.
+fn current_project_directory(workspace: &Workspace, cx: &AppContext) -> Option<PathBuf> {
+    let project = workspace.project().read(cx);
+
+    project
+        .active_entry()
+        .and_then(|entry_id| project.worktree_for_entry(entry_id, cx))
+        .or_else(|| workspace.worktrees(cx).next())
+        .and_then(|worktree_handle| worktree_handle.read(cx).as_local())
+        .and_then(get_path_from_wt)
+}
+
+fn get_path_from_wt(wt: &LocalWorktree) -> Option<PathBuf> {
+    wt.root_entry()
+        .filter(|re| re.is_dir())
+        .map(|_| wt.abs_path().to_path_buf())
+}
+
+#[cfg(test)]
+mod tests {
+
+    use super::*;
+    use gpui::TestAppContext;
+    use project::{Entry, Project, ProjectPath, Worktree};
+    use workspace::AppState;
+
+    use std::path::Path;
+
+    ///Working directory calculation tests
+
+    ///No Worktrees in project -> home_dir()
+    #[gpui::test]
+    async fn no_worktree(cx: &mut TestAppContext) {
+        //Setup variables
+        let (project, workspace) = blank_workspace(cx).await;
+        //Test
+        cx.read(|cx| {
+            let workspace = workspace.read(cx);
+            let active_entry = project.read(cx).active_entry();
+
+            //Make sure environment is as expected
+            assert!(active_entry.is_none());
+            assert!(workspace.worktrees(cx).next().is_none());
+
+            let res = current_project_directory(workspace, cx);
+            assert_eq!(res, None);
+            let res = first_project_directory(workspace, cx);
+            assert_eq!(res, None);
+        });
+    }
+
+    ///No active entry, but a worktree, worktree is a file -> home_dir()
+    #[gpui::test]
+    async fn no_active_entry_worktree_is_file(cx: &mut TestAppContext) {
+        //Setup variables
+
+        let (project, workspace) = blank_workspace(cx).await;
+        create_file_wt(project.clone(), "/root.txt", cx).await;
+
+        cx.read(|cx| {
+            let workspace = workspace.read(cx);
+            let active_entry = project.read(cx).active_entry();
+
+            //Make sure environment is as expected
+            assert!(active_entry.is_none());
+            assert!(workspace.worktrees(cx).next().is_some());
+
+            let res = current_project_directory(workspace, cx);
+            assert_eq!(res, None);
+            let res = first_project_directory(workspace, cx);
+            assert_eq!(res, None);
+        });
+    }
+
+    //No active entry, but a worktree, worktree is a folder -> worktree_folder
+    #[gpui::test]
+    async fn no_active_entry_worktree_is_dir(cx: &mut TestAppContext) {
+        //Setup variables
+        let (project, workspace) = blank_workspace(cx).await;
+        let (_wt, _entry) = create_folder_wt(project.clone(), "/root/", cx).await;
+
+        //Test
+        cx.update(|cx| {
+            let workspace = workspace.read(cx);
+            let active_entry = project.read(cx).active_entry();
+
+            assert!(active_entry.is_none());
+            assert!(workspace.worktrees(cx).next().is_some());
+
+            let res = current_project_directory(workspace, cx);
+            assert_eq!(res, Some((Path::new("/root/")).to_path_buf()));
+            let res = first_project_directory(workspace, cx);
+            assert_eq!(res, Some((Path::new("/root/")).to_path_buf()));
+        });
+    }
+
+    //Active entry with a worktree, worktree is a file -> home_dir()
+    #[gpui::test]
+    async fn active_entry_worktree_is_file(cx: &mut TestAppContext) {
+        //Setup variables
+
+        let (project, workspace) = blank_workspace(cx).await;
+        let (_wt, _entry) = create_folder_wt(project.clone(), "/root1/", cx).await;
+        let (wt2, entry2) = create_file_wt(project.clone(), "/root2.txt", cx).await;
+        insert_active_entry_for(wt2, entry2, project.clone(), cx);
+
+        //Test
+        cx.update(|cx| {
+            let workspace = workspace.read(cx);
+            let active_entry = project.read(cx).active_entry();
+
+            assert!(active_entry.is_some());
+
+            let res = current_project_directory(workspace, cx);
+            assert_eq!(res, None);
+            let res = first_project_directory(workspace, cx);
+            assert_eq!(res, Some((Path::new("/root1/")).to_path_buf()));
+        });
+    }
+
+    //Active entry, with a worktree, worktree is a folder -> worktree_folder
+    #[gpui::test]
+    async fn active_entry_worktree_is_dir(cx: &mut TestAppContext) {
+        //Setup variables
+        let (project, workspace) = blank_workspace(cx).await;
+        let (_wt, _entry) = create_folder_wt(project.clone(), "/root1/", cx).await;
+        let (wt2, entry2) = create_folder_wt(project.clone(), "/root2/", cx).await;
+        insert_active_entry_for(wt2, entry2, project.clone(), cx);
+
+        //Test
+        cx.update(|cx| {
+            let workspace = workspace.read(cx);
+            let active_entry = project.read(cx).active_entry();
+
+            assert!(active_entry.is_some());
+
+            let res = current_project_directory(workspace, cx);
+            assert_eq!(res, Some((Path::new("/root2/")).to_path_buf()));
+            let res = first_project_directory(workspace, cx);
+            assert_eq!(res, Some((Path::new("/root1/")).to_path_buf()));
+        });
+    }
+
+    ///Creates an empty workspace with no worktrees
+    pub async fn blank_workspace(
+        cx: &mut TestAppContext,
+    ) -> (ModelHandle<Project>, ViewHandle<Workspace>) {
+        let params = cx.update(AppState::test);
+
+        let project = Project::test(params.fs.clone(), [], cx).await;
+        let (_, workspace) = cx.add_window(|cx| {
+            Workspace::new(
+                Default::default(),
+                0,
+                project.clone(),
+                |_, _| unimplemented!(),
+                cx,
+            )
+        });
+
+        (project, workspace)
+    }
+
+    ///Creates a worktree with 1 folder: /root{suffix}/
+    async fn create_folder_wt(
+        project: ModelHandle<Project>,
+        path: impl AsRef<Path>,
+        cx: &mut TestAppContext,
+    ) -> (ModelHandle<Worktree>, Entry) {
+        create_wt(project, true, path, cx).await
+    }
+
+    ///Creates a worktree with 1 file: /root{suffix}.txt
+    async fn create_file_wt(
+        project: ModelHandle<Project>,
+        path: impl AsRef<Path>,
+        cx: &mut TestAppContext,
+    ) -> (ModelHandle<Worktree>, Entry) {
+        create_wt(project, false, path, cx).await
+    }
+
+    async fn create_wt(
+        project: ModelHandle<Project>,
+        is_dir: bool,
+        path: impl AsRef<Path>,
+        cx: &mut TestAppContext,
+    ) -> (ModelHandle<Worktree>, Entry) {
+        let (wt, _) = project
+            .update(cx, |project, cx| {
+                project.find_or_create_local_worktree(path, true, cx)
+            })
+            .await
+            .unwrap();
+
+        let entry = cx
+            .update(|cx| {
+                wt.update(cx, |wt, cx| {
+                    wt.as_local()
+                        .unwrap()
+                        .create_entry(Path::new(""), is_dir, cx)
+                })
+            })
+            .await
+            .unwrap();
+
+        (wt, entry)
+    }
+
+    pub fn insert_active_entry_for(
+        wt: ModelHandle<Worktree>,
+        entry: Entry,
+        project: ModelHandle<Project>,
+        cx: &mut TestAppContext,
+    ) {
+        cx.update(|cx| {
+            let p = ProjectPath {
+                worktree_id: wt.read(cx).id(),
+                path: entry.path,
+            };
+            project.update(cx, |project, cx| project.set_active_path(Some(p), cx));
+        });
+    }
+}
diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs
index 0879166bbe..78ee56f188 100644
--- a/crates/workspace/src/dock.rs
+++ b/crates/workspace/src/dock.rs
@@ -126,18 +126,21 @@ impl DockPosition {
     }
 }
 
-pub type DefaultItemFactory =
-    fn(&mut Workspace, &mut ViewContext<Workspace>) -> Box<dyn ItemHandle>;
+pub type DockDefaultItemFactory =
+    fn(workspace: &mut Workspace, cx: &mut ViewContext<Workspace>) -> Option<Box<dyn ItemHandle>>;
 
 pub struct Dock {
     position: DockPosition,
     panel_sizes: HashMap<DockAnchor, f32>,
     pane: ViewHandle<Pane>,
-    default_item_factory: DefaultItemFactory,
+    default_item_factory: DockDefaultItemFactory,
 }
 
 impl Dock {
-    pub fn new(default_item_factory: DefaultItemFactory, cx: &mut ViewContext<Workspace>) -> Self {
+    pub fn new(
+        default_item_factory: DockDefaultItemFactory,
+        cx: &mut ViewContext<Workspace>,
+    ) -> Self {
+        let position =
DockPosition::Hidden(cx.global::().default_dock_anchor); let pane = cx.add_view(|cx| Pane::new(Some(position.anchor()), cx)); @@ -192,9 +195,11 @@ impl Dock { // Ensure that the pane has at least one item or construct a default item to put in it let pane = workspace.dock.pane.clone(); if pane.read(cx).items().next().is_none() { - let item_to_add = (workspace.dock.default_item_factory)(workspace, cx); - // Adding the item focuses the pane by default - Pane::add_item(workspace, &pane, item_to_add, true, true, None, cx); + if let Some(item_to_add) = (workspace.dock.default_item_factory)(workspace, cx) { + Pane::add_item(workspace, &pane, item_to_add, true, true, None, cx); + } else { + workspace.dock.position = workspace.dock.position.hide(); + } } else { cx.focus(pane); } @@ -465,8 +470,8 @@ mod tests { pub fn default_item_factory( _workspace: &mut Workspace, cx: &mut ViewContext, - ) -> Box { - Box::new(cx.add_view(|_| TestItem::new())) + ) -> Option> { + Some(Box::new(cx.add_view(|_| TestItem::new()))) } #[gpui::test] diff --git a/crates/workspace/src/notifications.rs b/crates/workspace/src/notifications.rs index 91656727d0..0e76d45518 100644 --- a/crates/workspace/src/notifications.rs +++ b/crates/workspace/src/notifications.rs @@ -161,8 +161,8 @@ pub mod simple_message_notification { pub struct MessageNotification { message: String, - click_action: Box, - click_message: String, + click_action: Option>, + click_message: Option, } pub enum MessageNotificationEvent { @@ -174,6 +174,14 @@ pub mod simple_message_notification { } impl MessageNotification { + pub fn new_messsage>(message: S) -> MessageNotification { + Self { + message: message.as_ref().to_string(), + click_action: None, + click_message: None, + } + } + pub fn new, A: Action, S2: AsRef>( message: S1, click_action: A, @@ -181,8 +189,8 @@ pub mod simple_message_notification { ) -> Self { Self { message: message.as_ref().to_string(), - click_action: Box::new(click_action) as Box, - click_message: click_message.as_ref().to_string(), + click_action: Some(Box::new(click_action) as Box), + click_message: Some(click_message.as_ref().to_string()), } } @@ -202,8 +210,11 @@ pub mod simple_message_notification { enum MessageNotificationTag {} - let click_action = self.click_action.boxed_clone(); - let click_message = self.click_message.clone(); + let click_action = self + .click_action + .as_ref() + .map(|action| action.boxed_clone()); + let click_message = self.click_message.as_ref().map(|message| message.clone()); let message = self.message.clone(); MouseEventHandler::::new(0, cx, |state, cx| { @@ -251,20 +262,28 @@ pub mod simple_message_notification { ) .boxed(), ) - .with_child({ + .with_children({ let style = theme.action_message.style_for(state, false); - - Text::new(click_message, style.text.clone()) - .contained() - .with_style(style.container) - .boxed() + if let Some(click_message) = click_message { + Some( + Text::new(click_message, style.text.clone()) + .contained() + .with_style(style.container) + .boxed(), + ) + } else { + None + } + .into_iter() }) .contained() .boxed() }) .with_cursor_style(CursorStyle::PointingHand) .on_click(MouseButton::Left, move |_, cx| { - cx.dispatch_any_action(click_action.boxed_clone()) + if let Some(click_action) = click_action.as_ref() { + cx.dispatch_any_action(click_action.boxed_clone()) + } }) .boxed() } @@ -278,3 +297,38 @@ pub mod simple_message_notification { } } } + +pub trait NotifyResultExt { + type Ok; + + fn notify_err( + self, + workspace: &mut Workspace, + cx: &mut ViewContext, + ) 
-> Option; +} + +impl NotifyResultExt for Result +where + E: std::fmt::Debug, +{ + type Ok = T; + + fn notify_err(self, workspace: &mut Workspace, cx: &mut ViewContext) -> Option { + match self { + Ok(value) => Some(value), + Err(err) => { + workspace.show_notification(0, cx, |cx| { + cx.add_view(|_cx| { + simple_message_notification::MessageNotification::new_messsage(format!( + "Error: {:?}", + err, + )) + }) + }); + + None + } + } + } +} diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index a0c353b3f8..d38cf96ed2 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -27,7 +27,7 @@ use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; -use dock::{DefaultItemFactory, Dock, ToggleDockButton}; +use dock::{Dock, DockDefaultItemFactory, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; use futures::{channel::oneshot, FutureExt, StreamExt}; @@ -375,7 +375,7 @@ pub struct AppState { pub fs: Arc, pub build_window_options: fn() -> WindowOptions<'static>, pub initialize_workspace: fn(&mut Workspace, &Arc, &mut ViewContext), - pub default_item_factory: DefaultItemFactory, + pub dock_default_item_factory: DockDefaultItemFactory, } impl AppState { @@ -401,7 +401,7 @@ impl AppState { user_store, initialize_workspace: |_, _, _| {}, build_window_options: Default::default, - default_item_factory: |_, _| unimplemented!(), + dock_default_item_factory: |_, _| unimplemented!(), }) } } @@ -515,7 +515,7 @@ impl Workspace { serialized_workspace: Option, workspace_id: WorkspaceId, project: ModelHandle, - dock_default_factory: DefaultItemFactory, + dock_default_factory: DockDefaultItemFactory, cx: &mut ViewContext, ) -> Self { cx.observe_fullscreen(|_, _, cx| cx.notify()).detach(); @@ -703,7 +703,7 @@ impl Workspace { serialized_workspace, workspace_id, project_handle, - app_state.default_item_factory, + app_state.dock_default_item_factory, cx, ); (app_state.initialize_workspace)(&mut workspace, &app_state, cx); @@ -2694,7 +2694,7 @@ mod tests { pub fn default_item_factory( _workspace: &mut Workspace, _cx: &mut ViewContext, - ) -> Box { + ) -> Option> { unimplemented!(); } diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 2396af6465..09a20b5660 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -32,13 +32,15 @@ use settings::{ use smol::process::Command; use std::fs::OpenOptions; use std::{env, ffi::OsStr, panic, path::PathBuf, sync::Arc, thread, time::Duration}; -use terminal_view::terminal_container_view::{get_working_directory, TerminalContainer}; +use terminal_view::{get_working_directory, TerminalView}; use fs::RealFs; use settings::watched_json::{watch_keymap_file, watch_settings_file, WatchedJsonFile}; use theme::ThemeRegistry; use util::{channel::RELEASE_CHANNEL, paths, ResultExt, TryFutureExt}; -use workspace::{self, item::ItemHandle, AppState, NewFile, OpenPaths, Workspace}; +use workspace::{ + self, item::ItemHandle, notifications::NotifyResultExt, AppState, NewFile, OpenPaths, Workspace, +}; use zed::{self, build_window_options, initialize_workspace, languages, menus}; fn main() { @@ -150,7 +152,7 @@ fn main() { fs, build_window_options, initialize_workspace, - default_item_factory, + dock_default_item_factory, }); auto_update::init(http, client::ZED_SERVER_URL.clone(), cx); @@ -581,10 +583,10 @@ async fn handle_cli_connection( } } -pub fn default_item_factory( 
+pub fn dock_default_item_factory( workspace: &mut Workspace, cx: &mut ViewContext, -) -> Box { +) -> Option> { let strategy = cx .global::() .terminal_overrides @@ -594,12 +596,15 @@ pub fn default_item_factory( let working_directory = get_working_directory(workspace, cx, strategy); - let terminal_handle = cx.add_view(|cx| { - TerminalContainer::new( - Err(anyhow!("Don't have a project to open a terminal")), - workspace.database_id(), - cx, - ) - }); - Box::new(terminal_handle) + let window_id = cx.window_id(); + let terminal = workspace + .project() + .update(cx, |project, cx| { + project.create_terminal(working_directory, window_id, cx) + }) + .notify_err(workspace, cx)?; + + let terminal_view = cx.add_view(|cx| TerminalView::new(terminal, workspace.database_id(), cx)); + + Some(Box::new(terminal_view)) } From 2697112a8a5e429197287bcb999c53ddebe4c080 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 9 Dec 2022 08:11:18 +0100 Subject: [PATCH 230/240] Don't drop `unregistered` column in reconnection support migration We don't use this column anymore because, when a project is unshared, we simply remove it from the `projects` table. However, this column is expected in the stable version of the server and the database is shared between stable and preview. If we dropped it, stable would start throwing errors. --- crates/collab/migrations.sqlite/20221109000000_test_schema.sql | 3 ++- .../collab/migrations/20221111092550_reconnection_support.sql | 3 +-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 90fd8ace12..68caf4fad7 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -44,7 +44,8 @@ CREATE TABLE "projects" ( "room_id" INTEGER REFERENCES rooms (id) NOT NULL, "host_user_id" INTEGER REFERENCES users (id) NOT NULL, "host_connection_id" INTEGER NOT NULL, - "host_connection_epoch" TEXT NOT NULL + "host_connection_epoch" TEXT NOT NULL, + "unregistered" BOOLEAN NOT NULL DEFAULT FALSE ); CREATE INDEX "index_projects_on_host_connection_epoch" ON "projects" ("host_connection_epoch"); diff --git a/crates/collab/migrations/20221111092550_reconnection_support.sql b/crates/collab/migrations/20221111092550_reconnection_support.sql index 5e8bada2f9..3289f6bbdd 100644 --- a/crates/collab/migrations/20221111092550_reconnection_support.sql +++ b/crates/collab/migrations/20221111092550_reconnection_support.sql @@ -6,8 +6,7 @@ CREATE TABLE IF NOT EXISTS "rooms" ( ALTER TABLE "projects" ADD "room_id" INTEGER REFERENCES rooms (id), ADD "host_connection_id" INTEGER, - ADD "host_connection_epoch" UUID, - DROP COLUMN "unregistered"; + ADD "host_connection_epoch" UUID; CREATE INDEX "index_projects_on_host_connection_epoch" ON "projects" ("host_connection_epoch"); CREATE TABLE "worktrees" ( From 0366d725ea2a71f55f468822dae606a267e2d9e4 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 9 Dec 2022 08:19:41 +0100 Subject: [PATCH 231/240] collab 0.3.1 --- Cargo.lock | 2 +- crates/collab/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ddd7a0f7fd..95c604cae8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1130,7 +1130,7 @@ dependencies = [ [[package]] name = "collab" -version = "0.3.0" +version = "0.3.1" dependencies = [ "anyhow", "async-tungstenite", diff --git a/crates/collab/Cargo.toml 
b/crates/collab/Cargo.toml index a4ccabf099..f152f2c74f 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] default-run = "collab" edition = "2021" name = "collab" -version = "0.3.0" +version = "0.3.1" [[bin]] name = "collab" From 3a4f8d267a06f4128be1e994ab3dfb5ad30a01f8 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 9 Dec 2022 11:49:17 -0800 Subject: [PATCH 232/240] Fix infinite loop in dock position when deserializing --- crates/workspace/src/dock.rs | 61 ++++++++++++++++++++++++++++++- crates/workspace/src/item.rs | 16 ++++++-- crates/workspace/src/workspace.rs | 9 ++++- 3 files changed, 79 insertions(+), 7 deletions(-) diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index 78ee56f188..19fed4bf59 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -458,14 +458,26 @@ impl StatusItemView for ToggleDockButton { #[cfg(test)] mod tests { - use std::ops::{Deref, DerefMut}; + use std::{ + ops::{Deref, DerefMut}, + path::PathBuf, + }; use gpui::{AppContext, TestAppContext, UpdateView, ViewContext}; use project::{FakeFs, Project}; use settings::Settings; use super::*; - use crate::{item::test::TestItem, sidebar::Sidebar, ItemHandle, Workspace}; + use crate::{ + dock, + item::test::TestItem, + persistence::model::{ + SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace, + }, + register_deserializable_item, + sidebar::Sidebar, + ItemHandle, Workspace, + }; pub fn default_item_factory( _workspace: &mut Workspace, @@ -474,6 +486,51 @@ mod tests { Some(Box::new(cx.add_view(|_| TestItem::new()))) } + #[gpui::test] + async fn test_dock_workspace_infinite_loop(cx: &mut TestAppContext) { + cx.foreground().forbid_parking(); + Settings::test_async(cx); + + cx.update(|cx| { + register_deserializable_item::(cx); + }); + + let serialized_workspace = SerializedWorkspace { + id: 0, + location: Vec::::new().into(), + dock_position: dock::DockPosition::Shown(DockAnchor::Expanded), + center_group: SerializedPaneGroup::Pane(SerializedPane { + active: false, + children: vec![], + }), + dock_pane: SerializedPane { + active: true, + children: vec![SerializedItem { + active: true, + item_id: 0, + kind: "test".into(), + }], + }, + left_sidebar_open: false, + }; + + let fs = FakeFs::new(cx.background()); + let project = Project::test(fs, [], cx).await; + + let (_, _workspace) = cx.add_window(|cx| { + Workspace::new( + Some(serialized_workspace), + 0, + project.clone(), + default_item_factory, + cx, + ) + }); + + cx.foreground().run_until_parked(); + //Should terminate + } + #[gpui::test] async fn test_dock_hides_when_pane_empty(cx: &mut TestAppContext) { let mut cx = DockTestContext::new(cx).await; diff --git a/crates/workspace/src/item.rs b/crates/workspace/src/item.rs index e44e7ca09d..14f847fd54 100644 --- a/crates/workspace/src/item.rs +++ b/crates/workspace/src/item.rs @@ -681,6 +681,7 @@ pub(crate) mod test { use super::{Item, ItemEvent}; pub struct TestItem { + pub workspace_id: WorkspaceId, pub state: String, pub label: String, pub save_count: usize, @@ -716,6 +717,7 @@ pub(crate) mod test { nav_history: None, tab_descriptions: None, tab_detail: Default::default(), + workspace_id: self.workspace_id, } } } @@ -736,9 +738,16 @@ pub(crate) mod test { nav_history: None, tab_descriptions: None, tab_detail: Default::default(), + workspace_id: 0, } } + pub fn new_deserialized(id: WorkspaceId) -> Self { + let mut this = Self::new(); + this.workspace_id = id; + this + } + pub fn 
with_label(mut self, state: &str) -> Self { self.label = state.to_string(); self @@ -893,11 +902,12 @@ pub(crate) mod test { fn deserialize( _project: ModelHandle, _workspace: WeakViewHandle, - _workspace_id: WorkspaceId, + workspace_id: WorkspaceId, _item_id: ItemId, - _cx: &mut ViewContext, + cx: &mut ViewContext, ) -> Task>> { - unreachable!("Cannot deserialize test item") + let view = cx.add_view(|_cx| Self::new_deserialized(workspace_id)); + Task::Ready(Some(anyhow::Ok(view))) } } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index d38cf96ed2..6dd5c9c4fa 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -2371,7 +2371,12 @@ impl Workspace { workspace.toggle_sidebar(SidebarSide::Left, cx); } - // Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx); + // Note that without after_window, the focus_self() and + // the focus the dock generates start generating alternating + // focus due to the deferred execution each triggering each other + cx.after_window_update(move |workspace, cx| { + Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx); + }); cx.notify(); }); @@ -2695,7 +2700,7 @@ mod tests { _workspace: &mut Workspace, _cx: &mut ViewContext, ) -> Option> { - unimplemented!(); + unimplemented!() } #[gpui::test] From 34388a1d315554006f93bbacf07cca34b8626871 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 9 Dec 2022 12:07:49 -0800 Subject: [PATCH 233/240] Updated is_child() to omit self --- crates/gpui/src/app.rs | 5 +++-- crates/workspace/src/workspace.rs | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/crates/gpui/src/app.rs b/crates/gpui/src/app.rs index bf78399914..f5ced700b6 100644 --- a/crates/gpui/src/app.rs +++ b/crates/gpui/src/app.rs @@ -1431,8 +1431,8 @@ impl MutableAppContext { true } - // Returns an iterator over all of the view ids from the passed view up to the root of the window - // Includes the passed view itself + /// Returns an iterator over all of the view ids from the passed view up to the root of the window + /// Includes the passed view itself fn ancestors(&self, window_id: usize, mut view_id: usize) -> impl Iterator + '_ { std::iter::once(view_id) .into_iter() @@ -3695,6 +3695,7 @@ impl<'a, T: View> ViewContext<'a, T> { return false; } self.ancestors(view.window_id, view.view_id) + .skip(1) // Skip self id .any(|parent| parent == self.view_id) } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 6dd5c9c4fa..7dc8ddab06 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -2542,7 +2542,7 @@ impl View for Workspace { } else { for pane in self.panes() { let view = view.clone(); - if pane.update(cx, |_, cx| cx.is_child(view)) { + if pane.update(cx, |_, cx| view.id() == cx.view_id() || cx.is_child(view)) { self.handle_pane_focused(pane.clone(), cx); break; } From d74fb97158e5e886c1aab3c2b8aa45df35641661 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 6 Dec 2022 16:45:09 +0100 Subject: [PATCH 234/240] Remove `Executor` trait from `collab` and use an enum instead This will let us save off the executor and avoid using generics. 
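For illustration, here is a minimal, self-contained sketch of the enum-dispatch pattern this commit adopts. The runtime handles and the `SessionState` struct below are simplified stand-ins for this sketch, not collab's real types:

    use std::future::Future;

    #[derive(Clone)]
    pub enum Executor {
        Production,
        // In collab this variant is gated behind #[cfg(test)] and carries a
        // deterministic scheduler handle; it is elided here so the sketch
        // compiles on its own.
        Deterministic,
    }

    impl Executor {
        pub fn spawn_detached<F>(&self, _future: F)
        where
            F: Future<Output = ()> + Send + 'static,
        {
            match self {
                // Production would hand the future to tokio::spawn, and
                // Deterministic would hand it to the test scheduler.
                Executor::Production => { /* tokio::spawn(_future); */ }
                Executor::Deterministic => { /* scheduler.spawn(_future).detach(); */ }
            }
        }
    }

    // The payoff: a concrete, clonable field instead of an `E: Executor`
    // type parameter threaded through every containing struct and function.
    pub struct SessionState {
        pub executor: Executor,
    }

The trade-off is a match on every call instead of static dispatch, which is negligible next to the cost of spawning a task.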
--- crates/collab/src/executor.rs | 36 ++++++++++++++++++++++ crates/collab/src/integration_tests.rs | 23 ++++----------- crates/collab/src/lib.rs | 1 + crates/collab/src/rpc.rs | 41 ++++---------------------- 4 files changed, 48 insertions(+), 53 deletions(-) create mode 100644 crates/collab/src/executor.rs diff --git a/crates/collab/src/executor.rs b/crates/collab/src/executor.rs new file mode 100644 index 0000000000..d2253f8ccb --- /dev/null +++ b/crates/collab/src/executor.rs @@ -0,0 +1,36 @@ +use std::{future::Future, time::Duration}; + +#[derive(Clone)] +pub enum Executor { + Production, + #[cfg(test)] + Deterministic(std::sync::Arc), +} + +impl Executor { + pub fn spawn_detached(&self, future: F) + where + F: 'static + Send + Future, + { + match self { + Executor::Production => { + tokio::spawn(future); + } + #[cfg(test)] + Executor::Deterministic(background) => { + background.spawn(future).detach(); + } + } + } + + pub fn sleep(&self, duration: Duration) -> impl Future { + let this = self.clone(); + async move { + match this { + Executor::Production => tokio::time::sleep(duration).await, + #[cfg(test)] + Executor::Deterministic(background) => background.timer(duration).await, + } + } + } +} diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index a77ae4925d..96fed5887b 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1,9 +1,9 @@ use crate::{ db::{self, NewUserParams, TestDb, UserId}, - rpc::{Executor, Server}, + executor::Executor, + rpc::Server, AppState, }; - use ::rpc::Peer; use anyhow::anyhow; use call::{room, ActiveCall, ParticipantLocation, Room}; @@ -17,7 +17,7 @@ use editor::{ ToggleCodeActions, Undo, }; use fs::{FakeFs, Fs as _, HomeDir, LineEnding}; -use futures::{channel::oneshot, Future, StreamExt as _}; +use futures::{channel::oneshot, StreamExt as _}; use gpui::{ executor::{self, Deterministic}, geometry::vector::vec2f, @@ -45,7 +45,6 @@ use std::{ atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst}, Arc, }, - time::Duration, }; use theme::ThemeRegistry; use unindent::Unindent as _; @@ -417,7 +416,7 @@ async fn test_leaving_room_on_disconnection( // When user A disconnects, both client A and B clear their room on the active call. 
server.disconnect_client(client_a.peer_id().unwrap()); - cx_a.foreground().advance_clock(rpc::RECEIVE_TIMEOUT); + deterministic.advance_clock(rpc::RECEIVE_TIMEOUT); active_call_a.read_with(cx_a, |call, _| assert!(call.room().is_none())); active_call_b.read_with(cx_b, |call, _| assert!(call.room().is_none())); assert_eq!( @@ -6000,7 +5999,7 @@ impl TestServer { client_name, user, Some(connection_id_tx), - cx.background(), + Executor::Deterministic(cx.background()), )) .detach(); let connection_id = connection_id_rx.await.unwrap(); @@ -6829,18 +6828,6 @@ impl Drop for TestClient { } } -impl Executor for Arc { - type Sleep = gpui::executor::Timer; - - fn spawn_detached>(&self, future: F) { - self.spawn(future).detach(); - } - - fn sleep(&self, duration: Duration) -> Self::Sleep { - self.as_ref().timer(duration) - } -} - #[derive(Debug, Eq, PartialEq)] struct RoomParticipants { remote: Vec, diff --git a/crates/collab/src/lib.rs b/crates/collab/src/lib.rs index 24a9fc6117..b9d43cd2ee 100644 --- a/crates/collab/src/lib.rs +++ b/crates/collab/src/lib.rs @@ -2,6 +2,7 @@ pub mod api; pub mod auth; pub mod db; pub mod env; +mod executor; #[cfg(test)] mod integration_tests; pub mod rpc; diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 736f5eb31b..c1f9eb039b 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -3,6 +3,7 @@ mod connection_pool; use crate::{ auth, db::{self, Database, ProjectId, RoomId, User, UserId}, + executor::Executor, AppState, Result, }; use anyhow::anyhow; @@ -50,12 +51,8 @@ use std::{ atomic::{AtomicBool, Ordering::SeqCst}, Arc, }, - time::Duration, -}; -use tokio::{ - sync::{Mutex, MutexGuard}, - time::Sleep, }; +use tokio::sync::{Mutex, MutexGuard}; use tower::ServiceBuilder; use tracing::{info_span, instrument, Instrument}; @@ -145,15 +142,6 @@ pub struct Server { handlers: HashMap, } -pub trait Executor: Send + Clone { - type Sleep: Send + Future; - fn spawn_detached>(&self, future: F); - fn sleep(&self, duration: Duration) -> Self::Sleep; -} - -#[derive(Clone)] -pub struct RealExecutor; - pub(crate) struct ConnectionPoolGuard<'a> { guard: MutexGuard<'a, ConnectionPool>, _not_send: PhantomData>, @@ -330,13 +318,13 @@ impl Server { }) } - pub fn handle_connection( + pub fn handle_connection( self: &Arc, connection: Connection, address: String, user: User, mut send_connection_id: Option>, - executor: E, + executor: Executor, ) -> impl Future> { let this = self.clone(); let user_id = user.id; @@ -347,12 +335,7 @@ impl Server { .peer .add_connection(connection, { let executor = executor.clone(); - move |duration| { - let timer = executor.sleep(duration); - async move { - timer.await; - } - } + move |duration| executor.sleep(duration) }); tracing::info!(%user_id, %login, %connection_id, %address, "connection opened"); @@ -543,18 +526,6 @@ impl<'a> Drop for ConnectionPoolGuard<'a> { } } -impl Executor for RealExecutor { - type Sleep = Sleep; - - fn spawn_detached>(&self, future: F) { - tokio::task::spawn(future); - } - - fn sleep(&self, duration: Duration) -> Self::Sleep { - tokio::time::sleep(duration) - } -} - fn broadcast( sender_id: ConnectionId, receiver_ids: impl IntoIterator, @@ -636,7 +607,7 @@ pub async fn handle_websocket_request( let connection = Connection::new(Box::pin(socket)); async move { server - .handle_connection(connection, socket_address, user, None, RealExecutor) + .handle_connection(connection, socket_address, user, None, Executor::Production) .await .log_err(); } From aca3f025906a3236682534ebbbf0c50b9a26cefa Mon Sep 
17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 8 Dec 2022 12:14:12 +0100 Subject: [PATCH 235/240] Re-join room when client temporarily loses connection --- crates/call/src/participant.rs | 10 +- crates/call/src/room.rs | 114 ++++++++++++--- .../20221109000000_test_schema.sql | 1 + ...d_connection_lost_to_room_participants.sql | 2 + crates/collab/src/db.rs | 64 +++++++- crates/collab/src/db/room_participant.rs | 1 + crates/collab/src/integration_tests.rs | 30 +++- crates/collab/src/rpc.rs | 137 +++++++++--------- 8 files changed, 267 insertions(+), 92 deletions(-) create mode 100644 crates/collab/migrations/20221207165001_add_connection_lost_to_room_participants.sql diff --git a/crates/call/src/participant.rs b/crates/call/src/participant.rs index dfa456f734..d5c6d85154 100644 --- a/crates/call/src/participant.rs +++ b/crates/call/src/participant.rs @@ -4,7 +4,7 @@ use collections::HashMap; use gpui::WeakModelHandle; pub use live_kit_client::Frame; use project::Project; -use std::sync::Arc; +use std::{fmt, sync::Arc}; #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum ParticipantLocation { @@ -36,7 +36,7 @@ pub struct LocalParticipant { pub active_project: Option>, } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct RemoteParticipant { pub user: Arc, pub projects: Vec, @@ -49,6 +49,12 @@ pub struct RemoteVideoTrack { pub(crate) live_kit_track: Arc, } +impl fmt::Debug for RemoteVideoTrack { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RemoteVideoTrack").finish() + } +} + impl RemoteVideoTrack { pub fn frames(&self) -> async_broadcast::Receiver { self.live_kit_track.frames() diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index f8a55a3a93..828885e9bd 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -5,14 +5,18 @@ use crate::{ use anyhow::{anyhow, Result}; use client::{proto, Client, PeerId, TypedEnvelope, User, UserStore}; use collections::{BTreeMap, HashSet}; -use futures::StreamExt; -use gpui::{AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task}; +use futures::{FutureExt, StreamExt}; +use gpui::{ + AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task, WeakModelHandle, +}; use live_kit_client::{LocalTrackPublication, LocalVideoTrack, RemoteVideoTrackUpdate}; use postage::stream::Stream; use project::Project; -use std::{mem, sync::Arc}; +use std::{mem, sync::Arc, time::Duration}; use util::{post_inc, ResultExt}; +pub const RECONNECTION_TIMEOUT: Duration = client::RECEIVE_TIMEOUT; + #[derive(Clone, Debug, PartialEq, Eq)] pub enum Event { ParticipantLocationChanged { @@ -46,6 +50,7 @@ pub struct Room { user_store: ModelHandle, subscriptions: Vec, pending_room_update: Option>, + _maintain_connection: Task>, } impl Entity for Room { @@ -66,21 +71,6 @@ impl Room { user_store: ModelHandle, cx: &mut ModelContext, ) -> Self { - let mut client_status = client.status(); - cx.spawn_weak(|this, mut cx| async move { - let is_connected = client_status - .next() - .await - .map_or(false, |s| s.is_connected()); - // Even if we're initially connected, any future change of the status means we momentarily disconnected. 
- if !is_connected || client_status.next().await.is_some() { - if let Some(this) = this.upgrade(&cx) { - let _ = this.update(&mut cx, |this, cx| this.leave(cx)); - } - } - }) - .detach(); - let live_kit_room = if let Some(connection_info) = live_kit_connection_info { let room = live_kit_client::Room::new(); let mut status = room.status(); @@ -131,6 +121,9 @@ impl Room { None }; + let _maintain_connection = + cx.spawn_weak(|this, cx| Self::maintain_connection(this, client.clone(), cx)); + Self { id, live_kit: live_kit_room, @@ -145,6 +138,7 @@ impl Room { pending_room_update: None, client, user_store, + _maintain_connection, } } @@ -245,6 +239,83 @@ impl Room { Ok(()) } + async fn maintain_connection( + this: WeakModelHandle, + client: Arc, + mut cx: AsyncAppContext, + ) -> Result<()> { + let mut client_status = client.status(); + loop { + let is_connected = client_status + .next() + .await + .map_or(false, |s| s.is_connected()); + // Even if we're initially connected, any future change of the status means we momentarily disconnected. + if !is_connected || client_status.next().await.is_some() { + let room_id = this + .upgrade(&cx) + .ok_or_else(|| anyhow!("room was dropped"))? + .update(&mut cx, |this, cx| { + this.status = RoomStatus::Rejoining; + cx.notify(); + this.id + }); + + // Wait for client to re-establish a connection to the server. + let mut reconnection_timeout = cx.background().timer(RECONNECTION_TIMEOUT).fuse(); + let client_reconnection = async { + loop { + if let Some(status) = client_status.next().await { + if status.is_connected() { + return true; + } + } else { + return false; + } + } + } + .fuse(); + futures::pin_mut!(client_reconnection); + + futures::select_biased! { + reconnected = client_reconnection => { + if reconnected { + // Client managed to reconnect to the server. Now attempt to join the room. + let rejoin_room = async { + let response = client.request(proto::JoinRoom { id: room_id }).await?; + let room_proto = response.room.ok_or_else(|| anyhow!("invalid room"))?; + this.upgrade(&cx) + .ok_or_else(|| anyhow!("room was dropped"))? + .update(&mut cx, |this, cx| { + this.status = RoomStatus::Online; + this.apply_room_update(room_proto, cx) + })?; + anyhow::Ok(()) + }; + + // If we successfully joined the room, go back around the loop + // waiting for future connection status changes. + if rejoin_room.await.log_err().is_some() { + continue; + } + } + } + _ = reconnection_timeout => {} + } + + // The client failed to re-establish a connection to the server + // or an error occurred while trying to re-join the room. Either way + // we leave the room and return an error. 
+ if let Some(this) = this.upgrade(&cx) { + let _ = this.update(&mut cx, |this, cx| this.leave(cx)); + } + return Err(anyhow!( + "can't reconnect to room: client failed to re-establish connection" + )); + } + } + } + pub fn id(&self) -> u64 { self.id } @@ -325,9 +396,11 @@ impl Room { } if let Some(participants) = remote_participants.log_err() { + let mut participant_peer_ids = HashSet::default(); for (participant, user) in room.participants.into_iter().zip(participants) { let peer_id = PeerId(participant.peer_id); this.participant_user_ids.insert(participant.user_id); + participant_peer_ids.insert(peer_id); let old_projects = this .remote_participants @@ -394,8 +467,8 @@ impl Room { } } - this.remote_participants.retain(|_, participant| { - if this.participant_user_ids.contains(&participant.user.id) { + this.remote_participants.retain(|peer_id, participant| { + if participant_peer_ids.contains(peer_id) { true } else { for project in &participant.projects { @@ -751,6 +824,7 @@ impl Default for ScreenTrack { #[derive(Copy, Clone, PartialEq, Eq)] pub enum RoomStatus { Online, + Rejoining, Offline, } diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 68caf4fad7..4eba8d2302 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -118,6 +118,7 @@ CREATE TABLE "room_participants" ( "user_id" INTEGER NOT NULL REFERENCES users (id), "answering_connection_id" INTEGER, "answering_connection_epoch" TEXT, + "connection_lost" BOOLEAN NOT NULL, "location_kind" INTEGER, "location_project_id" INTEGER, "initial_project_id" INTEGER, diff --git a/crates/collab/migrations/20221207165001_add_connection_lost_to_room_participants.sql b/crates/collab/migrations/20221207165001_add_connection_lost_to_room_participants.sql new file mode 100644 index 0000000000..d49eda41b8 --- /dev/null +++ b/crates/collab/migrations/20221207165001_add_connection_lost_to_room_participants.sql @@ -0,0 +1,2 @@ +ALTER TABLE "room_participants" + ADD "connection_lost" BOOLEAN NOT NULL DEFAULT FALSE; diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index aae4d92964..063d82f932 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1034,6 +1034,7 @@ impl Database { user_id: ActiveValue::set(user_id), answering_connection_id: ActiveValue::set(Some(connection_id.0 as i32)), answering_connection_epoch: ActiveValue::set(Some(self.epoch)), + connection_lost: ActiveValue::set(false), calling_user_id: ActiveValue::set(user_id), calling_connection_id: ActiveValue::set(connection_id.0 as i32), calling_connection_epoch: ActiveValue::set(self.epoch), @@ -1060,6 +1061,7 @@ impl Database { room_participant::ActiveModel { room_id: ActiveValue::set(room_id), user_id: ActiveValue::set(called_user_id), + connection_lost: ActiveValue::set(false), calling_user_id: ActiveValue::set(calling_user_id), calling_connection_id: ActiveValue::set(calling_connection_id.0 as i32), calling_connection_epoch: ActiveValue::set(self.epoch), @@ -1175,11 +1177,16 @@ impl Database { room_participant::Column::RoomId .eq(room_id) .and(room_participant::Column::UserId.eq(user_id)) - .and(room_participant::Column::AnsweringConnectionId.is_null()), + .and( + room_participant::Column::AnsweringConnectionId + .is_null() + .or(room_participant::Column::ConnectionLost.eq(true)), + ), ) .set(room_participant::ActiveModel { answering_connection_id: 
ActiveValue::set(Some(connection_id.0 as i32)), answering_connection_epoch: ActiveValue::set(Some(self.epoch)), + connection_lost: ActiveValue::set(false), ..Default::default() }) .exec(&*tx) @@ -1367,6 +1374,61 @@ impl Database { .await } + pub async fn connection_lost( + &self, + connection_id: ConnectionId, + ) -> Result>> { + self.room_transaction(|tx| async move { + let participant = room_participant::Entity::find() + .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0 as i32)) + .one(&*tx) + .await? + .ok_or_else(|| anyhow!("not a participant in any room"))?; + let room_id = participant.room_id; + + room_participant::Entity::update(room_participant::ActiveModel { + connection_lost: ActiveValue::set(true), + ..participant.into_active_model() + }) + .exec(&*tx) + .await?; + + let collaborator_on_projects = project_collaborator::Entity::find() + .find_also_related(project::Entity) + .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0 as i32)) + .all(&*tx) + .await?; + project_collaborator::Entity::delete_many() + .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0)) + .exec(&*tx) + .await?; + + let mut left_projects = Vec::new(); + for (_, project) in collaborator_on_projects { + if let Some(project) = project { + let collaborators = project + .find_related(project_collaborator::Entity) + .all(&*tx) + .await?; + let connection_ids = collaborators + .into_iter() + .map(|collaborator| ConnectionId(collaborator.connection_id as u32)) + .collect(); + + left_projects.push(LeftProject { + id: project.id, + host_user_id: project.host_user_id, + host_connection_id: ConnectionId(project.host_connection_id as u32), + connection_ids, + }); + } + } + + Ok((room_id, left_projects)) + }) + .await + } + fn build_incoming_call( room: &proto::Room, called_user_id: UserId, diff --git a/crates/collab/src/db/room_participant.rs b/crates/collab/src/db/room_participant.rs index 783f45aa93..3ab3fbbdda 100644 --- a/crates/collab/src/db/room_participant.rs +++ b/crates/collab/src/db/room_participant.rs @@ -10,6 +10,7 @@ pub struct Model { pub user_id: UserId, pub answering_connection_id: Option, pub answering_connection_epoch: Option, + pub connection_lost: bool, pub location_kind: Option, pub location_project_id: Option, pub initial_project_id: Option, diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 96fed5887b..f31022afc4 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -365,7 +365,7 @@ async fn test_room_uniqueness( } #[gpui::test(iterations = 10)] -async fn test_leaving_room_on_disconnection( +async fn test_disconnecting_from_room( deterministic: Arc, cx_a: &mut TestAppContext, cx_b: &mut TestAppContext, @@ -414,9 +414,30 @@ async fn test_leaving_room_on_disconnection( } ); - // When user A disconnects, both client A and B clear their room on the active call. + // User A automatically reconnects to the room upon disconnection. server.disconnect_client(client_a.peer_id().unwrap()); deterministic.advance_clock(rpc::RECEIVE_TIMEOUT); + deterministic.run_until_parked(); + assert_eq!( + room_participants(&room_a, cx_a), + RoomParticipants { + remote: vec!["user_b".to_string()], + pending: Default::default() + } + ); + assert_eq!( + room_participants(&room_b, cx_b), + RoomParticipants { + remote: vec!["user_a".to_string()], + pending: Default::default() + } + ); + + // When user A disconnects, both client A and B clear their room on the active call. 
+ server.forbid_connections(); + server.disconnect_client(client_a.peer_id().unwrap()); + deterministic.advance_clock(rpc::RECEIVE_TIMEOUT + crate::rpc::RECONNECTION_TIMEOUT); + deterministic.run_until_parked(); active_call_a.read_with(cx_a, |call, _| assert!(call.room().is_none())); active_call_b.read_with(cx_b, |call, _| assert!(call.room().is_none())); assert_eq!( @@ -434,6 +455,11 @@ async fn test_leaving_room_on_disconnection( } ); + // Allow user A to reconnect to the server. + server.allow_connections(); + deterministic.advance_clock(rpc::RECEIVE_TIMEOUT); + deterministic.run_until_parked(); + // Call user B again from client A. active_call_a .update(cx_a, |call, cx| { diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index c1f9eb039b..3f70043bfb 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -51,11 +51,14 @@ use std::{ atomic::{AtomicBool, Ordering::SeqCst}, Arc, }, + time::Duration, }; use tokio::sync::{Mutex, MutexGuard}; use tower::ServiceBuilder; use tracing::{info_span, instrument, Instrument}; +pub const RECONNECTION_TIMEOUT: Duration = rpc::RECEIVE_TIMEOUT; + lazy_static! { static ref METRIC_CONNECTIONS: IntGauge = register_int_gauge!("connections", "number of connections").unwrap(); @@ -435,7 +438,7 @@ impl Server { drop(foreground_message_handlers); tracing::info!(%user_id, %login, %connection_id, %address, "signing out"); - if let Err(error) = sign_out(session).await { + if let Err(error) = sign_out(session, executor).await { tracing::error!(%user_id, %login, %connection_id, %address, ?error, "error signing out"); } @@ -636,29 +639,38 @@ pub async fn handle_metrics(Extension(server): Extension<Arc<Server>>) -> Result Ok(encoded_metrics) } -#[instrument(err)] -async fn sign_out(session: Session) -> Result<()> { +#[instrument(err, skip(executor))] +async fn sign_out(session: Session, executor: Executor) -> Result<()> { session.peer.disconnect(session.connection_id); - let decline_calls = { - let mut pool = session.connection_pool().await; - pool.remove_connection(session.connection_id)?; - let mut connections = pool.user_connection_ids(session.user_id); - connections.next().is_none() - }; + session + .connection_pool() + .await + .remove_connection(session.connection_id)?; - leave_room_for_session(&session).await.trace_err(); - if decline_calls { - if let Some(room) = session - .db() - .await - .decline_call(None, session.user_id) - .await - .trace_err() - { - room_updated(&room, &session); + if let Ok(mut left_projects) = session + .db() + .await + .connection_lost(session.connection_id) + .await + { + for left_project in mem::take(&mut *left_projects) { + project_left(&left_project, &session); } } + executor.sleep(RECONNECTION_TIMEOUT).await; + leave_room_for_session(&session).await.trace_err(); + + if !session + .connection_pool() + .await + .is_user_online(session.user_id) + { + let db = session.db().await; + if let Some(room) = db.decline_call(None, session.user_id).await.trace_err() { + room_updated(&room, &session); + } + } update_user_contacts(session.user_id, &session).await?; Ok(()) } @@ -1089,20 +1101,7 @@ async fn leave_project(request: proto::LeaveProject, session: Session) -> Result host_connection_id = %project.host_connection_id, "leave project" ); - - broadcast( - sender_id, - project.connection_ids.iter().copied(), - |conn_id| { - session.peer.send( - conn_id, - proto::RemoveProjectCollaborator { - project_id: project_id.to_proto(), - peer_id: sender_id.0, - }, - ) - }, - ); + project_left(&project, &session); Ok(()) } @@ -1833,40
+1832,7 @@ async fn leave_room_for_session(session: &Session) -> Result<()> { contacts_to_update.insert(session.user_id); for project in left_room.left_projects.values() { - for connection_id in &project.connection_ids { - if project.host_user_id == session.user_id { - session - .peer - .send( - *connection_id, - proto::UnshareProject { - project_id: project.id.to_proto(), - }, - ) - .trace_err(); - } else { - session - .peer - .send( - *connection_id, - proto::RemoveProjectCollaborator { - project_id: project.id.to_proto(), - peer_id: session.connection_id.0, - }, - ) - .trace_err(); - } - } - - session - .peer - .send( - session.connection_id, - proto::UnshareProject { - project_id: project.id.to_proto(), - }, - ) - .trace_err(); + project_left(project, session); } room_updated(&left_room.room, &session); @@ -1906,6 +1872,43 @@ async fn leave_room_for_session(session: &Session) -> Result<()> { Ok(()) } +fn project_left(project: &db::LeftProject, session: &Session) { + for connection_id in &project.connection_ids { + if project.host_user_id == session.user_id { + session + .peer + .send( + *connection_id, + proto::UnshareProject { + project_id: project.id.to_proto(), + }, + ) + .trace_err(); + } else { + session + .peer + .send( + *connection_id, + proto::RemoveProjectCollaborator { + project_id: project.id.to_proto(), + peer_id: session.connection_id.0, + }, + ) + .trace_err(); + } + } + + session + .peer + .send( + session.connection_id, + proto::UnshareProject { + project_id: project.id.to_proto(), + }, + ) + .trace_err(); +} + pub trait ResultExt { type Ok; From 8fa26bfe18789fb02889b8a63470a2ae88656381 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 9 Dec 2022 08:58:18 +0100 Subject: [PATCH 236/240] Fix `test_calls_on_multiple_connections` after adding room reconnection --- crates/collab/src/integration_tests.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index f31022afc4..a2639d7c58 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -437,7 +437,6 @@ async fn test_disconnecting_from_room( server.forbid_connections(); server.disconnect_client(client_a.peer_id().unwrap()); deterministic.advance_clock(rpc::RECEIVE_TIMEOUT + crate::rpc::RECONNECTION_TIMEOUT); - deterministic.run_until_parked(); active_call_a.read_with(cx_a, |call, _| assert!(call.room().is_none())); active_call_b.read_with(cx_b, |call, _| assert!(call.room().is_none())); assert_eq!( @@ -458,7 +457,6 @@ async fn test_disconnecting_from_room( // Allow user A to reconnect to the server. server.allow_connections(); deterministic.advance_clock(rpc::RECEIVE_TIMEOUT); - deterministic.run_until_parked(); // Call user B again from client A. active_call_a @@ -642,12 +640,15 @@ async fn test_calls_on_multiple_connections( assert!(incoming_call_b2.next().await.unwrap().is_some()); // User A disconnects, causing both connections to stop ringing. + server.forbid_connections(); server.disconnect_client(client_a.peer_id().unwrap()); - deterministic.advance_clock(rpc::RECEIVE_TIMEOUT); + deterministic.advance_clock(rpc::RECEIVE_TIMEOUT + crate::rpc::RECONNECTION_TIMEOUT); assert!(incoming_call_b1.next().await.unwrap().is_none()); assert!(incoming_call_b2.next().await.unwrap().is_none()); // User A reconnects automatically, then calls user B again. 
+ server.allow_connections(); + deterministic.advance_clock(rpc::RECEIVE_TIMEOUT); active_call_a .update(cx_a, |call, cx| { call.invite(client_b1.user_id().unwrap(), None, cx) @@ -662,7 +663,7 @@ async fn test_calls_on_multiple_connections( server.forbid_connections(); server.disconnect_client(client_b1.peer_id().unwrap()); server.disconnect_client(client_b2.peer_id().unwrap()); - deterministic.advance_clock(rpc::RECEIVE_TIMEOUT); + deterministic.advance_clock(rpc::RECEIVE_TIMEOUT + crate::rpc::RECONNECTION_TIMEOUT); active_call_a.read_with(cx_a, |call, _| assert!(call.room().is_none())); } From 895c36548514c8d30631fc566587a8d48981fb50 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 9 Dec 2022 11:20:22 +0100 Subject: [PATCH 237/240] Introduce random reconnections in the randomized test --- crates/call/src/room.rs | 2 + .../20221109000000_test_schema.sql | 14 ++-- crates/collab/src/integration_tests.rs | 82 +++++++++++++------ crates/collab/src/rpc.rs | 45 ++++++---- 4 files changed, 97 insertions(+), 46 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index 828885e9bd..824ec49054 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -550,10 +550,12 @@ impl Room { { for participant in self.remote_participants.values() { assert!(self.participant_user_ids.contains(&participant.user.id)); + assert_ne!(participant.user.id, self.client.user_id().unwrap()); } for participant in &self.pending_participants { assert!(self.participant_user_ids.contains(&participant.id)); + assert_ne!(participant.id, self.client.user_id().unwrap()); } assert_eq!( diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 4eba8d2302..9f03541f44 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -1,5 +1,5 @@ CREATE TABLE "users" ( - "id" INTEGER PRIMARY KEY, + "id" INTEGER PRIMARY KEY AUTOINCREMENT, "github_login" VARCHAR, "admin" BOOLEAN, "email_address" VARCHAR(255) DEFAULT NULL, @@ -17,14 +17,14 @@ CREATE INDEX "index_users_on_email_address" ON "users" ("email_address"); CREATE INDEX "index_users_on_github_user_id" ON "users" ("github_user_id"); CREATE TABLE "access_tokens" ( - "id" INTEGER PRIMARY KEY, + "id" INTEGER PRIMARY KEY AUTOINCREMENT, "user_id" INTEGER REFERENCES users (id), "hash" VARCHAR(128) ); CREATE INDEX "index_access_tokens_user_id" ON "access_tokens" ("user_id"); CREATE TABLE "contacts" ( - "id" INTEGER PRIMARY KEY, + "id" INTEGER PRIMARY KEY AUTOINCREMENT, "user_id_a" INTEGER REFERENCES users (id) NOT NULL, "user_id_b" INTEGER REFERENCES users (id) NOT NULL, "a_to_b" BOOLEAN NOT NULL, @@ -35,12 +35,12 @@ CREATE UNIQUE INDEX "index_contacts_user_ids" ON "contacts" ("user_id_a", "user_ CREATE INDEX "index_contacts_user_id_b" ON "contacts" ("user_id_b"); CREATE TABLE "rooms" ( - "id" INTEGER PRIMARY KEY, + "id" INTEGER PRIMARY KEY AUTOINCREMENT, "live_kit_room" VARCHAR NOT NULL ); CREATE TABLE "projects" ( - "id" INTEGER PRIMARY KEY, + "id" INTEGER PRIMARY KEY AUTOINCREMENT, "room_id" INTEGER REFERENCES rooms (id) NOT NULL, "host_user_id" INTEGER REFERENCES users (id) NOT NULL, "host_connection_id" INTEGER NOT NULL, @@ -100,7 +100,7 @@ CREATE TABLE "language_servers" ( CREATE INDEX "index_language_servers_on_project_id" ON "language_servers" ("project_id"); CREATE TABLE "project_collaborators" ( - "id" INTEGER PRIMARY KEY, + "id" INTEGER PRIMARY KEY AUTOINCREMENT, 
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "connection_id" INTEGER NOT NULL, "connection_epoch" TEXT NOT NULL, @@ -113,7 +113,7 @@ CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" O CREATE INDEX "index_project_collaborators_on_connection_epoch" ON "project_collaborators" ("connection_epoch"); CREATE TABLE "room_participants" ( - "id" INTEGER PRIMARY KEY, + "id" INTEGER PRIMARY KEY AUTOINCREMENT, "room_id" INTEGER NOT NULL REFERENCES rooms (id), "user_id" INTEGER NOT NULL REFERENCES users (id), "answering_connection_id" INTEGER, diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index a2639d7c58..aca5f77fe9 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1,7 +1,7 @@ use crate::{ db::{self, NewUserParams, TestDb, UserId}, executor::Executor, - rpc::Server, + rpc::{Server, RECONNECT_TIMEOUT}, AppState, }; use ::rpc::Peer; @@ -416,7 +416,7 @@ async fn test_disconnecting_from_room( // User A automatically reconnects to the room upon disconnection. server.disconnect_client(client_a.peer_id().unwrap()); - deterministic.advance_clock(rpc::RECEIVE_TIMEOUT); + deterministic.advance_clock(RECEIVE_TIMEOUT); deterministic.run_until_parked(); assert_eq!( room_participants(&room_a, cx_a), @@ -436,7 +436,7 @@ async fn test_disconnecting_from_room( // When user A disconnects, both client A and B clear their room on the active call. server.forbid_connections(); server.disconnect_client(client_a.peer_id().unwrap()); - deterministic.advance_clock(rpc::RECEIVE_TIMEOUT + crate::rpc::RECONNECTION_TIMEOUT); + deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); active_call_a.read_with(cx_a, |call, _| assert!(call.room().is_none())); active_call_b.read_with(cx_b, |call, _| assert!(call.room().is_none())); assert_eq!( @@ -456,7 +456,7 @@ async fn test_disconnecting_from_room( // Allow user A to reconnect to the server. server.allow_connections(); - deterministic.advance_clock(rpc::RECEIVE_TIMEOUT); + deterministic.advance_clock(RECEIVE_TIMEOUT); // Call user B again from client A. active_call_a @@ -581,7 +581,7 @@ async fn test_calls_on_multiple_connections( // User B disconnects the client that is not on the call. Everything should be fine. client_b1.disconnect(&cx_b1.to_async()).unwrap(); - deterministic.advance_clock(rpc::RECEIVE_TIMEOUT); + deterministic.advance_clock(RECEIVE_TIMEOUT); client_b1 .authenticate_and_connect(false, &cx_b1.to_async()) .await @@ -642,13 +642,13 @@ async fn test_calls_on_multiple_connections( // User A disconnects, causing both connections to stop ringing. server.forbid_connections(); server.disconnect_client(client_a.peer_id().unwrap()); - deterministic.advance_clock(rpc::RECEIVE_TIMEOUT + crate::rpc::RECONNECTION_TIMEOUT); + deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); assert!(incoming_call_b1.next().await.unwrap().is_none()); assert!(incoming_call_b2.next().await.unwrap().is_none()); // User A reconnects automatically, then calls user B again. 
server.allow_connections(); - deterministic.advance_clock(rpc::RECEIVE_TIMEOUT); + deterministic.advance_clock(RECEIVE_TIMEOUT); active_call_a .update(cx_a, |call, cx| { call.invite(client_b1.user_id().unwrap(), None, cx) @@ -663,7 +663,7 @@ async fn test_calls_on_multiple_connections( server.forbid_connections(); server.disconnect_client(client_b1.peer_id().unwrap()); server.disconnect_client(client_b2.peer_id().unwrap()); - deterministic.advance_clock(rpc::RECEIVE_TIMEOUT + crate::rpc::RECONNECTION_TIMEOUT); + deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); active_call_a.read_with(cx_a, |call, _| assert!(call.room().is_none())); } @@ -953,8 +953,9 @@ async fn test_host_disconnect( assert!(cx_b.is_window_edited(workspace_b.window_id())); // Drop client A's connection. Collaborators should disappear and the project should not be shown as shared. + server.forbid_connections(); server.disconnect_client(client_a.peer_id().unwrap()); - deterministic.advance_clock(rpc::RECEIVE_TIMEOUT); + deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); project_a .condition(cx_a, |project, _| project.collaborators().is_empty()) .await; @@ -977,6 +978,11 @@ async fn test_host_disconnect( .unwrap(); assert!(can_close); + // Allow client A to reconnect to the server. + server.allow_connections(); + deterministic.advance_clock(RECEIVE_TIMEOUT); + + // Client B calls client A again after they reconnected. let active_call_b = cx_b.read(ActiveCall::global); active_call_b .update(cx_b, |call, cx| { @@ -997,7 +1003,7 @@ async fn test_host_disconnect( // Drop client A's connection again. We should still unshare it successfully. server.disconnect_client(client_a.peer_id().unwrap()); - deterministic.advance_clock(rpc::RECEIVE_TIMEOUT); + deterministic.advance_clock(RECEIVE_TIMEOUT); project_a.read_with(cx_a, |project, _| assert!(!project.is_shared())); } @@ -2323,7 +2329,7 @@ async fn test_leaving_project( // Simulate connection loss for client C and ensure client A observes client C leaving the project. 
client_c.wait_for_current_user(cx_c).await; server.disconnect_client(client_c.peer_id().unwrap()); - cx_a.foreground().advance_clock(rpc::RECEIVE_TIMEOUT); + cx_a.foreground().advance_clock(RECEIVE_TIMEOUT); deterministic.run_until_parked(); project_a.read_with(cx_a, |project, _| { assert_eq!(project.collaborators().len(), 0); @@ -4256,7 +4262,7 @@ async fn test_contacts( server.disconnect_client(client_c.peer_id().unwrap()); server.forbid_connections(); - deterministic.advance_clock(rpc::RECEIVE_TIMEOUT); + deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); assert_eq!( contacts(&client_a, cx_a), [ @@ -4560,7 +4566,7 @@ async fn test_contacts( server.forbid_connections(); server.disconnect_client(client_a.peer_id().unwrap()); - deterministic.advance_clock(rpc::RECEIVE_TIMEOUT); + deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); assert_eq!(contacts(&client_a, cx_a), []); assert_eq!( contacts(&client_b, cx_b), @@ -5656,7 +5662,7 @@ async fn test_random_collaboration( let mut clients = Vec::new(); let mut user_ids = Vec::new(); - let mut peer_ids = Vec::new(); let mut op_start_signals = Vec::new(); let mut next_entity_id = 100000; @@ -5683,7 +5688,6 @@ async fn test_random_collaboration( let op_start_signal = futures::channel::mpsc::unbounded(); let guest = server.create_client(&mut guest_cx, &guest_username).await; user_ids.push(guest.current_user_id(&guest_cx)); - peer_ids.push(guest.peer_id().unwrap()); op_start_signals.push(op_start_signal.0); clients.push(guest_cx.foreground().spawn(guest.simulate( guest_username.clone(), @@ -5695,16 +5699,26 @@ async fn test_random_collaboration( log::info!("Added connection for {}", guest_username); operations += 1; } - 20..=29 if clients.len() > 1 => { + 20..=24 if clients.len() > 1 => { let guest_ix = rng.lock().gen_range(1..clients.len()); - log::info!("Removing guest {}", user_ids[guest_ix]); + log::info!( + "Simulating full disconnection of guest {}", + user_ids[guest_ix] + ); let removed_guest_id = user_ids.remove(guest_ix); - let removed_peer_id = peer_ids.remove(guest_ix); + let user_connection_ids = server + .connection_pool + .lock() + .await + .user_connection_ids(removed_guest_id) + .collect::<Vec<_>>(); + assert_eq!(user_connection_ids.len(), 1); + let removed_peer_id = PeerId(user_connection_ids[0].0); let guest = clients.remove(guest_ix); op_start_signals.remove(guest_ix); server.forbid_connections(); server.disconnect_client(removed_peer_id); - deterministic.advance_clock(RECEIVE_TIMEOUT); + deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); deterministic.start_waiting(); log::info!("Waiting for guest {} to exit...", removed_guest_id); let (guest, mut guest_cx) = guest.await; @@ -5738,6 +5752,22 @@ async fn test_random_collaboration( operations += 1; } + 25..=29 if clients.len() > 1 => { + let guest_ix = rng.lock().gen_range(1..clients.len()); + let user_id = user_ids[guest_ix]; + log::info!("Simulating temporary disconnection of guest {}", user_id); + let user_connection_ids = server + .connection_pool + .lock() + .await + .user_connection_ids(user_id) + .collect::<Vec<_>>(); + assert_eq!(user_connection_ids.len(), 1); + let peer_id = PeerId(user_connection_ids[0].0); + server.disconnect_client(peer_id); + deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); + operations += 1; + } _ if !op_start_signals.is_empty() => { while operations < max_operations && rng.lock().gen_bool(0.7) { op_start_signals @@ -6163,6 +6193,7 @@ impl Deref for TestServer { impl Drop for TestServer { fn drop(&mut
self) { self.peer.reset(); + self.server.teardown(); self.test_live_kit_server.teardown().unwrap(); } } @@ -6423,11 +6454,14 @@ impl TestClient { .clone() } }; - if let Err(error) = active_call - .update(cx, |call, cx| call.share_project(project.clone(), cx)) - .await - { - log::error!("{}: error sharing project, {:?}", username, error); + + if active_call.read_with(cx, |call, _| call.room().is_some()) { + if let Err(error) = active_call + .update(cx, |call, cx| call.share_project(project.clone(), cx)) + .await + { + log::error!("{}: error sharing project, {:?}", username, error); + } } let buffers = client.buffers.entry(project.clone()).or_default(); diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 3f70043bfb..e1d318fd3e 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -53,11 +53,11 @@ use std::{ }, time::Duration, }; -use tokio::sync::{Mutex, MutexGuard}; +use tokio::sync::{watch, Mutex, MutexGuard}; use tower::ServiceBuilder; use tracing::{info_span, instrument, Instrument}; -pub const RECONNECTION_TIMEOUT: Duration = rpc::RECEIVE_TIMEOUT; +pub const RECONNECT_TIMEOUT: Duration = rpc::RECEIVE_TIMEOUT; lazy_static! { static ref METRIC_CONNECTIONS: IntGauge = @@ -143,6 +143,7 @@ pub struct Server { pub(crate) connection_pool: Arc<Mutex<ConnectionPool>>, app_state: Arc<AppState>, handlers: HashMap<TypeId, MessageHandler>, + teardown: watch::Sender<()>, } pub(crate) struct ConnectionPoolGuard<'a> { @@ -173,6 +174,7 @@ impl Server { app_state, connection_pool: Default::default(), handlers: Default::default(), + teardown: watch::channel(()).0, }; server @@ -235,6 +237,10 @@ impl Server { Arc::new(server) } + pub fn teardown(&self) { + let _ = self.teardown.send(()); + } + fn add_handler<F, Fut, M>(&mut self, handler: F) -> &mut Self where F: 'static + Send + Sync + Fn(TypedEnvelope<M>, Session) -> Fut, @@ -333,6 +339,7 @@ impl Server { let user_id = user.id; let login = user.github_login; let span = info_span!("handle connection", %user_id, %login, %address); + let teardown = self.teardown.subscribe(); async move { let (connection_id, handle_io, mut incoming_rx) = this .peer @@ -438,7 +445,7 @@ impl Server { drop(foreground_message_handlers); tracing::info!(%user_id, %login, %connection_id, %address, "signing out"); - if let Err(error) = sign_out(session, executor).await { + if let Err(error) = sign_out(session, teardown, executor).await { tracing::error!(%user_id, %login, %connection_id, %address, ?error, "error signing out"); } @@ -640,7 +647,11 @@ pub async fn handle_metrics(Extension(server): Extension<Arc<Server>>) -> Result } #[instrument(err, skip(executor))] -async fn sign_out(session: Session, executor: Executor) -> Result<()> { +async fn sign_out( + session: Session, + mut teardown: watch::Receiver<()>, + executor: Executor, +) -> Result<()> { session.peer.disconnect(session.connection_id); session .connection_pool() .await .remove_connection(session.connection_id)?; @@ -658,20 +669,24 @@ async fn sign_out(session: Session, executor: Executor) -> Result<()> { } } - executor.sleep(RECONNECT_TIMEOUT).await; - leave_room_for_session(&session).await.trace_err(); + futures::select_biased!
{ + _ = executor.sleep(RECONNECT_TIMEOUT).fuse() => { + leave_room_for_session(&session).await.trace_err(); - if !session - .connection_pool() - .await - .is_user_online(session.user_id) - { - let db = session.db().await; - if let Some(room) = db.decline_call(None, session.user_id).await.trace_err() { - room_updated(&room, &session); + if !session + .connection_pool() + .await + .is_user_online(session.user_id) + { + let db = session.db().await; + if let Some(room) = db.decline_call(None, session.user_id).await.trace_err() { + room_updated(&room, &session); + } + } + update_user_contacts(session.user_id, &session).await?; } + _ = teardown.changed().fuse() => {} } - update_user_contacts(session.user_id, &session).await?; Ok(()) } From 26b565342725fee00868aba748bb5c75602eb706 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 9 Dec 2022 12:06:12 +0100 Subject: [PATCH 238/240] Delete hosted projects from database when connection is lost --- crates/collab/src/db.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 063d82f932..5ab2b1b824 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1424,6 +1424,11 @@ impl Database { } } + project::Entity::delete_many() + .filter(project::Column::HostConnectionId.eq(connection_id.0 as i32)) + .exec(&*tx) + .await?; + Ok((room_id, left_projects)) }) .await From 456396ca6e04de73e2d5d6e4748cc8f3b1754f22 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Fri, 9 Dec 2022 14:08:40 +0100 Subject: [PATCH 239/240] Rename `connection_lost` to `answering_connection_lost` --- .../migrations.sqlite/20221109000000_test_schema.sql | 2 +- ...165001_add_connection_lost_to_room_participants.sql | 2 +- crates/collab/src/db.rs | 10 +++++----- crates/collab/src/db/room_participant.rs | 2 +- crates/collab/src/rpc.rs | 3 ++- 5 files changed, 10 insertions(+), 9 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 9f03541f44..d1bb7b8f65 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -118,7 +118,7 @@ CREATE TABLE "room_participants" ( "user_id" INTEGER NOT NULL REFERENCES users (id), "answering_connection_id" INTEGER, "answering_connection_epoch" TEXT, - "connection_lost" BOOLEAN NOT NULL, + "answering_connection_lost" BOOLEAN NOT NULL, "location_kind" INTEGER, "location_project_id" INTEGER, "initial_project_id" INTEGER, diff --git a/crates/collab/migrations/20221207165001_add_connection_lost_to_room_participants.sql b/crates/collab/migrations/20221207165001_add_connection_lost_to_room_participants.sql index d49eda41b8..2f4f38407c 100644 --- a/crates/collab/migrations/20221207165001_add_connection_lost_to_room_participants.sql +++ b/crates/collab/migrations/20221207165001_add_connection_lost_to_room_participants.sql @@ -1,2 +1,2 @@ ALTER TABLE "room_participants" - ADD "connection_lost" BOOLEAN NOT NULL DEFAULT FALSE; + ADD "answering_connection_lost" BOOLEAN NOT NULL DEFAULT FALSE; diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 5ab2b1b824..c21ef7026c 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1034,7 +1034,7 @@ impl Database { user_id: ActiveValue::set(user_id), answering_connection_id: ActiveValue::set(Some(connection_id.0 as i32)), answering_connection_epoch: ActiveValue::set(Some(self.epoch)), - connection_lost: 
ActiveValue::set(false), + answering_connection_lost: ActiveValue::set(false), calling_user_id: ActiveValue::set(user_id), calling_connection_id: ActiveValue::set(connection_id.0 as i32), calling_connection_epoch: ActiveValue::set(self.epoch), @@ -1061,7 +1061,7 @@ impl Database { room_participant::ActiveModel { room_id: ActiveValue::set(room_id), user_id: ActiveValue::set(called_user_id), - connection_lost: ActiveValue::set(false), + answering_connection_lost: ActiveValue::set(false), calling_user_id: ActiveValue::set(calling_user_id), calling_connection_id: ActiveValue::set(calling_connection_id.0 as i32), calling_connection_epoch: ActiveValue::set(self.epoch), @@ -1180,13 +1180,13 @@ impl Database { .and( room_participant::Column::AnsweringConnectionId .is_null() - .or(room_participant::Column::ConnectionLost.eq(true)), + .or(room_participant::Column::AnsweringConnectionLost.eq(true)), ), ) .set(room_participant::ActiveModel { answering_connection_id: ActiveValue::set(Some(connection_id.0 as i32)), answering_connection_epoch: ActiveValue::set(Some(self.epoch)), - connection_lost: ActiveValue::set(false), + answering_connection_lost: ActiveValue::set(false), ..Default::default() }) .exec(&*tx) @@ -1387,7 +1387,7 @@ impl Database { let room_id = participant.room_id; room_participant::Entity::update(room_participant::ActiveModel { - connection_lost: ActiveValue::set(true), + answering_connection_lost: ActiveValue::set(true), ..participant.into_active_model() }) .exec(&*tx) diff --git a/crates/collab/src/db/room_participant.rs b/crates/collab/src/db/room_participant.rs index 3ab3fbbdda..c80c10c1ba 100644 --- a/crates/collab/src/db/room_participant.rs +++ b/crates/collab/src/db/room_participant.rs @@ -10,7 +10,7 @@ pub struct Model { pub user_id: UserId, pub answering_connection_id: Option<i32>, pub answering_connection_epoch: Option<Uuid>, - pub connection_lost: bool, + pub answering_connection_lost: bool, pub location_kind: Option<i32>, pub location_project_id: Option<ProjectId>, pub initial_project_id: Option<ProjectId>, diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index e1d318fd3e..a799837ad4 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -658,11 +658,12 @@ async fn sign_out( .await .remove_connection(session.connection_id)?; - if let Ok(mut left_projects) = session + if let Some(mut left_projects) = session .db() .await .connection_lost(session.connection_id) .await + .trace_err() { for left_project in mem::take(&mut *left_projects) { project_left(&left_project, &session); } } From 3cd77bfcc4946c7f760d721992235fddfc5ecab7 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 12 Dec 2022 11:16:27 +0100 Subject: [PATCH 240/240] Always cast connection ids to i32 Postgres doesn't support unsigned types. This also adds indices to support querying `project_collaborators` and `room_participants` by connection id.
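Casting on only one side of a comparison would make lookups silently miss rows, because a u32 connection id that doesn't fit in an i32 wraps to a negative value when stored. Below is a minimal, standalone sketch of that wrap-around behavior (illustrative only, not part of this patch; the id value is made up):

    fn main() {
        // Postgres INTEGER is signed 32-bit, so a u32 connection id is
        // stored via `as i32`; ids above i32::MAX wrap to negative values.
        let connection_id: u32 = 3_000_000_000;
        let stored = connection_id as i32; // what the INTEGER column holds
        assert_eq!(stored, -1_294_967_296);
        // The cast is lossless in both directions, so applying the same
        // `as i32` cast on insert and in every filter keeps lookups exact.
        assert_eq!(stored as u32, connection_id);
    }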
--- .../20221109000000_test_schema.sql | 4 +++ ...d_connection_lost_to_room_participants.sql | 5 ++++ crates/collab/src/db.rs | 28 +++++++++---------- 3 files changed, 22 insertions(+), 15 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index d1bb7b8f65..0d4bcac5dd 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -111,6 +111,8 @@ CREATE TABLE "project_collaborators" ( CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id"); CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id"); CREATE INDEX "index_project_collaborators_on_connection_epoch" ON "project_collaborators" ("connection_epoch"); +CREATE INDEX "index_project_collaborators_on_connection_id" ON "project_collaborators" ("connection_id"); +CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_epoch" ON "project_collaborators" ("project_id", "connection_id", "connection_epoch"); CREATE TABLE "room_participants" ( "id" INTEGER PRIMARY KEY AUTOINCREMENT, @@ -129,3 +131,5 @@ CREATE TABLE "room_participants" ( CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id"); CREATE INDEX "index_room_participants_on_answering_connection_epoch" ON "room_participants" ("answering_connection_epoch"); CREATE INDEX "index_room_participants_on_calling_connection_epoch" ON "room_participants" ("calling_connection_epoch"); +CREATE INDEX "index_room_participants_on_answering_connection_id" ON "room_participants" ("answering_connection_id"); +CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_epoch" ON "room_participants" ("answering_connection_id", "answering_connection_epoch"); diff --git a/crates/collab/migrations/20221207165001_add_connection_lost_to_room_participants.sql b/crates/collab/migrations/20221207165001_add_connection_lost_to_room_participants.sql index 2f4f38407c..ed0cf972bc 100644 --- a/crates/collab/migrations/20221207165001_add_connection_lost_to_room_participants.sql +++ b/crates/collab/migrations/20221207165001_add_connection_lost_to_room_participants.sql @@ -1,2 +1,7 @@ ALTER TABLE "room_participants" ADD "answering_connection_lost" BOOLEAN NOT NULL DEFAULT FALSE; + +CREATE INDEX "index_project_collaborators_on_connection_id" ON "project_collaborators" ("connection_id"); +CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_epoch" ON "project_collaborators" ("project_id", "connection_id", "connection_epoch"); +CREATE INDEX "index_room_participants_on_answering_connection_id" ON "room_participants" ("answering_connection_id"); +CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_epoch" ON "room_participants" ("answering_connection_id", "answering_connection_epoch"); diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index c21ef7026c..4a920841e8 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1204,7 +1204,7 @@ impl Database { pub async fn leave_room(&self, connection_id: ConnectionId) -> Result<RoomGuard<LeftRoom>> { self.room_transaction(|tx| async move { let leaving_participant = room_participant::Entity::find()
.filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0 as i32)) .one(&*tx) .await?; @@ -1247,7 +1247,7 @@ impl Database { project_collaborator::Column::ProjectId, QueryProjectIds::ProjectId, ) - .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0)) + .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0 as i32)) .into_values::<_, QueryProjectIds>() .all(&*tx) .await?; @@ -1284,7 +1284,7 @@ impl Database { // Leave projects. project_collaborator::Entity::delete_many() - .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0)) + .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0 as i32)) .exec(&*tx) .await?; @@ -1293,7 +1293,7 @@ impl Database { .filter( project::Column::RoomId .eq(room_id) - .and(project::Column::HostConnectionId.eq(connection_id.0)), + .and(project::Column::HostConnectionId.eq(connection_id.0 as i32)), ) .exec(&*tx) .await?; @@ -1351,11 +1351,9 @@ impl Database { } let result = room_participant::Entity::update_many() - .filter( - room_participant::Column::RoomId - .eq(room_id) - .and(room_participant::Column::AnsweringConnectionId.eq(connection_id.0)), - ) + .filter(room_participant::Column::RoomId.eq(room_id).and( + room_participant::Column::AnsweringConnectionId.eq(connection_id.0 as i32), + )) .set(room_participant::ActiveModel { location_kind: ActiveValue::set(Some(location_kind)), location_project_id: ActiveValue::set(location_project_id), @@ -1399,7 +1397,7 @@ impl Database { .all(&*tx) .await?; project_collaborator::Entity::delete_many() - .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0)) + .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0 as i32)) .exec(&*tx) .await?; @@ -1581,7 +1579,7 @@ impl Database { ) -> Result> { self.room_transaction(|tx| async move { let participant = room_participant::Entity::find() - .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0)) + .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0 as i32)) .one(&*tx) .await? .ok_or_else(|| anyhow!("could not find participant"))?; @@ -1667,7 +1665,7 @@ impl Database { ) -> Result)>> { self.room_transaction(|tx| async move { let project = project::Entity::find_by_id(project_id) - .filter(project::Column::HostConnectionId.eq(connection_id.0)) + .filter(project::Column::HostConnectionId.eq(connection_id.0 as i32)) .one(&*tx) .await? .ok_or_else(|| anyhow!("no such project"))?; @@ -1721,7 +1719,7 @@ impl Database { // Ensure the update comes from the host. let project = project::Entity::find_by_id(project_id) - .filter(project::Column::HostConnectionId.eq(connection_id.0)) + .filter(project::Column::HostConnectionId.eq(connection_id.0 as i32)) .one(&*tx) .await? .ok_or_else(|| anyhow!("no such project"))?; @@ -1904,7 +1902,7 @@ impl Database { ) -> Result> { self.room_transaction(|tx| async move { let participant = room_participant::Entity::find() - .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0)) + .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0 as i32)) .one(&*tx) .await? .ok_or_else(|| anyhow!("must join a room first"))?; @@ -2041,7 +2039,7 @@ impl Database { .filter( project_collaborator::Column::ProjectId .eq(project_id) - .and(project_collaborator::Column::ConnectionId.eq(connection_id.0)), + .and(project_collaborator::Column::ConnectionId.eq(connection_id.0 as i32)), ) .exec(&*tx) .await?;