Start reworking join_project to use the database

This commit is contained in:
Antonio Scandurra 2022-11-15 11:44:26 +01:00
parent 6cbf197226
commit be523617c9
3 changed files with 164 additions and 34 deletions

View file

@ -62,6 +62,9 @@ CREATE TABLE "worktrees" (
"id" INTEGER NOT NULL, "id" INTEGER NOT NULL,
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"root_name" VARCHAR NOT NULL, "root_name" VARCHAR NOT NULL,
"visible" BOOL NOT NULL,
"scan_id" INTEGER NOT NULL,
"is_complete" BOOL NOT NULL,
PRIMARY KEY(project_id, id) PRIMARY KEY(project_id, id)
); );
CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id"); CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id");

View file

@ -1,7 +1,7 @@
use crate::{Error, Result}; use crate::{Error, Result};
use anyhow::anyhow; use anyhow::anyhow;
use axum::http::StatusCode; use axum::http::StatusCode;
use collections::HashMap; use collections::{BTreeMap, HashMap, HashSet};
use futures::{future::BoxFuture, FutureExt, StreamExt}; use futures::{future::BoxFuture, FutureExt, StreamExt};
use rpc::{proto, ConnectionId}; use rpc::{proto, ConnectionId};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@ -10,7 +10,11 @@ use sqlx::{
types::Uuid, types::Uuid,
FromRow, FromRow,
}; };
use std::{future::Future, path::Path, time::Duration}; use std::{
future::Future,
path::{Path, PathBuf},
time::Duration,
};
use time::{OffsetDateTime, PrimitiveDateTime}; use time::{OffsetDateTime, PrimitiveDateTime};
#[cfg(test)] #[cfg(test)]
@ -1404,13 +1408,26 @@ where
pub async fn share_project( pub async fn share_project(
&self, &self,
room_id: RoomId, expected_room_id: RoomId,
user_id: UserId,
connection_id: ConnectionId, connection_id: ConnectionId,
worktrees: &[proto::WorktreeMetadata], worktrees: &[proto::WorktreeMetadata],
) -> Result<(ProjectId, proto::Room)> { ) -> Result<(ProjectId, proto::Room)> {
self.transact(|mut tx| async move { self.transact(|mut tx| async move {
let project_id = sqlx::query_scalar( let (room_id, user_id) = sqlx::query_as::<_, (RoomId, UserId)>(
"
SELECT room_id, user_id
FROM room_participants
WHERE answering_connection_id = $1
",
)
.bind(connection_id.0 as i32)
.fetch_one(&mut tx)
.await?;
if room_id != expected_room_id {
return Err(anyhow!("shared project on unexpected room"))?;
}
let project_id: ProjectId = sqlx::query_scalar(
" "
INSERT INTO projects (room_id, host_user_id, host_connection_id) INSERT INTO projects (room_id, host_user_id, host_connection_id)
VALUES ($1, $2, $3) VALUES ($1, $2, $3)
@ -1421,8 +1438,7 @@ where
.bind(user_id) .bind(user_id)
.bind(connection_id.0 as i32) .bind(connection_id.0 as i32)
.fetch_one(&mut tx) .fetch_one(&mut tx)
.await .await?;
.map(ProjectId)?;
for worktree in worktrees { for worktree in worktrees {
sqlx::query( sqlx::query(
@ -1536,6 +1552,111 @@ where
.await .await
} }
/// Registers `connection_id` as a guest collaborator on `project_id`.
///
/// The caller must already be a participant of the room the project was
/// shared in; their room and user id are derived from their connection.
/// Returns the joined `Project` along with the replica id assigned to the
/// new collaborator.
///
/// NOTE(review): assembly of the returned `Project` (collaborators,
/// worktrees) is not implemented yet — this currently panics via `todo!()`
/// after the collaborator row has been committed.
pub async fn join_project(
    &self,
    project_id: ProjectId,
    connection_id: ConnectionId,
) -> Result<(Project, i32)> {
    self.transact(|mut tx| async move {
        // Resolve the caller's room and user from their answering connection.
        // Fails (RowNotFound) if the connection is not in any room.
        let (room_id, user_id) = sqlx::query_as::<_, (RoomId, UserId)>(
            "
            SELECT room_id, user_id
            FROM room_participants
            WHERE answering_connection_id = $1
            ",
        )
        .bind(connection_id.0 as i32)
        .fetch_one(&mut tx)
        .await?;

        // Ensure the project was shared in the caller's room. The projects
        // table's key column is `id` (worktrees reference `projects (id)`),
        // not `project_id`.
        sqlx::query(
            "
            SELECT 1
            FROM projects
            WHERE id = $1 AND room_id = $2
            ",
        )
        .bind(project_id)
        .bind(room_id)
        .fetch_one(&mut tx)
        .await?;

        // Assign the smallest replica id >= 1 that is not already taken;
        // replica id 0 is reserved for the host.
        let taken_replica_ids = sqlx::query_scalar::<_, i32>(
            "
            SELECT replica_id
            FROM project_collaborators
            WHERE project_id = $1
            ",
        )
        .bind(project_id)
        .fetch_all(&mut tx)
        .await?;
        let taken_replica_ids = HashSet::from_iter(taken_replica_ids);
        let mut replica_id = 1;
        while taken_replica_ids.contains(&replica_id) {
            replica_id += 1;
        }

        // Record the caller as a non-host collaborator on the project.
        sqlx::query(
            "
            INSERT INTO project_collaborators (
                project_id,
                connection_id,
                user_id,
                replica_id,
                is_host
            )
            VALUES ($1, $2, $3, $4, $5)
            ",
        )
        .bind(project_id)
        .bind(connection_id.0 as i32)
        .bind(user_id)
        .bind(replica_id)
        .bind(false)
        .execute(&mut tx)
        .await?;

        tx.commit().await?;

        // TODO: load the project's collaborators and worktrees so a real
        // `(Project, replica_id)` can be returned. Until then this panics
        // AFTER the commit above, leaving the collaborator row in place.
        todo!()
    })
    .await
}
pub async fn unshare_project(&self, project_id: ProjectId) -> Result<()> { pub async fn unshare_project(&self, project_id: ProjectId) -> Result<()> {
todo!() todo!()
// test_support!(self, { // test_support!(self, {
@ -1967,11 +2088,11 @@ pub struct Room {
} }
id_type!(ProjectId); id_type!(ProjectId);
#[derive(Clone, Debug, Default, FromRow, Serialize, PartialEq)]
pub struct Project { pub struct Project {
pub id: ProjectId, pub id: ProjectId,
pub host_user_id: UserId, pub collaborators: Vec<ProjectCollaborator>,
pub unregistered: bool, pub worktrees: BTreeMap<u64, Worktree>,
pub language_servers: Vec<proto::LanguageServer>,
} }
#[derive(Clone, Debug, Default, FromRow, PartialEq)] #[derive(Clone, Debug, Default, FromRow, PartialEq)]
@ -1983,6 +2104,17 @@ pub struct ProjectCollaborator {
pub is_host: bool, pub is_host: bool,
} }
/// In-memory snapshot of a shared worktree, mirroring a row of the
/// `worktrees` table (`root_name`, `visible`, `scan_id`, `is_complete`)
/// plus the entries and diagnostics replicated for guests.
#[derive(Default)]
pub struct Worktree {
// Presumably the absolute path of the worktree root on the host's
// machine — confirm against the caller that populates this.
pub abs_path: PathBuf,
// Name of the worktree's root directory (stored in `worktrees.root_name`).
pub root_name: String,
// Mirrors the `visible` column added to the `worktrees` table.
pub visible: bool,
// File-system entries keyed by entry id — TODO confirm key semantics.
pub entries: BTreeMap<u64, proto::Entry>,
// Diagnostic summaries keyed by the path they apply to.
pub diagnostic_summaries: BTreeMap<PathBuf, proto::DiagnosticSummary>,
// Mirrors the `scan_id` column; presumably incremented per scan — verify.
pub scan_id: u64,
// Mirrors the `is_complete` column (whether the scan has finished).
pub is_complete: bool,
}
pub struct LeftProject { pub struct LeftProject {
pub id: ProjectId, pub id: ProjectId,
pub host_user_id: UserId, pub host_user_id: UserId,

View file

@ -862,7 +862,6 @@ impl Server {
.db .db
.share_project( .share_project(
RoomId::from_proto(request.payload.room_id), RoomId::from_proto(request.payload.room_id),
request.sender_user_id,
request.sender_connection_id, request.sender_connection_id,
&request.payload.worktrees, &request.payload.worktrees,
) )
@ -942,15 +941,21 @@ impl Server {
tracing::info!(%project_id, %host_user_id, %host_connection_id, "join project"); tracing::info!(%project_id, %host_user_id, %host_connection_id, "join project");
let mut store = self.store().await; let (project, replica_id) = self
let (project, replica_id) = store.join_project(request.sender_connection_id, project_id)?; .app_state
let peer_count = project.guests.len(); .db
let mut collaborators = Vec::with_capacity(peer_count); .join_project(project_id, request.sender_connection_id)
collaborators.push(proto::Collaborator { .await?;
peer_id: project.host_connection_id.0,
replica_id: 0, let collaborators = project
user_id: project.host.user_id.to_proto(), .collaborators
}); .iter()
.map(|collaborator| proto::Collaborator {
peer_id: collaborator.connection_id as u32,
replica_id: collaborator.replica_id as u32,
user_id: collaborator.user_id.to_proto(),
})
.collect::<Vec<_>>();
let worktrees = project let worktrees = project
.worktrees .worktrees
.iter() .iter()
@ -962,22 +967,12 @@ impl Server {
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
// Add all guests other than the requesting user's own connections as collaborators for collaborator in &project.collaborators {
for (guest_conn_id, guest) in &project.guests { let connection_id = ConnectionId(collaborator.connection_id as u32);
if request.sender_connection_id != *guest_conn_id { if connection_id != request.sender_connection_id {
collaborators.push(proto::Collaborator {
peer_id: guest_conn_id.0,
replica_id: guest.replica_id as u32,
user_id: guest.user_id.to_proto(),
});
}
}
for conn_id in project.connection_ids() {
if conn_id != request.sender_connection_id {
self.peer self.peer
.send( .send(
conn_id, connection_id,
proto::AddProjectCollaborator { proto::AddProjectCollaborator {
project_id: project_id.to_proto(), project_id: project_id.to_proto(),
collaborator: Some(proto::Collaborator { collaborator: Some(proto::Collaborator {