Finish porting remaining db methods to sea-orm

This commit is contained in:
Antonio Scandurra 2022-12-02 13:58:23 +01:00
parent 585ac3e1be
commit dec5f37e4e
10 changed files with 399 additions and 418 deletions

View file

@ -72,6 +72,7 @@ CREATE TABLE "worktree_entries" (
PRIMARY KEY(project_id, worktree_id, id), PRIMARY KEY(project_id, worktree_id, id),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
); );
CREATE INDEX "index_worktree_entries_on_project_id" ON "worktree_entries" ("project_id");
CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id"); CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id");
CREATE TABLE "worktree_diagnostic_summaries" ( CREATE TABLE "worktree_diagnostic_summaries" (
@ -84,6 +85,7 @@ CREATE TABLE "worktree_diagnostic_summaries" (
PRIMARY KEY(project_id, worktree_id, path), PRIMARY KEY(project_id, worktree_id, path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
); );
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id" ON "worktree_diagnostic_summaries" ("project_id");
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id"); CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id");
CREATE TABLE "language_servers" ( CREATE TABLE "language_servers" (

View file

@ -22,18 +22,19 @@ CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id");
CREATE TABLE "worktree_entries" ( CREATE TABLE "worktree_entries" (
"project_id" INTEGER NOT NULL, "project_id" INTEGER NOT NULL,
"worktree_id" INTEGER NOT NULL, "worktree_id" INT8 NOT NULL,
"id" INTEGER NOT NULL, "id" INTEGER NOT NULL,
"is_dir" BOOL NOT NULL, "is_dir" BOOL NOT NULL,
"path" VARCHAR NOT NULL, "path" VARCHAR NOT NULL,
"inode" INT8 NOT NULL, "inode" INT8 NOT NULL,
"mtime_seconds" INTEGER NOT NULL, "mtime_seconds" INT8 NOT NULL,
"mtime_nanos" INTEGER NOT NULL, "mtime_nanos" INTEGER NOT NULL,
"is_symlink" BOOL NOT NULL, "is_symlink" BOOL NOT NULL,
"is_ignored" BOOL NOT NULL, "is_ignored" BOOL NOT NULL,
PRIMARY KEY(project_id, worktree_id, id), PRIMARY KEY(project_id, worktree_id, id),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
); );
CREATE INDEX "index_worktree_entries_on_project_id" ON "worktree_entries" ("project_id");
CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id"); CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id");
CREATE TABLE "worktree_diagnostic_summaries" ( CREATE TABLE "worktree_diagnostic_summaries" (
@ -46,6 +47,7 @@ CREATE TABLE "worktree_diagnostic_summaries" (
PRIMARY KEY(project_id, worktree_id, path), PRIMARY KEY(project_id, worktree_id, path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
); );
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id" ON "worktree_diagnostic_summaries" ("project_id");
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id"); CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id");
CREATE TABLE "language_servers" ( CREATE TABLE "language_servers" (

View file

@ -1,5 +1,6 @@
mod access_token; mod access_token;
mod contact; mod contact;
mod language_server;
mod project; mod project;
mod project_collaborator; mod project_collaborator;
mod room; mod room;
@ -9,6 +10,7 @@ mod signup;
mod tests; mod tests;
mod user; mod user;
mod worktree; mod worktree;
mod worktree_diagnostic_summary;
mod worktree_entry; mod worktree_entry;
use crate::{Error, Result}; use crate::{Error, Result};
@ -1493,7 +1495,7 @@ impl Database {
.await?; .await?;
worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel { worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel {
id: ActiveValue::set(WorktreeId(worktree.id as u32)), id: ActiveValue::set(worktree.id as i64),
project_id: ActiveValue::set(project.id), project_id: ActiveValue::set(project.id),
abs_path: ActiveValue::set(worktree.abs_path.clone()), abs_path: ActiveValue::set(worktree.abs_path.clone()),
root_name: ActiveValue::set(worktree.root_name.clone()), root_name: ActiveValue::set(worktree.root_name.clone()),
@ -1563,7 +1565,7 @@ impl Database {
.ok_or_else(|| anyhow!("no such project"))?; .ok_or_else(|| anyhow!("no such project"))?;
worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel { worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel {
id: ActiveValue::set(WorktreeId(worktree.id as u32)), id: ActiveValue::set(worktree.id as i64),
project_id: ActiveValue::set(project.id), project_id: ActiveValue::set(project.id),
abs_path: ActiveValue::set(worktree.abs_path.clone()), abs_path: ActiveValue::set(worktree.abs_path.clone()),
root_name: ActiveValue::set(worktree.root_name.clone()), root_name: ActiveValue::set(worktree.root_name.clone()),
@ -1576,11 +1578,8 @@ impl Database {
worktree::Entity::delete_many() worktree::Entity::delete_many()
.filter( .filter(
worktree::Column::ProjectId.eq(project.id).and( worktree::Column::ProjectId.eq(project.id).and(
worktree::Column::Id.is_not_in( worktree::Column::Id
worktrees .is_not_in(worktrees.iter().map(|worktree| worktree.id as i64)),
.iter()
.map(|worktree| WorktreeId(worktree.id as u32)),
),
), ),
) )
.exec(&tx) .exec(&tx)
@ -1601,7 +1600,7 @@ impl Database {
) -> Result<RoomGuard<Vec<ConnectionId>>> { ) -> Result<RoomGuard<Vec<ConnectionId>>> {
self.transact(|tx| async move { self.transact(|tx| async move {
let project_id = ProjectId::from_proto(update.project_id); let project_id = ProjectId::from_proto(update.project_id);
let worktree_id = WorktreeId::from_proto(update.worktree_id); let worktree_id = update.worktree_id as i64;
// Ensure the update comes from the host. // Ensure the update comes from the host.
let project = project::Entity::find_by_id(project_id) let project = project::Entity::find_by_id(project_id)
@ -1609,13 +1608,14 @@ impl Database {
.one(&tx) .one(&tx)
.await? .await?
.ok_or_else(|| anyhow!("no such project"))?; .ok_or_else(|| anyhow!("no such project"))?;
let room_id = project.room_id;
// Update metadata. // Update metadata.
worktree::Entity::update(worktree::ActiveModel { worktree::Entity::update(worktree::ActiveModel {
id: ActiveValue::set(worktree_id), id: ActiveValue::set(worktree_id),
project_id: ActiveValue::set(project_id), project_id: ActiveValue::set(project_id),
root_name: ActiveValue::set(update.root_name.clone()), root_name: ActiveValue::set(update.root_name.clone()),
scan_id: ActiveValue::set(update.scan_id as u32), scan_id: ActiveValue::set(update.scan_id as i64),
is_complete: ActiveValue::set(update.is_last_update), is_complete: ActiveValue::set(update.is_last_update),
abs_path: ActiveValue::set(update.abs_path.clone()), abs_path: ActiveValue::set(update.abs_path.clone()),
..Default::default() ..Default::default()
@ -1623,76 +1623,57 @@ impl Database {
.exec(&tx) .exec(&tx)
.await?; .await?;
// if !update.updated_entries.is_empty() { worktree_entry::Entity::insert_many(update.updated_entries.iter().map(|entry| {
// let mut params = let mtime = entry.mtime.clone().unwrap_or_default();
// "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?),".repeat(update.updated_entries.len()); worktree_entry::ActiveModel {
// params.pop(); project_id: ActiveValue::set(project_id),
worktree_id: ActiveValue::set(worktree_id),
id: ActiveValue::set(entry.id as i64),
is_dir: ActiveValue::set(entry.is_dir),
path: ActiveValue::set(entry.path.clone()),
inode: ActiveValue::set(entry.inode as i64),
mtime_seconds: ActiveValue::set(mtime.seconds as i64),
mtime_nanos: ActiveValue::set(mtime.nanos),
is_symlink: ActiveValue::set(entry.is_symlink),
is_ignored: ActiveValue::set(entry.is_ignored),
}
}))
.on_conflict(
OnConflict::columns([
worktree_entry::Column::ProjectId,
worktree_entry::Column::WorktreeId,
worktree_entry::Column::Id,
])
.update_columns([
worktree_entry::Column::IsDir,
worktree_entry::Column::Path,
worktree_entry::Column::Inode,
worktree_entry::Column::MtimeSeconds,
worktree_entry::Column::MtimeNanos,
worktree_entry::Column::IsSymlink,
worktree_entry::Column::IsIgnored,
])
.to_owned(),
)
.exec(&tx)
.await?;
// let query = format!( worktree_entry::Entity::delete_many()
// " .filter(
// INSERT INTO worktree_entries ( worktree_entry::Column::ProjectId
// project_id, .eq(project_id)
// worktree_id, .and(worktree_entry::Column::WorktreeId.eq(worktree_id))
// id, .and(
// is_dir, worktree_entry::Column::Id
// path, .is_in(update.removed_entries.iter().map(|id| *id as i64)),
// inode, ),
// mtime_seconds, )
// mtime_nanos, .exec(&tx)
// is_symlink, .await?;
// is_ignored
// )
// VALUES {params}
// ON CONFLICT (project_id, worktree_id, id) DO UPDATE SET
// is_dir = excluded.is_dir,
// path = excluded.path,
// inode = excluded.inode,
// mtime_seconds = excluded.mtime_seconds,
// mtime_nanos = excluded.mtime_nanos,
// is_symlink = excluded.is_symlink,
// is_ignored = excluded.is_ignored
// "
// );
// let mut query = sqlx::query(&query);
// for entry in &update.updated_entries {
// let mtime = entry.mtime.clone().unwrap_or_default();
// query = query
// .bind(project_id)
// .bind(worktree_id)
// .bind(entry.id as i64)
// .bind(entry.is_dir)
// .bind(&entry.path)
// .bind(entry.inode as i64)
// .bind(mtime.seconds as i64)
// .bind(mtime.nanos as i32)
// .bind(entry.is_symlink)
// .bind(entry.is_ignored);
// }
// query.execute(&mut tx).await?;
// }
// if !update.removed_entries.is_empty() { let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
// let mut params = "?,".repeat(update.removed_entries.len()); self.commit_room_transaction(room_id, tx, connection_ids)
// params.pop(); .await
// let query = format!(
// "
// DELETE FROM worktree_entries
// WHERE project_id = ? AND worktree_id = ? AND id IN ({params})
// "
// );
// let mut query = sqlx::query(&query).bind(project_id).bind(worktree_id);
// for entry_id in &update.removed_entries {
// query = query.bind(*entry_id as i64);
// }
// query.execute(&mut tx).await?;
// }
// let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?;
// self.commit_room_transaction(room_id, tx, connection_ids)
// .await
todo!()
}) })
.await .await
} }
@ -1703,57 +1684,51 @@ impl Database {
connection_id: ConnectionId, connection_id: ConnectionId,
) -> Result<RoomGuard<Vec<ConnectionId>>> { ) -> Result<RoomGuard<Vec<ConnectionId>>> {
self.transact(|tx| async { self.transact(|tx| async {
todo!() let project_id = ProjectId::from_proto(update.project_id);
// let project_id = ProjectId::from_proto(update.project_id); let worktree_id = update.worktree_id as i64;
// let worktree_id = WorktreeId::from_proto(update.worktree_id); let summary = update
// let summary = update .summary
// .summary .as_ref()
// .as_ref() .ok_or_else(|| anyhow!("invalid summary"))?;
// .ok_or_else(|| anyhow!("invalid summary"))?;
// // Ensure the update comes from the host. // Ensure the update comes from the host.
// let room_id: RoomId = sqlx::query_scalar( let project = project::Entity::find_by_id(project_id)
// " .one(&tx)
// SELECT room_id .await?
// FROM projects .ok_or_else(|| anyhow!("no such project"))?;
// WHERE id = $1 AND host_connection_id = $2 if project.host_connection_id != connection_id.0 {
// ", return Err(anyhow!("can't update a project hosted by someone else"))?;
// ) }
// .bind(project_id)
// .bind(connection_id.0 as i32)
// .fetch_one(&mut tx)
// .await?;
// // Update summary. // Update summary.
// sqlx::query( worktree_diagnostic_summary::Entity::insert(worktree_diagnostic_summary::ActiveModel {
// " project_id: ActiveValue::set(project_id),
// INSERT INTO worktree_diagnostic_summaries ( worktree_id: ActiveValue::set(worktree_id),
// project_id, path: ActiveValue::set(summary.path.clone()),
// worktree_id, language_server_id: ActiveValue::set(summary.language_server_id as i64),
// path, error_count: ActiveValue::set(summary.error_count),
// language_server_id, warning_count: ActiveValue::set(summary.warning_count),
// error_count, ..Default::default()
// warning_count })
// ) .on_conflict(
// VALUES ($1, $2, $3, $4, $5, $6) OnConflict::columns([
// ON CONFLICT (project_id, worktree_id, path) DO UPDATE SET worktree_diagnostic_summary::Column::ProjectId,
// language_server_id = excluded.language_server_id, worktree_diagnostic_summary::Column::WorktreeId,
// error_count = excluded.error_count, worktree_diagnostic_summary::Column::Path,
// warning_count = excluded.warning_count ])
// ", .update_columns([
// ) worktree_diagnostic_summary::Column::LanguageServerId,
// .bind(project_id) worktree_diagnostic_summary::Column::ErrorCount,
// .bind(worktree_id) worktree_diagnostic_summary::Column::WarningCount,
// .bind(&summary.path) ])
// .bind(summary.language_server_id as i64) .to_owned(),
// .bind(summary.error_count as i32) )
// .bind(summary.warning_count as i32) .exec(&tx)
// .execute(&mut tx) .await?;
// .await?;
// let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
// self.commit_room_transaction(room_id, tx, connection_ids) self.commit_room_transaction(project.room_id, tx, connection_ids)
// .await .await
}) })
.await .await
} }
@ -1764,44 +1739,42 @@ impl Database {
connection_id: ConnectionId, connection_id: ConnectionId,
) -> Result<RoomGuard<Vec<ConnectionId>>> { ) -> Result<RoomGuard<Vec<ConnectionId>>> {
self.transact(|tx| async { self.transact(|tx| async {
todo!() let project_id = ProjectId::from_proto(update.project_id);
// let project_id = ProjectId::from_proto(update.project_id); let server = update
// let server = update .server
// .server .as_ref()
// .as_ref() .ok_or_else(|| anyhow!("invalid language server"))?;
// .ok_or_else(|| anyhow!("invalid language server"))?;
// // Ensure the update comes from the host. // Ensure the update comes from the host.
// let room_id: RoomId = sqlx::query_scalar( let project = project::Entity::find_by_id(project_id)
// " .one(&tx)
// SELECT room_id .await?
// FROM projects .ok_or_else(|| anyhow!("no such project"))?;
// WHERE id = $1 AND host_connection_id = $2 if project.host_connection_id != connection_id.0 {
// ", return Err(anyhow!("can't update a project hosted by someone else"))?;
// ) }
// .bind(project_id)
// .bind(connection_id.0 as i32)
// .fetch_one(&mut tx)
// .await?;
// // Add the newly-started language server. // Add the newly-started language server.
// sqlx::query( language_server::Entity::insert(language_server::ActiveModel {
// " project_id: ActiveValue::set(project_id),
// INSERT INTO language_servers (project_id, id, name) id: ActiveValue::set(server.id as i64),
// VALUES ($1, $2, $3) name: ActiveValue::set(server.name.clone()),
// ON CONFLICT (project_id, id) DO UPDATE SET ..Default::default()
// name = excluded.name })
// ", .on_conflict(
// ) OnConflict::columns([
// .bind(project_id) language_server::Column::ProjectId,
// .bind(server.id as i64) language_server::Column::Id,
// .bind(&server.name) ])
// .execute(&mut tx) .update_column(language_server::Column::Name)
// .await?; .to_owned(),
)
.exec(&tx)
.await?;
// let connection_ids = self.get_guest_connection_ids(project_id, &mut tx).await?; let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
// self.commit_room_transaction(room_id, tx, connection_ids) self.commit_room_transaction(project.room_id, tx, connection_ids)
// .await .await
}) })
.await .await
} }
@ -1812,194 +1785,135 @@ impl Database {
connection_id: ConnectionId, connection_id: ConnectionId,
) -> Result<RoomGuard<(Project, ReplicaId)>> { ) -> Result<RoomGuard<(Project, ReplicaId)>> {
self.transact(|tx| async move { self.transact(|tx| async move {
todo!() let participant = room_participant::Entity::find()
// let (room_id, user_id) = sqlx::query_as::<_, (RoomId, UserId)>( .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0))
// " .one(&tx)
// SELECT room_id, user_id .await?
// FROM room_participants .ok_or_else(|| anyhow!("must join a room first"))?;
// WHERE answering_connection_id = $1
// ",
// )
// .bind(connection_id.0 as i32)
// .fetch_one(&mut tx)
// .await?;
// // Ensure project id was shared on this room. let project = project::Entity::find_by_id(project_id)
// sqlx::query( .one(&tx)
// " .await?
// SELECT 1 .ok_or_else(|| anyhow!("no such project"))?;
// FROM projects if project.room_id != participant.room_id {
// WHERE id = $1 AND room_id = $2 return Err(anyhow!("no such project"))?;
// ", }
// )
// .bind(project_id)
// .bind(room_id)
// .fetch_one(&mut tx)
// .await?;
// let mut collaborators = sqlx::query_as::<_, ProjectCollaborator>( let mut collaborators = project
// " .find_related(project_collaborator::Entity)
// SELECT * .all(&tx)
// FROM project_collaborators .await?;
// WHERE project_id = $1 let replica_ids = collaborators
// ", .iter()
// ) .map(|c| c.replica_id)
// .bind(project_id) .collect::<HashSet<_>>();
// .fetch_all(&mut tx) let mut replica_id = ReplicaId(1);
// .await?; while replica_ids.contains(&replica_id) {
// let replica_ids = collaborators replica_id.0 += 1;
// .iter() }
// .map(|c| c.replica_id) let new_collaborator = project_collaborator::ActiveModel {
// .collect::<HashSet<_>>(); project_id: ActiveValue::set(project_id),
// let mut replica_id = ReplicaId(1); connection_id: ActiveValue::set(connection_id.0),
// while replica_ids.contains(&replica_id) { user_id: ActiveValue::set(participant.user_id),
// replica_id.0 += 1; replica_id: ActiveValue::set(replica_id),
// } is_host: ActiveValue::set(false),
// let new_collaborator = ProjectCollaborator { ..Default::default()
// project_id, }
// connection_id: connection_id.0 as i32, .insert(&tx)
// user_id, .await?;
// replica_id, collaborators.push(new_collaborator);
// is_host: false,
// };
// sqlx::query( let db_worktrees = project.find_related(worktree::Entity).all(&tx).await?;
// " let mut worktrees = db_worktrees
// INSERT INTO project_collaborators ( .into_iter()
// project_id, .map(|db_worktree| {
// connection_id, (
// user_id, db_worktree.id as u64,
// replica_id, Worktree {
// is_host id: db_worktree.id as u64,
// ) abs_path: db_worktree.abs_path,
// VALUES ($1, $2, $3, $4, $5) root_name: db_worktree.root_name,
// ", visible: db_worktree.visible,
// ) entries: Default::default(),
// .bind(new_collaborator.project_id) diagnostic_summaries: Default::default(),
// .bind(new_collaborator.connection_id) scan_id: db_worktree.scan_id as u64,
// .bind(new_collaborator.user_id) is_complete: db_worktree.is_complete,
// .bind(new_collaborator.replica_id) },
// .bind(new_collaborator.is_host) )
// .execute(&mut tx) })
// .await?; .collect::<BTreeMap<_, _>>();
// collaborators.push(new_collaborator);
// let worktree_rows = sqlx::query_as::<_, WorktreeRow>( // Populate worktree entries.
// " {
// SELECT * let mut db_entries = worktree_entry::Entity::find()
// FROM worktrees .filter(worktree_entry::Column::ProjectId.eq(project_id))
// WHERE project_id = $1 .stream(&tx)
// ", .await?;
// ) while let Some(db_entry) = db_entries.next().await {
// .bind(project_id) let db_entry = db_entry?;
// .fetch_all(&mut tx) if let Some(worktree) = worktrees.get_mut(&(db_entry.worktree_id as u64)) {
// .await?; worktree.entries.push(proto::Entry {
// let mut worktrees = worktree_rows id: db_entry.id as u64,
// .into_iter() is_dir: db_entry.is_dir,
// .map(|worktree_row| { path: db_entry.path,
// ( inode: db_entry.inode as u64,
// worktree_row.id, mtime: Some(proto::Timestamp {
// Worktree { seconds: db_entry.mtime_seconds as u64,
// id: worktree_row.id, nanos: db_entry.mtime_nanos,
// abs_path: worktree_row.abs_path, }),
// root_name: worktree_row.root_name, is_symlink: db_entry.is_symlink,
// visible: worktree_row.visible, is_ignored: db_entry.is_ignored,
// entries: Default::default(), });
// diagnostic_summaries: Default::default(), }
// scan_id: worktree_row.scan_id as u64, }
// is_complete: worktree_row.is_complete, }
// },
// )
// })
// .collect::<BTreeMap<_, _>>();
// // Populate worktree entries. // Populate worktree diagnostic summaries.
// { {
// let mut entries = sqlx::query_as::<_, WorktreeEntry>( let mut db_summaries = worktree_diagnostic_summary::Entity::find()
// " .filter(worktree_diagnostic_summary::Column::ProjectId.eq(project_id))
// SELECT * .stream(&tx)
// FROM worktree_entries .await?;
// WHERE project_id = $1 while let Some(db_summary) = db_summaries.next().await {
// ", let db_summary = db_summary?;
// ) if let Some(worktree) = worktrees.get_mut(&(db_summary.worktree_id as u64)) {
// .bind(project_id) worktree
// .fetch(&mut tx); .diagnostic_summaries
// while let Some(entry) = entries.next().await { .push(proto::DiagnosticSummary {
// let entry = entry?; path: db_summary.path,
// if let Some(worktree) = worktrees.get_mut(&entry.worktree_id) { language_server_id: db_summary.language_server_id as u64,
// worktree.entries.push(proto::Entry { error_count: db_summary.error_count as u32,
// id: entry.id as u64, warning_count: db_summary.warning_count as u32,
// is_dir: entry.is_dir, });
// path: entry.path, }
// inode: entry.inode as u64, }
// mtime: Some(proto::Timestamp { }
// seconds: entry.mtime_seconds as u64,
// nanos: entry.mtime_nanos as u32,
// }),
// is_symlink: entry.is_symlink,
// is_ignored: entry.is_ignored,
// });
// }
// }
// }
// // Populate worktree diagnostic summaries. // Populate language servers.
// { let language_servers = project
// let mut summaries = sqlx::query_as::<_, WorktreeDiagnosticSummary>( .find_related(language_server::Entity)
// " .all(&tx)
// SELECT * .await?;
// FROM worktree_diagnostic_summaries
// WHERE project_id = $1
// ",
// )
// .bind(project_id)
// .fetch(&mut tx);
// while let Some(summary) = summaries.next().await {
// let summary = summary?;
// if let Some(worktree) = worktrees.get_mut(&summary.worktree_id) {
// worktree
// .diagnostic_summaries
// .push(proto::DiagnosticSummary {
// path: summary.path,
// language_server_id: summary.language_server_id as u64,
// error_count: summary.error_count as u32,
// warning_count: summary.warning_count as u32,
// });
// }
// }
// }
// // Populate language servers. self.commit_room_transaction(
// let language_servers = sqlx::query_as::<_, LanguageServer>( project.room_id,
// " tx,
// SELECT * (
// FROM language_servers Project {
// WHERE project_id = $1 collaborators,
// ", worktrees,
// ) language_servers: language_servers
// .bind(project_id) .into_iter()
// .fetch_all(&mut tx) .map(|language_server| proto::LanguageServer {
// .await?; id: language_server.id as u64,
name: language_server.name,
// self.commit_room_transaction( })
// room_id, .collect(),
// tx, },
// ( replica_id as ReplicaId,
// Project { ),
// collaborators, )
// worktrees, .await
// language_servers: language_servers
// .into_iter()
// .map(|language_server| proto::LanguageServer {
// id: language_server.id.to_proto(),
// name: language_server.name,
// })
// .collect(),
// },
// replica_id as ReplicaId,
// ),
// )
// .await
}) })
.await .await
} }
@ -2010,59 +1924,42 @@ impl Database {
connection_id: ConnectionId, connection_id: ConnectionId,
) -> Result<RoomGuard<LeftProject>> { ) -> Result<RoomGuard<LeftProject>> {
self.transact(|tx| async move { self.transact(|tx| async move {
todo!() let result = project_collaborator::Entity::delete_many()
// let result = sqlx::query( .filter(
// " project_collaborator::Column::ProjectId
// DELETE FROM project_collaborators .eq(project_id)
// WHERE project_id = $1 AND connection_id = $2 .and(project_collaborator::Column::ConnectionId.eq(connection_id.0)),
// ", )
// ) .exec(&tx)
// .bind(project_id) .await?;
// .bind(connection_id.0 as i32) if result.rows_affected == 0 {
// .execute(&mut tx) Err(anyhow!("not a collaborator on this project"))?;
// .await?; }
// if result.rows_affected() == 0 { let project = project::Entity::find_by_id(project_id)
// Err(anyhow!("not a collaborator on this project"))?; .one(&tx)
// } .await?
.ok_or_else(|| anyhow!("no such project"))?;
let collaborators = project
.find_related(project_collaborator::Entity)
.all(&tx)
.await?;
let connection_ids = collaborators
.into_iter()
.map(|collaborator| ConnectionId(collaborator.connection_id))
.collect();
// let connection_ids = sqlx::query_scalar::<_, i32>( self.commit_room_transaction(
// " project.room_id,
// SELECT connection_id tx,
// FROM project_collaborators LeftProject {
// WHERE project_id = $1 id: project_id,
// ", host_user_id: project.host_user_id,
// ) host_connection_id: ConnectionId(project.host_connection_id),
// .bind(project_id) connection_ids,
// .fetch_all(&mut tx) },
// .await? )
// .into_iter() .await
// .map(|id| ConnectionId(id as u32))
// .collect();
// let (room_id, host_user_id, host_connection_id) =
// sqlx::query_as::<_, (RoomId, i32, i32)>(
// "
// SELECT room_id, host_user_id, host_connection_id
// FROM projects
// WHERE id = $1
// ",
// )
// .bind(project_id)
// .fetch_one(&mut tx)
// .await?;
// self.commit_room_transaction(
// room_id,
// tx,
// LeftProject {
// id: project_id,
// host_user_id: UserId(host_user_id),
// host_connection_id: ConnectionId(host_connection_id as u32),
// connection_ids,
// },
// )
// .await
}) })
.await .await
} }
@ -2442,8 +2339,6 @@ id_type!(ProjectCollaboratorId);
id_type!(ReplicaId); id_type!(ReplicaId);
id_type!(SignupId); id_type!(SignupId);
id_type!(UserId); id_type!(UserId);
id_type!(WorktreeId);
id_type!(WorktreeEntryId);
pub struct LeftRoom { pub struct LeftRoom {
pub room: proto::Room, pub room: proto::Room,
@ -2453,7 +2348,7 @@ pub struct LeftRoom {
pub struct Project { pub struct Project {
pub collaborators: Vec<project_collaborator::Model>, pub collaborators: Vec<project_collaborator::Model>,
pub worktrees: BTreeMap<WorktreeId, Worktree>, pub worktrees: BTreeMap<u64, Worktree>,
pub language_servers: Vec<proto::LanguageServer>, pub language_servers: Vec<proto::LanguageServer>,
} }
@ -2465,7 +2360,7 @@ pub struct LeftProject {
} }
pub struct Worktree { pub struct Worktree {
pub id: WorktreeId, pub id: u64,
pub abs_path: String, pub abs_path: String,
pub root_name: String, pub root_name: String,
pub visible: bool, pub visible: bool,

View file

@ -0,0 +1,30 @@
use super::ProjectId;
use sea_orm::entity::prelude::*;

/// Sea-orm entity for the `language_servers` table.
///
/// Each row records one language server that was started for a shared
/// project. Rows are keyed by the owning project plus the server's id,
/// so the same server id may appear under different projects.
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "language_servers")]
pub struct Model {
    /// Owning project (first half of the composite primary key).
    #[sea_orm(primary_key)]
    pub project_id: ProjectId,
    /// Language-server id, unique within the project (second half of the
    /// composite primary key).
    /// NOTE(review): stored as `i64`; the wire protocol appears to use
    /// `u64` ids, with `as` casts at the call sites — confirm the range
    /// assumption holds.
    #[sea_orm(primary_key)]
    pub id: i64,
    /// Human-readable server name as reported by the host.
    pub name: String,
}

/// Relations reachable from a language-server row.
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    /// Each language server belongs to exactly one project.
    #[sea_orm(
        belongs_to = "super::project::Entity",
        from = "Column::ProjectId",
        to = "super::project::Column::Id"
    )]
    Project,
}

impl Related<super::project::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Project.def()
    }
}

// No custom hooks: default insert/update/delete behavior.
impl ActiveModelBehavior for ActiveModel {}

View file

@ -26,7 +26,11 @@ pub enum Relation {
)] )]
Room, Room,
#[sea_orm(has_many = "super::worktree::Entity")] #[sea_orm(has_many = "super::worktree::Entity")]
Worktree, Worktrees,
#[sea_orm(has_many = "super::project_collaborator::Entity")]
Collaborators,
#[sea_orm(has_many = "super::language_server::Entity")]
LanguageServers,
} }
impl Related<super::user::Entity> for Entity { impl Related<super::user::Entity> for Entity {
@ -43,7 +47,19 @@ impl Related<super::room::Entity> for Entity {
impl Related<super::worktree::Entity> for Entity { impl Related<super::worktree::Entity> for Entity {
fn to() -> RelationDef { fn to() -> RelationDef {
Relation::Worktree.def() Relation::Worktrees.def()
}
}
impl Related<super::project_collaborator::Entity> for Entity {
fn to() -> RelationDef {
Relation::Collaborators.def()
}
}
impl Related<super::language_server::Entity> for Entity {
fn to() -> RelationDef {
Relation::LanguageServers.def()
} }
} }

View file

@ -14,6 +14,19 @@ pub struct Model {
} }
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {} pub enum Relation {
#[sea_orm(
belongs_to = "super::project::Entity",
from = "Column::ProjectId",
to = "super::project::Column::Id"
)]
Project,
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::Project.def()
}
}
impl ActiveModelBehavior for ActiveModel {} impl ActiveModelBehavior for ActiveModel {}

View file

@ -1,17 +1,17 @@
use super::{ProjectId, WorktreeId}; use super::ProjectId;
use sea_orm::entity::prelude::*; use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktrees")] #[sea_orm(table_name = "worktrees")]
pub struct Model { pub struct Model {
#[sea_orm(primary_key)] #[sea_orm(primary_key)]
pub id: WorktreeId, pub id: i64,
#[sea_orm(primary_key)] #[sea_orm(primary_key)]
pub project_id: ProjectId, pub project_id: ProjectId,
pub abs_path: String, pub abs_path: String,
pub root_name: String, pub root_name: String,
pub visible: bool, pub visible: bool,
pub scan_id: u32, pub scan_id: i64,
pub is_complete: bool, pub is_complete: bool,
} }

View file

@ -0,0 +1,21 @@
use super::ProjectId;
use sea_orm::entity::prelude::*;

/// Sea-orm entity for the `worktree_diagnostic_summaries` table.
///
/// One row per (project, worktree, path) triple, holding the latest
/// diagnostic counts reported by a language server for that path.
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktree_diagnostic_summaries")]
pub struct Model {
    /// Owning project (part of the composite primary key).
    #[sea_orm(primary_key)]
    pub project_id: ProjectId,
    /// Worktree within the project (part of the composite primary key).
    #[sea_orm(primary_key)]
    pub worktree_id: i64,
    /// Worktree-relative path the summary applies to (part of the
    /// composite primary key).
    #[sea_orm(primary_key)]
    pub path: String,
    /// Id of the language server that produced these diagnostics.
    pub language_server_id: i64,
    /// Number of error-severity diagnostics for this path.
    pub error_count: u32,
    /// Number of warning-severity diagnostics for this path.
    pub warning_count: u32,
}

/// No relations are declared for this entity.
/// NOTE(review): project/worktree FKs exist at the SQL level but are not
/// modeled here — add a `belongs_to` relation if joins are needed.
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}

// No custom hooks: default insert/update/delete behavior.
impl ActiveModelBehavior for ActiveModel {}

View file

@ -1,23 +1,25 @@
use super::{ProjectId, WorktreeEntryId, WorktreeId}; use super::ProjectId;
use sea_orm::entity::prelude::*; use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktree_entries")] #[sea_orm(table_name = "worktree_entries")]
pub struct Model { pub struct Model {
#[sea_orm(primary_key)] #[sea_orm(primary_key)]
project_id: ProjectId, pub project_id: ProjectId,
#[sea_orm(primary_key)] #[sea_orm(primary_key)]
worktree_id: WorktreeId, pub worktree_id: i64,
#[sea_orm(primary_key)] #[sea_orm(primary_key)]
id: WorktreeEntryId, pub id: i64,
is_dir: bool, pub is_dir: bool,
path: String, pub path: String,
inode: u64, pub inode: i64,
mtime_seconds: u64, pub mtime_seconds: i64,
mtime_nanos: u32, pub mtime_nanos: u32,
is_symlink: bool, pub is_symlink: bool,
is_ignored: bool, pub is_ignored: bool,
} }
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {} pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -1019,7 +1019,7 @@ async fn join_project(
.worktrees .worktrees
.iter() .iter()
.map(|(id, worktree)| proto::WorktreeMetadata { .map(|(id, worktree)| proto::WorktreeMetadata {
id: id.to_proto(), id: *id,
root_name: worktree.root_name.clone(), root_name: worktree.root_name.clone(),
visible: worktree.visible, visible: worktree.visible,
abs_path: worktree.abs_path.clone(), abs_path: worktree.abs_path.clone(),
@ -1060,7 +1060,7 @@ async fn join_project(
// Stream this worktree's entries. // Stream this worktree's entries.
let message = proto::UpdateWorktree { let message = proto::UpdateWorktree {
project_id: project_id.to_proto(), project_id: project_id.to_proto(),
worktree_id: worktree_id.to_proto(), worktree_id,
abs_path: worktree.abs_path.clone(), abs_path: worktree.abs_path.clone(),
root_name: worktree.root_name, root_name: worktree.root_name,
updated_entries: worktree.entries, updated_entries: worktree.entries,
@ -1078,7 +1078,7 @@ async fn join_project(
session.connection_id, session.connection_id,
proto::UpdateDiagnosticSummary { proto::UpdateDiagnosticSummary {
project_id: project_id.to_proto(), project_id: project_id.to_proto(),
worktree_id: worktree.id.to_proto(), worktree_id: worktree.id,
summary: Some(summary), summary: Some(summary),
}, },
)?; )?;