Separate repository state synchronization from worktree synchronization (#27140)

This PR updates our DB schemas and wire protocol to separate the
synchronization of git statuses and other repository state from the
synchronization of worktrees. This paves the way for moving the code
that executes git status updates out of the `worktree` crate and onto
the new `GitStore`. That end goal is motivated by two related points:

- Disentangling git status updates from the worktree's
`BackgroundScanner` will allow us to implement a simpler concurrency
story for those updates, hopefully fixing some known but elusive bugs
(upstream state not updating after push; statuses getting out of sync in
remote projects).
- By moving git repository state to the project-scoped `GitStore`, we
can get rid of the duplication that currently happens when two worktrees
are associated with the same git repository (see the sketch below).
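
Concretely, repository state now travels in dedicated `UpdateRepository` and `RemoveRepository` messages keyed by a project-scoped repository id, instead of riding along inside `UpdateWorktree`. Here is a minimal sketch of the shapes involved, reconstructed from the server handlers in this PR; the field types for branches, statuses, and merge conflicts are assumptions, not the actual generated code:

```rust
// Hypothetical stand-ins for the real proto types referenced below.
pub struct Branch; // branch name plus upstream tracking info
pub struct StatusEntry; // a repo-relative path and its git status codes

// Sketch of the new repository-scoped message (not the generated prost code).
pub struct UpdateRepository {
    pub project_id: u64,
    pub id: u64,                       // repository id, independent of any worktree
    pub abs_path: String,              // the repository's working directory
    pub entry_ids: Vec<u64>,           // project entries currently mapped to this repository
    pub branch_summary: Option<Branch>,
    pub updated_statuses: Vec<StatusEntry>,
    pub removed_statuses: Vec<String>, // repo paths whose statuses were cleared
    pub current_merge_conflicts: Vec<String>,
    pub scan_id: u64,
}

// Tombstone: the repository with this id is gone from the project.
pub struct RemoveRepository {
    pub project_id: u64,
    pub id: u64,
}
```

On the database side, repositories are now keyed by `(project_id, id)` rather than `(project_id, worktree_id, work_directory_id)`, so a repository shared by two worktrees is stored and synchronized once. The new `legacy_worktree_id` column exists only so that updates from old clients, which still send repositories inside `UpdateWorktree`, can be dual-written during the transition.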

Release Notes:

- N/A

---------

Co-authored-by: Max <max@zed.dev>
Co-authored-by: Max Brunsfeld <maxbrunsfeld@gmail.com>
Cole Miller authored on 2025-03-20 18:07:03 -04:00, committed by GitHub
commit bc1c0a2297 (parent 700af63c45)
21 changed files with 1147 additions and 535 deletions

@ -15,9 +15,13 @@ CREATE TABLE "users" (
"github_user_created_at" TIMESTAMP WITHOUT TIME ZONE,
"custom_llm_monthly_allowance_in_cents" INTEGER
);
CREATE UNIQUE INDEX "index_users_github_login" ON "users" ("github_login");
CREATE UNIQUE INDEX "index_invite_code_users" ON "users" ("invite_code");
CREATE INDEX "index_users_on_email_address" ON "users" ("email_address");
CREATE UNIQUE INDEX "index_users_on_github_user_id" ON "users" ("github_user_id");
CREATE TABLE "access_tokens" (
@ -26,6 +30,7 @@ CREATE TABLE "access_tokens" (
"impersonated_user_id" INTEGER REFERENCES users (id),
"hash" VARCHAR(128)
);
CREATE INDEX "index_access_tokens_user_id" ON "access_tokens" ("user_id");
CREATE TABLE "contacts" (
@ -36,7 +41,9 @@ CREATE TABLE "contacts" (
"should_notify" BOOLEAN NOT NULL,
"accepted" BOOLEAN NOT NULL
);
CREATE UNIQUE INDEX "index_contacts_user_ids" ON "contacts" ("user_id_a", "user_id_b");
CREATE INDEX "index_contacts_user_id_b" ON "contacts" ("user_id_b");
CREATE TABLE "rooms" (
@ -45,6 +52,7 @@ CREATE TABLE "rooms" (
"environment" VARCHAR,
"channel_id" INTEGER REFERENCES channels (id) ON DELETE CASCADE
);
CREATE UNIQUE INDEX "index_rooms_on_channel_id" ON "rooms" ("channel_id");
CREATE TABLE "projects" (
@ -55,7 +63,9 @@ CREATE TABLE "projects" (
"host_connection_server_id" INTEGER REFERENCES servers (id) ON DELETE CASCADE,
"unregistered" BOOLEAN NOT NULL DEFAULT FALSE
);
CREATE INDEX "index_projects_on_host_connection_server_id" ON "projects" ("host_connection_server_id");
CREATE INDEX "index_projects_on_host_connection_id_and_host_connection_server_id" ON "projects" ("host_connection_id", "host_connection_server_id");
CREATE TABLE "worktrees" (
@ -67,8 +77,9 @@ CREATE TABLE "worktrees" (
"scan_id" INTEGER NOT NULL,
"is_complete" BOOL NOT NULL DEFAULT FALSE,
"completed_scan_id" INTEGER NOT NULL,
PRIMARY KEY(project_id, id)
PRIMARY KEY (project_id, id)
);
CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id");
CREATE TABLE "worktree_entries" (
@ -87,32 +98,33 @@ CREATE TABLE "worktree_entries" (
"is_deleted" BOOL NOT NULL,
"git_status" INTEGER,
"is_fifo" BOOL NOT NULL,
PRIMARY KEY(project_id, worktree_id, id),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
PRIMARY KEY (project_id, worktree_id, id),
FOREIGN KEY (project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_entries_on_project_id" ON "worktree_entries" ("project_id");
CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id");
CREATE TABLE "worktree_repositories" (
CREATE TABLE "project_repositories" (
"project_id" INTEGER NOT NULL,
"worktree_id" INTEGER NOT NULL,
"work_directory_id" INTEGER NOT NULL,
"abs_path" VARCHAR,
"id" INTEGER NOT NULL,
"entry_ids" VARCHAR,
"legacy_worktree_id" INTEGER,
"branch" VARCHAR,
"scan_id" INTEGER NOT NULL,
"is_deleted" BOOL NOT NULL,
"current_merge_conflicts" VARCHAR,
"branch_summary" VARCHAR,
PRIMARY KEY(project_id, worktree_id, work_directory_id),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE,
FOREIGN KEY(project_id, worktree_id, work_directory_id) REFERENCES worktree_entries (project_id, worktree_id, id) ON DELETE CASCADE
PRIMARY KEY (project_id, id)
);
CREATE INDEX "index_worktree_repositories_on_project_id" ON "worktree_repositories" ("project_id");
CREATE INDEX "index_worktree_repositories_on_project_id_and_worktree_id" ON "worktree_repositories" ("project_id", "worktree_id");
CREATE TABLE "worktree_repository_statuses" (
CREATE INDEX "index_project_repositories_on_project_id" ON "project_repositories" ("project_id");
CREATE TABLE "project_repository_statuses" (
"project_id" INTEGER NOT NULL,
"worktree_id" INT8 NOT NULL,
"work_directory_id" INT8 NOT NULL,
"repository_id" INTEGER NOT NULL,
"repo_path" VARCHAR NOT NULL,
"status" INT8 NOT NULL,
"status_kind" INT4 NOT NULL,
@ -120,13 +132,12 @@ CREATE TABLE "worktree_repository_statuses" (
"second_status" INT4 NULL,
"scan_id" INT8 NOT NULL,
"is_deleted" BOOL NOT NULL,
PRIMARY KEY(project_id, worktree_id, work_directory_id, repo_path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE,
FOREIGN KEY(project_id, worktree_id, work_directory_id) REFERENCES worktree_entries (project_id, worktree_id, id) ON DELETE CASCADE
PRIMARY KEY (project_id, repository_id, repo_path)
);
CREATE INDEX "index_wt_repos_statuses_on_project_id" ON "worktree_repository_statuses" ("project_id");
CREATE INDEX "index_wt_repos_statuses_on_project_id_and_wt_id" ON "worktree_repository_statuses" ("project_id", "worktree_id");
CREATE INDEX "index_wt_repos_statuses_on_project_id_and_wt_id_and_wd_id" ON "worktree_repository_statuses" ("project_id", "worktree_id", "work_directory_id");
CREATE INDEX "index_project_repos_statuses_on_project_id" ON "project_repository_statuses" ("project_id");
CREATE INDEX "index_project_repos_statuses_on_project_id_and_repo_id" ON "project_repository_statuses" ("project_id", "repository_id");
CREATE TABLE "worktree_settings_files" (
"project_id" INTEGER NOT NULL,
@ -134,10 +145,12 @@ CREATE TABLE "worktree_settings_files" (
"path" VARCHAR NOT NULL,
"content" TEXT,
"kind" VARCHAR,
PRIMARY KEY(project_id, worktree_id, path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
PRIMARY KEY (project_id, worktree_id, path),
FOREIGN KEY (project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_settings_files_on_project_id" ON "worktree_settings_files" ("project_id");
CREATE INDEX "index_worktree_settings_files_on_project_id_and_worktree_id" ON "worktree_settings_files" ("project_id", "worktree_id");
CREATE TABLE "worktree_diagnostic_summaries" (
@ -147,18 +160,21 @@ CREATE TABLE "worktree_diagnostic_summaries" (
"language_server_id" INTEGER NOT NULL,
"error_count" INTEGER NOT NULL,
"warning_count" INTEGER NOT NULL,
PRIMARY KEY(project_id, worktree_id, path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
PRIMARY KEY (project_id, worktree_id, path),
FOREIGN KEY (project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id" ON "worktree_diagnostic_summaries" ("project_id");
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id");
CREATE TABLE "language_servers" (
"id" INTEGER NOT NULL,
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"name" VARCHAR NOT NULL,
PRIMARY KEY(project_id, id)
PRIMARY KEY (project_id, id)
);
CREATE INDEX "index_language_servers_on_project_id" ON "language_servers" ("project_id");
CREATE TABLE "project_collaborators" (
@ -170,11 +186,20 @@ CREATE TABLE "project_collaborators" (
"replica_id" INTEGER NOT NULL,
"is_host" BOOLEAN NOT NULL
);
CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id");
CREATE INDEX "index_project_collaborators_on_connection_server_id" ON "project_collaborators" ("connection_server_id");
CREATE INDEX "index_project_collaborators_on_connection_id" ON "project_collaborators" ("connection_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_server_id" ON "project_collaborators" ("project_id", "connection_id", "connection_server_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_server_id" ON "project_collaborators" (
"project_id",
"connection_id",
"connection_server_id"
);
CREATE TABLE "room_participants" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
@ -193,12 +218,21 @@ CREATE TABLE "room_participants" (
"role" TEXT,
"in_call" BOOLEAN NOT NULL DEFAULT FALSE
);
CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id");
CREATE INDEX "index_room_participants_on_room_id" ON "room_participants" ("room_id");
CREATE INDEX "index_room_participants_on_answering_connection_server_id" ON "room_participants" ("answering_connection_server_id");
CREATE INDEX "index_room_participants_on_calling_connection_server_id" ON "room_participants" ("calling_connection_server_id");
CREATE INDEX "index_room_participants_on_answering_connection_id" ON "room_participants" ("answering_connection_id");
CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_server_id" ON "room_participants" ("answering_connection_id", "answering_connection_server_id");
CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_server_id" ON "room_participants" (
"answering_connection_id",
"answering_connection_server_id"
);
CREATE TABLE "servers" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
@ -214,9 +248,15 @@ CREATE TABLE "followers" (
"follower_connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"follower_connection_id" INTEGER NOT NULL
);
CREATE UNIQUE INDEX
"index_followers_on_project_id_and_leader_connection_server_id_and_leader_connection_id_and_follower_connection_server_id_and_follower_connection_id"
ON "followers" ("project_id", "leader_connection_server_id", "leader_connection_id", "follower_connection_server_id", "follower_connection_id");
CREATE UNIQUE INDEX "index_followers_on_project_id_and_leader_connection_server_id_and_leader_connection_id_and_follower_connection_server_id_and_follower_connection_id" ON "followers" (
"project_id",
"leader_connection_server_id",
"leader_connection_id",
"follower_connection_server_id",
"follower_connection_id"
);
CREATE INDEX "index_followers_on_room_id" ON "followers" ("room_id");
CREATE TABLE "channels" (
@ -237,6 +277,7 @@ CREATE TABLE IF NOT EXISTS "channel_chat_participants" (
"connection_id" INTEGER NOT NULL,
"connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE
);
CREATE INDEX "index_channel_chat_participants_on_channel_id" ON "channel_chat_participants" ("channel_id");
CREATE TABLE IF NOT EXISTS "channel_messages" (
@ -249,7 +290,9 @@ CREATE TABLE IF NOT EXISTS "channel_messages" (
"nonce" BLOB NOT NULL,
"reply_to_message_id" INTEGER DEFAULT NULL
);
CREATE INDEX "index_channel_messages_on_channel_id" ON "channel_messages" ("channel_id");
CREATE UNIQUE INDEX "index_channel_messages_on_sender_id_nonce" ON "channel_messages" ("sender_id", "nonce");
CREATE TABLE "channel_message_mentions" (
@ -257,7 +300,7 @@ CREATE TABLE "channel_message_mentions" (
"start_offset" INTEGER NOT NULL,
"end_offset" INTEGER NOT NULL,
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
PRIMARY KEY(message_id, start_offset)
PRIMARY KEY (message_id, start_offset)
);
CREATE TABLE "channel_members" (
@ -288,7 +331,7 @@ CREATE TABLE "buffer_operations" (
"replica_id" INTEGER NOT NULL,
"lamport_timestamp" INTEGER NOT NULL,
"value" BLOB NOT NULL,
PRIMARY KEY(buffer_id, epoch, lamport_timestamp, replica_id)
PRIMARY KEY (buffer_id, epoch, lamport_timestamp, replica_id)
);
CREATE TABLE "buffer_snapshots" (
@ -296,7 +339,7 @@ CREATE TABLE "buffer_snapshots" (
"epoch" INTEGER NOT NULL,
"text" TEXT NOT NULL,
"operation_serialization_version" INTEGER NOT NULL,
PRIMARY KEY(buffer_id, epoch)
PRIMARY KEY (buffer_id, epoch)
);
CREATE TABLE "channel_buffer_collaborators" (
@ -310,11 +353,18 @@ CREATE TABLE "channel_buffer_collaborators" (
);
CREATE INDEX "index_channel_buffer_collaborators_on_channel_id" ON "channel_buffer_collaborators" ("channel_id");
CREATE UNIQUE INDEX "index_channel_buffer_collaborators_on_channel_id_and_replica_id" ON "channel_buffer_collaborators" ("channel_id", "replica_id");
CREATE INDEX "index_channel_buffer_collaborators_on_connection_server_id" ON "channel_buffer_collaborators" ("connection_server_id");
CREATE INDEX "index_channel_buffer_collaborators_on_connection_id" ON "channel_buffer_collaborators" ("connection_id");
CREATE UNIQUE INDEX "index_channel_buffer_collaborators_on_channel_id_connection_id_and_server_id" ON "channel_buffer_collaborators" ("channel_id", "connection_id", "connection_server_id");
CREATE UNIQUE INDEX "index_channel_buffer_collaborators_on_channel_id_and_replica_id" ON "channel_buffer_collaborators" ("channel_id", "replica_id");
CREATE INDEX "index_channel_buffer_collaborators_on_connection_server_id" ON "channel_buffer_collaborators" ("connection_server_id");
CREATE INDEX "index_channel_buffer_collaborators_on_connection_id" ON "channel_buffer_collaborators" ("connection_id");
CREATE UNIQUE INDEX "index_channel_buffer_collaborators_on_channel_id_connection_id_and_server_id" ON "channel_buffer_collaborators" (
"channel_id",
"connection_id",
"connection_server_id"
);
CREATE TABLE "feature_flags" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
@ -324,7 +374,6 @@ CREATE TABLE "feature_flags" (
CREATE INDEX "index_feature_flags" ON "feature_flags" ("id");
CREATE TABLE "user_features" (
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
"feature_id" INTEGER NOT NULL REFERENCES feature_flags (id) ON DELETE CASCADE,
@ -332,9 +381,10 @@ CREATE TABLE "user_features" (
);
CREATE UNIQUE INDEX "index_user_features_user_id_and_feature_id" ON "user_features" ("user_id", "feature_id");
CREATE INDEX "index_user_features_on_user_id" ON "user_features" ("user_id");
CREATE INDEX "index_user_features_on_feature_id" ON "user_features" ("feature_id");
CREATE INDEX "index_user_features_on_user_id" ON "user_features" ("user_id");
CREATE INDEX "index_user_features_on_feature_id" ON "user_features" ("feature_id");
CREATE TABLE "observed_buffer_edits" (
"user_id" INTEGER NOT NULL REFERENCES users (id) ON DELETE CASCADE,
@ -374,13 +424,10 @@ CREATE TABLE "notifications" (
"response" BOOLEAN
);
CREATE INDEX
"index_notifications_on_recipient_id_is_read_kind_entity_id"
ON "notifications"
("recipient_id", "is_read", "kind", "entity_id");
CREATE INDEX "index_notifications_on_recipient_id_is_read_kind_entity_id" ON "notifications" ("recipient_id", "is_read", "kind", "entity_id");
CREATE TABLE contributors (
user_id INTEGER REFERENCES users(id),
user_id INTEGER REFERENCES users (id),
signed_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (user_id)
);
@ -394,7 +441,7 @@ CREATE TABLE extensions (
);
CREATE TABLE extension_versions (
extension_id INTEGER REFERENCES extensions(id),
extension_id INTEGER REFERENCES extensions (id),
version TEXT NOT NULL,
published_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
authors TEXT NOT NULL,
@ -416,6 +463,7 @@ CREATE TABLE extension_versions (
);
CREATE UNIQUE INDEX "index_extensions_external_id" ON "extensions" ("external_id");
CREATE INDEX "index_extensions_total_download_count" ON "extensions" ("total_download_count");
CREATE TABLE rate_buckets (
@ -424,14 +472,15 @@ CREATE TABLE rate_buckets (
token_count INT NOT NULL,
last_refill TIMESTAMP WITHOUT TIME ZONE NOT NULL,
PRIMARY KEY (user_id, rate_limit_name),
FOREIGN KEY (user_id) REFERENCES users(id)
FOREIGN KEY (user_id) REFERENCES users (id)
);
CREATE INDEX idx_user_id_rate_limit ON rate_buckets (user_id, rate_limit_name);
CREATE TABLE IF NOT EXISTS billing_preferences (
id INTEGER PRIMARY KEY AUTOINCREMENT,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
user_id INTEGER NOT NULL REFERENCES users(id),
user_id INTEGER NOT NULL REFERENCES users (id),
max_monthly_llm_usage_spending_in_cents INTEGER NOT NULL
);
@ -440,18 +489,19 @@ CREATE UNIQUE INDEX "uix_billing_preferences_on_user_id" ON billing_preferences
CREATE TABLE IF NOT EXISTS billing_customers (
id INTEGER PRIMARY KEY AUTOINCREMENT,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
user_id INTEGER NOT NULL REFERENCES users(id),
user_id INTEGER NOT NULL REFERENCES users (id),
has_overdue_invoices BOOLEAN NOT NULL DEFAULT FALSE,
stripe_customer_id TEXT NOT NULL
);
CREATE UNIQUE INDEX "uix_billing_customers_on_user_id" ON billing_customers (user_id);
CREATE UNIQUE INDEX "uix_billing_customers_on_stripe_customer_id" ON billing_customers (stripe_customer_id);
CREATE TABLE IF NOT EXISTS billing_subscriptions (
id INTEGER PRIMARY KEY AUTOINCREMENT,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
billing_customer_id INTEGER NOT NULL REFERENCES billing_customers(id),
billing_customer_id INTEGER NOT NULL REFERENCES billing_customers (id),
stripe_subscription_id TEXT NOT NULL,
stripe_subscription_status TEXT NOT NULL,
stripe_cancel_at TIMESTAMP,
@ -459,6 +509,7 @@ CREATE TABLE IF NOT EXISTS billing_subscriptions (
);
CREATE INDEX "ix_billing_subscriptions_on_billing_customer_id" ON billing_subscriptions (billing_customer_id);
CREATE UNIQUE INDEX "uix_billing_subscriptions_on_stripe_subscription_id" ON billing_subscriptions (stripe_subscription_id);
CREATE TABLE IF NOT EXISTS processed_stripe_events (
@ -479,4 +530,5 @@ CREATE TABLE IF NOT EXISTS "breakpoints" (
"path" TEXT NOT NULL,
"kind" VARCHAR NOT NULL
);
CREATE INDEX "index_breakpoints_on_project_id" ON "breakpoints" ("project_id");

@ -0,0 +1,32 @@
CREATE TABLE "project_repositories" (
"project_id" INTEGER NOT NULL,
"abs_path" VARCHAR,
"id" INT8 NOT NULL,
"legacy_worktree_id" INT8,
"entry_ids" VARCHAR,
"branch" VARCHAR,
"scan_id" INT8 NOT NULL,
"is_deleted" BOOL NOT NULL,
"current_merge_conflicts" VARCHAR,
"branch_summary" VARCHAR,
PRIMARY KEY (project_id, id)
);
CREATE INDEX "index_project_repositories_on_project_id" ON "project_repositories" ("project_id");
CREATE TABLE "project_repository_statuses" (
"project_id" INTEGER NOT NULL,
"repository_id" INT8 NOT NULL,
"repo_path" VARCHAR NOT NULL,
"status" INT8 NOT NULL,
"status_kind" INT4 NOT NULL,
"first_status" INT4 NULL,
"second_status" INT4 NULL,
"scan_id" INT8 NOT NULL,
"is_deleted" BOOL NOT NULL,
PRIMARY KEY (project_id, repository_id, repo_path)
);
CREATE INDEX "index_project_repos_statuses_on_project_id" ON "project_repository_statuses" ("project_id");
CREATE INDEX "index_project_repos_statuses_on_project_id_and_repo_id" ON "project_repository_statuses" ("project_id", "repository_id");

@ -9,6 +9,7 @@ use anyhow::anyhow;
use collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use dashmap::DashMap;
use futures::StreamExt;
use project_repository_statuses::StatusKind;
use rand::{prelude::StdRng, Rng, SeedableRng};
use rpc::ExtensionProvides;
use rpc::{
@ -36,7 +37,6 @@ use std::{
};
use time::PrimitiveDateTime;
use tokio::sync::{Mutex, OwnedMutexGuard};
use worktree_repository_statuses::StatusKind;
use worktree_settings_file::LocalSettingsKind;
#[cfg(test)]
@ -658,6 +658,8 @@ pub struct RejoinedProject {
pub old_connection_id: ConnectionId,
pub collaborators: Vec<ProjectCollaborator>,
pub worktrees: Vec<RejoinedWorktree>,
pub updated_repositories: Vec<proto::UpdateRepository>,
pub removed_repositories: Vec<u64>,
pub language_servers: Vec<proto::LanguageServer>,
}
@ -726,6 +728,7 @@ pub struct Project {
pub role: ChannelRole,
pub collaborators: Vec<ProjectCollaborator>,
pub worktrees: BTreeMap<u64, Worktree>,
pub repositories: Vec<proto::UpdateRepository>,
pub language_servers: Vec<proto::LanguageServer>,
}
@ -760,7 +763,7 @@ pub struct Worktree {
pub root_name: String,
pub visible: bool,
pub entries: Vec<proto::Entry>,
pub repository_entries: BTreeMap<u64, proto::RepositoryEntry>,
pub legacy_repository_entries: BTreeMap<u64, proto::RepositoryEntry>,
pub diagnostic_summaries: Vec<proto::DiagnosticSummary>,
pub settings_files: Vec<WorktreeSettingsFile>,
pub scan_id: u64,
@ -810,7 +813,7 @@ impl LocalSettingsKind {
}
fn db_status_to_proto(
entry: worktree_repository_statuses::Model,
entry: project_repository_statuses::Model,
) -> anyhow::Result<proto::StatusEntry> {
use proto::git_file_status::{Tracked, Unmerged, Variant};

@ -324,119 +324,135 @@ impl Database {
.await?;
}
if !update.updated_repositories.is_empty() {
worktree_repository::Entity::insert_many(update.updated_repositories.iter().map(
|repository| {
worktree_repository::ActiveModel {
project_id: ActiveValue::set(project_id),
worktree_id: ActiveValue::set(worktree_id),
work_directory_id: ActiveValue::set(
repository.work_directory_id as i64,
),
scan_id: ActiveValue::set(update.scan_id as i64),
branch: ActiveValue::set(repository.branch.clone()),
is_deleted: ActiveValue::set(false),
branch_summary: ActiveValue::Set(
repository
.branch_summary
.as_ref()
.map(|summary| serde_json::to_string(summary).unwrap()),
),
current_merge_conflicts: ActiveValue::Set(Some(
serde_json::to_string(&repository.current_merge_conflicts).unwrap(),
)),
}
},
))
.on_conflict(
OnConflict::columns([
worktree_repository::Column::ProjectId,
worktree_repository::Column::WorktreeId,
worktree_repository::Column::WorkDirectoryId,
])
.update_columns([
worktree_repository::Column::ScanId,
worktree_repository::Column::Branch,
worktree_repository::Column::BranchSummary,
worktree_repository::Column::CurrentMergeConflicts,
])
.to_owned(),
)
.exec(&*tx)
.await?;
// Backward-compatibility for old Zed clients.
//
// Remove this block when Zed 1.80 stable has been out for a week.
{
if !update.updated_repositories.is_empty() {
project_repository::Entity::insert_many(
update.updated_repositories.iter().map(|repository| {
project_repository::ActiveModel {
project_id: ActiveValue::set(project_id),
legacy_worktree_id: ActiveValue::set(Some(worktree_id)),
id: ActiveValue::set(repository.work_directory_id as i64),
scan_id: ActiveValue::set(update.scan_id as i64),
is_deleted: ActiveValue::set(false),
branch_summary: ActiveValue::Set(
repository
.branch_summary
.as_ref()
.map(|summary| serde_json::to_string(summary).unwrap()),
),
current_merge_conflicts: ActiveValue::Set(Some(
serde_json::to_string(&repository.current_merge_conflicts)
.unwrap(),
)),
let has_any_statuses = update
.updated_repositories
.iter()
.any(|repository| !repository.updated_statuses.is_empty());
if has_any_statuses {
worktree_repository_statuses::Entity::insert_many(
update.updated_repositories.iter().flat_map(
|repository: &proto::RepositoryEntry| {
repository.updated_statuses.iter().map(|status_entry| {
let (repo_path, status_kind, first_status, second_status) =
proto_status_to_db(status_entry.clone());
worktree_repository_statuses::ActiveModel {
project_id: ActiveValue::set(project_id),
worktree_id: ActiveValue::set(worktree_id),
work_directory_id: ActiveValue::set(
repository.work_directory_id as i64,
),
scan_id: ActiveValue::set(update.scan_id as i64),
is_deleted: ActiveValue::set(false),
repo_path: ActiveValue::set(repo_path),
status: ActiveValue::set(0),
status_kind: ActiveValue::set(status_kind),
first_status: ActiveValue::set(first_status),
second_status: ActiveValue::set(second_status),
}
})
},
),
// Old clients do not use abs path or entry ids.
abs_path: ActiveValue::set(String::new()),
entry_ids: ActiveValue::set("[]".into()),
}
}),
)
.on_conflict(
OnConflict::columns([
worktree_repository_statuses::Column::ProjectId,
worktree_repository_statuses::Column::WorktreeId,
worktree_repository_statuses::Column::WorkDirectoryId,
worktree_repository_statuses::Column::RepoPath,
project_repository::Column::ProjectId,
project_repository::Column::Id,
])
.update_columns([
worktree_repository_statuses::Column::ScanId,
worktree_repository_statuses::Column::StatusKind,
worktree_repository_statuses::Column::FirstStatus,
worktree_repository_statuses::Column::SecondStatus,
project_repository::Column::ScanId,
project_repository::Column::BranchSummary,
project_repository::Column::CurrentMergeConflicts,
])
.to_owned(),
)
.exec(&*tx)
.await?;
let has_any_statuses = update
.updated_repositories
.iter()
.any(|repository| !repository.updated_statuses.is_empty());
if has_any_statuses {
project_repository_statuses::Entity::insert_many(
update.updated_repositories.iter().flat_map(
|repository: &proto::RepositoryEntry| {
repository.updated_statuses.iter().map(|status_entry| {
let (repo_path, status_kind, first_status, second_status) =
proto_status_to_db(status_entry.clone());
project_repository_statuses::ActiveModel {
project_id: ActiveValue::set(project_id),
repository_id: ActiveValue::set(
repository.work_directory_id as i64,
),
scan_id: ActiveValue::set(update.scan_id as i64),
is_deleted: ActiveValue::set(false),
repo_path: ActiveValue::set(repo_path),
status: ActiveValue::set(0),
status_kind: ActiveValue::set(status_kind),
first_status: ActiveValue::set(first_status),
second_status: ActiveValue::set(second_status),
}
})
},
),
)
.on_conflict(
OnConflict::columns([
project_repository_statuses::Column::ProjectId,
project_repository_statuses::Column::RepositoryId,
project_repository_statuses::Column::RepoPath,
])
.update_columns([
project_repository_statuses::Column::ScanId,
project_repository_statuses::Column::StatusKind,
project_repository_statuses::Column::FirstStatus,
project_repository_statuses::Column::SecondStatus,
])
.to_owned(),
)
.exec(&*tx)
.await?;
}
for repo in &update.updated_repositories {
if !repo.removed_statuses.is_empty() {
project_repository_statuses::Entity::update_many()
.filter(
project_repository_statuses::Column::ProjectId
.eq(project_id)
.and(
project_repository_statuses::Column::RepositoryId
.eq(repo.work_directory_id),
)
.and(
project_repository_statuses::Column::RepoPath
.is_in(repo.removed_statuses.iter()),
),
)
.set(project_repository_statuses::ActiveModel {
is_deleted: ActiveValue::Set(true),
scan_id: ActiveValue::Set(update.scan_id as i64),
..Default::default()
})
.exec(&*tx)
.await?;
}
}
}
let has_any_removed_statuses = update
.updated_repositories
.iter()
.any(|repository| !repository.removed_statuses.is_empty());
if has_any_removed_statuses {
worktree_repository_statuses::Entity::update_many()
if !update.removed_repositories.is_empty() {
project_repository::Entity::update_many()
.filter(
worktree_repository_statuses::Column::ProjectId
project_repository::Column::ProjectId
.eq(project_id)
.and(
worktree_repository_statuses::Column::WorktreeId
.eq(worktree_id),
)
.and(
worktree_repository_statuses::Column::RepoPath.is_in(
update.updated_repositories.iter().flat_map(|repository| {
repository.removed_statuses.iter()
}),
),
),
.and(project_repository::Column::LegacyWorktreeId.eq(worktree_id))
.and(project_repository::Column::Id.is_in(
update.removed_repositories.iter().map(|id| *id as i64),
)),
)
.set(worktree_repository_statuses::ActiveModel {
.set(project_repository::ActiveModel {
is_deleted: ActiveValue::Set(true),
scan_id: ActiveValue::Set(update.scan_id as i64),
..Default::default()
@ -446,18 +462,109 @@ impl Database {
}
}
if !update.removed_repositories.is_empty() {
worktree_repository::Entity::update_many()
let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
Ok(connection_ids)
})
.await
}
pub async fn update_repository(
&self,
update: &proto::UpdateRepository,
_connection: ConnectionId,
) -> Result<TransactionGuard<Vec<ConnectionId>>> {
let project_id = ProjectId::from_proto(update.project_id);
let repository_id = update.id as i64;
self.project_transaction(project_id, |tx| async move {
project_repository::Entity::insert(project_repository::ActiveModel {
project_id: ActiveValue::set(project_id),
id: ActiveValue::set(repository_id),
legacy_worktree_id: ActiveValue::set(None),
abs_path: ActiveValue::set(update.abs_path.clone()),
entry_ids: ActiveValue::Set(serde_json::to_string(&update.entry_ids).unwrap()),
scan_id: ActiveValue::set(update.scan_id as i64),
is_deleted: ActiveValue::set(false),
branch_summary: ActiveValue::Set(
update
.branch_summary
.as_ref()
.map(|summary| serde_json::to_string(summary).unwrap()),
),
current_merge_conflicts: ActiveValue::Set(Some(
serde_json::to_string(&update.current_merge_conflicts).unwrap(),
)),
})
.on_conflict(
OnConflict::columns([
project_repository::Column::ProjectId,
project_repository::Column::Id,
])
.update_columns([
project_repository::Column::ScanId,
project_repository::Column::BranchSummary,
project_repository::Column::EntryIds,
project_repository::Column::AbsPath,
project_repository::Column::CurrentMergeConflicts,
])
.to_owned(),
)
.exec(&*tx)
.await?;
let has_any_statuses = !update.updated_statuses.is_empty();
if has_any_statuses {
project_repository_statuses::Entity::insert_many(
update.updated_statuses.iter().map(|status_entry| {
let (repo_path, status_kind, first_status, second_status) =
proto_status_to_db(status_entry.clone());
project_repository_statuses::ActiveModel {
project_id: ActiveValue::set(project_id),
repository_id: ActiveValue::set(repository_id),
scan_id: ActiveValue::set(update.scan_id as i64),
is_deleted: ActiveValue::set(false),
repo_path: ActiveValue::set(repo_path),
status: ActiveValue::set(0),
status_kind: ActiveValue::set(status_kind),
first_status: ActiveValue::set(first_status),
second_status: ActiveValue::set(second_status),
}
}),
)
.on_conflict(
OnConflict::columns([
project_repository_statuses::Column::ProjectId,
project_repository_statuses::Column::RepositoryId,
project_repository_statuses::Column::RepoPath,
])
.update_columns([
project_repository_statuses::Column::ScanId,
project_repository_statuses::Column::StatusKind,
project_repository_statuses::Column::FirstStatus,
project_repository_statuses::Column::SecondStatus,
])
.to_owned(),
)
.exec(&*tx)
.await?;
}
let has_any_removed_statuses = !update.removed_statuses.is_empty();
if has_any_removed_statuses {
project_repository_statuses::Entity::update_many()
.filter(
worktree_repository::Column::ProjectId
project_repository_statuses::Column::ProjectId
.eq(project_id)
.and(worktree_repository::Column::WorktreeId.eq(worktree_id))
.and(
worktree_repository::Column::WorkDirectoryId
.is_in(update.removed_repositories.iter().map(|id| *id as i64)),
project_repository_statuses::Column::RepositoryId.eq(repository_id),
)
.and(
project_repository_statuses::Column::RepoPath
.is_in(update.removed_statuses.iter()),
),
)
.set(worktree_repository::ActiveModel {
.set(project_repository_statuses::ActiveModel {
is_deleted: ActiveValue::Set(true),
scan_id: ActiveValue::Set(update.scan_id as i64),
..Default::default()
@ -472,6 +579,34 @@ impl Database {
.await
}
pub async fn remove_repository(
&self,
remove: &proto::RemoveRepository,
_connection: ConnectionId,
) -> Result<TransactionGuard<Vec<ConnectionId>>> {
let project_id = ProjectId::from_proto(remove.project_id);
let repository_id = remove.id as i64;
self.project_transaction(project_id, |tx| async move {
project_repository::Entity::update_many()
.filter(
project_repository::Column::ProjectId
.eq(project_id)
.and(project_repository::Column::Id.eq(repository_id)),
)
.set(project_repository::ActiveModel {
is_deleted: ActiveValue::Set(true),
// scan_id: ActiveValue::Set(update.scan_id as i64),
..Default::default()
})
.exec(&*tx)
.await?;
let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
Ok(connection_ids)
})
.await
}
/// Updates the diagnostic summary for the given connection.
pub async fn update_diagnostic_summary(
&self,
@ -703,11 +838,11 @@ impl Database {
root_name: db_worktree.root_name,
visible: db_worktree.visible,
entries: Default::default(),
repository_entries: Default::default(),
diagnostic_summaries: Default::default(),
settings_files: Default::default(),
scan_id: db_worktree.scan_id as u64,
completed_scan_id: db_worktree.completed_scan_id as u64,
legacy_repository_entries: Default::default(),
},
)
})
@ -750,65 +885,77 @@ impl Database {
}
// Populate repository entries.
let mut repositories = Vec::new();
{
let db_repository_entries = worktree_repository::Entity::find()
let db_repository_entries = project_repository::Entity::find()
.filter(
Condition::all()
.add(worktree_repository::Column::ProjectId.eq(project.id))
.add(worktree_repository::Column::IsDeleted.eq(false)),
.add(project_repository::Column::ProjectId.eq(project.id))
.add(project_repository::Column::IsDeleted.eq(false)),
)
.all(tx)
.await?;
for db_repository_entry in db_repository_entries {
if let Some(worktree) = worktrees.get_mut(&(db_repository_entry.worktree_id as u64))
{
let mut repository_statuses = worktree_repository_statuses::Entity::find()
.filter(
Condition::all()
.add(worktree_repository_statuses::Column::ProjectId.eq(project.id))
.add(
worktree_repository_statuses::Column::WorktreeId
.eq(worktree.id),
)
.add(
worktree_repository_statuses::Column::WorkDirectoryId
.eq(db_repository_entry.work_directory_id),
)
.add(worktree_repository_statuses::Column::IsDeleted.eq(false)),
)
.stream(tx)
.await?;
let mut updated_statuses = Vec::new();
while let Some(status_entry) = repository_statuses.next().await {
let status_entry: worktree_repository_statuses::Model = status_entry?;
updated_statuses.push(db_status_to_proto(status_entry)?);
let mut repository_statuses = project_repository_statuses::Entity::find()
.filter(
Condition::all()
.add(project_repository_statuses::Column::ProjectId.eq(project.id))
.add(
project_repository_statuses::Column::RepositoryId
.eq(db_repository_entry.id),
)
.add(project_repository_statuses::Column::IsDeleted.eq(false)),
)
.stream(tx)
.await?;
let mut updated_statuses = Vec::new();
while let Some(status_entry) = repository_statuses.next().await {
let status_entry = status_entry?;
updated_statuses.push(db_status_to_proto(status_entry)?);
}
let current_merge_conflicts = db_repository_entry
.current_merge_conflicts
.as_ref()
.map(|conflicts| serde_json::from_str(&conflicts))
.transpose()?
.unwrap_or_default();
let branch_summary = db_repository_entry
.branch_summary
.as_ref()
.map(|branch_summary| serde_json::from_str(&branch_summary))
.transpose()?
.unwrap_or_default();
let entry_ids = serde_json::from_str(&db_repository_entry.entry_ids)
.context("failed to deserialize repository's entry ids")?;
if let Some(worktree_id) = db_repository_entry.legacy_worktree_id {
if let Some(worktree) = worktrees.get_mut(&(worktree_id as u64)) {
worktree.legacy_repository_entries.insert(
db_repository_entry.id as u64,
proto::RepositoryEntry {
work_directory_id: db_repository_entry.id as u64,
updated_statuses,
removed_statuses: Vec::new(),
current_merge_conflicts,
branch_summary,
},
);
}
let current_merge_conflicts = db_repository_entry
.current_merge_conflicts
.as_ref()
.map(|conflicts| serde_json::from_str(&conflicts))
.transpose()?
.unwrap_or_default();
let branch_summary = db_repository_entry
.branch_summary
.as_ref()
.map(|branch_summary| serde_json::from_str(&branch_summary))
.transpose()?
.unwrap_or_default();
worktree.repository_entries.insert(
db_repository_entry.work_directory_id as u64,
proto::RepositoryEntry {
work_directory_id: db_repository_entry.work_directory_id as u64,
branch: db_repository_entry.branch,
updated_statuses,
removed_statuses: Vec::new(),
current_merge_conflicts,
branch_summary,
},
);
} else {
repositories.push(proto::UpdateRepository {
project_id: db_repository_entry.project_id.0 as u64,
id: db_repository_entry.id as u64,
abs_path: db_repository_entry.abs_path,
entry_ids,
updated_statuses,
removed_statuses: Vec::new(),
current_merge_conflicts,
branch_summary,
scan_id: db_repository_entry.scan_id as u64,
});
}
}
}
@ -871,6 +1018,7 @@ impl Database {
})
.collect(),
worktrees,
repositories,
language_servers: language_servers
.into_iter()
.map(|language_server| proto::LanguageServer {

@ -1,3 +1,5 @@
use anyhow::Context as _;
use super::*;
impl Database {
@ -606,6 +608,11 @@ impl Database {
let mut worktrees = Vec::new();
let db_worktrees = project.find_related(worktree::Entity).all(tx).await?;
let db_repos = project
.find_related(project_repository::Entity)
.all(tx)
.await?;
for db_worktree in db_worktrees {
let mut worktree = RejoinedWorktree {
id: db_worktree.id as u64,
@ -673,96 +680,112 @@ impl Database {
}
}
// Repository Entries
{
let repository_entry_filter = if let Some(rejoined_worktree) = rejoined_worktree {
worktree_repository::Column::ScanId.gt(rejoined_worktree.scan_id)
worktrees.push(worktree);
}
let mut removed_repositories = Vec::new();
let mut updated_repositories = Vec::new();
for db_repo in db_repos {
let rejoined_repository = rejoined_project
.repositories
.iter()
.find(|repo| repo.id == db_repo.id as u64);
let repository_filter = if let Some(rejoined_repository) = rejoined_repository {
project_repository::Column::ScanId.gt(rejoined_repository.scan_id)
} else {
project_repository::Column::IsDeleted.eq(false)
};
let db_repositories = project_repository::Entity::find()
.filter(
Condition::all()
.add(project_repository::Column::ProjectId.eq(project.id))
.add(repository_filter),
)
.all(tx)
.await?;
for db_repository in db_repositories.into_iter() {
if db_repository.is_deleted {
removed_repositories.push(db_repository.id as u64);
} else {
worktree_repository::Column::IsDeleted.eq(false)
};
let db_repositories = worktree_repository::Entity::find()
.filter(
Condition::all()
.add(worktree_repository::Column::ProjectId.eq(project.id))
.add(worktree_repository::Column::WorktreeId.eq(worktree.id))
.add(repository_entry_filter),
)
.all(tx)
.await?;
for db_repository in db_repositories.into_iter() {
if db_repository.is_deleted {
worktree
.removed_repositories
.push(db_repository.work_directory_id as u64);
let status_entry_filter = if let Some(rejoined_repository) = rejoined_repository
{
project_repository_statuses::Column::ScanId.gt(rejoined_repository.scan_id)
} else {
let status_entry_filter = if let Some(rejoined_worktree) = rejoined_worktree
{
worktree_repository_statuses::Column::ScanId
.gt(rejoined_worktree.scan_id)
project_repository_statuses::Column::IsDeleted.eq(false)
};
let mut db_statuses = project_repository_statuses::Entity::find()
.filter(
Condition::all()
.add(project_repository_statuses::Column::ProjectId.eq(project.id))
.add(
project_repository_statuses::Column::RepositoryId
.eq(db_repository.id),
)
.add(status_entry_filter),
)
.stream(tx)
.await?;
let mut removed_statuses = Vec::new();
let mut updated_statuses = Vec::new();
while let Some(db_status) = db_statuses.next().await {
let db_status: project_repository_statuses::Model = db_status?;
if db_status.is_deleted {
removed_statuses.push(db_status.repo_path);
} else {
worktree_repository_statuses::Column::IsDeleted.eq(false)
};
let mut db_statuses = worktree_repository_statuses::Entity::find()
.filter(
Condition::all()
.add(
worktree_repository_statuses::Column::ProjectId
.eq(project.id),
)
.add(
worktree_repository_statuses::Column::WorktreeId
.eq(worktree.id),
)
.add(
worktree_repository_statuses::Column::WorkDirectoryId
.eq(db_repository.work_directory_id),
)
.add(status_entry_filter),
)
.stream(tx)
.await?;
let mut removed_statuses = Vec::new();
let mut updated_statuses = Vec::new();
while let Some(db_status) = db_statuses.next().await {
let db_status: worktree_repository_statuses::Model = db_status?;
if db_status.is_deleted {
removed_statuses.push(db_status.repo_path);
} else {
updated_statuses.push(db_status_to_proto(db_status)?);
}
updated_statuses.push(db_status_to_proto(db_status)?);
}
}
let current_merge_conflicts = db_repository
.current_merge_conflicts
.as_ref()
.map(|conflicts| serde_json::from_str(&conflicts))
.transpose()?
.unwrap_or_default();
let current_merge_conflicts = db_repository
.current_merge_conflicts
.as_ref()
.map(|conflicts| serde_json::from_str(&conflicts))
.transpose()?
.unwrap_or_default();
let branch_summary = db_repository
.branch_summary
.as_ref()
.map(|branch_summary| serde_json::from_str(&branch_summary))
.transpose()?
.unwrap_or_default();
let branch_summary = db_repository
.branch_summary
.as_ref()
.map(|branch_summary| serde_json::from_str(&branch_summary))
.transpose()?
.unwrap_or_default();
worktree.updated_repositories.push(proto::RepositoryEntry {
work_directory_id: db_repository.work_directory_id as u64,
branch: db_repository.branch,
let entry_ids = serde_json::from_str(&db_repository.entry_ids)
.context("failed to deserialize repository's entry ids")?;
if let Some(legacy_worktree_id) = db_repository.legacy_worktree_id {
if let Some(worktree) = worktrees
.iter_mut()
.find(|worktree| worktree.id as i64 == legacy_worktree_id)
{
worktree.updated_repositories.push(proto::RepositoryEntry {
work_directory_id: db_repository.id as u64,
updated_statuses,
removed_statuses,
current_merge_conflicts,
branch_summary,
});
}
} else {
updated_repositories.push(proto::UpdateRepository {
entry_ids,
updated_statuses,
removed_statuses,
current_merge_conflicts,
branch_summary,
project_id: project_id.to_proto(),
id: db_repository.id as u64,
abs_path: db_repository.abs_path,
scan_id: db_repository.scan_id as u64,
});
}
}
}
worktrees.push(worktree);
}
let language_servers = project
@ -832,6 +855,8 @@ impl Database {
id: project_id,
old_connection_id,
collaborators,
updated_repositories,
removed_repositories,
worktrees,
language_servers,
}))

@ -26,6 +26,8 @@ pub mod observed_channel_messages;
pub mod processed_stripe_event;
pub mod project;
pub mod project_collaborator;
pub mod project_repository;
pub mod project_repository_statuses;
pub mod rate_buckets;
pub mod room;
pub mod room_participant;
@ -36,6 +38,4 @@ pub mod user_feature;
pub mod worktree;
pub mod worktree_diagnostic_summary;
pub mod worktree_entry;
pub mod worktree_repository;
pub mod worktree_repository_statuses;
pub mod worktree_settings_file;

@ -45,6 +45,8 @@ pub enum Relation {
Room,
#[sea_orm(has_many = "super::worktree::Entity")]
Worktrees,
#[sea_orm(has_many = "super::project_repository::Entity")]
Repositories,
#[sea_orm(has_many = "super::project_collaborator::Entity")]
Collaborators,
#[sea_orm(has_many = "super::language_server::Entity")]
@ -69,6 +71,12 @@ impl Related<super::worktree::Entity> for Entity {
}
}
impl Related<super::project_repository::Entity> for Entity {
fn to() -> RelationDef {
Relation::Repositories.def()
}
}
impl Related<super::project_collaborator::Entity> for Entity {
fn to() -> RelationDef {
Relation::Collaborators.def()

@ -2,16 +2,17 @@ use crate::db::ProjectId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktree_repositories")]
#[sea_orm(table_name = "project_repositories")]
pub struct Model {
#[sea_orm(primary_key)]
pub project_id: ProjectId,
#[sea_orm(primary_key)]
pub worktree_id: i64,
#[sea_orm(primary_key)]
pub work_directory_id: i64,
pub id: i64,
pub abs_path: String,
pub legacy_worktree_id: Option<i64>,
// JSON array containing 1 or more integer project entry ids
pub entry_ids: String,
pub scan_id: i64,
pub branch: Option<String>,
pub is_deleted: bool,
// JSON array typed string
pub current_merge_conflicts: Option<String>,
@ -20,6 +21,19 @@ pub struct Model {
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
pub enum Relation {
#[sea_orm(
belongs_to = "super::project::Entity",
from = "Column::ProjectId",
to = "super::project::Column::Id"
)]
Project,
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::Project.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

@ -2,14 +2,12 @@ use crate::db::ProjectId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktree_repository_statuses")]
#[sea_orm(table_name = "project_repository_statuses")]
pub struct Model {
#[sea_orm(primary_key)]
pub project_id: ProjectId,
#[sea_orm(primary_key)]
pub worktree_id: i64,
#[sea_orm(primary_key)]
pub work_directory_id: i64,
pub repository_id: i64,
#[sea_orm(primary_key)]
pub repo_path: String,
/// Old single-code status field, no longer used but kept here to mirror the DB schema.

@ -37,6 +37,7 @@ use core::fmt::{self, Debug, Formatter};
use http_client::HttpClient;
use open_ai::{OpenAiEmbeddingModel, OPEN_AI_API_URL};
use reqwest_client::ReqwestClient;
use rpc::proto::split_repository_update;
use sha2::Digest;
use supermaven_api::{CreateExternalUserRequest, SupermavenAdminApi};
@ -291,6 +292,8 @@ impl Server {
.add_message_handler(leave_project)
.add_request_handler(update_project)
.add_request_handler(update_worktree)
.add_request_handler(update_repository)
.add_request_handler(remove_repository)
.add_message_handler(start_language_server)
.add_message_handler(update_language_server)
.add_message_handler(update_diagnostic_summary)
@ -1464,7 +1467,7 @@ fn notify_rejoined_projects(
removed_repositories: worktree.removed_repositories,
};
for update in proto::split_worktree_update(message) {
session.peer.send(session.connection_id, update.clone())?;
session.peer.send(session.connection_id, update)?;
}
// Stream this worktree's diagnostics.
@ -1493,21 +1496,23 @@ fn notify_rejoined_projects(
}
}
for language_server in &project.language_servers {
for repository in mem::take(&mut project.updated_repositories) {
for update in split_repository_update(repository) {
session.peer.send(session.connection_id, update)?;
}
}
for id in mem::take(&mut project.removed_repositories) {
session.peer.send(
session.connection_id,
proto::UpdateLanguageServer {
proto::RemoveRepository {
project_id: project.id.to_proto(),
language_server_id: language_server.id,
variant: Some(
proto::update_language_server::Variant::DiskBasedDiagnosticsUpdated(
proto::LspDiskBasedDiagnosticsUpdated {},
),
),
id,
},
)?;
}
}
Ok(())
}
@ -1893,7 +1898,7 @@ fn join_project_internal(
removed_entries: Default::default(),
scan_id: worktree.scan_id,
is_last_update: worktree.scan_id == worktree.completed_scan_id,
updated_repositories: worktree.repository_entries.into_values().collect(),
updated_repositories: worktree.legacy_repository_entries.into_values().collect(),
removed_repositories: Default::default(),
};
for update in proto::split_worktree_update(message) {
@ -1926,6 +1931,12 @@ fn join_project_internal(
}
}
for repository in mem::take(&mut project.repositories) {
for update in split_repository_update(repository) {
session.peer.send(session.connection_id, update)?;
}
}
for language_server in &project.language_servers {
session.peer.send(
session.connection_id,
@ -2018,6 +2029,54 @@ async fn update_worktree(
Ok(())
}
async fn update_repository(
request: proto::UpdateRepository,
response: Response<proto::UpdateRepository>,
session: Session,
) -> Result<()> {
let guest_connection_ids = session
.db()
.await
.update_repository(&request, session.connection_id)
.await?;
broadcast(
Some(session.connection_id),
guest_connection_ids.iter().copied(),
|connection_id| {
session
.peer
.forward_send(session.connection_id, connection_id, request.clone())
},
);
response.send(proto::Ack {})?;
Ok(())
}
async fn remove_repository(
request: proto::RemoveRepository,
response: Response<proto::RemoveRepository>,
session: Session,
) -> Result<()> {
let guest_connection_ids = session
.db()
.await
.remove_repository(&request, session.connection_id)
.await?;
broadcast(
Some(session.connection_id),
guest_connection_ids.iter().copied(),
|connection_id| {
session
.peer
.forward_send(session.connection_id, connection_id, request.clone())
},
);
response.send(proto::Ack {})?;
Ok(())
}
/// Updates other participants with changes to the diagnostics
async fn update_diagnostic_summary(
message: proto::UpdateDiagnosticSummary,

@ -2847,7 +2847,7 @@ async fn test_git_diff_base_change(
});
}
#[gpui::test]
#[gpui::test(iterations = 10)]
async fn test_git_branch_name(
executor: BackgroundExecutor,
cx_a: &mut TestAppContext,