Move git status out of Entry (#22224)

- [x] Rewrite worktree git handling
- [x] Fix tests
- [x] Fix `test_propagate_statuses_for_repos_under_project`
- [x] Replace `WorkDirectoryEntry` with `WorkDirectory` in
`RepositoryEntry`
- [x] Add a worktree event for capturing git status changes
- [x] Confirm that the local repositories are correctly updating the new
WorkDirectory field
- [x] Implement the git statuses query as a join when pulling entries
out of worktree (see the sketch after this list)
- [x] Use this new join to implement the project panel and outline
panel.
- [x] Synchronize git statuses over the wire for collab and remote dev
(use the existing `worktree_repository_statuses` table, adjust as
needed)
- [x] Only send changed statuses to collab
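
As a rough illustration of the "join" item above, the sketch below shows how a per-entry status lookup can work once statuses live on repository entries instead of on `Entry` itself: find the containing repository's work directory, then look up the repo-relative path. All names and types here are illustrative stand-ins, not the actual Zed API.

```rust
// Hypothetical sketch of the per-entry status lookup; not Zed's real types.
use std::collections::BTreeMap;
use std::path::{Path, PathBuf};

#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum GitFileStatus {
    Added,
    Modified,
    Conflict,
}

/// Statuses for one repository, keyed by path relative to its work directory.
struct RepositoryStatuses {
    work_directory: PathBuf, // relative to the worktree root
    statuses: BTreeMap<PathBuf, GitFileStatus>,
}

/// Resolve the status for a worktree-relative entry path by picking the
/// deepest repository whose work directory contains it, then consulting
/// that repository's status map.
fn status_for_path(
    repositories: &[RepositoryStatuses],
    entry_path: &Path,
) -> Option<GitFileStatus> {
    let repository = repositories
        .iter()
        .filter(|repo| entry_path.starts_with(&repo.work_directory))
        .max_by_key(|repo| repo.work_directory.components().count())?;
    let repo_path = entry_path.strip_prefix(&repository.work_directory).ok()?;
    repository.statuses.get(repo_path).copied()
}

fn main() {
    let repositories = vec![RepositoryStatuses {
        work_directory: PathBuf::from("sub_repo"),
        statuses: BTreeMap::from([(PathBuf::from("a.txt"), GitFileStatus::Added)]),
    }];
    assert_eq!(
        status_for_path(&repositories, Path::new("sub_repo/a.txt")),
        Some(GitFileStatus::Added)
    );
    assert_eq!(status_for_path(&repositories, Path::new("other/b.txt")), None);
}
```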

Release Notes:

- N/A

---------

Co-authored-by: Cole Miller <cole@zed.dev>
Co-authored-by: Mikayla <mikayla@zed.com>
Co-authored-by: Nathan <nathan@zed.dev>
commit 9613084f59 (parent 72057e5716)
Author: Mikayla Maki
Date: 2025-01-03 17:00:16 -08:00 (committed by GitHub)
57 changed files with 2824 additions and 1254 deletions

@@ -106,6 +106,22 @@ CREATE TABLE "worktree_repositories" (
CREATE INDEX "index_worktree_repositories_on_project_id" ON "worktree_repositories" ("project_id");
CREATE INDEX "index_worktree_repositories_on_project_id_and_worktree_id" ON "worktree_repositories" ("project_id", "worktree_id");
CREATE TABLE "worktree_repository_statuses" (
"project_id" INTEGER NOT NULL,
"worktree_id" INT8 NOT NULL,
"work_directory_id" INT8 NOT NULL,
"repo_path" VARCHAR NOT NULL,
"status" INT8 NOT NULL,
"scan_id" INT8 NOT NULL,
"is_deleted" BOOL NOT NULL,
PRIMARY KEY(project_id, worktree_id, work_directory_id, repo_path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE,
FOREIGN KEY(project_id, worktree_id, work_directory_id) REFERENCES worktree_entries (project_id, worktree_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_wt_repos_statuses_on_project_id" ON "worktree_repository_statuses" ("project_id");
CREATE INDEX "index_wt_repos_statuses_on_project_id_and_wt_id" ON "worktree_repository_statuses" ("project_id", "worktree_id");
CREATE INDEX "index_wt_repos_statuses_on_project_id_and_wt_id_and_wd_id" ON "worktree_repository_statuses" ("project_id", "worktree_id", "work_directory_id");
CREATE TABLE "worktree_settings_files" (
"project_id" INTEGER NOT NULL,
"worktree_id" INTEGER NOT NULL,

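The `scan_id` and `is_deleted` columns on `worktree_repository_statuses` support the upsert/tombstone pattern used by the queries later in this change: a changed status is upserted under its composite key, and a removed status is marked deleted rather than dropped, so peers can catch up by scan id. Below is a minimal in-memory sketch of that bookkeeping; the key and row types are simplified stand-ins, not the collab schema.

```rust
// Illustrative model only; the real rows live in the SQL table above.
use std::collections::HashMap;

#[allow(dead_code)] // some fields exist only to mirror the table columns
struct StatusRow {
    status: i64,
    scan_id: i64,
    is_deleted: bool,
}

// Keyed like the table's primary key, minus project_id and worktree_id,
// which are constant within a single worktree update.
type StatusTable = HashMap<(i64, String), StatusRow>; // (work_directory_id, repo_path)

/// Apply one scan's worth of changes: upsert updated statuses and tombstone
/// removed ones instead of deleting the rows outright.
fn apply_scan(
    table: &mut StatusTable,
    scan_id: i64,
    work_directory_id: i64,
    updated: &[(String, i64)],
    removed: &[String],
) {
    for (repo_path, status) in updated {
        table.insert(
            (work_directory_id, repo_path.clone()),
            StatusRow { status: *status, scan_id, is_deleted: false },
        );
    }
    for repo_path in removed {
        if let Some(row) = table.get_mut(&(work_directory_id, repo_path.clone())) {
            row.is_deleted = true;
            row.scan_id = scan_id;
        }
    }
}

fn main() {
    let mut table = StatusTable::new();
    apply_scan(&mut table, 1, 42, &[("a.txt".to_string(), 1)], &[]);
    apply_scan(&mut table, 2, 42, &[], &["a.txt".to_string()]);
    assert!(table[&(42, "a.txt".to_string())].is_deleted);
}
```
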
@@ -1,4 +1,5 @@
use anyhow::Context as _;
use util::ResultExt;
use super::*;
@@ -274,8 +275,8 @@ impl Database {
mtime_nanos: ActiveValue::set(mtime.nanos as i32),
canonical_path: ActiveValue::set(entry.canonical_path.clone()),
is_ignored: ActiveValue::set(entry.is_ignored),
git_status: ActiveValue::set(None),
is_external: ActiveValue::set(entry.is_external),
git_status: ActiveValue::set(entry.git_status.map(|status| status as i64)),
is_deleted: ActiveValue::set(false),
scan_id: ActiveValue::set(update.scan_id as i64),
is_fifo: ActiveValue::set(entry.is_fifo),
@@ -295,7 +296,6 @@ impl Database {
worktree_entry::Column::MtimeNanos,
worktree_entry::Column::CanonicalPath,
worktree_entry::Column::IsIgnored,
worktree_entry::Column::GitStatus,
worktree_entry::Column::ScanId,
])
.to_owned(),
@@ -349,6 +349,79 @@ impl Database {
)
.exec(&*tx)
.await?;
let has_any_statuses = update
.updated_repositories
.iter()
.any(|repository| !repository.updated_statuses.is_empty());
if has_any_statuses {
worktree_repository_statuses::Entity::insert_many(
update.updated_repositories.iter().flat_map(
|repository: &proto::RepositoryEntry| {
repository.updated_statuses.iter().map(|status_entry| {
worktree_repository_statuses::ActiveModel {
project_id: ActiveValue::set(project_id),
worktree_id: ActiveValue::set(worktree_id),
work_directory_id: ActiveValue::set(
repository.work_directory_id as i64,
),
scan_id: ActiveValue::set(update.scan_id as i64),
is_deleted: ActiveValue::set(false),
repo_path: ActiveValue::set(status_entry.repo_path.clone()),
status: ActiveValue::set(status_entry.status as i64),
}
})
},
),
)
.on_conflict(
OnConflict::columns([
worktree_repository_statuses::Column::ProjectId,
worktree_repository_statuses::Column::WorktreeId,
worktree_repository_statuses::Column::WorkDirectoryId,
worktree_repository_statuses::Column::RepoPath,
])
.update_columns([
worktree_repository_statuses::Column::ScanId,
worktree_repository_statuses::Column::Status,
])
.to_owned(),
)
.exec(&*tx)
.await?;
}
let has_any_removed_statuses = update
.updated_repositories
.iter()
.any(|repository| !repository.removed_statuses.is_empty());
if has_any_removed_statuses {
worktree_repository_statuses::Entity::update_many()
.filter(
worktree_repository_statuses::Column::ProjectId
.eq(project_id)
.and(
worktree_repository_statuses::Column::WorktreeId
.eq(worktree_id),
)
.and(
worktree_repository_statuses::Column::RepoPath.is_in(
update.updated_repositories.iter().flat_map(|repository| {
repository.removed_statuses.iter()
}),
),
),
)
.set(worktree_repository_statuses::ActiveModel {
is_deleted: ActiveValue::Set(true),
scan_id: ActiveValue::Set(update.scan_id as i64),
..Default::default()
})
.exec(&*tx)
.await?;
}
}
if !update.removed_repositories.is_empty() {
@@ -643,7 +716,6 @@ impl Database {
canonical_path: db_entry.canonical_path,
is_ignored: db_entry.is_ignored,
is_external: db_entry.is_external,
git_status: db_entry.git_status.map(|status| status as i32),
// This is only used in the summarization backlog, so if it's None,
// that just means we won't be able to detect when to resummarize
// based on total number of backlogged bytes - instead, we'd go
@@ -657,23 +729,49 @@ impl Database {
// Populate repository entries.
{
let mut db_repository_entries = worktree_repository::Entity::find()
let db_repository_entries = worktree_repository::Entity::find()
.filter(
Condition::all()
.add(worktree_repository::Column::ProjectId.eq(project.id))
.add(worktree_repository::Column::IsDeleted.eq(false)),
)
.stream(tx)
.all(tx)
.await?;
while let Some(db_repository_entry) = db_repository_entries.next().await {
let db_repository_entry = db_repository_entry?;
for db_repository_entry in db_repository_entries {
if let Some(worktree) = worktrees.get_mut(&(db_repository_entry.worktree_id as u64))
{
let mut repository_statuses = worktree_repository_statuses::Entity::find()
.filter(
Condition::all()
.add(worktree_repository_statuses::Column::ProjectId.eq(project.id))
.add(
worktree_repository_statuses::Column::WorktreeId
.eq(worktree.id),
)
.add(
worktree_repository_statuses::Column::WorkDirectoryId
.eq(db_repository_entry.work_directory_id),
)
.add(worktree_repository_statuses::Column::IsDeleted.eq(false)),
)
.stream(tx)
.await?;
let mut updated_statuses = Vec::new();
while let Some(status_entry) = repository_statuses.next().await {
let status_entry: worktree_repository_statuses::Model = status_entry?;
updated_statuses.push(proto::StatusEntry {
repo_path: status_entry.repo_path,
status: status_entry.status as i32,
});
}
worktree.repository_entries.insert(
db_repository_entry.work_directory_id as u64,
proto::RepositoryEntry {
work_directory_id: db_repository_entry.work_directory_id as u64,
branch: db_repository_entry.branch,
updated_statuses,
removed_statuses: Vec::new(),
},
);
}

@@ -662,7 +662,6 @@ impl Database {
canonical_path: db_entry.canonical_path,
is_ignored: db_entry.is_ignored,
is_external: db_entry.is_external,
git_status: db_entry.git_status.map(|status| status as i32),
// This is only used in the summarization backlog, so if it's None,
// that just means we won't be able to detect when to resummarize
// based on total number of backlogged bytes - instead, we'd go
@@ -682,26 +681,69 @@ impl Database {
worktree_repository::Column::IsDeleted.eq(false)
};
let mut db_repositories = worktree_repository::Entity::find()
let db_repositories = worktree_repository::Entity::find()
.filter(
Condition::all()
.add(worktree_repository::Column::ProjectId.eq(project.id))
.add(worktree_repository::Column::WorktreeId.eq(worktree.id))
.add(repository_entry_filter),
)
.stream(tx)
.all(tx)
.await?;
while let Some(db_repository) = db_repositories.next().await {
let db_repository = db_repository?;
for db_repository in db_repositories.into_iter() {
if db_repository.is_deleted {
worktree
.removed_repositories
.push(db_repository.work_directory_id as u64);
} else {
let status_entry_filter = if let Some(rejoined_worktree) = rejoined_worktree
{
worktree_repository_statuses::Column::ScanId
.gt(rejoined_worktree.scan_id)
} else {
worktree_repository_statuses::Column::IsDeleted.eq(false)
};
let mut db_statuses = worktree_repository_statuses::Entity::find()
.filter(
Condition::all()
.add(
worktree_repository_statuses::Column::ProjectId
.eq(project.id),
)
.add(
worktree_repository_statuses::Column::WorktreeId
.eq(worktree.id),
)
.add(
worktree_repository_statuses::Column::WorkDirectoryId
.eq(db_repository.work_directory_id),
)
.add(status_entry_filter),
)
.stream(tx)
.await?;
let mut removed_statuses = Vec::new();
let mut updated_statuses = Vec::new();
while let Some(db_status) = db_statuses.next().await {
let db_status: worktree_repository_statuses::Model = db_status?;
if db_status.is_deleted {
removed_statuses.push(db_status.repo_path);
} else {
updated_statuses.push(proto::StatusEntry {
repo_path: db_status.repo_path,
status: db_status.status as i32,
});
}
}
worktree.updated_repositories.push(proto::RepositoryEntry {
work_directory_id: db_repository.work_directory_id as u64,
branch: db_repository.branch,
updated_statuses,
removed_statuses,
});
}
}
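
The `status_entry_filter` above applies the tombstone pattern on rejoin: a returning peer only receives status rows whose scan id is newer than the last scan it saw, split into updated and removed entries. Here is a standalone sketch of that partitioning step, using simplified stand-in types rather than the actual sea-orm models.

```rust
// Stand-in types; the real code streams worktree_repository_statuses models.
struct StatusRow {
    repo_path: String,
    status: i32,
    scan_id: i64,
    is_deleted: bool,
}

struct StatusDelta {
    updated: Vec<(String, i32)>,
    removed: Vec<String>,
}

/// Split rows newer than the peer's last-seen scan into statuses to update
/// and statuses to remove (tombstoned rows).
fn delta_since(rows: &[StatusRow], last_seen_scan_id: i64) -> StatusDelta {
    let mut delta = StatusDelta { updated: Vec::new(), removed: Vec::new() };
    for row in rows.iter().filter(|row| row.scan_id > last_seen_scan_id) {
        if row.is_deleted {
            delta.removed.push(row.repo_path.clone());
        } else {
            delta.updated.push((row.repo_path.clone(), row.status));
        }
    }
    delta
}

fn main() {
    let rows = vec![
        StatusRow { repo_path: "a.txt".into(), status: 1, scan_id: 3, is_deleted: false },
        StatusRow { repo_path: "b.txt".into(), status: 2, scan_id: 5, is_deleted: true },
    ];
    let delta = delta_since(&rows, 3);
    assert!(delta.updated.is_empty());
    assert_eq!(delta.removed, vec!["b.txt".to_string()]);
}
```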

@@ -2925,8 +2925,6 @@ async fn test_git_status_sync(
assert_eq!(snapshot.status_for_file(file), status);
}
// Smoke test status reading
project_local.read_with(cx_a, |project, cx| {
assert_status(&Path::new(A_TXT), Some(GitFileStatus::Added), project, cx);
assert_status(&Path::new(B_TXT), Some(GitFileStatus::Added), project, cx);
@@ -6669,6 +6667,10 @@ async fn test_remote_git_branches(
client_a
.fs()
.insert_branches(Path::new("/project/.git"), &branches);
let branches_set = branches
.into_iter()
.map(ToString::to_string)
.collect::<HashSet<_>>();
let (project_a, worktree_id) = client_a.build_local_project("/project", cx_a).await;
let project_id = active_call_a
@@ -6690,10 +6692,10 @@ async fn test_remote_git_branches(
let branches_b = branches_b
.into_iter()
.map(|branch| branch.name)
.collect::<Vec<_>>();
.map(|branch| branch.name.to_string())
.collect::<HashSet<_>>();
assert_eq!(&branches_b, &branches);
assert_eq!(branches_b, branches_set);
cx_b.update(|cx| {
project_b.update(cx, |project, cx| {

@@ -229,6 +229,10 @@ async fn test_ssh_collaboration_git_branches(
.await;
let branches = ["main", "dev", "feature-1"];
let branches_set = branches
.iter()
.map(ToString::to_string)
.collect::<HashSet<_>>();
remote_fs.insert_branches(Path::new("/project/.git"), &branches);
// User A connects to the remote project via SSH.
@@ -281,10 +285,10 @@ async fn test_ssh_collaboration_git_branches(
let branches_b = branches_b
.into_iter()
.map(|branch| branch.name)
.collect::<Vec<_>>();
.map(|branch| branch.name.to_string())
.collect::<HashSet<_>>();
assert_eq!(&branches_b, &branches);
assert_eq!(&branches_b, &branches_set);
cx_b.update(|cx| {
project_b.update(cx, |project, cx| {