Fix regression in handling git FS events (#2670)

As part of an optimization in
https://github.com/zed-industries/zed/pull/2663, I changed the way that
the worktree ignores FS events within unloaded directories. But this
accidentally prevented us from detecting some events that occur inside
of `.git` directories.

In this PR, I've made further tweaks to which FS events we can ignore.
We now explicitly opt *in* to scanning `.git` directories (shallowly),
even though they are ignored. Note that we still don't recursively scan
the git directory, so we don't walk everything inside `objects`, etc.
This seems like the right amount of work to do, and it behaves correctly
in my testing and in our unit tests that use the real file system and
real git repositories.

Release Notes:

- Fixed a bug where Zed would not detect some git repository changes
(preview only).
commit f83514cde4
Max Brunsfeld, 2023-06-30 11:40:49 -07:00 (committed by GitHub)
2 changed files with 69 additions and 61 deletions


@@ -2175,6 +2175,7 @@ impl LocalSnapshot {
impl BackgroundScannerState {
fn should_scan_directory(&self, entry: &Entry) -> bool {
(!entry.is_external && !entry.is_ignored)
|| entry.path.file_name() == Some(&*DOT_GIT)
|| self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
|| self
.paths_to_scan
@@ -2354,6 +2355,7 @@ impl BackgroundScannerState {
.entry_for_id(entry_id)
.map(|entry| RepositoryWorkDirectory(entry.path.clone())) else { continue };
log::info!("reload git repository {:?}", dot_git_dir);
let repository = repository.repo_ptr.lock();
let branch = repository.branch_name();
repository.reload_index();
@@ -2394,6 +2396,8 @@ impl BackgroundScannerState {
}
fn build_repository(&mut self, dot_git_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
log::info!("build git repository {:?}", dot_git_path);
let work_dir_path: Arc<Path> = dot_git_path.parent().unwrap().into();
// Guard against repositories inside the repository metadata
@@ -3173,8 +3177,6 @@ impl BackgroundScanner {
}
async fn process_events(&mut self, mut abs_paths: Vec<PathBuf>) {
log::debug!("received fs events {:?}", abs_paths);
let root_path = self.state.lock().snapshot.abs_path.clone();
let root_canonical_path = match self.fs.canonicalize(&root_path).await {
Ok(path) => path,
@@ -3185,7 +3187,6 @@ impl BackgroundScanner {
};
let mut relative_paths = Vec::with_capacity(abs_paths.len());
let mut unloaded_relative_paths = Vec::new();
abs_paths.sort_unstable();
abs_paths.dedup_by(|a, b| a.starts_with(&b));
abs_paths.retain(|abs_path| {
@@ -3208,7 +3209,6 @@ impl BackgroundScanner {
});
if !parent_dir_is_loaded {
log::debug!("ignoring event {relative_path:?} within unloaded directory");
unloaded_relative_paths.push(relative_path);
return false;
}
@@ -3217,27 +3217,30 @@ impl BackgroundScanner {
}
});
if !relative_paths.is_empty() {
let (scan_job_tx, scan_job_rx) = channel::unbounded();
self.reload_entries_for_paths(
root_path,
root_canonical_path,
&relative_paths,
abs_paths,
Some(scan_job_tx.clone()),
)
.await;
drop(scan_job_tx);
self.scan_dirs(false, scan_job_rx).await;
let (scan_job_tx, scan_job_rx) = channel::unbounded();
self.update_ignore_statuses(scan_job_tx).await;
self.scan_dirs(false, scan_job_rx).await;
if relative_paths.is_empty() {
return;
}
log::debug!("received fs events {:?}", relative_paths);
let (scan_job_tx, scan_job_rx) = channel::unbounded();
self.reload_entries_for_paths(
root_path,
root_canonical_path,
&relative_paths,
abs_paths,
Some(scan_job_tx.clone()),
)
.await;
drop(scan_job_tx);
self.scan_dirs(false, scan_job_rx).await;
let (scan_job_tx, scan_job_rx) = channel::unbounded();
self.update_ignore_statuses(scan_job_tx).await;
self.scan_dirs(false, scan_job_rx).await;
{
let mut state = self.state.lock();
relative_paths.extend(unloaded_relative_paths);
state.reload_repositories(&relative_paths, self.fs.as_ref());
state.snapshot.completed_scan_id = state.snapshot.scan_id;
for (_, entry_id) in mem::take(&mut state.removed_entry_ids) {
@@ -3645,23 +3648,28 @@ impl BackgroundScanner {
}
}
let fs_entry = state.insert_entry(fs_entry, self.fs.as_ref());
if let Some(scan_queue_tx) = &scan_queue_tx {
let mut ancestor_inodes = state.snapshot.ancestor_inodes_for_path(&path);
if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
ancestor_inodes.insert(metadata.inode);
smol::block_on(scan_queue_tx.send(ScanJob {
abs_path,
path: path.clone(),
ignore_stack,
ancestor_inodes,
is_external: fs_entry.is_external,
scan_queue: scan_queue_tx.clone(),
}))
.unwrap();
if let (Some(scan_queue_tx), true) = (&scan_queue_tx, fs_entry.is_dir()) {
if state.should_scan_directory(&fs_entry) {
let mut ancestor_inodes =
state.snapshot.ancestor_inodes_for_path(&path);
if !ancestor_inodes.contains(&metadata.inode) {
ancestor_inodes.insert(metadata.inode);
smol::block_on(scan_queue_tx.send(ScanJob {
abs_path,
path: path.clone(),
ignore_stack,
ancestor_inodes,
is_external: fs_entry.is_external,
scan_queue: scan_queue_tx.clone(),
}))
.unwrap();
}
} else {
fs_entry.kind = EntryKind::UnloadedDir;
}
}
state.insert_entry(fs_entry, self.fs.as_ref());
}
Ok(None) => {
self.remove_repo_path(&path, &mut state.snapshot);