Add fs::MTime newtype to encourage != instead of > (#20830)

See ["mtime comparison considered
harmful"](https://apenwarr.ca/log/20181113) for details of why
comparators other than equality/inequality should not be used with
mtime.

Release Notes:

- N/A
This commit is contained in:
Michael Sloan 2024-11-21 19:21:18 -07:00 committed by GitHub
parent 477c6e6833
commit 14ea4621ab
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
15 changed files with 155 additions and 112 deletions

3
Cargo.lock generated
View file

@@ -3731,6 +3731,7 @@ dependencies = [
"emojis", "emojis",
"env_logger 0.11.5", "env_logger 0.11.5",
"file_icons", "file_icons",
"fs",
"futures 0.3.31", "futures 0.3.31",
"fuzzy", "fuzzy",
"git", "git",
@@ -4621,6 +4622,7 @@ dependencies = [
"objc", "objc",
"parking_lot", "parking_lot",
"paths", "paths",
"proto",
"rope", "rope",
"serde", "serde",
"serde_json", "serde_json",
@@ -6487,6 +6489,7 @@ dependencies = [
"ctor", "ctor",
"ec4rs", "ec4rs",
"env_logger 0.11.5", "env_logger 0.11.5",
"fs",
"futures 0.3.31", "futures 0.3.31",
"fuzzy", "fuzzy",
"git", "git",

View file

@@ -770,7 +770,7 @@ impl ContextStore {
contexts.push(SavedContextMetadata { contexts.push(SavedContextMetadata {
title: title.to_string(), title: title.to_string(),
path, path,
mtime: metadata.mtime.into(), mtime: metadata.mtime.timestamp_for_user().into(),
}); });
} }
} }

View file

@@ -1231,7 +1231,7 @@ mod tests {
fn disk_state(&self) -> language::DiskState { fn disk_state(&self) -> language::DiskState {
language::DiskState::Present { language::DiskState::Present {
mtime: std::time::UNIX_EPOCH, mtime: ::fs::MTime::from_seconds_and_nanos(100, 42),
} }
} }

View file

@@ -42,6 +42,7 @@ emojis.workspace = true
file_icons.workspace = true file_icons.workspace = true
futures.workspace = true futures.workspace = true
fuzzy.workspace = true fuzzy.workspace = true
fs.workspace = true
git.workspace = true git.workspace = true
gpui.workspace = true gpui.workspace = true
http_client.workspace = true http_client.workspace = true

View file

@@ -1618,15 +1618,14 @@ fn path_for_file<'a>(
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::editor_tests::init_test; use crate::editor_tests::init_test;
use fs::Fs;
use super::*; use super::*;
use fs::MTime;
use gpui::{AppContext, VisualTestContext}; use gpui::{AppContext, VisualTestContext};
use language::{LanguageMatcher, TestFile}; use language::{LanguageMatcher, TestFile};
use project::FakeFs; use project::FakeFs;
use std::{ use std::path::{Path, PathBuf};
path::{Path, PathBuf},
time::SystemTime,
};
#[gpui::test] #[gpui::test]
fn test_path_for_file(cx: &mut AppContext) { fn test_path_for_file(cx: &mut AppContext) {
@@ -1679,9 +1678,7 @@ mod tests {
async fn test_deserialize(cx: &mut gpui::TestAppContext) { async fn test_deserialize(cx: &mut gpui::TestAppContext) {
init_test(cx, |_| {}); init_test(cx, |_| {});
let now = SystemTime::now();
let fs = FakeFs::new(cx.executor()); let fs = FakeFs::new(cx.executor());
fs.set_next_mtime(now);
fs.insert_file("/file.rs", Default::default()).await; fs.insert_file("/file.rs", Default::default()).await;
// Test case 1: Deserialize with path and contents // Test case 1: Deserialize with path and contents
@@ -1690,12 +1687,18 @@ mod tests {
let (workspace, cx) = cx.add_window_view(|cx| Workspace::test_new(project.clone(), cx)); let (workspace, cx) = cx.add_window_view(|cx| Workspace::test_new(project.clone(), cx));
let workspace_id = workspace::WORKSPACE_DB.next_id().await.unwrap(); let workspace_id = workspace::WORKSPACE_DB.next_id().await.unwrap();
let item_id = 1234 as ItemId; let item_id = 1234 as ItemId;
let mtime = fs
.metadata(Path::new("/file.rs"))
.await
.unwrap()
.unwrap()
.mtime;
let serialized_editor = SerializedEditor { let serialized_editor = SerializedEditor {
abs_path: Some(PathBuf::from("/file.rs")), abs_path: Some(PathBuf::from("/file.rs")),
contents: Some("fn main() {}".to_string()), contents: Some("fn main() {}".to_string()),
language: Some("Rust".to_string()), language: Some("Rust".to_string()),
mtime: Some(now), mtime: Some(mtime),
}; };
DB.save_serialized_editor(item_id, workspace_id, serialized_editor.clone()) DB.save_serialized_editor(item_id, workspace_id, serialized_editor.clone())
@@ -1792,9 +1795,7 @@ mod tests {
let workspace_id = workspace::WORKSPACE_DB.next_id().await.unwrap(); let workspace_id = workspace::WORKSPACE_DB.next_id().await.unwrap();
let item_id = 9345 as ItemId; let item_id = 9345 as ItemId;
let old_mtime = now let old_mtime = MTime::from_seconds_and_nanos(0, 50);
.checked_sub(std::time::Duration::from_secs(60 * 60 * 24))
.unwrap();
let serialized_editor = SerializedEditor { let serialized_editor = SerializedEditor {
abs_path: Some(PathBuf::from("/file.rs")), abs_path: Some(PathBuf::from("/file.rs")),
contents: Some("fn main() {}".to_string()), contents: Some("fn main() {}".to_string()),

View file

@@ -1,8 +1,8 @@
use anyhow::Result; use anyhow::Result;
use db::sqlez::bindable::{Bind, Column, StaticColumnCount}; use db::sqlez::bindable::{Bind, Column, StaticColumnCount};
use db::sqlez::statement::Statement; use db::sqlez::statement::Statement;
use fs::MTime;
use std::path::PathBuf; use std::path::PathBuf;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use db::sqlez_macros::sql; use db::sqlez_macros::sql;
use db::{define_connection, query}; use db::{define_connection, query};
@@ -14,7 +14,7 @@ pub(crate) struct SerializedEditor {
pub(crate) abs_path: Option<PathBuf>, pub(crate) abs_path: Option<PathBuf>,
pub(crate) contents: Option<String>, pub(crate) contents: Option<String>,
pub(crate) language: Option<String>, pub(crate) language: Option<String>,
pub(crate) mtime: Option<SystemTime>, pub(crate) mtime: Option<MTime>,
} }
impl StaticColumnCount for SerializedEditor { impl StaticColumnCount for SerializedEditor {
@@ -29,16 +29,13 @@ impl Bind for SerializedEditor {
let start_index = statement.bind(&self.contents, start_index)?; let start_index = statement.bind(&self.contents, start_index)?;
let start_index = statement.bind(&self.language, start_index)?; let start_index = statement.bind(&self.language, start_index)?;
let mtime = self.mtime.and_then(|mtime| { let start_index = match self
mtime .mtime
.duration_since(UNIX_EPOCH) .and_then(|mtime| mtime.to_seconds_and_nanos_for_persistence())
.ok() {
.map(|duration| (duration.as_secs() as i64, duration.subsec_nanos() as i32))
});
let start_index = match mtime {
Some((seconds, nanos)) => { Some((seconds, nanos)) => {
let start_index = statement.bind(&seconds, start_index)?; let start_index = statement.bind(&(seconds as i64), start_index)?;
statement.bind(&nanos, start_index)? statement.bind(&(nanos as i32), start_index)?
} }
None => { None => {
let start_index = statement.bind::<Option<i64>>(&None, start_index)?; let start_index = statement.bind::<Option<i64>>(&None, start_index)?;
@@ -64,7 +61,7 @@ impl Column for SerializedEditor {
let mtime = mtime_seconds let mtime = mtime_seconds
.zip(mtime_nanos) .zip(mtime_nanos)
.map(|(seconds, nanos)| UNIX_EPOCH + Duration::new(seconds as u64, nanos as u32)); .map(|(seconds, nanos)| MTime::from_seconds_and_nanos(seconds as u64, nanos as u32));
let editor = Self { let editor = Self {
abs_path, abs_path,
@@ -280,12 +277,11 @@ mod tests {
assert_eq!(have, serialized_editor); assert_eq!(have, serialized_editor);
// Storing and retrieving mtime // Storing and retrieving mtime
let now = SystemTime::now();
let serialized_editor = SerializedEditor { let serialized_editor = SerializedEditor {
abs_path: None, abs_path: None,
contents: None, contents: None,
language: None, language: None,
mtime: Some(now), mtime: Some(MTime::from_seconds_and_nanos(100, 42)),
}; };
DB.save_serialized_editor(1234, workspace_id, serialized_editor.clone()) DB.save_serialized_editor(1234, workspace_id, serialized_editor.clone())

View file

@@ -345,7 +345,10 @@ impl ExtensionStore {
if let (Ok(Some(index_metadata)), Ok(Some(extensions_metadata))) = if let (Ok(Some(index_metadata)), Ok(Some(extensions_metadata))) =
(index_metadata, extensions_metadata) (index_metadata, extensions_metadata)
{ {
if index_metadata.mtime > extensions_metadata.mtime { if index_metadata
.mtime
.bad_is_greater_than(extensions_metadata.mtime)
{
extension_index_needs_rebuild = false; extension_index_needs_rebuild = false;
} }
} }

View file

@@ -24,6 +24,7 @@ libc.workspace = true
parking_lot.workspace = true parking_lot.workspace = true
paths.workspace = true paths.workspace = true
rope.workspace = true rope.workspace = true
proto.workspace = true
serde.workspace = true serde.workspace = true
serde_json.workspace = true serde_json.workspace = true
smol.workspace = true smol.workspace = true

View file

@@ -27,13 +27,14 @@ use futures::{future::BoxFuture, AsyncRead, Stream, StreamExt};
use git::repository::{GitRepository, RealGitRepository}; use git::repository::{GitRepository, RealGitRepository};
use gpui::{AppContext, Global, ReadGlobal}; use gpui::{AppContext, Global, ReadGlobal};
use rope::Rope; use rope::Rope;
use serde::{Deserialize, Serialize};
use smol::io::AsyncWriteExt; use smol::io::AsyncWriteExt;
use std::{ use std::{
io::{self, Write}, io::{self, Write},
path::{Component, Path, PathBuf}, path::{Component, Path, PathBuf},
pin::Pin, pin::Pin,
sync::Arc, sync::Arc,
time::{Duration, SystemTime}, time::{Duration, SystemTime, UNIX_EPOCH},
}; };
use tempfile::{NamedTempFile, TempDir}; use tempfile::{NamedTempFile, TempDir};
use text::LineEnding; use text::LineEnding;
@@ -179,13 +180,62 @@ pub struct RemoveOptions {
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]
pub struct Metadata { pub struct Metadata {
pub inode: u64, pub inode: u64,
pub mtime: SystemTime, pub mtime: MTime,
pub is_symlink: bool, pub is_symlink: bool,
pub is_dir: bool, pub is_dir: bool,
pub len: u64, pub len: u64,
pub is_fifo: bool, pub is_fifo: bool,
} }
/// Filesystem modification time. The purpose of this newtype is to discourage use of operations
/// that do not make sense for mtimes. In particular, it is not always valid to compare mtimes using
/// `<` or `>`, as there are many things that can cause the mtime of a file to be earlier than it
/// was. See ["mtime comparison considered harmful" - apenwarr](https://apenwarr.ca/log/20181113).
///
/// Do not derive Ord, PartialOrd, or arithmetic operation traits.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)]
#[serde(transparent)]
pub struct MTime(SystemTime);
impl MTime {
/// Conversion intended for persistence and testing.
pub fn from_seconds_and_nanos(secs: u64, nanos: u32) -> Self {
MTime(UNIX_EPOCH + Duration::new(secs, nanos))
}
/// Conversion intended for persistence.
pub fn to_seconds_and_nanos_for_persistence(self) -> Option<(u64, u32)> {
self.0
.duration_since(UNIX_EPOCH)
.ok()
.map(|duration| (duration.as_secs(), duration.subsec_nanos()))
}
/// Returns the value wrapped by this `MTime`, for presentation to the user. The name including
/// "_for_user" is to discourage misuse - this method should not be used when making decisions
/// about file dirtiness.
pub fn timestamp_for_user(self) -> SystemTime {
self.0
}
/// Temporary method to split out the behavior changes from introduction of this newtype.
pub fn bad_is_greater_than(self, other: MTime) -> bool {
self.0 > other.0
}
}
impl From<proto::Timestamp> for MTime {
fn from(timestamp: proto::Timestamp) -> Self {
MTime(timestamp.into())
}
}
impl From<MTime> for proto::Timestamp {
fn from(mtime: MTime) -> Self {
mtime.0.into()
}
}
#[derive(Default)] #[derive(Default)]
pub struct RealFs { pub struct RealFs {
git_hosting_provider_registry: Arc<GitHostingProviderRegistry>, git_hosting_provider_registry: Arc<GitHostingProviderRegistry>,
@@ -558,7 +608,7 @@ impl Fs for RealFs {
Ok(Some(Metadata { Ok(Some(Metadata {
inode, inode,
mtime: metadata.modified().unwrap(), mtime: MTime(metadata.modified().unwrap()),
len: metadata.len(), len: metadata.len(),
is_symlink, is_symlink,
is_dir: metadata.file_type().is_dir(), is_dir: metadata.file_type().is_dir(),
@@ -818,13 +868,13 @@ struct FakeFsState {
enum FakeFsEntry { enum FakeFsEntry {
File { File {
inode: u64, inode: u64,
mtime: SystemTime, mtime: MTime,
len: u64, len: u64,
content: Vec<u8>, content: Vec<u8>,
}, },
Dir { Dir {
inode: u64, inode: u64,
mtime: SystemTime, mtime: MTime,
len: u64, len: u64,
entries: BTreeMap<String, Arc<Mutex<FakeFsEntry>>>, entries: BTreeMap<String, Arc<Mutex<FakeFsEntry>>>,
git_repo_state: Option<Arc<Mutex<git::repository::FakeGitRepositoryState>>>, git_repo_state: Option<Arc<Mutex<git::repository::FakeGitRepositoryState>>>,
@@ -836,6 +886,18 @@ enum FakeFsEntry {
#[cfg(any(test, feature = "test-support"))] #[cfg(any(test, feature = "test-support"))]
impl FakeFsState { impl FakeFsState {
fn get_and_increment_mtime(&mut self) -> MTime {
let mtime = self.next_mtime;
self.next_mtime += FakeFs::SYSTEMTIME_INTERVAL;
MTime(mtime)
}
fn get_and_increment_inode(&mut self) -> u64 {
let inode = self.next_inode;
self.next_inode += 1;
inode
}
fn read_path(&self, target: &Path) -> Result<Arc<Mutex<FakeFsEntry>>> { fn read_path(&self, target: &Path) -> Result<Arc<Mutex<FakeFsEntry>>> {
Ok(self Ok(self
.try_read_path(target, true) .try_read_path(target, true)
@@ -959,7 +1021,7 @@ pub static FS_DOT_GIT: std::sync::LazyLock<&'static OsStr> =
impl FakeFs { impl FakeFs {
/// We need to use something large enough for Windows and Unix to consider this a new file. /// We need to use something large enough for Windows and Unix to consider this a new file.
/// https://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html#platform-specific-behavior /// https://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html#platform-specific-behavior
const SYSTEMTIME_INTERVAL: u64 = 100; const SYSTEMTIME_INTERVAL: Duration = Duration::from_nanos(100);
pub fn new(executor: gpui::BackgroundExecutor) -> Arc<Self> { pub fn new(executor: gpui::BackgroundExecutor) -> Arc<Self> {
let (tx, mut rx) = smol::channel::bounded::<PathBuf>(10); let (tx, mut rx) = smol::channel::bounded::<PathBuf>(10);
@@ -969,13 +1031,13 @@ impl FakeFs {
state: Mutex::new(FakeFsState { state: Mutex::new(FakeFsState {
root: Arc::new(Mutex::new(FakeFsEntry::Dir { root: Arc::new(Mutex::new(FakeFsEntry::Dir {
inode: 0, inode: 0,
mtime: SystemTime::UNIX_EPOCH, mtime: MTime(UNIX_EPOCH),
len: 0, len: 0,
entries: Default::default(), entries: Default::default(),
git_repo_state: None, git_repo_state: None,
})), })),
git_event_tx: tx, git_event_tx: tx,
next_mtime: SystemTime::UNIX_EPOCH, next_mtime: UNIX_EPOCH + Self::SYSTEMTIME_INTERVAL,
next_inode: 1, next_inode: 1,
event_txs: Default::default(), event_txs: Default::default(),
buffered_events: Vec::new(), buffered_events: Vec::new(),
@@ -1007,13 +1069,16 @@ impl FakeFs {
state.next_mtime = next_mtime; state.next_mtime = next_mtime;
} }
pub fn get_and_increment_mtime(&self) -> MTime {
let mut state = self.state.lock();
state.get_and_increment_mtime()
}
pub async fn touch_path(&self, path: impl AsRef<Path>) { pub async fn touch_path(&self, path: impl AsRef<Path>) {
let mut state = self.state.lock(); let mut state = self.state.lock();
let path = path.as_ref(); let path = path.as_ref();
let new_mtime = state.next_mtime; let new_mtime = state.get_and_increment_mtime();
let new_inode = state.next_inode; let new_inode = state.get_and_increment_inode();
state.next_inode += 1;
state.next_mtime += Duration::from_nanos(Self::SYSTEMTIME_INTERVAL);
state state
.write_path(path, move |entry| { .write_path(path, move |entry| {
match entry { match entry {
@@ -1062,19 +1127,14 @@ impl FakeFs {
fn write_file_internal(&self, path: impl AsRef<Path>, content: Vec<u8>) -> Result<()> { fn write_file_internal(&self, path: impl AsRef<Path>, content: Vec<u8>) -> Result<()> {
let mut state = self.state.lock(); let mut state = self.state.lock();
let path = path.as_ref();
let inode = state.next_inode;
let mtime = state.next_mtime;
state.next_inode += 1;
state.next_mtime += Duration::from_nanos(Self::SYSTEMTIME_INTERVAL);
let file = Arc::new(Mutex::new(FakeFsEntry::File { let file = Arc::new(Mutex::new(FakeFsEntry::File {
inode, inode: state.get_and_increment_inode(),
mtime, mtime: state.get_and_increment_mtime(),
len: content.len() as u64, len: content.len() as u64,
content, content,
})); }));
let mut kind = None; let mut kind = None;
state.write_path(path, { state.write_path(path.as_ref(), {
let kind = &mut kind; let kind = &mut kind;
move |entry| { move |entry| {
match entry { match entry {
@@ -1090,7 +1150,7 @@ impl FakeFs {
Ok(()) Ok(())
} }
})?; })?;
state.emit_event([(path, kind)]); state.emit_event([(path.as_ref(), kind)]);
Ok(()) Ok(())
} }
@@ -1383,16 +1443,6 @@ impl FakeFsEntry {
} }
} }
fn set_file_content(&mut self, path: &Path, new_content: Vec<u8>) -> Result<()> {
if let Self::File { content, mtime, .. } = self {
*mtime = SystemTime::now();
*content = new_content;
Ok(())
} else {
Err(anyhow!("not a file: {}", path.display()))
}
}
fn dir_entries( fn dir_entries(
&mut self, &mut self,
path: &Path, path: &Path,
@@ -1456,10 +1506,8 @@ impl Fs for FakeFs {
} }
let mut state = self.state.lock(); let mut state = self.state.lock();
let inode = state.next_inode; let inode = state.get_and_increment_inode();
let mtime = state.next_mtime; let mtime = state.get_and_increment_mtime();
state.next_mtime += Duration::from_nanos(Self::SYSTEMTIME_INTERVAL);
state.next_inode += 1;
state.write_path(&cur_path, |entry| { state.write_path(&cur_path, |entry| {
entry.or_insert_with(|| { entry.or_insert_with(|| {
created_dirs.push((cur_path.clone(), Some(PathEventKind::Created))); created_dirs.push((cur_path.clone(), Some(PathEventKind::Created)));
@@ -1482,10 +1530,8 @@ impl Fs for FakeFs {
async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> { async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
self.simulate_random_delay().await; self.simulate_random_delay().await;
let mut state = self.state.lock(); let mut state = self.state.lock();
let inode = state.next_inode; let inode = state.get_and_increment_inode();
let mtime = state.next_mtime; let mtime = state.get_and_increment_mtime();
state.next_mtime += Duration::from_nanos(Self::SYSTEMTIME_INTERVAL);
state.next_inode += 1;
let file = Arc::new(Mutex::new(FakeFsEntry::File { let file = Arc::new(Mutex::new(FakeFsEntry::File {
inode, inode,
mtime, mtime,
@@ -1625,13 +1671,12 @@ impl Fs for FakeFs {
let source = normalize_path(source); let source = normalize_path(source);
let target = normalize_path(target); let target = normalize_path(target);
let mut state = self.state.lock(); let mut state = self.state.lock();
let mtime = state.next_mtime; let mtime = state.get_and_increment_mtime();
let inode = util::post_inc(&mut state.next_inode); let inode = state.get_and_increment_inode();
state.next_mtime += Duration::from_nanos(Self::SYSTEMTIME_INTERVAL);
let source_entry = state.read_path(&source)?; let source_entry = state.read_path(&source)?;
let content = source_entry.lock().file_content(&source)?.clone(); let content = source_entry.lock().file_content(&source)?.clone();
let mut kind = Some(PathEventKind::Created); let mut kind = Some(PathEventKind::Created);
let entry = state.write_path(&target, |e| match e { state.write_path(&target, |e| match e {
btree_map::Entry::Occupied(e) => { btree_map::Entry::Occupied(e) => {
if options.overwrite { if options.overwrite {
kind = Some(PathEventKind::Changed); kind = Some(PathEventKind::Changed);
@@ -1647,14 +1692,11 @@ impl Fs for FakeFs {
inode, inode,
mtime, mtime,
len: content.len() as u64, len: content.len() as u64,
content: Vec::new(), content,
}))) })))
.clone(), .clone(),
)), )),
})?; })?;
if let Some(entry) = entry {
entry.lock().set_file_content(&target, content)?;
}
state.emit_event([(target, kind)]); state.emit_event([(target, kind)]);
Ok(()) Ok(())
} }

View file

@@ -31,6 +31,7 @@ async-watch.workspace = true
clock.workspace = true clock.workspace = true
collections.workspace = true collections.workspace = true
ec4rs.workspace = true ec4rs.workspace = true
fs.workspace = true
futures.workspace = true futures.workspace = true
fuzzy.workspace = true fuzzy.workspace = true
git.workspace = true git.workspace = true

View file

@@ -21,6 +21,7 @@ use async_watch as watch;
use clock::Lamport; use clock::Lamport;
pub use clock::ReplicaId; pub use clock::ReplicaId;
use collections::HashMap; use collections::HashMap;
use fs::MTime;
use futures::channel::oneshot; use futures::channel::oneshot;
use gpui::{ use gpui::{
AnyElement, AppContext, Context as _, EventEmitter, HighlightStyle, Model, ModelContext, AnyElement, AppContext, Context as _, EventEmitter, HighlightStyle, Model, ModelContext,
@@ -51,7 +52,7 @@ use std::{
path::{Path, PathBuf}, path::{Path, PathBuf},
str, str,
sync::{Arc, LazyLock}, sync::{Arc, LazyLock},
time::{Duration, Instant, SystemTime}, time::{Duration, Instant},
vec, vec,
}; };
use sum_tree::TreeMap; use sum_tree::TreeMap;
@@ -108,7 +109,7 @@ pub struct Buffer {
file: Option<Arc<dyn File>>, file: Option<Arc<dyn File>>,
/// The mtime of the file when this buffer was last loaded from /// The mtime of the file when this buffer was last loaded from
/// or saved to disk. /// or saved to disk.
saved_mtime: Option<SystemTime>, saved_mtime: Option<MTime>,
/// The version vector when this buffer was last loaded from /// The version vector when this buffer was last loaded from
/// or saved to disk. /// or saved to disk.
saved_version: clock::Global, saved_version: clock::Global,
@@ -406,22 +407,19 @@ pub trait File: Send + Sync {
/// modified. In the case where the file is not stored, it can be either `New` or `Deleted`. In the /// modified. In the case where the file is not stored, it can be either `New` or `Deleted`. In the
/// UI these two states are distinguished. For example, the buffer tab does not display a deletion /// UI these two states are distinguished. For example, the buffer tab does not display a deletion
/// indicator for new files. /// indicator for new files.
#[derive(Clone, Copy, Debug, PartialEq)] #[derive(Copy, Clone, Debug, PartialEq)]
pub enum DiskState { pub enum DiskState {
/// File created in Zed that has not been saved. /// File created in Zed that has not been saved.
New, New,
/// File present on the filesystem. /// File present on the filesystem.
Present { Present { mtime: MTime },
/// Last known mtime (modification time).
mtime: SystemTime,
},
/// Deleted file that was previously present. /// Deleted file that was previously present.
Deleted, Deleted,
} }
impl DiskState { impl DiskState {
/// Returns the file's last known modification time on disk. /// Returns the file's last known modification time on disk.
pub fn mtime(self) -> Option<SystemTime> { pub fn mtime(self) -> Option<MTime> {
match self { match self {
DiskState::New => None, DiskState::New => None,
DiskState::Present { mtime } => Some(mtime), DiskState::Present { mtime } => Some(mtime),
@@ -976,7 +974,7 @@ impl Buffer {
} }
/// The mtime of the buffer's file when the buffer was last saved or reloaded from disk. /// The mtime of the buffer's file when the buffer was last saved or reloaded from disk.
pub fn saved_mtime(&self) -> Option<SystemTime> { pub fn saved_mtime(&self) -> Option<MTime> {
self.saved_mtime self.saved_mtime
} }
@@ -1011,7 +1009,7 @@ impl Buffer {
pub fn did_save( pub fn did_save(
&mut self, &mut self,
version: clock::Global, version: clock::Global,
mtime: Option<SystemTime>, mtime: Option<MTime>,
cx: &mut ModelContext<Self>, cx: &mut ModelContext<Self>,
) { ) {
self.saved_version = version; self.saved_version = version;
@@ -1077,7 +1075,7 @@ impl Buffer {
&mut self, &mut self,
version: clock::Global, version: clock::Global,
line_ending: LineEnding, line_ending: LineEnding,
mtime: Option<SystemTime>, mtime: Option<MTime>,
cx: &mut ModelContext<Self>, cx: &mut ModelContext<Self>,
) { ) {
self.saved_version = version; self.saved_version = version;
@@ -1777,7 +1775,9 @@ impl Buffer {
match file.disk_state() { match file.disk_state() {
DiskState::New => false, DiskState::New => false,
DiskState::Present { mtime } => match self.saved_mtime { DiskState::Present { mtime } => match self.saved_mtime {
Some(saved_mtime) => mtime > saved_mtime && self.has_unsaved_edits(), Some(saved_mtime) => {
mtime.bad_is_greater_than(saved_mtime) && self.has_unsaved_edits()
}
None => true, None => true,
}, },
DiskState::Deleted => true, DiskState::Deleted => true,

View file

@@ -7,6 +7,7 @@ use anyhow::{anyhow, Context as _, Result};
use collections::Bound; use collections::Bound;
use feature_flags::FeatureFlagAppExt; use feature_flags::FeatureFlagAppExt;
use fs::Fs; use fs::Fs;
use fs::MTime;
use futures::stream::StreamExt; use futures::stream::StreamExt;
use futures_batch::ChunksTimeoutStreamExt; use futures_batch::ChunksTimeoutStreamExt;
use gpui::{AppContext, Model, Task}; use gpui::{AppContext, Model, Task};
@@ -17,14 +18,7 @@ use project::{Entry, UpdatedEntriesSet, Worktree};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use smol::channel; use smol::channel;
use smol::future::FutureExt; use smol::future::FutureExt;
use std::{ use std::{cmp::Ordering, future::Future, iter, path::Path, sync::Arc, time::Duration};
cmp::Ordering,
future::Future,
iter,
path::Path,
sync::Arc,
time::{Duration, SystemTime},
};
use util::ResultExt; use util::ResultExt;
use worktree::Snapshot; use worktree::Snapshot;
@@ -451,7 +445,7 @@ struct ChunkFiles {
pub struct ChunkedFile { pub struct ChunkedFile {
pub path: Arc<Path>, pub path: Arc<Path>,
pub mtime: Option<SystemTime>, pub mtime: Option<MTime>,
pub handle: IndexingEntryHandle, pub handle: IndexingEntryHandle,
pub text: String, pub text: String,
pub chunks: Vec<Chunk>, pub chunks: Vec<Chunk>,
@@ -465,7 +459,7 @@ pub struct EmbedFiles {
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub struct EmbeddedFile { pub struct EmbeddedFile {
pub path: Arc<Path>, pub path: Arc<Path>,
pub mtime: Option<SystemTime>, pub mtime: Option<MTime>,
pub chunks: Vec<EmbeddedChunk>, pub chunks: Vec<EmbeddedChunk>,
} }

View file

@@ -1,5 +1,6 @@
use collections::HashMap; use collections::HashMap;
use std::{path::Path, sync::Arc, time::SystemTime}; use fs::MTime;
use std::{path::Path, sync::Arc};
const MAX_FILES_BEFORE_RESUMMARIZE: usize = 4; const MAX_FILES_BEFORE_RESUMMARIZE: usize = 4;
const MAX_BYTES_BEFORE_RESUMMARIZE: u64 = 1_000_000; // 1 MB const MAX_BYTES_BEFORE_RESUMMARIZE: u64 = 1_000_000; // 1 MB
@@ -7,14 +8,14 @@ const MAX_BYTES_BEFORE_RESUMMARIZE: u64 = 1_000_000; // 1 MB
#[derive(Default, Debug)] #[derive(Default, Debug)]
pub struct SummaryBacklog { pub struct SummaryBacklog {
/// Key: path to a file that needs summarization, but that we haven't summarized yet. Value: that file's size on disk, in bytes, and its mtime. /// Key: path to a file that needs summarization, but that we haven't summarized yet. Value: that file's size on disk, in bytes, and its mtime.
files: HashMap<Arc<Path>, (u64, Option<SystemTime>)>, files: HashMap<Arc<Path>, (u64, Option<MTime>)>,
/// Cache of the sum of all values in `files`, so we don't have to traverse the whole map to check if we're over the byte limit. /// Cache of the sum of all values in `files`, so we don't have to traverse the whole map to check if we're over the byte limit.
total_bytes: u64, total_bytes: u64,
} }
impl SummaryBacklog { impl SummaryBacklog {
/// Store the given path in the backlog, along with how many bytes are in it. /// Store the given path in the backlog, along with how many bytes are in it.
pub fn insert(&mut self, path: Arc<Path>, bytes_on_disk: u64, mtime: Option<SystemTime>) { pub fn insert(&mut self, path: Arc<Path>, bytes_on_disk: u64, mtime: Option<MTime>) {
let (prev_bytes, _) = self let (prev_bytes, _) = self
.files .files
.insert(path, (bytes_on_disk, mtime)) .insert(path, (bytes_on_disk, mtime))
@@ -34,7 +35,7 @@ impl SummaryBacklog {
/// Remove all the entries in the backlog and return the file paths as an iterator. /// Remove all the entries in the backlog and return the file paths as an iterator.
#[allow(clippy::needless_lifetimes)] // Clippy thinks this 'a can be elided, but eliding it gives a compile error #[allow(clippy::needless_lifetimes)] // Clippy thinks this 'a can be elided, but eliding it gives a compile error
pub fn drain<'a>(&'a mut self) -> impl Iterator<Item = (Arc<Path>, Option<SystemTime>)> + 'a { pub fn drain<'a>(&'a mut self) -> impl Iterator<Item = (Arc<Path>, Option<MTime>)> + 'a {
self.total_bytes = 0; self.total_bytes = 0;
self.files self.files

View file

@@ -1,6 +1,6 @@
use anyhow::{anyhow, Context as _, Result}; use anyhow::{anyhow, Context as _, Result};
use arrayvec::ArrayString; use arrayvec::ArrayString;
use fs::Fs; use fs::{Fs, MTime};
use futures::{stream::StreamExt, TryFutureExt}; use futures::{stream::StreamExt, TryFutureExt};
use futures_batch::ChunksTimeoutStreamExt; use futures_batch::ChunksTimeoutStreamExt;
use gpui::{AppContext, Model, Task}; use gpui::{AppContext, Model, Task};
@@ -21,7 +21,7 @@ use std::{
future::Future, future::Future,
path::Path, path::Path,
sync::Arc, sync::Arc,
time::{Duration, Instant, SystemTime}, time::{Duration, Instant},
}; };
use util::ResultExt; use util::ResultExt;
use worktree::Snapshot; use worktree::Snapshot;
@@ -39,7 +39,7 @@ struct UnsummarizedFile {
// Path to the file on disk // Path to the file on disk
path: Arc<Path>, path: Arc<Path>,
// The mtime of the file on disk // The mtime of the file on disk
mtime: Option<SystemTime>, mtime: Option<MTime>,
// BLAKE3 hash of the source file's contents // BLAKE3 hash of the source file's contents
digest: Blake3Digest, digest: Blake3Digest,
// The source file's contents // The source file's contents
@@ -51,7 +51,7 @@ struct SummarizedFile {
// Path to the file on disk // Path to the file on disk
path: String, path: String,
// The mtime of the file on disk // The mtime of the file on disk
mtime: Option<SystemTime>, mtime: Option<MTime>,
// BLAKE3 hash of the source file's contents // BLAKE3 hash of the source file's contents
digest: Blake3Digest, digest: Blake3Digest,
// The LLM's summary of the file's contents // The LLM's summary of the file's contents
@@ -63,7 +63,7 @@ pub type Blake3Digest = ArrayString<{ blake3::OUT_LEN * 2 }>;
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub struct FileDigest { pub struct FileDigest {
pub mtime: Option<SystemTime>, pub mtime: Option<MTime>,
pub digest: Blake3Digest, pub digest: Blake3Digest,
} }
@@ -88,7 +88,7 @@ pub struct SummaryIndex {
} }
struct Backlogged { struct Backlogged {
paths_to_digest: channel::Receiver<Vec<(Arc<Path>, Option<SystemTime>)>>, paths_to_digest: channel::Receiver<Vec<(Arc<Path>, Option<MTime>)>>,
task: Task<Result<()>>, task: Task<Result<()>>,
} }
@@ -319,7 +319,7 @@ impl SummaryIndex {
digest_db: heed::Database<Str, SerdeBincode<FileDigest>>, digest_db: heed::Database<Str, SerdeBincode<FileDigest>>,
txn: &RoTxn<'_>, txn: &RoTxn<'_>,
entry: &Entry, entry: &Entry,
) -> Vec<(Arc<Path>, Option<SystemTime>)> { ) -> Vec<(Arc<Path>, Option<MTime>)> {
let entry_db_key = db_key_for_path(&entry.path); let entry_db_key = db_key_for_path(&entry.path);
match digest_db.get(&txn, &entry_db_key) { match digest_db.get(&txn, &entry_db_key) {
@@ -414,7 +414,7 @@ impl SummaryIndex {
fn digest_files( fn digest_files(
&self, &self,
paths: channel::Receiver<Vec<(Arc<Path>, Option<SystemTime>)>>, paths: channel::Receiver<Vec<(Arc<Path>, Option<MTime>)>>,
worktree_abs_path: Arc<Path>, worktree_abs_path: Arc<Path>,
cx: &AppContext, cx: &AppContext,
) -> MightNeedSummaryFiles { ) -> MightNeedSummaryFiles {
@@ -646,7 +646,7 @@ impl SummaryIndex {
let start = Instant::now(); let start = Instant::now();
let backlogged = { let backlogged = {
let (tx, rx) = channel::bounded(512); let (tx, rx) = channel::bounded(512);
let needs_summary: Vec<(Arc<Path>, Option<SystemTime>)> = { let needs_summary: Vec<(Arc<Path>, Option<MTime>)> = {
let mut backlog = self.backlog.lock(); let mut backlog = self.backlog.lock();
backlog.drain().collect() backlog.drain().collect()

View file

@@ -7,7 +7,7 @@ use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
use anyhow::{anyhow, Context as _, Result}; use anyhow::{anyhow, Context as _, Result};
use clock::ReplicaId; use clock::ReplicaId;
use collections::{HashMap, HashSet, VecDeque}; use collections::{HashMap, HashSet, VecDeque};
use fs::{copy_recursive, Fs, PathEvent, RemoveOptions, Watcher}; use fs::{copy_recursive, Fs, MTime, PathEvent, RemoveOptions, Watcher};
use futures::{ use futures::{
channel::{ channel::{
mpsc::{self, UnboundedSender}, mpsc::{self, UnboundedSender},
@@ -61,7 +61,7 @@ use std::{
atomic::{AtomicUsize, Ordering::SeqCst}, atomic::{AtomicUsize, Ordering::SeqCst},
Arc, Arc,
}, },
time::{Duration, Instant, SystemTime}, time::{Duration, Instant},
}; };
use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet}; use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
use text::{LineEnding, Rope}; use text::{LineEnding, Rope};
@@ -3395,7 +3395,7 @@ pub struct Entry {
pub kind: EntryKind, pub kind: EntryKind,
pub path: Arc<Path>, pub path: Arc<Path>,
pub inode: u64, pub inode: u64,
pub mtime: Option<SystemTime>, pub mtime: Option<MTime>,
pub canonical_path: Option<Box<Path>>, pub canonical_path: Option<Box<Path>>,
/// Whether this entry is ignored by Git. /// Whether this entry is ignored by Git.