Maintain ref counts for document handles

Antonio Scandurra 2023-08-01 15:07:32 +02:00
parent 7fbe5910b9
commit 50f507e38e
6 changed files with 247 additions and 196 deletions
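In short, as reflected in the diffs below: `RepoSnapshot` now tracks a reference count per `(BranchId, DocumentId)` pair. `Branch::load_document` becomes async and increments the count, loading the document into the cached head revision on first use, and a new `Drop` impl on `Document` decrements it. The standalone `RevisionCache` is deleted in favor of a `btree::Map<RevisionId, Revision>` stored directly on the snapshot, `update_async` is reworked into `update_atomic` (the closure now takes and returns the whole `RepoSnapshot`), `btree::TreeSet` is renamed to `Set`, and `BranchId`/`DocumentId` aliases replace bare `OperationId` in signatures.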

View file

@@ -6,11 +6,11 @@ use serde::{Deserialize, Serialize};
 use std::{
     cmp::Ordering,
     collections::BTreeMap,
-    fmt::Debug,
+    fmt::{self, Debug},
     ops::{Bound, RangeBounds},
 };

-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, PartialEq, Eq)]
 pub struct Map<K, V>(Sequence<MapEntry<K, V>>)
 where
     K: Clone + Debug + Ord,
@@ -41,7 +41,7 @@ impl<K> Default for MapKeyRef<'_, K> {
 }

 #[derive(Clone)]
-pub struct TreeSet<K>(Map<K, ()>)
+pub struct Set<K>(Map<K, ()>)
 where
     K: Clone + Debug + Ord;
@@ -294,6 +294,16 @@ where
     }
 }

+impl<K, V> Debug for Map<K, V>
+where
+    K: Clone + Debug + Ord,
+    V: Clone + Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_map().entries(self.iter()).finish()
+    }
+}
+
 #[derive(Debug)]
 struct MapSeekTargetAdaptor<'a, T>(&'a T);
@@ -382,7 +392,7 @@ where
     }
 }

-impl<K> Default for TreeSet<K>
+impl<K> Default for Set<K>
 where
     K: Clone + Debug + Ord,
 {
@@ -391,7 +401,7 @@ where
     }
 }

-impl<K> TreeSet<K>
+impl<K> Set<K>
 where
     K: Clone + Debug + Ord,
 {

View file

@@ -3,7 +3,6 @@ mod dense_id;
 mod history;
 mod messages;
 mod operations;
-mod revision_cache;
 mod rope;
 mod sync;
 #[cfg(test)]
@@ -17,7 +16,6 @@ use futures::{channel::mpsc, future::BoxFuture, FutureExt, StreamExt};
 use history::{History, SavedHistory};
 use messages::{MessageEnvelope, Operation, RequestEnvelope};
 use parking_lot::{Mutex, RwLock};
-use revision_cache::RevisionCache;
 use rope::Rope;
 use serde::{Deserialize, Serialize};
 use smallvec::{smallvec, SmallVec};
@@ -182,6 +180,9 @@ impl btree::Summary for OperationId {
     }
 }

+pub type BranchId = OperationId;
+pub type DocumentId = OperationId;
+
 #[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)]
 pub struct RoomName(Arc<str>);
@@ -388,17 +389,12 @@ impl<E: Executor, N: ClientNetwork> Checkout<E, N> {
         self.repo.apply_operations(response.operations).await?;

         let operations = self
             .repo
-            .update_async(|repo| {
-                let max_operation_ids = response.max_operation_ids.clone();
-                let kv = self.repo.db.kv.clone();
-                async move {
-                    let operations = repo
-                        .history
-                        .operations_since(&(&max_operation_ids).into(), &*kv)
-                        .await?;
-                    Ok((None, operations))
-                }
-                .boxed()
+            .update_atomic(|mut repo| async {
+                let operations = repo
+                    .history
+                    .operations_since(&(&response.max_operation_ids).into(), &*self.repo.db.kv)
+                    .await?;
+                Ok((repo, operations))
             })
             .await?;
@@ -473,8 +469,7 @@ impl<E: Executor, N: ClientNetwork> Client<E, N> {
     pub fn create_repo(&self) -> Repo {
         let id = RepoId::new();
-        let revision_cache = RevisionCache::new(id, &*self.executor, self.db.kv.clone());
-        let snapshot = RepoSnapshot::new(id, ReplicaId(0), revision_cache);
+        let snapshot = RepoSnapshot::new(id, ReplicaId(0));
         let repo = Repo {
             id,
             db: self.db.clone(),
@@ -496,11 +491,10 @@ impl<E: Executor, N: ClientNetwork> Client<E, N> {
                 id: repo_id,
                 db: this.db.clone(),
             };
-            let revision_cache = RevisionCache::new(repo_id, &*this.executor, this.db.kv.clone());
-            this.db.repos.write().insert(
-                repo_id,
-                RepoSnapshot::new(repo_id, response.replica_id, revision_cache),
-            );
+            this.db
+                .repos
+                .write()
+                .insert(repo_id, RepoSnapshot::new(repo_id, response.replica_id));

             let mut room = this.network.room(response.credentials);
             room.connect().await?;
@@ -543,8 +537,7 @@ impl<E: Executor, N: ClientNetwork> Client<E, N> {
                 return Ok(repo);
             }

-            let revision_cache = RevisionCache::new(id, &*this.executor, this.db.kv.clone());
-            let repo = RepoSnapshot::load(id, revision_cache, &*this.db.kv).await?;
+            let repo = RepoSnapshot::load(id, &*this.db.kv).await?;
             this.db.repos.write().entry(id).or_insert(repo);

             Ok(Repo {
@@ -687,10 +680,9 @@ impl<E: Executor, N: ServerNetwork> Server<E, N> {
             .network
             .grant_room_access(&room_name, user.login.as_ref());

-        let revision_cache = RevisionCache::new(request.id, &*self.executor, self.db.kv.clone());
         self.db.repos.write().insert(
             request.id,
-            RepoSnapshot::new(request.id, ReplicaId(u32::MAX), revision_cache),
+            RepoSnapshot::new(request.id, ReplicaId(u32::MAX)),
         );
         self.next_replica_ids_by_repo_id
             .lock()
@@ -740,23 +732,16 @@ impl<E: Executor, N: ServerNetwork> Server<E, N> {
             .repo(request.id)
             .ok_or_else(|| anyhow!("repo not found"))?;
-        repo.update_async(|snapshot| {
-            let request = request.clone();
-            let kv = self.db.kv.clone();
-            async move {
-                let operations = snapshot
-                    .history
-                    .operations_since(&(&request.max_operation_ids).into(), &*kv)
-                    .await?;
-                Ok((
-                    None,
-                    messages::SyncRepoResponse {
-                        operations,
-                        max_operation_ids: snapshot.history.max_operation_ids().into(),
-                    },
-                ))
-            }
-            .boxed()
+        repo.update_atomic(|mut repo| async {
+            let operations = repo
+                .history
+                .operations_since(&(&request.max_operation_ids).into(), &*self.db.kv)
+                .await?;
+            let response = messages::SyncRepoResponse {
+                operations,
+                max_operation_ids: repo.history.max_operation_ids().into(),
+            };
+            Ok((repo, response))
         })
         .await
     }
@@ -836,26 +821,17 @@ impl Repo {
         async move {
             let branch_id = this
-                .update_async(|repo| {
-                    let this = this.clone();
-                    let name = name.clone();
-                    async move {
-                        let branch_id = *repo
-                            .branch_ids_by_name
-                            .load(&name, &*this.db.kv)
-                            .await?
-                            .ok_or_else(|| anyhow!("branch not found"))?;
-                        let head = repo
-                            .branches
-                            .load(&branch_id, &*this.db.kv)
-                            .await?
-                            .ok_or_else(|| anyhow!("branch not found"))?
-                            .head
-                            .clone();
-                        repo.build_revision(&head, &*this.db.kv).await?;
-                        Ok((None, branch_id))
-                    }
-                    .boxed()
+                .update_atomic(|mut repo| async {
+                    let branch_id = *repo
+                        .branch_ids_by_name
+                        .load(&name, &*this.db.kv)
+                        .await?
+                        .ok_or_else(|| anyhow!("branch not found"))?;
+                    repo.branches
+                        .load(&branch_id, &*this.db.kv)
+                        .await?
+                        .ok_or_else(|| anyhow!("branch not found"))?;
+                    Ok((repo, branch_id))
                 })
                 .await?;
@@ -908,18 +884,19 @@ impl Repo {
         result
     }

-    async fn update_async<F, T>(&self, mut f: F) -> Result<T>
+    async fn update_atomic<F, Fut, T>(&self, mut f: F) -> Result<T>
     where
-        F: FnMut(&mut RepoSnapshot) -> BoxFuture<'_, Result<(Option<Operation>, T)>>,
+        F: FnMut(RepoSnapshot) -> Fut,
+        Fut: Future<Output = Result<(RepoSnapshot, T)>>,
     {
         loop {
             let prev_snapshot = self.read(|repo| repo.clone());
-            let mut new_snapshot = prev_snapshot.clone();
-            let (operation, value) = f(&mut new_snapshot).await?;
+            let new_snapshot = prev_snapshot.clone();
+            let (new_snapshot, value) = f(new_snapshot).await?;
             let updated = self.update(|latest_snapshot| {
                 if RepoSnapshot::ptr_eq(&prev_snapshot, &latest_snapshot) {
                     *latest_snapshot = new_snapshot;
-                    (operation, true)
+                    (None, true)
                 } else {
                     (None, false)
                 }
@@ -935,14 +912,11 @@ impl Repo {
         let mut operations = operations.into();
         while let Some(operation) = operations.pop_front() {
             let flushed_operations = self
-                .update_async(|repo| {
-                    let operation = operation.clone();
-                    let kv = self.db.kv.clone();
-                    async move {
-                        let flushed_operations = repo.apply_operation(operation, &*kv).await?;
-                        Ok((None, flushed_operations))
-                    }
-                    .boxed()
+                .update_atomic(|mut repo| async {
+                    let flushed_operations = repo
+                        .apply_operation(operation.clone(), &*self.db.kv)
+                        .await?;
+                    Ok((repo, flushed_operations))
                 })
                 .await?;
             operations.extend(flushed_operations);
@@ -953,7 +927,7 @@ impl Repo {

 #[derive(Clone)]
 struct Branch {
-    id: OperationId,
+    id: BranchId,
     repo: Repo,
 }
@@ -980,17 +954,49 @@ impl Branch {
         })
     }

-    pub fn load_document(&self, id: OperationId) -> Result<Document> {
-        self.read(|revision| {
-            revision
-                .document_metadata
-                .get(&id)
-                .ok_or_else(|| anyhow!("document not found"))?;
-            Ok(Document {
-                branch: self.clone(),
-                id,
-            })
-        })
+    pub async fn load_document(&self, document_id: DocumentId) -> Result<Document> {
+        self.repo
+            .update_atomic(|mut repo| async {
+                let document_key = (self.id, document_id);
+                if let Some(prev_count) = repo.document_ref_counts.get(&document_key).copied() {
+                    repo.document_ref_counts
+                        .insert(document_key, prev_count + 1);
+                } else {
+                    let head = repo
+                        .branches
+                        .get(&self.id)
+                        .expect("branch must exist")
+                        .head
+                        .clone();
+                    repo.document_ref_counts.insert(document_key, 1);
+                    let mut revision = repo.load_revision(&head, &*self.repo.db.kv).await?;
+                    revision
+                        .load_documents([document_id], &*self.repo.db.kv)
+                        .await?;
+                    repo.revisions.insert(head, revision);
+                }
+
+                Ok((
+                    repo,
+                    Document {
+                        branch: self.clone(),
+                        id: document_id,
+                    },
+                ))
+            })
+            .await?;
+        todo!()
+        // self.read(|revision| {
+        //     revision
+        //         .document_metadata
+        //         .get(&id)
+        //         .ok_or_else(|| anyhow!("document not found"))?;
+        //     Ok(Document {
+        //         branch: self.clone(),
+        //         id,
+        //     })
+        // })
     }

     pub fn documents(&self) -> Vec<Document> {
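Together with the `Drop` impl added further down, this gives each `Document` handle classic acquire/release semantics: the first handle for a `(branch, document)` key actually loads the document into the cached head revision, later handles just bump the count, and the last drop removes the entry. (Note the new body is still work in progress in this commit: it ends in `todo!()` after the commented-out original, so the `Document` built inside `update_atomic` is not yet threaded out.) A minimal sketch of the bookkeeping, using a plain `BTreeMap` and `u64` ids in place of the crate's persistent map and `OperationId`s:

```rust
use std::collections::BTreeMap;

type Key = (u64, u64); // hypothetical stand-ins for (BranchId, DocumentId)

/// Returns true when this is the first handle, i.e. the caller must
/// actually load the document into the cached head revision.
fn acquire(counts: &mut BTreeMap<Key, usize>, key: Key) -> bool {
    let count = counts.entry(key).or_insert(0);
    *count += 1;
    *count == 1
}

/// Mirrors `Drop for Document`: decrement, and forget the key at zero.
fn release(counts: &mut BTreeMap<Key, usize>, key: Key) {
    let count = counts.get_mut(&key).expect("document must exist");
    *count -= 1;
    if *count == 0 {
        counts.remove(&key);
    }
}
```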
@@ -1017,12 +1023,16 @@ impl Branch {
                 .expect("branch must exist")
                 .head
                 .clone();
-            let mut revision = repo.revisions.get(&head).expect("head revision must exist");
+            let mut new_revision = repo
+                .revisions
+                .get(&head)
+                .expect("head revision must exist")
+                .clone();
             let operation_id = repo.history.next_operation_id();
-            let (operation, result) = f(operation_id, head.clone(), &mut revision);
+            let (operation, result) = f(operation_id, head.clone(), &mut new_revision);
             repo.branches
                 .update(&self.id, |branch| branch.head = operation_id.into());
-            repo.revisions.save(&operation_id.into(), revision);
+            repo.revisions.insert(operation_id.into(), new_revision);
             (Some(operation), result)
         })
     }
@@ -1057,7 +1067,7 @@ struct DocumentMetadata {

 #[derive(Clone, Debug, Serialize, Deserialize)]
 struct DocumentFragment {
-    document_id: OperationId,
+    document_id: DocumentId,
     location: DenseId,
     insertion_id: OperationId,
     insertion_subrange: Range<usize>,
@@ -1136,7 +1146,7 @@ impl btree::Item for DocumentFragment {
 pub struct DocumentFragmentSummary {
     visible_len: usize,
     hidden_len: usize,
-    max_document_id: OperationId,
+    max_document_id: DocumentId,
     max_location: DenseId,
 }
@@ -1273,7 +1283,7 @@ impl<'a> btree::SeekTarget<'a, InsertionFragmentSummary, InsertionFragmentSummary>
 struct Document {
     branch: Branch,
-    id: OperationId,
+    id: DocumentId,
 }

 impl Document {
@@ -1537,11 +1547,30 @@ impl Document {
     }
 }

+impl Drop for Document {
+    fn drop(&mut self) {
+        self.branch.repo.update(|repo| {
+            let document_key = (self.branch.id, self.id);
+            let ref_count = repo
+                .document_ref_counts
+                .update(&document_key, |ref_count| {
+                    *ref_count -= 1;
+                    *ref_count
+                })
+                .expect("document must exist");
+            if ref_count == 0 {
+                repo.document_ref_counts.remove(&document_key);
+            }
+            (None, ())
+        });
+    }
+}
+
 #[derive(Clone, Debug, Default)]
 pub struct LocalEditDimension {
     visible_len: usize,
     hidden_len: usize,
-    max_document_id: OperationId,
+    max_document_id: DocumentId,
 }

 impl<'a> btree::Dimension<'a, DocumentFragmentSummary> for LocalEditDimension {
@@ -1698,7 +1727,8 @@ pub struct RepoSnapshot {
     history: History,
     branches: btree::Map<OperationId, BranchSnapshot>,
     branch_ids_by_name: btree::Map<Arc<str>, OperationId>,
-    revisions: RevisionCache,
+    revisions: btree::Map<RevisionId, Revision>,
+    document_ref_counts: btree::Map<(BranchId, DocumentId), usize>,
 }

 #[derive(Serialize, Deserialize)]
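Replacing the side-channel `RevisionCache` with a `btree::Map` stored on the snapshot means revision caching now participates in the same copy-on-write discipline as the rest of `RepoSnapshot`, so the `ptr_eq` check used by `update_atomic` observes cache changes too (see the two arms added to `RepoSnapshot::ptr_eq` below). A toy illustration of why pointer equality works as a change detector on persistent structures, with a hypothetical `Map` over `Arc` standing in for `btree::Map`:

```rust
use std::sync::Arc;

// Hypothetical persistent map: clones share the root, mutation replaces it.
#[derive(Clone)]
struct Map(Arc<Vec<(u32, u32)>>);

impl Map {
    fn ptr_eq(a: &Self, b: &Self) -> bool {
        Arc::ptr_eq(&a.0, &b.0)
    }

    fn insert(&mut self, k: u32, v: u32) {
        let mut entries = (*self.0).clone(); // copy-on-write
        entries.push((k, v));
        self.0 = Arc::new(entries); // fresh root pointer
    }
}

fn main() {
    let a = Map(Arc::new(Vec::new()));
    let mut b = a.clone();
    assert!(Map::ptr_eq(&a, &b)); // clones share storage
    b.insert(1, 2);
    assert!(!Map::ptr_eq(&a, &b)); // any write is visible as a pointer change
}
```

A consequence worth noting: revisions cached during an `update_atomic` attempt survive only if that attempt wins the compare-and-swap; losers simply rebuild against the fresh snapshot.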
@@ -1709,13 +1739,17 @@ struct SavedRepoSnapshot {
 }

 impl RepoSnapshot {
-    fn new(id: RepoId, replica_id: ReplicaId, revisions: RevisionCache) -> Self {
+    fn new(id: RepoId, replica_id: ReplicaId) -> Self {
         Self {
             id,
             history: History::new(replica_id),
             branches: Default::default(),
             branch_ids_by_name: Default::default(),
-            revisions,
+            revisions: btree::Map::from_ordered_entries([(
+                RevisionId::default(),
+                Revision::default(),
+            )]),
+            document_ref_counts: Default::default(),
         }
     }
@@ -1724,9 +1758,11 @@ impl RepoSnapshot {
             && btree::Map::ptr_eq(&this.branches, &other.branches)
             && btree::Map::ptr_eq(&this.branch_ids_by_name, &other.branch_ids_by_name)
             && History::ptr_eq(&this.history, &other.history)
+            && btree::Map::ptr_eq(&this.revisions, &other.revisions)
+            && btree::Map::ptr_eq(&this.document_ref_counts, &other.document_ref_counts)
     }

-    async fn load(id: RepoId, revisions: RevisionCache, kv: &dyn KvStore) -> Result<Self> {
+    async fn load(id: RepoId, kv: &dyn KvStore) -> Result<Self> {
         let repo_bytes = kv.load(id.to_be_bytes(), "root".into()).await?;
         let saved_repo = serde_bare::from_slice::<SavedRepoSnapshot>(&repo_bytes)?;
         Ok(Self {
@@ -1734,7 +1770,11 @@ impl RepoSnapshot {
             history: History::load(saved_repo.history, kv).await?,
             branches: btree::Map::load_root(saved_repo.branches, kv).await?,
             branch_ids_by_name: btree::Map::load_root(saved_repo.branch_ids_by_name, kv).await?,
-            revisions,
+            revisions: btree::Map::from_ordered_entries([(
+                RevisionId::default(),
+                Revision::default(),
+            )]),
+            document_ref_counts: Default::default(),
         })
     }
@@ -1761,7 +1801,7 @@ impl RepoSnapshot {
             },
         );
         self.branch_ids_by_name.insert(name.clone(), branch_id);
-        self.revisions.save(&branch_id.into(), Default::default());
+        self.revisions.insert(branch_id.into(), Default::default());

         (
             Operation::CreateBranch(operations::CreateBranch {
@@ -1785,6 +1825,7 @@ impl RepoSnapshot {
             return Ok(Default::default());
         }

+        let branch_id = operation.branch_id();
         let mut new_head;
         match &operation {
             Operation::CreateBranch(op) => {
@@ -1826,26 +1867,62 @@ impl RepoSnapshot {
         let flushed_operations = self.history.insert(operation, kv).await?;

         // The following ensures that a revision for the branch head is always present.
-        #[cfg(not(any(test, feature = "test-support")))]
-        self.build_revision(&new_head, kv).await?;
-        #[cfg(any(test, feature = "test-support"))]
-        self.build_revision(&new_head, kv).await.unwrap();
+        let mut revision = if cfg!(any(test, feature = "test-support")) {
+            self.load_revision(&new_head, kv).await.unwrap()
+        } else {
+            self.load_revision(&new_head, kv).await?
+        };
+        revision
+            .load_documents(
+                self.document_ref_counts
+                    .iter()
+                    .filter_map(|((doc_branch_id, doc_id), _)| {
+                        if *doc_branch_id == branch_id {
+                            Some(*doc_id)
+                        } else {
+                            None
+                        }
+                    }),
+                kv,
+            )
+            .await?;
+        self.revisions.insert(new_head, revision);

         Ok(flushed_operations)
     }

-    async fn build_revision(
+    async fn cached_revision(
+        repo_id: RepoId,
+        revision_id: &RevisionId,
+        revisions: &mut btree::Map<RevisionId, Revision>,
+        kv: &dyn KvStore,
+    ) -> Option<Revision> {
+        if let Some(revision) = revisions.get(revision_id) {
+            Some(revision.clone())
+        } else if let Ok(revision) = Revision::load(repo_id, revision_id, kv).await {
+            revisions.insert(revision_id.clone(), revision.clone());
+            Some(revision)
+        } else {
+            None
+        }
+    }
+
+    async fn load_revision(
         &mut self,
         revision_id: &RevisionId,
         kv: &dyn KvStore,
     ) -> Result<Revision> {
-        if let Some(revision) = self.revisions.load(revision_id, kv).await {
+        if let Some(revision) =
+            Self::cached_revision(self.id, revision_id, &mut self.revisions, kv).await
+        {
             Ok(revision)
         } else {
             let mut new_revisions = HashMap::default();
             let mut rewind = self.history.rewind(revision_id, kv).await?;
             while let Some(ancestor_id) = rewind.next(kv).await? {
-                if let Some(ancestor_revision) = self.revisions.load(&ancestor_id, kv).await {
+                if let Some(ancestor_revision) =
+                    Self::cached_revision(self.id, &ancestor_id, &mut self.revisions, kv).await
+                {
                     new_revisions.insert(ancestor_id, ancestor_revision);
                     for replay_op in rewind.replay() {
                         let parent_revision = new_revisions[&replay_op.parent_revision_id].clone();
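`cached_revision` is an associated function that takes the revisions map as an explicit `&mut` parameter instead of being a method on `&mut self`; that keeps the borrow narrow, so `load_revision` can consult the cache while other parts of the snapshot (like `history`) remain usable, and a freshly loaded revision is memoized on the way out. A reduced sketch of the get-or-load step, with `Store` as a hypothetical, synchronous stand-in for the KV-backed `Revision::load`:

```rust
use std::collections::HashMap;

struct Store; // hypothetical stand-in for the KV store

impl Store {
    fn load(&self, id: u64) -> Result<String, ()> {
        Ok(format!("revision-{id}"))
    }
}

// Get-or-load: hit the in-memory cache first, fall back to the store,
// and memoize whatever the store returns.
fn cached_revision(
    id: u64,
    cache: &mut HashMap<u64, String>,
    store: &Store,
) -> Option<String> {
    if let Some(rev) = cache.get(&id) {
        Some(rev.clone())
    } else if let Ok(rev) = store.load(id) {
        cache.insert(id, rev.clone());
        Some(rev)
    } else {
        None
    }
}
```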
@@ -1873,11 +1950,16 @@ impl RepoSnapshot {
                 }
             }

-            for (revision_id, revision) in new_revisions.drain() {
-                self.revisions.save(&revision_id, revision);
+            for (new_revision_id, revision) in new_revisions.drain() {
+                if self.revisions.contains_key(&new_revision_id) {
+                    continue;
+                }
+
+                revision.save(self.id, &new_revision_id, kv).await?;
+                self.revisions.insert(new_revision_id, revision);
             }

-            Ok(self.revisions.get(revision_id).unwrap())
+            Ok(self.revisions.get(revision_id).unwrap().clone())
         }
     }
 }
@@ -1951,6 +2033,14 @@ pub struct SavedRevision {
 }

 impl Revision {
+    fn ptr_eq(this: &Self, other: &Self) -> bool {
+        btree::Map::ptr_eq(&this.document_metadata, &other.document_metadata)
+            && btree::Sequence::ptr_eq(&this.document_fragments, &other.document_fragments)
+            && btree::Sequence::ptr_eq(&this.insertion_fragments, &other.insertion_fragments)
+            && Rope::ptr_eq(&this.visible_text, &other.visible_text)
+            && Rope::ptr_eq(&this.hidden_text, &other.hidden_text)
+    }
+
     async fn exists(repo_id: RepoId, id: &RevisionId, kv: &dyn KvStore) -> bool {
         kv.load(repo_id.to_be_bytes(), id.db_key()).await.is_ok()
     }
@@ -1969,6 +2059,14 @@ impl Revision {
         })
     }

+    async fn load_documents(
+        &mut self,
+        documents: impl IntoIterator<Item = DocumentId>,
+        kv: &dyn KvStore,
+    ) -> Result<()> {
+        todo!()
+    }
+
     async fn save(&self, repo_id: RepoId, id: &RevisionId, kv: &dyn KvStore) -> Result<()> {
         let saved_revision = SavedRevision {
             document_metadata: self.document_metadata.save(kv).await?,
@@ -2189,8 +2287,8 @@ mod tests {
         deterministic.run_until_parked();
         let branch_b = repo_b.load_branch("main").await.unwrap();
-        let doc1_b = branch_b.load_document(doc1_a.id).unwrap();
-        let doc2_b = branch_b.load_document(doc2_a.id).unwrap();
+        let doc1_b = branch_b.load_document(doc1_a.id).await.unwrap();
+        let doc2_b = branch_b.load_document(doc2_a.id).await.unwrap();
         assert_eq!(doc1_b.text().to_string(), "abc");
         assert_eq!(doc2_b.text().to_string(), "def");
@@ -2211,7 +2309,7 @@ mod tests {
         );
         let repo_a2 = client_a2.repo(repo_b.id).await.unwrap();
         let branch_a2 = repo_a2.load_branch("main").await.unwrap();
-        let doc1_a2 = branch_a2.load_document(doc1_a.id).unwrap();
+        let doc1_a2 = branch_a2.load_document(doc1_a.id).await.unwrap();
         assert_eq!(doc1_a2.text().to_string(), "aghic");
     }
@@ -2405,7 +2503,7 @@ mod tests {
             client_id: usize,
             repo_id: RepoId,
             branch_name: Arc<str>,
-            document_id: OperationId,
+            document_id: DocumentId,
             edits: Vec<(Range<usize>, String)>,
         },
     }
@@ -2602,7 +2700,7 @@ mod tests {
                     .ok_or_else(|| anyhow!("client not found"))?;
                 let repo = client.repo(*repo_id).await?;
                 let branch = repo.load_branch(branch_name.clone()).await?;
-                let document = branch.load_document(*document_id)?;
+                let document = branch.load_document(*document_id).await?;
                 document.edit(edits.iter().cloned());
             }
} }

View file

@@ -1,6 +1,6 @@
 use crate::{
     operations::{CreateBranch, CreateDocument, Edit},
-    OperationCount, OperationId, ReplicaId, RepoId, Request, RevisionId, RoomCredentials,
+    BranchId, OperationCount, OperationId, ReplicaId, RepoId, Request, RevisionId, RoomCredentials,
 };
 use collections::BTreeMap;
 use serde::{Deserialize, Serialize};
@@ -141,6 +141,14 @@ impl Operation {
         }
     }

+    pub fn branch_id(&self) -> BranchId {
+        match self {
+            Operation::CreateBranch(op) => op.id,
+            Operation::CreateDocument(op) => op.branch_id,
+            Operation::Edit(op) => op.branch_id,
+        }
+    }
+
     pub fn parent(&self) -> &RevisionId {
         match self {
             Operation::CreateDocument(op) => &op.parent,
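`branch_id()` gives `apply_operation` a uniform way to ask any operation which branch it belongs to; the new code there uses it to reload only the ref-counted documents of the affected branch into the rebuilt head revision.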

View file

@@ -1,8 +1,8 @@
 use crate::{
     btree::{self, Bias},
     dense_id::DenseId,
-    AnchorRange, DocumentFragment, DocumentFragmentSummary, DocumentMetadata, InsertionFragment,
-    OperationId, Revision, RevisionId, RopeBuilder, Tombstone,
+    AnchorRange, BranchId, DocumentFragment, DocumentFragmentSummary, DocumentId, DocumentMetadata,
+    InsertionFragment, OperationId, Revision, RevisionId, RopeBuilder, Tombstone,
 };
 use anyhow::Result;
 use serde::{Deserialize, Serialize};
@@ -11,21 +11,21 @@ use std::{cmp, sync::Arc};
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct CreateBranch {
-    pub id: OperationId,
+    pub id: BranchId,
     pub parent: RevisionId,
     pub name: Arc<str>,
 }

 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct CreateDocument {
-    pub id: OperationId,
-    pub branch_id: OperationId,
+    pub id: DocumentId,
+    pub branch_id: BranchId,
     pub parent: RevisionId,
 }

 impl CreateDocument {
     pub fn apply(self, revision: &mut Revision) {
-        let mut cursor = revision.document_fragments.cursor::<OperationId>();
+        let mut cursor = revision.document_fragments.cursor::<DocumentId>();
         let mut new_document_fragments = cursor.slice(&self.id, Bias::Right, &());
         new_document_fragments.push(
             DocumentFragment {
@@ -63,8 +63,8 @@ impl CreateDocument {
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct Edit {
     pub id: OperationId,
-    pub document_id: OperationId,
-    pub branch_id: OperationId,
+    pub document_id: DocumentId,
+    pub branch_id: BranchId,
     pub parent: RevisionId,
     pub edits: SmallVec<[(AnchorRange, Arc<str>); 2]>,
 }

View file

@@ -1,69 +0,0 @@
-use crate::{btree::KvStore, Executor, RepoId, Revision, RevisionId};
-use collections::HashMap;
-use futures::{channel::mpsc, StreamExt};
-use parking_lot::Mutex;
-use std::sync::Arc;
-use util::ResultExt;
-
-#[derive(Clone, Debug)]
-pub struct RevisionCache {
-    repo_id: RepoId,
-    revisions: Arc<Mutex<HashMap<RevisionId, Revision>>>,
-    revisions_to_save: mpsc::UnboundedSender<(RevisionId, Revision)>,
-}
-
-impl RevisionCache {
-    pub fn new<E: Executor>(repo_id: RepoId, executor: &E, kv: Arc<dyn KvStore>) -> Self {
-        let (revisions_to_save_tx, mut revisions_to_save_rx) =
-            mpsc::unbounded::<(RevisionId, Revision)>();
-        executor.spawn(async move {
-            while let Some((revision_id, revision)) = revisions_to_save_rx.next().await {
-                if !Revision::exists(repo_id, &revision_id, &*kv).await {
-                    revision.save(repo_id, &revision_id, &*kv).await.log_err();
-                }
-            }
-        });
-
-        Self {
-            repo_id,
-            // Always consider the empty revision as cached.
-            revisions: Arc::new(Mutex::new(HashMap::from_iter([(
-                RevisionId::default(),
-                Revision::default(),
-            )]))),
-            revisions_to_save: revisions_to_save_tx,
-        }
-    }
-
-    pub fn get(&self, revision_id: &RevisionId) -> Option<Revision> {
-        self.revisions.lock().get(revision_id).cloned()
-    }
-
-    pub async fn load(&self, revision_id: &RevisionId, kv: &dyn KvStore) -> Option<Revision> {
-        if let Some(revision) = self.get(revision_id) {
-            Some(revision)
-        } else if let Some(revision) = Revision::load(self.repo_id, revision_id, kv).await.ok() {
-            Some(
-                self.revisions
-                    .lock()
-                    .entry(revision_id.clone())
-                    .or_insert(revision)
-                    .clone(),
-            )
-        } else {
-            None
-        }
-    }
-
-    pub fn save(&self, revision_id: &RevisionId, revision: Revision) {
-        self.revisions
-            .lock()
-            .entry(revision_id.clone())
-            .or_insert_with(|| {
-                let _ = self
-                    .revisions_to_save
-                    .unbounded_send((revision_id.clone(), revision.clone()));
-                revision
-            });
-    }
-}
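With the cache folded into `RepoSnapshot`, this deleted module's other responsibilities move as well: the pre-seeded empty revision becomes the initial entry of the snapshot's `revisions` map (see `RepoSnapshot::new` and `load` above), and the background save task is replaced by `load_revision` persisting newly built revisions inline via `revision.save(...).await?` before inserting them.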

View file

@@ -43,6 +43,10 @@ impl Rope {
         Self::default()
     }

+    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
+        Sequence::ptr_eq(&this.chunks, &other.chunks)
+    }
+
     pub async fn load_root(id: SavedId, kv: &dyn KvStore) -> Result<Self> {
         let chunks = Sequence::load_root(id, kv).await?;
         Ok(Self { chunks })
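`Rope::ptr_eq` simply delegates to the underlying chunk sequence's pointer comparison; it exists so the new `Revision::ptr_eq` above can compare `visible_text` and `hidden_text` in O(1) without walking the ropes.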