From 781aa92f26629c1eabd5521524396367af319b1f Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Mon, 17 May 2021 10:32:59 +0200
Subject: [PATCH 1/3] Don't summarize unnecessary information when resolving anchor

---
 zed/src/editor/buffer/mod.rs | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/zed/src/editor/buffer/mod.rs b/zed/src/editor/buffer/mod.rs
index 6bbebbbf26..27044e1dfe 100644
--- a/zed/src/editor/buffer/mod.rs
+++ b/zed/src/editor/buffer/mod.rs
@@ -1900,15 +1900,13 @@ impl Buffer {
             .item()
             .ok_or_else(|| anyhow!("split offset is out of range"))?;
 
-        let mut fragments_cursor = self
-            .fragments
-            .cursor::<FragmentIdRef, FragmentTextSummary>();
+        let mut fragments_cursor = self.fragments.cursor::<FragmentIdRef, usize>();
         fragments_cursor.seek(&FragmentIdRef::new(&split.fragment_id), SeekBias::Left, &());
         let fragment = fragments_cursor
             .item()
             .ok_or_else(|| anyhow!("fragment id does not exist"))?;
 
-        let mut ix = fragments_cursor.start().clone().visible;
+        let mut ix = *fragments_cursor.start();
         if fragment.visible {
             ix += offset - fragment.range_in_insertion.start;
         }
@@ -2316,7 +2314,7 @@ impl Default for InsertionSplitSummary {
 
 impl<'a> sum_tree::Dimension<'a, InsertionSplitSummary> for usize {
     fn add_summary(&mut self, summary: &InsertionSplitSummary) {
-        *self += &summary.extent;
+        *self += summary.extent;
     }
 }

From 84e0efe5df664dc599c0fe12f94ab629a11fc58b Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Mon, 17 May 2021 10:33:56 +0200
Subject: [PATCH 2/3] Save a few allocations when pushing to a `SumTree`

---
 zed/src/sum_tree/mod.rs | 42 ++++++++++++++++++++----------------------
 1 file changed, 20 insertions(+), 22 deletions(-)

diff --git a/zed/src/sum_tree/mod.rs b/zed/src/sum_tree/mod.rs
index b96b36c49d..90cffaebd6 100644
--- a/zed/src/sum_tree/mod.rs
+++ b/zed/src/sum_tree/mod.rs
@@ -207,27 +207,23 @@ impl<T: Item> SumTree<T> {
     pub fn push(&mut self, item: T, ctx: &<T::Summary as Summary>::Context) {
         let summary = item.summary();
         self.push_tree(
-            SumTree::from_child_trees(
-                vec![SumTree(Arc::new(Node::Leaf {
-                    summary: summary.clone(),
-                    items: ArrayVec::from_iter(Some(item)),
-                    item_summaries: ArrayVec::from_iter(Some(summary)),
-                }))],
-                ctx,
-            ),
+            SumTree(Arc::new(Node::Leaf {
+                summary: summary.clone(),
+                items: ArrayVec::from_iter(Some(item)),
+                item_summaries: ArrayVec::from_iter(Some(summary)),
+            })),
             ctx,
-        )
+        );
     }
 
     pub fn push_tree(&mut self, other: Self, ctx: &<T::Summary as Summary>::Context) {
-        let other_node = other.0.clone();
-        if !other_node.is_leaf() || other_node.items().len() > 0 {
-            if self.0.height() < other_node.height() {
-                for tree in other_node.child_trees() {
+        if !other.0.is_leaf() || other.0.items().len() > 0 {
+            if self.0.height() < other.0.height() {
+                for tree in other.0.child_trees() {
                     self.push_tree(tree.clone(), ctx);
                 }
             } else if let Some(split_tree) = self.push_tree_recursive(other, ctx) {
-                *self = Self::from_child_trees(vec![self.clone(), split_tree], ctx);
+                *self = Self::from_child_trees(self.clone(), split_tree, ctx);
             }
         }
     }
@@ -353,20 +349,22 @@ impl<T: Item> SumTree<T> {
     }
 
     fn from_child_trees(
-        child_trees: Vec<SumTree<T>>,
+        left: SumTree<T>,
+        right: SumTree<T>,
         ctx: &<T::Summary as Summary>::Context,
     ) -> Self {
-        let height = child_trees[0].0.height() + 1;
+        let height = left.0.height() + 1;
        let mut child_summaries = ArrayVec::new();
-        for child in &child_trees {
-            child_summaries.push(child.0.summary().clone());
-        }
-        let summary = sum(child_summaries.iter(), ctx);
+        child_summaries.push(left.0.summary().clone());
+        child_summaries.push(right.0.summary().clone());
+        let mut child_trees = ArrayVec::new();
+        child_trees.push(left);
+        child_trees.push(right);
         SumTree(Arc::new(Node::Internal {
             height,
-            summary,
+            summary: sum(child_summaries.iter(), ctx),
             child_summaries,
-            child_trees: ArrayVec::from_iter(child_trees),
+            child_trees,
         }))
     }
 

From a8ece757e4017f28348ba0d18d45b0173a8f3e85 Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Tue, 18 May 2021 11:31:40 +0200
Subject: [PATCH 3/3] Implement time::Global using a SmallVec

Version vectors are going to be small most of the time, especially as soon as
we introduce an optimization that will let us only track concurrent versions,
thus preventing the version vector from growing indefinitely over time in the
tree.
---
 gpui/Cargo.toml |  2 +-
 zed/Cargo.toml  |  2 +-
 zed/src/time.rs | 64 ++++++++++++++++++++----------------------------------
 3 files changed, 26 insertions(+), 42 deletions(-)

diff --git a/gpui/Cargo.toml b/gpui/Cargo.toml
index d421a5a2f3..1eba8612bf 100644
--- a/gpui/Cargo.toml
+++ b/gpui/Cargo.toml
@@ -23,7 +23,7 @@ scoped-pool = {path = "../scoped_pool"}
 seahash = "4.1"
 serde = {version = "1.0.125", features = ["derive"]}
 serde_json = "1.0.64"
-smallvec = "1.6.1"
+smallvec = {version = "1.6", features = ["union"]}
 smol = "1.2"
 tiny-skia = "0.5"
 tree-sitter = "0.17"
diff --git a/zed/Cargo.toml b/zed/Cargo.toml
index 2302fc6509..a1749f2474 100644
--- a/zed/Cargo.toml
+++ b/zed/Cargo.toml
@@ -36,7 +36,7 @@ seahash = "4.1"
 serde = {version = "1", features = ["derive"]}
 similar = "1.3"
 simplelog = "0.9"
-smallvec = "1.6.1"
+smallvec = {version = "1.6", features = ["union"]}
 smol = "1.2.5"
 
 [dev-dependencies]
diff --git a/zed/src/time.rs b/zed/src/time.rs
index 4836ed921c..f1cf4eef91 100644
--- a/zed/src/time.rs
+++ b/zed/src/time.rs
@@ -1,13 +1,9 @@
+use smallvec::SmallVec;
 use std::cmp::{self, Ordering};
-use std::collections::HashMap;
-use std::mem;
 use std::ops::{Add, AddAssign};
-use std::sync::Arc;
-
-use lazy_static::lazy_static;
 
 pub type ReplicaId = u16;
-pub type Seq = u64;
+pub type Seq = u32;
 
 #[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq, Ord, PartialOrd)]
 pub struct Local {
@@ -58,18 +54,8 @@ impl<'a> AddAssign<&'a Local> for Local {
     }
 }
 
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct Global(Arc<HashMap<ReplicaId, Seq>>);
-
-lazy_static! {
-    static ref DEFAULT_GLOBAL: Global = Global(Arc::new(HashMap::new()));
-}
-
-impl Default for Global {
-    fn default() -> Self {
-        DEFAULT_GLOBAL.clone()
-    }
-}
+#[derive(Clone, Debug, Default, Eq, PartialEq)]
+pub struct Global(SmallVec<[Local; 3]>);
 
 impl Global {
     pub fn new() -> Self {
@@ -77,21 +63,27 @@ impl Global {
     }
 
     pub fn get(&self, replica_id: ReplicaId) -> Seq {
-        *self.0.get(&replica_id).unwrap_or(&0)
+        self.0
+            .iter()
+            .find(|t| t.replica_id == replica_id)
+            .map_or(0, |t| t.value)
     }
 
     pub fn observe(&mut self, timestamp: Local) {
-        let map = Arc::make_mut(&mut self.0);
-        let value = map.entry(timestamp.replica_id).or_insert(0);
-        *value = cmp::max(*value, timestamp.value);
+        if let Some(entry) = self
+            .0
+            .iter_mut()
+            .find(|t| t.replica_id == timestamp.replica_id)
+        {
+            entry.value = cmp::max(entry.value, timestamp.value);
+        } else {
+            self.0.push(timestamp);
+        }
     }
 
     pub fn observe_all(&mut self, other: &Self) {
-        for (replica_id, value) in other.0.as_ref() {
-            self.observe(Local {
-                replica_id: *replica_id,
-                value: *value,
-            });
+        for timestamp in other.0.iter() {
+            self.observe(*timestamp);
         }
     }
 
@@ -100,9 +92,7 @@ impl Global {
     }
 
     pub fn changed_since(&self, other: &Self) -> bool {
-        self.0
-            .iter()
-            .any(|(replica_id, value)| *value > other.get(*replica_id))
+        self.0.iter().any(|t| t.value > other.get(t.replica_id))
     }
 }
 
@@ -110,8 +100,10 @@ impl PartialOrd for Global {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
         let mut global_ordering = Ordering::Equal;
 
-        for replica_id in self.0.keys().chain(other.0.keys()) {
-            let ordering = self.get(*replica_id).cmp(&other.get(*replica_id));
+        for timestamp in self.0.iter().chain(other.0.iter()) {
+            let ordering = self
+                .get(timestamp.replica_id)
+                .cmp(&other.get(timestamp.replica_id));
             if ordering != Ordering::Equal {
                 if global_ordering == Ordering::Equal {
                     global_ordering = ordering;
@@ -142,12 +134,4 @@ impl Lamport {
     pub fn observe(&mut self, timestamp: Self) {
         self.value = cmp::max(self.value, timestamp.value) + 1;
     }
-
-    pub fn to_bytes(&self) -> [u8; 24] {
-        let mut bytes = [0; 24];
-        bytes[0..8].copy_from_slice(unsafe { &mem::transmute::<u64, [u8; 8]>(self.value.to_be()) });
-        bytes[8..10]
-            .copy_from_slice(unsafe { &mem::transmute::<u16, [u8; 2]>(self.replica_id.to_be()) });
-        bytes
-    }
 }
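
Note on [PATCH 2/3], for readers skimming the diff: the toy program below is not the real zed::sum_tree API; the Tree/Node types and the push_old/push_new helpers are hypothetical stand-ins. It only sketches the allocation pattern the patch removes: previously every push built a one-element Vec and a temporary parent node via from_child_trees just to reuse the tree-merging path, whereas now the freshly built leaf is handed to push_tree directly, and from_child_trees takes exactly two children so it can fill an ArrayVec without an intermediate Vec.

use std::sync::Arc;

#[derive(Clone, Debug)]
enum Node {
    Leaf { items: Vec<u32> },
    Internal { children: Vec<Tree> },
}

#[derive(Clone, Debug)]
struct Tree(Arc<Node>);

impl Tree {
    fn new() -> Self {
        Tree(Arc::new(Node::Leaf { items: Vec::new() }))
    }

    // Stand-in for `SumTree::push_tree`: flatten `other` into this tree's leaf.
    // (The real code maintains a balanced B-tree; the toy only collects items.)
    fn push_tree(&mut self, other: Tree) {
        match &*other.0 {
            Node::Leaf { items } => {
                if let Node::Leaf { items: mine } = Arc::make_mut(&mut self.0) {
                    mine.extend_from_slice(items);
                }
            }
            Node::Internal { children } => {
                for child in children {
                    self.push_tree(child.clone());
                }
            }
        }
    }

    // Old shape: a one-element Vec plus an extra Internal node per pushed item.
    fn push_old(&mut self, item: u32) {
        let leaf = Tree(Arc::new(Node::Leaf { items: vec![item] }));
        let wrapper = Tree(Arc::new(Node::Internal { children: vec![leaf] }));
        self.push_tree(wrapper);
    }

    // New shape: the freshly built leaf goes straight to `push_tree`.
    fn push_new(&mut self, item: u32) {
        self.push_tree(Tree(Arc::new(Node::Leaf { items: vec![item] })));
    }
}

fn main() {
    let mut tree = Tree::new();
    tree.push_old(1); // pays for the wrapper Vec and Internal node
    tree.push_new(2); // skips both
    println!("{:?}", tree);
}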
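
Note on [PATCH 3/3]: the snippet below is a self-contained sketch of the SmallVec-backed version vector the patch switches to, not code taken from this series. The type and method names deliberately mirror zed/src/time.rs, but the only assumed dependency is smallvec = "1.6" with default features (the "union" feature the patch enables only shrinks the in-memory size of the vector; it does not change the API). The inline capacity of 3 matches the patch's expectation that most version vectors track only a handful of replicas, so lookups are a short linear scan over inline storage and no heap allocation happens until a fourth replica is observed.

use smallvec::SmallVec;
use std::cmp;

type ReplicaId = u16;
type Seq = u32;

// A single replica's logical clock reading, as in `time::Local`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Local {
    replica_id: ReplicaId,
    value: Seq,
}

// Up to three entries live inline; a fourth entry spills to the heap.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
struct Global(SmallVec<[Local; 3]>);

impl Global {
    fn get(&self, replica_id: ReplicaId) -> Seq {
        self.0
            .iter()
            .find(|t| t.replica_id == replica_id)
            .map_or(0, |t| t.value)
    }

    fn observe(&mut self, timestamp: Local) {
        if let Some(entry) = self
            .0
            .iter_mut()
            .find(|t| t.replica_id == timestamp.replica_id)
        {
            entry.value = cmp::max(entry.value, timestamp.value);
        } else {
            self.0.push(timestamp);
        }
    }

    fn changed_since(&self, other: &Self) -> bool {
        self.0.iter().any(|t| t.value > other.get(t.replica_id))
    }
}

fn main() {
    let mut a = Global::default();
    let mut b = Global::default();
    a.observe(Local { replica_id: 1, value: 5 });
    b.observe(Local { replica_id: 1, value: 3 });

    assert_eq!(a.get(1), 5);
    assert!(a.changed_since(&b)); // replica 1 advanced past what `b` has seen
    assert!(!b.changed_since(&a));
    assert!(!a.0.spilled()); // still inline: only one of the three slots is used
    println!("a = {:?}", a);
}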