Merge pull request #525 from zed-industries/preserve-worktrees
Grow worktrees monotonically when sharing and move most messages to the background
commit dc5a09b3f7
21 changed files with 662 additions and 380 deletions
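Most of the buffer-side hunks below share one idea: diagnostics updates now carry a Lamport timestamp (see the new `diagnostics_timestamp` field and the reworked `apply_diagnostic_update`), so concurrent updates resolve by last-writer-wins and replicas converge regardless of delivery order. A minimal standalone sketch of that pattern, using stand-in types rather than Zed's actual `clock` and `Buffer` internals:

```rust
// Sketch only: hypothetical Lamport/LamportClock/Diagnostics types that mirror
// the shape of the change, not Zed's real implementation.
type ReplicaId = u16;

#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Lamport {
    value: u32,            // compared first, so later ticks win
    replica_id: ReplicaId, // deterministic tie-breaker for concurrent updates
}

#[derive(Default)]
struct LamportClock {
    replica_id: ReplicaId,
    value: u32,
}

impl LamportClock {
    fn tick(&mut self) -> Lamport {
        self.value += 1;
        Lamport { value: self.value, replica_id: self.replica_id }
    }
    fn observe(&mut self, timestamp: Lamport) {
        // Keep the local clock ahead of everything we have seen.
        self.value = self.value.max(timestamp.value);
    }
}

#[derive(Default)]
struct Diagnostics {
    entries: Vec<String>,
    timestamp: Lamport,
}

impl Diagnostics {
    // Mirrors the guarded apply_diagnostic_update in the diff: stale updates
    // (timestamps not newer than what we already applied) are dropped.
    fn apply(&mut self, entries: Vec<String>, timestamp: Lamport, clock: &mut LamportClock) {
        if timestamp > self.timestamp {
            self.entries = entries;
            self.timestamp = timestamp;
            clock.observe(timestamp);
        }
    }
}

fn main() {
    // Two updates produced in order on the host...
    let mut host = LamportClock { replica_id: 0, value: 0 };
    let first = (vec!["warning A".to_string()], host.tick());
    let second = (vec!["warning B".to_string()], host.tick());

    // ...converge to the same state even when delivered in opposite orders.
    for updates in [[first.clone(), second.clone()], [second, first]] {
        let mut clock = LamportClock { replica_id: 1, value: 0 };
        let mut diagnostics = Diagnostics::default();
        for (entries, timestamp) in updates {
            diagnostics.apply(entries, timestamp, &mut clock);
        }
        assert_eq!(diagnostics.entries, vec!["warning B".to_string()]);
    }
}
```

Deriving `Ord` with `value` before `replica_id` makes later ticks win and breaks ties between concurrent updates deterministically.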
```diff
@@ -68,6 +68,7 @@ pub struct Buffer {
     remote_selections: TreeMap<ReplicaId, SelectionSet>,
     selections_update_count: usize,
     diagnostics_update_count: usize,
+    diagnostics_timestamp: clock::Lamport,
     file_update_count: usize,
     language_server: Option<LanguageServerState>,
     completion_triggers: Vec<String>,
```
```diff
@@ -425,23 +426,30 @@ impl Buffer {
         this.apply_ops(ops, cx)?;
 
         for selection_set in message.selections {
+            let lamport_timestamp = clock::Lamport {
+                replica_id: selection_set.replica_id as ReplicaId,
+                value: selection_set.lamport_timestamp,
+            };
             this.remote_selections.insert(
                 selection_set.replica_id as ReplicaId,
                 SelectionSet {
                     selections: proto::deserialize_selections(selection_set.selections),
-                    lamport_timestamp: clock::Lamport {
-                        replica_id: selection_set.replica_id as ReplicaId,
-                        value: selection_set.lamport_timestamp,
-                    },
+                    lamport_timestamp,
                 },
             );
+            this.text.lamport_clock.observe(lamport_timestamp);
         }
         let snapshot = this.snapshot();
         let entries = proto::deserialize_diagnostics(message.diagnostics);
         this.apply_diagnostic_update(
-            DiagnosticSet::from_sorted_entries(entries.into_iter().cloned(), &snapshot),
+            DiagnosticSet::from_sorted_entries(entries.iter().cloned(), &snapshot),
+            clock::Lamport {
+                replica_id: 0,
+                value: message.diagnostics_timestamp,
+            },
             cx,
         );
+
         this.completion_triggers = message.completion_triggers;
 
         Ok(this)
```
```diff
@@ -470,6 +478,7 @@ impl Buffer {
                 })
                 .collect(),
             diagnostics: proto::serialize_diagnostics(self.diagnostics.iter()),
+            diagnostics_timestamp: self.diagnostics_timestamp.value,
             completion_triggers: self.completion_triggers.clone(),
         }
     }
```
```diff
@@ -512,6 +521,7 @@ impl Buffer {
             selections_update_count: 0,
             diagnostics: Default::default(),
             diagnostics_update_count: 0,
+            diagnostics_timestamp: Default::default(),
             file_update_count: 0,
             language_server: None,
             completion_triggers: Default::default(),
```
```diff
@@ -1005,11 +1015,12 @@ impl Buffer {
         drop(edits_since_save);
 
         let set = DiagnosticSet::new(sanitized_diagnostics, content);
-        self.apply_diagnostic_update(set.clone(), cx);
+        let lamport_timestamp = self.text.lamport_clock.tick();
+        self.apply_diagnostic_update(set.clone(), lamport_timestamp, cx);
 
         let op = Operation::UpdateDiagnostics {
             diagnostics: set.iter().cloned().collect(),
-            lamport_timestamp: self.text.lamport_clock.tick(),
+            lamport_timestamp,
         };
         self.send_operation(op, cx);
         Ok(())
```
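Note the hoisted `tick()` above: previously only the outgoing `Operation::UpdateDiagnostics` was stamped, while the local application carried no timestamp at all. Sharing a single tick means the timestamp recorded locally in `diagnostics_timestamp` is exactly the one remote replicas will compare against. A small sketch of the shape (stand-in `Clock`, not Zed's `clock` crate):

```rust
#[derive(Default)]
struct Clock {
    value: u32,
}

impl Clock {
    fn tick(&mut self) -> u32 {
        self.value += 1;
        self.value
    }
}

fn main() {
    let mut clock = Clock::default();

    // One tick, shared by the local state update and the broadcast operation,
    // so both sides of the replication compare the same timestamp.
    let timestamp = clock.tick();
    let local_diagnostics_timestamp = timestamp;
    let op_timestamp = timestamp;
    assert_eq!(local_diagnostics_timestamp, op_timestamp);
}
```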
```diff
@@ -1288,6 +1299,13 @@ impl Buffer {
         self.text.wait_for_edits(edit_ids)
     }
 
+    pub fn wait_for_anchors<'a>(
+        &mut self,
+        anchors: impl IntoIterator<Item = &'a Anchor>,
+    ) -> impl Future<Output = ()> {
+        self.text.wait_for_anchors(anchors)
+    }
+
     pub fn wait_for_version(&mut self, version: clock::Global) -> impl Future<Output = ()> {
         self.text.wait_for_version(version)
     }
```
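`wait_for_anchors` joins the existing `wait_for_edits` / `wait_for_version` family: callers get a future that resolves once the buffer has caught up with the operations the argument depends on. One plausible way to build such a method (a sketch, not Zed's implementation; for simplicity it returns the `oneshot::Receiver` directly instead of `impl Future<Output = ()>`):

```rust
use futures::channel::oneshot;
use std::collections::BTreeMap;

struct WaitList {
    version: u32,
    waiters: BTreeMap<u32, Vec<oneshot::Sender<()>>>,
}

impl WaitList {
    /// Returns a future that resolves once `observe` has seen `version`.
    fn wait_for_version(&mut self, version: u32) -> oneshot::Receiver<()> {
        let (tx, rx) = oneshot::channel();
        if self.version >= version {
            tx.send(()).ok(); // already caught up; resolve immediately
        } else {
            self.waiters.entry(version).or_default().push(tx);
        }
        rx
    }

    /// Called whenever applying remote operations advances the version.
    fn observe(&mut self, version: u32) {
        self.version = self.version.max(version);
        // Wake every waiter whose target version has now been reached.
        let ready: Vec<u32> = self.waiters.range(..=self.version).map(|(&v, _)| v).collect();
        for v in ready {
            for tx in self.waiters.remove(&v).unwrap_or_default() {
                tx.send(()).ok();
            }
        }
    }
}

fn main() {
    let mut waits = WaitList { version: 0, waiters: BTreeMap::new() };
    let caught_up = waits.wait_for_version(2);
    waits.observe(2); // applying ops advanced the version; the future resolves
    futures::executor::block_on(caught_up).unwrap();
}
```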
```diff
@@ -1672,11 +1690,12 @@ impl Buffer {
             }
             Operation::UpdateDiagnostics {
                 diagnostics: diagnostic_set,
-                ..
+                lamport_timestamp,
             } => {
                 let snapshot = self.snapshot();
                 self.apply_diagnostic_update(
                     DiagnosticSet::from_sorted_entries(diagnostic_set.iter().cloned(), &snapshot),
+                    lamport_timestamp,
                     cx,
                 );
             }
```
```diff
@@ -1710,11 +1729,20 @@ impl Buffer {
         }
     }
 
-    fn apply_diagnostic_update(&mut self, diagnostics: DiagnosticSet, cx: &mut ModelContext<Self>) {
-        self.diagnostics = diagnostics;
-        self.diagnostics_update_count += 1;
-        cx.notify();
-        cx.emit(Event::DiagnosticsUpdated);
+    fn apply_diagnostic_update(
+        &mut self,
+        diagnostics: DiagnosticSet,
+        lamport_timestamp: clock::Lamport,
+        cx: &mut ModelContext<Self>,
+    ) {
+        if lamport_timestamp > self.diagnostics_timestamp {
+            self.diagnostics = diagnostics;
+            self.diagnostics_timestamp = lamport_timestamp;
+            self.diagnostics_update_count += 1;
+            self.text.lamport_clock.observe(lamport_timestamp);
+            cx.notify();
+            cx.emit(Event::DiagnosticsUpdated);
+        }
     }
 
     #[cfg(not(test))]
```
```diff
@@ -25,7 +25,13 @@ pub fn serialize_operation(operation: &Operation) -> proto::Operation {
             replica_id: undo.id.replica_id as u32,
             local_timestamp: undo.id.value,
             lamport_timestamp: lamport_timestamp.value,
-            ranges: undo.ranges.iter().map(serialize_range).collect(),
+            version: From::from(&undo.version),
+            transaction_ranges: undo
+                .transaction_ranges
+                .iter()
+                .map(serialize_range)
+                .collect(),
+            transaction_version: From::from(&undo.transaction_version),
             counts: undo
                 .counts
                 .iter()
```
```diff
@@ -35,7 +41,6 @@ pub fn serialize_operation(operation: &Operation) -> proto::Operation {
                     count: *count,
                 })
                 .collect(),
-            version: From::from(&undo.version),
         }),
         Operation::UpdateSelections {
             selections,
```
```diff
@@ -183,6 +188,7 @@ pub fn deserialize_operation(message: proto::Operation) -> Result<Operation> {
                 replica_id: undo.replica_id as ReplicaId,
                 value: undo.local_timestamp,
             },
+            version: undo.version.into(),
             counts: undo
                 .counts
                 .into_iter()
```
```diff
@@ -196,8 +202,12 @@ pub fn deserialize_operation(message: proto::Operation) -> Result<Operation> {
                     )
                 })
                 .collect(),
-            ranges: undo.ranges.into_iter().map(deserialize_range).collect(),
-            version: undo.version.into(),
+            transaction_ranges: undo
+                .transaction_ranges
+                .into_iter()
+                .map(deserialize_range)
+                .collect(),
+            transaction_version: undo.transaction_version.into(),
         },
     }),
     proto::operation::Variant::UpdateSelections(message) => {
```
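Both directions of the protocol change lean on `serialize_range` / `deserialize_range` staying exact inverses, since the undo operation now ships `transaction_ranges` and `transaction_version` (plus the full `version` vector) in place of the precomputed `ranges`. A round-trip sketch of that helper pattern, using a hypothetical wire struct rather than the generated proto types:

```rust
// Hypothetical stand-in for the generated proto range message.
#[derive(Clone, Copy, Debug, PartialEq)]
struct WireRange {
    start: u64,
    end: u64,
}

fn serialize_range(range: &std::ops::Range<usize>) -> WireRange {
    WireRange { start: range.start as u64, end: range.end as u64 }
}

fn deserialize_range(wire: WireRange) -> std::ops::Range<usize> {
    wire.start as usize..wire.end as usize
}

fn main() {
    // The deserializer must reproduce exactly what the serializer consumed.
    let range = 3..9;
    assert_eq!(deserialize_range(serialize_range(&range)), range);
}
```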
```diff
@@ -12,7 +12,7 @@ use std::{
     time::{Duration, Instant},
 };
 use unindent::Unindent as _;
-use util::test::Network;
+use util::{post_inc, test::Network};
 
 #[cfg(test)]
 #[ctor::ctor]
```
```diff
@@ -1194,6 +1194,7 @@ fn test_random_collaboration(cx: &mut MutableAppContext, mut rng: StdRng) {
 
     let mut now = Instant::now();
     let mut mutation_count = operations;
+    let mut next_diagnostic_id = 0;
     let mut active_selections = BTreeMap::default();
     loop {
         let replica_index = rng.gen_range(0..replica_ids.len());
```
```diff
@@ -1234,7 +1235,27 @@ fn test_random_collaboration(cx: &mut MutableAppContext, mut rng: StdRng) {
                 });
                 mutation_count -= 1;
             }
-            40..=49 if replica_ids.len() < max_peers => {
+            40..=49 if mutation_count != 0 && replica_id == 0 => {
+                let entry_count = rng.gen_range(1..=5);
+                buffer.update(cx, |buffer, cx| {
+                    let diagnostics = (0..entry_count)
+                        .map(|_| {
+                            let range = buffer.random_byte_range(0, &mut rng);
+                            DiagnosticEntry {
+                                range,
+                                diagnostic: Diagnostic {
+                                    message: post_inc(&mut next_diagnostic_id).to_string(),
+                                    ..Default::default()
+                                },
+                            }
+                        })
+                        .collect();
+                    log::info!("peer {} setting diagnostics: {:?}", replica_id, diagnostics);
+                    buffer.update_diagnostics(diagnostics, None, cx).unwrap();
+                });
+                mutation_count -= 1;
+            }
+            50..=59 if replica_ids.len() < max_peers => {
                 let old_buffer = buffer.read(cx).to_proto();
                 let new_replica_id = replica_ids.len() as ReplicaId;
                 log::info!(
```
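The new `40..=49` arm exercises the diagnostics path under the same randomized schedule as edits. `post_inc` comes from the `util` crate; it returns the previous value and increments in place, giving each generated diagnostic a unique message. A minimal equivalent for reference:

```rust
// Behaviorally equivalent sketch of util's post_inc, specialized to usize.
fn post_inc(value: &mut usize) -> usize {
    let prev = *value;
    *value += 1;
    prev // return the value *before* the increment
}

fn main() {
    let mut next_id = 0;
    let messages: Vec<String> = (0..3).map(|_| post_inc(&mut next_id).to_string()).collect();
    assert_eq!(messages, ["0", "1", "2"]);
}
```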
```diff
@@ -1251,14 +1272,14 @@ fn test_random_collaboration(cx: &mut MutableAppContext, mut rng: StdRng) {
                 replica_ids.push(new_replica_id);
                 network.replicate(replica_id, new_replica_id);
             }
-            50..=69 if mutation_count != 0 => {
+            60..=69 if mutation_count != 0 => {
                 buffer.update(cx, |buffer, cx| {
                     buffer.randomly_undo_redo(&mut rng, cx);
                     log::info!("buffer {} text: {:?}", buffer.replica_id(), buffer.text());
                 });
                 mutation_count -= 1;
             }
-            70..=99 if network.has_unreceived(replica_id) => {
+            _ if network.has_unreceived(replica_id) => {
                 let ops = network
                     .receive(replica_id)
                     .into_iter()
```
```diff
@@ -1295,15 +1316,25 @@ fn test_random_collaboration(cx: &mut MutableAppContext, mut rng: StdRng) {
         }
     }
 
-    let first_buffer = buffers[0].read(cx);
+    let first_buffer = buffers[0].read(cx).snapshot();
     for buffer in &buffers[1..] {
-        let buffer = buffer.read(cx);
+        let buffer = buffer.read(cx).snapshot();
         assert_eq!(
             buffer.text(),
             first_buffer.text(),
             "Replica {} text != Replica 0 text",
             buffer.replica_id()
         );
+        assert_eq!(
+            buffer
+                .diagnostics_in_range::<_, usize>(0..buffer.len())
+                .collect::<Vec<_>>(),
+            first_buffer
+                .diagnostics_in_range::<_, usize>(0..first_buffer.len())
+                .collect::<Vec<_>>(),
+            "Replica {} diagnostics != Replica 0 diagnostics",
+            buffer.replica_id()
+        );
     }
 
     for buffer in &buffers {
```
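Reading each replica through `snapshot()` and asserting on diagnostics as well as text strengthens the convergence check: once the simulated network drains, every replica must agree with replica 0 on both. The shape of that final assertion, with a stand-in snapshot type:

```rust
// Stand-in replica snapshot, not the real Buffer snapshot.
#[derive(Debug, PartialEq)]
struct ReplicaSnapshot {
    text: String,
    diagnostics: Vec<String>,
}

fn assert_converged(replicas: &[ReplicaSnapshot]) {
    let first = &replicas[0];
    for (i, replica) in replicas.iter().enumerate().skip(1) {
        assert_eq!(replica.text, first.text, "Replica {} text != Replica 0 text", i);
        assert_eq!(
            replica.diagnostics, first.diagnostics,
            "Replica {} diagnostics != Replica 0 diagnostics",
            i
        );
    }
}

fn main() {
    let replicas = vec![
        ReplicaSnapshot { text: "ab".into(), diagnostics: vec!["0".into()] },
        ReplicaSnapshot { text: "ab".into(), diagnostics: vec!["0".into()] },
    ];
    assert_converged(&replicas);
}
```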