From a74c5073a480c9c6133e64bdc29bac2aba0b9fc1 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 2 Jan 2023 17:24:03 -0800 Subject: [PATCH 01/60] Start work on allowing random collab test to be minimized Represent operations as an explicit enum. --- crates/collab/src/tests.rs | 55 +- .../src/tests/randomized_integration_tests.rs | 576 ++++++++++++------ crates/gpui/src/app/test_app_context.rs | 1 + 3 files changed, 439 insertions(+), 193 deletions(-) diff --git a/crates/collab/src/tests.rs b/crates/collab/src/tests.rs index 8dc29f3d60..8b52c7ddcf 100644 --- a/crates/collab/src/tests.rs +++ b/crates/collab/src/tests.rs @@ -21,8 +21,9 @@ use parking_lot::Mutex; use project::{Project, WorktreeId}; use settings::Settings; use std::{ + cell::{Ref, RefCell, RefMut}, env, - ops::Deref, + ops::{Deref, DerefMut}, path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst}, @@ -218,13 +219,10 @@ impl TestServer { let client = TestClient { client, username: name.to_string(), - local_projects: Default::default(), - remote_projects: Default::default(), - next_root_dir_id: 0, + state: Default::default(), user_store, fs, language_registry: Arc::new(LanguageRegistry::test()), - buffers: Default::default(), }; client.wait_for_current_user(cx).await; client @@ -323,13 +321,18 @@ impl Drop for TestServer { struct TestClient { client: Arc, username: String, - local_projects: Vec>, - remote_projects: Vec>, - next_root_dir_id: usize, + state: RefCell, pub user_store: ModelHandle, language_registry: Arc, fs: Arc, +} + +#[derive(Default)] +struct TestClientState { + local_projects: Vec>, + remote_projects: Vec>, buffers: HashMap, HashSet>>, + next_root_dir_id: usize, } impl Deref for TestClient { @@ -367,6 +370,38 @@ impl TestClient { .await; } + fn local_projects<'a>(&'a self) -> impl Deref>> + 'a { + Ref::map(self.state.borrow(), |state| &state.local_projects) + } + + fn remote_projects<'a>(&'a self) -> impl Deref>> + 'a { + Ref::map(self.state.borrow(), 
|state| &state.remote_projects) + } + + fn local_projects_mut<'a>(&'a self) -> impl DerefMut>> + 'a { + RefMut::map(self.state.borrow_mut(), |state| &mut state.local_projects) + } + + fn remote_projects_mut<'a>(&'a self) -> impl DerefMut>> + 'a { + RefMut::map(self.state.borrow_mut(), |state| &mut state.remote_projects) + } + + fn buffers_for_project<'a>( + &'a self, + project: &ModelHandle, + ) -> impl DerefMut>> + 'a { + RefMut::map(self.state.borrow_mut(), |state| { + state.buffers.entry(project.clone()).or_default() + }) + } + + fn buffers<'a>( + &'a self, + ) -> impl DerefMut, HashSet>>> + 'a + { + RefMut::map(self.state.borrow_mut(), |state| &mut state.buffers) + } + fn summarize_contacts(&self, cx: &TestAppContext) -> ContactsSummary { self.user_store.read_with(cx, |store, _| ContactsSummary { current: store @@ -449,11 +484,11 @@ impl TestClient { }) } - fn create_new_root_dir(&mut self) -> PathBuf { + fn create_new_root_dir(&self) -> PathBuf { format!( "/{}-root-{}", self.username, - util::post_inc(&mut self.next_root_dir_id) + util::post_inc(&mut self.state.borrow_mut().next_root_dir_id) ) .into() } diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index a42d4f7d32..d9d1c1c8e4 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -1,5 +1,5 @@ use crate::{ - db::{self, NewUserParams}, + db::{self, NewUserParams, UserId}, rpc::{CLEANUP_TIMEOUT, RECONNECT_TIMEOUT}, tests::{TestClient, TestServer}, }; @@ -15,16 +15,190 @@ use lsp::FakeLanguageServer; use parking_lot::Mutex; use project::{search::SearchQuery, Project}; use rand::prelude::*; -use std::{env, path::PathBuf, sync::Arc}; +use std::{env, path::PathBuf, rc::Rc, sync::Arc}; + +struct TestPlan { + rng: StdRng, + allow_server_restarts: bool, + allow_client_reconnection: bool, + allow_client_disconnection: bool, +} + +#[derive(Debug)] +enum Operation 
{ + AddConnection { + user_id: UserId, + }, + RemoveConnection { + user_id: UserId, + }, + BounceConnection { + user_id: UserId, + }, + RestartServer, + RunUntilParked, + MutateClient { + user_id: UserId, + operation: ClientOperation, + }, +} + +#[derive(Debug)] +enum ClientOperation { + AcceptIncomingCall, + RejectIncomingCall, + LeaveCall, + InviteContactToCall { user_id: UserId }, + OpenLocalProject { root: PathBuf }, + OpenRemoteProject { host_id: UserId, root: String }, + AddWorktreeToProject { id: u64, new_path: PathBuf }, + CloseProject { id: u64 }, +} + +impl TestPlan { + fn next_operation( + &mut self, + clients: &[(Rc, TestAppContext)], + offline_users: &[(UserId, String)], + ) -> Operation { + let operation = loop { + break match self.rng.gen_range(0..100) { + 0..=9 if !offline_users.is_empty() => { + let user_id = offline_users[self.rng.gen_range(0..offline_users.len())].0; + Operation::AddConnection { user_id } + } + 10..=14 if clients.len() > 1 && self.allow_client_disconnection => { + let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; + let user_id = client.current_user_id(cx); + Operation::RemoveConnection { user_id } + } + 15..=19 if clients.len() > 1 && self.allow_client_reconnection => { + let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; + let user_id = client.current_user_id(cx); + Operation::BounceConnection { user_id } + } + 20..=24 if self.allow_server_restarts => Operation::RestartServer, + 25..=29 => Operation::RunUntilParked, + _ if !clients.is_empty() => { + let ix = self.rng.gen_range(0..clients.len()); + let (client, cx) = &clients[ix]; + let user_id = client.current_user_id(cx); + let operation = self.next_client_operation(clients, ix); + Operation::MutateClient { user_id, operation } + } + _ => continue, + }; + }; + operation + } + + fn next_client_operation( + &mut self, + clients: &[(Rc, TestAppContext)], + client_ix: usize, + ) -> ClientOperation { + let (client, cx) = &clients[client_ix]; + let 
call = cx.read(ActiveCall::global); + + loop { + match self.rng.gen_range(0..100) { + // Respond to an incoming call + 0..=19 => { + if call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { + return if self.rng.gen_bool(0.7) { + ClientOperation::AcceptIncomingCall + } else { + ClientOperation::RejectIncomingCall + }; + } + } + + // Invite a contact to the current call + 20..=29 => { + let available_contacts = client.user_store.read_with(cx, |user_store, _| { + user_store + .contacts() + .iter() + .filter(|contact| contact.online && !contact.busy) + .cloned() + .collect::>() + }); + if !available_contacts.is_empty() { + let contact = available_contacts.choose(&mut self.rng).unwrap(); + return ClientOperation::InviteContactToCall { + user_id: UserId(contact.user.id as i32), + }; + } + } + + // Leave the current call + 30..=39 => { + if self.allow_client_disconnection + && call.read_with(cx, |call, _| call.room().is_some()) + { + return ClientOperation::LeaveCall; + } + } + + // Open a remote project + 40..=49 => { + if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) { + let remote_projects = room.read_with(cx, |room, _| { + room.remote_participants() + .values() + .flat_map(|participant| { + participant.projects.iter().map(|project| { + ( + UserId::from_proto(participant.user.id), + project.worktree_root_names[0].clone(), + ) + }) + }) + .collect::>() + }); + if !remote_projects.is_empty() { + let (host_id, root) = + remote_projects.choose(&mut self.rng).unwrap().clone(); + return ClientOperation::OpenRemoteProject { host_id, root }; + } + } + } + + // Open a local project + 50..=59 => { + let root = client.create_new_root_dir(); + return ClientOperation::OpenLocalProject { root }; + } + + // Add a worktree to a local project + 60..=69 if !client.local_projects().is_empty() => { + let project = client + .local_projects() + .choose(&mut self.rng) + .unwrap() + .clone(); + + // let paths = client.fs.paths().await; + // let path = 
paths.choose(&mut self.rng).unwrap(); + + // if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) { + // // + // } + } + + _ => continue, + }; + } + } +} #[gpui::test(iterations = 100)] async fn test_random_collaboration( cx: &mut TestAppContext, deterministic: Arc, - rng: StdRng, + mut rng: StdRng, ) { deterministic.forbid_parking(); - let rng = Arc::new(Mutex::new(rng)); let max_peers = env::var("MAX_PEERS") .map(|i| i.parse().expect("invalid `MAX_PEERS` variable")) @@ -56,6 +230,13 @@ async fn test_random_collaboration( available_users.push((user_id, username)); } + let plan = Arc::new(Mutex::new(TestPlan { + allow_server_restarts: rng.gen_bool(0.7), + allow_client_reconnection: rng.gen_bool(0.7), + allow_client_disconnection: rng.gen_bool(0.1), + rng, + })); + for (ix, (user_id_a, _)) in available_users.iter().enumerate() { for (user_id_b, _) in &available_users[ix + 1..] { server @@ -74,20 +255,19 @@ async fn test_random_collaboration( } let mut clients = Vec::new(); - let mut user_ids = Vec::new(); + let mut client_tasks = Vec::new(); let mut op_start_signals = Vec::new(); let mut next_entity_id = 100000; - let allow_server_restarts = rng.lock().gen_bool(0.7); - let allow_client_reconnection = rng.lock().gen_bool(0.7); - let allow_client_disconnection = rng.lock().gen_bool(0.1); - let mut operations = 0; - while operations < max_operations { - let distribution = rng.lock().gen_range(0..100); - match distribution { - 0..=19 if !available_users.is_empty() => { - let client_ix = rng.lock().gen_range(0..available_users.len()); - let (_, username) = available_users.remove(client_ix); + for _ in 0..max_operations { + let next_operation = plan.lock().next_operation(&clients, &available_users); + match next_operation { + Operation::AddConnection { user_id } => { + let user_ix = available_users + .iter() + .position(|(id, _)| *id == user_id) + .unwrap(); + let (_, username) = available_users.remove(user_ix); log::info!("Adding new connection for {}", 
username); next_entity_id += 100000; let mut client_cx = TestAppContext::new( @@ -102,47 +282,45 @@ async fn test_random_collaboration( ); let op_start_signal = futures::channel::mpsc::unbounded(); - let client = server.create_client(&mut client_cx, &username).await; - user_ids.push(client.current_user_id(&client_cx)); + let client = Rc::new(server.create_client(&mut client_cx, &username).await); op_start_signals.push(op_start_signal.0); - clients.push(client_cx.foreground().spawn(simulate_client( + clients.push((client.clone(), client_cx.clone())); + client_tasks.push(client_cx.foreground().spawn(simulate_client( client, op_start_signal.1, - allow_client_disconnection, - rng.clone(), + plan.clone(), client_cx, ))); log::info!("Added connection for {}", username); - operations += 1; } - 20..=24 if clients.len() > 1 && allow_client_disconnection => { - let client_ix = rng.lock().gen_range(1..clients.len()); - log::info!( - "Simulating full disconnection of user {}", - user_ids[client_ix] - ); - let removed_user_id = user_ids.remove(client_ix); + Operation::RemoveConnection { user_id } => { + log::info!("Simulating full disconnection of user {}", user_id); + let client_ix = clients + .iter() + .position(|(client, cx)| client.current_user_id(cx) == user_id) + .unwrap(); let user_connection_ids = server .connection_pool .lock() - .user_connection_ids(removed_user_id) + .user_connection_ids(user_id) .collect::>(); assert_eq!(user_connection_ids.len(), 1); let removed_peer_id = user_connection_ids[0].into(); - let client = clients.remove(client_ix); + let (client, mut client_cx) = clients.remove(client_ix); + let client_task = client_tasks.remove(client_ix); op_start_signals.remove(client_ix); server.forbid_connections(); server.disconnect_client(removed_peer_id); deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); deterministic.start_waiting(); - log::info!("Waiting for user {} to exit...", removed_user_id); - let (client, mut client_cx) = client.await; + 
log::info!("Waiting for user {} to exit...", user_id); + client_task.await; deterministic.finish_waiting(); server.allow_connections(); - for project in &client.remote_projects { + for project in client.remote_projects().iter() { project.read_with(&client_cx, |project, _| { assert!( project.is_read_only(), @@ -151,14 +329,20 @@ async fn test_random_collaboration( ) }); } - for user_id in &user_ids { - let contacts = server.app_state.db.get_contacts(*user_id).await.unwrap(); + + for (client, cx) in &clients { + let contacts = server + .app_state + .db + .get_contacts(client.current_user_id(cx)) + .await + .unwrap(); let pool = server.connection_pool.lock(); for contact in contacts { - if let db::Contact::Accepted { user_id, .. } = contact { - if pool.is_user_online(user_id) { + if let db::Contact::Accepted { user_id: id, .. } = contact { + if pool.is_user_online(id) { assert_ne!( - user_id, removed_user_id, + id, user_id, "removed client is still a contact of another peer" ); } @@ -167,18 +351,14 @@ async fn test_random_collaboration( } log::info!("{} removed", client.username); - available_users.push((removed_user_id, client.username.clone())); + available_users.push((user_id, client.username.clone())); client_cx.update(|cx| { cx.clear_globals(); drop(client); }); - - operations += 1; } - 25..=29 if clients.len() > 1 && allow_client_reconnection => { - let client_ix = rng.lock().gen_range(1..clients.len()); - let user_id = user_ids[client_ix]; + Operation::BounceConnection { user_id } => { log::info!("Simulating temporary disconnection of user {}", user_id); let user_connection_ids = server .connection_pool @@ -189,10 +369,9 @@ async fn test_random_collaboration( let peer_id = user_connection_ids[0].into(); server.disconnect_client(peer_id); deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); - operations += 1; } - 30..=34 if allow_server_restarts => { + Operation::RestartServer => { log::info!("Simulating server restart"); server.reset().await; 
deterministic.advance_clock(RECEIVE_TIMEOUT); @@ -208,39 +387,41 @@ async fn test_random_collaboration( assert_eq!(stale_room_ids, vec![]); } - _ if !op_start_signals.is_empty() => { - while operations < max_operations && rng.lock().gen_bool(0.7) { - op_start_signals - .choose(&mut *rng.lock()) - .unwrap() - .unbounded_send(()) - .unwrap(); - operations += 1; - } - - if rng.lock().gen_bool(0.8) { - deterministic.run_until_parked(); - } + Operation::RunUntilParked => { + deterministic.run_until_parked(); + } + + Operation::MutateClient { user_id, operation } => { + let client_ix = clients + .iter() + .position(|(client, cx)| client.current_user_id(cx) == user_id) + .unwrap(); + op_start_signals[client_ix] + .unbounded_send(operation) + .unwrap(); } - _ => {} } } drop(op_start_signals); deterministic.start_waiting(); - let clients = futures::future::join_all(clients).await; + futures::future::join_all(client_tasks).await; deterministic.finish_waiting(); deterministic.run_until_parked(); for (client, client_cx) in &clients { - for guest_project in &client.remote_projects { + for guest_project in client.remote_projects().iter() { guest_project.read_with(client_cx, |guest_project, cx| { let host_project = clients.iter().find_map(|(client, cx)| { - let project = client.local_projects.iter().find(|host_project| { - host_project.read_with(cx, |host_project, _| { - host_project.remote_id() == guest_project.remote_id() - }) - })?; + let project = client + .local_projects() + .iter() + .find(|host_project| { + host_project.read_with(cx, |host_project, _| { + host_project.remote_id() == guest_project.remote_id() + }) + })? 
+ .clone(); Some((project, cx)) }); @@ -305,7 +486,8 @@ async fn test_random_collaboration( }); } - for (guest_project, guest_buffers) in &client.buffers { + let buffers = client.buffers().clone(); + for (guest_project, guest_buffers) in &buffers { let project_id = if guest_project.read_with(client_cx, |project, _| { project.is_local() || project.is_read_only() }) { @@ -318,11 +500,15 @@ async fn test_random_collaboration( let guest_user_id = client.user_id().unwrap(); let host_project = clients.iter().find_map(|(client, cx)| { - let project = client.local_projects.iter().find(|host_project| { - host_project.read_with(cx, |host_project, _| { - host_project.remote_id() == Some(project_id) - }) - })?; + let project = client + .local_projects() + .iter() + .find(|host_project| { + host_project.read_with(cx, |host_project, _| { + host_project.remote_id() == Some(project_id) + }) + })? + .clone(); Some((client.user_id().unwrap(), project, cx)) }); @@ -398,12 +584,11 @@ async fn test_random_collaboration( } async fn simulate_client( - mut client: TestClient, - mut op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, - can_hang_up: bool, - rng: Arc>, + client: Rc, + mut op_start_signal: futures::channel::mpsc::UnboundedReceiver, + plan: Arc>, mut cx: TestAppContext, -) -> (TestClient, TestAppContext) { +) { // Setup language server let mut language = Language::new( LanguageConfig { @@ -418,7 +603,7 @@ async fn simulate_client( name: "the-fake-language-server", capabilities: lsp::LanguageServer::full_capabilities(), initializer: Some(Box::new({ - let rng = rng.clone(); + let plan = plan.clone(); let fs = client.fs.clone(); move |fake_server: &mut FakeLanguageServer| { fake_server.handle_request::( @@ -460,16 +645,16 @@ async fn simulate_client( fake_server.handle_request::({ let fs = fs.clone(); - let rng = rng.clone(); + let plan = plan.clone(); move |_, _| { let fs = fs.clone(); - let rng = rng.clone(); + let plan = plan.clone(); async move { let files = 
fs.files().await; - let mut rng = rng.lock(); - let count = rng.gen_range::(1..3); + let mut plan = plan.lock(); + let count = plan.rng.gen_range::(1..3); let files = (0..count) - .map(|_| files.choose(&mut *rng).unwrap()) + .map(|_| files.choose(&mut plan.rng).unwrap()) .collect::>(); log::info!("LSP: Returning definitions in files {:?}", &files); Ok(Some(lsp::GotoDefinitionResponse::Array( @@ -486,16 +671,16 @@ async fn simulate_client( }); fake_server.handle_request::({ - let rng = rng.clone(); + let plan = plan.clone(); move |_, _| { let mut highlights = Vec::new(); - let highlight_count = rng.lock().gen_range(1..=5); + let highlight_count = plan.lock().rng.gen_range(1..=5); for _ in 0..highlight_count { - let start_row = rng.lock().gen_range(0..100); - let start_column = rng.lock().gen_range(0..100); + let start_row = plan.lock().rng.gen_range(0..100); + let start_column = plan.lock().rng.gen_range(0..100); let start = PointUtf16::new(start_row, start_column); - let end_row = rng.lock().gen_range(0..100); - let end_column = rng.lock().gen_range(0..100); + let end_row = plan.lock().rng.gen_range(0..100); + let end_column = plan.lock().rng.gen_range(0..100); let end = PointUtf16::new(end_row, end_column); let range = if start > end { end..start } else { start..end }; highlights.push(lsp::DocumentHighlight { @@ -517,50 +702,62 @@ async fn simulate_client( client.language_registry.add(Arc::new(language)); while op_start_signal.next().await.is_some() { - if let Err(error) = - randomly_mutate_client(&mut client, can_hang_up, rng.clone(), &mut cx).await - { + if let Err(error) = randomly_mutate_client(&client, plan.clone(), &mut cx).await { log::error!("{} error: {:?}", client.username, error); } cx.background().simulate_random_delay().await; } log::info!("{}: done", client.username); - - (client, cx) } +// async fn apply_client_operation( +// client: &mut TestClient, +// plan: Arc>, +// operation: ClientOperation, +// cx: &mut TestAppContext, +// ) -> Result<()> { 
+// match operation { +// ClientOperation::AcceptIncomingCall => todo!(), +// ClientOperation::RejectIncomingCall => todo!(), +// ClientOperation::OpenLocalProject { path } => todo!(), +// ClientOperation::AddWorktreeToProject { +// existing_path, +// new_path, +// } => todo!(), +// ClientOperation::CloseProject { existing_path } => todo!(), +// } +// } + async fn randomly_mutate_client( - client: &mut TestClient, - can_hang_up: bool, - rng: Arc>, + client: &Rc, + plan: Arc>, cx: &mut TestAppContext, ) -> Result<()> { - let choice = rng.lock().gen_range(0..100); + let choice = plan.lock().rng.gen_range(0..100); match choice { - 0..=19 => randomly_mutate_active_call(client, can_hang_up, &rng, cx).await?, - 20..=49 => randomly_mutate_projects(client, &rng, cx).await?, - 50..=59 if !client.local_projects.is_empty() || !client.remote_projects.is_empty() => { - randomly_mutate_worktrees(client, &rng, cx).await?; + 0..=19 => randomly_mutate_active_call(client, &plan, cx).await?, + 20..=49 => randomly_mutate_projects(client, &plan, cx).await?, + 50..=59 if !client.local_projects().is_empty() || !client.remote_projects().is_empty() => { + randomly_mutate_worktrees(client, &plan, cx).await?; } - 60..=84 if !client.local_projects.is_empty() || !client.remote_projects.is_empty() => { - randomly_query_and_mutate_buffers(client, &rng, cx).await?; + 60..=84 if !client.local_projects().is_empty() || !client.remote_projects().is_empty() => { + randomly_query_and_mutate_buffers(client, &plan, cx).await?; } - _ => randomly_mutate_fs(client, &rng).await, + _ => randomly_mutate_fs(client, &plan).await, } Ok(()) } async fn randomly_mutate_active_call( - client: &mut TestClient, - can_hang_up: bool, - rng: &Mutex, + client: &TestClient, + plan: &Arc>, cx: &mut TestAppContext, ) -> Result<()> { let active_call = cx.read(ActiveCall::global); if active_call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { - if rng.lock().gen_bool(0.7) { + if plan.lock().rng.gen_bool(0.7) { 
log::info!("{}: accepting incoming call", client.username); active_call .update(cx, |call, cx| call.accept_incoming(cx)) @@ -579,10 +776,10 @@ async fn randomly_mutate_active_call( .collect::>() }); - let distribution = rng.lock().gen_range(0..100); + let distribution = plan.lock().rng.gen_range(0..100); match distribution { 0..=29 if !available_contacts.is_empty() => { - let contact = available_contacts.choose(&mut *rng.lock()).unwrap(); + let contact = available_contacts.choose(&mut plan.lock().rng).unwrap(); log::info!( "{}: inviting {}", client.username, @@ -593,7 +790,8 @@ async fn randomly_mutate_active_call( .await?; } 30..=39 - if can_hang_up && active_call.read_with(cx, |call, _| call.room().is_some()) => + if plan.lock().allow_client_disconnection + && active_call.read_with(cx, |call, _| call.room().is_some()) => { log::info!("{}: hanging up", client.username); active_call.update(cx, |call, cx| call.hang_up(cx))?; @@ -605,16 +803,16 @@ async fn randomly_mutate_active_call( Ok(()) } -async fn randomly_mutate_fs(client: &mut TestClient, rng: &Mutex) { - let is_dir = rng.lock().gen::(); +async fn randomly_mutate_fs(client: &TestClient, plan: &Arc>) { + let is_dir = plan.lock().rng.gen::(); let mut new_path = client .fs .directories() .await - .choose(&mut *rng.lock()) + .choose(&mut plan.lock().rng) .unwrap() .clone(); - new_path.push(gen_file_name(rng)); + new_path.push(gen_file_name(&mut plan.lock().rng)); if is_dir { log::info!("{}: creating local dir at {:?}", client.username, new_path); client.fs.create_dir(&new_path).await.unwrap(); @@ -630,8 +828,8 @@ async fn randomly_mutate_fs(client: &mut TestClient, rng: &Mutex) { } async fn randomly_mutate_projects( - client: &mut TestClient, - rng: &Mutex, + client: &TestClient, + plan: &Arc>, cx: &mut TestAppContext, ) -> Result<()> { let active_call = cx.read(ActiveCall::global); @@ -647,10 +845,10 @@ async fn randomly_mutate_projects( Default::default() }; - let project = if remote_projects.is_empty() || 
rng.lock().gen() { - if client.local_projects.is_empty() || rng.lock().gen() { + let project = if remote_projects.is_empty() || plan.lock().rng.gen() { + if client.local_projects().is_empty() || plan.lock().rng.gen() { let paths = client.fs.paths().await; - let local_project = if paths.is_empty() || rng.lock().gen() { + let local_project = if paths.is_empty() || plan.lock().rng.gen() { let root_path = client.create_new_root_dir(); client.fs.create_dir(&root_path).await.unwrap(); client @@ -665,7 +863,7 @@ async fn randomly_mutate_projects( ); client.build_local_project(root_path, cx).await.0 } else { - let root_path = paths.choose(&mut *rng.lock()).unwrap(); + let root_path = paths.choose(&mut plan.lock().rng).unwrap(); log::info!( "{}: opening local project at {:?}", client.username, @@ -673,25 +871,29 @@ async fn randomly_mutate_projects( ); client.build_local_project(root_path, cx).await.0 }; - client.local_projects.push(local_project.clone()); + client.local_projects_mut().push(local_project.clone()); local_project } else { client - .local_projects - .choose(&mut *rng.lock()) + .local_projects() + .choose(&mut plan.lock().rng) .unwrap() .clone() } } else { - if client.remote_projects.is_empty() || rng.lock().gen() { - let remote_project_id = remote_projects.choose(&mut *rng.lock()).unwrap().id; - let remote_project = if let Some(project) = - client.remote_projects.iter().find(|project| { + if client.remote_projects().is_empty() || plan.lock().rng.gen() { + let remote_project_id = remote_projects.choose(&mut plan.lock().rng).unwrap().id; + let remote_projects = client.remote_projects().clone(); + let remote_project = if let Some(project) = remote_projects + .iter() + .find(|project| { project.read_with(cx, |project, _| { project.remote_id() == Some(remote_project_id) }) - }) { - project.clone() + }) + .cloned() + { + project } else { log::info!( "{}: opening remote project {}", @@ -710,15 +912,15 @@ async fn randomly_mutate_projects( ) }) .await?; - 
client.remote_projects.push(remote_project.clone()); + client.remote_projects_mut().push(remote_project.clone()); remote_project }; remote_project } else { client - .remote_projects - .choose(&mut *rng.lock()) + .remote_projects() + .choose(&mut plan.lock().rng) .unwrap() .clone() } @@ -740,11 +942,11 @@ async fn randomly_mutate_projects( } } - let choice = rng.lock().gen_range(0..100); + let choice = plan.lock().rng.gen_range(0..100); match choice { 0..=19 if project.read_with(cx, |project, _| project.is_local()) => { let paths = client.fs.paths().await; - let path = paths.choose(&mut *rng.lock()).unwrap(); + let path = paths.choose(&mut plan.lock().rng).unwrap(); log::info!( "{}: finding/creating local worktree for path {:?}", client.username, @@ -766,9 +968,9 @@ async fn randomly_mutate_projects( cx.update(|_| { client - .remote_projects + .remote_projects_mut() .retain(|remote_project| *remote_project != project); - client.buffers.remove(&project); + client.buffers().remove(&project); drop(project); }); } @@ -779,11 +981,11 @@ async fn randomly_mutate_projects( } async fn randomly_mutate_worktrees( - client: &mut TestClient, - rng: &Mutex, + client: &TestClient, + plan: &Arc>, cx: &mut TestAppContext, ) -> Result<()> { - let project = choose_random_project(client, rng).unwrap(); + let project = choose_random_project(client, &mut plan.lock().rng).unwrap(); let Some(worktree) = project.read_with(cx, |project, cx| { project .worktrees(cx) @@ -793,7 +995,7 @@ async fn randomly_mutate_worktrees( && worktree.entries(false).any(|e| e.is_file()) && worktree.root_entry().map_or(false, |e| e.is_dir()) }) - .choose(&mut *rng.lock()) + .choose(&mut plan.lock().rng) }) else { return Ok(()) }; @@ -802,9 +1004,9 @@ async fn randomly_mutate_worktrees( (worktree.id(), worktree.root_name().to_string()) }); - let is_dir = rng.lock().gen::(); + let is_dir = plan.lock().rng.gen::(); let mut new_path = PathBuf::new(); - new_path.push(gen_file_name(rng)); + 
new_path.push(gen_file_name(&mut plan.lock().rng)); if !is_dir { new_path.set_extension("rs"); } @@ -825,13 +1027,13 @@ async fn randomly_mutate_worktrees( } async fn randomly_query_and_mutate_buffers( - client: &mut TestClient, - rng: &Mutex, + client: &TestClient, + plan: &Arc>, cx: &mut TestAppContext, ) -> Result<()> { - let project = choose_random_project(client, rng).unwrap(); - let buffers = client.buffers.entry(project.clone()).or_default(); - let buffer = if buffers.is_empty() || rng.lock().gen() { + let project = choose_random_project(client, &mut plan.lock().rng).unwrap(); + let has_buffers_for_project = !client.buffers_for_project(&project).is_empty(); + let buffer = if !has_buffers_for_project || plan.lock().rng.gen() { let Some(worktree) = project.read_with(cx, |project, cx| { project .worktrees(cx) @@ -839,7 +1041,7 @@ async fn randomly_query_and_mutate_buffers( let worktree = worktree.read(cx); worktree.is_visible() && worktree.entries(false).any(|e| e.is_file()) }) - .choose(&mut *rng.lock()) + .choose(&mut plan.lock().rng) }) else { return Ok(()); }; @@ -848,7 +1050,7 @@ async fn randomly_query_and_mutate_buffers( let entry = worktree .entries(false) .filter(|e| e.is_file()) - .choose(&mut *rng.lock()) + .choose(&mut plan.lock().rng) .unwrap(); ( worktree.root_name().to_string(), @@ -875,13 +1077,18 @@ async fn randomly_query_and_mutate_buffers( worktree_root_name, buffer.read_with(cx, |buffer, _| buffer.remote_id()) ); - buffers.insert(buffer.clone()); + client.buffers_for_project(&project).insert(buffer.clone()); buffer } else { - buffers.iter().choose(&mut *rng.lock()).unwrap().clone() + client + .buffers_for_project(&project) + .iter() + .choose(&mut plan.lock().rng) + .unwrap() + .clone() }; - let choice = rng.lock().gen_range(0..100); + let choice = plan.lock().rng.gen_range(0..100); match choice { 0..=9 => { cx.update(|cx| { @@ -890,7 +1097,7 @@ async fn randomly_query_and_mutate_buffers( client.username, 
buffer.read(cx).file().unwrap().full_path(cx) ); - buffers.remove(&buffer); + client.buffers_for_project(&project).remove(&buffer); drop(buffer); }); } @@ -902,7 +1109,7 @@ async fn randomly_query_and_mutate_buffers( buffer.read(cx).remote_id(), buffer.read(cx).file().unwrap().full_path(cx) ); - let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); + let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len()); project.completions(&buffer, offset, cx) }); let completions = cx.background().spawn(async move { @@ -910,7 +1117,7 @@ async fn randomly_query_and_mutate_buffers( .await .map_err(|err| anyhow!("completions request failed: {:?}", err)) }); - if rng.lock().gen_bool(0.3) { + if plan.lock().rng.gen_bool(0.3) { log::info!("{}: detaching completions request", client.username); cx.update(|cx| completions.detach_and_log_err(cx)); } else { @@ -925,7 +1132,7 @@ async fn randomly_query_and_mutate_buffers( buffer.read(cx).remote_id(), buffer.read(cx).file().unwrap().full_path(cx) ); - let range = buffer.read(cx).random_byte_range(0, &mut *rng.lock()); + let range = buffer.read(cx).random_byte_range(0, &mut plan.lock().rng); project.code_actions(&buffer, range, cx) }); let code_actions = cx.background().spawn(async move { @@ -933,7 +1140,7 @@ async fn randomly_query_and_mutate_buffers( .await .map_err(|err| anyhow!("code actions request failed: {:?}", err)) }); - if rng.lock().gen_bool(0.3) { + if plan.lock().rng.gen_bool(0.3) { log::info!("{}: detaching code actions request", client.username); cx.update(|cx| code_actions.detach_and_log_err(cx)); } else { @@ -957,7 +1164,7 @@ async fn randomly_query_and_mutate_buffers( assert!(saved_version.observed_all(&requested_version)); Ok::<_, anyhow::Error>(()) }); - if rng.lock().gen_bool(0.3) { + if plan.lock().rng.gen_bool(0.3) { log::info!("{}: detaching save request", client.username); cx.update(|cx| save.detach_and_log_err(cx)); } else { @@ -972,7 +1179,7 @@ async fn randomly_query_and_mutate_buffers( 
buffer.read(cx).remote_id(), buffer.read(cx).file().unwrap().full_path(cx) ); - let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); + let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len()); project.prepare_rename(buffer, offset, cx) }); let prepare_rename = cx.background().spawn(async move { @@ -980,7 +1187,7 @@ async fn randomly_query_and_mutate_buffers( .await .map_err(|err| anyhow!("prepare rename request failed: {:?}", err)) }); - if rng.lock().gen_bool(0.3) { + if plan.lock().rng.gen_bool(0.3) { log::info!("{}: detaching prepare rename request", client.username); cx.update(|cx| prepare_rename.detach_and_log_err(cx)); } else { @@ -995,7 +1202,7 @@ async fn randomly_query_and_mutate_buffers( buffer.read(cx).remote_id(), buffer.read(cx).file().unwrap().full_path(cx) ); - let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); + let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len()); project.definition(&buffer, offset, cx) }); let definitions = cx.background().spawn(async move { @@ -1003,11 +1210,14 @@ async fn randomly_query_and_mutate_buffers( .await .map_err(|err| anyhow!("definitions request failed: {:?}", err)) }); - if rng.lock().gen_bool(0.3) { + if plan.lock().rng.gen_bool(0.3) { log::info!("{}: detaching definitions request", client.username); cx.update(|cx| definitions.detach_and_log_err(cx)); } else { - buffers.extend(definitions.await?.into_iter().map(|loc| loc.target.buffer)); + let definitions = definitions.await?; + client + .buffers_for_project(&project) + .extend(definitions.into_iter().map(|loc| loc.target.buffer)); } } 50..=54 => { @@ -1018,7 +1228,7 @@ async fn randomly_query_and_mutate_buffers( buffer.read(cx).remote_id(), buffer.read(cx).file().unwrap().full_path(cx) ); - let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); + let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len()); project.document_highlights(&buffer, offset, cx) }); let highlights = cx.background().spawn(async move { 
@@ -1026,7 +1236,7 @@ async fn randomly_query_and_mutate_buffers( .await .map_err(|err| anyhow!("highlights request failed: {:?}", err)) }); - if rng.lock().gen_bool(0.3) { + if plan.lock().rng.gen_bool(0.3) { log::info!("{}: detaching highlights request", client.username); cx.update(|cx| highlights.detach_and_log_err(cx)); } else { @@ -1035,7 +1245,7 @@ async fn randomly_query_and_mutate_buffers( } 55..=59 => { let search = project.update(cx, |project, cx| { - let query = rng.lock().gen_range('a'..='z'); + let query = plan.lock().rng.gen_range('a'..='z'); log::info!("{}: project-wide search {:?}", client.username, query); project.search(SearchQuery::text(query, false, false), cx) }); @@ -1044,11 +1254,14 @@ async fn randomly_query_and_mutate_buffers( .await .map_err(|err| anyhow!("search request failed: {:?}", err)) }); - if rng.lock().gen_bool(0.3) { + if plan.lock().rng.gen_bool(0.3) { log::info!("{}: detaching search request", client.username); cx.update(|cx| search.detach_and_log_err(cx)); } else { - buffers.extend(search.await?.into_keys()); + let search = search.await?; + client + .buffers_for_project(&project) + .extend(search.into_keys()); } } _ => { @@ -1059,10 +1272,10 @@ async fn randomly_query_and_mutate_buffers( buffer.remote_id(), buffer.file().unwrap().full_path(cx) ); - if rng.lock().gen_bool(0.7) { - buffer.randomly_edit(&mut *rng.lock(), 5, cx); + if plan.lock().rng.gen_bool(0.7) { + buffer.randomly_edit(&mut plan.lock().rng, 5, cx); } else { - buffer.randomly_undo_redo(&mut *rng.lock(), cx); + buffer.randomly_undo_redo(&mut plan.lock().rng, cx); } }); } @@ -1071,22 +1284,19 @@ async fn randomly_query_and_mutate_buffers( Ok(()) } -fn choose_random_project( - client: &mut TestClient, - rng: &Mutex, -) -> Option> { +fn choose_random_project(client: &TestClient, rng: &mut StdRng) -> Option> { client - .local_projects + .local_projects() .iter() - .chain(&client.remote_projects) - .choose(&mut *rng.lock()) + .chain(client.remote_projects().iter()) + 
.choose(rng) .cloned() } -fn gen_file_name(rng: &Mutex) -> String { +fn gen_file_name(rng: &mut StdRng) -> String { let mut name = String::new(); for _ in 0..10 { - let letter = rng.lock().gen_range('a'..='z'); + let letter = rng.gen_range('a'..='z'); name.push(letter); } name diff --git a/crates/gpui/src/app/test_app_context.rs b/crates/gpui/src/app/test_app_context.rs index 72f1f546fb..d8586f753b 100644 --- a/crates/gpui/src/app/test_app_context.rs +++ b/crates/gpui/src/app/test_app_context.rs @@ -27,6 +27,7 @@ use collections::BTreeMap; use super::{AsyncAppContext, RefCounts}; +#[derive(Clone)] pub struct TestAppContext { cx: Rc>, foreground_platform: Rc, From ce8dd5a286eb604de88550321003c84101ca3b7b Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 3 Jan 2023 18:05:13 -0800 Subject: [PATCH 02/60] wip --- .../src/tests/randomized_integration_tests.rs | 71 ++++++++++++++----- 1 file changed, 53 insertions(+), 18 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index d9d1c1c8e4..06c63cfde0 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -48,15 +48,27 @@ enum ClientOperation { AcceptIncomingCall, RejectIncomingCall, LeaveCall, - InviteContactToCall { user_id: UserId }, - OpenLocalProject { root: PathBuf }, - OpenRemoteProject { host_id: UserId, root: String }, - AddWorktreeToProject { id: u64, new_path: PathBuf }, - CloseProject { id: u64 }, + InviteContactToCall { + user_id: UserId, + }, + OpenLocalProject { + first_root_path: PathBuf, + }, + OpenRemoteProject { + host_id: UserId, + first_root_name: String, + }, + AddWorktreeToProject { + first_root_path: PathBuf, + new_root_path: PathBuf, + }, + CloseProject { + id: u64, + }, } impl TestPlan { - fn next_operation( + async fn next_operation( &mut self, clients: &[(Rc, TestAppContext)], offline_users: &[(UserId, String)], @@ -83,7 
+95,7 @@ impl TestPlan { let ix = self.rng.gen_range(0..clients.len()); let (client, cx) = &clients[ix]; let user_id = client.current_user_id(cx); - let operation = self.next_client_operation(clients, ix); + let operation = self.next_client_operation(clients, ix).await; Operation::MutateClient { user_id, operation } } _ => continue, @@ -92,7 +104,7 @@ impl TestPlan { operation } - fn next_client_operation( + async fn next_client_operation( &mut self, clients: &[(Rc, TestAppContext)], client_ix: usize, @@ -157,17 +169,25 @@ impl TestPlan { .collect::>() }); if !remote_projects.is_empty() { - let (host_id, root) = + let (host_id, first_root_name) = remote_projects.choose(&mut self.rng).unwrap().clone(); - return ClientOperation::OpenRemoteProject { host_id, root }; + return ClientOperation::OpenRemoteProject { + host_id, + first_root_name, + }; } } } // Open a local project 50..=59 => { - let root = client.create_new_root_dir(); - return ClientOperation::OpenLocalProject { root }; + let paths = client.fs.paths().await; + let first_root_path = if paths.is_empty() || self.rng.gen() { + client.create_new_root_dir() + } else { + paths.choose(&mut self.rng).unwrap().clone() + }; + return ClientOperation::OpenLocalProject { first_root_path }; } // Add a worktree to a local project @@ -178,12 +198,27 @@ impl TestPlan { .unwrap() .clone(); - // let paths = client.fs.paths().await; - // let path = paths.choose(&mut self.rng).unwrap(); + let first_root_path = project.read_with(cx, |project, cx| { + project + .visible_worktrees(cx) + .next() + .unwrap() + .read(cx) + .abs_path() + .to_path_buf() + }); - // if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) { - // // - // } + let paths = client.fs.paths().await; + let new_root_path = if paths.is_empty() || self.rng.gen() { + client.create_new_root_dir() + } else { + paths.choose(&mut self.rng).unwrap().clone() + }; + + return ClientOperation::AddWorktreeToProject { + first_root_path, + new_root_path, + }; } _ 
=> continue, @@ -260,7 +295,7 @@ async fn test_random_collaboration( let mut next_entity_id = 100000; for _ in 0..max_operations { - let next_operation = plan.lock().next_operation(&clients, &available_users); + let next_operation = plan.lock().next_operation(&clients, &available_users).await; match next_operation { Operation::AddConnection { user_id } => { let user_ix = available_users From f243633f3ef28e8f395d4ca3d423391dff0afc24 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 4 Jan 2023 15:16:06 -0800 Subject: [PATCH 03/60] Set up flow for mutating clients via explicit operation values --- crates/collab/src/tests.rs | 12 +- .../src/tests/randomized_integration_tests.rs | 1122 ++++++++++------- crates/text/src/text.rs | 20 +- 3 files changed, 656 insertions(+), 498 deletions(-) diff --git a/crates/collab/src/tests.rs b/crates/collab/src/tests.rs index 8b52c7ddcf..67d363ddc2 100644 --- a/crates/collab/src/tests.rs +++ b/crates/collab/src/tests.rs @@ -24,7 +24,7 @@ use std::{ cell::{Ref, RefCell, RefMut}, env, ops::{Deref, DerefMut}, - path::{Path, PathBuf}, + path::Path, sync::{ atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst}, Arc, @@ -332,7 +332,6 @@ struct TestClientState { local_projects: Vec>, remote_projects: Vec>, buffers: HashMap, HashSet>>, - next_root_dir_id: usize, } impl Deref for TestClient { @@ -483,15 +482,6 @@ impl TestClient { ) }) } - - fn create_new_root_dir(&self) -> PathBuf { - format!( - "/{}-root-{}", - self.username, - util::post_inc(&mut self.state.borrow_mut().next_root_dir_id) - ) - .into() - } } impl Drop for TestClient { diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 06c63cfde0..64792cf422 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -7,7 +7,7 @@ use anyhow::{anyhow, Result}; use call::ActiveCall; use client::RECEIVE_TIMEOUT; use collections::BTreeMap; 
-use fs::{FakeFs, Fs as _}; +use fs::Fs as _; use futures::StreamExt as _; use gpui::{executor::Deterministic, ModelHandle, TestAppContext}; use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16}; @@ -15,217 +15,13 @@ use lsp::FakeLanguageServer; use parking_lot::Mutex; use project::{search::SearchQuery, Project}; use rand::prelude::*; -use std::{env, path::PathBuf, rc::Rc, sync::Arc}; - -struct TestPlan { - rng: StdRng, - allow_server_restarts: bool, - allow_client_reconnection: bool, - allow_client_disconnection: bool, -} - -#[derive(Debug)] -enum Operation { - AddConnection { - user_id: UserId, - }, - RemoveConnection { - user_id: UserId, - }, - BounceConnection { - user_id: UserId, - }, - RestartServer, - RunUntilParked, - MutateClient { - user_id: UserId, - operation: ClientOperation, - }, -} - -#[derive(Debug)] -enum ClientOperation { - AcceptIncomingCall, - RejectIncomingCall, - LeaveCall, - InviteContactToCall { - user_id: UserId, - }, - OpenLocalProject { - first_root_path: PathBuf, - }, - OpenRemoteProject { - host_id: UserId, - first_root_name: String, - }, - AddWorktreeToProject { - first_root_path: PathBuf, - new_root_path: PathBuf, - }, - CloseProject { - id: u64, - }, -} - -impl TestPlan { - async fn next_operation( - &mut self, - clients: &[(Rc, TestAppContext)], - offline_users: &[(UserId, String)], - ) -> Operation { - let operation = loop { - break match self.rng.gen_range(0..100) { - 0..=9 if !offline_users.is_empty() => { - let user_id = offline_users[self.rng.gen_range(0..offline_users.len())].0; - Operation::AddConnection { user_id } - } - 10..=14 if clients.len() > 1 && self.allow_client_disconnection => { - let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; - let user_id = client.current_user_id(cx); - Operation::RemoveConnection { user_id } - } - 15..=19 if clients.len() > 1 && self.allow_client_reconnection => { - let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; - let user_id = 
client.current_user_id(cx); - Operation::BounceConnection { user_id } - } - 20..=24 if self.allow_server_restarts => Operation::RestartServer, - 25..=29 => Operation::RunUntilParked, - _ if !clients.is_empty() => { - let ix = self.rng.gen_range(0..clients.len()); - let (client, cx) = &clients[ix]; - let user_id = client.current_user_id(cx); - let operation = self.next_client_operation(clients, ix).await; - Operation::MutateClient { user_id, operation } - } - _ => continue, - }; - }; - operation - } - - async fn next_client_operation( - &mut self, - clients: &[(Rc, TestAppContext)], - client_ix: usize, - ) -> ClientOperation { - let (client, cx) = &clients[client_ix]; - let call = cx.read(ActiveCall::global); - - loop { - match self.rng.gen_range(0..100) { - // Respond to an incoming call - 0..=19 => { - if call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { - return if self.rng.gen_bool(0.7) { - ClientOperation::AcceptIncomingCall - } else { - ClientOperation::RejectIncomingCall - }; - } - } - - // Invite a contact to the current call - 20..=29 => { - let available_contacts = client.user_store.read_with(cx, |user_store, _| { - user_store - .contacts() - .iter() - .filter(|contact| contact.online && !contact.busy) - .cloned() - .collect::>() - }); - if !available_contacts.is_empty() { - let contact = available_contacts.choose(&mut self.rng).unwrap(); - return ClientOperation::InviteContactToCall { - user_id: UserId(contact.user.id as i32), - }; - } - } - - // Leave the current call - 30..=39 => { - if self.allow_client_disconnection - && call.read_with(cx, |call, _| call.room().is_some()) - { - return ClientOperation::LeaveCall; - } - } - - // Open a remote project - 40..=49 => { - if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) { - let remote_projects = room.read_with(cx, |room, _| { - room.remote_participants() - .values() - .flat_map(|participant| { - participant.projects.iter().map(|project| { - ( - 
UserId::from_proto(participant.user.id), - project.worktree_root_names[0].clone(), - ) - }) - }) - .collect::>() - }); - if !remote_projects.is_empty() { - let (host_id, first_root_name) = - remote_projects.choose(&mut self.rng).unwrap().clone(); - return ClientOperation::OpenRemoteProject { - host_id, - first_root_name, - }; - } - } - } - - // Open a local project - 50..=59 => { - let paths = client.fs.paths().await; - let first_root_path = if paths.is_empty() || self.rng.gen() { - client.create_new_root_dir() - } else { - paths.choose(&mut self.rng).unwrap().clone() - }; - return ClientOperation::OpenLocalProject { first_root_path }; - } - - // Add a worktree to a local project - 60..=69 if !client.local_projects().is_empty() => { - let project = client - .local_projects() - .choose(&mut self.rng) - .unwrap() - .clone(); - - let first_root_path = project.read_with(cx, |project, cx| { - project - .visible_worktrees(cx) - .next() - .unwrap() - .read(cx) - .abs_path() - .to_path_buf() - }); - - let paths = client.fs.paths().await; - let new_root_path = if paths.is_empty() || self.rng.gen() { - client.create_new_root_dir() - } else { - paths.choose(&mut self.rng).unwrap().clone() - }; - - return ClientOperation::AddWorktreeToProject { - first_root_path, - new_root_path, - }; - } - - _ => continue, - }; - } - } -} +use std::{ + env, + ops::Range, + path::{Path, PathBuf}, + rc::Rc, + sync::Arc, +}; #[gpui::test(iterations = 100)] async fn test_random_collaboration( @@ -246,7 +42,7 @@ async fn test_random_collaboration( let mut server = TestServer::start(&deterministic).await; let db = server.app_state.db.clone(); - let mut available_users = Vec::new(); + let mut users = Vec::new(); for ix in 0..max_peers { let username = format!("user-{}", ix + 1); let user_id = db @@ -262,47 +58,55 @@ async fn test_random_collaboration( .await .unwrap() .user_id; - available_users.push((user_id, username)); + users.push(UserTestPlan { + user_id, + username, + online: false, + 
next_root_id: 0, + }); + } + + for (ix, user_a) in users.iter().enumerate() { + for user_b in &users[ix + 1..] { + server + .app_state + .db + .send_contact_request(user_a.user_id, user_b.user_id) + .await + .unwrap(); + server + .app_state + .db + .respond_to_contact_request(user_b.user_id, user_a.user_id, true) + .await + .unwrap(); + } } let plan = Arc::new(Mutex::new(TestPlan { + users, allow_server_restarts: rng.gen_bool(0.7), allow_client_reconnection: rng.gen_bool(0.7), allow_client_disconnection: rng.gen_bool(0.1), rng, })); - for (ix, (user_id_a, _)) in available_users.iter().enumerate() { - for (user_id_b, _) in &available_users[ix + 1..] { - server - .app_state - .db - .send_contact_request(*user_id_a, *user_id_b) - .await - .unwrap(); - server - .app_state - .db - .respond_to_contact_request(*user_id_b, *user_id_a, true) - .await - .unwrap(); - } - } - let mut clients = Vec::new(); let mut client_tasks = Vec::new(); - let mut op_start_signals = Vec::new(); + let mut operation_channels = Vec::new(); let mut next_entity_id = 100000; - for _ in 0..max_operations { - let next_operation = plan.lock().next_operation(&clients, &available_users).await; + let mut i = 0; + while i < max_operations { + let next_operation = plan.lock().next_operation(&clients).await; match next_operation { Operation::AddConnection { user_id } => { - let user_ix = available_users - .iter() - .position(|(id, _)| *id == user_id) - .unwrap(); - let (_, username) = available_users.remove(user_ix); + let username = { + let mut plan = plan.lock(); + let mut user = plan.user(user_id); + user.online = true; + user.username.clone() + }; log::info!("Adding new connection for {}", username); next_entity_id += 100000; let mut client_cx = TestAppContext::new( @@ -316,18 +120,19 @@ async fn test_random_collaboration( cx.function_name.clone(), ); - let op_start_signal = futures::channel::mpsc::unbounded(); + let (operation_tx, operation_rx) = futures::channel::mpsc::unbounded(); let client = 
Rc::new(server.create_client(&mut client_cx, &username).await); - op_start_signals.push(op_start_signal.0); + operation_channels.push(operation_tx); clients.push((client.clone(), client_cx.clone())); client_tasks.push(client_cx.foreground().spawn(simulate_client( client, - op_start_signal.1, + operation_rx, plan.clone(), client_cx, ))); log::info!("Added connection for {}", username); + i += 1; } Operation::RemoveConnection { user_id } => { @@ -345,7 +150,7 @@ async fn test_random_collaboration( let removed_peer_id = user_connection_ids[0].into(); let (client, mut client_cx) = clients.remove(client_ix); let client_task = client_tasks.remove(client_ix); - op_start_signals.remove(client_ix); + operation_channels.remove(client_ix); server.forbid_connections(); server.disconnect_client(removed_peer_id); deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); @@ -386,11 +191,12 @@ async fn test_random_collaboration( } log::info!("{} removed", client.username); - available_users.push((user_id, client.username.clone())); + plan.lock().user(user_id).online = false; client_cx.update(|cx| { cx.clear_globals(); drop(client); }); + i += 1; } Operation::BounceConnection { user_id } => { @@ -404,6 +210,7 @@ async fn test_random_collaboration( let peer_id = user_connection_ids[0].into(); server.disconnect_client(peer_id); deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); + i += 1; } Operation::RestartServer => { @@ -420,25 +227,27 @@ async fn test_random_collaboration( .await .unwrap(); assert_eq!(stale_room_ids, vec![]); + i += 1; } Operation::RunUntilParked => { deterministic.run_until_parked(); } - Operation::MutateClient { user_id, operation } => { - let client_ix = clients - .iter() - .position(|(client, cx)| client.current_user_id(cx) == user_id) - .unwrap(); - op_start_signals[client_ix] - .unbounded_send(operation) - .unwrap(); + Operation::MutateClients(user_ids) => { + for user_id in user_ids { + let client_ix = clients + .iter() + 
.position(|(client, cx)| client.current_user_id(cx) == user_id) + .unwrap(); + operation_channels[client_ix].unbounded_send(()).unwrap(); + i += 1; + } } } } - drop(op_start_signals); + drop(operation_channels); deterministic.start_waiting(); futures::future::join_all(client_tasks).await; deterministic.finish_waiting(); @@ -618,9 +427,331 @@ async fn test_random_collaboration( } } +struct TestPlan { + rng: StdRng, + users: Vec, + allow_server_restarts: bool, + allow_client_reconnection: bool, + allow_client_disconnection: bool, +} + +struct UserTestPlan { + user_id: UserId, + username: String, + next_root_id: usize, + online: bool, +} + +#[derive(Debug)] +enum Operation { + AddConnection { user_id: UserId }, + RemoveConnection { user_id: UserId }, + BounceConnection { user_id: UserId }, + RestartServer, + RunUntilParked, + MutateClients(Vec), +} + +#[derive(Debug)] +enum ClientOperation { + AcceptIncomingCall, + RejectIncomingCall, + LeaveCall, + InviteContactToCall { + user_id: UserId, + }, + OpenLocalProject { + first_root_name: String, + }, + OpenRemoteProject { + host_id: UserId, + first_root_name: String, + }, + AddWorktreeToProject { + project_root_name: String, + new_root_path: PathBuf, + }, + CloseRemoteProject { + project_root_name: String, + }, + OpenBuffer { + project_root_name: String, + full_path: PathBuf, + }, + EditBuffer { + project_root_name: String, + full_path: PathBuf, + edits: Vec<(Range, Arc)>, + }, + Other, +} + +impl TestPlan { + async fn next_operation(&mut self, clients: &[(Rc, TestAppContext)]) -> Operation { + let operation = loop { + break match self.rng.gen_range(0..100) { + 0..=19 if clients.len() < self.users.len() => { + let user = self + .users + .iter() + .filter(|u| !u.online) + .choose(&mut self.rng) + .unwrap(); + Operation::AddConnection { + user_id: user.user_id, + } + } + 20..=24 if clients.len() > 1 && self.allow_client_disconnection => { + let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; + let user_id = 
client.current_user_id(cx); + Operation::RemoveConnection { user_id } + } + 25..=29 if clients.len() > 1 && self.allow_client_reconnection => { + let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; + let user_id = client.current_user_id(cx); + Operation::BounceConnection { user_id } + } + 30..=34 if self.allow_server_restarts && clients.len() > 1 => { + Operation::RestartServer + } + 35..=39 => Operation::RunUntilParked, + _ if !clients.is_empty() => { + let user_ids = (0..self.rng.gen_range(0..10)) + .map(|_| { + let ix = self.rng.gen_range(0..clients.len()); + let (client, cx) = &clients[ix]; + client.current_user_id(cx) + }) + .collect(); + Operation::MutateClients(user_ids) + } + _ => continue, + }; + }; + operation + } + + async fn next_client_operation( + &mut self, + client: &TestClient, + cx: &TestAppContext, + ) -> ClientOperation { + let user_id = client.current_user_id(cx); + let call = cx.read(ActiveCall::global); + let operation = loop { + match self.rng.gen_range(0..100) { + // Mutate the call + 0..=19 => match self.rng.gen_range(0..100_u32) { + // Respond to an incoming call + 0..=39 => { + if call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { + break if self.rng.gen_bool(0.7) { + ClientOperation::AcceptIncomingCall + } else { + ClientOperation::RejectIncomingCall + }; + } + } + + // Invite a contact to the current call + 30..=89 => { + let available_contacts = + client.user_store.read_with(cx, |user_store, _| { + user_store + .contacts() + .iter() + .filter(|contact| contact.online && !contact.busy) + .cloned() + .collect::>() + }); + if !available_contacts.is_empty() { + let contact = available_contacts.choose(&mut self.rng).unwrap(); + break ClientOperation::InviteContactToCall { + user_id: UserId(contact.user.id as i32), + }; + } + } + + // Leave the current call + 90.. 
=> { + if self.allow_client_disconnection + && call.read_with(cx, |call, _| call.room().is_some()) + { + break ClientOperation::LeaveCall; + } + } + }, + + // Mutate projects + 20..=39 => match self.rng.gen_range(0..100_u32) { + // Open a remote project + 0..=30 => { + if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) { + let remote_projects = room.read_with(cx, |room, _| { + room.remote_participants() + .values() + .flat_map(|participant| { + participant.projects.iter().map(|project| { + ( + UserId::from_proto(participant.user.id), + project.worktree_root_names[0].clone(), + ) + }) + }) + .collect::>() + }); + if !remote_projects.is_empty() { + let (host_id, first_root_name) = + remote_projects.choose(&mut self.rng).unwrap().clone(); + break ClientOperation::OpenRemoteProject { + host_id, + first_root_name, + }; + } + } + } + + // Close a remote project + 31..=40 => { + if !client.remote_projects().is_empty() { + let project = client + .remote_projects() + .choose(&mut self.rng) + .unwrap() + .clone(); + let first_root_name = root_name_for_project(&project, cx); + break ClientOperation::CloseRemoteProject { + project_root_name: first_root_name, + }; + } + } + + // Open a local project + 41..=60 => { + let first_root_name = self.next_root_dir_name(user_id); + break ClientOperation::OpenLocalProject { first_root_name }; + } + + // Add a worktree to a local project + 61.. 
=> { + if !client.local_projects().is_empty() { + let project = client + .local_projects() + .choose(&mut self.rng) + .unwrap() + .clone(); + let project_root_name = root_name_for_project(&project, cx); + + let mut paths = client.fs.paths().await; + paths.remove(0); + let new_root_path = if paths.is_empty() || self.rng.gen() { + Path::new("/").join(&self.next_root_dir_name(user_id)) + } else { + paths.choose(&mut self.rng).unwrap().clone() + }; + + break ClientOperation::AddWorktreeToProject { + project_root_name, + new_root_path, + }; + } + } + }, + + // Mutate buffers + 40..=79 => { + let Some(project) = choose_random_project(client, &mut self.rng) else { continue }; + let project_root_name = root_name_for_project(&project, cx); + + match self.rng.gen_range(0..100_u32) { + // Manipulate an existing buffer + 0..=80 => { + let Some(buffer) = client + .buffers_for_project(&project) + .iter() + .choose(&mut self.rng) + .cloned() else { continue }; + + match self.rng.gen_range(0..100_u32) { + 0..=9 => { + let (full_path, edits) = buffer.read_with(cx, |buffer, cx| { + ( + buffer.file().unwrap().full_path(cx), + buffer.get_random_edits(&mut self.rng, 3), + ) + }); + break ClientOperation::EditBuffer { + project_root_name, + full_path, + edits, + }; + } + _ => {} + } + } + + // Open a buffer + 81.. 
=> { + let worktree = project.read_with(cx, |project, cx| { + project + .worktrees(cx) + .filter(|worktree| { + let worktree = worktree.read(cx); + worktree.is_visible() + && worktree.entries(false).any(|e| e.is_file()) + }) + .choose(&mut self.rng) + }); + let Some(worktree) = worktree else { continue }; + let full_path = worktree.read_with(cx, |worktree, _| { + let entry = worktree + .entries(false) + .filter(|e| e.is_file()) + .choose(&mut self.rng) + .unwrap(); + if entry.path.as_ref() == Path::new("") { + Path::new(worktree.root_name()).into() + } else { + Path::new(worktree.root_name()).join(&entry.path) + } + }); + break ClientOperation::OpenBuffer { + project_root_name, + full_path, + }; + } + } + } + + _ => break ClientOperation::Other, + } + }; + operation + } + + fn next_root_dir_name(&mut self, user_id: UserId) -> String { + let user_ix = self + .users + .iter() + .position(|user| user.user_id == user_id) + .unwrap(); + let root_id = util::post_inc(&mut self.users[user_ix].next_root_id); + format!("dir-{user_id}-{root_id}") + } + + fn user(&mut self, user_id: UserId) -> &mut UserTestPlan { + let ix = self + .users + .iter() + .position(|user| user.user_id == user_id) + .unwrap(); + &mut self.users[ix] + } +} + async fn simulate_client( client: Rc, - mut op_start_signal: futures::channel::mpsc::UnboundedReceiver, + mut operation_rx: futures::channel::mpsc::UnboundedReceiver<()>, plan: Arc>, mut cx: TestAppContext, ) { @@ -736,8 +867,10 @@ async fn simulate_client( .await; client.language_registry.add(Arc::new(language)); - while op_start_signal.next().await.is_some() { - if let Err(error) = randomly_mutate_client(&client, plan.clone(), &mut cx).await { + while operation_rx.next().await.is_some() { + let operation = plan.lock().next_client_operation(&client, &cx).await; + if let Err(error) = apply_client_operation(&client, plan.clone(), operation, &mut cx).await + { log::error!("{} error: {:?}", client.username, error); } @@ -746,98 +879,274 @@ async fn 
simulate_client( log::info!("{}: done", client.username); } -// async fn apply_client_operation( -// client: &mut TestClient, -// plan: Arc>, -// operation: ClientOperation, -// cx: &mut TestAppContext, -// ) -> Result<()> { -// match operation { -// ClientOperation::AcceptIncomingCall => todo!(), -// ClientOperation::RejectIncomingCall => todo!(), -// ClientOperation::OpenLocalProject { path } => todo!(), -// ClientOperation::AddWorktreeToProject { -// existing_path, -// new_path, -// } => todo!(), -// ClientOperation::CloseProject { existing_path } => todo!(), -// } -// } - -async fn randomly_mutate_client( - client: &Rc, - plan: Arc>, - cx: &mut TestAppContext, -) -> Result<()> { - let choice = plan.lock().rng.gen_range(0..100); - match choice { - 0..=19 => randomly_mutate_active_call(client, &plan, cx).await?, - 20..=49 => randomly_mutate_projects(client, &plan, cx).await?, - 50..=59 if !client.local_projects().is_empty() || !client.remote_projects().is_empty() => { - randomly_mutate_worktrees(client, &plan, cx).await?; - } - 60..=84 if !client.local_projects().is_empty() || !client.remote_projects().is_empty() => { - randomly_query_and_mutate_buffers(client, &plan, cx).await?; - } - _ => randomly_mutate_fs(client, &plan).await, - } - - Ok(()) -} - -async fn randomly_mutate_active_call( +async fn apply_client_operation( client: &TestClient, - plan: &Arc>, + plan: Arc>, + operation: ClientOperation, cx: &mut TestAppContext, ) -> Result<()> { - let active_call = cx.read(ActiveCall::global); - if active_call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { - if plan.lock().rng.gen_bool(0.7) { + match operation { + ClientOperation::AcceptIncomingCall => { log::info!("{}: accepting incoming call", client.username); + let active_call = cx.read(ActiveCall::global); active_call .update(cx, |call, cx| call.accept_incoming(cx)) .await?; - } else { + } + + ClientOperation::RejectIncomingCall => { log::info!("{}: declining incoming call", client.username); + 
let active_call = cx.read(ActiveCall::global); active_call.update(cx, |call, _| call.decline_incoming())?; } - } else { - let available_contacts = client.user_store.read_with(cx, |user_store, _| { - user_store - .contacts() - .iter() - .filter(|contact| contact.online && !contact.busy) - .cloned() - .collect::>() - }); - let distribution = plan.lock().rng.gen_range(0..100); - match distribution { - 0..=29 if !available_contacts.is_empty() => { - let contact = available_contacts.choose(&mut plan.lock().rng).unwrap(); - log::info!( - "{}: inviting {}", - client.username, - contact.user.github_login - ); - active_call - .update(cx, |call, cx| call.invite(contact.user.id, None, cx)) - .await?; - } - 30..=39 - if plan.lock().allow_client_disconnection - && active_call.read_with(cx, |call, _| call.room().is_some()) => + ClientOperation::LeaveCall => { + log::info!("{}: hanging up", client.username); + let active_call = cx.read(ActiveCall::global); + active_call.update(cx, |call, cx| call.hang_up(cx))?; + } + + ClientOperation::InviteContactToCall { user_id } => { + log::info!("{}: inviting {}", client.username, user_id,); + let active_call = cx.read(ActiveCall::global); + active_call + .update(cx, |call, cx| call.invite(user_id.to_proto(), None, cx)) + .await?; + } + + ClientOperation::OpenLocalProject { first_root_name } => { + log::info!( + "{}: opening local project at {:?}", + client.username, + first_root_name + ); + let root_path = Path::new("/").join(&first_root_name); + client.fs.create_dir(&root_path).await.unwrap(); + client + .fs + .create_file(&root_path.join("main.rs"), Default::default()) + .await + .unwrap(); + let project = client.build_local_project(root_path, cx).await.0; + + let active_call = cx.read(ActiveCall::global); + if active_call.read_with(cx, |call, _| call.room().is_some()) + && project.read_with(cx, |project, _| project.is_local() && !project.is_shared()) { - log::info!("{}: hanging up", client.username); - active_call.update(cx, |call, cx| 
call.hang_up(cx))?; + match active_call + .update(cx, |call, cx| call.share_project(project.clone(), cx)) + .await + { + Ok(project_id) => { + log::info!( + "{}: shared project {} with id {}", + client.username, + first_root_name, + project_id + ); + } + Err(error) => { + log::error!( + "{}: error sharing project {}: {:?}", + client.username, + first_root_name, + error + ); + } + } + } + + client.local_projects_mut().push(project.clone()); + } + + ClientOperation::AddWorktreeToProject { + project_root_name, + new_root_path, + } => { + log::info!( + "{}: finding/creating local worktree at {:?} to project with root path {}", + client.username, + new_root_path, + project_root_name + ); + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + if !client.fs.paths().await.contains(&new_root_path) { + client.fs.create_dir(&new_root_path).await.unwrap(); + } + project + .update(cx, |project, cx| { + project.find_or_create_local_worktree(&new_root_path, true, cx) + }) + .await + .unwrap(); + } + + ClientOperation::CloseRemoteProject { project_root_name } => { + log::info!( + "{}: dropping project with root path {}", + client.username, + project_root_name, + ); + let ix = project_ix_for_root_name(&*client.remote_projects(), &project_root_name, cx) + .expect("invalid project in test operation"); + client.remote_projects_mut().remove(ix); + } + + ClientOperation::OpenRemoteProject { + host_id, + first_root_name, + } => { + log::info!( + "{}: joining remote project of user {}, root name {}", + client.username, + host_id, + first_root_name, + ); + let active_call = cx.read(ActiveCall::global); + let project_id = active_call + .read_with(cx, |call, cx| { + let room = call.room().cloned()?; + let participant = room + .read(cx) + .remote_participants() + .get(&host_id.to_proto())?; + let project = participant + .projects + .iter() + .find(|project| project.worktree_root_names[0] == first_root_name)?; + Some(project.id) 
+ }) + .expect("invalid project in test operation"); + let project = client.build_remote_project(project_id, cx).await; + client.remote_projects_mut().push(project); + } + + ClientOperation::OpenBuffer { + project_root_name, + full_path, + } => { + log::info!( + "{}: opening path {:?} in project {}", + client.username, + full_path, + project_root_name, + ); + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + let mut components = full_path.components(); + let root_name = components.next().unwrap().as_os_str().to_str().unwrap(); + let path = components.as_path(); + let worktree_id = project + .read_with(cx, |project, cx| { + project.worktrees(cx).find_map(|worktree| { + let worktree = worktree.read(cx); + if worktree.root_name() == root_name { + Some(worktree.id()) + } else { + None + } + }) + }) + .expect("invalid buffer path in test operation"); + let buffer = project + .update(cx, |project, cx| { + project.open_buffer((worktree_id, &path), cx) + }) + .await?; + client.buffers_for_project(&project).insert(buffer); + } + + ClientOperation::EditBuffer { + project_root_name, + full_path, + edits, + } => { + log::info!( + "{}: editing buffer {:?} in project {} with {:?}", + client.username, + full_path, + project_root_name, + edits + ); + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + let buffer = client + .buffers_for_project(&project) + .iter() + .find(|buffer| { + buffer.read_with(cx, |buffer, cx| { + buffer.file().unwrap().full_path(cx) == full_path + }) + }) + .cloned() + .expect("invalid buffer path in test operation"); + buffer.update(cx, |buffer, cx| { + buffer.edit(edits, None, cx); + }); + } + + _ => { + let choice = plan.lock().rng.gen_range(0..100); + match choice { + 50..=59 + if !client.local_projects().is_empty() + || !client.remote_projects().is_empty() => + { + randomly_mutate_worktrees(client, &plan, cx).await?; + 
} + 60..=84 + if !client.local_projects().is_empty() + || !client.remote_projects().is_empty() => + { + randomly_query_and_mutate_buffers(client, &plan, cx).await?; + } + _ => randomly_mutate_fs(client, &plan).await, } - _ => {} } } - Ok(()) } +fn project_for_root_name( + client: &TestClient, + root_name: &str, + cx: &TestAppContext, +) -> Option> { + if let Some(ix) = project_ix_for_root_name(&*client.local_projects(), root_name, cx) { + return Some(client.local_projects()[ix].clone()); + } + if let Some(ix) = project_ix_for_root_name(&*client.remote_projects(), root_name, cx) { + return Some(client.remote_projects()[ix].clone()); + } + None +} + +fn project_ix_for_root_name( + projects: &[ModelHandle], + root_name: &str, + cx: &TestAppContext, +) -> Option { + projects.iter().position(|project| { + project.read_with(cx, |project, cx| { + let worktree = project.visible_worktrees(cx).next().unwrap(); + worktree.read(cx).root_name() == root_name + }) + }) +} + +fn root_name_for_project(project: &ModelHandle, cx: &TestAppContext) -> String { + project.read_with(cx, |project, cx| { + project + .visible_worktrees(cx) + .next() + .unwrap() + .read(cx) + .root_name() + .to_string() + }) +} + async fn randomly_mutate_fs(client: &TestClient, plan: &Arc>) { let is_dir = plan.lock().rng.gen::(); let mut new_path = client @@ -862,159 +1171,6 @@ async fn randomly_mutate_fs(client: &TestClient, plan: &Arc>) { } } -async fn randomly_mutate_projects( - client: &TestClient, - plan: &Arc>, - cx: &mut TestAppContext, -) -> Result<()> { - let active_call = cx.read(ActiveCall::global); - let remote_projects = - if let Some(room) = active_call.read_with(cx, |call, _| call.room().cloned()) { - room.read_with(cx, |room, _| { - room.remote_participants() - .values() - .flat_map(|participant| participant.projects.clone()) - .collect::>() - }) - } else { - Default::default() - }; - - let project = if remote_projects.is_empty() || plan.lock().rng.gen() { - if 
client.local_projects().is_empty() || plan.lock().rng.gen() { - let paths = client.fs.paths().await; - let local_project = if paths.is_empty() || plan.lock().rng.gen() { - let root_path = client.create_new_root_dir(); - client.fs.create_dir(&root_path).await.unwrap(); - client - .fs - .create_file(&root_path.join("main.rs"), Default::default()) - .await - .unwrap(); - log::info!( - "{}: opening local project at {:?}", - client.username, - root_path - ); - client.build_local_project(root_path, cx).await.0 - } else { - let root_path = paths.choose(&mut plan.lock().rng).unwrap(); - log::info!( - "{}: opening local project at {:?}", - client.username, - root_path - ); - client.build_local_project(root_path, cx).await.0 - }; - client.local_projects_mut().push(local_project.clone()); - local_project - } else { - client - .local_projects() - .choose(&mut plan.lock().rng) - .unwrap() - .clone() - } - } else { - if client.remote_projects().is_empty() || plan.lock().rng.gen() { - let remote_project_id = remote_projects.choose(&mut plan.lock().rng).unwrap().id; - let remote_projects = client.remote_projects().clone(); - let remote_project = if let Some(project) = remote_projects - .iter() - .find(|project| { - project.read_with(cx, |project, _| { - project.remote_id() == Some(remote_project_id) - }) - }) - .cloned() - { - project - } else { - log::info!( - "{}: opening remote project {}", - client.username, - remote_project_id - ); - let call = cx.read(ActiveCall::global); - let room = call.read_with(cx, |call, _| call.room().unwrap().clone()); - let remote_project = room - .update(cx, |room, cx| { - room.join_project( - remote_project_id, - client.language_registry.clone(), - FakeFs::new(cx.background().clone()), - cx, - ) - }) - .await?; - client.remote_projects_mut().push(remote_project.clone()); - remote_project - }; - - remote_project - } else { - client - .remote_projects() - .choose(&mut plan.lock().rng) - .unwrap() - .clone() - } - }; - - if active_call.read_with(cx, 
|call, _| call.room().is_some()) - && project.read_with(cx, |project, _| project.is_local() && !project.is_shared()) - { - match active_call - .update(cx, |call, cx| call.share_project(project.clone(), cx)) - .await - { - Ok(project_id) => { - log::info!("{}: shared project with id {}", client.username, project_id); - } - Err(error) => { - log::error!("{}: error sharing project, {:?}", client.username, error); - } - } - } - - let choice = plan.lock().rng.gen_range(0..100); - match choice { - 0..=19 if project.read_with(cx, |project, _| project.is_local()) => { - let paths = client.fs.paths().await; - let path = paths.choose(&mut plan.lock().rng).unwrap(); - log::info!( - "{}: finding/creating local worktree for path {:?}", - client.username, - path - ); - project - .update(cx, |project, cx| { - project.find_or_create_local_worktree(&path, true, cx) - }) - .await - .unwrap(); - } - 20..=24 if project.read_with(cx, |project, _| project.is_remote()) => { - log::info!( - "{}: dropping remote project {}", - client.username, - project.read_with(cx, |project, _| project.remote_id().unwrap()) - ); - - cx.update(|_| { - client - .remote_projects_mut() - .retain(|remote_project| *remote_project != project); - client.buffers().remove(&project); - drop(project); - }); - } - _ => {} - } - - Ok(()) -} - async fn randomly_mutate_worktrees( client: &TestClient, plan: &Arc>, diff --git a/crates/text/src/text.rs b/crates/text/src/text.rs index 914023f305..3bf6695cce 100644 --- a/crates/text/src/text.rs +++ b/crates/text/src/text.rs @@ -1429,12 +1429,11 @@ impl Buffer { start..end } - #[allow(clippy::type_complexity)] - pub fn randomly_edit( - &mut self, + pub fn get_random_edits( + &self, rng: &mut T, edit_count: usize, - ) -> (Vec<(Range, Arc)>, Operation) + ) -> Vec<(Range, Arc)> where T: rand::Rng, { @@ -1453,8 +1452,21 @@ impl Buffer { edits.push((range, new_text.into())); } + edits + } + #[allow(clippy::type_complexity)] + pub fn randomly_edit( + &mut self, + rng: &mut T, + 
edit_count: usize, + ) -> (Vec<(Range, Arc)>, Operation) + where + T: rand::Rng, + { + let mut edits = self.get_random_edits(rng, edit_count); log::info!("mutating buffer {} with {:?}", self.replica_id, edits); + let op = self.edit(edits.iter().cloned()); if let Operation::Edit(edit) = &op { assert_eq!(edits.len(), edit.new_text.len()); From f1b3692a354e42039156c735020fb5d46581f72b Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 4 Jan 2023 18:46:31 -0800 Subject: [PATCH 04/60] Tweak operation rates --- .../src/tests/randomized_integration_tests.rs | 237 ++++++++++-------- 1 file changed, 133 insertions(+), 104 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 64792cf422..0db56549a6 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -230,11 +230,7 @@ async fn test_random_collaboration( i += 1; } - Operation::RunUntilParked => { - deterministic.run_until_parked(); - } - - Operation::MutateClients(user_ids) => { + Operation::MutateClients { user_ids, quiesce } => { for user_id in user_ids { let client_ix = clients .iter() @@ -243,6 +239,10 @@ async fn test_random_collaboration( operation_channels[client_ix].unbounded_send(()).unwrap(); i += 1; } + + if quiesce { + deterministic.run_until_parked(); + } } } } @@ -444,12 +444,20 @@ struct UserTestPlan { #[derive(Debug)] enum Operation { - AddConnection { user_id: UserId }, - RemoveConnection { user_id: UserId }, - BounceConnection { user_id: UserId }, + AddConnection { + user_id: UserId, + }, + RemoveConnection { + user_id: UserId, + }, + BounceConnection { + user_id: UserId, + }, RestartServer, - RunUntilParked, - MutateClients(Vec), + MutateClients { + user_ids: Vec, + quiesce: bool, + }, } #[derive(Debug)] @@ -490,7 +498,7 @@ impl TestPlan { async fn next_operation(&mut self, clients: &[(Rc, TestAppContext)]) -> Operation { let 
operation = loop { break match self.rng.gen_range(0..100) { - 0..=19 if clients.len() < self.users.len() => { + 0..=29 if clients.len() < self.users.len() => { let user = self .users .iter() @@ -501,20 +509,19 @@ impl TestPlan { user_id: user.user_id, } } - 20..=24 if clients.len() > 1 && self.allow_client_disconnection => { + 30..=34 if clients.len() > 1 && self.allow_client_disconnection => { let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; let user_id = client.current_user_id(cx); Operation::RemoveConnection { user_id } } - 25..=29 if clients.len() > 1 && self.allow_client_reconnection => { + 35..=39 if clients.len() > 1 && self.allow_client_reconnection => { let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; let user_id = client.current_user_id(cx); Operation::BounceConnection { user_id } } - 30..=34 if self.allow_server_restarts && clients.len() > 1 => { + 40..=44 if self.allow_server_restarts && clients.len() > 1 => { Operation::RestartServer } - 35..=39 => Operation::RunUntilParked, _ if !clients.is_empty() => { let user_ids = (0..self.rng.gen_range(0..10)) .map(|_| { @@ -523,7 +530,10 @@ impl TestPlan { client.current_user_id(cx) }) .collect(); - Operation::MutateClients(user_ids) + Operation::MutateClients { + user_ids, + quiesce: self.rng.gen(), + } } _ => continue, }; @@ -541,78 +551,95 @@ impl TestPlan { let operation = loop { match self.rng.gen_range(0..100) { // Mutate the call - 0..=19 => match self.rng.gen_range(0..100_u32) { + 0..=29 => { // Respond to an incoming call - 0..=39 => { - if call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { - break if self.rng.gen_bool(0.7) { - ClientOperation::AcceptIncomingCall - } else { - ClientOperation::RejectIncomingCall - }; - } + if call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { + break if self.rng.gen_bool(0.7) { + ClientOperation::AcceptIncomingCall + } else { + ClientOperation::RejectIncomingCall + }; } - // Invite a contact to the 
current call - 30..=89 => { - let available_contacts = - client.user_store.read_with(cx, |user_store, _| { - user_store - .contacts() - .iter() - .filter(|contact| contact.online && !contact.busy) - .cloned() - .collect::>() - }); - if !available_contacts.is_empty() { - let contact = available_contacts.choose(&mut self.rng).unwrap(); - break ClientOperation::InviteContactToCall { - user_id: UserId(contact.user.id as i32), - }; + match self.rng.gen_range(0..100_u32) { + // Invite a contact to the current call + 0..=70 => { + let available_contacts = + client.user_store.read_with(cx, |user_store, _| { + user_store + .contacts() + .iter() + .filter(|contact| contact.online && !contact.busy) + .cloned() + .collect::>() + }); + if !available_contacts.is_empty() { + let contact = available_contacts.choose(&mut self.rng).unwrap(); + break ClientOperation::InviteContactToCall { + user_id: UserId(contact.user.id as i32), + }; + } } - } - // Leave the current call - 90.. => { - if self.allow_client_disconnection - && call.read_with(cx, |call, _| call.room().is_some()) - { - break ClientOperation::LeaveCall; + // Leave the current call + 71.. 
=> { + if self.allow_client_disconnection + && call.read_with(cx, |call, _| call.room().is_some()) + { + break ClientOperation::LeaveCall; + } } } - }, + } // Mutate projects - 20..=39 => match self.rng.gen_range(0..100_u32) { - // Open a remote project - 0..=30 => { + 39..=59 => match self.rng.gen_range(0..100_u32) { + // Open a new project + 0..=70 => { + // Open a remote project if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) { - let remote_projects = room.read_with(cx, |room, _| { + let existing_remote_project_ids = cx.read(|cx| { + client + .remote_projects() + .iter() + .map(|p| p.read(cx).remote_id().unwrap()) + .collect::>() + }); + let new_remote_projects = room.read_with(cx, |room, _| { room.remote_participants() .values() .flat_map(|participant| { - participant.projects.iter().map(|project| { - ( - UserId::from_proto(participant.user.id), - project.worktree_root_names[0].clone(), - ) + participant.projects.iter().filter_map(|project| { + if existing_remote_project_ids.contains(&project.id) { + None + } else { + Some(( + UserId::from_proto(participant.user.id), + project.worktree_root_names[0].clone(), + )) + } }) }) .collect::>() }); - if !remote_projects.is_empty() { + if !new_remote_projects.is_empty() { let (host_id, first_root_name) = - remote_projects.choose(&mut self.rng).unwrap().clone(); + new_remote_projects.choose(&mut self.rng).unwrap().clone(); break ClientOperation::OpenRemoteProject { host_id, first_root_name, }; } } + // Open a local project + else { + let first_root_name = self.next_root_dir_name(user_id); + break ClientOperation::OpenLocalProject { first_root_name }; + } } // Close a remote project - 31..=40 => { + 71..=80 => { if !client.remote_projects().is_empty() { let project = client .remote_projects() @@ -626,14 +653,8 @@ impl TestPlan { } } - // Open a local project - 41..=60 => { - let first_root_name = self.next_root_dir_name(user_id); - break ClientOperation::OpenLocalProject { first_root_name }; - } - 
// Add a worktree to a local project - 61.. => { + 81.. => { if !client.local_projects().is_empty() { let project = client .local_projects() @@ -659,7 +680,7 @@ impl TestPlan { }, // Mutate buffers - 40..=79 => { + 60.. => { let Some(project) = choose_random_project(client, &mut self.rng) else { continue }; let project_root_name = root_name_for_project(&project, cx); @@ -871,9 +892,8 @@ async fn simulate_client( let operation = plan.lock().next_client_operation(&client, &cx).await; if let Err(error) = apply_client_operation(&client, plan.clone(), operation, &mut cx).await { - log::error!("{} error: {:?}", client.username, error); + log::error!("{} error: {}", client.username, error); } - cx.background().simulate_random_delay().await; } log::info!("{}: done", client.username); @@ -928,34 +948,7 @@ async fn apply_client_operation( .await .unwrap(); let project = client.build_local_project(root_path, cx).await.0; - - let active_call = cx.read(ActiveCall::global); - if active_call.read_with(cx, |call, _| call.room().is_some()) - && project.read_with(cx, |project, _| project.is_local() && !project.is_shared()) - { - match active_call - .update(cx, |call, cx| call.share_project(project.clone(), cx)) - .await - { - Ok(project_id) => { - log::info!( - "{}: shared project {} with id {}", - client.username, - first_root_name, - project_id - ); - } - Err(error) => { - log::error!( - "{}: error sharing project {}: {:?}", - client.username, - first_root_name, - error - ); - } - } - } - + ensure_project_shared(&project, client, cx).await; client.local_projects_mut().push(project.clone()); } @@ -971,6 +964,7 @@ async fn apply_client_operation( ); let project = project_for_root_name(client, &project_root_name, cx) .expect("invalid project in test operation"); + ensure_project_shared(&project, client, cx).await; if !client.fs.paths().await.contains(&new_root_path) { client.fs.create_dir(&new_root_path).await.unwrap(); } @@ -984,13 +978,13 @@ async fn apply_client_operation( 
ClientOperation::CloseRemoteProject { project_root_name } => { log::info!( - "{}: dropping project with root path {}", + "{}: closing remote project with root path {}", client.username, project_root_name, ); let ix = project_ix_for_root_name(&*client.remote_projects(), &project_root_name, cx) .expect("invalid project in test operation"); - client.remote_projects_mut().remove(ix); + cx.update(|_| client.remote_projects_mut().remove(ix)); } ClientOperation::OpenRemoteProject { @@ -1027,13 +1021,14 @@ async fn apply_client_operation( full_path, } => { log::info!( - "{}: opening path {:?} in project {}", + "{}: opening buffer {:?} in project {}", client.username, full_path, project_root_name, ); let project = project_for_root_name(client, &project_root_name, cx) .expect("invalid project in test operation"); + // ensure_project_shared(&project, client, cx).await; let mut components = full_path.components(); let root_name = components.next().unwrap().as_os_str().to_str().unwrap(); let path = components.as_path(); @@ -1086,10 +1081,10 @@ async fn apply_client_operation( }); } - _ => { + ClientOperation::Other => { let choice = plan.lock().rng.gen_range(0..100); match choice { - 50..=59 + 0..=59 if !client.local_projects().is_empty() || !client.remote_projects().is_empty() => { @@ -1147,6 +1142,40 @@ fn root_name_for_project(project: &ModelHandle, cx: &TestAppContext) -> }) } +async fn ensure_project_shared( + project: &ModelHandle, + client: &TestClient, + cx: &mut TestAppContext, +) { + let first_root_name = root_name_for_project(project, cx); + let active_call = cx.read(ActiveCall::global); + if active_call.read_with(cx, |call, _| call.room().is_some()) + && project.read_with(cx, |project, _| project.is_local() && !project.is_shared()) + { + match active_call + .update(cx, |call, cx| call.share_project(project.clone(), cx)) + .await + { + Ok(project_id) => { + log::info!( + "{}: shared project {} with id {}", + client.username, + first_root_name, + project_id + ); + } + 
Err(error) => { + log::error!( + "{}: error sharing project {}: {:?}", + client.username, + first_root_name, + error + ); + } + } + } +} + async fn randomly_mutate_fs(client: &TestClient, plan: &Arc>) { let is_dir = plan.lock().rng.gen::(); let mut new_path = client From 210286da489c782f3441ff37af73dc64d2bdd3e0 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Fri, 6 Jan 2023 12:55:50 -0800 Subject: [PATCH 05/60] Make operations for all buffer manipulations --- .../src/tests/randomized_integration_tests.rs | 536 ++++++++---------- 1 file changed, 252 insertions(+), 284 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 0db56549a6..86ca673df4 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -6,7 +6,7 @@ use crate::{ use anyhow::{anyhow, Result}; use call::ActiveCall; use client::RECEIVE_TIMEOUT; -use collections::BTreeMap; +use collections::{BTreeMap, HashSet}; use fs::Fs as _; use futures::StreamExt as _; use gpui::{executor::Deterministic, ModelHandle, TestAppContext}; @@ -486,14 +486,44 @@ enum ClientOperation { project_root_name: String, full_path: PathBuf, }, + SearchProject { + project_root_name: String, + query: String, + detach: bool, + }, EditBuffer { project_root_name: String, full_path: PathBuf, edits: Vec<(Range, Arc)>, }, + CloseBuffer { + project_root_name: String, + full_path: PathBuf, + }, + SaveBuffer { + project_root_name: String, + full_path: PathBuf, + detach: bool, + }, + RequestLspDataInBuffer { + project_root_name: String, + full_path: PathBuf, + offset: usize, + kind: LspRequestKind, + detach: bool, + }, Other, } +#[derive(Debug)] +enum LspRequestKind { + Rename, + Completion, + CodeAction, + Definition, + Highlights, +} + impl TestPlan { async fn next_operation(&mut self, clients: &[(Rc, TestAppContext)]) -> Operation { let operation = loop { @@ -679,27 +709,44 @@ 
impl TestPlan { } }, - // Mutate buffers + // Query and mutate buffers 60.. => { let Some(project) = choose_random_project(client, &mut self.rng) else { continue }; let project_root_name = root_name_for_project(&project, cx); match self.rng.gen_range(0..100_u32) { // Manipulate an existing buffer - 0..=80 => { + 0..=70 => { let Some(buffer) = client .buffers_for_project(&project) .iter() .choose(&mut self.rng) .cloned() else { continue }; + let full_path = buffer + .read_with(cx, |buffer, cx| buffer.file().unwrap().full_path(cx)); + match self.rng.gen_range(0..100_u32) { - 0..=9 => { - let (full_path, edits) = buffer.read_with(cx, |buffer, cx| { - ( - buffer.file().unwrap().full_path(cx), - buffer.get_random_edits(&mut self.rng, 3), - ) + // Close the buffer + 0..=15 => { + break ClientOperation::CloseBuffer { + project_root_name, + full_path, + }; + } + // Save the buffer + 16..=29 if buffer.read_with(cx, |b, _| b.is_dirty()) => { + let detach = self.rng.gen_bool(0.3); + break ClientOperation::SaveBuffer { + project_root_name, + full_path, + detach, + }; + } + // Edit the buffer + 30..=69 => { + let edits = buffer.read_with(cx, |buffer, _| { + buffer.get_random_edits(&mut self.rng, 3) }); break ClientOperation::EditBuffer { project_root_name, @@ -707,10 +754,42 @@ impl TestPlan { edits, }; } - _ => {} + // Make an LSP request + _ => { + let offset = buffer.read_with(cx, |buffer, _| { + buffer.clip_offset( + self.rng.gen_range(0..=buffer.len()), + language::Bias::Left, + ) + }); + let detach = self.rng.gen(); + break ClientOperation::RequestLspDataInBuffer { + project_root_name, + full_path, + offset, + kind: match self.rng.gen_range(0..5_u32) { + 0 => LspRequestKind::Rename, + 1 => LspRequestKind::Highlights, + 2 => LspRequestKind::Definition, + 3 => LspRequestKind::CodeAction, + 4.. 
=> LspRequestKind::Completion, + }, + detach, + }; + } } } + 71..=80 => { + let query = self.rng.gen_range('a'..='z').to_string(); + let detach = self.rng.gen_bool(0.3); + break ClientOperation::SearchProject { + project_root_name, + query, + detach, + }; + } + // Open a buffer 81.. => { let worktree = project.read_with(cx, |project, cx| { @@ -1066,21 +1145,159 @@ async fn apply_client_operation( ); let project = project_for_root_name(client, &project_root_name, cx) .expect("invalid project in test operation"); - let buffer = client - .buffers_for_project(&project) - .iter() - .find(|buffer| { - buffer.read_with(cx, |buffer, cx| { - buffer.file().unwrap().full_path(cx) == full_path - }) - }) - .cloned() - .expect("invalid buffer path in test operation"); + let buffer = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) + .expect("invalid buffer path in test operation"); buffer.update(cx, |buffer, cx| { buffer.edit(edits, None, cx); }); } + ClientOperation::CloseBuffer { + project_root_name, + full_path, + } => { + log::info!( + "{}: dropping buffer {:?} in project {}", + client.username, + full_path, + project_root_name + ); + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + let buffer = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) + .expect("invalid buffer path in test operation"); + cx.update(|_| { + client.buffers_for_project(&project).remove(&buffer); + drop(buffer); + }); + } + + ClientOperation::SaveBuffer { + project_root_name, + full_path, + detach, + } => { + log::info!( + "{}: saving buffer {:?} in project {}{}", + client.username, + full_path, + project_root_name, + if detach { ", detaching" } else { ", awaiting" } + ); + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + let buffer = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, 
cx) + .expect("invalid buffer path in test operation"); + let (requested_version, save) = + buffer.update(cx, |buffer, cx| (buffer.version(), buffer.save(cx))); + let save = cx.background().spawn(async move { + let (saved_version, _, _) = save + .await + .map_err(|err| anyhow!("save request failed: {:?}", err))?; + assert!(saved_version.observed_all(&requested_version)); + anyhow::Ok(()) + }); + if detach { + log::info!("{}: detaching save request", client.username); + cx.update(|cx| save.detach_and_log_err(cx)); + } else { + save.await?; + } + } + + ClientOperation::RequestLspDataInBuffer { + project_root_name, + full_path, + offset, + kind, + detach, + } => { + log::info!( + "{}: request LSP {:?} for buffer {:?} in project {}{}", + client.username, + kind, + full_path, + project_root_name, + if detach { ", detaching" } else { ", awaiting" } + ); + + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + let buffer = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) + .expect("invalid buffer path in test operation"); + let request = match kind { + LspRequestKind::Rename => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.prepare_rename(buffer, offset, cx)) + .await?; + anyhow::Ok(()) + }), + LspRequestKind::Completion => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.completions(&buffer, offset, cx)) + .await?; + Ok(()) + }), + LspRequestKind::CodeAction => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.code_actions(&buffer, offset..offset, cx)) + .await?; + Ok(()) + }), + LspRequestKind::Definition => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.definition(&buffer, offset, cx)) + .await?; + Ok(()) + }), + LspRequestKind::Highlights => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.document_highlights(&buffer, offset, cx)) + .await?; + Ok(()) + }), + }; + if 
detach { + request.detach(); + } else { + request.await?; + } + } + + ClientOperation::SearchProject { + project_root_name, + query, + detach, + } => { + log::info!( + "{}: search project {} for {:?}{}", + client.username, + project_root_name, + query, + if detach { ", detaching" } else { ", awaiting" } + ); + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + let search = project.update(cx, |project, cx| { + project.search(SearchQuery::text(query, false, false), cx) + }); + let search = cx.background().spawn(async move { + search + .await + .map_err(|err| anyhow!("search request failed: {:?}", err)) + }); + if detach { + log::info!("{}: detaching save request", client.username); + cx.update(|cx| search.detach_and_log_err(cx)); + } else { + search.await?; + } + } + ClientOperation::Other => { let choice = plan.lock().rng.gen_range(0..100); match choice { @@ -1090,12 +1307,6 @@ async fn apply_client_operation( { randomly_mutate_worktrees(client, &plan, cx).await?; } - 60..=84 - if !client.local_projects().is_empty() - || !client.remote_projects().is_empty() => - { - randomly_query_and_mutate_buffers(client, &plan, cx).await?; - } _ => randomly_mutate_fs(client, &plan).await, } } @@ -1103,6 +1314,21 @@ async fn apply_client_operation( Ok(()) } +fn buffer_for_full_path( + buffers: &HashSet>, + full_path: &PathBuf, + cx: &TestAppContext, +) -> Option> { + buffers + .iter() + .find(|buffer| { + buffer.read_with(cx, |buffer, cx| { + buffer.file().unwrap().full_path(cx) == *full_path + }) + }) + .cloned() +} + fn project_for_root_name( client: &TestClient, root_name: &str, @@ -1246,264 +1472,6 @@ async fn randomly_mutate_worktrees( Ok(()) } -async fn randomly_query_and_mutate_buffers( - client: &TestClient, - plan: &Arc>, - cx: &mut TestAppContext, -) -> Result<()> { - let project = choose_random_project(client, &mut plan.lock().rng).unwrap(); - let has_buffers_for_project = 
!client.buffers_for_project(&project).is_empty(); - let buffer = if !has_buffers_for_project || plan.lock().rng.gen() { - let Some(worktree) = project.read_with(cx, |project, cx| { - project - .worktrees(cx) - .filter(|worktree| { - let worktree = worktree.read(cx); - worktree.is_visible() && worktree.entries(false).any(|e| e.is_file()) - }) - .choose(&mut plan.lock().rng) - }) else { - return Ok(()); - }; - - let (worktree_root_name, project_path) = worktree.read_with(cx, |worktree, _| { - let entry = worktree - .entries(false) - .filter(|e| e.is_file()) - .choose(&mut plan.lock().rng) - .unwrap(); - ( - worktree.root_name().to_string(), - (worktree.id(), entry.path.clone()), - ) - }); - log::info!( - "{}: opening path {:?} in worktree {} ({})", - client.username, - project_path.1, - project_path.0, - worktree_root_name, - ); - let buffer = project - .update(cx, |project, cx| { - project.open_buffer(project_path.clone(), cx) - }) - .await?; - log::info!( - "{}: opened path {:?} in worktree {} ({}) with buffer id {}", - client.username, - project_path.1, - project_path.0, - worktree_root_name, - buffer.read_with(cx, |buffer, _| buffer.remote_id()) - ); - client.buffers_for_project(&project).insert(buffer.clone()); - buffer - } else { - client - .buffers_for_project(&project) - .iter() - .choose(&mut plan.lock().rng) - .unwrap() - .clone() - }; - - let choice = plan.lock().rng.gen_range(0..100); - match choice { - 0..=9 => { - cx.update(|cx| { - log::info!( - "{}: dropping buffer {:?}", - client.username, - buffer.read(cx).file().unwrap().full_path(cx) - ); - client.buffers_for_project(&project).remove(&buffer); - drop(buffer); - }); - } - 10..=19 => { - let completions = project.update(cx, |project, cx| { - log::info!( - "{}: requesting completions for buffer {} ({:?})", - client.username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len()); - 
project.completions(&buffer, offset, cx) - }); - let completions = cx.background().spawn(async move { - completions - .await - .map_err(|err| anyhow!("completions request failed: {:?}", err)) - }); - if plan.lock().rng.gen_bool(0.3) { - log::info!("{}: detaching completions request", client.username); - cx.update(|cx| completions.detach_and_log_err(cx)); - } else { - completions.await?; - } - } - 20..=29 => { - let code_actions = project.update(cx, |project, cx| { - log::info!( - "{}: requesting code actions for buffer {} ({:?})", - client.username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let range = buffer.read(cx).random_byte_range(0, &mut plan.lock().rng); - project.code_actions(&buffer, range, cx) - }); - let code_actions = cx.background().spawn(async move { - code_actions - .await - .map_err(|err| anyhow!("code actions request failed: {:?}", err)) - }); - if plan.lock().rng.gen_bool(0.3) { - log::info!("{}: detaching code actions request", client.username); - cx.update(|cx| code_actions.detach_and_log_err(cx)); - } else { - code_actions.await?; - } - } - 30..=39 if buffer.read_with(cx, |buffer, _| buffer.is_dirty()) => { - let (requested_version, save) = buffer.update(cx, |buffer, cx| { - log::info!( - "{}: saving buffer {} ({:?})", - client.username, - buffer.remote_id(), - buffer.file().unwrap().full_path(cx) - ); - (buffer.version(), buffer.save(cx)) - }); - let save = cx.background().spawn(async move { - let (saved_version, _, _) = save - .await - .map_err(|err| anyhow!("save request failed: {:?}", err))?; - assert!(saved_version.observed_all(&requested_version)); - Ok::<_, anyhow::Error>(()) - }); - if plan.lock().rng.gen_bool(0.3) { - log::info!("{}: detaching save request", client.username); - cx.update(|cx| save.detach_and_log_err(cx)); - } else { - save.await?; - } - } - 40..=44 => { - let prepare_rename = project.update(cx, |project, cx| { - log::info!( - "{}: preparing rename for buffer {} ({:?})", - 
client.username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len()); - project.prepare_rename(buffer, offset, cx) - }); - let prepare_rename = cx.background().spawn(async move { - prepare_rename - .await - .map_err(|err| anyhow!("prepare rename request failed: {:?}", err)) - }); - if plan.lock().rng.gen_bool(0.3) { - log::info!("{}: detaching prepare rename request", client.username); - cx.update(|cx| prepare_rename.detach_and_log_err(cx)); - } else { - prepare_rename.await?; - } - } - 45..=49 => { - let definitions = project.update(cx, |project, cx| { - log::info!( - "{}: requesting definitions for buffer {} ({:?})", - client.username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len()); - project.definition(&buffer, offset, cx) - }); - let definitions = cx.background().spawn(async move { - definitions - .await - .map_err(|err| anyhow!("definitions request failed: {:?}", err)) - }); - if plan.lock().rng.gen_bool(0.3) { - log::info!("{}: detaching definitions request", client.username); - cx.update(|cx| definitions.detach_and_log_err(cx)); - } else { - let definitions = definitions.await?; - client - .buffers_for_project(&project) - .extend(definitions.into_iter().map(|loc| loc.target.buffer)); - } - } - 50..=54 => { - let highlights = project.update(cx, |project, cx| { - log::info!( - "{}: requesting highlights for buffer {} ({:?})", - client.username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len()); - project.document_highlights(&buffer, offset, cx) - }); - let highlights = cx.background().spawn(async move { - highlights - .await - .map_err(|err| anyhow!("highlights request failed: {:?}", err)) - }); - if plan.lock().rng.gen_bool(0.3) { - log::info!("{}: detaching 
highlights request", client.username); - cx.update(|cx| highlights.detach_and_log_err(cx)); - } else { - highlights.await?; - } - } - 55..=59 => { - let search = project.update(cx, |project, cx| { - let query = plan.lock().rng.gen_range('a'..='z'); - log::info!("{}: project-wide search {:?}", client.username, query); - project.search(SearchQuery::text(query, false, false), cx) - }); - let search = cx.background().spawn(async move { - search - .await - .map_err(|err| anyhow!("search request failed: {:?}", err)) - }); - if plan.lock().rng.gen_bool(0.3) { - log::info!("{}: detaching search request", client.username); - cx.update(|cx| search.detach_and_log_err(cx)); - } else { - let search = search.await?; - client - .buffers_for_project(&project) - .extend(search.into_keys()); - } - } - _ => { - buffer.update(cx, |buffer, cx| { - log::info!( - "{}: updating buffer {} ({:?})", - client.username, - buffer.remote_id(), - buffer.file().unwrap().full_path(cx) - ); - if plan.lock().rng.gen_bool(0.7) { - buffer.randomly_edit(&mut plan.lock().rng, 5, cx); - } else { - buffer.randomly_undo_redo(&mut plan.lock().rng, cx); - } - }); - } - } - - Ok(()) -} - fn choose_random_project(client: &TestClient, rng: &mut StdRng) -> Option> { client .local_projects() From 99390a7237344e41918da4b9cc09034e6d04ffaa Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Fri, 6 Jan 2023 15:30:49 -0800 Subject: [PATCH 06/60] Represent all randomized test actions as operations --- .../src/tests/randomized_integration_tests.rs | 955 ++++++++++-------- 1 file changed, 515 insertions(+), 440 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 86ca673df4..9cdc05833e 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -13,7 +13,7 @@ use gpui::{executor::Deterministic, ModelHandle, TestAppContext}; use language::{range_to_lsp, 
FakeLspAdapter, Language, LanguageConfig, PointUtf16}; use lsp::FakeLanguageServer; use parking_lot::Mutex; -use project::{search::SearchQuery, Project}; +use project::{search::SearchQuery, Project, ProjectPath}; use rand::prelude::*; use std::{ env, @@ -22,6 +22,7 @@ use std::{ rc::Rc, sync::Arc, }; +use util::ResultExt; #[gpui::test(iterations = 100)] async fn test_random_collaboration( @@ -84,10 +85,12 @@ async fn test_random_collaboration( } let plan = Arc::new(Mutex::new(TestPlan { - users, allow_server_restarts: rng.gen_bool(0.7), allow_client_reconnection: rng.gen_bool(0.7), allow_client_disconnection: rng.gen_bool(0.1), + operation_ix: 0, + max_operations, + users, rng, })); @@ -96,9 +99,8 @@ async fn test_random_collaboration( let mut operation_channels = Vec::new(); let mut next_entity_id = 100000; - let mut i = 0; - while i < max_operations { - let next_operation = plan.lock().next_operation(&clients).await; + loop { + let Some(next_operation) = plan.lock().next_operation(&clients).await else { break }; match next_operation { Operation::AddConnection { user_id } => { let username = { @@ -132,7 +134,6 @@ async fn test_random_collaboration( ))); log::info!("Added connection for {}", username); - i += 1; } Operation::RemoveConnection { user_id } => { @@ -196,7 +197,6 @@ async fn test_random_collaboration( cx.clear_globals(); drop(client); }); - i += 1; } Operation::BounceConnection { user_id } => { @@ -210,7 +210,6 @@ async fn test_random_collaboration( let peer_id = user_connection_ids[0].into(); server.disconnect_client(peer_id); deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); - i += 1; } Operation::RestartServer => { @@ -227,7 +226,6 @@ async fn test_random_collaboration( .await .unwrap(); assert_eq!(stale_room_ids, vec![]); - i += 1; } Operation::MutateClients { user_ids, quiesce } => { @@ -237,7 +235,6 @@ async fn test_random_collaboration( .position(|(client, cx)| client.current_user_id(cx) == user_id) .unwrap(); 
operation_channels[client_ix].unbounded_send(()).unwrap(); - i += 1; } if quiesce { @@ -427,8 +424,387 @@ async fn test_random_collaboration( } } +async fn apply_client_operation( + client: &TestClient, + operation: ClientOperation, + cx: &mut TestAppContext, +) -> Result<()> { + match operation { + ClientOperation::AcceptIncomingCall => { + log::info!("{}: accepting incoming call", client.username); + + let active_call = cx.read(ActiveCall::global); + active_call + .update(cx, |call, cx| call.accept_incoming(cx)) + .await?; + } + + ClientOperation::RejectIncomingCall => { + log::info!("{}: declining incoming call", client.username); + + let active_call = cx.read(ActiveCall::global); + active_call.update(cx, |call, _| call.decline_incoming())?; + } + + ClientOperation::LeaveCall => { + log::info!("{}: hanging up", client.username); + + let active_call = cx.read(ActiveCall::global); + active_call.update(cx, |call, cx| call.hang_up(cx))?; + } + + ClientOperation::InviteContactToCall { user_id } => { + log::info!("{}: inviting {}", client.username, user_id,); + + let active_call = cx.read(ActiveCall::global); + active_call + .update(cx, |call, cx| call.invite(user_id.to_proto(), None, cx)) + .await + .log_err(); + } + + ClientOperation::OpenLocalProject { first_root_name } => { + log::info!( + "{}: opening local project at {:?}", + client.username, + first_root_name + ); + + let root_path = Path::new("/").join(&first_root_name); + client.fs.create_dir(&root_path).await.unwrap(); + client + .fs + .create_file(&root_path.join("main.rs"), Default::default()) + .await + .unwrap(); + let project = client.build_local_project(root_path, cx).await.0; + ensure_project_shared(&project, client, cx).await; + client.local_projects_mut().push(project.clone()); + } + + ClientOperation::AddWorktreeToProject { + project_root_name, + new_root_path, + } => { + log::info!( + "{}: finding/creating local worktree at {:?} to project with root path {}", + client.username, + new_root_path, + 
project_root_name + ); + + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + ensure_project_shared(&project, client, cx).await; + if !client.fs.paths().await.contains(&new_root_path) { + client.fs.create_dir(&new_root_path).await.unwrap(); + } + project + .update(cx, |project, cx| { + project.find_or_create_local_worktree(&new_root_path, true, cx) + }) + .await + .unwrap(); + } + + ClientOperation::CloseRemoteProject { project_root_name } => { + log::info!( + "{}: closing remote project with root path {}", + client.username, + project_root_name, + ); + + let ix = project_ix_for_root_name(&*client.remote_projects(), &project_root_name, cx) + .expect("invalid project in test operation"); + cx.update(|_| client.remote_projects_mut().remove(ix)); + } + + ClientOperation::OpenRemoteProject { + host_id, + first_root_name, + } => { + log::info!( + "{}: joining remote project of user {}, root name {}", + client.username, + host_id, + first_root_name, + ); + + let active_call = cx.read(ActiveCall::global); + let project_id = active_call + .read_with(cx, |call, cx| { + let room = call.room().cloned()?; + let participant = room + .read(cx) + .remote_participants() + .get(&host_id.to_proto())?; + let project = participant + .projects + .iter() + .find(|project| project.worktree_root_names[0] == first_root_name)?; + Some(project.id) + }) + .expect("invalid project in test operation"); + let project = client.build_remote_project(project_id, cx).await; + client.remote_projects_mut().push(project); + } + + ClientOperation::CreateWorktreeEntry { + project_root_name, + is_local, + full_path, + is_dir, + } => { + log::info!( + "{}: creating {} at path {:?} in {} project {}", + client.username, + if is_dir { "dir" } else { "file" }, + full_path, + if is_local { "local" } else { "remote" }, + project_root_name, + ); + + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in 
test operation"); + ensure_project_shared(&project, client, cx).await; + let project_path = project_path_for_full_path(&project, &full_path, cx) + .expect("invalid worktree path in test operation"); + project + .update(cx, |p, cx| p.create_entry(project_path, is_dir, cx)) + .unwrap() + .await?; + } + + ClientOperation::OpenBuffer { + project_root_name, + is_local, + full_path, + } => { + log::info!( + "{}: opening buffer {:?} in {} project {}", + client.username, + full_path, + if is_local { "local" } else { "remote" }, + project_root_name, + ); + + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + ensure_project_shared(&project, client, cx).await; + let project_path = project_path_for_full_path(&project, &full_path, cx) + .expect("invalid buffer path in test operation"); + let buffer = project + .update(cx, |project, cx| project.open_buffer(project_path, cx)) + .await?; + client.buffers_for_project(&project).insert(buffer); + } + + ClientOperation::EditBuffer { + project_root_name, + is_local, + full_path, + edits, + } => { + log::info!( + "{}: editing buffer {:?} in {} project {} with {:?}", + client.username, + full_path, + if is_local { "local" } else { "remote" }, + project_root_name, + edits + ); + + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + ensure_project_shared(&project, client, cx).await; + let buffer = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) + .expect("invalid buffer path in test operation"); + buffer.update(cx, |buffer, cx| { + buffer.edit(edits, None, cx); + }); + } + + ClientOperation::CloseBuffer { + project_root_name, + is_local, + full_path, + } => { + log::info!( + "{}: dropping buffer {:?} in {} project {}", + client.username, + full_path, + if is_local { "local" } else { "remote" }, + project_root_name + ); + + let project = project_for_root_name(client, 
&project_root_name, cx) + .expect("invalid project in test operation"); + ensure_project_shared(&project, client, cx).await; + let buffer = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) + .expect("invalid buffer path in test operation"); + cx.update(|_| { + client.buffers_for_project(&project).remove(&buffer); + drop(buffer); + }); + } + + ClientOperation::SaveBuffer { + project_root_name, + is_local, + full_path, + detach, + } => { + log::info!( + "{}: saving buffer {:?} in {} project {}{}", + client.username, + full_path, + if is_local { "local" } else { "remote" }, + project_root_name, + if detach { ", detaching" } else { ", awaiting" } + ); + + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + ensure_project_shared(&project, client, cx).await; + let buffer = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) + .expect("invalid buffer path in test operation"); + let (requested_version, save) = + buffer.update(cx, |buffer, cx| (buffer.version(), buffer.save(cx))); + let save = cx.background().spawn(async move { + let (saved_version, _, _) = save + .await + .map_err(|err| anyhow!("save request failed: {:?}", err))?; + assert!(saved_version.observed_all(&requested_version)); + anyhow::Ok(()) + }); + if detach { + log::info!("{}: detaching save request", client.username); + cx.update(|cx| save.detach_and_log_err(cx)); + } else { + save.await?; + } + } + + ClientOperation::RequestLspDataInBuffer { + project_root_name, + is_local, + full_path, + offset, + kind, + detach, + } => { + log::info!( + "{}: request LSP {:?} for buffer {:?} in {} project {}{}", + client.username, + kind, + full_path, + if is_local { "local" } else { "remote" }, + project_root_name, + if detach { ", detaching" } else { ", awaiting" } + ); + + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + let 
buffer = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) + .expect("invalid buffer path in test operation"); + let request = match kind { + LspRequestKind::Rename => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.prepare_rename(buffer, offset, cx)) + .await?; + anyhow::Ok(()) + }), + LspRequestKind::Completion => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.completions(&buffer, offset, cx)) + .await?; + Ok(()) + }), + LspRequestKind::CodeAction => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.code_actions(&buffer, offset..offset, cx)) + .await?; + Ok(()) + }), + LspRequestKind::Definition => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.definition(&buffer, offset, cx)) + .await?; + Ok(()) + }), + LspRequestKind::Highlights => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.document_highlights(&buffer, offset, cx)) + .await?; + Ok(()) + }), + }; + if detach { + request.detach(); + } else { + request.await?; + } + } + + ClientOperation::SearchProject { + project_root_name, + query, + detach, + } => { + log::info!( + "{}: search project {} for {:?}{}", + client.username, + project_root_name, + query, + if detach { ", detaching" } else { ", awaiting" } + ); + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + let search = project.update(cx, |project, cx| { + project.search(SearchQuery::text(query, false, false), cx) + }); + let search = cx.background().spawn(async move { + search + .await + .map_err(|err| anyhow!("search request failed: {:?}", err)) + }); + if detach { + log::info!("{}: detaching save request", client.username); + cx.update(|cx| search.detach_and_log_err(cx)); + } else { + search.await?; + } + } + + ClientOperation::CreateFsEntry { path, is_dir } => { + log::info!( + "{}: creating {} at {:?}", + client.username, + if is_dir { "dir" } else { 
"file" }, + path + ); + if is_dir { + client.fs.create_dir(&path).await.unwrap(); + } else { + client + .fs + .create_file(&path, Default::default()) + .await + .unwrap(); + } + } + } + Ok(()) +} + struct TestPlan { rng: StdRng, + max_operations: usize, + operation_ix: usize, users: Vec, allow_server_restarts: bool, allow_client_reconnection: bool, @@ -484,6 +860,7 @@ enum ClientOperation { }, OpenBuffer { project_root_name: String, + is_local: bool, full_path: PathBuf, }, SearchProject { @@ -493,26 +870,39 @@ enum ClientOperation { }, EditBuffer { project_root_name: String, + is_local: bool, full_path: PathBuf, edits: Vec<(Range, Arc)>, }, CloseBuffer { project_root_name: String, + is_local: bool, full_path: PathBuf, }, SaveBuffer { project_root_name: String, + is_local: bool, full_path: PathBuf, detach: bool, }, RequestLspDataInBuffer { project_root_name: String, + is_local: bool, full_path: PathBuf, offset: usize, kind: LspRequestKind, detach: bool, }, - Other, + CreateWorktreeEntry { + project_root_name: String, + is_local: bool, + full_path: PathBuf, + is_dir: bool, + }, + CreateFsEntry { + path: PathBuf, + is_dir: bool, + }, } #[derive(Debug)] @@ -525,7 +915,14 @@ enum LspRequestKind { } impl TestPlan { - async fn next_operation(&mut self, clients: &[(Rc, TestAppContext)]) -> Operation { + async fn next_operation( + &mut self, + clients: &[(Rc, TestAppContext)], + ) -> Option { + if self.operation_ix == self.max_operations { + return None; + } + let operation = loop { break match self.rng.gen_range(0..100) { 0..=29 if clients.len() < self.users.len() => { @@ -535,6 +932,7 @@ impl TestPlan { .filter(|u| !u.online) .choose(&mut self.rng) .unwrap(); + self.operation_ix += 1; Operation::AddConnection { user_id: user.user_id, } @@ -542,18 +940,25 @@ impl TestPlan { 30..=34 if clients.len() > 1 && self.allow_client_disconnection => { let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; let user_id = client.current_user_id(cx); + self.operation_ix += 
1; Operation::RemoveConnection { user_id } } 35..=39 if clients.len() > 1 && self.allow_client_reconnection => { let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; let user_id = client.current_user_id(cx); + self.operation_ix += 1; Operation::BounceConnection { user_id } } 40..=44 if self.allow_server_restarts && clients.len() > 1 => { + self.operation_ix += 1; Operation::RestartServer } _ if !clients.is_empty() => { - let user_ids = (0..self.rng.gen_range(0..10)) + let count = self + .rng + .gen_range(1..10) + .min(self.max_operations - self.operation_ix); + let user_ids = (0..count) .map(|_| { let ix = self.rng.gen_range(0..clients.len()); let (client, cx) = &clients[ix]; @@ -568,18 +973,22 @@ impl TestPlan { _ => continue, }; }; - operation + Some(operation) } async fn next_client_operation( &mut self, client: &TestClient, cx: &TestAppContext, - ) -> ClientOperation { + ) -> Option { + if self.operation_ix == self.max_operations { + return None; + } + let user_id = client.current_user_id(cx); let call = cx.read(ActiveCall::global); let operation = loop { - match self.rng.gen_range(0..100) { + match self.rng.gen_range(0..100_u32) { // Mutate the call 0..=29 => { // Respond to an incoming call @@ -623,7 +1032,7 @@ impl TestPlan { } // Mutate projects - 39..=59 => match self.rng.gen_range(0..100_u32) { + 30..=59 => match self.rng.gen_range(0..100_u32) { // Open a new project 0..=70 => { // Open a remote project @@ -683,16 +1092,15 @@ impl TestPlan { } } - // Add a worktree to a local project - 81.. => { - if !client.local_projects().is_empty() { - let project = client - .local_projects() - .choose(&mut self.rng) - .unwrap() - .clone(); + // Mutate project worktrees + 81.. 
=> match self.rng.gen_range(0..100_u32) { + // Add a worktree to a local project + 0..=50 => { + let Some(project) = client + .local_projects() + .choose(&mut self.rng) + .cloned() else { continue }; let project_root_name = root_name_for_project(&project, cx); - let mut paths = client.fs.paths().await; paths.remove(0); let new_root_path = if paths.is_empty() || self.rng.gen() { @@ -700,19 +1108,51 @@ impl TestPlan { } else { paths.choose(&mut self.rng).unwrap().clone() }; - break ClientOperation::AddWorktreeToProject { project_root_name, new_root_path, }; } - } + + // Add an entry to a worktree + _ => { + let Some(project) = choose_random_project(client, &mut self.rng) else { continue }; + let project_root_name = root_name_for_project(&project, cx); + let is_local = project.read_with(cx, |project, _| project.is_local()); + let worktree = project.read_with(cx, |project, cx| { + project + .worktrees(cx) + .filter(|worktree| { + let worktree = worktree.read(cx); + worktree.is_visible() + && worktree.entries(false).any(|e| e.is_file()) + && worktree.root_entry().map_or(false, |e| e.is_dir()) + }) + .choose(&mut self.rng) + }); + let Some(worktree) = worktree else { continue }; + let is_dir = self.rng.gen::(); + let mut full_path = + worktree.read_with(cx, |w, _| PathBuf::from(w.root_name())); + full_path.push(gen_file_name(&mut self.rng)); + if !is_dir { + full_path.set_extension("rs"); + } + break ClientOperation::CreateWorktreeEntry { + project_root_name, + is_local, + full_path, + is_dir, + }; + } + }, }, // Query and mutate buffers - 60.. 
=> { + 60..=95 => { let Some(project) = choose_random_project(client, &mut self.rng) else { continue }; let project_root_name = root_name_for_project(&project, cx); + let is_local = project.read_with(cx, |project, _| project.is_local()); match self.rng.gen_range(0..100_u32) { // Manipulate an existing buffer @@ -731,6 +1171,7 @@ impl TestPlan { 0..=15 => { break ClientOperation::CloseBuffer { project_root_name, + is_local, full_path, }; } @@ -739,6 +1180,7 @@ impl TestPlan { let detach = self.rng.gen_bool(0.3); break ClientOperation::SaveBuffer { project_root_name, + is_local, full_path, detach, }; @@ -750,6 +1192,7 @@ impl TestPlan { }); break ClientOperation::EditBuffer { project_root_name, + is_local, full_path, edits, }; @@ -767,6 +1210,7 @@ impl TestPlan { project_root_name, full_path, offset, + is_local, kind: match self.rng.gen_range(0..5_u32) { 0 => LspRequestKind::Rename, 1 => LspRequestKind::Highlights, @@ -817,16 +1261,33 @@ impl TestPlan { }); break ClientOperation::OpenBuffer { project_root_name, + is_local, full_path, }; } } } - _ => break ClientOperation::Other, + // Create a file or directory + 96.. 
=> { + let is_dir = self.rng.gen::(); + let mut path = client + .fs + .directories() + .await + .choose(&mut self.rng) + .unwrap() + .clone(); + path.push(gen_file_name(&mut self.rng)); + if !is_dir { + path.set_extension("rs"); + } + break ClientOperation::CreateFsEntry { path, is_dir }; + } } }; - operation + self.operation_ix += 1; + Some(operation) } fn next_root_dir_name(&mut self, user_id: UserId) -> String { @@ -968,9 +1429,8 @@ async fn simulate_client( client.language_registry.add(Arc::new(language)); while operation_rx.next().await.is_some() { - let operation = plan.lock().next_client_operation(&client, &cx).await; - if let Err(error) = apply_client_operation(&client, plan.clone(), operation, &mut cx).await - { + let Some(operation) = plan.lock().next_client_operation(&client, &cx).await else { break }; + if let Err(error) = apply_client_operation(&client, operation, &mut cx).await { log::error!("{} error: {}", client.username, error); } cx.background().simulate_random_delay().await; @@ -978,342 +1438,6 @@ async fn simulate_client( log::info!("{}: done", client.username); } -async fn apply_client_operation( - client: &TestClient, - plan: Arc>, - operation: ClientOperation, - cx: &mut TestAppContext, -) -> Result<()> { - match operation { - ClientOperation::AcceptIncomingCall => { - log::info!("{}: accepting incoming call", client.username); - let active_call = cx.read(ActiveCall::global); - active_call - .update(cx, |call, cx| call.accept_incoming(cx)) - .await?; - } - - ClientOperation::RejectIncomingCall => { - log::info!("{}: declining incoming call", client.username); - let active_call = cx.read(ActiveCall::global); - active_call.update(cx, |call, _| call.decline_incoming())?; - } - - ClientOperation::LeaveCall => { - log::info!("{}: hanging up", client.username); - let active_call = cx.read(ActiveCall::global); - active_call.update(cx, |call, cx| call.hang_up(cx))?; - } - - ClientOperation::InviteContactToCall { user_id } => { - log::info!("{}: 
inviting {}", client.username, user_id,); - let active_call = cx.read(ActiveCall::global); - active_call - .update(cx, |call, cx| call.invite(user_id.to_proto(), None, cx)) - .await?; - } - - ClientOperation::OpenLocalProject { first_root_name } => { - log::info!( - "{}: opening local project at {:?}", - client.username, - first_root_name - ); - let root_path = Path::new("/").join(&first_root_name); - client.fs.create_dir(&root_path).await.unwrap(); - client - .fs - .create_file(&root_path.join("main.rs"), Default::default()) - .await - .unwrap(); - let project = client.build_local_project(root_path, cx).await.0; - ensure_project_shared(&project, client, cx).await; - client.local_projects_mut().push(project.clone()); - } - - ClientOperation::AddWorktreeToProject { - project_root_name, - new_root_path, - } => { - log::info!( - "{}: finding/creating local worktree at {:?} to project with root path {}", - client.username, - new_root_path, - project_root_name - ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); - ensure_project_shared(&project, client, cx).await; - if !client.fs.paths().await.contains(&new_root_path) { - client.fs.create_dir(&new_root_path).await.unwrap(); - } - project - .update(cx, |project, cx| { - project.find_or_create_local_worktree(&new_root_path, true, cx) - }) - .await - .unwrap(); - } - - ClientOperation::CloseRemoteProject { project_root_name } => { - log::info!( - "{}: closing remote project with root path {}", - client.username, - project_root_name, - ); - let ix = project_ix_for_root_name(&*client.remote_projects(), &project_root_name, cx) - .expect("invalid project in test operation"); - cx.update(|_| client.remote_projects_mut().remove(ix)); - } - - ClientOperation::OpenRemoteProject { - host_id, - first_root_name, - } => { - log::info!( - "{}: joining remote project of user {}, root name {}", - client.username, - host_id, - first_root_name, - ); - let active_call = 
cx.read(ActiveCall::global); - let project_id = active_call - .read_with(cx, |call, cx| { - let room = call.room().cloned()?; - let participant = room - .read(cx) - .remote_participants() - .get(&host_id.to_proto())?; - let project = participant - .projects - .iter() - .find(|project| project.worktree_root_names[0] == first_root_name)?; - Some(project.id) - }) - .expect("invalid project in test operation"); - let project = client.build_remote_project(project_id, cx).await; - client.remote_projects_mut().push(project); - } - - ClientOperation::OpenBuffer { - project_root_name, - full_path, - } => { - log::info!( - "{}: opening buffer {:?} in project {}", - client.username, - full_path, - project_root_name, - ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); - // ensure_project_shared(&project, client, cx).await; - let mut components = full_path.components(); - let root_name = components.next().unwrap().as_os_str().to_str().unwrap(); - let path = components.as_path(); - let worktree_id = project - .read_with(cx, |project, cx| { - project.worktrees(cx).find_map(|worktree| { - let worktree = worktree.read(cx); - if worktree.root_name() == root_name { - Some(worktree.id()) - } else { - None - } - }) - }) - .expect("invalid buffer path in test operation"); - let buffer = project - .update(cx, |project, cx| { - project.open_buffer((worktree_id, &path), cx) - }) - .await?; - client.buffers_for_project(&project).insert(buffer); - } - - ClientOperation::EditBuffer { - project_root_name, - full_path, - edits, - } => { - log::info!( - "{}: editing buffer {:?} in project {} with {:?}", - client.username, - full_path, - project_root_name, - edits - ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); - let buffer = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) - .expect("invalid buffer path in test 
operation"); - buffer.update(cx, |buffer, cx| { - buffer.edit(edits, None, cx); - }); - } - - ClientOperation::CloseBuffer { - project_root_name, - full_path, - } => { - log::info!( - "{}: dropping buffer {:?} in project {}", - client.username, - full_path, - project_root_name - ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); - let buffer = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) - .expect("invalid buffer path in test operation"); - cx.update(|_| { - client.buffers_for_project(&project).remove(&buffer); - drop(buffer); - }); - } - - ClientOperation::SaveBuffer { - project_root_name, - full_path, - detach, - } => { - log::info!( - "{}: saving buffer {:?} in project {}{}", - client.username, - full_path, - project_root_name, - if detach { ", detaching" } else { ", awaiting" } - ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); - let buffer = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) - .expect("invalid buffer path in test operation"); - let (requested_version, save) = - buffer.update(cx, |buffer, cx| (buffer.version(), buffer.save(cx))); - let save = cx.background().spawn(async move { - let (saved_version, _, _) = save - .await - .map_err(|err| anyhow!("save request failed: {:?}", err))?; - assert!(saved_version.observed_all(&requested_version)); - anyhow::Ok(()) - }); - if detach { - log::info!("{}: detaching save request", client.username); - cx.update(|cx| save.detach_and_log_err(cx)); - } else { - save.await?; - } - } - - ClientOperation::RequestLspDataInBuffer { - project_root_name, - full_path, - offset, - kind, - detach, - } => { - log::info!( - "{}: request LSP {:?} for buffer {:?} in project {}{}", - client.username, - kind, - full_path, - project_root_name, - if detach { ", detaching" } else { ", awaiting" } - ); - - let project = 
project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); - let buffer = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) - .expect("invalid buffer path in test operation"); - let request = match kind { - LspRequestKind::Rename => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.prepare_rename(buffer, offset, cx)) - .await?; - anyhow::Ok(()) - }), - LspRequestKind::Completion => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.completions(&buffer, offset, cx)) - .await?; - Ok(()) - }), - LspRequestKind::CodeAction => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.code_actions(&buffer, offset..offset, cx)) - .await?; - Ok(()) - }), - LspRequestKind::Definition => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.definition(&buffer, offset, cx)) - .await?; - Ok(()) - }), - LspRequestKind::Highlights => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.document_highlights(&buffer, offset, cx)) - .await?; - Ok(()) - }), - }; - if detach { - request.detach(); - } else { - request.await?; - } - } - - ClientOperation::SearchProject { - project_root_name, - query, - detach, - } => { - log::info!( - "{}: search project {} for {:?}{}", - client.username, - project_root_name, - query, - if detach { ", detaching" } else { ", awaiting" } - ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); - let search = project.update(cx, |project, cx| { - project.search(SearchQuery::text(query, false, false), cx) - }); - let search = cx.background().spawn(async move { - search - .await - .map_err(|err| anyhow!("search request failed: {:?}", err)) - }); - if detach { - log::info!("{}: detaching save request", client.username); - cx.update(|cx| search.detach_and_log_err(cx)); - } else { - search.await?; - } - } - - ClientOperation::Other => { - let 
choice = plan.lock().rng.gen_range(0..100); - match choice { - 0..=59 - if !client.local_projects().is_empty() - || !client.remote_projects().is_empty() => - { - randomly_mutate_worktrees(client, &plan, cx).await?; - } - _ => randomly_mutate_fs(client, &plan).await, - } - } - } - Ok(()) -} - fn buffer_for_full_path( buffers: &HashSet>, full_path: &PathBuf, @@ -1368,6 +1492,27 @@ fn root_name_for_project(project: &ModelHandle, cx: &TestAppContext) -> }) } +fn project_path_for_full_path( + project: &ModelHandle, + full_path: &Path, + cx: &TestAppContext, +) -> Option { + let mut components = full_path.components(); + let root_name = components.next().unwrap().as_os_str().to_str().unwrap(); + let path = components.as_path().into(); + let worktree_id = project.read_with(cx, |project, cx| { + project.worktrees(cx).find_map(|worktree| { + let worktree = worktree.read(cx); + if worktree.root_name() == root_name { + Some(worktree.id()) + } else { + None + } + }) + })?; + Some(ProjectPath { worktree_id, path }) +} + async fn ensure_project_shared( project: &ModelHandle, client: &TestClient, @@ -1402,76 +1547,6 @@ async fn ensure_project_shared( } } -async fn randomly_mutate_fs(client: &TestClient, plan: &Arc>) { - let is_dir = plan.lock().rng.gen::(); - let mut new_path = client - .fs - .directories() - .await - .choose(&mut plan.lock().rng) - .unwrap() - .clone(); - new_path.push(gen_file_name(&mut plan.lock().rng)); - if is_dir { - log::info!("{}: creating local dir at {:?}", client.username, new_path); - client.fs.create_dir(&new_path).await.unwrap(); - } else { - new_path.set_extension("rs"); - log::info!("{}: creating local file at {:?}", client.username, new_path); - client - .fs - .create_file(&new_path, Default::default()) - .await - .unwrap(); - } -} - -async fn randomly_mutate_worktrees( - client: &TestClient, - plan: &Arc>, - cx: &mut TestAppContext, -) -> Result<()> { - let project = choose_random_project(client, &mut plan.lock().rng).unwrap(); - let 
Some(worktree) = project.read_with(cx, |project, cx| { - project - .worktrees(cx) - .filter(|worktree| { - let worktree = worktree.read(cx); - worktree.is_visible() - && worktree.entries(false).any(|e| e.is_file()) - && worktree.root_entry().map_or(false, |e| e.is_dir()) - }) - .choose(&mut plan.lock().rng) - }) else { - return Ok(()) - }; - - let (worktree_id, worktree_root_name) = worktree.read_with(cx, |worktree, _| { - (worktree.id(), worktree.root_name().to_string()) - }); - - let is_dir = plan.lock().rng.gen::(); - let mut new_path = PathBuf::new(); - new_path.push(gen_file_name(&mut plan.lock().rng)); - if !is_dir { - new_path.set_extension("rs"); - } - log::info!( - "{}: creating {:?} in worktree {} ({})", - client.username, - new_path, - worktree_id, - worktree_root_name, - ); - project - .update(cx, |project, cx| { - project.create_entry((worktree_id, new_path), is_dir, cx) - }) - .unwrap() - .await?; - Ok(()) -} - fn choose_random_project(client: &TestClient, rng: &mut StdRng) -> Option> { client .local_projects() From 2351f2bd0cedc851e08da6423234d2baab286640 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Fri, 6 Jan 2023 15:40:42 -0800 Subject: [PATCH 07/60] Tolerate failure to join remote projects in randomized test --- .../src/tests/randomized_integration_tests.rs | 26 ++++++++++++------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 9cdc05833e..8ed290bcf8 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -7,7 +7,7 @@ use anyhow::{anyhow, Result}; use call::ActiveCall; use client::RECEIVE_TIMEOUT; use collections::{BTreeMap, HashSet}; -use fs::Fs as _; +use fs::{FakeFs, Fs as _}; use futures::StreamExt as _; use gpui::{executor::Deterministic, ModelHandle, TestAppContext}; use language::{range_to_lsp, FakeLspAdapter, Language, 
LanguageConfig, PointUtf16}; @@ -531,22 +531,30 @@ async fn apply_client_operation( ); let active_call = cx.read(ActiveCall::global); - let project_id = active_call - .read_with(cx, |call, cx| { + let project = active_call + .update(cx, |call, cx| { let room = call.room().cloned()?; let participant = room .read(cx) .remote_participants() .get(&host_id.to_proto())?; - let project = participant + let project_id = participant .projects .iter() - .find(|project| project.worktree_root_names[0] == first_root_name)?; - Some(project.id) + .find(|project| project.worktree_root_names[0] == first_root_name)? + .id; + Some(room.update(cx, |room, cx| { + room.join_project( + project_id, + client.language_registry.clone(), + FakeFs::new(cx.background().clone()), + cx, + ) + })) }) - .expect("invalid project in test operation"); - let project = client.build_remote_project(project_id, cx).await; - client.remote_projects_mut().push(project); + .expect("invalid project in test operation") + .await?; + client.remote_projects_mut().push(project.clone()); } ClientOperation::CreateWorktreeEntry { From c503ba00b63b9b04d167b9b17ce0ac20c0584e9b Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Fri, 6 Jan 2023 17:12:15 -0800 Subject: [PATCH 08/60] Add env vars to store and load test plan from JSON files --- .../src/tests/randomized_integration_tests.rs | 178 +++++++++++++++--- 1 file changed, 153 insertions(+), 25 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 8ed290bcf8..243d275e13 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -15,6 +15,7 @@ use lsp::FakeLanguageServer; use parking_lot::Mutex; use project::{search::SearchQuery, Project, ProjectPath}; use rand::prelude::*; +use serde::{Deserialize, Serialize}; use std::{ env, ops::Range, @@ -28,18 +29,20 @@ use util::ResultExt; async fn 
test_random_collaboration( cx: &mut TestAppContext, deterministic: Arc, - mut rng: StdRng, + rng: StdRng, ) { deterministic.forbid_parking(); let max_peers = env::var("MAX_PEERS") .map(|i| i.parse().expect("invalid `MAX_PEERS` variable")) - .unwrap_or(5); - + .unwrap_or(3); let max_operations = env::var("OPERATIONS") .map(|i| i.parse().expect("invalid `OPERATIONS` variable")) .unwrap_or(10); + let plan_load_path = path_env_var("LOAD_PLAN"); + let plan_save_path = path_env_var("SAVE_PLAN"); + let mut server = TestServer::start(&deterministic).await; let db = server.app_state.db.clone(); @@ -64,6 +67,7 @@ async fn test_random_collaboration( username, online: false, next_root_id: 0, + operation_ix: 0, }); } @@ -84,15 +88,12 @@ async fn test_random_collaboration( } } - let plan = Arc::new(Mutex::new(TestPlan { - allow_server_restarts: rng.gen_bool(0.7), - allow_client_reconnection: rng.gen_bool(0.7), - allow_client_disconnection: rng.gen_bool(0.1), - operation_ix: 0, - max_operations, - users, - rng, - })); + let plan = Arc::new(Mutex::new(TestPlan::new(rng, users, max_operations))); + + if let Some(path) = &plan_load_path { + eprintln!("loaded plan from path {:?}", path); + plan.lock().load(path); + } let mut clients = Vec::new(); let mut client_tasks = Vec::new(); @@ -250,6 +251,11 @@ async fn test_random_collaboration( deterministic.finish_waiting(); deterministic.run_until_parked(); + if let Some(path) = &plan_save_path { + eprintln!("saved test plan to path {:?}", path); + plan.lock().save(path); + } + for (client, client_cx) in &clients { for guest_project in client.remote_projects().iter() { guest_project.read_with(client_cx, |guest_project, cx| { @@ -760,12 +766,14 @@ async fn apply_client_operation( ClientOperation::SearchProject { project_root_name, + is_local, query, detach, } => { log::info!( - "{}: search project {} for {:?}{}", + "{}: search {} project {} for {:?}{}", client.username, + if is_local { "local" } else { "remote" }, project_root_name, query, 
if detach { ", detaching" } else { ", awaiting" } @@ -811,6 +819,8 @@ async fn apply_client_operation( struct TestPlan { rng: StdRng, + replay: bool, + stored_operations: Vec, max_operations: usize, operation_ix: usize, users: Vec, @@ -823,10 +833,21 @@ struct UserTestPlan { user_id: UserId, username: String, next_root_id: usize, + operation_ix: usize, online: bool, } -#[derive(Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(untagged)] +enum StoredOperation { + Server(Operation), + Client { + user_id: UserId, + operation: ClientOperation, + }, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] enum Operation { AddConnection { user_id: UserId, @@ -844,7 +865,7 @@ enum Operation { }, } -#[derive(Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] enum ClientOperation { AcceptIncomingCall, RejectIncomingCall, @@ -873,6 +894,7 @@ enum ClientOperation { }, SearchProject { project_root_name: String, + is_local: bool, query: String, detach: bool, }, @@ -913,7 +935,7 @@ enum ClientOperation { }, } -#[derive(Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] enum LspRequestKind { Rename, Completion, @@ -923,15 +945,109 @@ enum LspRequestKind { } impl TestPlan { + fn new(mut rng: StdRng, users: Vec, max_operations: usize) -> Self { + Self { + replay: false, + allow_server_restarts: rng.gen_bool(0.7), + allow_client_reconnection: rng.gen_bool(0.7), + allow_client_disconnection: rng.gen_bool(0.1), + stored_operations: Vec::new(), + operation_ix: 0, + max_operations, + users, + rng, + } + } + + fn load(&mut self, path: &Path) { + let json = std::fs::read_to_string(path).unwrap(); + self.replay = true; + self.stored_operations = serde_json::from_str(&json).unwrap(); + } + + fn save(&mut self, path: &Path) { + // Format each operation as one line + let mut json = Vec::new(); + json.push(b'['); + for (i, stored_operation) in self.stored_operations.iter().enumerate() { + if i > 0 { + json.push(b','); + } + json.extend_from_slice(b"\n "); + 
serde_json::to_writer(&mut json, stored_operation).unwrap(); + } + json.extend_from_slice(b"\n]\n"); + std::fs::write(path, &json).unwrap(); + } + async fn next_operation( &mut self, clients: &[(Rc, TestAppContext)], + ) -> Option { + if self.replay { + while let Some(stored_operation) = self.stored_operations.get(self.operation_ix) { + self.operation_ix += 1; + if let StoredOperation::Server(operation) = stored_operation { + return Some(operation.clone()); + } + } + None + } else { + let operation = self.generate_operation(clients).await; + if let Some(operation) = &operation { + self.stored_operations + .push(StoredOperation::Server(operation.clone())) + } + operation + } + } + + async fn next_client_operation( + &mut self, + client: &TestClient, + cx: &TestAppContext, + ) -> Option { + let current_user_id = client.current_user_id(cx); + let user_ix = self + .users + .iter() + .position(|user| user.user_id == current_user_id) + .unwrap(); + let user_plan = &mut self.users[user_ix]; + + if self.replay { + while let Some(stored_operation) = self.stored_operations.get(user_plan.operation_ix) { + user_plan.operation_ix += 1; + if let StoredOperation::Client { user_id, operation } = stored_operation { + if user_id == ¤t_user_id { + return Some(operation.clone()); + } + } + } + None + } else { + let operation = self + .generate_client_operation(current_user_id, client, cx) + .await; + if let Some(operation) = &operation { + self.stored_operations.push(StoredOperation::Client { + user_id: current_user_id, + operation: operation.clone(), + }) + } + operation + } + } + + async fn generate_operation( + &mut self, + clients: &[(Rc, TestAppContext)], ) -> Option { if self.operation_ix == self.max_operations { return None; } - let operation = loop { + Some(loop { break match self.rng.gen_range(0..100) { 0..=29 if clients.len() < self.users.len() => { let user = self @@ -980,12 +1096,12 @@ impl TestPlan { } _ => continue, }; - }; - Some(operation) + }) } - async fn 
next_client_operation( + async fn generate_client_operation( &mut self, + user_id: UserId, client: &TestClient, cx: &TestAppContext, ) -> Option { @@ -993,9 +1109,9 @@ impl TestPlan { return None; } - let user_id = client.current_user_id(cx); + self.operation_ix += 1; let call = cx.read(ActiveCall::global); - let operation = loop { + Some(loop { match self.rng.gen_range(0..100_u32) { // Mutate the call 0..=29 => { @@ -1237,6 +1353,7 @@ impl TestPlan { let detach = self.rng.gen_bool(0.3); break ClientOperation::SearchProject { project_root_name, + is_local, query, detach, }; @@ -1293,9 +1410,7 @@ impl TestPlan { break ClientOperation::CreateFsEntry { path, is_dir }; } } - }; - self.operation_ix += 1; - Some(operation) + }) } fn next_root_dir_name(&mut self, user_id: UserId) -> String { @@ -1572,3 +1687,16 @@ fn gen_file_name(rng: &mut StdRng) -> String { } name } + +fn path_env_var(name: &str) -> Option { + let value = env::var(name).ok()?; + let mut path = PathBuf::from(value); + if path.is_relative() { + let mut abs_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + abs_path.pop(); + abs_path.pop(); + abs_path.push(path); + path = abs_path + } + Some(path) +} From 3e3a703b60096e3e5287ddf33cff57839f80d339 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 9 Jan 2023 11:36:53 -0800 Subject: [PATCH 09/60] Skip inapplicable operations when running an edited test plan --- .../src/tests/randomized_integration_tests.rs | 633 +++++++++++------- 1 file changed, 375 insertions(+), 258 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 243d275e13..4d87ca9ccc 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -7,9 +7,10 @@ use anyhow::{anyhow, Result}; use call::ActiveCall; use client::RECEIVE_TIMEOUT; use collections::{BTreeMap, HashSet}; +use editor::Bias; use fs::{FakeFs, Fs as _}; use 
futures::StreamExt as _; -use gpui::{executor::Deterministic, ModelHandle, TestAppContext}; +use gpui::{executor::Deterministic, ModelHandle, Task, TestAppContext}; use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16}; use lsp::FakeLanguageServer; use parking_lot::Mutex; @@ -21,7 +22,10 @@ use std::{ ops::Range, path::{Path, PathBuf}, rc::Rc, - sync::Arc, + sync::{ + atomic::{AtomicBool, Ordering::SeqCst}, + Arc, + }, }; use util::ResultExt; @@ -101,147 +105,21 @@ async fn test_random_collaboration( let mut next_entity_id = 100000; loop { - let Some(next_operation) = plan.lock().next_operation(&clients).await else { break }; - match next_operation { - Operation::AddConnection { user_id } => { - let username = { - let mut plan = plan.lock(); - let mut user = plan.user(user_id); - user.online = true; - user.username.clone() - }; - log::info!("Adding new connection for {}", username); - next_entity_id += 100000; - let mut client_cx = TestAppContext::new( - cx.foreground_platform(), - cx.platform(), - deterministic.build_foreground(next_entity_id), - deterministic.build_background(), - cx.font_cache(), - cx.leak_detector(), - next_entity_id, - cx.function_name.clone(), - ); - - let (operation_tx, operation_rx) = futures::channel::mpsc::unbounded(); - let client = Rc::new(server.create_client(&mut client_cx, &username).await); - operation_channels.push(operation_tx); - clients.push((client.clone(), client_cx.clone())); - client_tasks.push(client_cx.foreground().spawn(simulate_client( - client, - operation_rx, - plan.clone(), - client_cx, - ))); - - log::info!("Added connection for {}", username); - } - - Operation::RemoveConnection { user_id } => { - log::info!("Simulating full disconnection of user {}", user_id); - let client_ix = clients - .iter() - .position(|(client, cx)| client.current_user_id(cx) == user_id) - .unwrap(); - let user_connection_ids = server - .connection_pool - .lock() - .user_connection_ids(user_id) - .collect::>(); - 
assert_eq!(user_connection_ids.len(), 1); - let removed_peer_id = user_connection_ids[0].into(); - let (client, mut client_cx) = clients.remove(client_ix); - let client_task = client_tasks.remove(client_ix); - operation_channels.remove(client_ix); - server.forbid_connections(); - server.disconnect_client(removed_peer_id); - deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); - deterministic.start_waiting(); - log::info!("Waiting for user {} to exit...", user_id); - client_task.await; - deterministic.finish_waiting(); - server.allow_connections(); - - for project in client.remote_projects().iter() { - project.read_with(&client_cx, |project, _| { - assert!( - project.is_read_only(), - "project {:?} should be read only", - project.remote_id() - ) - }); - } - - for (client, cx) in &clients { - let contacts = server - .app_state - .db - .get_contacts(client.current_user_id(cx)) - .await - .unwrap(); - let pool = server.connection_pool.lock(); - for contact in contacts { - if let db::Contact::Accepted { user_id: id, .. 
} = contact { - if pool.is_user_online(id) { - assert_ne!( - id, user_id, - "removed client is still a contact of another peer" - ); - } - } - } - } - - log::info!("{} removed", client.username); - plan.lock().user(user_id).online = false; - client_cx.update(|cx| { - cx.clear_globals(); - drop(client); - }); - } - - Operation::BounceConnection { user_id } => { - log::info!("Simulating temporary disconnection of user {}", user_id); - let user_connection_ids = server - .connection_pool - .lock() - .user_connection_ids(user_id) - .collect::>(); - assert_eq!(user_connection_ids.len(), 1); - let peer_id = user_connection_ids[0].into(); - server.disconnect_client(peer_id); - deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); - } - - Operation::RestartServer => { - log::info!("Simulating server restart"); - server.reset().await; - deterministic.advance_clock(RECEIVE_TIMEOUT); - server.start().await.unwrap(); - deterministic.advance_clock(CLEANUP_TIMEOUT); - let environment = &server.app_state.config.zed_environment; - let stale_room_ids = server - .app_state - .db - .stale_room_ids(environment, server.id()) - .await - .unwrap(); - assert_eq!(stale_room_ids, vec![]); - } - - Operation::MutateClients { user_ids, quiesce } => { - for user_id in user_ids { - let client_ix = clients - .iter() - .position(|(client, cx)| client.current_user_id(cx) == user_id) - .unwrap(); - operation_channels[client_ix].unbounded_send(()).unwrap(); - } - - if quiesce { - deterministic.run_until_parked(); - } - } + let Some((next_operation, skipped)) = plan.lock().next_server_operation(&clients) else { break }; + let applied = apply_server_operation( + deterministic.clone(), + &mut server, + &mut clients, + &mut client_tasks, + &mut operation_channels, + &mut next_entity_id, + plan.clone(), + next_operation, + cx, + ) + .await; + if !applied { + skipped.store(false, SeqCst); } } @@ -430,39 +308,216 @@ async fn test_random_collaboration( } } +async fn apply_server_operation( + 
deterministic: Arc, + server: &mut TestServer, + clients: &mut Vec<(Rc, TestAppContext)>, + client_tasks: &mut Vec>, + operation_channels: &mut Vec>, + next_entity_id: &mut usize, + plan: Arc>, + operation: Operation, + cx: &mut TestAppContext, +) -> bool { + match operation { + Operation::AddConnection { user_id } => { + let username; + { + let mut plan = plan.lock(); + let mut user = plan.user(user_id); + if user.online { + return false; + } + user.online = true; + username = user.username.clone(); + }; + log::info!("Adding new connection for {}", username); + *next_entity_id += 100000; + let mut client_cx = TestAppContext::new( + cx.foreground_platform(), + cx.platform(), + deterministic.build_foreground(*next_entity_id), + deterministic.build_background(), + cx.font_cache(), + cx.leak_detector(), + *next_entity_id, + cx.function_name.clone(), + ); + + let (operation_tx, operation_rx) = futures::channel::mpsc::unbounded(); + let client = Rc::new(server.create_client(&mut client_cx, &username).await); + operation_channels.push(operation_tx); + clients.push((client.clone(), client_cx.clone())); + client_tasks.push(client_cx.foreground().spawn(simulate_client( + client, + operation_rx, + plan.clone(), + client_cx, + ))); + + log::info!("Added connection for {}", username); + } + + Operation::RemoveConnection { user_id } => { + log::info!("Simulating full disconnection of user {}", user_id); + let client_ix = clients + .iter() + .position(|(client, cx)| client.current_user_id(cx) == user_id); + let Some(client_ix) = client_ix else { return false }; + let user_connection_ids = server + .connection_pool + .lock() + .user_connection_ids(user_id) + .collect::>(); + assert_eq!(user_connection_ids.len(), 1); + let removed_peer_id = user_connection_ids[0].into(); + let (client, mut client_cx) = clients.remove(client_ix); + let client_task = client_tasks.remove(client_ix); + operation_channels.remove(client_ix); + server.forbid_connections(); + 
server.disconnect_client(removed_peer_id); + deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); + deterministic.start_waiting(); + log::info!("Waiting for user {} to exit...", user_id); + client_task.await; + deterministic.finish_waiting(); + server.allow_connections(); + + for project in client.remote_projects().iter() { + project.read_with(&client_cx, |project, _| { + assert!( + project.is_read_only(), + "project {:?} should be read only", + project.remote_id() + ) + }); + } + + for (client, cx) in clients { + let contacts = server + .app_state + .db + .get_contacts(client.current_user_id(cx)) + .await + .unwrap(); + let pool = server.connection_pool.lock(); + for contact in contacts { + if let db::Contact::Accepted { user_id: id, .. } = contact { + if pool.is_user_online(id) { + assert_ne!( + id, user_id, + "removed client is still a contact of another peer" + ); + } + } + } + } + + log::info!("{} removed", client.username); + plan.lock().user(user_id).online = false; + client_cx.update(|cx| { + cx.clear_globals(); + drop(client); + }); + } + + Operation::BounceConnection { user_id } => { + log::info!("Simulating temporary disconnection of user {}", user_id); + let user_connection_ids = server + .connection_pool + .lock() + .user_connection_ids(user_id) + .collect::>(); + if user_connection_ids.is_empty() { + return false; + } + assert_eq!(user_connection_ids.len(), 1); + let peer_id = user_connection_ids[0].into(); + server.disconnect_client(peer_id); + deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); + } + + Operation::RestartServer => { + log::info!("Simulating server restart"); + server.reset().await; + deterministic.advance_clock(RECEIVE_TIMEOUT); + server.start().await.unwrap(); + deterministic.advance_clock(CLEANUP_TIMEOUT); + let environment = &server.app_state.config.zed_environment; + let stale_room_ids = server + .app_state + .db + .stale_room_ids(environment, server.id()) + .await + .unwrap(); + 
assert_eq!(stale_room_ids, vec![]); + } + + Operation::MutateClients { user_ids, quiesce } => { + let mut applied = false; + for user_id in user_ids { + let client_ix = clients + .iter() + .position(|(client, cx)| client.current_user_id(cx) == user_id); + let Some(client_ix) = client_ix else { continue }; + applied = true; + if let Err(err) = operation_channels[client_ix].unbounded_send(()) { + // panic!("error signaling user {}, client {}", user_id, client_ix); + } + } + + if quiesce && applied { + deterministic.run_until_parked(); + } + + return applied; + } + } + true +} + async fn apply_client_operation( client: &TestClient, operation: ClientOperation, cx: &mut TestAppContext, -) -> Result<()> { +) -> Result { match operation { ClientOperation::AcceptIncomingCall => { - log::info!("{}: accepting incoming call", client.username); - let active_call = cx.read(ActiveCall::global); + if active_call.read_with(cx, |call, _| call.incoming().borrow().is_none()) { + return Ok(false); + } + + log::info!("{}: accepting incoming call", client.username); active_call .update(cx, |call, cx| call.accept_incoming(cx)) .await?; } ClientOperation::RejectIncomingCall => { - log::info!("{}: declining incoming call", client.username); - let active_call = cx.read(ActiveCall::global); + if active_call.read_with(cx, |call, _| call.incoming().borrow().is_none()) { + return Ok(false); + } + + log::info!("{}: declining incoming call", client.username); active_call.update(cx, |call, _| call.decline_incoming())?; } ClientOperation::LeaveCall => { - log::info!("{}: hanging up", client.username); - let active_call = cx.read(ActiveCall::global); + if active_call.read_with(cx, |call, _| call.room().is_none()) { + return Ok(false); + } + + log::info!("{}: hanging up", client.username); active_call.update(cx, |call, cx| call.hang_up(cx))?; } ClientOperation::InviteContactToCall { user_id } => { - log::info!("{}: inviting {}", client.username, user_id,); - let active_call = 
cx.read(ActiveCall::global); + + log::info!("{}: inviting {}", client.username, user_id,); active_call .update(cx, |call, cx| call.invite(user_id.to_proto(), None, cx)) .await @@ -492,6 +547,10 @@ async fn apply_client_operation( project_root_name, new_root_path, } => { + let Some(project) = project_for_root_name(client, &project_root_name, cx) else { + return Ok(false) + }; + log::info!( "{}: finding/creating local worktree at {:?} to project with root path {}", client.username, @@ -499,8 +558,6 @@ async fn apply_client_operation( project_root_name ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); ensure_project_shared(&project, client, cx).await; if !client.fs.paths().await.contains(&new_root_path) { client.fs.create_dir(&new_root_path).await.unwrap(); @@ -514,21 +571,56 @@ async fn apply_client_operation( } ClientOperation::CloseRemoteProject { project_root_name } => { + let Some(project) = project_for_root_name(client, &project_root_name, cx) else { + return Ok(false) + }; + log::info!( "{}: closing remote project with root path {}", client.username, project_root_name, ); - let ix = project_ix_for_root_name(&*client.remote_projects(), &project_root_name, cx) - .expect("invalid project in test operation"); - cx.update(|_| client.remote_projects_mut().remove(ix)); + let ix = client + .remote_projects() + .iter() + .position(|p| p == &project) + .unwrap(); + cx.update(|_| { + client.remote_projects_mut().remove(ix); + drop(project); + }); } ClientOperation::OpenRemoteProject { host_id, first_root_name, } => { + let active_call = cx.read(ActiveCall::global); + let project = active_call.update(cx, |call, cx| { + let room = call.room().cloned()?; + let participant = room + .read(cx) + .remote_participants() + .get(&host_id.to_proto())?; + let project_id = participant + .projects + .iter() + .find(|project| project.worktree_root_names[0] == first_root_name)? 
+ .id; + Some(room.update(cx, |room, cx| { + room.join_project( + project_id, + client.language_registry.clone(), + FakeFs::new(cx.background().clone()), + cx, + ) + })) + }); + let Some(project) = project else { + return Ok(false) + }; + log::info!( "{}: joining remote project of user {}, root name {}", client.username, @@ -536,30 +628,7 @@ async fn apply_client_operation( first_root_name, ); - let active_call = cx.read(ActiveCall::global); - let project = active_call - .update(cx, |call, cx| { - let room = call.room().cloned()?; - let participant = room - .read(cx) - .remote_participants() - .get(&host_id.to_proto())?; - let project_id = participant - .projects - .iter() - .find(|project| project.worktree_root_names[0] == first_root_name)? - .id; - Some(room.update(cx, |room, cx| { - room.join_project( - project_id, - client.language_registry.clone(), - FakeFs::new(cx.background().clone()), - cx, - ) - })) - }) - .expect("invalid project in test operation") - .await?; + let project = project.await?; client.remote_projects_mut().push(project.clone()); } @@ -569,6 +638,13 @@ async fn apply_client_operation( full_path, is_dir, } => { + let Some(project) = project_for_root_name(client, &project_root_name, cx) else { + return Ok(false); + }; + let Some(project_path) = project_path_for_full_path(&project, &full_path, cx) else { + return Ok(false); + }; + log::info!( "{}: creating {} at path {:?} in {} project {}", client.username, @@ -578,11 +654,7 @@ async fn apply_client_operation( project_root_name, ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); ensure_project_shared(&project, client, cx).await; - let project_path = project_path_for_full_path(&project, &full_path, cx) - .expect("invalid worktree path in test operation"); project .update(cx, |p, cx| p.create_entry(project_path, is_dir, cx)) .unwrap() @@ -594,6 +666,13 @@ async fn apply_client_operation( is_local, full_path, } => { + let 
Some(project) = project_for_root_name(client, &project_root_name, cx) else { + return Ok(false); + }; + let Some(project_path) = project_path_for_full_path(&project, &full_path, cx) else { + return Ok(false); + }; + log::info!( "{}: opening buffer {:?} in {} project {}", client.username, @@ -602,11 +681,7 @@ async fn apply_client_operation( project_root_name, ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); ensure_project_shared(&project, client, cx).await; - let project_path = project_path_for_full_path(&project, &full_path, cx) - .expect("invalid buffer path in test operation"); let buffer = project .update(cx, |project, cx| project.open_buffer(project_path, cx)) .await?; @@ -619,6 +694,14 @@ async fn apply_client_operation( full_path, edits, } => { + let Some(project) = project_for_root_name(client, &project_root_name, cx) else { + return Ok(false); + }; + let Some(buffer) = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { + return Ok(false); + }; + log::info!( "{}: editing buffer {:?} in {} project {} with {:?}", client.username, @@ -628,14 +711,18 @@ async fn apply_client_operation( edits ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); ensure_project_shared(&project, client, cx).await; - let buffer = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) - .expect("invalid buffer path in test operation"); buffer.update(cx, |buffer, cx| { - buffer.edit(edits, None, cx); + let snapshot = buffer.snapshot(); + buffer.edit( + edits.into_iter().map(|(range, text)| { + let start = snapshot.clip_offset(range.start, Bias::Left); + let end = snapshot.clip_offset(range.end, Bias::Right); + (start..end, text) + }), + None, + cx, + ); }); } @@ -644,20 +731,23 @@ async fn apply_client_operation( is_local, full_path, } => { + let Some(project) = 
project_for_root_name(client, &project_root_name, cx) else { + return Ok(false); + }; + let Some(buffer) = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { + return Ok(false); + }; + log::info!( - "{}: dropping buffer {:?} in {} project {}", + "{}: closing buffer {:?} in {} project {}", client.username, full_path, if is_local { "local" } else { "remote" }, project_root_name ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); ensure_project_shared(&project, client, cx).await; - let buffer = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) - .expect("invalid buffer path in test operation"); cx.update(|_| { client.buffers_for_project(&project).remove(&buffer); drop(buffer); @@ -670,6 +760,14 @@ async fn apply_client_operation( full_path, detach, } => { + let Some(project) = project_for_root_name(client, &project_root_name, cx) else { + return Ok(false); + }; + let Some(buffer) = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { + return Ok(false); + }; + log::info!( "{}: saving buffer {:?} in {} project {}{}", client.username, @@ -679,12 +777,7 @@ async fn apply_client_operation( if detach { ", detaching" } else { ", awaiting" } ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); ensure_project_shared(&project, client, cx).await; - let buffer = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) - .expect("invalid buffer path in test operation"); let (requested_version, save) = buffer.update(cx, |buffer, cx| (buffer.version(), buffer.save(cx))); let save = cx.background().spawn(async move { @@ -710,6 +803,14 @@ async fn apply_client_operation( kind, detach, } => { + let Some(project) = project_for_root_name(client, &project_root_name, cx) else { + return Ok(false); + }; + let Some(buffer) = + 
buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { + return Ok(false); + }; + log::info!( "{}: request LSP {:?} for buffer {:?} in {} project {}{}", client.username, @@ -720,11 +821,7 @@ async fn apply_client_operation( if detach { ", detaching" } else { ", awaiting" } ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); - let buffer = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) - .expect("invalid buffer path in test operation"); + let offset = buffer.read_with(cx, |b, _| b.clip_offset(offset, Bias::Left)); let request = match kind { LspRequestKind::Rename => cx.spawn(|mut cx| async move { project @@ -770,6 +867,10 @@ async fn apply_client_operation( query, detach, } => { + let Some(project) = project_for_root_name(client, &project_root_name, cx) else { + return Ok(false); + }; + log::info!( "{}: search {} project {} for {:?}{}", client.username, @@ -778,8 +879,7 @@ async fn apply_client_operation( query, if detach { ", detaching" } else { ", awaiting" } ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); + let search = project.update(cx, |project, cx| { project.search(SearchQuery::text(query, false, false), cx) }); @@ -797,12 +897,17 @@ async fn apply_client_operation( } ClientOperation::CreateFsEntry { path, is_dir } => { + if client.fs.metadata(&path.parent().unwrap()).await?.is_none() { + return Ok(false); + } + log::info!( "{}: creating {} at {:?}", client.username, if is_dir { "dir" } else { "file" }, path ); + if is_dir { client.fs.create_dir(&path).await.unwrap(); } else { @@ -814,13 +919,13 @@ async fn apply_client_operation( } } } - Ok(()) + Ok(true) } struct TestPlan { rng: StdRng, replay: bool, - stored_operations: Vec, + stored_operations: Vec<(StoredOperation, Arc)>, max_operations: usize, operation_ix: usize, users: Vec, @@ -962,51 +1067,57 @@ impl 
TestPlan { fn load(&mut self, path: &Path) { let json = std::fs::read_to_string(path).unwrap(); self.replay = true; - self.stored_operations = serde_json::from_str(&json).unwrap(); + let stored_operations: Vec = serde_json::from_str(&json).unwrap(); + self.stored_operations = stored_operations + .into_iter() + .map(|operation| (operation, Arc::new(AtomicBool::new(false)))) + .collect() } fn save(&mut self, path: &Path) { // Format each operation as one line let mut json = Vec::new(); json.push(b'['); - for (i, stored_operation) in self.stored_operations.iter().enumerate() { - if i > 0 { + for (operation, skipped) in &self.stored_operations { + if skipped.load(SeqCst) { + continue; + } + if json.len() > 1 { json.push(b','); } json.extend_from_slice(b"\n "); - serde_json::to_writer(&mut json, stored_operation).unwrap(); + serde_json::to_writer(&mut json, operation).unwrap(); } json.extend_from_slice(b"\n]\n"); std::fs::write(path, &json).unwrap(); } - async fn next_operation( + fn next_server_operation( &mut self, clients: &[(Rc, TestAppContext)], - ) -> Option { + ) -> Option<(Operation, Arc)> { if self.replay { while let Some(stored_operation) = self.stored_operations.get(self.operation_ix) { self.operation_ix += 1; - if let StoredOperation::Server(operation) = stored_operation { - return Some(operation.clone()); + if let (StoredOperation::Server(operation), skipped) = stored_operation { + return Some((operation.clone(), skipped.clone())); } } None } else { - let operation = self.generate_operation(clients).await; - if let Some(operation) = &operation { - self.stored_operations - .push(StoredOperation::Server(operation.clone())) - } - operation + let operation = self.generate_server_operation(clients)?; + let skipped = Arc::new(AtomicBool::new(false)); + self.stored_operations + .push((StoredOperation::Server(operation.clone()), skipped.clone())); + Some((operation, skipped)) } } - async fn next_client_operation( + fn next_client_operation( &mut self, client: 
&TestClient, cx: &TestAppContext, - ) -> Option { + ) -> Option<(ClientOperation, Arc)> { let current_user_id = client.current_user_id(cx); let user_ix = self .users @@ -1018,28 +1129,29 @@ impl TestPlan { if self.replay { while let Some(stored_operation) = self.stored_operations.get(user_plan.operation_ix) { user_plan.operation_ix += 1; - if let StoredOperation::Client { user_id, operation } = stored_operation { + if let (StoredOperation::Client { user_id, operation }, skipped) = stored_operation + { if user_id == ¤t_user_id { - return Some(operation.clone()); + return Some((operation.clone(), skipped.clone())); } } } None } else { - let operation = self - .generate_client_operation(current_user_id, client, cx) - .await; - if let Some(operation) = &operation { - self.stored_operations.push(StoredOperation::Client { + let operation = self.generate_client_operation(current_user_id, client, cx)?; + let skipped = Arc::new(AtomicBool::new(false)); + self.stored_operations.push(( + StoredOperation::Client { user_id: current_user_id, operation: operation.clone(), - }) - } - operation + }, + skipped.clone(), + )); + Some((operation, skipped)) } } - async fn generate_operation( + fn generate_server_operation( &mut self, clients: &[(Rc, TestAppContext)], ) -> Option { @@ -1091,7 +1203,7 @@ impl TestPlan { .collect(); Operation::MutateClients { user_ids, - quiesce: self.rng.gen(), + quiesce: self.rng.gen_bool(0.7), } } _ => continue, @@ -1099,7 +1211,7 @@ impl TestPlan { }) } - async fn generate_client_operation( + fn generate_client_operation( &mut self, user_id: UserId, client: &TestClient, @@ -1221,11 +1333,11 @@ impl TestPlan { // Add a worktree to a local project 0..=50 => { let Some(project) = client - .local_projects() - .choose(&mut self.rng) - .cloned() else { continue }; + .local_projects() + .choose(&mut self.rng) + .cloned() else { continue }; let project_root_name = root_name_for_project(&project, cx); - let mut paths = client.fs.paths().await; + let mut paths = 
cx.background().block(client.fs.paths()); paths.remove(0); let new_root_path = if paths.is_empty() || self.rng.gen() { Path::new("/").join(&self.next_root_dir_name(user_id)) @@ -1396,10 +1508,9 @@ impl TestPlan { // Create a file or directory 96.. => { let is_dir = self.rng.gen::(); - let mut path = client - .fs - .directories() - .await + let mut path = cx + .background() + .block(client.fs.directories()) .choose(&mut self.rng) .unwrap() .clone(); @@ -1501,10 +1612,9 @@ async fn simulate_client( let plan = plan.clone(); async move { let files = fs.files().await; - let mut plan = plan.lock(); - let count = plan.rng.gen_range::(1..3); + let count = plan.lock().rng.gen_range::(1..3); let files = (0..count) - .map(|_| files.choose(&mut plan.rng).unwrap()) + .map(|_| files.choose(&mut plan.lock().rng).unwrap()) .collect::>(); log::info!("LSP: Returning definitions in files {:?}", &files); Ok(Some(lsp::GotoDefinitionResponse::Array( @@ -1552,9 +1662,16 @@ async fn simulate_client( client.language_registry.add(Arc::new(language)); while operation_rx.next().await.is_some() { - let Some(operation) = plan.lock().next_client_operation(&client, &cx).await else { break }; - if let Err(error) = apply_client_operation(&client, operation, &mut cx).await { - log::error!("{} error: {}", client.username, error); + let Some((operation, skipped)) = plan.lock().next_client_operation(&client, &cx) else { break }; + match apply_client_operation(&client, operation, &mut cx).await { + Err(error) => { + log::error!("{} error: {}", client.username, error); + } + Ok(applied) => { + if !applied { + skipped.store(true, SeqCst); + } + } } cx.background().simulate_random_delay().await; } From 576a9bb92cd59cb147d9b85ff838a6d39f3dda40 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 9 Jan 2023 14:49:36 -0800 Subject: [PATCH 10/60] Drop project's buffers when closing a remote project --- crates/collab/src/tests/randomized_integration_tests.rs | 1 + 1 file changed, 1 insertion(+) diff --git 
a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 34ae96e665..cd51a2e1f8 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -597,6 +597,7 @@ async fn apply_client_operation( .unwrap(); cx.update(|_| { client.remote_projects_mut().remove(ix); + client.buffers().retain(|project, _| project != project); drop(project); }); } From a3c7416218d04abeeb29b0920c5e87b02ed3ff26 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 12 Jan 2023 12:33:23 -0800 Subject: [PATCH 11/60] Don't include user ids with MutateClients ops in serialized test plans --- .../src/tests/randomized_integration_tests.rs | 66 +++++++++++++++---- 1 file changed, 55 insertions(+), 11 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index cd51a2e1f8..0c2f7ce288 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -322,7 +322,7 @@ async fn apply_server_operation( server: &mut TestServer, clients: &mut Vec<(Rc, TestAppContext)>, client_tasks: &mut Vec>, - operation_channels: &mut Vec>, + operation_channels: &mut Vec>, next_entity_id: &mut usize, plan: Arc>, operation: Operation, @@ -462,7 +462,11 @@ async fn apply_server_operation( assert_eq!(stale_room_ids, vec![]); } - Operation::MutateClients { user_ids, quiesce } => { + Operation::MutateClients { + user_ids, + batch_id, + quiesce, + } => { let mut applied = false; for user_id in user_ids { let client_ix = clients @@ -470,7 +474,7 @@ async fn apply_server_operation( .position(|(client, cx)| client.current_user_id(cx) == user_id); let Some(client_ix) = client_ix else { continue }; applied = true; - if let Err(err) = operation_channels[client_ix].unbounded_send(()) { + if let Err(err) = 
operation_channels[client_ix].unbounded_send(batch_id) { // panic!("error signaling user {}, client {}", user_id, client_ix); } } @@ -970,6 +974,7 @@ struct TestPlan { max_operations: usize, operation_ix: usize, users: Vec, + next_batch_id: usize, allow_server_restarts: bool, allow_client_reconnection: bool, allow_client_disconnection: bool, @@ -989,6 +994,7 @@ enum StoredOperation { Server(Operation), Client { user_id: UserId, + batch_id: usize, operation: ClientOperation, }, } @@ -1006,6 +1012,9 @@ enum Operation { }, RestartServer, MutateClients { + batch_id: usize, + #[serde(skip_serializing)] + #[serde(skip_deserializing)] user_ids: Vec, quiesce: bool, }, @@ -1103,6 +1112,7 @@ impl TestPlan { allow_client_disconnection: rng.gen_bool(0.1), stored_operations: Vec::new(), operation_ix: 0, + next_batch_id: 0, max_operations, users, rng, @@ -1114,8 +1124,32 @@ impl TestPlan { self.replay = true; let stored_operations: Vec = serde_json::from_str(&json).unwrap(); self.stored_operations = stored_operations - .into_iter() - .map(|operation| (operation, Arc::new(AtomicBool::new(false)))) + .iter() + .cloned() + .enumerate() + .map(|(i, mut operation)| { + if let StoredOperation::Server(Operation::MutateClients { + batch_id: current_batch_id, + user_ids, + .. + }) = &mut operation + { + assert!(user_ids.is_empty()); + user_ids.extend(stored_operations[i + 1..].iter().filter_map(|operation| { + if let StoredOperation::Client { + user_id, batch_id, .. 
+ } = operation + { + if batch_id == current_batch_id { + return Some(user_id); + } + } + None + })); + user_ids.sort_unstable(); + } + (operation, Arc::new(AtomicBool::new(false))) + }) .collect() } @@ -1161,6 +1195,7 @@ impl TestPlan { fn next_client_operation( &mut self, client: &TestClient, + current_batch_id: usize, cx: &TestAppContext, ) -> Option<(ClientOperation, Arc)> { let current_user_id = client.current_user_id(cx); @@ -1174,7 +1209,12 @@ impl TestPlan { if self.replay { while let Some(stored_operation) = self.stored_operations.get(user_plan.operation_ix) { user_plan.operation_ix += 1; - if let (StoredOperation::Client { user_id, operation }, skipped) = stored_operation + if let ( + StoredOperation::Client { + user_id, operation, .. + }, + skipped, + ) = stored_operation { if user_id == ¤t_user_id { return Some((operation.clone(), skipped.clone())); @@ -1188,6 +1228,7 @@ impl TestPlan { self.stored_operations.push(( StoredOperation::Client { user_id: current_user_id, + batch_id: current_batch_id, operation: operation.clone(), }, skipped.clone(), @@ -1239,15 +1280,18 @@ impl TestPlan { .rng .gen_range(1..10) .min(self.max_operations - self.operation_ix); - let user_ids = (0..count) + let batch_id = util::post_inc(&mut self.next_batch_id); + let mut user_ids = (0..count) .map(|_| { let ix = self.rng.gen_range(0..clients.len()); let (client, cx) = &clients[ix]; client.current_user_id(cx) }) - .collect(); + .collect::>(); + user_ids.sort_unstable(); Operation::MutateClients { user_ids, + batch_id, quiesce: self.rng.gen_bool(0.7), } } @@ -1625,7 +1669,7 @@ impl TestPlan { async fn simulate_client( client: Rc, - mut operation_rx: futures::channel::mpsc::UnboundedReceiver<()>, + mut operation_rx: futures::channel::mpsc::UnboundedReceiver, plan: Arc>, mut cx: TestAppContext, ) { @@ -1740,8 +1784,8 @@ async fn simulate_client( .await; client.language_registry.add(Arc::new(language)); - while operation_rx.next().await.is_some() { - let Some((operation, skipped)) 
= plan.lock().next_client_operation(&client, &cx) else { break }; + while let Some(batch_id) = operation_rx.next().await { + let Some((operation, skipped)) = plan.lock().next_client_operation(&client, batch_id, &cx) else { break }; match apply_client_operation(&client, operation, &mut cx).await { Err(error) => { log::error!("{} error: {}", client.username, error); From 00e8625037f9fb6a17577a66f46c5b53f3d85d8e Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 12 Jan 2023 14:30:10 -0800 Subject: [PATCH 12/60] Simplify management of entity ids for different app contexts in randomized test --- crates/collab/src/tests/randomized_integration_tests.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 0c2f7ce288..0b6ec2367a 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -105,7 +105,6 @@ async fn test_random_collaboration( let mut clients = Vec::new(); let mut client_tasks = Vec::new(); let mut operation_channels = Vec::new(); - let mut next_entity_id = 100000; loop { let Some((next_operation, skipped)) = plan.lock().next_server_operation(&clients) else { break }; @@ -115,7 +114,6 @@ async fn test_random_collaboration( &mut clients, &mut client_tasks, &mut operation_channels, - &mut next_entity_id, plan.clone(), next_operation, cx, @@ -323,7 +321,6 @@ async fn apply_server_operation( clients: &mut Vec<(Rc, TestAppContext)>, client_tasks: &mut Vec>, operation_channels: &mut Vec>, - next_entity_id: &mut usize, plan: Arc>, operation: Operation, cx: &mut TestAppContext, @@ -341,15 +338,15 @@ async fn apply_server_operation( username = user.username.clone(); }; log::info!("Adding new connection for {}", username); - *next_entity_id += 100000; + let next_entity_id = (user_id.0 * 10_000) as usize; let mut client_cx = TestAppContext::new( 
cx.foreground_platform(), cx.platform(), - deterministic.build_foreground(*next_entity_id), + deterministic.build_foreground(user_id.0 as usize), deterministic.build_background(), cx.font_cache(), cx.leak_detector(), - *next_entity_id, + next_entity_id, cx.function_name.clone(), ); From e04d0be8531ae4ab78e72811d841f5c3045e71e7 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 12 Jan 2023 14:30:53 -0800 Subject: [PATCH 13/60] Remove unneeded log messages in randomized test --- crates/collab/src/tests/randomized_integration_tests.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 0b6ec2367a..fd6252e78c 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -799,7 +799,6 @@ async fn apply_client_operation( anyhow::Ok(()) }); if detach { - log::info!("{}: detaching save request", client.username); cx.update(|cx| save.detach_and_log_err(cx)); } else { save.await?; @@ -900,7 +899,6 @@ async fn apply_client_operation( .map_err(|err| anyhow!("search request failed: {:?}", err)) }); if detach { - log::info!("{}: detaching save request", client.username); cx.update(|cx| search.detach_and_log_err(cx)); } else { search.await?; From 1a9ff2420e5441d1a86496ac3daa6833d69b855b Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 12 Jan 2023 22:09:36 -0800 Subject: [PATCH 14/60] Clean up how applications are marked as inapplicable --- .../src/tests/randomized_integration_tests.rs | 172 +++++++++--------- 1 file changed, 82 insertions(+), 90 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index fd6252e78c..d7f946cc0a 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -6,7 +6,7 @@ use crate::{ use 
anyhow::{anyhow, Result}; use call::ActiveCall; use client::RECEIVE_TIMEOUT; -use collections::{BTreeMap, HashSet}; +use collections::BTreeMap; use editor::Bias; use fs::{FakeFs, Fs as _}; use futures::StreamExt as _; @@ -490,12 +490,12 @@ async fn apply_client_operation( client: &TestClient, operation: ClientOperation, cx: &mut TestAppContext, -) -> Result { +) -> Result<(), TestError> { match operation { ClientOperation::AcceptIncomingCall => { let active_call = cx.read(ActiveCall::global); if active_call.read_with(cx, |call, _| call.incoming().borrow().is_none()) { - return Ok(false); + Err(TestError::Inapplicable)?; } log::info!("{}: accepting incoming call", client.username); @@ -507,7 +507,7 @@ async fn apply_client_operation( ClientOperation::RejectIncomingCall => { let active_call = cx.read(ActiveCall::global); if active_call.read_with(cx, |call, _| call.incoming().borrow().is_none()) { - return Ok(false); + Err(TestError::Inapplicable)?; } log::info!("{}: declining incoming call", client.username); @@ -517,7 +517,7 @@ async fn apply_client_operation( ClientOperation::LeaveCall => { let active_call = cx.read(ActiveCall::global); if active_call.read_with(cx, |call, _| call.room().is_none()) { - return Ok(false); + Err(TestError::Inapplicable)?; } log::info!("{}: hanging up", client.username); @@ -557,9 +557,8 @@ async fn apply_client_operation( project_root_name, new_root_path, } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false) - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: finding/creating local worktree at {:?} to project with root path {}", @@ -581,9 +580,8 @@ async fn apply_client_operation( } ClientOperation::CloseRemoteProject { project_root_name } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false) - }; + let project = project_for_root_name(client, 
&project_root_name, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: closing remote project with root path {}", @@ -608,29 +606,28 @@ async fn apply_client_operation( first_root_name, } => { let active_call = cx.read(ActiveCall::global); - let project = active_call.update(cx, |call, cx| { - let room = call.room().cloned()?; - let participant = room - .read(cx) - .remote_participants() - .get(&host_id.to_proto())?; - let project_id = participant - .projects - .iter() - .find(|project| project.worktree_root_names[0] == first_root_name)? - .id; - Some(room.update(cx, |room, cx| { - room.join_project( - project_id, - client.language_registry.clone(), - FakeFs::new(cx.background().clone()), - cx, - ) - })) - }); - let Some(project) = project else { - return Ok(false) - }; + let project = active_call + .update(cx, |call, cx| { + let room = call.room().cloned()?; + let participant = room + .read(cx) + .remote_participants() + .get(&host_id.to_proto())?; + let project_id = participant + .projects + .iter() + .find(|project| project.worktree_root_names[0] == first_root_name)? 
+ .id; + Some(room.update(cx, |room, cx| { + room.join_project( + project_id, + client.language_registry.clone(), + FakeFs::new(cx.background().clone()), + cx, + ) + })) + }) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: joining remote project of user {}, root name {}", @@ -649,12 +646,10 @@ async fn apply_client_operation( full_path, is_dir, } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false); - }; - let Some(project_path) = project_path_for_full_path(&project, &full_path, cx) else { - return Ok(false); - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; + let project_path = project_path_for_full_path(&project, &full_path, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: creating {} at path {:?} in {} project {}", @@ -677,12 +672,10 @@ async fn apply_client_operation( is_local, full_path, } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false); - }; - let Some(project_path) = project_path_for_full_path(&project, &full_path, cx) else { - return Ok(false); - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; + let project_path = project_path_for_full_path(&project, &full_path, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: opening buffer {:?} in {} project {}", @@ -705,13 +698,10 @@ async fn apply_client_operation( full_path, edits, } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false); - }; - let Some(buffer) = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { - return Ok(false); - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; + let buffer = buffer_for_full_path(client, &project, &full_path, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: editing buffer {:?} 
in {} project {} with {:?}", @@ -742,13 +732,10 @@ async fn apply_client_operation( is_local, full_path, } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false); - }; - let Some(buffer) = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { - return Ok(false); - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; + let buffer = buffer_for_full_path(client, &project, &full_path, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: closing buffer {:?} in {} project {}", @@ -771,13 +758,10 @@ async fn apply_client_operation( full_path, detach, } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false); - }; - let Some(buffer) = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { - return Ok(false); - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; + let buffer = buffer_for_full_path(client, &project, &full_path, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: saving buffer {:?} in {} project {}{}", @@ -813,13 +797,10 @@ async fn apply_client_operation( kind, detach, } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false); - }; - let Some(buffer) = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { - return Ok(false); - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; + let buffer = buffer_for_full_path(client, &project, &full_path, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: request LSP {:?} for buffer {:?} in {} project {}{}", @@ -877,9 +858,8 @@ async fn apply_client_operation( query, detach, } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false); - }; + let 
project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: search {} project {} for {:?}{}", @@ -906,9 +886,11 @@ async fn apply_client_operation( } ClientOperation::CreateFsEntry { path, is_dir } => { - if client.fs.metadata(&path.parent().unwrap()).await?.is_none() { - return Ok(false); - } + client + .fs + .metadata(&path.parent().unwrap()) + .await? + .ok_or(TestError::Inapplicable)?; log::info!( "{}: creating {} at {:?}", @@ -938,7 +920,7 @@ async fn apply_client_operation( .await? .map_or(false, |m| m.is_dir) { - return Ok(false); + Err(TestError::Inapplicable)?; } log::info!( @@ -959,7 +941,7 @@ async fn apply_client_operation( client.fs.set_index_for_repo(&dot_git_dir, &contents).await; } } - Ok(true) + Ok(()) } struct TestPlan { @@ -1098,6 +1080,17 @@ enum LspRequestKind { Highlights, } +enum TestError { + Inapplicable, + Other(anyhow::Error), +} + +impl From for TestError { + fn from(value: anyhow::Error) -> Self { + Self::Other(value) + } +} + impl TestPlan { fn new(mut rng: StdRng, users: Vec, max_operations: usize) -> Self { Self { @@ -1782,14 +1775,11 @@ async fn simulate_client( while let Some(batch_id) = operation_rx.next().await { let Some((operation, skipped)) = plan.lock().next_client_operation(&client, batch_id, &cx) else { break }; match apply_client_operation(&client, operation, &mut cx).await { - Err(error) => { + Ok(()) => {} + Err(TestError::Inapplicable) => skipped.store(true, SeqCst), + Err(TestError::Other(error)) => { log::error!("{} error: {}", client.username, error); } - Ok(applied) => { - if !applied { - skipped.store(true, SeqCst); - } - } } cx.background().simulate_random_delay().await; } @@ -1797,11 +1787,13 @@ async fn simulate_client( } fn buffer_for_full_path( - buffers: &HashSet>, + client: &TestClient, + project: &ModelHandle, full_path: &PathBuf, cx: &TestAppContext, ) -> Option> { - buffers + client + .buffers_for_project(project) .iter() .find(|buffer| { 
buffer.read_with(cx, |buffer, cx| { From 2c84b741263f958ba51e257e5763aa0bf40253a5 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 16 Jan 2023 09:48:54 -0800 Subject: [PATCH 15/60] Avoid retaining project in randomized test while LSP request is outstanding --- .../src/tests/randomized_integration_tests.rs | 68 +++++++++---------- 1 file changed, 33 insertions(+), 35 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index d7f946cc0a..01a744427e 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -197,13 +197,17 @@ async fn test_random_collaboration( assert_eq!( guest_snapshot.entries(false).collect::>(), host_snapshot.entries(false).collect::>(), - "{} has different snapshot than the host for worktree {} ({:?}) and project {:?}", + "{} has different snapshot than the host for worktree {:?} and project {:?}", + client.username, + host_snapshot.abs_path(), + host_project.read_with(host_cx, |project, _| project.remote_id()) + ); + assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id(), + "{} has different scan id than the host for worktree {:?} and project {:?}", client.username, - id, host_snapshot.abs_path(), host_project.read_with(host_cx, |project, _| project.remote_id()) ); - assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id()); } } } @@ -812,39 +816,32 @@ async fn apply_client_operation( if detach { ", detaching" } else { ", awaiting" } ); + use futures::{FutureExt as _, TryFutureExt as _}; let offset = buffer.read_with(cx, |b, _| b.clip_offset(offset, Bias::Left)); - let request = match kind { - LspRequestKind::Rename => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.prepare_rename(buffer, offset, cx)) - .await?; - anyhow::Ok(()) - }), - LspRequestKind::Completion => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| 
p.completions(&buffer, offset, cx)) - .await?; - Ok(()) - }), - LspRequestKind::CodeAction => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.code_actions(&buffer, offset..offset, cx)) - .await?; - Ok(()) - }), - LspRequestKind::Definition => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.definition(&buffer, offset, cx)) - .await?; - Ok(()) - }), - LspRequestKind::Highlights => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.document_highlights(&buffer, offset, cx)) - .await?; - Ok(()) - }), - }; + let request = cx.foreground().spawn(project.update(cx, |project, cx| { + match kind { + LspRequestKind::Rename => project + .prepare_rename(buffer, offset, cx) + .map_ok(|_| ()) + .boxed(), + LspRequestKind::Completion => project + .completions(&buffer, offset, cx) + .map_ok(|_| ()) + .boxed(), + LspRequestKind::CodeAction => project + .code_actions(&buffer, offset..offset, cx) + .map_ok(|_| ()) + .boxed(), + LspRequestKind::Definition => project + .definition(&buffer, offset, cx) + .map_ok(|_| ()) + .boxed(), + LspRequestKind::Highlights => project + .document_highlights(&buffer, offset, cx) + .map_ok(|_| ()) + .boxed(), + } + })); if detach { request.detach(); } else { @@ -873,6 +870,7 @@ async fn apply_client_operation( let search = project.update(cx, |project, cx| { project.search(SearchQuery::text(query, false, false), cx) }); + drop(project); let search = cx.background().spawn(async move { search .await From 543301f94930ec6dc71caed81bc56b9ec53b49d6 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 3 Apr 2023 15:58:11 -0700 Subject: [PATCH 16/60] Avoid repeatedly loading/saving the test plan for each iteration --- .../src/tests/randomized_integration_tests.rs | 64 +++++++++++++------ 1 file changed, 43 insertions(+), 21 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 19961c3ba5..a05870dc1e 100644 
--- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -33,6 +33,11 @@ use std::{ }; use util::ResultExt; +lazy_static::lazy_static! { + static ref LOADED_PLAN_JSON: Mutex>> = Default::default(); + static ref DID_SAVE_PLAN_JSON: AtomicBool = Default::default(); +} + #[gpui::test(iterations = 100)] async fn test_random_collaboration( cx: &mut TestAppContext, @@ -99,8 +104,14 @@ async fn test_random_collaboration( let plan = Arc::new(Mutex::new(TestPlan::new(rng, users, max_operations))); if let Some(path) = &plan_load_path { - eprintln!("loaded plan from path {:?}", path); - plan.lock().load(path); + let json = LOADED_PLAN_JSON + .lock() + .get_or_insert_with(|| { + eprintln!("loaded test plan from path {:?}", path); + std::fs::read(path).unwrap() + }) + .clone(); + plan.lock().deserialize(json); } let mut clients = Vec::new(); @@ -132,8 +143,10 @@ async fn test_random_collaboration( deterministic.run_until_parked(); if let Some(path) = &plan_save_path { - eprintln!("saved test plan to path {:?}", path); - plan.lock().save(path); + if !DID_SAVE_PLAN_JSON.swap(true, SeqCst) { + eprintln!("saved test plan to path {:?}", path); + std::fs::write(path, plan.lock().serialize()).unwrap(); + } } for (client, client_cx) in &clients { @@ -313,28 +326,38 @@ async fn test_random_collaboration( host_buffer.read_with(host_cx, |b, _| b.saved_version().clone()); let guest_saved_version = guest_buffer.read_with(client_cx, |b, _| b.saved_version().clone()); - assert_eq!(guest_saved_version, host_saved_version); + assert_eq!( + guest_saved_version, host_saved_version, + "guest saved version does not match host's for path {path:?} in project {project_id}", + ); let host_saved_version_fingerprint = host_buffer.read_with(host_cx, |b, _| b.saved_version_fingerprint()); let guest_saved_version_fingerprint = guest_buffer.read_with(client_cx, |b, _| b.saved_version_fingerprint()); assert_eq!( - 
guest_saved_version_fingerprint, - host_saved_version_fingerprint + guest_saved_version_fingerprint, host_saved_version_fingerprint, + "guest's saved fingerprint does not match host's for path {path:?} in project {project_id}", ); let host_saved_mtime = host_buffer.read_with(host_cx, |b, _| b.saved_mtime()); let guest_saved_mtime = guest_buffer.read_with(client_cx, |b, _| b.saved_mtime()); - assert_eq!(guest_saved_mtime, host_saved_mtime); + assert_eq!( + guest_saved_mtime, host_saved_mtime, + "guest's saved mtime does not match host's for path {path:?} in project {project_id}", + ); let host_is_dirty = host_buffer.read_with(host_cx, |b, _| b.is_dirty()); let guest_is_dirty = guest_buffer.read_with(client_cx, |b, _| b.is_dirty()); - assert_eq!(guest_is_dirty, host_is_dirty); + assert_eq!(guest_is_dirty, host_is_dirty, + "guest's dirty status does not match host's for path {path:?} in project {project_id}", + ); let host_has_conflict = host_buffer.read_with(host_cx, |b, _| b.has_conflict()); let guest_has_conflict = guest_buffer.read_with(client_cx, |b, _| b.has_conflict()); - assert_eq!(guest_has_conflict, host_has_conflict); + assert_eq!(guest_has_conflict, host_has_conflict, + "guest's conflict status does not match host's for path {path:?} in project {project_id}", + ); } } } @@ -797,12 +820,12 @@ async fn apply_client_operation( .ok_or(TestError::Inapplicable)?; log::info!( - "{}: saving buffer {:?} in {} project {}{}", + "{}: saving buffer {:?} in {} project {}, {}", client.username, full_path, if is_local { "local" } else { "remote" }, project_root_name, - if detach { ", detaching" } else { ", awaiting" } + if detach { "detaching" } else { "awaiting" } ); ensure_project_shared(&project, client, cx).await; @@ -836,13 +859,13 @@ async fn apply_client_operation( .ok_or(TestError::Inapplicable)?; log::info!( - "{}: request LSP {:?} for buffer {:?} in {} project {}{}", + "{}: request LSP {:?} for buffer {:?} in {} project {}, {}", client.username, kind, full_path, 
if is_local { "local" } else { "remote" }, project_root_name, - if detach { ", detaching" } else { ", awaiting" } + if detach { "detaching" } else { "awaiting" } ); use futures::{FutureExt as _, TryFutureExt as _}; @@ -888,12 +911,12 @@ async fn apply_client_operation( .ok_or(TestError::Inapplicable)?; log::info!( - "{}: search {} project {} for {:?}{}", + "{}: search {} project {} for {:?}, {}", client.username, if is_local { "local" } else { "remote" }, project_root_name, query, - if detach { ", detaching" } else { ", awaiting" } + if detach { "detaching" } else { "awaiting" } ); let search = project.update(cx, |project, cx| { @@ -1137,10 +1160,9 @@ impl TestPlan { } } - fn load(&mut self, path: &Path) { - let json = std::fs::read_to_string(path).unwrap(); + fn deserialize(&mut self, json: Vec) { + let stored_operations: Vec = serde_json::from_slice(&json).unwrap(); self.replay = true; - let stored_operations: Vec = serde_json::from_str(&json).unwrap(); self.stored_operations = stored_operations .iter() .cloned() @@ -1171,7 +1193,7 @@ impl TestPlan { .collect() } - fn save(&mut self, path: &Path) { + fn serialize(&mut self) -> Vec { // Format each operation as one line let mut json = Vec::new(); json.push(b'['); @@ -1186,7 +1208,7 @@ impl TestPlan { serde_json::to_writer(&mut json, operation).unwrap(); } json.extend_from_slice(b"\n]\n"); - std::fs::write(path, &json).unwrap(); + json } fn next_server_operation( From f95732e981adda5331512caac4b008a5efd91ee9 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 3 Apr 2023 16:23:44 -0700 Subject: [PATCH 17/60] Fix bug where guest would drop BufferSaved messages while opening the buffer --- .../src/tests/randomized_integration_tests.rs | 2 +- crates/project/src/lsp_command.rs | 20 ++++----- crates/project/src/project.rs | 44 ++++++++++--------- 3 files changed, 35 insertions(+), 31 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs 
b/crates/collab/src/tests/randomized_integration_tests.rs index a05870dc1e..44e1891363 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -132,7 +132,7 @@ async fn test_random_collaboration( ) .await; if !applied { - skipped.store(false, SeqCst); + skipped.store(true, SeqCst); } } diff --git a/crates/project/src/lsp_command.rs b/crates/project/src/lsp_command.rs index feec1ee0e4..dcc462546f 100644 --- a/crates/project/src/lsp_command.rs +++ b/crates/project/src/lsp_command.rs @@ -4,7 +4,7 @@ use crate::{ use anyhow::{anyhow, Result}; use async_trait::async_trait; use client::proto::{self, PeerId}; -use gpui::{AppContext, AsyncAppContext, ModelHandle}; +use gpui::{AppContext, AsyncAppContext, ModelHandle, MutableAppContext}; use language::{ point_from_lsp, point_to_lsp, proto::{deserialize_anchor, deserialize_version, serialize_anchor, serialize_version}, @@ -49,7 +49,7 @@ pub(crate) trait LspCommand: 'static + Sized { project: &mut Project, peer_id: PeerId, buffer_version: &clock::Global, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> ::Response; async fn response_from_proto( self, @@ -175,7 +175,7 @@ impl LspCommand for PrepareRename { _: &mut Project, _: PeerId, buffer_version: &clock::Global, - _: &AppContext, + _: &mut MutableAppContext, ) -> proto::PrepareRenameResponse { proto::PrepareRenameResponse { can_rename: range.is_some(), @@ -296,7 +296,7 @@ impl LspCommand for PerformRename { project: &mut Project, peer_id: PeerId, _: &clock::Global, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> proto::PerformRenameResponse { let transaction = project.serialize_project_transaction_for_peer(response, peer_id, cx); proto::PerformRenameResponse { @@ -391,7 +391,7 @@ impl LspCommand for GetDefinition { project: &mut Project, peer_id: PeerId, _: &clock::Global, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> proto::GetDefinitionResponse { let links = 
location_links_to_proto(response, project, peer_id, cx); proto::GetDefinitionResponse { links } @@ -477,7 +477,7 @@ impl LspCommand for GetTypeDefinition { project: &mut Project, peer_id: PeerId, _: &clock::Global, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> proto::GetTypeDefinitionResponse { let links = location_links_to_proto(response, project, peer_id, cx); proto::GetTypeDefinitionResponse { links } @@ -658,7 +658,7 @@ fn location_links_to_proto( links: Vec, project: &mut Project, peer_id: PeerId, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> Vec { links .into_iter() @@ -787,7 +787,7 @@ impl LspCommand for GetReferences { project: &mut Project, peer_id: PeerId, _: &clock::Global, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> proto::GetReferencesResponse { let locations = response .into_iter() @@ -928,7 +928,7 @@ impl LspCommand for GetDocumentHighlights { _: &mut Project, _: PeerId, _: &clock::Global, - _: &AppContext, + _: &mut MutableAppContext, ) -> proto::GetDocumentHighlightsResponse { let highlights = response .into_iter() @@ -1130,7 +1130,7 @@ impl LspCommand for GetHover { _: &mut Project, _: PeerId, _: &clock::Global, - _: &AppContext, + _: &mut MutableAppContext, ) -> proto::GetHoverResponse { if let Some(response) = response { let (start, end) = if let Some(range) = response.range { diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index b0a9784ba9..2755f281f3 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -5858,7 +5858,7 @@ impl Project { &mut self, project_transaction: ProjectTransaction, peer_id: proto::PeerId, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> proto::ProjectTransaction { let mut serialized_transaction = proto::ProjectTransaction { buffer_ids: Default::default(), @@ -5916,27 +5916,27 @@ impl Project { &mut self, buffer: &ModelHandle, peer_id: proto::PeerId, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> u64 { let buffer_id = 
buffer.read(cx).remote_id(); if let Some(project_id) = self.remote_id() { let shared_buffers = self.shared_buffers.entry(peer_id).or_default(); if shared_buffers.insert(buffer_id) { - let buffer = buffer.read(cx); - let state = buffer.to_proto(); - let operations = buffer.serialize_ops(None, cx); + let buffer = buffer.clone(); + let operations = buffer.read(cx).serialize_ops(None, cx); let client = self.client.clone(); - cx.background() - .spawn( - async move { - let operations = operations.await; + cx.spawn(move |cx| async move { + let operations = operations.await; + let state = buffer.read_with(&cx, |buffer, _| buffer.to_proto()); - client.send(proto::CreateBufferForPeer { - project_id, - peer_id: Some(peer_id), - variant: Some(proto::create_buffer_for_peer::Variant::State(state)), - })?; + client.send(proto::CreateBufferForPeer { + project_id, + peer_id: Some(peer_id), + variant: Some(proto::create_buffer_for_peer::Variant::State(state)), + })?; + cx.background() + .spawn(async move { let mut chunks = split_operations(operations).peekable(); while let Some(chunk) = chunks.next() { let is_last = chunks.peek().is_none(); @@ -5952,12 +5952,11 @@ impl Project { )), })?; } - anyhow::Ok(()) - } - .log_err(), - ) - .detach(); + }) + .await + }) + .detach() } } @@ -6231,7 +6230,12 @@ impl Project { let buffer = this .opened_buffers .get(&envelope.payload.buffer_id) - .and_then(|buffer| buffer.upgrade(cx)); + .and_then(|buffer| buffer.upgrade(cx)) + .or_else(|| { + this.incomplete_remote_buffers + .get(&envelope.payload.buffer_id) + .and_then(|b| b.clone()) + }); if let Some(buffer) = buffer { buffer.update(cx, |buffer, cx| { buffer.did_save(version, fingerprint, mtime, cx); From 5ecc9606af10d7ad400c344e7944e97dd3d99886 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 3 Apr 2023 18:15:07 -0700 Subject: [PATCH 18/60] Use synchronous locks in FakeFs This way, the state can be accessed without running the deterministic executor. 
--- .../src/tests/randomized_integration_tests.rs | 21 +- crates/fs/src/fs.rs | 297 ++++++++---------- crates/project/src/worktree.rs | 2 +- 3 files changed, 149 insertions(+), 171 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 44e1891363..583271b342 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -624,7 +624,7 @@ async fn apply_client_operation( ); ensure_project_shared(&project, client, cx).await; - if !client.fs.paths().await.contains(&new_root_path) { + if !client.fs.paths().contains(&new_root_path) { client.fs.create_dir(&new_root_path).await.unwrap(); } project @@ -1350,7 +1350,6 @@ impl TestPlan { return None; } - let executor = cx.background(); self.operation_ix += 1; let call = cx.read(ActiveCall::global); Some(loop { @@ -1467,7 +1466,7 @@ impl TestPlan { .choose(&mut self.rng) .cloned() else { continue }; let project_root_name = root_name_for_project(&project, cx); - let mut paths = executor.block(client.fs.paths()); + let mut paths = client.fs.paths(); paths.remove(0); let new_root_path = if paths.is_empty() || self.rng.gen() { Path::new("/").join(&self.next_root_dir_name(user_id)) @@ -1637,14 +1636,16 @@ impl TestPlan { // Update a git index 91..=95 => { - let repo_path = executor - .block(client.fs.directories()) + let repo_path = client + .fs + .directories() .choose(&mut self.rng) .unwrap() .clone(); - let mut file_paths = executor - .block(client.fs.files()) + let mut file_paths = client + .fs + .files() .into_iter() .filter(|path| path.starts_with(&repo_path)) .collect::>(); @@ -1673,7 +1674,7 @@ impl TestPlan { let is_dir = self.rng.gen::(); let content; let mut path; - let dir_paths = cx.background().block(client.fs.directories()); + let dir_paths = client.fs.directories(); if is_dir { content = String::new(); @@ -1683,7 +1684,7 @@ impl TestPlan { content = 
Alphanumeric.sample_string(&mut self.rng, 16); // Create a new file or overwrite an existing file - let file_paths = cx.background().block(client.fs.files()); + let file_paths = client.fs.files(); if file_paths.is_empty() || self.rng.gen_bool(0.5) { path = dir_paths.choose(&mut self.rng).unwrap().clone(); path.push(gen_file_name(&mut self.rng)); @@ -1789,7 +1790,7 @@ async fn simulate_client( let fs = fs.clone(); let plan = plan.clone(); async move { - let files = fs.files().await; + let files = fs.files(); let count = plan.lock().rng.gen_range::(1..3); let files = (0..count) .map(|_| files.choose(&mut plan.lock().rng).unwrap()) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index fd713ef3b5..4d0b0c4f44 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -5,7 +5,7 @@ use fsevent::EventStream; use futures::{future::BoxFuture, Stream, StreamExt}; use git2::Repository as LibGitRepository; use lazy_static::lazy_static; -use parking_lot::Mutex as SyncMutex; +use parking_lot::Mutex; use regex::Regex; use repository::GitRepository; use rope::Rope; @@ -27,8 +27,6 @@ use util::ResultExt; #[cfg(any(test, feature = "test-support"))] use collections::{btree_map, BTreeMap}; #[cfg(any(test, feature = "test-support"))] -use futures::lock::Mutex; -#[cfg(any(test, feature = "test-support"))] use repository::FakeGitRepositoryState; #[cfg(any(test, feature = "test-support"))] use std::sync::Weak; @@ -117,7 +115,7 @@ pub trait Fs: Send + Sync { path: &Path, latency: Duration, ) -> Pin>>>; - fn open_repo(&self, abs_dot_git: &Path) -> Option>>; + fn open_repo(&self, abs_dot_git: &Path) -> Option>>; fn is_fake(&self) -> bool; #[cfg(any(test, feature = "test-support"))] fn as_fake(&self) -> &FakeFs; @@ -350,11 +348,11 @@ impl Fs for RealFs { }))) } - fn open_repo(&self, dotgit_path: &Path) -> Option>> { + fn open_repo(&self, dotgit_path: &Path) -> Option>> { LibGitRepository::open(&dotgit_path) .log_err() - .and_then::>, _>(|libgit_repository| { - 
Some(Arc::new(SyncMutex::new(libgit_repository))) + .and_then::>, _>(|libgit_repository| { + Some(Arc::new(Mutex::new(libgit_repository))) }) } @@ -396,7 +394,7 @@ enum FakeFsEntry { inode: u64, mtime: SystemTime, entries: BTreeMap>>, - git_repo_state: Option>>, + git_repo_state: Option>>, }, Symlink { target: PathBuf, @@ -405,18 +403,14 @@ enum FakeFsEntry { #[cfg(any(test, feature = "test-support"))] impl FakeFsState { - async fn read_path<'a>(&'a self, target: &Path) -> Result>> { + fn read_path<'a>(&'a self, target: &Path) -> Result>> { Ok(self .try_read_path(target) - .await .ok_or_else(|| anyhow!("path does not exist: {}", target.display()))? .0) } - async fn try_read_path<'a>( - &'a self, - target: &Path, - ) -> Option<(Arc>, PathBuf)> { + fn try_read_path<'a>(&'a self, target: &Path) -> Option<(Arc>, PathBuf)> { let mut path = target.to_path_buf(); let mut real_path = PathBuf::new(); let mut entry_stack = Vec::new(); @@ -438,10 +432,10 @@ impl FakeFsState { } Component::Normal(name) => { let current_entry = entry_stack.last().cloned()?; - let current_entry = current_entry.lock().await; + let current_entry = current_entry.lock(); if let FakeFsEntry::Dir { entries, .. } = &*current_entry { let entry = entries.get(name.to_str().unwrap()).cloned()?; - let _entry = entry.lock().await; + let _entry = entry.lock(); if let FakeFsEntry::Symlink { target, .. 
} = &*_entry { let mut target = target.clone(); target.extend(path_components); @@ -462,7 +456,7 @@ impl FakeFsState { entry_stack.pop().map(|entry| (entry, real_path)) } - async fn write_path(&self, path: &Path, callback: Fn) -> Result + fn write_path(&self, path: &Path, callback: Fn) -> Result where Fn: FnOnce(btree_map::Entry>>) -> Result, { @@ -472,8 +466,8 @@ impl FakeFsState { .ok_or_else(|| anyhow!("cannot overwrite the root"))?; let parent_path = path.parent().unwrap(); - let parent = self.read_path(parent_path).await?; - let mut parent = parent.lock().await; + let parent = self.read_path(parent_path)?; + let mut parent = parent.lock(); let new_entry = parent .dir_entries(parent_path)? .entry(filename.to_str().unwrap().into()); @@ -529,7 +523,7 @@ impl FakeFs { } pub async fn insert_file(&self, path: impl AsRef, content: String) { - let mut state = self.state.lock().await; + let mut state = self.state.lock(); let path = path.as_ref(); let inode = state.next_inode; let mtime = state.next_mtime; @@ -552,13 +546,12 @@ impl FakeFs { } Ok(()) }) - .await .unwrap(); state.emit_event(&[path]); } pub async fn insert_symlink(&self, path: impl AsRef, target: PathBuf) { - let mut state = self.state.lock().await; + let mut state = self.state.lock(); let path = path.as_ref(); let file = Arc::new(Mutex::new(FakeFsEntry::Symlink { target })); state @@ -572,21 +565,20 @@ impl FakeFs { Ok(()) } }) - .await .unwrap(); state.emit_event(&[path]); } pub async fn pause_events(&self) { - self.state.lock().await.events_paused = true; + self.state.lock().events_paused = true; } pub async fn buffered_event_count(&self) -> usize { - self.state.lock().await.buffered_events.len() + self.state.lock().buffered_events.len() } pub async fn flush_events(&self, count: usize) { - self.state.lock().await.flush_events(count); + self.state.lock().flush_events(count); } #[must_use] @@ -625,9 +617,9 @@ impl FakeFs { } pub async fn set_index_for_repo(&self, dot_git: &Path, head_state: &[(&Path, 
String)]) { - let mut state = self.state.lock().await; - let entry = state.read_path(dot_git).await.unwrap(); - let mut entry = entry.lock().await; + let mut state = self.state.lock(); + let entry = state.read_path(dot_git).unwrap(); + let mut entry = entry.lock(); if let FakeFsEntry::Dir { git_repo_state, .. } = &mut *entry { let repo_state = git_repo_state.get_or_insert_with(Default::default); @@ -646,12 +638,12 @@ impl FakeFs { } } - pub async fn paths(&self) -> Vec { + pub fn paths(&self) -> Vec { let mut result = Vec::new(); let mut queue = collections::VecDeque::new(); - queue.push_back((PathBuf::from("/"), self.state.lock().await.root.clone())); + queue.push_back((PathBuf::from("/"), self.state.lock().root.clone())); while let Some((path, entry)) = queue.pop_front() { - if let FakeFsEntry::Dir { entries, .. } = &*entry.lock().await { + if let FakeFsEntry::Dir { entries, .. } = &*entry.lock() { for (name, entry) in entries { queue.push_back((path.join(name), entry.clone())); } @@ -661,12 +653,12 @@ impl FakeFs { result } - pub async fn directories(&self) -> Vec { + pub fn directories(&self) -> Vec { let mut result = Vec::new(); let mut queue = collections::VecDeque::new(); - queue.push_back((PathBuf::from("/"), self.state.lock().await.root.clone())); + queue.push_back((PathBuf::from("/"), self.state.lock().root.clone())); while let Some((path, entry)) = queue.pop_front() { - if let FakeFsEntry::Dir { entries, .. } = &*entry.lock().await { + if let FakeFsEntry::Dir { entries, .. 
} = &*entry.lock() { for (name, entry) in entries { queue.push_back((path.join(name), entry.clone())); } @@ -676,12 +668,12 @@ impl FakeFs { result } - pub async fn files(&self) -> Vec { + pub fn files(&self) -> Vec { let mut result = Vec::new(); let mut queue = collections::VecDeque::new(); - queue.push_back((PathBuf::from("/"), self.state.lock().await.root.clone())); + queue.push_back((PathBuf::from("/"), self.state.lock().root.clone())); while let Some((path, entry)) = queue.pop_front() { - let e = entry.lock().await; + let e = entry.lock(); match &*e { FakeFsEntry::File { .. } => result.push(path), FakeFsEntry::Dir { entries, .. } => { @@ -745,11 +737,11 @@ impl FakeFsEntry { impl Fs for FakeFs { async fn create_dir(&self, path: &Path) -> Result<()> { self.simulate_random_delay().await; - let mut state = self.state.lock().await; let mut created_dirs = Vec::new(); let mut cur_path = PathBuf::new(); for component in path.components() { + let mut state = self.state.lock(); cur_path.push(component); if cur_path == Path::new("/") { continue; @@ -759,29 +751,27 @@ impl Fs for FakeFs { let mtime = state.next_mtime; state.next_mtime += Duration::from_nanos(1); state.next_inode += 1; - state - .write_path(&cur_path, |entry| { - entry.or_insert_with(|| { - created_dirs.push(cur_path.clone()); - Arc::new(Mutex::new(FakeFsEntry::Dir { - inode, - mtime, - entries: Default::default(), - git_repo_state: None, - })) - }); - Ok(()) - }) - .await?; + state.write_path(&cur_path, |entry| { + entry.or_insert_with(|| { + created_dirs.push(cur_path.clone()); + Arc::new(Mutex::new(FakeFsEntry::Dir { + inode, + mtime, + entries: Default::default(), + git_repo_state: None, + })) + }); + Ok(()) + })? 
} - state.emit_event(&created_dirs); + self.state.lock().emit_event(&created_dirs); Ok(()) } async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> { self.simulate_random_delay().await; - let mut state = self.state.lock().await; + let mut state = self.state.lock(); let inode = state.next_inode; let mtime = state.next_mtime; state.next_mtime += Duration::from_nanos(1); @@ -791,23 +781,21 @@ impl Fs for FakeFs { mtime, content: String::new(), })); - state - .write_path(path, |entry| { - match entry { - btree_map::Entry::Occupied(mut e) => { - if options.overwrite { - *e.get_mut() = file; - } else if !options.ignore_if_exists { - return Err(anyhow!("path already exists: {}", path.display())); - } - } - btree_map::Entry::Vacant(e) => { - e.insert(file); + state.write_path(path, |entry| { + match entry { + btree_map::Entry::Occupied(mut e) => { + if options.overwrite { + *e.get_mut() = file; + } else if !options.ignore_if_exists { + return Err(anyhow!("path already exists: {}", path.display())); } } - Ok(()) - }) - .await?; + btree_map::Entry::Vacant(e) => { + e.insert(file); + } + } + Ok(()) + })?; state.emit_event(&[path]); Ok(()) } @@ -815,33 +803,29 @@ impl Fs for FakeFs { async fn rename(&self, old_path: &Path, new_path: &Path, options: RenameOptions) -> Result<()> { let old_path = normalize_path(old_path); let new_path = normalize_path(new_path); - let mut state = self.state.lock().await; - let moved_entry = state - .write_path(&old_path, |e| { - if let btree_map::Entry::Occupied(e) = e { - Ok(e.remove()) - } else { - Err(anyhow!("path does not exist: {}", &old_path.display())) - } - }) - .await?; - state - .write_path(&new_path, |e| { - match e { - btree_map::Entry::Occupied(mut e) => { - if options.overwrite { - *e.get_mut() = moved_entry; - } else if !options.ignore_if_exists { - return Err(anyhow!("path already exists: {}", new_path.display())); - } - } - btree_map::Entry::Vacant(e) => { - e.insert(moved_entry); + let mut state = 
self.state.lock(); + let moved_entry = state.write_path(&old_path, |e| { + if let btree_map::Entry::Occupied(e) = e { + Ok(e.remove()) + } else { + Err(anyhow!("path does not exist: {}", &old_path.display())) + } + })?; + state.write_path(&new_path, |e| { + match e { + btree_map::Entry::Occupied(mut e) => { + if options.overwrite { + *e.get_mut() = moved_entry; + } else if !options.ignore_if_exists { + return Err(anyhow!("path already exists: {}", new_path.display())); } } - Ok(()) - }) - .await?; + btree_map::Entry::Vacant(e) => { + e.insert(moved_entry); + } + } + Ok(()) + })?; state.emit_event(&[old_path, new_path]); Ok(()) } @@ -849,35 +833,33 @@ impl Fs for FakeFs { async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> { let source = normalize_path(source); let target = normalize_path(target); - let mut state = self.state.lock().await; + let mut state = self.state.lock(); let mtime = state.next_mtime; let inode = util::post_inc(&mut state.next_inode); state.next_mtime += Duration::from_nanos(1); - let source_entry = state.read_path(&source).await?; - let content = source_entry.lock().await.file_content(&source)?.clone(); - let entry = state - .write_path(&target, |e| match e { - btree_map::Entry::Occupied(e) => { - if options.overwrite { - Ok(Some(e.get().clone())) - } else if !options.ignore_if_exists { - return Err(anyhow!("{target:?} already exists")); - } else { - Ok(None) - } + let source_entry = state.read_path(&source)?; + let content = source_entry.lock().file_content(&source)?.clone(); + let entry = state.write_path(&target, |e| match e { + btree_map::Entry::Occupied(e) => { + if options.overwrite { + Ok(Some(e.get().clone())) + } else if !options.ignore_if_exists { + return Err(anyhow!("{target:?} already exists")); + } else { + Ok(None) } - btree_map::Entry::Vacant(e) => Ok(Some( - e.insert(Arc::new(Mutex::new(FakeFsEntry::File { - inode, - mtime, - content: String::new(), - }))) - .clone(), - )), - }) - 
.await?; + } + btree_map::Entry::Vacant(e) => Ok(Some( + e.insert(Arc::new(Mutex::new(FakeFsEntry::File { + inode, + mtime, + content: String::new(), + }))) + .clone(), + )), + })?; if let Some(entry) = entry { - entry.lock().await.set_file_content(&target, content)?; + entry.lock().set_file_content(&target, content)?; } state.emit_event(&[target]); Ok(()) @@ -890,9 +872,9 @@ impl Fs for FakeFs { .ok_or_else(|| anyhow!("cannot remove the root"))?; let base_name = path.file_name().unwrap(); - let mut state = self.state.lock().await; - let parent_entry = state.read_path(parent_path).await?; - let mut parent_entry = parent_entry.lock().await; + let mut state = self.state.lock(); + let parent_entry = state.read_path(parent_path)?; + let mut parent_entry = parent_entry.lock(); let entry = parent_entry .dir_entries(parent_path)? .entry(base_name.to_str().unwrap().into()); @@ -905,7 +887,7 @@ impl Fs for FakeFs { } btree_map::Entry::Occupied(e) => { { - let mut entry = e.get().lock().await; + let mut entry = e.get().lock(); let children = entry.dir_entries(&path)?; if !options.recursive && !children.is_empty() { return Err(anyhow!("{path:?} is not empty")); @@ -924,9 +906,9 @@ impl Fs for FakeFs { .parent() .ok_or_else(|| anyhow!("cannot remove the root"))?; let base_name = path.file_name().unwrap(); - let mut state = self.state.lock().await; - let parent_entry = state.read_path(parent_path).await?; - let mut parent_entry = parent_entry.lock().await; + let mut state = self.state.lock(); + let parent_entry = state.read_path(parent_path)?; + let mut parent_entry = parent_entry.lock(); let entry = parent_entry .dir_entries(parent_path)? 
.entry(base_name.to_str().unwrap().into()); @@ -937,7 +919,7 @@ impl Fs for FakeFs { } } btree_map::Entry::Occupied(e) => { - e.get().lock().await.file_content(&path)?; + e.get().lock().file_content(&path)?; e.remove(); } } @@ -953,9 +935,9 @@ impl Fs for FakeFs { async fn load(&self, path: &Path) -> Result { let path = normalize_path(path); self.simulate_random_delay().await; - let state = self.state.lock().await; - let entry = state.read_path(&path).await?; - let entry = entry.lock().await; + let state = self.state.lock(); + let entry = state.read_path(&path)?; + let entry = entry.lock(); entry.file_content(&path).cloned() } @@ -978,8 +960,8 @@ impl Fs for FakeFs { async fn canonicalize(&self, path: &Path) -> Result { let path = normalize_path(path); self.simulate_random_delay().await; - let state = self.state.lock().await; - if let Some((_, real_path)) = state.try_read_path(&path).await { + let state = self.state.lock(); + if let Some((_, real_path)) = state.try_read_path(&path) { Ok(real_path) } else { Err(anyhow!("path does not exist: {}", path.display())) @@ -989,9 +971,9 @@ impl Fs for FakeFs { async fn is_file(&self, path: &Path) -> bool { let path = normalize_path(path); self.simulate_random_delay().await; - let state = self.state.lock().await; - if let Some((entry, _)) = state.try_read_path(&path).await { - entry.lock().await.is_file() + let state = self.state.lock(); + if let Some((entry, _)) = state.try_read_path(&path) { + entry.lock().is_file() } else { false } @@ -1000,9 +982,9 @@ impl Fs for FakeFs { async fn metadata(&self, path: &Path) -> Result> { self.simulate_random_delay().await; let path = normalize_path(path); - let state = self.state.lock().await; - if let Some((entry, real_path)) = state.try_read_path(&path).await { - let entry = entry.lock().await; + let state = self.state.lock(); + if let Some((entry, real_path)) = state.try_read_path(&path) { + let entry = entry.lock(); let is_symlink = real_path != path; Ok(Some(match &*entry { @@ 
-1031,9 +1013,9 @@ impl Fs for FakeFs { ) -> Result>>>> { self.simulate_random_delay().await; let path = normalize_path(path); - let state = self.state.lock().await; - let entry = state.read_path(&path).await?; - let mut entry = entry.lock().await; + let state = self.state.lock(); + let entry = state.read_path(&path)?; + let mut entry = entry.lock(); let children = entry.dir_entries(&path)?; let paths = children .keys() @@ -1047,10 +1029,9 @@ impl Fs for FakeFs { path: &Path, _: Duration, ) -> Pin>>> { - let mut state = self.state.lock().await; self.simulate_random_delay().await; let (tx, rx) = smol::channel::unbounded(); - state.event_txs.push(tx); + self.state.lock().event_txs.push(tx); let path = path.to_path_buf(); let executor = self.executor.clone(); Box::pin(futures::StreamExt::filter(rx, move |events| { @@ -1065,22 +1046,18 @@ impl Fs for FakeFs { })) } - fn open_repo(&self, abs_dot_git: &Path) -> Option>> { - smol::block_on(async move { - let state = self.state.lock().await; - let entry = state.read_path(abs_dot_git).await.unwrap(); - let mut entry = entry.lock().await; - if let FakeFsEntry::Dir { git_repo_state, .. } = &mut *entry { - let state = git_repo_state - .get_or_insert_with(|| { - Arc::new(SyncMutex::new(FakeGitRepositoryState::default())) - }) - .clone(); - Some(repository::FakeGitRepository::open(state)) - } else { - None - } - }) + fn open_repo(&self, abs_dot_git: &Path) -> Option>> { + let state = self.state.lock(); + let entry = state.read_path(abs_dot_git).unwrap(); + let mut entry = entry.lock(); + if let FakeFsEntry::Dir { git_repo_state, .. 
} = &mut *entry { + let state = git_repo_state + .get_or_insert_with(|| Arc::new(Mutex::new(FakeGitRepositoryState::default()))) + .clone(); + Some(repository::FakeGitRepository::open(state)) + } else { + None + } } fn is_fake(&self) -> bool { @@ -1213,7 +1190,7 @@ mod tests { .await; assert_eq!( - fs.files().await, + fs.files(), vec![ PathBuf::from("/root/dir1/a"), PathBuf::from("/root/dir1/b"), diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 2357052d2c..b1aebf29f1 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -3729,7 +3729,7 @@ mod tests { ) { let mut files = Vec::new(); let mut dirs = Vec::new(); - for path in fs.as_fake().paths().await { + for path in fs.as_fake().paths() { if path.starts_with(root_path) { if fs.is_file(&path).await { files.push(path); From 7b0a6c0dfaf73bd92f0d3cff8e818e63e2645f50 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 3 Apr 2023 18:16:08 -0700 Subject: [PATCH 19/60] Add an 'on_failure' attribute to gpui tests This lets us perform a finalization step when a randomized test fails. 
--- crates/gpui/src/test.rs | 2 ++ crates/gpui_macros/src/gpui_macros.rs | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/crates/gpui/src/test.rs b/crates/gpui/src/test.rs index d784d43ece..0bf3c333b3 100644 --- a/crates/gpui/src/test.rs +++ b/crates/gpui/src/test.rs @@ -45,6 +45,7 @@ pub fn run_test( Arc, u64, )), + on_fail_fn: Option, fn_name: String, ) { // let _profiler = dhat::Profiler::new_heap(); @@ -177,6 +178,7 @@ pub fn run_test( if is_randomized { eprintln!("failing seed: {}", atomic_seed.load(SeqCst)); } + on_fail_fn.map(|f| f()); panic::resume_unwind(error); } } diff --git a/crates/gpui_macros/src/gpui_macros.rs b/crates/gpui_macros/src/gpui_macros.rs index cabae1ac0a..42cdb66ee3 100644 --- a/crates/gpui_macros/src/gpui_macros.rs +++ b/crates/gpui_macros/src/gpui_macros.rs @@ -1,4 +1,5 @@ use proc_macro::TokenStream; +use proc_macro2::Ident; use quote::{format_ident, quote}; use std::mem; use syn::{ @@ -15,6 +16,7 @@ pub fn test(args: TokenStream, function: TokenStream) -> TokenStream { let mut num_iterations = 1; let mut starting_seed = 0; let mut detect_nondeterminism = false; + let mut on_failure_fn_name = quote!(None); for arg in args { match arg { @@ -33,6 +35,20 @@ pub fn test(args: TokenStream, function: TokenStream) -> TokenStream { Some("retries") => max_retries = parse_int(&meta.lit)?, Some("iterations") => num_iterations = parse_int(&meta.lit)?, Some("seed") => starting_seed = parse_int(&meta.lit)?, + Some("on_failure") => { + if let Lit::Str(name) = meta.lit { + let ident = Ident::new(&name.value(), name.span()); + on_failure_fn_name = quote!(Some(#ident)); + } else { + return Err(TokenStream::from( + syn::Error::new( + meta.lit.span(), + "on_failure argument must be a string", + ) + .into_compile_error(), + )); + } + } _ => { return Err(TokenStream::from( syn::Error::new(meta.path.span(), "invalid argument") @@ -152,6 +168,7 @@ pub fn test(args: TokenStream, function: TokenStream) -> TokenStream { 
cx.foreground().run(#inner_fn_name(#inner_fn_args)); #cx_teardowns }, + #on_failure_fn_name, stringify!(#outer_fn_name).to_string(), ); } @@ -187,6 +204,7 @@ pub fn test(args: TokenStream, function: TokenStream) -> TokenStream { #max_retries, #detect_nondeterminism, &mut |cx, _, _, seed| #inner_fn_name(#inner_fn_args), + #on_failure_fn_name, stringify!(#outer_fn_name).to_string(), ); } From b251e249a7c4f812a3cb49dabd15a89dcc9a81c7 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 3 Apr 2023 18:17:11 -0700 Subject: [PATCH 20/60] Check for consistency between clients every time the system quiesces --- .../src/tests/randomized_integration_tests.rs | 462 +++++++++--------- 1 file changed, 236 insertions(+), 226 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 583271b342..ffe09b74bd 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -34,11 +34,13 @@ use std::{ use util::ResultExt; lazy_static::lazy_static! 
{ + static ref PLAN_LOAD_PATH: Option = path_env_var("LOAD_PLAN"); + static ref PLAN_SAVE_PATH: Option = path_env_var("SAVE_PLAN"); static ref LOADED_PLAN_JSON: Mutex>> = Default::default(); - static ref DID_SAVE_PLAN_JSON: AtomicBool = Default::default(); + static ref PLAN: Mutex>>> = Default::default(); } -#[gpui::test(iterations = 100)] +#[gpui::test(iterations = 100, on_failure = "on_failure")] async fn test_random_collaboration( cx: &mut TestAppContext, deterministic: Arc, @@ -53,9 +55,6 @@ async fn test_random_collaboration( .map(|i| i.parse().expect("invalid `OPERATIONS` variable")) .unwrap_or(10); - let plan_load_path = path_env_var("LOAD_PLAN"); - let plan_save_path = path_env_var("SAVE_PLAN"); - let mut server = TestServer::start(&deterministic).await; let db = server.app_state.db.clone(); @@ -103,7 +102,7 @@ async fn test_random_collaboration( let plan = Arc::new(Mutex::new(TestPlan::new(rng, users, max_operations))); - if let Some(path) = &plan_load_path { + if let Some(path) = &*PLAN_LOAD_PATH { let json = LOADED_PLAN_JSON .lock() .get_or_insert_with(|| { @@ -114,6 +113,8 @@ async fn test_random_collaboration( plan.lock().deserialize(json); } + PLAN.lock().replace(plan.clone()); + let mut clients = Vec::new(); let mut client_tasks = Vec::new(); let mut operation_channels = Vec::new(); @@ -142,225 +143,7 @@ async fn test_random_collaboration( deterministic.finish_waiting(); deterministic.run_until_parked(); - if let Some(path) = &plan_save_path { - if !DID_SAVE_PLAN_JSON.swap(true, SeqCst) { - eprintln!("saved test plan to path {:?}", path); - std::fs::write(path, plan.lock().serialize()).unwrap(); - } - } - - for (client, client_cx) in &clients { - for guest_project in client.remote_projects().iter() { - guest_project.read_with(client_cx, |guest_project, cx| { - let host_project = clients.iter().find_map(|(client, cx)| { - let project = client - .local_projects() - .iter() - .find(|host_project| { - host_project.read_with(cx, |host_project, _| { - 
host_project.remote_id() == guest_project.remote_id() - }) - })? - .clone(); - Some((project, cx)) - }); - - if !guest_project.is_read_only() { - if let Some((host_project, host_cx)) = host_project { - let host_worktree_snapshots = - host_project.read_with(host_cx, |host_project, cx| { - host_project - .worktrees(cx) - .map(|worktree| { - let worktree = worktree.read(cx); - (worktree.id(), worktree.snapshot()) - }) - .collect::>() - }); - let guest_worktree_snapshots = guest_project - .worktrees(cx) - .map(|worktree| { - let worktree = worktree.read(cx); - (worktree.id(), worktree.snapshot()) - }) - .collect::>(); - - assert_eq!( - guest_worktree_snapshots.keys().collect::>(), - host_worktree_snapshots.keys().collect::>(), - "{} has different worktrees than the host", - client.username - ); - - for (id, host_snapshot) in &host_worktree_snapshots { - let guest_snapshot = &guest_worktree_snapshots[id]; - assert_eq!( - guest_snapshot.root_name(), - host_snapshot.root_name(), - "{} has different root name than the host for worktree {}", - client.username, - id - ); - assert_eq!( - guest_snapshot.abs_path(), - host_snapshot.abs_path(), - "{} has different abs path than the host for worktree {}", - client.username, - id - ); - assert_eq!( - guest_snapshot.entries(false).collect::>(), - host_snapshot.entries(false).collect::>(), - "{} has different snapshot than the host for worktree {:?} and project {:?}", - client.username, - host_snapshot.abs_path(), - host_project.read_with(host_cx, |project, _| project.remote_id()) - ); - assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id(), - "{} has different scan id than the host for worktree {:?} and project {:?}", - client.username, - host_snapshot.abs_path(), - host_project.read_with(host_cx, |project, _| project.remote_id()) - ); - } - } - } - - guest_project.check_invariants(cx); - }); - } - - let buffers = client.buffers().clone(); - for (guest_project, guest_buffers) in &buffers { - let project_id = if 
guest_project.read_with(client_cx, |project, _| { - project.is_local() || project.is_read_only() - }) { - continue; - } else { - guest_project - .read_with(client_cx, |project, _| project.remote_id()) - .unwrap() - }; - let guest_user_id = client.user_id().unwrap(); - - let host_project = clients.iter().find_map(|(client, cx)| { - let project = client - .local_projects() - .iter() - .find(|host_project| { - host_project.read_with(cx, |host_project, _| { - host_project.remote_id() == Some(project_id) - }) - })? - .clone(); - Some((client.user_id().unwrap(), project, cx)) - }); - - let (host_user_id, host_project, host_cx) = - if let Some((host_user_id, host_project, host_cx)) = host_project { - (host_user_id, host_project, host_cx) - } else { - continue; - }; - - for guest_buffer in guest_buffers { - let buffer_id = guest_buffer.read_with(client_cx, |buffer, _| buffer.remote_id()); - let host_buffer = host_project.read_with(host_cx, |project, cx| { - project.buffer_for_id(buffer_id, cx).unwrap_or_else(|| { - panic!( - "host does not have buffer for guest:{}, peer:{:?}, id:{}", - client.username, - client.peer_id(), - buffer_id - ) - }) - }); - let path = host_buffer - .read_with(host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx)); - - assert_eq!( - guest_buffer.read_with(client_cx, |buffer, _| buffer.deferred_ops_len()), - 0, - "{}, buffer {}, path {:?} has deferred operations", - client.username, - buffer_id, - path, - ); - assert_eq!( - guest_buffer.read_with(client_cx, |buffer, _| buffer.text()), - host_buffer.read_with(host_cx, |buffer, _| buffer.text()), - "{}, buffer {}, path {:?}, differs from the host's buffer", - client.username, - buffer_id, - path - ); - - let host_file = host_buffer.read_with(host_cx, |b, _| b.file().cloned()); - let guest_file = guest_buffer.read_with(client_cx, |b, _| b.file().cloned()); - match (host_file, guest_file) { - (Some(host_file), Some(guest_file)) => { - assert_eq!(guest_file.path(), host_file.path()); - 
assert_eq!(guest_file.is_deleted(), host_file.is_deleted()); - assert_eq!( - guest_file.mtime(), - host_file.mtime(), - "guest {} mtime does not match host {} for path {:?} in project {}", - guest_user_id, - host_user_id, - guest_file.path(), - project_id, - ); - } - (None, None) => {} - (None, _) => panic!("host's file is None, guest's isn't"), - (_, None) => panic!("guest's file is None, hosts's isn't"), - } - - let host_diff_base = - host_buffer.read_with(host_cx, |b, _| b.diff_base().map(ToString::to_string)); - let guest_diff_base = guest_buffer - .read_with(client_cx, |b, _| b.diff_base().map(ToString::to_string)); - assert_eq!(guest_diff_base, host_diff_base); - - let host_saved_version = - host_buffer.read_with(host_cx, |b, _| b.saved_version().clone()); - let guest_saved_version = - guest_buffer.read_with(client_cx, |b, _| b.saved_version().clone()); - assert_eq!( - guest_saved_version, host_saved_version, - "guest saved version does not match host's for path {path:?} in project {project_id}", - ); - - let host_saved_version_fingerprint = - host_buffer.read_with(host_cx, |b, _| b.saved_version_fingerprint()); - let guest_saved_version_fingerprint = - guest_buffer.read_with(client_cx, |b, _| b.saved_version_fingerprint()); - assert_eq!( - guest_saved_version_fingerprint, host_saved_version_fingerprint, - "guest's saved fingerprint does not match host's for path {path:?} in project {project_id}", - ); - - let host_saved_mtime = host_buffer.read_with(host_cx, |b, _| b.saved_mtime()); - let guest_saved_mtime = guest_buffer.read_with(client_cx, |b, _| b.saved_mtime()); - assert_eq!( - guest_saved_mtime, host_saved_mtime, - "guest's saved mtime does not match host's for path {path:?} in project {project_id}", - ); - - let host_is_dirty = host_buffer.read_with(host_cx, |b, _| b.is_dirty()); - let guest_is_dirty = guest_buffer.read_with(client_cx, |b, _| b.is_dirty()); - assert_eq!(guest_is_dirty, host_is_dirty, - "guest's dirty status does not match host's for 
path {path:?} in project {project_id}", - ); - - let host_has_conflict = host_buffer.read_with(host_cx, |b, _| b.has_conflict()); - let guest_has_conflict = guest_buffer.read_with(client_cx, |b, _| b.has_conflict()); - assert_eq!(guest_has_conflict, host_has_conflict, - "guest's conflict status does not match host's for path {path:?} in project {project_id}", - ); - } - } - } + check_consistency_between_clients(&clients); for (client, mut cx) in clients { cx.update(|cx| { @@ -371,6 +154,15 @@ async fn test_random_collaboration( } } +fn on_failure() { + if let Some(plan) = PLAN.lock().clone() { + if let Some(path) = &*PLAN_SAVE_PATH { + eprintln!("saved test plan to path {:?}", path); + std::fs::write(path, plan.lock().serialize()).unwrap(); + } + } +} + async fn apply_server_operation( deterministic: Arc, server: &mut TestServer, @@ -528,12 +320,13 @@ async fn apply_server_operation( let Some(client_ix) = client_ix else { continue }; applied = true; if let Err(err) = operation_channels[client_ix].unbounded_send(batch_id) { - // panic!("error signaling user {}, client {}", user_id, client_ix); + log::error!("error signaling user {user_id}: {err}"); } } if quiesce && applied { deterministic.run_until_parked(); + check_consistency_between_clients(&clients); } return applied; @@ -996,6 +789,223 @@ async fn apply_client_operation( Ok(()) } +fn check_consistency_between_clients(clients: &[(Rc, TestAppContext)]) { + for (client, client_cx) in clients { + for guest_project in client.remote_projects().iter() { + guest_project.read_with(client_cx, |guest_project, cx| { + let host_project = clients.iter().find_map(|(client, cx)| { + let project = client + .local_projects() + .iter() + .find(|host_project| { + host_project.read_with(cx, |host_project, _| { + host_project.remote_id() == guest_project.remote_id() + }) + })? 
+ .clone(); + Some((project, cx)) + }); + + if !guest_project.is_read_only() { + if let Some((host_project, host_cx)) = host_project { + let host_worktree_snapshots = + host_project.read_with(host_cx, |host_project, cx| { + host_project + .worktrees(cx) + .map(|worktree| { + let worktree = worktree.read(cx); + (worktree.id(), worktree.snapshot()) + }) + .collect::>() + }); + let guest_worktree_snapshots = guest_project + .worktrees(cx) + .map(|worktree| { + let worktree = worktree.read(cx); + (worktree.id(), worktree.snapshot()) + }) + .collect::>(); + + assert_eq!( + guest_worktree_snapshots.values().map(|w| w.abs_path()).collect::>(), + host_worktree_snapshots.values().map(|w| w.abs_path()).collect::>(), + "{} has different worktrees than the host for project {:?}", + client.username, guest_project.remote_id(), + ); + + for (id, host_snapshot) in &host_worktree_snapshots { + let guest_snapshot = &guest_worktree_snapshots[id]; + assert_eq!( + guest_snapshot.root_name(), + host_snapshot.root_name(), + "{} has different root name than the host for worktree {}, project {:?}", + client.username, + id, + guest_project.remote_id(), + ); + assert_eq!( + guest_snapshot.abs_path(), + host_snapshot.abs_path(), + "{} has different abs path than the host for worktree {}, project: {:?}", + client.username, + id, + guest_project.remote_id(), + ); + assert_eq!( + guest_snapshot.entries(false).collect::>(), + host_snapshot.entries(false).collect::>(), + "{} has different snapshot than the host for worktree {:?} and project {:?}", + client.username, + host_snapshot.abs_path(), + guest_project.remote_id(), + ); + assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id(), + "{} has different scan id than the host for worktree {:?} and project {:?}", + client.username, + host_snapshot.abs_path(), + guest_project.remote_id(), + ); + } + } + } + + guest_project.check_invariants(cx); + }); + } + + let buffers = client.buffers().clone(); + for (guest_project, guest_buffers) in 
&buffers { + let project_id = if guest_project.read_with(client_cx, |project, _| { + project.is_local() || project.is_read_only() + }) { + continue; + } else { + guest_project + .read_with(client_cx, |project, _| project.remote_id()) + .unwrap() + }; + let guest_user_id = client.user_id().unwrap(); + + let host_project = clients.iter().find_map(|(client, cx)| { + let project = client + .local_projects() + .iter() + .find(|host_project| { + host_project.read_with(cx, |host_project, _| { + host_project.remote_id() == Some(project_id) + }) + })? + .clone(); + Some((client.user_id().unwrap(), project, cx)) + }); + + let (host_user_id, host_project, host_cx) = + if let Some((host_user_id, host_project, host_cx)) = host_project { + (host_user_id, host_project, host_cx) + } else { + continue; + }; + + for guest_buffer in guest_buffers { + let buffer_id = guest_buffer.read_with(client_cx, |buffer, _| buffer.remote_id()); + let host_buffer = host_project.read_with(host_cx, |project, cx| { + project.buffer_for_id(buffer_id, cx).unwrap_or_else(|| { + panic!( + "host does not have buffer for guest:{}, peer:{:?}, id:{}", + client.username, + client.peer_id(), + buffer_id + ) + }) + }); + let path = host_buffer + .read_with(host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx)); + + assert_eq!( + guest_buffer.read_with(client_cx, |buffer, _| buffer.deferred_ops_len()), + 0, + "{}, buffer {}, path {:?} has deferred operations", + client.username, + buffer_id, + path, + ); + assert_eq!( + guest_buffer.read_with(client_cx, |buffer, _| buffer.text()), + host_buffer.read_with(host_cx, |buffer, _| buffer.text()), + "{}, buffer {}, path {:?}, differs from the host's buffer", + client.username, + buffer_id, + path + ); + + let host_file = host_buffer.read_with(host_cx, |b, _| b.file().cloned()); + let guest_file = guest_buffer.read_with(client_cx, |b, _| b.file().cloned()); + match (host_file, guest_file) { + (Some(host_file), Some(guest_file)) => { + assert_eq!(guest_file.path(), 
host_file.path()); + assert_eq!(guest_file.is_deleted(), host_file.is_deleted()); + assert_eq!( + guest_file.mtime(), + host_file.mtime(), + "guest {} mtime does not match host {} for path {:?} in project {}", + guest_user_id, + host_user_id, + guest_file.path(), + project_id, + ); + } + (None, None) => {} + (None, _) => panic!("host's file is None, guest's isn't"), + (_, None) => panic!("guest's file is None, hosts's isn't"), + } + + let host_diff_base = + host_buffer.read_with(host_cx, |b, _| b.diff_base().map(ToString::to_string)); + let guest_diff_base = guest_buffer + .read_with(client_cx, |b, _| b.diff_base().map(ToString::to_string)); + assert_eq!(guest_diff_base, host_diff_base); + + let host_saved_version = + host_buffer.read_with(host_cx, |b, _| b.saved_version().clone()); + let guest_saved_version = + guest_buffer.read_with(client_cx, |b, _| b.saved_version().clone()); + assert_eq!( + guest_saved_version, host_saved_version, + "guest saved version does not match host's for path {path:?} in project {project_id}", + ); + + let host_saved_version_fingerprint = + host_buffer.read_with(host_cx, |b, _| b.saved_version_fingerprint()); + let guest_saved_version_fingerprint = + guest_buffer.read_with(client_cx, |b, _| b.saved_version_fingerprint()); + assert_eq!( + guest_saved_version_fingerprint, host_saved_version_fingerprint, + "guest's saved fingerprint does not match host's for path {path:?} in project {project_id}", + ); + + let host_saved_mtime = host_buffer.read_with(host_cx, |b, _| b.saved_mtime()); + let guest_saved_mtime = guest_buffer.read_with(client_cx, |b, _| b.saved_mtime()); + assert_eq!( + guest_saved_mtime, host_saved_mtime, + "guest's saved mtime does not match host's for path {path:?} in project {project_id}", + ); + + let host_is_dirty = host_buffer.read_with(host_cx, |b, _| b.is_dirty()); + let guest_is_dirty = guest_buffer.read_with(client_cx, |b, _| b.is_dirty()); + assert_eq!(guest_is_dirty, host_is_dirty, + "guest's dirty status does 
not match host's for path {path:?} in project {project_id}", + ); + + let host_has_conflict = host_buffer.read_with(host_cx, |b, _| b.has_conflict()); + let guest_has_conflict = guest_buffer.read_with(client_cx, |b, _| b.has_conflict()); + assert_eq!(guest_has_conflict, host_has_conflict, + "guest's conflict status does not match host's for path {path:?} in project {project_id}", + ); + } + } + } +} + struct TestPlan { rng: StdRng, replay: bool, From bcf9b2f10dfec38fbff289327725da014c5a4d11 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 3 Apr 2023 22:42:34 -0700 Subject: [PATCH 21/60] Add missing random delays in FakeFs --- crates/fs/src/fs.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 4d0b0c4f44..c53c20c774 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -801,6 +801,8 @@ impl Fs for FakeFs { } async fn rename(&self, old_path: &Path, new_path: &Path, options: RenameOptions) -> Result<()> { + self.simulate_random_delay().await; + let old_path = normalize_path(old_path); let new_path = normalize_path(new_path); let mut state = self.state.lock(); @@ -831,6 +833,8 @@ impl Fs for FakeFs { } async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> { + self.simulate_random_delay().await; + let source = normalize_path(source); let target = normalize_path(target); let mut state = self.state.lock(); @@ -866,6 +870,8 @@ impl Fs for FakeFs { } async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> { + self.simulate_random_delay().await; + let path = normalize_path(path); let parent_path = path .parent() @@ -901,6 +907,8 @@ impl Fs for FakeFs { } async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> { + self.simulate_random_delay().await; + let path = normalize_path(path); let parent_path = path .parent() From 1ccf174388151c2997fdec620fae7d10c08c4b12 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 4 Apr 
2023 18:34:39 -0700 Subject: [PATCH 22/60] Avoid applying outdated UpdateProject messages Co-authored-by: Nathan Sobo --- crates/client/src/client.rs | 15 +++++++++++++-- crates/project/src/project.rs | 18 +++++++++++++----- crates/rpc/src/peer.rs | 25 +++++++++++++++++++++---- 3 files changed, 47 insertions(+), 11 deletions(-) diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index 76004f14a4..ae8cf8bf56 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -10,7 +10,10 @@ use async_tungstenite::tungstenite::{ error::Error as WebsocketError, http::{Request, StatusCode}, }; -use futures::{future::LocalBoxFuture, AsyncReadExt, FutureExt, SinkExt, StreamExt, TryStreamExt}; +use futures::{ + future::LocalBoxFuture, AsyncReadExt, FutureExt, SinkExt, StreamExt, TryFutureExt as _, + TryStreamExt, +}; use gpui::{ actions, serde_json::{self, Value}, @@ -1187,6 +1190,14 @@ impl Client { &self, request: T, ) -> impl Future> { + self.request_envelope(request) + .map_ok(|envelope| envelope.payload) + } + + pub fn request_envelope( + &self, + request: T, + ) -> impl Future>> { let client_id = self.id; log::debug!( "rpc request start. client_id:{}. 
name:{}", @@ -1195,7 +1206,7 @@ impl Client { ); let response = self .connection_id() - .map(|conn_id| self.peer.request(conn_id, request)); + .map(|conn_id| self.peer.request_envelope(conn_id, request)); async move { let response = response?.await; log::debug!( diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 2755f281f3..1e9721339f 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -100,6 +100,7 @@ pub struct Project { next_language_server_id: usize, client: Arc, next_entry_id: Arc, + join_project_response_message_id: u32, next_diagnostic_group_id: usize, user_store: ModelHandle, fs: Arc, @@ -425,6 +426,7 @@ impl Project { loading_buffers_by_path: Default::default(), loading_local_worktrees: Default::default(), buffer_snapshots: Default::default(), + join_project_response_message_id: 0, client_state: None, opened_buffer: watch::channel(), client_subscriptions: Vec::new(), @@ -463,15 +465,15 @@ impl Project { let subscription = client.subscribe_to_entity(remote_id); let response = client - .request(proto::JoinProject { + .request_envelope(proto::JoinProject { project_id: remote_id, }) .await?; let this = cx.add_model(|cx| { - let replica_id = response.replica_id as ReplicaId; + let replica_id = response.payload.replica_id as ReplicaId; let mut worktrees = Vec::new(); - for worktree in response.worktrees { + for worktree in response.payload.worktrees { let worktree = cx.update(|cx| { Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx) }); @@ -487,6 +489,7 @@ impl Project { loading_local_worktrees: Default::default(), active_entry: None, collaborators: Default::default(), + join_project_response_message_id: response.message_id, _maintain_buffer_languages: Self::maintain_buffer_languages(&languages, cx), _maintain_workspace_config: Self::maintain_workspace_config(languages.clone(), cx), languages, @@ -505,6 +508,7 @@ impl Project { language_servers: Default::default(), 
language_server_ids: Default::default(), language_server_statuses: response + .payload .language_servers .into_iter() .map(|server| { @@ -537,6 +541,7 @@ impl Project { let subscription = subscription.set_model(&this, &mut cx); let user_ids = response + .payload .collaborators .iter() .map(|peer| peer.user_id) @@ -546,7 +551,7 @@ impl Project { .await?; this.update(&mut cx, |this, cx| { - this.set_collaborators_from_proto(response.collaborators, cx)?; + this.set_collaborators_from_proto(response.payload.collaborators, cx)?; this.client_subscriptions.push(subscription); anyhow::Ok(()) })?; @@ -4930,7 +4935,10 @@ impl Project { mut cx: AsyncAppContext, ) -> Result<()> { this.update(&mut cx, |this, cx| { - this.set_worktrees_from_proto(envelope.payload.worktrees, cx)?; + // Don't handle messages that were sent before the response to us joining the project + if envelope.message_id > this.join_project_response_message_id { + this.set_worktrees_from_proto(envelope.payload.worktrees, cx)?; + } Ok(()) }) } diff --git a/crates/rpc/src/peer.rs b/crates/rpc/src/peer.rs index 0df87fd92d..72ddfa567b 100644 --- a/crates/rpc/src/peer.rs +++ b/crates/rpc/src/peer.rs @@ -7,7 +7,7 @@ use collections::HashMap; use futures::{ channel::{mpsc, oneshot}, stream::BoxStream, - FutureExt, SinkExt, StreamExt, + FutureExt, SinkExt, StreamExt, TryFutureExt, }; use parking_lot::{Mutex, RwLock}; use serde::{ser::SerializeStruct, Serialize}; @@ -71,6 +71,7 @@ impl Clone for Receipt { impl Copy for Receipt {} +#[derive(Clone, Debug)] pub struct TypedEnvelope { pub sender_id: ConnectionId, pub original_sender_id: Option, @@ -370,6 +371,15 @@ impl Peer { receiver_id: ConnectionId, request: T, ) -> impl Future> { + self.request_internal(None, receiver_id, request) + .map_ok(|envelope| envelope.payload) + } + + pub fn request_envelope( + &self, + receiver_id: ConnectionId, + request: T, + ) -> impl Future>> { self.request_internal(None, receiver_id, request) } @@ -380,6 +390,7 @@ impl Peer { request: 
T, ) -> impl Future> { self.request_internal(Some(sender_id), receiver_id, request) + .map_ok(|envelope| envelope.payload) } pub fn request_internal( @@ -387,7 +398,7 @@ impl Peer { original_sender_id: Option, receiver_id: ConnectionId, request: T, - ) -> impl Future> { + ) -> impl Future>> { let (tx, rx) = oneshot::channel(); let send = self.connection_state(receiver_id).and_then(|connection| { let message_id = connection.next_message_id.fetch_add(1, SeqCst); @@ -410,6 +421,7 @@ impl Peer { async move { send?; let (response, _barrier) = rx.await.map_err(|_| anyhow!("connection was closed"))?; + if let Some(proto::envelope::Payload::Error(error)) = &response.payload { Err(anyhow!( "RPC request {} failed - {}", @@ -417,8 +429,13 @@ impl Peer { error.message )) } else { - T::Response::from_envelope(response) - .ok_or_else(|| anyhow!("received response of the wrong type")) + Ok(TypedEnvelope { + message_id: response.id, + sender_id: receiver_id, + original_sender_id: response.original_sender_id, + payload: T::Response::from_envelope(response) + .ok_or_else(|| anyhow!("received response of the wrong type"))?, + }) } } } From 1159f5517b6a6aed577ce62358a379ce5ddfec88 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 4 Apr 2023 21:49:34 -0700 Subject: [PATCH 23/60] Avoid applying outdated UpdateProject methods after rejoining a room --- crates/call/src/room.rs | 6 ++++-- crates/project/src/project.rs | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index eeb8a6a5d8..70d70218f3 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -424,7 +424,7 @@ impl Room { false }); - let response = self.client.request(proto::RejoinRoom { + let response = self.client.request_envelope(proto::RejoinRoom { id: self.id, reshared_projects, rejoined_projects, @@ -432,6 +432,8 @@ impl Room { cx.spawn(|this, mut cx| async move { let response = response.await?; + let message_id = response.message_id; + 
let response = response.payload; let room_proto = response.room.ok_or_else(|| anyhow!("invalid room"))?; this.update(&mut cx, |this, cx| { this.status = RoomStatus::Online; @@ -448,7 +450,7 @@ impl Room { for rejoined_project in response.rejoined_projects { if let Some(project) = projects.get(&rejoined_project.id) { project.update(cx, |project, cx| { - project.rejoined(rejoined_project, cx).log_err(); + project.rejoined(rejoined_project, message_id, cx).log_err(); }); } } diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 1e9721339f..3e5a450075 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1089,8 +1089,10 @@ impl Project { pub fn rejoined( &mut self, message: proto::RejoinedProject, + message_id: u32, cx: &mut ModelContext, ) -> Result<()> { + self.join_project_response_message_id = message_id; self.set_worktrees_from_proto(message.worktrees, cx)?; self.set_collaborators_from_proto(message.collaborators, cx)?; self.language_server_statuses = message From 781d66f628dfb302f07c11bdce3f3bd676a91470 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 5 Apr 2023 15:04:27 -0700 Subject: [PATCH 24/60] Omit operations for non-existent users from serialized test plan --- .../src/tests/randomized_integration_tests.rs | 41 ++++++++++--------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index ffe09b74bd..e53f33b16d 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -120,8 +120,8 @@ async fn test_random_collaboration( let mut operation_channels = Vec::new(); loop { - let Some((next_operation, skipped)) = plan.lock().next_server_operation(&clients) else { break }; - let applied = apply_server_operation( + let Some((next_operation, applied)) = plan.lock().next_server_operation(&clients) else { break 
}; + let did_apply = apply_server_operation( deterministic.clone(), &mut server, &mut clients, @@ -132,8 +132,8 @@ async fn test_random_collaboration( cx, ) .await; - if !applied { - skipped.store(true, SeqCst); + if did_apply { + applied.store(true, SeqCst); } } @@ -1207,8 +1207,8 @@ impl TestPlan { // Format each operation as one line let mut json = Vec::new(); json.push(b'['); - for (operation, skipped) in &self.stored_operations { - if skipped.load(SeqCst) { + for (operation, applied) in &self.stored_operations { + if !applied.load(SeqCst) { continue; } if json.len() > 1 { @@ -1228,17 +1228,17 @@ impl TestPlan { if self.replay { while let Some(stored_operation) = self.stored_operations.get(self.operation_ix) { self.operation_ix += 1; - if let (StoredOperation::Server(operation), skipped) = stored_operation { - return Some((operation.clone(), skipped.clone())); + if let (StoredOperation::Server(operation), applied) = stored_operation { + return Some((operation.clone(), applied.clone())); } } None } else { let operation = self.generate_server_operation(clients)?; - let skipped = Arc::new(AtomicBool::new(false)); + let applied = Arc::new(AtomicBool::new(false)); self.stored_operations - .push((StoredOperation::Server(operation.clone()), skipped.clone())); - Some((operation, skipped)) + .push((StoredOperation::Server(operation.clone()), applied.clone())); + Some((operation, applied)) } } @@ -1263,27 +1263,27 @@ impl TestPlan { StoredOperation::Client { user_id, operation, .. 
}, - skipped, + applied, ) = stored_operation { if user_id == ¤t_user_id { - return Some((operation.clone(), skipped.clone())); + return Some((operation.clone(), applied.clone())); } } } None } else { let operation = self.generate_client_operation(current_user_id, client, cx)?; - let skipped = Arc::new(AtomicBool::new(false)); + let applied = Arc::new(AtomicBool::new(false)); self.stored_operations.push(( StoredOperation::Client { user_id: current_user_id, batch_id: current_batch_id, operation: operation.clone(), }, - skipped.clone(), + applied.clone(), )); - Some((operation, skipped)) + Some((operation, applied)) } } @@ -1851,11 +1851,14 @@ async fn simulate_client( client.language_registry.add(Arc::new(language)); while let Some(batch_id) = operation_rx.next().await { - let Some((operation, skipped)) = plan.lock().next_client_operation(&client, batch_id, &cx) else { break }; + let Some((operation, applied)) = plan.lock().next_client_operation(&client, batch_id, &cx) else { break }; match apply_client_operation(&client, operation, &mut cx).await { - Ok(()) => {} - Err(TestError::Inapplicable) => skipped.store(true, SeqCst), + Ok(()) => applied.store(true, SeqCst), + Err(TestError::Inapplicable) => { + log::info!("skipped operation"); + } Err(TestError::Other(error)) => { + applied.store(true, SeqCst); log::error!("{} error: {}", client.username, error); } } From 661fba864025f3d2e3b1efe53b9beb684e821031 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 5 Apr 2023 15:05:32 -0700 Subject: [PATCH 25/60] Run executor until parked at end of each iteration of random collaboration test Without this, the server doesn't get dropped at the end of the test, and we eventually run out of file handles due to sqlite connections being retained. 
--- crates/collab/src/tests/randomized_integration_tests.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index e53f33b16d..b85633ba15 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -152,6 +152,8 @@ async fn test_random_collaboration( drop(client); }); } + + deterministic.run_until_parked(); } fn on_failure() { From 43a94cda5fa4e3dc9255656cc66416c23a75fc5d Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 5 Apr 2023 15:36:01 -0700 Subject: [PATCH 26/60] Don't skip worktree updates if unknown entries are removed When rejoining a project, if entries were both created and deleted since joining the project, the guest will receive those entry ids as removed. --- crates/project/src/worktree.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index b1aebf29f1..cbd80def6c 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -1227,11 +1227,10 @@ impl Snapshot { let mut entries_by_path_edits = Vec::new(); let mut entries_by_id_edits = Vec::new(); for entry_id in update.removed_entries { - let entry = self - .entry_for_id(ProjectEntryId::from_proto(entry_id)) - .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?; - entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone()))); - entries_by_id_edits.push(Edit::Remove(entry.id)); + if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) { + entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone()))); + entries_by_id_edits.push(Edit::Remove(entry.id)); + } } for entry in update.updated_entries { From 8e68c7f808bc3e07f17f0ce998f54388721daeba Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 5 Apr 2023 16:52:39 -0700 Subject: [PATCH 27/60] Do include
operations in serialized test plan if they cause a client to hang --- crates/collab/src/tests/randomized_integration_tests.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index b85633ba15..c70c76a76a 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -1854,13 +1854,14 @@ async fn simulate_client( while let Some(batch_id) = operation_rx.next().await { let Some((operation, applied)) = plan.lock().next_client_operation(&client, batch_id, &cx) else { break }; + applied.store(true, SeqCst); match apply_client_operation(&client, operation, &mut cx).await { - Ok(()) => applied.store(true, SeqCst), + Ok(()) => {} Err(TestError::Inapplicable) => { + applied.store(false, SeqCst); log::info!("skipped operation"); } Err(TestError::Other(error)) => { - applied.store(true, SeqCst); log::error!("{} error: {}", client.username, error); } } From bda708622093660eef6ec6289093346d0ed13e85 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 5 Apr 2023 16:53:36 -0700 Subject: [PATCH 28/60] Clear guest's shared buffers if they rejoin project after leaving while host was disconnected --- crates/project/src/project.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 3e5a450075..88a187982c 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -4847,6 +4847,7 @@ impl Project { let collaborator = Collaborator::from_proto(collaborator)?; this.update(&mut cx, |this, cx| { + this.shared_buffers.remove(&collaborator.peer_id); this.collaborators .insert(collaborator.peer_id, collaborator); cx.notify(); From 1064b147794e06a49835fb164a51ccbebf0ebad0 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 5 Apr 2023 17:50:04 -0700 Subject: [PATCH 29/60] Don't use TestPlan's rng in 
fake LSP handlers These should use the test context's rng, so that they behave the same whether a pre-recorded plan was used, or the plan is being generated. --- .../src/tests/randomized_integration_tests.rs | 40 +++++++++---------- crates/gpui/src/executor.rs | 10 +++++ 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index c70c76a76a..a592881929 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -1755,7 +1755,6 @@ async fn simulate_client( name: "the-fake-language-server", capabilities: lsp::LanguageServer::full_capabilities(), initializer: Some(Box::new({ - let plan = plan.clone(); let fs = client.fs.clone(); move |fake_server: &mut FakeLanguageServer| { fake_server.handle_request::( @@ -1797,16 +1796,15 @@ async fn simulate_client( fake_server.handle_request::({ let fs = fs.clone(); - let plan = plan.clone(); - move |_, _| { - let fs = fs.clone(); - let plan = plan.clone(); + move |_, cx| { + let background = cx.background(); + let mut rng = background.rng(); + let count = rng.gen_range::(1..3); + let files = fs.files(); + let files = (0..count) + .map(|_| files.choose(&mut *rng).unwrap().clone()) + .collect::>(); async move { - let files = fs.files(); - let count = plan.lock().rng.gen_range::(1..3); - let files = (0..count) - .map(|_| files.choose(&mut plan.lock().rng).unwrap()) - .collect::>(); log::info!("LSP: Returning definitions in files {:?}", &files); Ok(Some(lsp::GotoDefinitionResponse::Array( files @@ -1821,17 +1819,19 @@ async fn simulate_client( } }); - fake_server.handle_request::({ - let plan = plan.clone(); - move |_, _| { + fake_server.handle_request::( + move |_, cx| { let mut highlights = Vec::new(); - let highlight_count = plan.lock().rng.gen_range(1..=5); + let background = cx.background(); + let mut rng = background.rng(); + + let 
highlight_count = rng.gen_range(1..=5); for _ in 0..highlight_count { - let start_row = plan.lock().rng.gen_range(0..100); - let start_column = plan.lock().rng.gen_range(0..100); + let start_row = rng.gen_range(0..100); + let start_column = rng.gen_range(0..100); + let end_row = rng.gen_range(0..100); + let end_column = rng.gen_range(0..100); let start = PointUtf16::new(start_row, start_column); - let end_row = plan.lock().rng.gen_range(0..100); - let end_column = plan.lock().rng.gen_range(0..100); let end = PointUtf16::new(end_row, end_column); let range = if start > end { end..start } else { start..end }; highlights.push(lsp::DocumentHighlight { @@ -1843,8 +1843,8 @@ async fn simulate_client( (highlight.range.start, highlight.range.end) }); async move { Ok(Some(highlights)) } - } - }); + }, + ); } })), ..Default::default() diff --git a/crates/gpui/src/executor.rs b/crates/gpui/src/executor.rs index 16afa987e9..3ed6abc8e0 100644 --- a/crates/gpui/src/executor.rs +++ b/crates/gpui/src/executor.rs @@ -829,6 +829,16 @@ impl Background { } } + #[cfg(any(test, feature = "test-support"))] + pub fn rng<'a>(&'a self) -> impl 'a + std::ops::DerefMut { + match self { + Self::Deterministic { executor, .. } => { + parking_lot::lock_api::MutexGuard::map(executor.state.lock(), |s| &mut s.rng) + } + _ => panic!("this method can only be called on a deterministic executor"), + } + } + #[cfg(any(test, feature = "test-support"))] pub async fn simulate_random_delay(&self) { match self { From bf3b8adf359a4bf45222d99c6108bfd597c1a2b9 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 5 Apr 2023 18:11:33 -0700 Subject: [PATCH 30/60] Avoid async fs call before checking if operation is applicable This way, the executor isn't influenced by operations that aren't applicable. 
--- .../src/tests/randomized_integration_tests.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index a592881929..144484816b 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -735,11 +735,13 @@ async fn apply_client_operation( is_dir, content, } => { - client + if !client .fs - .metadata(&path.parent().unwrap()) - .await? - .ok_or(TestError::Inapplicable)?; + .directories() + .contains(&path.parent().unwrap().to_owned()) + { + return Err(TestError::Inapplicable); + } if is_dir { log::info!("{}: creating dir at {:?}", client.username, path); @@ -761,13 +763,8 @@ async fn apply_client_operation( repo_path, contents, } => { - if !client - .fs - .metadata(&repo_path) - .await? - .map_or(false, |m| m.is_dir) - { - Err(TestError::Inapplicable)?; + if !client.fs.directories().contains(&repo_path) { + return Err(TestError::Inapplicable); } log::info!( From d7f56d6126fd93cfa59429894446b348a9dec6f0 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 6 Apr 2023 13:49:09 +0200 Subject: [PATCH 31/60] Forget which buffers were shared when host reconnects This fixes a bug where the host would momentarily disconnect and the guest would close and reopen the project. This would cause the host to not observe the guest closing the project. When the guest tried to open one of the buffers opened prior to closing the project, the host would not send them the buffer state because it would still remember that the buffer was shared. The `shared_buffers` map is now cleared when the host reconnects and will slowly get re-filled as guests issue `SynchronizeBuffers` requests. 
--- crates/project/src/project.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 88a187982c..19078f31d7 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1081,6 +1081,7 @@ impl Project { message: proto::ResharedProject, cx: &mut ModelContext, ) -> Result<()> { + self.shared_buffers.clear(); self.set_collaborators_from_proto(message.collaborators, cx)?; let _ = self.metadata_changed(cx); Ok(()) From f995d07542fd96630c593ff34fa0d432d1f73e12 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 6 Apr 2023 14:42:19 +0200 Subject: [PATCH 32/60] Return error if subscribing to an entity that was already subscribed to --- crates/client/src/client.rs | 27 +++++++++++++++++---------- crates/project/src/project.rs | 13 ++++++------- 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index ae8cf8bf56..f405c14a18 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -473,18 +473,22 @@ impl Client { pub fn subscribe_to_entity( self: &Arc, remote_id: u64, - ) -> PendingEntitySubscription { + ) -> Result> { let id = (TypeId::of::(), remote_id); - self.state - .write() - .entities_by_type_and_remote_id - .insert(id, WeakSubscriber::Pending(Default::default())); - PendingEntitySubscription { - client: self.clone(), - remote_id, - consumed: false, - _entity_type: PhantomData, + let mut state = self.state.write(); + if state.entities_by_type_and_remote_id.contains_key(&id) { + return Err(anyhow!("already subscribed to entity")); + } else { + state + .entities_by_type_and_remote_id + .insert(id, WeakSubscriber::Pending(Default::default())); + Ok(PendingEntitySubscription { + client: self.clone(), + remote_id, + consumed: false, + _entity_type: PhantomData, + }) } } @@ -1605,14 +1609,17 @@ mod tests { let _subscription1 = client .subscribe_to_entity(1) + .unwrap() .set_model(&model1, 
&mut cx.to_async()); let _subscription2 = client .subscribe_to_entity(2) + .unwrap() .set_model(&model2, &mut cx.to_async()); // Ensure dropping a subscription for the same entity type still allows receiving of // messages for other entity IDs of the same type. let subscription3 = client .subscribe_to_entity(3) + .unwrap() .set_model(&model3, &mut cx.to_async()); drop(subscription3); diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 19078f31d7..72f3d05cae 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -463,7 +463,7 @@ impl Project { ) -> Result> { client.authenticate_and_connect(true, &cx).await?; - let subscription = client.subscribe_to_entity(remote_id); + let subscription = client.subscribe_to_entity(remote_id)?; let response = client .request_envelope(proto::JoinProject { project_id: remote_id, @@ -989,6 +989,11 @@ impl Project { if self.client_state.is_some() { return Err(anyhow!("project was already shared")); } + self.client_subscriptions.push( + self.client + .subscribe_to_entity(project_id)? + .set_model(&cx.handle(), &mut cx.to_async()), + ); for open_buffer in self.opened_buffers.values_mut() { match open_buffer { @@ -1025,12 +1030,6 @@ impl Project { .log_err(); } - self.client_subscriptions.push( - self.client - .subscribe_to_entity(project_id) - .set_model(&cx.handle(), &mut cx.to_async()), - ); - let (metadata_changed_tx, mut metadata_changed_rx) = mpsc::unbounded(); self.client_state = Some(ProjectClientState::Local { remote_id: project_id, From 4a61e2dfa45f982585019b3859a41be727ec9aa2 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 6 Apr 2023 16:02:16 +0200 Subject: [PATCH 33/60] Save server operations that were in the middle of being applied Previously, if the test panicked before it had a chance to fully apply an operation, it would end up not being saved in the plan. 
With this commit we will mark the operation as applied before we start processing it, and mark it as not applied if, once we're done, we've found out that it couldn't be applied. This is consistent with what we do for client operations. --- crates/collab/src/tests/randomized_integration_tests.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 144484816b..6a13c4ef2e 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -121,6 +121,7 @@ async fn test_random_collaboration( loop { let Some((next_operation, applied)) = plan.lock().next_server_operation(&clients) else { break }; + applied.store(true, SeqCst); let did_apply = apply_server_operation( deterministic.clone(), &mut server, @@ -132,8 +133,8 @@ async fn test_random_collaboration( cx, ) .await; - if did_apply { - applied.store(true, SeqCst); + if !did_apply { + applied.store(false, SeqCst); } } From 8020ea783fc3eaa90d0aa38ee328f5c32e734005 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 6 Apr 2023 16:23:10 +0200 Subject: [PATCH 34/60] Wait to see guest's buffer version before converting completion anchor --- crates/project/src/project.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 72f3d05cae..a9cab023cd 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -5550,6 +5550,12 @@ impl Project { .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id)) })?; + let version = deserialize_version(envelope.payload.version); + buffer + .update(&mut cx, |buffer, _| buffer.wait_for_version(version)) + .await; + let version = buffer.read_with(&cx, |buffer, _| buffer.version()); + let position = envelope .payload .position @@ -5561,12 +5567,6 @@ impl 
Project { }) .ok_or_else(|| anyhow!("invalid position"))?; - let version = deserialize_version(envelope.payload.version); - buffer - .update(&mut cx, |buffer, _| buffer.wait_for_version(version)) - .await; - let version = buffer.read_with(&cx, |buffer, _| buffer.version()); - let completions = this .update(&mut cx, |this, cx| this.completions(&buffer, position, cx)) .await?; From ef04dc14ccf54b716cb331690fce25ef83939d72 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 6 Apr 2023 17:48:44 +0200 Subject: [PATCH 35/60] Update file on incomplete buffer instead of waiting for it to be opened This ensures that two successive file updates coming from the host are not applied in reverse order. --- crates/project/src/project.rs | 30 +++++++++++------------------- 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index a9cab023cd..4e39f5e155 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -5313,28 +5313,20 @@ impl Project { mut cx: AsyncAppContext, ) -> Result<()> { let buffer_id = envelope.payload.buffer_id; - let is_incomplete = this.read_with(&cx, |this, _| { - this.incomplete_remote_buffers.contains_key(&buffer_id) - }); - - let buffer = if is_incomplete { - Some( - this.update(&mut cx, |this, cx| { - this.wait_for_remote_buffer(buffer_id, cx) - }) - .await?, - ) - } else { - None - }; this.update(&mut cx, |this, cx| { let payload = envelope.payload.clone(); - if let Some(buffer) = buffer.or_else(|| { - this.opened_buffers - .get(&buffer_id) - .and_then(|b| b.upgrade(cx)) - }) { + if let Some(buffer) = this + .opened_buffers + .get(&buffer_id) + .and_then(|b| b.upgrade(cx)) + .or_else(|| { + this.incomplete_remote_buffers + .get(&buffer_id) + .cloned() + .flatten() + }) + { let file = payload.file.ok_or_else(|| anyhow!("invalid file"))?; let worktree = this .worktree_for_id(WorktreeId::from_proto(file.worktree_id), cx) From 
22a6a243bc5711a8b313d286166685932c1ff3a1 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 6 Apr 2023 11:38:21 -0700 Subject: [PATCH 36/60] Move project assertions into main assertion function Co-authored-by: Antonio Scandurra --- .../src/tests/randomized_integration_tests.rs | 12 ++++++- crates/project/src/project.rs | 36 +++---------------- 2 files changed, 16 insertions(+), 32 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 6a13c4ef2e..20309d1a63 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -869,7 +869,17 @@ fn check_consistency_between_clients(clients: &[(Rc, TestAppContext) } } - guest_project.check_invariants(cx); + for buffer in guest_project.opened_buffers(cx) { + let buffer = buffer.read(cx); + assert_eq!( + buffer.deferred_ops_len(), + 0, + "{} has deferred operations for buffer {:?} in project {:?}", + client.username, + buffer.file().unwrap().full_path(cx), + guest_project.remote_id(), + ); + } }); } diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 4e39f5e155..7589a52fe3 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -659,37 +659,11 @@ impl Project { } #[cfg(any(test, feature = "test-support"))] - pub fn check_invariants(&self, cx: &AppContext) { - if self.is_local() { - let mut worktree_root_paths = HashMap::default(); - for worktree in self.worktrees(cx) { - let worktree = worktree.read(cx); - let abs_path = worktree.as_local().unwrap().abs_path().clone(); - let prev_worktree_id = worktree_root_paths.insert(abs_path.clone(), worktree.id()); - assert_eq!( - prev_worktree_id, - None, - "abs path {:?} for worktree {:?} is not unique ({:?} was already registered with the same path)", - abs_path, - worktree.id(), - prev_worktree_id - ) - } - } else { - let replica_id = self.replica_id(); - for buffer 
in self.opened_buffers.values() { - if let Some(buffer) = buffer.upgrade(cx) { - let buffer = buffer.read(cx); - assert_eq!( - buffer.deferred_ops_len(), - 0, - "replica {}, buffer {} has deferred operations", - replica_id, - buffer.remote_id() - ); - } - } - } + pub fn opened_buffers(&self, cx: &AppContext) -> Vec> { + self.opened_buffers + .values() + .filter_map(|b| b.upgrade(cx)) + .collect() } #[cfg(any(test, feature = "test-support"))] From aa7918c4b5442a4d85320830555d4b5dfd17659d Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 6 Apr 2023 12:17:25 -0700 Subject: [PATCH 37/60] Fix handling of redundant buffer creation messages on guests Check if the buffer already exists *before* overwriting it. Ignore redundant registrations on remote projects. Co-authored-by: Antonio Scandurra --- crates/project/src/project.rs | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 7589a52fe3..bbaa76ea69 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1488,32 +1488,29 @@ impl Project { }); let remote_id = buffer.read(cx).remote_id(); - let open_buffer = if self.is_remote() || self.is_shared() { + let is_remote = self.is_remote(); + let open_buffer = if is_remote || self.is_shared() { OpenBuffer::Strong(buffer.clone()) } else { OpenBuffer::Weak(buffer.downgrade()) }; - match self.opened_buffers.insert(remote_id, open_buffer) { - None => {} - Some(OpenBuffer::Operations(operations)) => { - buffer.update(cx, |buffer, cx| buffer.apply_ops(operations, cx))? + match self.opened_buffers.entry(remote_id) { + hash_map::Entry::Vacant(entry) => { + entry.insert(open_buffer); } - Some(OpenBuffer::Weak(existing_handle)) => { - if existing_handle.upgrade(cx).is_some() { - debug_panic!("already registered buffer with remote id {}", remote_id); - Err(anyhow!( - "already registered buffer with remote id {}", - remote_id - ))? 
+ hash_map::Entry::Occupied(mut entry) => { + if let OpenBuffer::Operations(operations) = entry.get_mut() { + buffer.update(cx, |b, cx| b.apply_ops(operations.drain(..), cx))?; + } else if entry.get().upgrade(cx).is_some() { + if is_remote { + return Ok(()); + } else { + debug_panic!("buffer {} was already registered", remote_id); + Err(anyhow!("buffer {} was already registered", remote_id))?; + } } - } - Some(OpenBuffer::Strong(_)) => { - debug_panic!("already registered buffer with remote id {}", remote_id); - Err(anyhow!( - "already registered buffer with remote id {}", - remote_id - ))? + entry.insert(open_buffer); } } cx.subscribe(buffer, |this, buffer, event, cx| { From f519f32ec2e3b1f7088157eccdb43b94f878e94c Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Fri, 7 Apr 2023 12:24:59 -0700 Subject: [PATCH 38/60] Fixed removal of closed projects in randomized test Co-authored-by: Antonio Scandurra --- crates/collab/src/tests/randomized_integration_tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 20309d1a63..fc491fd7f3 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -448,7 +448,7 @@ async fn apply_client_operation( .unwrap(); cx.update(|_| { client.remote_projects_mut().remove(ix); - client.buffers().retain(|project, _| project != project); + client.buffers().retain(|p, _| *p != project); drop(project); }); } From e50c48852ae4d7569c6ab28f5019ec5addf314b4 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Fri, 7 Apr 2023 16:27:48 -0700 Subject: [PATCH 39/60] Wait for host to acknowledge buffer updates before sending them to other guests --- crates/collab/src/rpc.rs | 35 +++++++++++++++++++++++++++++------ crates/project/src/project.rs | 6 +++--- crates/rpc/src/rpc.rs | 2 +- 3 files changed, 33 insertions(+), 10 deletions(-) diff 
--git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 42a88d7d4c..c9b9efdc4c 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1655,17 +1655,40 @@ async fn update_buffer( ) -> Result<()> { session.executor.record_backtrace(); let project_id = ProjectId::from_proto(request.project_id); - let project_connection_ids = session - .db() - .await - .project_connection_ids(project_id, session.connection_id) - .await?; + let host_connection_id = { + let collaborators = session + .db() + .await + .project_collaborators(project_id, session.connection_id) + .await?; + + let host = collaborators + .iter() + .find(|collaborator| collaborator.is_host) + .ok_or_else(|| anyhow!("host not found"))?; + host.connection_id + }; + + if host_connection_id != session.connection_id { + session + .peer + .forward_request(session.connection_id, host_connection_id, request.clone()) + .await?; + } session.executor.record_backtrace(); + let collaborators = session + .db() + .await + .project_collaborators(project_id, session.connection_id) + .await?; broadcast( Some(session.connection_id), - project_connection_ids.iter().copied(), + collaborators + .iter() + .filter(|collaborator| !collaborator.is_host) + .map(|collaborator| collaborator.connection_id), |connection_id| { session .peer diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index bbaa76ea69..376c84a9d0 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -380,7 +380,7 @@ impl Project { client.add_model_message_handler(Self::handle_unshare_project); client.add_model_message_handler(Self::handle_create_buffer_for_peer); client.add_model_message_handler(Self::handle_update_buffer_file); - client.add_model_message_handler(Self::handle_update_buffer); + client.add_model_request_handler(Self::handle_update_buffer); client.add_model_message_handler(Self::handle_update_diagnostic_summary); client.add_model_message_handler(Self::handle_update_worktree); 
client.add_model_request_handler(Self::handle_create_project_entry); @@ -5160,7 +5160,7 @@ impl Project { envelope: TypedEnvelope, _: Arc, mut cx: AsyncAppContext, - ) -> Result<()> { + ) -> Result { this.update(&mut cx, |this, cx| { let payload = envelope.payload.clone(); let buffer_id = payload.buffer_id; @@ -5187,7 +5187,7 @@ impl Project { e.insert(OpenBuffer::Operations(ops)); } } - Ok(()) + Ok(proto::Ack {}) }) } diff --git a/crates/rpc/src/rpc.rs b/crates/rpc/src/rpc.rs index bec518b707..898c8c5e98 100644 --- a/crates/rpc/src/rpc.rs +++ b/crates/rpc/src/rpc.rs @@ -6,4 +6,4 @@ pub use conn::Connection; pub use peer::*; mod macros; -pub const PROTOCOL_VERSION: u32 = 50; +pub const PROTOCOL_VERSION: u32 = 51; From acbf9b55d71247488882a906416dd6da7f3a6fd6 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Fri, 7 Apr 2023 17:31:47 -0700 Subject: [PATCH 40/60] Halt UpdateBuffer messages until sync if one errors Co-authored-by: Antonio Scandurra --- crates/project/src/project.rs | 158 ++++++++++++++++++++++++---------- 1 file changed, 114 insertions(+), 44 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 376c84a9d0..f915d53c01 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -13,7 +13,10 @@ use client::{proto, Client, TypedEnvelope, UserStore}; use clock::ReplicaId; use collections::{hash_map, BTreeMap, HashMap, HashSet}; use futures::{ - channel::{mpsc, oneshot}, + channel::{ + mpsc::{self, UnboundedReceiver}, + oneshot, + }, future::{try_join_all, Shared}, AsyncWriteExt, Future, FutureExt, StreamExt, TryFutureExt, }; @@ -92,6 +95,7 @@ pub trait Item { pub struct Project { worktrees: Vec, active_entry: Option, + buffer_changes_tx: mpsc::UnboundedSender, languages: Arc, language_servers: HashMap, language_server_ids: HashMap<(WorktreeId, LanguageServerName), usize>, @@ -130,6 +134,14 @@ pub struct Project { terminals: Terminals, } +enum BufferMessage { + Operation { + buffer_id: u64, + 
operation: proto::Operation, + }, + Resync, +} + enum OpenBuffer { Strong(ModelHandle), Weak(WeakModelHandle), @@ -417,39 +429,45 @@ impl Project { fs: Arc, cx: &mut MutableAppContext, ) -> ModelHandle { - cx.add_model(|cx: &mut ModelContext| Self { - worktrees: Default::default(), - collaborators: Default::default(), - opened_buffers: Default::default(), - shared_buffers: Default::default(), - incomplete_remote_buffers: Default::default(), - loading_buffers_by_path: Default::default(), - loading_local_worktrees: Default::default(), - buffer_snapshots: Default::default(), - join_project_response_message_id: 0, - client_state: None, - opened_buffer: watch::channel(), - client_subscriptions: Vec::new(), - _subscriptions: vec![cx.observe_global::(Self::on_settings_changed)], - _maintain_buffer_languages: Self::maintain_buffer_languages(&languages, cx), - _maintain_workspace_config: Self::maintain_workspace_config(languages.clone(), cx), - active_entry: None, - languages, - client, - user_store, - fs, - next_entry_id: Default::default(), - next_diagnostic_group_id: Default::default(), - language_servers: Default::default(), - language_server_ids: Default::default(), - language_server_statuses: Default::default(), - last_workspace_edits_by_language_server: Default::default(), - buffers_being_formatted: Default::default(), - next_language_server_id: 0, - nonce: StdRng::from_entropy().gen(), - terminals: Terminals { - local_handles: Vec::new(), - }, + cx.add_model(|cx: &mut ModelContext| { + let (tx, rx) = mpsc::unbounded(); + cx.spawn_weak(|this, cx| Self::send_buffer_messages(this, rx, cx)) + .detach(); + Self { + worktrees: Default::default(), + buffer_changes_tx: tx, + collaborators: Default::default(), + opened_buffers: Default::default(), + shared_buffers: Default::default(), + incomplete_remote_buffers: Default::default(), + loading_buffers_by_path: Default::default(), + loading_local_worktrees: Default::default(), + buffer_snapshots: Default::default(), + 
join_project_response_message_id: 0, + client_state: None, + opened_buffer: watch::channel(), + client_subscriptions: Vec::new(), + _subscriptions: vec![cx.observe_global::(Self::on_settings_changed)], + _maintain_buffer_languages: Self::maintain_buffer_languages(&languages, cx), + _maintain_workspace_config: Self::maintain_workspace_config(languages.clone(), cx), + active_entry: None, + languages, + client, + user_store, + fs, + next_entry_id: Default::default(), + next_diagnostic_group_id: Default::default(), + language_servers: Default::default(), + language_server_ids: Default::default(), + language_server_statuses: Default::default(), + last_workspace_edits_by_language_server: Default::default(), + buffers_being_formatted: Default::default(), + next_language_server_id: 0, + nonce: StdRng::from_entropy().gen(), + terminals: Terminals { + local_handles: Vec::new(), + }, + } }) } @@ -480,8 +498,12 @@ impl Project { worktrees.push(worktree); } + let (tx, rx) = mpsc::unbounded(); + cx.spawn_weak(|this, cx| Self::send_buffer_messages(this, rx, cx)) + .detach(); let mut this = Self { worktrees: Vec::new(), + buffer_changes_tx: tx, loading_buffers_by_path: Default::default(), opened_buffer: watch::channel(), shared_buffers: Default::default(), @@ -1084,8 +1106,9 @@ impl Project { ) }) .collect(); - self.synchronize_remote_buffers(cx).detach_and_log_err(cx); - + self.buffer_changes_tx + .unbounded_send(BufferMessage::Resync) + .unwrap(); cx.notify(); Ok(()) } @@ -1635,6 +1658,53 @@ impl Project { }); } + async fn send_buffer_messages( + this: WeakModelHandle, + mut rx: UnboundedReceiver, + mut cx: AsyncAppContext, + ) { + let mut needs_resync_with_host = false; + while let Some(change) = rx.next().await { + if let Some(this) = this.upgrade(&mut cx) { + let is_local = this.read_with(&cx, |this, _| this.is_local()); + match change { + BufferMessage::Operation { + buffer_id, + operation, + } => { + if needs_resync_with_host { + continue; + } + let request = 
this.read_with(&cx, |this, _| { + let project_id = this.remote_id()?; + Some(this.client.request(proto::UpdateBuffer { + buffer_id, + project_id, + operations: vec![operation], + })) + }); + if let Some(request) = request { + if request.await.is_err() && !is_local { + needs_resync_with_host = true; + } + } + } + BufferMessage::Resync => { + if this + .update(&mut cx, |this, cx| this.synchronize_remote_buffers(cx)) + .await + .is_ok() + { + needs_resync_with_host = false; + } + } + } + } else { + break; + } + } + } + fn on_buffer_event( &mut self, buffer: ModelHandle, @@ -1643,14 +1713,12 @@ impl Project { ) -> Option<()> { match event { BufferEvent::Operation(operation) => { - if let Some(project_id) = self.remote_id() { - let request = self.client.request(proto::UpdateBuffer { - project_id, + self.buffer_changes_tx + .unbounded_send(BufferMessage::Operation { buffer_id: buffer.read(cx).remote_id(), - operations: vec![language::proto::serialize_operation(operation)], - }); - cx.background().spawn(request).detach_and_log_err(cx); - } + operation: language::proto::serialize_operation(operation), + }) + .ok(); } BufferEvent::Edited { .. 
} => { let language_server = self @@ -4861,7 +4929,9 @@ impl Project { } if is_host { - this.synchronize_remote_buffers(cx).detach_and_log_err(cx); + this.buffer_changes_tx + .unbounded_send(BufferMessage::Resync) + .unwrap(); } cx.emit(Event::CollaboratorUpdated { From 7f73ebdab5686da524d57b7b45d157b44514ef60 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 10 Apr 2023 08:41:31 +0200 Subject: [PATCH 41/60] Apply `BufferReloaded` message to incomplete remote buffers --- crates/project/src/project.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index f915d53c01..fd10e17bbf 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -6310,7 +6310,13 @@ impl Project { let buffer = this .opened_buffers .get(&payload.buffer_id) - .and_then(|buffer| buffer.upgrade(cx)); + .and_then(|buffer| buffer.upgrade(cx)) + .or_else(|| { + this.incomplete_remote_buffers + .get(&payload.buffer_id) + .cloned() + .flatten() + }); if let Some(buffer) = buffer { buffer.update(cx, |buffer, cx| { buffer.did_reload(version, fingerprint, line_ending, mtime, cx); From 3a82c04248377e58bf6e1347535c60526bfdb55e Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 10 Apr 2023 10:01:44 +0200 Subject: [PATCH 42/60] Improve assertion message when buffer state diverges --- .../src/tests/randomized_integration_tests.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index fc491fd7f3..eb78ffd47a 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -981,7 +981,8 @@ fn check_consistency_between_clients(clients: &[(Rc, TestAppContext) guest_buffer.read_with(client_cx, |b, _| b.saved_version().clone()); assert_eq!( guest_saved_version, 
host_saved_version, - "guest saved version does not match host's for path {path:?} in project {project_id}", + "guest {} saved version does not match host's for path {path:?} in project {project_id}", + client.username ); let host_saved_version_fingerprint = @@ -990,26 +991,30 @@ fn check_consistency_between_clients(clients: &[(Rc, TestAppContext) guest_buffer.read_with(client_cx, |b, _| b.saved_version_fingerprint()); assert_eq!( guest_saved_version_fingerprint, host_saved_version_fingerprint, - "guest's saved fingerprint does not match host's for path {path:?} in project {project_id}", + "guest {} saved fingerprint does not match host's for path {path:?} in project {project_id}", + client.username ); let host_saved_mtime = host_buffer.read_with(host_cx, |b, _| b.saved_mtime()); let guest_saved_mtime = guest_buffer.read_with(client_cx, |b, _| b.saved_mtime()); assert_eq!( guest_saved_mtime, host_saved_mtime, - "guest's saved mtime does not match host's for path {path:?} in project {project_id}", + "guest {} saved mtime does not match host's for path {path:?} in project {project_id}", + client.username ); let host_is_dirty = host_buffer.read_with(host_cx, |b, _| b.is_dirty()); let guest_is_dirty = guest_buffer.read_with(client_cx, |b, _| b.is_dirty()); assert_eq!(guest_is_dirty, host_is_dirty, - "guest's dirty status does not match host's for path {path:?} in project {project_id}", + "guest {} dirty status does not match host's for path {path:?} in project {project_id}", + client.username ); let host_has_conflict = host_buffer.read_with(host_cx, |b, _| b.has_conflict()); let guest_has_conflict = guest_buffer.read_with(client_cx, |b, _| b.has_conflict()); assert_eq!(guest_has_conflict, host_has_conflict, - "guest's conflict status does not match host's for path {path:?} in project {project_id}", + "guest {} conflict status does not match host's for path {path:?} in project {project_id}", + client.username ); } } From 9761febf82d74c79fcfcef96a0fcd6b5c7ba43ba Mon Sep 
17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 10 Apr 2023 10:02:13 +0200 Subject: [PATCH 43/60] Avoid broadcasting `SaveBuffer` in response to a client's save request The host will send a `SaveBuffer` message anyway and this prevents re-querying the database, which could cause two `BufferSaved` messages to race and, as a result, cause guest to apply them in the wrong order. --- crates/collab/src/rpc.rs | 47 +--------------------------------------- 1 file changed, 1 insertion(+), 46 deletions(-) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index c9b9efdc4c..ce5a6a0a1f 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -228,7 +228,7 @@ impl Server { .add_message_handler(update_buffer_file) .add_message_handler(buffer_reloaded) .add_message_handler(buffer_saved) - .add_request_handler(save_buffer) + .add_request_handler(forward_project_request::) .add_request_handler(get_users) .add_request_handler(fuzzy_search_users) .add_request_handler(request_contact) @@ -1591,51 +1591,6 @@ where Ok(()) } -async fn save_buffer( - request: proto::SaveBuffer, - response: Response, - session: Session, -) -> Result<()> { - let project_id = ProjectId::from_proto(request.project_id); - let host_connection_id = { - let collaborators = session - .db() - .await - .project_collaborators(project_id, session.connection_id) - .await?; - collaborators - .iter() - .find(|collaborator| collaborator.is_host) - .ok_or_else(|| anyhow!("host not found"))? 
- .connection_id - }; - let response_payload = session - .peer - .forward_request(session.connection_id, host_connection_id, request.clone()) - .await?; - - let mut collaborators = session - .db() - .await - .project_collaborators(project_id, session.connection_id) - .await?; - collaborators.retain(|collaborator| collaborator.connection_id != session.connection_id); - let project_connection_ids = collaborators - .iter() - .map(|collaborator| collaborator.connection_id); - broadcast( - Some(host_connection_id), - project_connection_ids, - |conn_id| { - session - .peer - .forward_send(host_connection_id, conn_id, response_payload.clone()) - }, - ); - response.send(response_payload)?; - Ok(()) -} - async fn create_buffer_for_peer( request: proto::CreateBufferForPeer, session: Session, From e79815622c0dcff6414a268e88b1d9e8c9246ab7 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 10 Apr 2023 12:40:09 -0700 Subject: [PATCH 44/60] Preserve ordering between UpdateProject and CreateBufferForPeer messages Previously, because UpdateProject messages were sent in a separately- spawned task, they could be sent after CreateBufferForPeer messages that were intended to be sent after them. 
Co-authored-by: Antonio Scandurra --- crates/collab/src/tests/integration_tests.rs | 24 +-- crates/project/src/project.rs | 214 +++++++++---------- crates/workspace/src/workspace.rs | 8 +- 3 files changed, 117 insertions(+), 129 deletions(-) diff --git a/crates/collab/src/tests/integration_tests.rs b/crates/collab/src/tests/integration_tests.rs index 82b542cb6b..dda8035874 100644 --- a/crates/collab/src/tests/integration_tests.rs +++ b/crates/collab/src/tests/integration_tests.rs @@ -1633,9 +1633,7 @@ async fn test_project_reconnect( }) .await .unwrap(); - worktree_a2 - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; + deterministic.run_until_parked(); let worktree2_id = worktree_a2.read_with(cx_a, |tree, _| { assert!(tree.as_local().unwrap().is_shared()); tree.id() @@ -1696,11 +1694,9 @@ async fn test_project_reconnect( .unwrap(); // While client A is disconnected, add and remove worktrees from client A's project. - project_a1 - .update(cx_a, |project, cx| { - project.remove_worktree(worktree2_id, cx) - }) - .await; + project_a1.update(cx_a, |project, cx| { + project.remove_worktree(worktree2_id, cx) + }); let (worktree_a3, _) = project_a1 .update(cx_a, |p, cx| { p.find_or_create_local_worktree("/root-1/dir3", true, cx) @@ -1824,18 +1820,14 @@ async fn test_project_reconnect( }) .await .unwrap(); - worktree_a4 - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; + deterministic.run_until_parked(); let worktree4_id = worktree_a4.read_with(cx_a, |tree, _| { assert!(tree.as_local().unwrap().is_shared()); tree.id() }); - project_a1 - .update(cx_a, |project, cx| { - project.remove_worktree(worktree3_id, cx) - }) - .await; + project_a1.update(cx_a, |project, cx| { + project.remove_worktree(worktree3_id, cx) + }); deterministic.run_until_parked(); // While client B is disconnected, mutate a buffer on both the host and the guest. 
diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 6a8bbb98d2..655425a2a8 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -13,10 +13,7 @@ use client::{proto, Client, TypedEnvelope, UserStore}; use clock::ReplicaId; use collections::{hash_map, BTreeMap, HashMap, HashSet}; use futures::{ - channel::{ - mpsc::{self, UnboundedReceiver}, - oneshot, - }, + channel::mpsc::{self, UnboundedReceiver}, future::{try_join_all, Shared}, AsyncWriteExt, Future, FutureExt, StreamExt, TryFutureExt, }; @@ -142,6 +139,14 @@ enum BufferMessage { Resync, } +enum LocalProjectUpdate { + WorktreesChanged, + CreateBufferForPeer { + peer_id: proto::PeerId, + buffer_id: u64, + }, +} + enum OpenBuffer { Strong(ModelHandle), Weak(WeakModelHandle), @@ -156,8 +161,8 @@ enum WorktreeHandle { enum ProjectClientState { Local { remote_id: u64, - metadata_changed: mpsc::UnboundedSender>, - _maintain_metadata: Task<()>, + updates_tx: mpsc::UnboundedSender, + _send_updates: Task<()>, }, Remote { sharing_has_stopped: bool, @@ -725,22 +730,13 @@ impl Project { } } - fn metadata_changed(&mut self, cx: &mut ModelContext) -> impl Future { - let (tx, rx) = oneshot::channel(); - if let Some(ProjectClientState::Local { - metadata_changed, .. - }) = &mut self.client_state - { - let _ = metadata_changed.unbounded_send(tx); + fn metadata_changed(&mut self, cx: &mut ModelContext) { + if let Some(ProjectClientState::Local { updates_tx, .. }) = &mut self.client_state { + updates_tx + .unbounded_send(LocalProjectUpdate::WorktreesChanged) + .ok(); } cx.notify(); - - async move { - // If the project is shared, this will resolve when the `_maintain_metadata` task has - // a chance to update the metadata. Otherwise, it will resolve right away because `tx` - // will get dropped. 
- let _ = rx.await; - } } pub fn collaborators(&self) -> &HashMap { @@ -1026,40 +1022,90 @@ impl Project { .log_err(); } - let (metadata_changed_tx, mut metadata_changed_rx) = mpsc::unbounded(); + let (updates_tx, mut updates_rx) = mpsc::unbounded(); + let client = self.client.clone(); self.client_state = Some(ProjectClientState::Local { remote_id: project_id, - metadata_changed: metadata_changed_tx, - _maintain_metadata: cx.spawn_weak(move |this, mut cx| async move { - let mut txs = Vec::new(); - while let Some(tx) = metadata_changed_rx.next().await { - txs.push(tx); - while let Ok(Some(next_tx)) = metadata_changed_rx.try_next() { - txs.push(next_tx); - } - + updates_tx, + _send_updates: cx.spawn_weak(move |this, mut cx| async move { + while let Some(update) = updates_rx.next().await { let Some(this) = this.upgrade(&cx) else { break }; - let worktrees = - this.read_with(&cx, |this, cx| this.worktrees(cx).collect::>()); - let update_project = this - .read_with(&cx, |this, cx| { - this.client.request(proto::UpdateProject { - project_id, - worktrees: this.worktree_metadata_protos(cx), - }) - }) - .await; - if update_project.is_ok() { - for worktree in worktrees { - worktree.update(&mut cx, |worktree, cx| { - let worktree = worktree.as_local_mut().unwrap(); - worktree.share(project_id, cx).detach_and_log_err(cx) - }); - } - } - for tx in txs.drain(..) 
{ - let _ = tx.send(()); + match update { + LocalProjectUpdate::WorktreesChanged => { + let worktrees = this + .read_with(&cx, |this, cx| this.worktrees(cx).collect::>()); + let update_project = this + .read_with(&cx, |this, cx| { + this.client.request(proto::UpdateProject { + project_id, + worktrees: this.worktree_metadata_protos(cx), + }) + }) + .await; + if update_project.is_ok() { + for worktree in worktrees { + worktree.update(&mut cx, |worktree, cx| { + let worktree = worktree.as_local_mut().unwrap(); + worktree.share(project_id, cx).detach_and_log_err(cx) + }); + } + } + } + LocalProjectUpdate::CreateBufferForPeer { peer_id, buffer_id } => { + let buffer = this.update(&mut cx, |this, _| { + let buffer = this.opened_buffers.get(&buffer_id).unwrap(); + let shared_buffers = + this.shared_buffers.entry(peer_id).or_default(); + if shared_buffers.insert(buffer_id) { + if let OpenBuffer::Strong(buffer) = buffer { + Some(buffer.clone()) + } else { + None + } + } else { + None + } + }); + + let Some(buffer) = buffer else { continue }; + let operations = + buffer.read_with(&cx, |b, cx| b.serialize_ops(None, cx)); + let operations = operations.await; + let state = buffer.read_with(&cx, |buffer, _| buffer.to_proto()); + + let initial_state = proto::CreateBufferForPeer { + project_id, + peer_id: Some(peer_id), + variant: Some(proto::create_buffer_for_peer::Variant::State(state)), + }; + if client.send(initial_state).log_err().is_some() { + let client = client.clone(); + cx.background() + .spawn(async move { + let mut chunks = split_operations(operations).peekable(); + while let Some(chunk) = chunks.next() { + let is_last = chunks.peek().is_none(); + client.send(proto::CreateBufferForPeer { + project_id, + peer_id: Some(peer_id), + variant: Some( + proto::create_buffer_for_peer::Variant::Chunk( + proto::BufferChunk { + buffer_id, + operations: chunk, + is_last, + }, + ), + ), + })?; + } + anyhow::Ok(()) + }) + .await + .log_err(); + } + } } } }), @@ -4493,15 +4539,13 @@ 
impl Project { &mut cx, ) .await; + project.update(&mut cx, |project, _| { project.loading_local_worktrees.remove(&path); }); + let worktree = worktree?; - - project - .update(&mut cx, |project, cx| project.add_worktree(&worktree, cx)) - .await; - + project.update(&mut cx, |project, cx| project.add_worktree(&worktree, cx)); Ok(worktree) } .map_err(Arc::new) @@ -4517,11 +4561,7 @@ impl Project { }) } - pub fn remove_worktree( - &mut self, - id_to_remove: WorktreeId, - cx: &mut ModelContext, - ) -> impl Future { + pub fn remove_worktree(&mut self, id_to_remove: WorktreeId, cx: &mut ModelContext) { self.worktrees.retain(|worktree| { if let Some(worktree) = worktree.upgrade(cx) { let id = worktree.read(cx).id(); @@ -4535,14 +4575,10 @@ impl Project { false } }); - self.metadata_changed(cx) + self.metadata_changed(cx); } - fn add_worktree( - &mut self, - worktree: &ModelHandle, - cx: &mut ModelContext, - ) -> impl Future { + fn add_worktree(&mut self, worktree: &ModelHandle, cx: &mut ModelContext) { cx.observe(worktree, |_, _, cx| cx.notify()).detach(); if worktree.read(cx).is_local() { cx.subscribe(worktree, |this, worktree, event, cx| match event { @@ -4575,7 +4611,7 @@ impl Project { .detach(); cx.emit(Event::WorktreeAdded); - self.metadata_changed(cx) + self.metadata_changed(cx); } fn update_local_worktree_buffers( @@ -5963,47 +5999,11 @@ impl Project { cx: &mut AppContext, ) -> u64 { let buffer_id = buffer.read(cx).remote_id(); - if let Some(project_id) = self.remote_id() { - let shared_buffers = self.shared_buffers.entry(peer_id).or_default(); - if shared_buffers.insert(buffer_id) { - let buffer = buffer.clone(); - let operations = buffer.read(cx).serialize_ops(None, cx); - let client = self.client.clone(); - cx.spawn(move |cx| async move { - let operations = operations.await; - let state = buffer.read_with(&cx, |buffer, _| buffer.to_proto()); - - client.send(proto::CreateBufferForPeer { - project_id, - peer_id: Some(peer_id), - variant: 
Some(proto::create_buffer_for_peer::Variant::State(state)), - })?; - - cx.background() - .spawn(async move { - let mut chunks = split_operations(operations).peekable(); - while let Some(chunk) = chunks.next() { - let is_last = chunks.peek().is_none(); - client.send(proto::CreateBufferForPeer { - project_id, - peer_id: Some(peer_id), - variant: Some(proto::create_buffer_for_peer::Variant::Chunk( - proto::BufferChunk { - buffer_id, - operations: chunk, - is_last, - }, - )), - })?; - } - anyhow::Ok(()) - }) - .await - }) - .detach() - } + if let Some(ProjectClientState::Local { updates_tx, .. }) = &self.client_state { + updates_tx + .unbounded_send(LocalProjectUpdate::CreateBufferForPeer { peer_id, buffer_id }) + .ok(); } - buffer_id } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 334578d67d..739153fc78 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -1305,10 +1305,8 @@ impl Workspace { RemoveWorktreeFromProject(worktree_id): &RemoveWorktreeFromProject, cx: &mut ViewContext, ) { - let future = self - .project + self.project .update(cx, |project, cx| project.remove_worktree(*worktree_id, cx)); - cx.foreground().spawn(future).detach(); } fn project_path_for_path( @@ -3266,9 +3264,7 @@ mod tests { ); // Remove a project folder - project - .update(cx, |project, cx| project.remove_worktree(worktree_id, cx)) - .await; + project.update(cx, |project, cx| project.remove_worktree(worktree_id, cx)); assert_eq!( cx.current_window_title(window_id).as_deref(), Some("one.txt — root2") From e853e77d598e24f095b052d95eb9436d70e86109 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 10 Apr 2023 16:03:49 -0700 Subject: [PATCH 45/60] Upgrade postage for oneshot channel drop fix Previously, dropping a oneshot sender didn't wake the receiver. 
--- Cargo.lock | 5 +++-- Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4eb8ac1f95..4cc7ebc094 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4589,14 +4589,15 @@ checksum = "5da3b0203fd7ee5720aa0b5e790b591aa5d3f41c3ed2c34a3a393382198af2f7" [[package]] name = "postage" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a63d25391d04a097954b76aba742b6b5b74f213dfe3dbaeeb36e8ddc1c657f0b" +checksum = "af3fb618632874fb76937c2361a7f22afd393c982a2165595407edc75b06d3c1" dependencies = [ "atomic", "crossbeam-queue", "futures 0.3.25", "log", + "parking_lot 0.12.1", "pin-project", "pollster", "static_assertions", diff --git a/Cargo.toml b/Cargo.toml index 75178371d2..8113c0cfcf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -75,7 +75,7 @@ serde = { version = "1.0", features = ["derive", "rc"] } serde_derive = { version = "1.0", features = ["deserialize_in_place"] } serde_json = { version = "1.0", features = ["preserve_order", "raw_value"] } rand = { version = "0.8" } -postage = { version = "0.4.1", features = ["futures-traits"] } +postage = { version = "0.5", features = ["futures-traits"] } [patch.crates-io] tree-sitter = { git = "https://github.com/tree-sitter/tree-sitter", rev = "c51896d32dcc11a38e41f36e3deb1a6a9c4f4b14" } From 25e3c4e58638f349de098083d8f1f5b114b73c7a Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 10 Apr 2023 16:06:28 -0700 Subject: [PATCH 46/60] Fix leak when project is unshared while LSP handler waits for edits --- crates/language/src/buffer.rs | 12 ++++--- crates/language/src/proto.rs | 8 ++--- crates/project/src/lsp_command.rs | 40 +++++++++++------------ crates/project/src/project.rs | 42 +++++++++++++----------- crates/project/src/worktree.rs | 2 +- crates/text/src/text.rs | 54 ++++++++++++++++++++++--------- 6 files changed, 95 insertions(+), 63 deletions(-) diff --git a/crates/language/src/buffer.rs 
b/crates/language/src/buffer.rs index 8c9f34789f..fa8368f20b 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -377,7 +377,7 @@ impl Buffer { rpc::proto::LineEnding::from_i32(message.line_ending) .ok_or_else(|| anyhow!("missing line_ending"))?, )); - this.saved_version = proto::deserialize_version(message.saved_version); + this.saved_version = proto::deserialize_version(&message.saved_version); this.saved_version_fingerprint = proto::deserialize_fingerprint(&message.saved_version_fingerprint)?; this.saved_mtime = message @@ -1309,21 +1309,25 @@ impl Buffer { pub fn wait_for_edits( &mut self, edit_ids: impl IntoIterator, - ) -> impl Future { + ) -> impl Future> { self.text.wait_for_edits(edit_ids) } pub fn wait_for_anchors<'a>( &mut self, anchors: impl IntoIterator, - ) -> impl Future { + ) -> impl Future> { self.text.wait_for_anchors(anchors) } - pub fn wait_for_version(&mut self, version: clock::Global) -> impl Future { + pub fn wait_for_version(&mut self, version: clock::Global) -> impl Future> { self.text.wait_for_version(version) } + pub fn give_up_waiting(&mut self) { + self.text.give_up_waiting(); + } + pub fn set_active_selections( &mut self, selections: Arc<[Selection]>, diff --git a/crates/language/src/proto.rs b/crates/language/src/proto.rs index 1b95e3ace9..1f6ecd0a90 100644 --- a/crates/language/src/proto.rs +++ b/crates/language/src/proto.rs @@ -220,7 +220,7 @@ pub fn deserialize_operation(message: proto::Operation) -> Result EditOperation local: edit.local_timestamp, lamport: edit.lamport_timestamp, }, - version: deserialize_version(edit.version), + version: deserialize_version(&edit.version), ranges: edit.ranges.into_iter().map(deserialize_range).collect(), new_text: edit.new_text.into_iter().map(Arc::from).collect(), } @@ -509,7 +509,7 @@ pub fn deserialize_transaction(transaction: proto::Transaction) -> Result Range { FullOffset(range.start as usize)..FullOffset(range.end as usize) } -pub fn 
deserialize_version(message: Vec) -> clock::Global { +pub fn deserialize_version(message: &[proto::VectorClockEntry]) -> clock::Global { let mut version = clock::Global::new(); for entry in message { version.observe(clock::Local { diff --git a/crates/project/src/lsp_command.rs b/crates/project/src/lsp_command.rs index 27c8712506..b6c5c633f0 100644 --- a/crates/project/src/lsp_command.rs +++ b/crates/project/src/lsp_command.rs @@ -161,9 +161,9 @@ impl LspCommand for PrepareRename { .ok_or_else(|| anyhow!("invalid position"))?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; Ok(Self { position: buffer.read_with(&cx, |buffer, _| position.to_point_utf16(buffer)), @@ -199,9 +199,9 @@ impl LspCommand for PrepareRename { if message.can_rename { buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; let start = message.start.and_then(deserialize_anchor); let end = message.end.and_then(deserialize_anchor); Ok(start.zip(end).map(|(start, end)| start..end)) @@ -281,9 +281,9 @@ impl LspCommand for PerformRename { .ok_or_else(|| anyhow!("invalid position"))?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; Ok(Self { position: buffer.read_with(&cx, |buffer, _| position.to_point_utf16(buffer)), new_name: message.new_name, @@ -378,9 +378,9 @@ impl LspCommand for GetDefinition { .ok_or_else(|| anyhow!("invalid position"))?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; Ok(Self { position: buffer.read_with(&cx, |buffer, _| 
position.to_point_utf16(buffer)), }) @@ -464,9 +464,9 @@ impl LspCommand for GetTypeDefinition { .ok_or_else(|| anyhow!("invalid position"))?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; Ok(Self { position: buffer.read_with(&cx, |buffer, _| position.to_point_utf16(buffer)), }) @@ -537,7 +537,7 @@ async fn location_links_from_proto( .ok_or_else(|| anyhow!("missing origin end"))?; buffer .update(&mut cx, |buffer, _| buffer.wait_for_anchors([&start, &end])) - .await; + .await?; Some(Location { buffer, range: start..end, @@ -562,7 +562,7 @@ async fn location_links_from_proto( .ok_or_else(|| anyhow!("missing target end"))?; buffer .update(&mut cx, |buffer, _| buffer.wait_for_anchors([&start, &end])) - .await; + .await?; let target = Location { buffer, range: start..end, @@ -774,9 +774,9 @@ impl LspCommand for GetReferences { .ok_or_else(|| anyhow!("invalid position"))?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; Ok(Self { position: buffer.read_with(&cx, |buffer, _| position.to_point_utf16(buffer)), }) @@ -827,7 +827,7 @@ impl LspCommand for GetReferences { .ok_or_else(|| anyhow!("missing target end"))?; target_buffer .update(&mut cx, |buffer, _| buffer.wait_for_anchors([&start, &end])) - .await; + .await?; locations.push(Location { buffer: target_buffer, range: start..end, @@ -915,9 +915,9 @@ impl LspCommand for GetDocumentHighlights { .ok_or_else(|| anyhow!("invalid position"))?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; Ok(Self { position: buffer.read_with(&cx, |buffer, _| position.to_point_utf16(buffer)), }) @@ -965,7 +965,7 @@ 
impl LspCommand for GetDocumentHighlights { .ok_or_else(|| anyhow!("missing target end"))?; buffer .update(&mut cx, |buffer, _| buffer.wait_for_anchors([&start, &end])) - .await; + .await?; let kind = match proto::document_highlight::Kind::from_i32(highlight.kind) { Some(proto::document_highlight::Kind::Text) => DocumentHighlightKind::TEXT, Some(proto::document_highlight::Kind::Read) => DocumentHighlightKind::READ, @@ -1117,9 +1117,9 @@ impl LspCommand for GetHover { .ok_or_else(|| anyhow!("invalid position"))?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; Ok(Self { position: buffer.read_with(&cx, |buffer, _| position.to_point_utf16(buffer)), }) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 655425a2a8..803cac6a95 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1182,6 +1182,11 @@ impl Project { } for open_buffer in self.opened_buffers.values_mut() { + // Wake up any tasks waiting for peers' edits to this buffer. 
+ if let Some(buffer) = open_buffer.upgrade(cx) { + buffer.update(cx, |buffer, _| buffer.give_up_waiting()); + } + if let OpenBuffer::Strong(buffer) = open_buffer { *open_buffer = OpenBuffer::Weak(buffer.downgrade()); } @@ -3738,9 +3743,9 @@ impl Project { } else { source_buffer_handle .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(response.version)) + buffer.wait_for_version(deserialize_version(&response.version)) }) - .await; + .await?; let completions = response.completions.into_iter().map(|completion| { language::proto::deserialize_completion(completion, language.clone()) @@ -3831,7 +3836,7 @@ impl Project { .update(&mut cx, |buffer, _| { buffer.wait_for_edits(transaction.edit_ids.iter().copied()) }) - .await; + .await?; if push_to_history { buffer_handle.update(&mut cx, |buffer, _| { buffer.push_transaction(transaction.clone(), Instant::now()); @@ -3939,9 +3944,9 @@ impl Project { } else { buffer_handle .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(response.version)) + buffer.wait_for_version(deserialize_version(&response.version)) }) - .await; + .await?; response .actions @@ -5425,8 +5430,6 @@ impl Project { mut cx: AsyncAppContext, ) -> Result { let buffer_id = envelope.payload.buffer_id; - let requested_version = deserialize_version(envelope.payload.version); - let (project_id, buffer) = this.update(&mut cx, |this, cx| { let project_id = this.remote_id().ok_or_else(|| anyhow!("not connected"))?; let buffer = this @@ -5434,13 +5437,14 @@ impl Project { .get(&buffer_id) .and_then(|buffer| buffer.upgrade(cx)) .ok_or_else(|| anyhow!("unknown buffer id {}", buffer_id))?; - Ok::<_, anyhow::Error>((project_id, buffer)) + anyhow::Ok((project_id, buffer)) })?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(requested_version) + buffer.wait_for_version(deserialize_version(&envelope.payload.version)) }) - .await; + .await?; + let buffer_id = buffer.read_with(&cx, |buffer, _| 
buffer.remote_id()); let (saved_version, fingerprint, mtime) = this .update(&mut cx, |this, cx| this.save_buffer(buffer, cx)) @@ -5503,7 +5507,7 @@ impl Project { this.shared_buffers.entry(guest_id).or_default().clear(); for buffer in envelope.payload.buffers { let buffer_id = buffer.id; - let remote_version = language::proto::deserialize_version(buffer.version); + let remote_version = language::proto::deserialize_version(&buffer.version); if let Some(buffer) = this.buffer_for_id(buffer_id, cx) { this.shared_buffers .entry(guest_id) @@ -5619,10 +5623,10 @@ impl Project { .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id)) })?; - let version = deserialize_version(envelope.payload.version); + let version = deserialize_version(&envelope.payload.version); buffer .update(&mut cx, |buffer, _| buffer.wait_for_version(version)) - .await; + .await?; let version = buffer.read_with(&cx, |buffer, _| buffer.version()); let position = envelope @@ -5710,9 +5714,9 @@ impl Project { })?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(envelope.payload.version)) + buffer.wait_for_version(deserialize_version(&envelope.payload.version)) }) - .await; + .await?; let version = buffer.read_with(&cx, |buffer, _| buffer.version()); let code_actions = this.update(&mut cx, |this, cx| { @@ -5979,7 +5983,7 @@ impl Project { .update(&mut cx, |buffer, _| { buffer.wait_for_edits(transaction.edit_ids.iter().copied()) }) - .await; + .await?; if push_to_history { buffer.update(&mut cx, |buffer, _| { @@ -6098,7 +6102,7 @@ impl Project { let send_updates_for_buffers = response.buffers.into_iter().map(|buffer| { let client = client.clone(); let buffer_id = buffer.id; - let remote_version = language::proto::deserialize_version(buffer.version); + let remote_version = language::proto::deserialize_version(&buffer.version); this.read_with(&cx, |this, cx| { if let Some(buffer) = this.buffer_for_id(buffer_id, cx) { let operations = 
buffer.read(cx).serialize_ops(Some(remote_version), cx); @@ -6263,7 +6267,7 @@ impl Project { mut cx: AsyncAppContext, ) -> Result<()> { let fingerprint = deserialize_fingerprint(&envelope.payload.fingerprint)?; - let version = deserialize_version(envelope.payload.version); + let version = deserialize_version(&envelope.payload.version); let mtime = envelope .payload .mtime @@ -6296,7 +6300,7 @@ impl Project { mut cx: AsyncAppContext, ) -> Result<()> { let payload = envelope.payload; - let version = deserialize_version(payload.version); + let version = deserialize_version(&payload.version); let fingerprint = deserialize_fingerprint(&payload.fingerprint)?; let line_ending = deserialize_line_ending( proto::LineEnding::from_i32(payload.line_ending) diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index f845a8f637..792e09a00a 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -1064,7 +1064,7 @@ impl RemoteWorktree { version: serialize_version(&version), }) .await?; - let version = deserialize_version(response.version); + let version = deserialize_version(&response.version); let fingerprint = deserialize_fingerprint(&response.fingerprint)?; let mtime = response .mtime diff --git a/crates/text/src/text.rs b/crates/text/src/text.rs index c9341df23d..b857ec5d5e 100644 --- a/crates/text/src/text.rs +++ b/crates/text/src/text.rs @@ -11,14 +11,14 @@ mod tests; mod undo_map; pub use anchor::*; -use anyhow::Result; +use anyhow::{anyhow, Result}; use clock::ReplicaId; use collections::{HashMap, HashSet}; use fs::LineEnding; use locator::Locator; use operation_queue::OperationQueue; pub use patch::Patch; -use postage::{barrier, oneshot, prelude::*}; +use postage::{oneshot, prelude::*}; pub use rope::*; pub use selection::*; @@ -52,7 +52,7 @@ pub struct Buffer { pub lamport_clock: clock::Lamport, subscriptions: Topic, edit_id_resolvers: HashMap>>, - version_barriers: Vec<(clock::Global, barrier::Sender)>, + 
wait_for_version_txs: Vec<(clock::Global, oneshot::Sender<()>)>, } #[derive(Clone)] @@ -522,7 +522,7 @@ impl Buffer { lamport_clock, subscriptions: Default::default(), edit_id_resolvers: Default::default(), - version_barriers: Default::default(), + wait_for_version_txs: Default::default(), } } @@ -793,8 +793,14 @@ impl Buffer { } } } - self.version_barriers - .retain(|(version, _)| !self.snapshot.version().observed_all(version)); + self.wait_for_version_txs.retain_mut(|(version, tx)| { + if self.snapshot.version().observed_all(version) { + tx.try_send(()).ok(); + false + } else { + true + } + }); Ok(()) } @@ -1305,7 +1311,7 @@ impl Buffer { pub fn wait_for_edits( &mut self, edit_ids: impl IntoIterator, - ) -> impl 'static + Future { + ) -> impl 'static + Future> { let mut futures = Vec::new(); for edit_id in edit_ids { if !self.version.observed(edit_id) { @@ -1317,15 +1323,18 @@ impl Buffer { async move { for mut future in futures { - future.recv().await; + if future.recv().await.is_none() { + Err(anyhow!("gave up waiting for edits"))?; + } } + Ok(()) } } pub fn wait_for_anchors<'a>( &mut self, anchors: impl IntoIterator, - ) -> impl 'static + Future { + ) -> impl 'static + Future> { let mut futures = Vec::new(); for anchor in anchors { if !self.version.observed(anchor.timestamp) @@ -1343,21 +1352,36 @@ impl Buffer { async move { for mut future in futures { - future.recv().await; + if future.recv().await.is_none() { + Err(anyhow!("gave up waiting for anchors"))?; + } } + Ok(()) } } - pub fn wait_for_version(&mut self, version: clock::Global) -> impl Future { - let (tx, mut rx) = barrier::channel(); + pub fn wait_for_version(&mut self, version: clock::Global) -> impl Future> { + let mut rx = None; if !self.snapshot.version.observed_all(&version) { - self.version_barriers.push((version, tx)); + let channel = oneshot::channel(); + self.wait_for_version_txs.push((version, channel.0)); + rx = Some(channel.1); } async move { - rx.recv().await; + if let Some(mut rx) = rx 
{ + if rx.recv().await.is_none() { + Err(anyhow!("gave up waiting for version"))?; + } + } + Ok(()) } } + pub fn give_up_waiting(&mut self) { + self.edit_id_resolvers.clear(); + self.wait_for_version_txs.clear(); + } + fn resolve_edit(&mut self, edit_id: clock::Local) { for mut tx in self .edit_id_resolvers @@ -1365,7 +1389,7 @@ impl Buffer { .into_iter() .flatten() { - let _ = tx.try_send(()); + tx.try_send(()).ok(); } } } From abfbba68f055fd145c63c503b2246ecad883d3ae Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 10 Apr 2023 18:28:34 -0700 Subject: [PATCH 47/60] Improve randomized test assertion message when diff base is wrong --- crates/collab/src/tests/randomized_integration_tests.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index eb78ffd47a..00273722c4 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -973,7 +973,11 @@ fn check_consistency_between_clients(clients: &[(Rc, TestAppContext) host_buffer.read_with(host_cx, |b, _| b.diff_base().map(ToString::to_string)); let guest_diff_base = guest_buffer .read_with(client_cx, |b, _| b.diff_base().map(ToString::to_string)); - assert_eq!(guest_diff_base, host_diff_base); + assert_eq!( + guest_diff_base, host_diff_base, + "guest {} diff base does not match host's for path {path:?} in project {project_id}", + client.username + ); let host_saved_version = host_buffer.read_with(host_cx, |b, _| b.saved_version().clone()); From 643381ce0cb3f523ddf4a8fbc321e604180027e1 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 08:50:26 +0200 Subject: [PATCH 48/60] Make `UpdateDiffBase` a `Foreground` message to prevent reordering --- crates/rpc/src/proto.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index 
823ffa7a19..a27c6ac1bb 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -233,7 +233,7 @@ messages!( (UpdateProject, Foreground), (UpdateProjectCollaborator, Foreground), (UpdateWorktree, Foreground), - (UpdateDiffBase, Background), + (UpdateDiffBase, Foreground), (GetPrivateUserInfo, Foreground), (GetPrivateUserInfoResponse, Foreground), ); From 6ba5e06247fda2442754dda67eb6b44ffea8f692 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 10:42:43 +0200 Subject: [PATCH 49/60] Stop waiting for buffers when releasing a remote project --- crates/project/src/project.rs | 45 +++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 803cac6a95..1c1c91243d 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1111,7 +1111,7 @@ impl Project { }), }); - let _ = self.metadata_changed(cx); + self.metadata_changed(cx); cx.emit(Event::RemoteIdChanged(Some(project_id))); cx.notify(); Ok(()) @@ -1124,7 +1124,7 @@ impl Project { ) -> Result<()> { self.shared_buffers.clear(); self.set_collaborators_from_proto(message.collaborators, cx)?; - let _ = self.metadata_changed(cx); + self.metadata_changed(cx); Ok(()) } @@ -1160,6 +1160,13 @@ impl Project { } pub fn unshare(&mut self, cx: &mut ModelContext) -> Result<()> { + self.unshare_internal(cx)?; + self.metadata_changed(cx); + cx.notify(); + Ok(()) + } + + fn unshare_internal(&mut self, cx: &mut AppContext) -> Result<()> { if self.is_remote() { return Err(anyhow!("attempted to unshare a remote project")); } @@ -1192,8 +1199,6 @@ impl Project { } } - let _ = self.metadata_changed(cx); - cx.notify(); self.client.send(proto::UnshareProject { project_id: remote_id, })?; @@ -1205,13 +1210,21 @@ impl Project { } pub fn disconnected_from_host(&mut self, cx: &mut ModelContext) { + self.disconnected_from_host_internal(cx); + cx.emit(Event::DisconnectedFromHost); + 
cx.notify(); + } + + fn disconnected_from_host_internal(&mut self, cx: &mut AppContext) { if let Some(ProjectClientState::Remote { sharing_has_stopped, .. }) = &mut self.client_state { *sharing_has_stopped = true; + self.collaborators.clear(); + for worktree in &self.worktrees { if let Some(worktree) = worktree.upgrade(cx) { worktree.update(cx, |worktree, _| { @@ -1221,8 +1234,17 @@ impl Project { }); } } - cx.emit(Event::DisconnectedFromHost); - cx.notify(); + + for open_buffer in self.opened_buffers.values_mut() { + // Wake up any tasks waiting for peers' edits to this buffer. + if let Some(buffer) = open_buffer.upgrade(cx) { + buffer.update(cx, |buffer, _| buffer.give_up_waiting()); + } + + if let OpenBuffer::Strong(buffer) = open_buffer { + *open_buffer = OpenBuffer::Weak(buffer.downgrade()); + } + } // Wake up all futures currently waiting on a buffer to get opened, // to give them a chance to fail now that we've disconnected. @@ -6183,7 +6205,7 @@ impl Project { } } - let _ = self.metadata_changed(cx); + self.metadata_changed(cx); for (id, _) in old_worktrees_by_id { cx.emit(Event::WorktreeRemoved(id)); } @@ -6577,17 +6599,16 @@ impl<'a> Iterator for PathMatchCandidateSetIter<'a> { impl Entity for Project { type Event = Event; - fn release(&mut self, _: &mut gpui::AppContext) { + fn release(&mut self, cx: &mut gpui::AppContext) { match &self.client_state { - Some(ProjectClientState::Local { remote_id, .. }) => { - let _ = self.client.send(proto::UnshareProject { - project_id: *remote_id, - }); + Some(ProjectClientState::Local { .. }) => { + let _ = self.unshare_internal(cx); } Some(ProjectClientState::Remote { remote_id, .. 
}) => { let _ = self.client.send(proto::LeaveProject { project_id: *remote_id, }); + self.disconnected_from_host_internal(cx); } _ => {} } From 9e6d865882275bf3dbc7f9047d15ed5038a0184f Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 10:43:05 +0200 Subject: [PATCH 50/60] Prevent already dropped model from being upgraded during `release` --- crates/gpui/src/app.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/gpui/src/app.rs b/crates/gpui/src/app.rs index bc38b28bba..534f77a349 100644 --- a/crates/gpui/src/app.rs +++ b/crates/gpui/src/app.rs @@ -2618,7 +2618,7 @@ impl UpgradeModelHandle for AppContext { &self, handle: &WeakModelHandle, ) -> Option> { - if self.models.contains_key(&handle.model_id) { + if self.ref_counts.lock().is_entity_alive(handle.model_id) { Some(ModelHandle::new(handle.model_id, &self.ref_counts)) } else { None @@ -2626,11 +2626,11 @@ impl UpgradeModelHandle for AppContext { } fn model_handle_is_upgradable(&self, handle: &WeakModelHandle) -> bool { - self.models.contains_key(&handle.model_id) + self.ref_counts.lock().is_entity_alive(handle.model_id) } fn upgrade_any_model_handle(&self, handle: &AnyWeakModelHandle) -> Option { - if self.models.contains_key(&handle.model_id) { + if self.ref_counts.lock().is_entity_alive(handle.model_id) { Some(AnyModelHandle::new( handle.model_id, handle.model_type, From ac532cb6fade9fe813ad5d21a6ca06ef17fbcd08 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 14:52:07 +0200 Subject: [PATCH 51/60] Use `LspCommand` to handle completions --- crates/project/src/lsp_command.rs | 209 ++++++++++++++++++++++++++- crates/project/src/project.rs | 227 +----------------------------- 2 files changed, 212 insertions(+), 224 deletions(-) diff --git a/crates/project/src/lsp_command.rs b/crates/project/src/lsp_command.rs index b6c5c633f0..334fe37c3c 100644 --- a/crates/project/src/lsp_command.rs +++ b/crates/project/src/lsp_command.rs @@ -4,11 +4,13 @@ 
use crate::{ use anyhow::{anyhow, Result}; use async_trait::async_trait; use client::proto::{self, PeerId}; +use fs::LineEnding; use gpui::{AppContext, AsyncAppContext, ModelHandle}; use language::{ point_from_lsp, point_to_lsp, proto::{deserialize_anchor, deserialize_version, serialize_anchor, serialize_version}, - range_from_lsp, Anchor, Bias, Buffer, CachedLspAdapter, PointUtf16, ToPointUtf16, + range_from_lsp, Anchor, Bias, Buffer, CachedLspAdapter, CharKind, Completion, PointUtf16, + ToOffset, ToPointUtf16, Unclipped, }; use lsp::{DocumentHighlightKind, LanguageServer, ServerCapabilities}; use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag}; @@ -91,6 +93,10 @@ pub(crate) struct GetHover { pub position: PointUtf16, } +pub(crate) struct GetCompletions { + pub position: PointUtf16, +} + #[async_trait(?Send)] impl LspCommand for PrepareRename { type Response = Option>; @@ -1199,3 +1205,204 @@ impl LspCommand for GetHover { message.buffer_id } } + +#[async_trait(?Send)] +impl LspCommand for GetCompletions { + type Response = Vec; + type LspRequest = lsp::request::Completion; + type ProtoRequest = proto::GetCompletions; + + fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::CompletionParams { + lsp::CompletionParams { + text_document_position: lsp::TextDocumentPositionParams::new( + lsp::TextDocumentIdentifier::new(lsp::Url::from_file_path(path).unwrap()), + point_to_lsp(self.position), + ), + context: Default::default(), + work_done_progress_params: Default::default(), + partial_result_params: Default::default(), + } + } + + async fn response_from_lsp( + self, + completions: Option, + _: ModelHandle, + buffer: ModelHandle, + cx: AsyncAppContext, + ) -> Result> { + let completions = if let Some(completions) = completions { + match completions { + lsp::CompletionResponse::Array(completions) => completions, + lsp::CompletionResponse::List(list) => list.items, + } + } else { + Default::default() + }; + + let completions = buffer.read_with(&cx, |buffer, 
_| { + let language = buffer.language().cloned(); + let snapshot = buffer.snapshot(); + let clipped_position = buffer.clip_point_utf16(Unclipped(self.position), Bias::Left); + let mut range_for_token = None; + completions + .into_iter() + .filter_map(move |mut lsp_completion| { + // For now, we can only handle additional edits if they are returned + // when resolving the completion, not if they are present initially. + if lsp_completion + .additional_text_edits + .as_ref() + .map_or(false, |edits| !edits.is_empty()) + { + return None; + } + + let (old_range, mut new_text) = match lsp_completion.text_edit.as_ref() { + // If the language server provides a range to overwrite, then + // check that the range is valid. + Some(lsp::CompletionTextEdit::Edit(edit)) => { + let range = range_from_lsp(edit.range); + let start = snapshot.clip_point_utf16(range.start, Bias::Left); + let end = snapshot.clip_point_utf16(range.end, Bias::Left); + if start != range.start.0 || end != range.end.0 { + log::info!("completion out of expected range"); + return None; + } + ( + snapshot.anchor_before(start)..snapshot.anchor_after(end), + edit.new_text.clone(), + ) + } + // If the language server does not provide a range, then infer + // the range based on the syntax tree. 
+ None => { + if self.position != clipped_position { + log::info!("completion out of expected range"); + return None; + } + let Range { start, end } = range_for_token + .get_or_insert_with(|| { + let offset = self.position.to_offset(&snapshot); + let (range, kind) = snapshot.surrounding_word(offset); + if kind == Some(CharKind::Word) { + range + } else { + offset..offset + } + }) + .clone(); + let text = lsp_completion + .insert_text + .as_ref() + .unwrap_or(&lsp_completion.label) + .clone(); + ( + snapshot.anchor_before(start)..snapshot.anchor_after(end), + text, + ) + } + Some(lsp::CompletionTextEdit::InsertAndReplace(_)) => { + log::info!("unsupported insert/replace completion"); + return None; + } + }; + + let language = language.clone(); + LineEnding::normalize(&mut new_text); + Some(async move { + let mut label = None; + if let Some(language) = language { + language.process_completion(&mut lsp_completion).await; + label = language.label_for_completion(&lsp_completion).await; + } + Completion { + old_range, + new_text, + label: label.unwrap_or_else(|| { + language::CodeLabel::plain( + lsp_completion.label.clone(), + lsp_completion.filter_text.as_deref(), + ) + }), + lsp_completion, + } + }) + }) + }); + + Ok(futures::future::join_all(completions).await) + } + + fn to_proto(&self, project_id: u64, buffer: &Buffer) -> proto::GetCompletions { + let anchor = buffer.anchor_after(self.position); + proto::GetCompletions { + project_id, + buffer_id: buffer.remote_id(), + position: Some(language::proto::serialize_anchor(&anchor)), + version: serialize_version(&buffer.version()), + } + } + + async fn from_proto( + message: proto::GetCompletions, + project: ModelHandle, + buffer: ModelHandle, + mut cx: AsyncAppContext, + ) -> Result { + let version = deserialize_version(&message.version); + buffer + .update(&mut cx, |buffer, _| buffer.wait_for_version(version)) + .await?; + let position = message + .position + .and_then(language::proto::deserialize_anchor) + .map(|p| { + 
buffer.read_with(&cx, |buffer, _| { + buffer.clip_point_utf16(Unclipped(p.to_point_utf16(buffer)), Bias::Left) + }) + }) + .ok_or_else(|| anyhow!("invalid position"))?; + Ok(Self { position }) + } + + fn response_to_proto( + completions: Vec, + _: &mut Project, + _: PeerId, + buffer_version: &clock::Global, + _: &mut AppContext, + ) -> proto::GetCompletionsResponse { + proto::GetCompletionsResponse { + completions: completions + .iter() + .map(language::proto::serialize_completion) + .collect(), + version: serialize_version(&buffer_version), + } + } + + async fn response_from_proto( + self, + message: proto::GetCompletionsResponse, + _: ModelHandle, + buffer: ModelHandle, + mut cx: AsyncAppContext, + ) -> Result> { + buffer + .update(&mut cx, |buffer, _| { + buffer.wait_for_version(deserialize_version(&message.version)) + }) + .await?; + + let language = buffer.read_with(&cx, |buffer, _| buffer.language().cloned()); + let completions = message.completions.into_iter().map(|completion| { + language::proto::deserialize_completion(completion, language.clone()) + }); + futures::future::try_join_all(completions).await + } + + fn buffer_id_from_proto(message: &proto::GetCompletions) -> u64 { + message.buffer_id + } +} diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 1c1c91243d..ed5b8e98be 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -410,7 +410,7 @@ impl Project { client.add_model_request_handler(Self::handle_synchronize_buffers); client.add_model_request_handler(Self::handle_format_buffers); client.add_model_request_handler(Self::handle_get_code_actions); - client.add_model_request_handler(Self::handle_get_completions); + client.add_model_request_handler(Self::handle_lsp_command::); client.add_model_request_handler(Self::handle_lsp_command::); client.add_model_request_handler(Self::handle_lsp_command::); client.add_model_request_handler(Self::handle_lsp_command::); @@ -3596,188 +3596,12 @@ impl Project 
{ pub fn completions( &self, - source_buffer_handle: &ModelHandle, + buffer: &ModelHandle, position: T, cx: &mut ModelContext, ) -> Task>> { - let source_buffer_handle = source_buffer_handle.clone(); - let source_buffer = source_buffer_handle.read(cx); - let buffer_id = source_buffer.remote_id(); - let language = source_buffer.language().cloned(); - let worktree; - let buffer_abs_path; - if let Some(file) = File::from_dyn(source_buffer.file()) { - worktree = file.worktree.clone(); - buffer_abs_path = file.as_local().map(|f| f.abs_path(cx)); - } else { - return Task::ready(Ok(Default::default())); - }; - - let position = Unclipped(position.to_point_utf16(source_buffer)); - let anchor = source_buffer.anchor_after(position); - - if worktree.read(cx).as_local().is_some() { - let buffer_abs_path = buffer_abs_path.unwrap(); - let lang_server = - if let Some((_, server)) = self.language_server_for_buffer(source_buffer, cx) { - server.clone() - } else { - return Task::ready(Ok(Default::default())); - }; - - cx.spawn(|_, cx| async move { - let completions = lang_server - .request::(lsp::CompletionParams { - text_document_position: lsp::TextDocumentPositionParams::new( - lsp::TextDocumentIdentifier::new( - lsp::Url::from_file_path(buffer_abs_path).unwrap(), - ), - point_to_lsp(position.0), - ), - context: Default::default(), - work_done_progress_params: Default::default(), - partial_result_params: Default::default(), - }) - .await - .context("lsp completion request failed")?; - - let completions = if let Some(completions) = completions { - match completions { - lsp::CompletionResponse::Array(completions) => completions, - lsp::CompletionResponse::List(list) => list.items, - } - } else { - Default::default() - }; - - let completions = source_buffer_handle.read_with(&cx, |this, _| { - let snapshot = this.snapshot(); - let clipped_position = this.clip_point_utf16(position, Bias::Left); - let mut range_for_token = None; - completions - .into_iter() - .filter_map(move |mut 
lsp_completion| { - // For now, we can only handle additional edits if they are returned - // when resolving the completion, not if they are present initially. - if lsp_completion - .additional_text_edits - .as_ref() - .map_or(false, |edits| !edits.is_empty()) - { - return None; - } - - let (old_range, mut new_text) = match lsp_completion.text_edit.as_ref() - { - // If the language server provides a range to overwrite, then - // check that the range is valid. - Some(lsp::CompletionTextEdit::Edit(edit)) => { - let range = range_from_lsp(edit.range); - let start = snapshot.clip_point_utf16(range.start, Bias::Left); - let end = snapshot.clip_point_utf16(range.end, Bias::Left); - if start != range.start.0 || end != range.end.0 { - log::info!("completion out of expected range"); - return None; - } - ( - snapshot.anchor_before(start)..snapshot.anchor_after(end), - edit.new_text.clone(), - ) - } - // If the language server does not provide a range, then infer - // the range based on the syntax tree. 
- None => { - if position.0 != clipped_position { - log::info!("completion out of expected range"); - return None; - } - let Range { start, end } = range_for_token - .get_or_insert_with(|| { - let offset = position.to_offset(&snapshot); - let (range, kind) = snapshot.surrounding_word(offset); - if kind == Some(CharKind::Word) { - range - } else { - offset..offset - } - }) - .clone(); - let text = lsp_completion - .insert_text - .as_ref() - .unwrap_or(&lsp_completion.label) - .clone(); - ( - snapshot.anchor_before(start)..snapshot.anchor_after(end), - text, - ) - } - Some(lsp::CompletionTextEdit::InsertAndReplace(_)) => { - log::info!("unsupported insert/replace completion"); - return None; - } - }; - - LineEnding::normalize(&mut new_text); - let language = language.clone(); - Some(async move { - let mut label = None; - if let Some(language) = language { - language.process_completion(&mut lsp_completion).await; - label = language.label_for_completion(&lsp_completion).await; - } - Completion { - old_range, - new_text, - label: label.unwrap_or_else(|| { - CodeLabel::plain( - lsp_completion.label.clone(), - lsp_completion.filter_text.as_deref(), - ) - }), - lsp_completion, - } - }) - }) - }); - - Ok(futures::future::join_all(completions).await) - }) - } else if let Some(project_id) = self.remote_id() { - let rpc = self.client.clone(); - let message = proto::GetCompletions { - project_id, - buffer_id, - position: Some(language::proto::serialize_anchor(&anchor)), - version: serialize_version(&source_buffer.version()), - }; - cx.spawn_weak(|this, mut cx| async move { - let response = rpc.request(message).await?; - - if this - .upgrade(&cx) - .ok_or_else(|| anyhow!("project was dropped"))? 
- .read_with(&cx, |this, _| this.is_read_only()) - { - return Err(anyhow!( - "failed to get completions: project was disconnected" - )); - } else { - source_buffer_handle - .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(&response.version)) - }) - .await?; - - let completions = response.completions.into_iter().map(|completion| { - language::proto::deserialize_completion(completion, language.clone()) - }); - futures::future::try_join_all(completions).await - } - }) - } else { - Task::ready(Ok(Default::default())) - } + let position = position.to_point_utf16(buffer.read(cx)); + self.request_lsp(buffer.clone(), GetCompletions { position }, cx) } pub fn apply_additional_edits_for_completion( @@ -5632,49 +5456,6 @@ impl Project { }) } - async fn handle_get_completions( - this: ModelHandle, - envelope: TypedEnvelope, - _: Arc, - mut cx: AsyncAppContext, - ) -> Result { - let buffer = this.read_with(&cx, |this, cx| { - this.opened_buffers - .get(&envelope.payload.buffer_id) - .and_then(|buffer| buffer.upgrade(cx)) - .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id)) - })?; - - let version = deserialize_version(&envelope.payload.version); - buffer - .update(&mut cx, |buffer, _| buffer.wait_for_version(version)) - .await?; - let version = buffer.read_with(&cx, |buffer, _| buffer.version()); - - let position = envelope - .payload - .position - .and_then(language::proto::deserialize_anchor) - .map(|p| { - buffer.read_with(&cx, |buffer, _| { - buffer.clip_point_utf16(Unclipped(p.to_point_utf16(buffer)), Bias::Left) - }) - }) - .ok_or_else(|| anyhow!("invalid position"))?; - - let completions = this - .update(&mut cx, |this, cx| this.completions(&buffer, position, cx)) - .await?; - - Ok(proto::GetCompletionsResponse { - completions: completions - .iter() - .map(language::proto::serialize_completion) - .collect(), - version: serialize_version(&version), - }) - } - async fn handle_apply_additional_edits_for_completion( this: 
ModelHandle, envelope: TypedEnvelope, From 651a83977ee4d32644b8ddc177fc5aa13da6ba85 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 14:53:08 +0200 Subject: [PATCH 52/60] :fire: --- crates/project/src/lsp_command.rs | 2 +- crates/project/src/project.rs | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/project/src/lsp_command.rs b/crates/project/src/lsp_command.rs index 334fe37c3c..1841a43324 100644 --- a/crates/project/src/lsp_command.rs +++ b/crates/project/src/lsp_command.rs @@ -1346,7 +1346,7 @@ impl LspCommand for GetCompletions { async fn from_proto( message: proto::GetCompletions, - project: ModelHandle, + _: ModelHandle, buffer: ModelHandle, mut cx: AsyncAppContext, ) -> Result { diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index ed5b8e98be..8a5f70369c 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -27,11 +27,11 @@ use language::{ deserialize_anchor, deserialize_fingerprint, deserialize_line_ending, deserialize_version, serialize_anchor, serialize_version, }, - range_from_lsp, range_to_lsp, Anchor, Bias, Buffer, CachedLspAdapter, CharKind, CodeAction, - CodeLabel, Completion, Diagnostic, DiagnosticEntry, DiagnosticSet, Diff, Event as BufferEvent, - File as _, Language, LanguageRegistry, LanguageServerName, LocalFile, OffsetRangeExt, - Operation, Patch, PointUtf16, RopeFingerprint, TextBufferSnapshot, ToOffset, ToPointUtf16, - Transaction, Unclipped, + range_from_lsp, range_to_lsp, Anchor, Bias, Buffer, CachedLspAdapter, CodeAction, CodeLabel, + Completion, Diagnostic, DiagnosticEntry, DiagnosticSet, Diff, Event as BufferEvent, File as _, + Language, LanguageRegistry, LanguageServerName, LocalFile, OffsetRangeExt, Operation, Patch, + PointUtf16, RopeFingerprint, TextBufferSnapshot, ToOffset, ToPointUtf16, Transaction, + Unclipped, }; use lsp::{ DiagnosticSeverity, DiagnosticTag, DidChangeWatchedFilesRegistrationOptions, From 
589860023992f6a9f4cfbb043957f73b18c97121 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 15:11:30 +0200 Subject: [PATCH 53/60] Use `LspCommand` to handle code actions --- crates/project/src/lsp_command.rs | 206 ++++++++++++++++++++++++++++-- crates/project/src/project.rs | 146 +-------------------- 2 files changed, 199 insertions(+), 153 deletions(-) diff --git a/crates/project/src/lsp_command.rs b/crates/project/src/lsp_command.rs index 1841a43324..d9fafceab0 100644 --- a/crates/project/src/lsp_command.rs +++ b/crates/project/src/lsp_command.rs @@ -9,8 +9,8 @@ use gpui::{AppContext, AsyncAppContext, ModelHandle}; use language::{ point_from_lsp, point_to_lsp, proto::{deserialize_anchor, deserialize_version, serialize_anchor, serialize_version}, - range_from_lsp, Anchor, Bias, Buffer, CachedLspAdapter, CharKind, Completion, PointUtf16, - ToOffset, ToPointUtf16, Unclipped, + range_from_lsp, range_to_lsp, Anchor, Bias, Buffer, CachedLspAdapter, CharKind, CodeAction, + Completion, OffsetRangeExt, PointUtf16, ToOffset, ToPointUtf16, Unclipped, }; use lsp::{DocumentHighlightKind, LanguageServer, ServerCapabilities}; use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag}; @@ -29,6 +29,8 @@ pub(crate) trait LspCommand: 'static + Sized { fn to_lsp( &self, path: &Path, + buffer: &Buffer, + language_server: &Arc, cx: &AppContext, ) -> ::Params; async fn response_from_lsp( @@ -97,6 +99,10 @@ pub(crate) struct GetCompletions { pub position: PointUtf16, } +pub(crate) struct GetCodeActions { + pub range: Range, +} + #[async_trait(?Send)] impl LspCommand for PrepareRename { type Response = Option>; @@ -111,7 +117,13 @@ impl LspCommand for PrepareRename { } } - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::TextDocumentPositionParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::TextDocumentPositionParams { lsp::TextDocumentPositionParams { text_document: lsp::TextDocumentIdentifier { uri: 
lsp::Url::from_file_path(path).unwrap(), @@ -227,7 +239,13 @@ impl LspCommand for PerformRename { type LspRequest = lsp::request::Rename; type ProtoRequest = proto::PerformRename; - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::RenameParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::RenameParams { lsp::RenameParams { text_document_position: lsp::TextDocumentPositionParams { text_document: lsp::TextDocumentIdentifier { @@ -338,7 +356,13 @@ impl LspCommand for GetDefinition { type LspRequest = lsp::request::GotoDefinition; type ProtoRequest = proto::GetDefinition; - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::GotoDefinitionParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::GotoDefinitionParams { lsp::GotoDefinitionParams { text_document_position_params: lsp::TextDocumentPositionParams { text_document: lsp::TextDocumentIdentifier { @@ -424,7 +448,13 @@ impl LspCommand for GetTypeDefinition { type LspRequest = lsp::request::GotoTypeDefinition; type ProtoRequest = proto::GetTypeDefinition; - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::GotoTypeDefinitionParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::GotoTypeDefinitionParams { lsp::GotoTypeDefinitionParams { text_document_position_params: lsp::TextDocumentPositionParams { text_document: lsp::TextDocumentIdentifier { @@ -699,7 +729,13 @@ impl LspCommand for GetReferences { type LspRequest = lsp::request::References; type ProtoRequest = proto::GetReferences; - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::ReferenceParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::ReferenceParams { lsp::ReferenceParams { text_document_position: lsp::TextDocumentPositionParams { text_document: lsp::TextDocumentIdentifier { @@ -857,7 +893,13 @@ impl LspCommand for GetDocumentHighlights { 
capabilities.document_highlight_provider.is_some() } - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::DocumentHighlightParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::DocumentHighlightParams { lsp::DocumentHighlightParams { text_document_position_params: lsp::TextDocumentPositionParams { text_document: lsp::TextDocumentIdentifier { @@ -997,7 +1039,13 @@ impl LspCommand for GetHover { type LspRequest = lsp::request::HoverRequest; type ProtoRequest = proto::GetHover; - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::HoverParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::HoverParams { lsp::HoverParams { text_document_position_params: lsp::TextDocumentPositionParams { text_document: lsp::TextDocumentIdentifier { @@ -1212,7 +1260,13 @@ impl LspCommand for GetCompletions { type LspRequest = lsp::request::Completion; type ProtoRequest = proto::GetCompletions; - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::CompletionParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::CompletionParams { lsp::CompletionParams { text_document_position: lsp::TextDocumentPositionParams::new( lsp::TextDocumentIdentifier::new(lsp::Url::from_file_path(path).unwrap()), @@ -1406,3 +1460,135 @@ impl LspCommand for GetCompletions { message.buffer_id } } + +#[async_trait(?Send)] +impl LspCommand for GetCodeActions { + type Response = Vec; + type LspRequest = lsp::request::CodeActionRequest; + type ProtoRequest = proto::GetCodeActions; + + fn check_capabilities(&self, capabilities: &ServerCapabilities) -> bool { + capabilities.code_action_provider.is_some() + } + + fn to_lsp( + &self, + path: &Path, + buffer: &Buffer, + language_server: &Arc, + _: &AppContext, + ) -> lsp::CodeActionParams { + let relevant_diagnostics = buffer + .snapshot() + .diagnostics_in_range::<_, usize>(self.range.clone(), false) + .map(|entry| 
entry.to_lsp_diagnostic_stub()) + .collect(); + lsp::CodeActionParams { + text_document: lsp::TextDocumentIdentifier::new( + lsp::Url::from_file_path(path).unwrap(), + ), + range: range_to_lsp(self.range.to_point_utf16(buffer)), + work_done_progress_params: Default::default(), + partial_result_params: Default::default(), + context: lsp::CodeActionContext { + diagnostics: relevant_diagnostics, + only: language_server.code_action_kinds(), + }, + } + } + + async fn response_from_lsp( + self, + actions: Option, + _: ModelHandle, + _: ModelHandle, + _: AsyncAppContext, + ) -> Result> { + Ok(actions + .unwrap_or_default() + .into_iter() + .filter_map(|entry| { + if let lsp::CodeActionOrCommand::CodeAction(lsp_action) = entry { + Some(CodeAction { + range: self.range.clone(), + lsp_action, + }) + } else { + None + } + }) + .collect()) + } + + fn to_proto(&self, project_id: u64, buffer: &Buffer) -> proto::GetCodeActions { + proto::GetCodeActions { + project_id, + buffer_id: buffer.remote_id(), + start: Some(language::proto::serialize_anchor(&self.range.start)), + end: Some(language::proto::serialize_anchor(&self.range.end)), + version: serialize_version(&buffer.version()), + } + } + + async fn from_proto( + message: proto::GetCodeActions, + _: ModelHandle, + buffer: ModelHandle, + mut cx: AsyncAppContext, + ) -> Result { + let start = message + .start + .and_then(language::proto::deserialize_anchor) + .ok_or_else(|| anyhow!("invalid start"))?; + let end = message + .end + .and_then(language::proto::deserialize_anchor) + .ok_or_else(|| anyhow!("invalid end"))?; + buffer + .update(&mut cx, |buffer, _| { + buffer.wait_for_version(deserialize_version(&message.version)) + }) + .await?; + + Ok(Self { range: start..end }) + } + + fn response_to_proto( + code_actions: Vec, + _: &mut Project, + _: PeerId, + buffer_version: &clock::Global, + _: &mut AppContext, + ) -> proto::GetCodeActionsResponse { + proto::GetCodeActionsResponse { + actions: code_actions + .iter() + 
.map(language::proto::serialize_code_action) + .collect(), + version: serialize_version(&buffer_version), + } + } + + async fn response_from_proto( + self, + message: proto::GetCodeActionsResponse, + _: ModelHandle, + buffer: ModelHandle, + mut cx: AsyncAppContext, + ) -> Result> { + buffer + .update(&mut cx, |buffer, _| { + buffer.wait_for_version(deserialize_version(&message.version)) + }) + .await?; + message + .actions + .into_iter() + .map(language::proto::deserialize_code_action) + .collect() + } + + fn buffer_id_from_proto(message: &proto::GetCodeActions) -> u64 { + message.buffer_id + } +} diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 8a5f70369c..2daa959cc8 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -409,7 +409,7 @@ impl Project { client.add_model_request_handler(Self::handle_reload_buffers); client.add_model_request_handler(Self::handle_synchronize_buffers); client.add_model_request_handler(Self::handle_format_buffers); - client.add_model_request_handler(Self::handle_get_code_actions); + client.add_model_request_handler(Self::handle_lsp_command::); client.add_model_request_handler(Self::handle_lsp_command::); client.add_model_request_handler(Self::handle_lsp_command::); client.add_model_request_handler(Self::handle_lsp_command::); @@ -3704,106 +3704,9 @@ impl Project { range: Range, cx: &mut ModelContext, ) -> Task>> { - let buffer_handle = buffer_handle.clone(); let buffer = buffer_handle.read(cx); - let snapshot = buffer.snapshot(); - let relevant_diagnostics = snapshot - .diagnostics_in_range::(range.to_offset(&snapshot), false) - .map(|entry| entry.to_lsp_diagnostic_stub()) - .collect(); - let buffer_id = buffer.remote_id(); - let worktree; - let buffer_abs_path; - if let Some(file) = File::from_dyn(buffer.file()) { - worktree = file.worktree.clone(); - buffer_abs_path = file.as_local().map(|f| f.abs_path(cx)); - } else { - return Task::ready(Ok(Vec::new())); - }; let range = 
buffer.anchor_before(range.start)..buffer.anchor_before(range.end); - - if worktree.read(cx).as_local().is_some() { - let buffer_abs_path = buffer_abs_path.unwrap(); - let lang_server = if let Some((_, server)) = self.language_server_for_buffer(buffer, cx) - { - server.clone() - } else { - return Task::ready(Ok(Vec::new())); - }; - - let lsp_range = range_to_lsp(range.to_point_utf16(buffer)); - cx.foreground().spawn(async move { - if lang_server.capabilities().code_action_provider.is_none() { - return Ok(Vec::new()); - } - - Ok(lang_server - .request::(lsp::CodeActionParams { - text_document: lsp::TextDocumentIdentifier::new( - lsp::Url::from_file_path(buffer_abs_path).unwrap(), - ), - range: lsp_range, - work_done_progress_params: Default::default(), - partial_result_params: Default::default(), - context: lsp::CodeActionContext { - diagnostics: relevant_diagnostics, - only: lang_server.code_action_kinds(), - }, - }) - .await? - .unwrap_or_default() - .into_iter() - .filter_map(|entry| { - if let lsp::CodeActionOrCommand::CodeAction(lsp_action) = entry { - Some(CodeAction { - range: range.clone(), - lsp_action, - }) - } else { - None - } - }) - .collect()) - }) - } else if let Some(project_id) = self.remote_id() { - let rpc = self.client.clone(); - let version = buffer.version(); - cx.spawn_weak(|this, mut cx| async move { - let response = rpc - .request(proto::GetCodeActions { - project_id, - buffer_id, - start: Some(language::proto::serialize_anchor(&range.start)), - end: Some(language::proto::serialize_anchor(&range.end)), - version: serialize_version(&version), - }) - .await?; - - if this - .upgrade(&cx) - .ok_or_else(|| anyhow!("project was dropped"))? 
- .read_with(&cx, |this, _| this.is_read_only()) - { - return Err(anyhow!( - "failed to get code actions: project was disconnected" - )); - } else { - buffer_handle - .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(&response.version)) - }) - .await?; - - response - .actions - .into_iter() - .map(language::proto::deserialize_code_action) - .collect() - } - }) - } else { - Task::ready(Ok(Default::default())) - } + self.request_lsp(buffer_handle.clone(), GetCodeActions { range }, cx) } pub fn apply_code_action( @@ -4288,7 +4191,7 @@ impl Project { self.language_server_for_buffer(buffer, cx) .map(|(_, server)| server.clone()), ) { - let lsp_params = request.to_lsp(&file.abs_path(cx), cx); + let lsp_params = request.to_lsp(&file.abs_path(cx), buffer, &language_server, cx); return cx.spawn(|this, cx| async move { if !request.check_capabilities(language_server.capabilities()) { return Ok(Default::default()); @@ -5493,49 +5396,6 @@ impl Project { }) } - async fn handle_get_code_actions( - this: ModelHandle, - envelope: TypedEnvelope, - _: Arc, - mut cx: AsyncAppContext, - ) -> Result { - let start = envelope - .payload - .start - .and_then(language::proto::deserialize_anchor) - .ok_or_else(|| anyhow!("invalid start"))?; - let end = envelope - .payload - .end - .and_then(language::proto::deserialize_anchor) - .ok_or_else(|| anyhow!("invalid end"))?; - let buffer = this.update(&mut cx, |this, cx| { - this.opened_buffers - .get(&envelope.payload.buffer_id) - .and_then(|buffer| buffer.upgrade(cx)) - .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id)) - })?; - buffer - .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(&envelope.payload.version)) - }) - .await?; - - let version = buffer.read_with(&cx, |buffer, _| buffer.version()); - let code_actions = this.update(&mut cx, |this, cx| { - Ok::<_, anyhow::Error>(this.code_actions(&buffer, start..end, cx)) - })?; - - Ok(proto::GetCodeActionsResponse { - 
actions: code_actions - .await? - .iter() - .map(language::proto::serialize_code_action) - .collect(), - version: serialize_version(&version), - }) - } - async fn handle_apply_code_action( this: ModelHandle, envelope: TypedEnvelope, From 5e37c893c2a37693dbe51ec21aa59142f145675e Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 15:14:32 +0200 Subject: [PATCH 54/60] Ensure project is still alive by the time remote LSP request starts --- crates/project/src/project.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 2daa959cc8..90985f8810 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -4210,7 +4210,13 @@ impl Project { let rpc = self.client.clone(); let message = request.to_proto(project_id, buffer); return cx.spawn_weak(|this, cx| async move { + // Ensure the project is still alive by the time the task + // is scheduled. + this.upgrade(&cx) + .ok_or_else(|| anyhow!("project dropped"))?; + let response = rpc.request(message).await?; + let this = this .upgrade(&cx) .ok_or_else(|| anyhow!("project dropped"))?; From 172441ab7299f9d248809d0562b0ab8723ba9f0c Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 16:33:08 +0200 Subject: [PATCH 55/60] Cancel pending calls when participant fails to reconnect Previously, we would only cancel pending calls when the room became empty. --- crates/collab/src/db.rs | 38 ++++++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 72f8d9c703..f441bbfb00 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -175,25 +175,39 @@ impl Database { .map(|participant| participant.user_id) .collect::>(); - // Delete participants who failed to reconnect. + // Delete participants who failed to reconnect and cancel their calls. 
+ let mut canceled_calls_to_user_ids = Vec::new(); room_participant::Entity::delete_many() .filter(stale_participant_filter) .exec(&*tx) .await?; + let called_participants = room_participant::Entity::find() + .filter( + Condition::all() + .add( + room_participant::Column::CallingUserId + .is_in(stale_participant_user_ids.iter().copied()), + ) + .add(room_participant::Column::AnsweringConnectionId.is_null()), + ) + .all(&*tx) + .await?; + room_participant::Entity::delete_many() + .filter( + room_participant::Column::Id + .is_in(called_participants.iter().map(|participant| participant.id)), + ) + .exec(&*tx) + .await?; + canceled_calls_to_user_ids.extend( + called_participants + .into_iter() + .map(|participant| participant.user_id), + ); let room = self.get_room(room_id, &tx).await?; - let mut canceled_calls_to_user_ids = Vec::new(); - // Delete the room if it becomes empty and cancel pending calls. + // Delete the room if it becomes empty. if room.participants.is_empty() { - canceled_calls_to_user_ids.extend( - room.pending_participants - .iter() - .map(|pending_participant| UserId::from_proto(pending_participant.user_id)), - ); - room_participant::Entity::delete_many() - .filter(room_participant::Column::RoomId.eq(room_id)) - .exec(&*tx) - .await?; project::Entity::delete_many() .filter(project::Column::RoomId.eq(room_id)) .exec(&*tx) From 5eb1719ab8e9a05b2190d82afaaeb9eff5ab2d4b Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 17:15:43 +0200 Subject: [PATCH 56/60] Re-send worktree if reconnecting while initial scan isn't finished yet Previously, if the client was disconnected while the initial worktree state was being sent, it would not see the remaining state after reconnecting. This was due to `scan_id` and `completed_scan_id` both being initialized to `0`, so the client would ask for updates since `0` and get nothing. 
This commit changes the worktree to initialize `scan_id` to `1` and `completed_scan_id` to `0`, so that we get the full worktree again on reconnect. --- crates/project/src/worktree.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 792e09a00a..0f31303635 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -221,7 +221,7 @@ impl Worktree { root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(), entries_by_path: Default::default(), entries_by_id: Default::default(), - scan_id: 0, + scan_id: 1, completed_scan_id: 0, }, }; @@ -298,7 +298,7 @@ impl Worktree { .collect(), entries_by_path: Default::default(), entries_by_id: Default::default(), - scan_id: 0, + scan_id: 1, completed_scan_id: 0, }; From 42b10044fc3d962f0e7c6f8e0fbc9585b81cef86 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 17:43:05 +0200 Subject: [PATCH 57/60] Fix running client crate tests --- crates/client/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/client/Cargo.toml b/crates/client/Cargo.toml index 86a608a00b..560a754bf7 100644 --- a/crates/client/Cargo.toml +++ b/crates/client/Cargo.toml @@ -45,3 +45,4 @@ collections = { path = "../collections", features = ["test-support"] } gpui = { path = "../gpui", features = ["test-support"] } rpc = { path = "../rpc", features = ["test-support"] } settings = { path = "../settings", features = ["test-support"] } +util = { path = "../util", features = ["test-support"] } From 727afae4ff8c8a077e481250a3a651c66024d30d Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 11 Apr 2023 10:58:01 -0700 Subject: [PATCH 58/60] Fix unit tests after fixing gpui model drop semantics co-authored-by: Antonio Scandurra --- crates/auto_update/src/auto_update.rs | 4 ++-- crates/client/src/client.rs | 8 +++++--- crates/editor/src/blink_manager.rs | 9 +++------ 3 files changed, 10 insertions(+), 11 
deletions(-) diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index 9075e4df1a..a12a5dd3a9 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -63,10 +63,10 @@ pub fn init(http_client: Arc, server_url: String, cx: &mut AppCo cx.observe_global::(move |updater, cx| { if cx.global::().auto_update { if update_subscription.is_none() { - *(&mut update_subscription) = Some(updater.start_polling(cx)) + update_subscription = Some(updater.start_polling(cx)) } } else { - (&mut update_subscription).take(); + update_subscription.take(); } }) .detach(); diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index ce808cd08d..5a00f27ddf 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -1649,11 +1649,13 @@ mod tests { }, ); drop(subscription1); - let _subscription2 = - client.add_message_handler(model, move |_, _: TypedEnvelope, _, _| { + let _subscription2 = client.add_message_handler( + model.clone(), + move |_, _: TypedEnvelope, _, _| { done_tx2.try_send(()).unwrap(); async { Ok(()) } - }); + }, + ); server.send(proto::Ping {}); done_rx2.next().await.unwrap(); } diff --git a/crates/editor/src/blink_manager.rs b/crates/editor/src/blink_manager.rs index 9651182bd8..409b6f9b03 100644 --- a/crates/editor/src/blink_manager.rs +++ b/crates/editor/src/blink_manager.rs @@ -15,12 +15,9 @@ pub struct BlinkManager { impl BlinkManager { pub fn new(blink_interval: Duration, cx: &mut ModelContext) -> Self { - let weak_handle = cx.weak_handle(); - cx.observe_global::(move |_, cx| { - if let Some(this) = weak_handle.upgrade(cx) { - // Make sure we blink the cursors if the setting is re-enabled - this.update(cx, |this, cx| this.blink_cursors(this.blink_epoch, cx)); - } + cx.observe_global::(move |this, cx| { + // Make sure we blink the cursors if the setting is re-enabled + this.blink_cursors(this.blink_epoch, cx) }) .detach(); From 
61d048cb25c02ac18b4ba7f475da4bbe60336192 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 11 Apr 2023 12:37:08 -0700 Subject: [PATCH 59/60] Don't wait for host's reply before broadcasting buffer updates to guests --- crates/collab/src/rpc.rs | 50 ++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 27 deletions(-) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index ce5a6a0a1f..16e7577d95 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1610,20 +1610,35 @@ async fn update_buffer( ) -> Result<()> { session.executor.record_backtrace(); let project_id = ProjectId::from_proto(request.project_id); - let host_connection_id = { + let mut guest_connection_ids; + let mut host_connection_id = None; + { let collaborators = session .db() .await .project_collaborators(project_id, session.connection_id) .await?; + guest_connection_ids = Vec::with_capacity(collaborators.len() - 1); + for collaborator in collaborators.iter() { + if collaborator.is_host { + host_connection_id = Some(collaborator.connection_id); + } else { + guest_connection_ids.push(collaborator.connection_id); + } + } + } + let host_connection_id = host_connection_id.ok_or_else(|| anyhow!("host not found"))?; - let host = collaborators - .iter() - .find(|collaborator| collaborator.is_host) - .ok_or_else(|| anyhow!("host not found"))?; - host.connection_id - }; - + session.executor.record_backtrace(); + broadcast( + Some(session.connection_id), + guest_connection_ids, + |connection_id| { + session + .peer + .forward_send(session.connection_id, connection_id, request.clone()) + }, + ); if host_connection_id != session.connection_id { session .peer @@ -1631,25 +1646,6 @@ async fn update_buffer( .await?; } - session.executor.record_backtrace(); - let collaborators = session - .db() - .await - .project_collaborators(project_id, session.connection_id) - .await?; - - broadcast( - Some(session.connection_id), - collaborators - .iter() - 
.filter(|collaborator| !collaborator.is_host) - .map(|collaborator| collaborator.connection_id), - |connection_id| { - session - .peer - .forward_send(session.connection_id, connection_id, request.clone()) - }, - ); response.send(proto::Ack {})?; Ok(()) } From 12a286ac509830333975d7e70e47c923abfbb2c8 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 12 Apr 2023 09:30:34 +0200 Subject: [PATCH 60/60] Forget buffered operations when resyncing with the host Previously, we could end up with a situation where the host did not see an operation but a guest that didn't have that buffer open would. When such guest would finally open the buffer, they would apply the operation without however sending it to the host. The guest wouldn't bother resyncing it because it wasn't part of its open buffers. --- crates/project/src/project.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 90985f8810..9192c7a411 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -4725,6 +4725,8 @@ impl Project { } if is_host { + this.opened_buffers + .retain(|_, buffer| !matches!(buffer, OpenBuffer::Operations(_))); this.buffer_changes_tx .unbounded_send(BufferMessage::Resync) .unwrap();