Remove logic for preserving RPC message order between peers

* On the server, spawn a separate task for each incoming message
* In the peer, eliminate the barrier that was used to enforce ordering
  of responses with respect to other incoming messages

Co-Authored-By: Antonio Scandurra <me@as-cii.com>
Author: Max Brunsfeld
Date:   2022-02-14 10:51:12 -08:00
Parent: 90576cf32f
Commit: fe46b89500
2 changed files with 39 additions and 21 deletions

View file

@@ -5,7 +5,7 @@ use futures::stream::BoxStream;
 use futures::{FutureExt as _, StreamExt};
 use parking_lot::{Mutex, RwLock};
 use postage::{
-    barrier, mpsc,
+    mpsc,
     prelude::{Sink as _, Stream as _},
 };
 use smol_timeout::TimeoutExt as _;
@@ -91,8 +91,7 @@ pub struct Peer {
 pub struct ConnectionState {
     outgoing_tx: futures::channel::mpsc::UnboundedSender<proto::Envelope>,
     next_message_id: Arc<AtomicU32>,
-    response_channels:
-        Arc<Mutex<Option<HashMap<u32, mpsc::Sender<(proto::Envelope, barrier::Sender)>>>>>,
+    response_channels: Arc<Mutex<Option<HashMap<u32, mpsc::Sender<proto::Envelope>>>>>,
 }

 const WRITE_TIMEOUT: Duration = Duration::from_secs(10);
@@ -178,18 +177,12 @@ impl Peer {
 if let Some(responding_to) = incoming.responding_to {
     let channel = response_channels.lock().as_mut()?.remove(&responding_to);
     if let Some(mut tx) = channel {
-        let mut requester_resumed = barrier::channel();
-        if let Err(error) = tx.send((incoming, requester_resumed.0)).await {
+        if let Err(error) = tx.send(incoming).await {
             log::debug!(
                 "received RPC but request future was dropped {:?}",
-                error.0 .0
+                error.0
             );
         }
-        // Drop response channel before awaiting on the barrier. This allows the
-        // barrier to get dropped even if the request's future is dropped before it
-        // has a chance to observe the response.
-        drop(tx);
-        requester_resumed.1.recv().await;
     } else {
         log::warn!("received RPC response to unknown request {}", responding_to);
     }
@@ -260,7 +253,7 @@ impl Peer {
 });
 async move {
     send?;
-    let (response, _barrier) = rx
+    let response = rx
         .recv()
         .await
         .ok_or_else(|| anyhow!("connection was closed"))?;

View file

@@ -41,6 +41,12 @@ pub struct Server {
     notifications: Option<mpsc::Sender<()>>,
 }

+pub trait Executor {
+    fn spawn_detached<F: 'static + Send + Future<Output = ()>>(&self, future: F);
+}
+
+pub struct RealExecutor;
+
 const MESSAGE_COUNT_PER_PAGE: usize = 100;
 const MAX_MESSAGE_LEN: usize = 1024;
@ -144,12 +150,13 @@ impl Server {
}) })
} }
pub fn handle_connection( pub fn handle_connection<E: Executor>(
self: &Arc<Self>, self: &Arc<Self>,
connection: Connection, connection: Connection,
addr: String, addr: String,
user_id: UserId, user_id: UserId,
mut send_connection_id: Option<postage::mpsc::Sender<ConnectionId>>, mut send_connection_id: Option<postage::mpsc::Sender<ConnectionId>>,
executor: E,
) -> impl Future<Output = ()> { ) -> impl Future<Output = ()> {
let mut this = self.clone(); let mut this = self.clone();
async move { async move {
@@ -183,12 +190,14 @@ impl Server {
 let type_name = message.payload_type_name();
 log::info!("rpc message received. connection:{}, type:{}", connection_id, type_name);
 if let Some(handler) = this.handlers.get(&message.payload_type_id()) {
-    if let Err(err) = (handler)(this.clone(), message).await {
-        log::error!("rpc message error. connection:{}, type:{}, error:{:?}", connection_id, type_name, err);
-    } else {
-        log::info!("rpc message handled. connection:{}, type:{}, duration:{:?}", connection_id, type_name, start_time.elapsed());
-    }
+    let handle_message = (handler)(this.clone(), message);
+    executor.spawn_detached(async move {
+        if let Err(err) = handle_message.await {
+            log::error!("rpc message error. connection:{}, type:{}, error:{:?}", connection_id, type_name, err);
+        } else {
+            log::info!("rpc message handled. connection:{}, type:{}, duration:{:?}", connection_id, type_name, start_time.elapsed());
+        }
+    });

 if let Some(mut notifications) = this.notifications.clone() {
     let _ = notifications.send(()).await;
 }
@@ -966,6 +975,12 @@ impl Server {
     }
 }

+impl Executor for RealExecutor {
+    fn spawn_detached<F: 'static + Send + Future<Output = ()>>(&self, future: F) {
+        task::spawn(future);
+    }
+}
+
 fn broadcast<F>(
     sender_id: ConnectionId,
     receiver_ids: Vec<ConnectionId>,
@@ -1032,6 +1047,7 @@ pub fn add_routes(app: &mut tide::Server<Arc<AppState>>, rpc: &Arc<Peer>) {
     addr,
     user_id,
     None,
+    RealExecutor,
 )
 .await;
 }
@ -1778,10 +1794,12 @@ mod tests {
let buffer_b = cx_b let buffer_b = cx_b
.background() .background()
.spawn(project_b.update(&mut cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))); .spawn(project_b.update(&mut cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)));
task::yield_now().await;
// Edit the buffer as client A while client B is still opening it. // Edit the buffer as client A while client B is still opening it.
buffer_a.update(&mut cx_a, |buf, cx| buf.edit([0..0], "z", cx)); cx_b.background().simulate_random_delay().await;
buffer_a.update(&mut cx_a, |buf, cx| buf.edit([0..0], "X", cx));
cx_b.background().simulate_random_delay().await;
buffer_a.update(&mut cx_a, |buf, cx| buf.edit([1..1], "Y", cx));
let text = buffer_a.read_with(&cx_a, |buf, _| buf.text()); let text = buffer_a.read_with(&cx_a, |buf, _| buf.text());
let buffer_b = buffer_b.await.unwrap(); let buffer_b = buffer_b.await.unwrap();
@@ -3598,6 +3616,7 @@ mod tests {
     client_name,
     user_id,
     Some(connection_id_tx),
+    cx.background(),
 ))
 .detach();
 Ok(client_conn)
@ -3701,6 +3720,12 @@ mod tests {
} }
} }
impl Executor for Arc<gpui::executor::Background> {
fn spawn_detached<F: 'static + Send + Future<Output = ()>>(&self, future: F) {
self.spawn(future).detach();
}
}
fn channel_messages(channel: &Channel) -> Vec<(String, String, bool)> { fn channel_messages(channel: &Channel) -> Vec<(String, String, bool)> {
channel channel
.messages() .messages()