Make block_with_timeout more robust (#11670)

The previous implementation relied on a background thread to wake up the
main thread, an approach that was prone to priority inversion under heavy
load.

In a synthetic test that spawns 200 git processes while blocking with a
5ms timeout, the old version blocked for 5-80ms; the new version blocks
for 5.1-5.4ms.
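
For reference, a rough harness along these lines can reproduce that kind of
measurement; the load-generation loop, the never-resolving future, and the
`gpui::BackgroundExecutor` handle are illustrative assumptions, not the
actual test used for this change:

    use std::time::{Duration, Instant};

    // Hypothetical harness: generate load with short-lived git processes,
    // then see how long a 5ms block_with_timeout actually blocks.
    fn measure_block_with_timeout(executor: &gpui::BackgroundExecutor) {
        let children: Vec<_> = (0..200)
            .filter_map(|_| std::process::Command::new("git").arg("--version").spawn().ok())
            .collect();

        let started = Instant::now();
        // A future that never resolves, so the 5ms timeout always fires.
        let result =
            executor.block_with_timeout(Duration::from_millis(5), std::future::pending::<()>());
        assert!(result.is_err());
        println!("blocked for {:?}", started.elapsed());

        for mut child in children {
            child.wait().ok();
        }
    }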

Release Notes:

- Improved responsiveness of the main thread under high system load
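
The method's contract is worth keeping in mind while reading the diff below:
on timeout, `block_with_timeout` hands the still-pending future back to the
caller. A minimal usage sketch, where `load_value` is a hypothetical async
function:

    use std::time::Duration;

    fn example(executor: &gpui::BackgroundExecutor) -> u64 {
        match executor.block_with_timeout(Duration::from_millis(5), load_value()) {
            // The future finished within 5ms.
            Ok(value) => value,
            // Timed out: we get the pending future back and can keep waiting,
            // here by falling back to blocking without a timeout.
            Err(still_pending) => executor.block(still_pending),
        }
    }

    // Hypothetical async work used in the sketch above.
    async fn load_value() -> u64 {
        42
    }
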
Conrad Irwin 2024-05-10 13:10:02 -06:00 committed by GitHub
parent b34ab6f3a1
commit c73d6502d6
6 changed files with 160 additions and 135 deletions

@@ -1,5 +1,5 @@
use crate::{AppContext, PlatformDispatcher};
use futures::{channel::mpsc, pin_mut, FutureExt};
use futures::channel::mpsc;
use smol::prelude::*;
use std::{
fmt::Debug,
@@ -9,7 +9,7 @@ use std::{
pin::Pin,
rc::Rc,
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst},
atomic::{AtomicUsize, Ordering::SeqCst},
Arc,
},
task::{Context, Poll},
@@ -164,7 +164,7 @@ impl BackgroundExecutor {
#[cfg(any(test, feature = "test-support"))]
#[track_caller]
pub fn block_test<R>(&self, future: impl Future<Output = R>) -> R {
if let Ok(value) = self.block_internal(false, future, usize::MAX) {
if let Ok(value) = self.block_internal(false, future, None) {
value
} else {
unreachable!()
@@ -174,24 +174,75 @@ impl BackgroundExecutor {
/// Block the current thread until the given future resolves.
/// Consider using `block_with_timeout` instead.
pub fn block<R>(&self, future: impl Future<Output = R>) -> R {
if let Ok(value) = self.block_internal(true, future, usize::MAX) {
if let Ok(value) = self.block_internal(true, future, None) {
value
} else {
unreachable!()
}
}
#[cfg(not(any(test, feature = "test-support")))]
pub(crate) fn block_internal<R>(
&self,
_background_only: bool,
future: impl Future<Output = R>,
timeout: Option<Duration>,
) -> Result<R, impl Future<Output = R>> {
use std::time::Instant;
let mut future = Box::pin(future);
if timeout == Some(Duration::ZERO) {
return Err(future);
}
let deadline = timeout.map(|timeout| Instant::now() + timeout);
let unparker = self.dispatcher.unparker();
let waker = waker_fn(move || {
unparker.unpark();
});
let mut cx = std::task::Context::from_waker(&waker);
loop {
match future.as_mut().poll(&mut cx) {
Poll::Ready(result) => return Ok(result),
Poll::Pending => {
let timeout =
deadline.map(|deadline| deadline.saturating_duration_since(Instant::now()));
if !self.dispatcher.park(timeout) {
if deadline.is_some_and(|deadline| deadline < Instant::now()) {
return Err(future);
}
}
}
}
}
}
#[cfg(any(test, feature = "test-support"))]
#[track_caller]
pub(crate) fn block_internal<R>(
&self,
background_only: bool,
future: impl Future<Output = R>,
mut max_ticks: usize,
) -> Result<R, ()> {
pin_mut!(future);
timeout: Option<Duration>,
) -> Result<R, impl Future<Output = R>> {
use std::sync::atomic::AtomicBool;
let mut future = Box::pin(future);
if timeout == Some(Duration::ZERO) {
return Err(future);
}
let Some(dispatcher) = self.dispatcher.as_test() else {
return Err(future);
};
let mut max_ticks = if timeout.is_some() {
dispatcher.gen_block_on_ticks()
} else {
usize::MAX
};
let unparker = self.dispatcher.unparker();
let awoken = Arc::new(AtomicBool::new(false));
let waker = waker_fn({
let awoken = awoken.clone();
move || {
@@ -206,34 +257,30 @@ impl BackgroundExecutor {
Poll::Ready(result) => return Ok(result),
Poll::Pending => {
if max_ticks == 0 {
return Err(());
return Err(future);
}
max_ticks -= 1;
if !self.dispatcher.tick(background_only) {
if !dispatcher.tick(background_only) {
if awoken.swap(false, SeqCst) {
continue;
}
#[cfg(any(test, feature = "test-support"))]
if let Some(test) = self.dispatcher.as_test() {
if !test.parking_allowed() {
let mut backtrace_message = String::new();
let mut waiting_message = String::new();
if let Some(backtrace) = test.waiting_backtrace() {
backtrace_message =
format!("\nbacktrace of waiting future:\n{:?}", backtrace);
}
if let Some(waiting_hint) = test.waiting_hint() {
waiting_message = format!("\n waiting on: {}\n", waiting_hint);
}
panic!(
if !dispatcher.parking_allowed() {
let mut backtrace_message = String::new();
let mut waiting_message = String::new();
if let Some(backtrace) = dispatcher.waiting_backtrace() {
backtrace_message =
format!("\nbacktrace of waiting future:\n{:?}", backtrace);
}
if let Some(waiting_hint) = dispatcher.waiting_hint() {
waiting_message = format!("\n waiting on: {}\n", waiting_hint);
}
panic!(
"parked with nothing left to run{waiting_message}{backtrace_message}",
)
}
}
self.dispatcher.park();
self.dispatcher.park(None);
}
}
}
@@ -247,31 +294,7 @@ impl BackgroundExecutor {
duration: Duration,
future: impl Future<Output = R>,
) -> Result<R, impl Future<Output = R>> {
let mut future = Box::pin(future.fuse());
if duration.is_zero() {
return Err(future);
}
#[cfg(any(test, feature = "test-support"))]
let max_ticks = self
.dispatcher
.as_test()
.map_or(usize::MAX, |dispatcher| dispatcher.gen_block_on_ticks());
#[cfg(not(any(test, feature = "test-support")))]
let max_ticks = usize::MAX;
let mut timer = self.timer(duration).fuse();
let timeout = async {
futures::select_biased! {
value = future => Ok(value),
_ = timer => Err(()),
}
};
match self.block_internal(true, timeout, max_ticks) {
Ok(Ok(value)) => Ok(value),
_ => Err(future),
}
self.block_internal(true, future, Some(duration))
}
/// Scoped lets you start a number of tasks and waits