Use Tokio::spawn instead of getting an executor handle (#36701)

This was causing panics due to the handles being dropped out of order.
It doesn't seem possible to guarantee the correct drop ordering given
that we're holding them over await points, so let's just spawn on the
tokio executor itself which gives us access to the state we needed those
handles for in the first place.

Fixes: ZED-1R

Release Notes:

- N/A

Co-authored-by: Conrad Irwin <conrad.irwin@gmail.com>
Co-authored-by: Marshall Bowers <git@maxdeviant.com>
This commit is contained in:
Julia Ryan 2025-08-21 12:19:57 -05:00 committed by GitHub
parent d166ab95a1
commit 1b2ceae7ef
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 38 additions and 18 deletions

1
Cargo.lock generated
View file

@@ -7539,6 +7539,7 @@ dependencies = [
name = "gpui_tokio" name = "gpui_tokio"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"anyhow",
"gpui", "gpui",
"tokio", "tokio",
"util", "util",

View file

@@ -1290,19 +1290,21 @@ impl Client {
"http" => Http, "http" => Http,
_ => Err(anyhow!("invalid rpc url: {}", rpc_url))?, _ => Err(anyhow!("invalid rpc url: {}", rpc_url))?,
}; };
let rpc_host = rpc_url
.host_str()
.zip(rpc_url.port_or_known_default())
.context("missing host in rpc url")?;
let stream = { let stream = gpui_tokio::Tokio::spawn_result(cx, {
let handle = cx.update(|cx| gpui_tokio::Tokio::handle(cx)).ok().unwrap(); let rpc_url = rpc_url.clone();
let _guard = handle.enter(); async move {
match proxy { let rpc_host = rpc_url
Some(proxy) => connect_proxy_stream(&proxy, rpc_host).await?, .host_str()
None => Box::new(TcpStream::connect(rpc_host).await?), .zip(rpc_url.port_or_known_default())
.context("missing host in rpc url")?;
Ok(match proxy {
Some(proxy) => connect_proxy_stream(&proxy, rpc_host).await?,
None => Box::new(TcpStream::connect(rpc_host).await?),
})
} }
}; })?
.await?;
log::info!("connected to rpc endpoint {}", rpc_url); log::info!("connected to rpc endpoint {}", rpc_url);

View file

@@ -102,13 +102,7 @@ impl CloudApiClient {
let credentials = credentials.as_ref().context("no credentials provided")?; let credentials = credentials.as_ref().context("no credentials provided")?;
let authorization_header = format!("{} {}", credentials.user_id, credentials.access_token); let authorization_header = format!("{} {}", credentials.user_id, credentials.access_token);
Ok(cx.spawn(async move |cx| { Ok(Tokio::spawn_result(cx, async move {
let handle = cx
.update(|cx| Tokio::handle(cx))
.ok()
.context("failed to get Tokio handle")?;
let _guard = handle.enter();
let ws = WebSocket::connect(connect_url) let ws = WebSocket::connect(connect_url)
.with_request( .with_request(
request::Builder::new() request::Builder::new()

View file

@@ -13,6 +13,7 @@ path = "src/gpui_tokio.rs"
doctest = false doctest = false
[dependencies] [dependencies]
anyhow.workspace = true
util.workspace = true util.workspace = true
gpui.workspace = true gpui.workspace = true
tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }

View file

@@ -52,6 +52,28 @@ impl Tokio {
}) })
} }
/// Spawns the given future on Tokio's thread pool, and returns it via a GPUI task.
///
/// Note that the Tokio task will be cancelled if the GPUI task is dropped:
/// an abort guard is moved into the background task, so dropping the GPUI
/// task before completion drops the guard, which aborts the Tokio task.
/// This avoids holding a `tokio::runtime::Handle` enter-guard across await
/// points (the drop-ordering hazard this API exists to remove).
pub fn spawn_result<C, Fut, R>(cx: &C, f: Fut) -> C::Result<Task<anyhow::Result<R>>>
where
    C: AppContext,
    Fut: Future<Output = anyhow::Result<R>> + Send + 'static,
    R: Send + 'static,
{
    cx.read_global(|tokio: &GlobalTokio, cx| {
        // Spawn directly on the globally owned Tokio runtime — no
        // Handle::enter() guard is needed since we call the runtime itself.
        let join_handle = tokio.runtime.spawn(f);
        let abort_handle = join_handle.abort_handle();
        // Deferred abort: runs when `cancel` is dropped. If the GPUI task
        // below is dropped mid-flight, this aborts the Tokio task too.
        let cancel = defer(move || {
            abort_handle.abort();
        });
        cx.background_spawn(async move {
            // `?` surfaces a JoinError (Tokio task panicked or was aborted)
            // as an anyhow::Error.
            let result = join_handle.await?;
            // Task already completed; the deferred abort fires here but
            // aborting a finished Tokio task is a no-op.
            drop(cancel);
            result
        })
    })
}
pub fn handle(cx: &App) -> tokio::runtime::Handle { pub fn handle(cx: &App) -> tokio::runtime::Handle {
GlobalTokio::global(cx).runtime.handle().clone() GlobalTokio::global(cx).runtime.handle().clone()
} }