Reopen windows concurrently (#33784)
Closes #21130

Release Notes:

- Now when Zed reopens windows, they all reopen concurrently instead of one after another.
This commit is contained in:
parent 5f70a9cf59
commit 64c413b9b6

1 changed file with 96 additions and 26 deletions
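The change itself is in the diff below. As a rough illustration of the pattern it adopts, here is a minimal, self-contained sketch using only the futures crate; the open_workspace helper, the paths, and the printed message are made up for the example, and Zed's real code spawns gpui tasks via cx.spawn rather than plain futures:

// Sketch only: each workspace open becomes a future, join_all drives them
// concurrently, and failures are tallied instead of aborting on the first `?`.
use futures::future;

async fn open_workspace(path: &str) -> Result<(), String> {
    // Stand-in for workspace::open_paths / recent_projects::open_ssh_project.
    if path.is_empty() {
        Err("empty path".to_string())
    } else {
        Ok(())
    }
}

fn main() {
    // Hypothetical restorable locations.
    let locations = ["~/projects/zed", "", "~/notes"];

    futures::executor::block_on(async {
        // Collect one future per location; nothing is awaited yet.
        let tasks: Vec<_> = locations.iter().map(|path| open_workspace(path)).collect();

        // join_all polls all of them concurrently and returns every result,
        // successes and failures alike.
        let results = future::join_all(tasks).await;

        let error_count = results.iter().filter(|r| r.is_err()).count();
        if error_count > 0 {
            eprintln!("Failed to restore {error_count} workspace(s). Check logs for details.");
        }
    });
}

The point is that every open is awaited together via future::join_all, so one slow or failing workspace no longer blocks or aborts the rest of the restore.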
@@ -44,7 +44,10 @@ use theme::{
 use util::{ConnectionResult, ResultExt, TryFutureExt, maybe};
 use uuid::Uuid;
 use welcome::{BaseKeymap, FIRST_OPEN, show_welcome_view};
-use workspace::{AppState, SerializedWorkspaceLocation, WorkspaceSettings, WorkspaceStore};
+use workspace::{
+    AppState, SerializedWorkspaceLocation, Toast, Workspace, WorkspaceSettings, WorkspaceStore,
+    notifications::NotificationId,
+};
 use zed::{
     OpenListener, OpenRequest, RawOpenRequest, app_menus, build_window_options,
     derive_paths_with_position, handle_cli_connection, handle_keymap_file_changes,
@@ -887,40 +890,107 @@ async fn installation_id() -> Result<IdType> {
 
 async fn restore_or_create_workspace(app_state: Arc<AppState>, cx: &mut AsyncApp) -> Result<()> {
     if let Some(locations) = restorable_workspace_locations(cx, &app_state).await {
+        let mut tasks = Vec::new();
+
         for location in locations {
             match location {
                 SerializedWorkspaceLocation::Local(location, _) => {
-                    let task = cx.update(|cx| {
-                        workspace::open_paths(
-                            location.paths().as_ref(),
-                            app_state.clone(),
-                            workspace::OpenOptions::default(),
-                            cx,
-                        )
-                    })?;
-                    task.await?;
+                    let app_state = app_state.clone();
+                    let paths = location.paths().to_vec();
+                    let task = cx.spawn(async move |cx| {
+                        let open_task = cx.update(|cx| {
+                            workspace::open_paths(
+                                &paths,
+                                app_state,
+                                workspace::OpenOptions::default(),
+                                cx,
+                            )
+                        })?;
+                        open_task.await.map(|_| ())
+                    });
+                    tasks.push(task);
                 }
                 SerializedWorkspaceLocation::Ssh(ssh) => {
-                    let connection_options = cx.update(|cx| {
-                        SshSettings::get_global(cx)
-                            .connection_options_for(ssh.host, ssh.port, ssh.user)
-                    })?;
                     let app_state = app_state.clone();
-                    cx.spawn(async move |cx| {
-                        recent_projects::open_ssh_project(
-                            connection_options,
-                            ssh.paths.into_iter().map(PathBuf::from).collect(),
-                            app_state,
-                            workspace::OpenOptions::default(),
-                            cx,
-                        )
-                        .await
-                        .log_err();
-                    })
-                    .detach();
+                    let ssh_host = ssh.host.clone();
+                    let task = cx.spawn(async move |cx| {
+                        let connection_options = cx.update(|cx| {
+                            SshSettings::get_global(cx)
+                                .connection_options_for(ssh.host, ssh.port, ssh.user)
+                        });
+
+                        match connection_options {
+                            Ok(connection_options) => recent_projects::open_ssh_project(
+                                connection_options,
+                                ssh.paths.into_iter().map(PathBuf::from).collect(),
+                                app_state,
+                                workspace::OpenOptions::default(),
+                                cx,
+                            )
+                            .await
+                            .map_err(|e| anyhow::anyhow!(e)),
+                            Err(e) => Err(anyhow::anyhow!(
+                                "Failed to get SSH connection options for {}: {}",
+                                ssh_host,
+                                e
+                            )),
+                        }
+                    });
+                    tasks.push(task);
                 }
             }
         }
+
+        // Wait for all workspaces to open concurrently
+        let results = future::join_all(tasks).await;
+
+        // Show notifications for any errors that occurred
+        let mut error_count = 0;
+        for result in results {
+            if let Err(e) = result {
+                log::error!("Failed to restore workspace: {}", e);
+                error_count += 1;
+            }
+        }
+
+        if error_count > 0 {
+            let message = if error_count == 1 {
+                "Failed to restore 1 workspace. Check logs for details.".to_string()
+            } else {
+                format!(
+                    "Failed to restore {} workspaces. Check logs for details.",
+                    error_count
+                )
+            };
+
+            // Try to find an active workspace to show the toast
+            let toast_shown = cx
+                .update(|cx| {
+                    if let Some(window) = cx.active_window() {
+                        if let Some(workspace) = window.downcast::<Workspace>() {
+                            workspace
+                                .update(cx, |workspace, _, cx| {
+                                    workspace.show_toast(
+                                        Toast::new(NotificationId::unique::<()>(), message),
+                                        cx,
+                                    )
+                                })
+                                .ok();
+                            return true;
+                        }
+                    }
+                    false
+                })
+                .unwrap_or(false);
+
+            // If we couldn't show a toast (no windows opened successfully),
+            // we've already logged the errors above, so the user can check logs
+            if !toast_shown {
+                log::error!(
+                    "Failed to show notification for window restoration errors, because no workspace windows were available."
+                );
+            }
+        }
     } else if matches!(KEY_VALUE_STORE.read_kvp(FIRST_OPEN), Ok(None)) {
         cx.update(|cx| show_welcome_view(app_state, cx))?.await?;
     } else {
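A note on the choice of future::join_all here, as opposed to something like try_join_all: it does not short-circuit, so a workspace that fails to restore does not cancel the opens still in flight; errors are only counted afterwards and surfaced through a single toast on whichever workspace window is active. A tiny standalone check of that behavior, again using plain futures rather than gpui tasks, not Zed code:

use futures::future;

fn main() {
    // Three already-resolved "opens", one of which failed.
    let opens = vec![
        future::ready(Ok::<&str, String>("workspace a")),
        future::ready(Err("workspace b failed".to_string())),
        future::ready(Ok("workspace c")),
    ];

    // join_all returns every result, in order, even though one is an Err.
    let results = futures::executor::block_on(future::join_all(opens));

    assert_eq!(results.len(), 3);
    assert!(results[0].is_ok());
    assert!(results[1].is_err());
    assert!(results[2].is_ok());
}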