Fix crash in collab when sending worktree updates (#19678)

This pull request does a few things:

- In 29c2df73e1, we introduced a safety guard that prevents this crash from recurring by returning an error instead of panicking when the payload is too large.
- In 3e7a2e5c30, we introduced chunking for updates coming from SSH servers (previously, we were sending the whole changeset and the initial set of paths in a single message). A simplified sketch of the chunking idea follows below.
- In 122b5b4, we introduced a panic hook that sends panics to Axiom.
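
To make the second bullet concrete, here is a minimal sketch of the idea behind `proto::split_worktree_update`: one oversized `UpdateWorktree` becomes a stream of messages that each carry at most `MAX_CHUNK_SIZE` entries. The types below are simplified stand-ins (the real message carries many more fields), and the `is_last_update` handling is an assumption for illustration; only the 256-entry chunk size is taken from the diff further down.

```
// Minimal sketch, not the real proto::split_worktree_update: the types are
// simplified stand-ins and the `is_last_update` handling is assumed.
const MAX_CHUNK_SIZE: usize = 256;

#[derive(Clone, Debug, Default)]
struct UpdateWorktree {
    updated_entries: Vec<u64>,
    removed_entries: Vec<u64>,
    // Set only on the final chunk so the receiver knows the update is complete.
    is_last_update: bool,
}

fn split_worktree_update(update: UpdateWorktree) -> impl Iterator<Item = UpdateWorktree> {
    let mut updated = update.updated_entries.into_iter();
    let mut removed = update.removed_entries.into_iter();
    let mut done = false;
    std::iter::from_fn(move || {
        if done {
            return None;
        }
        let updated_entries: Vec<_> = updated.by_ref().take(MAX_CHUNK_SIZE).collect();
        let removed_entries: Vec<_> = removed.by_ref().take(MAX_CHUNK_SIZE).collect();
        // If neither side filled a whole chunk, nothing is left to send after this one.
        done = updated_entries.len() < MAX_CHUNK_SIZE && removed_entries.len() < MAX_CHUNK_SIZE;
        Some(UpdateWorktree {
            updated_entries,
            removed_entries,
            is_last_update: done,
        })
    })
}

fn main() {
    let big_update = UpdateWorktree {
        updated_entries: (0..1000).collect(),
        ..Default::default()
    };
    // 1000 entries with a chunk size of 256 yields 4 messages: 256 + 256 + 256 + 232.
    for (i, chunk) in split_worktree_update(big_update).enumerate() {
        println!(
            "chunk {i}: {} updated entries, last = {}",
            chunk.updated_entries.len(),
            chunk.is_last_update
        );
    }
}
```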

For posterity, this is how we figured out what the panic was:

```
kubectl logs current-pod-name --previous --namespace=production
```
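
(`kubectl logs --previous` returns the logs of the pod's previously terminated container, so the panic message and backtrace were still retrievable even though the crashed process had already been restarted.)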

Release Notes:

- N/A

---------

Co-authored-by: Thorsten <thorsten@zed.dev>
Co-authored-by: Bennet <bennet@zed.dev>
Co-authored-by: Kirill <kirill@zed.dev>
Antonio Scandurra, 2024-10-24 15:57:24 +02:00 (committed by GitHub)
commit 499e1459eb, parent b5aea548a8
5 changed files with 57 additions and 28 deletions


@@ -272,6 +272,16 @@ impl Database {
        update: &proto::UpdateWorktree,
        connection: ConnectionId,
    ) -> Result<TransactionGuard<Vec<ConnectionId>>> {
        if update.removed_entries.len() > proto::MAX_WORKTREE_UPDATE_MAX_CHUNK_SIZE
            || update.updated_entries.len() > proto::MAX_WORKTREE_UPDATE_MAX_CHUNK_SIZE
        {
            return Err(anyhow!(
                "invalid worktree update. removed entries: {}, updated entries: {}",
                update.removed_entries.len(),
                update.updated_entries.len()
            ))?;
        }
        let project_id = ProjectId::from_proto(update.project_id);
        let worktree_id = update.worktree_id as i64;
        self.project_transaction(project_id, |tx| async move {


@@ -84,6 +84,8 @@ async fn main() -> Result<()> {
    let config = envy::from_env::<Config>().expect("error loading config");
    init_tracing(&config);
    init_panic_hook();
    let mut app = Router::new()
        .route("/", get(handle_root))
        .route("/healthz", get(handle_liveness_probe))
@@ -378,3 +380,20 @@ pub fn init_tracing(config: &Config) -> Option<()> {
    None
}

fn init_panic_hook() {
    std::panic::set_hook(Box::new(move |panic_info| {
        let panic_message = match panic_info.payload().downcast_ref::<&'static str>() {
            Some(message) => *message,
            None => match panic_info.payload().downcast_ref::<String>() {
                Some(message) => message.as_str(),
                None => "Box<Any>",
            },
        };
        let backtrace = std::backtrace::Backtrace::force_capture();
        let location = panic_info
            .location()
            .map(|loc| format!("{}:{}", loc.file(), loc.line()));
        tracing::error!(panic = true, ?location, %panic_message, %backtrace, "Server Panic");
    }));
}


@@ -1713,11 +1713,6 @@ fn notify_rejoined_projects(
    for project in rejoined_projects {
        for worktree in mem::take(&mut project.worktrees) {
            #[cfg(any(test, feature = "test-support"))]
            const MAX_CHUNK_SIZE: usize = 2;
            #[cfg(not(any(test, feature = "test-support")))]
            const MAX_CHUNK_SIZE: usize = 256;
            // Stream this worktree's entries.
            let message = proto::UpdateWorktree {
                project_id: project.id.to_proto(),
@@ -1731,7 +1726,7 @@
                updated_repositories: worktree.updated_repositories,
                removed_repositories: worktree.removed_repositories,
            };
            for update in proto::split_worktree_update(message, MAX_CHUNK_SIZE) {
            for update in proto::split_worktree_update(message) {
                session.peer.send(session.connection_id, update.clone())?;
            }
@@ -2195,11 +2190,6 @@ fn join_project_internal(
    })?;
    for (worktree_id, worktree) in mem::take(&mut project.worktrees) {
        #[cfg(any(test, feature = "test-support"))]
        const MAX_CHUNK_SIZE: usize = 2;
        #[cfg(not(any(test, feature = "test-support")))]
        const MAX_CHUNK_SIZE: usize = 256;
        // Stream this worktree's entries.
        let message = proto::UpdateWorktree {
            project_id: project_id.to_proto(),
@@ -2213,7 +2203,7 @@
            updated_repositories: worktree.repository_entries.into_values().collect(),
            removed_repositories: Default::default(),
        };
        for update in proto::split_worktree_update(message, MAX_CHUNK_SIZE) {
        for update in proto::split_worktree_update(message) {
            session.peer.send(session.connection_id, update.clone())?;
        }