Make eval more resilient to bad input from LLM (#29703)

I saw a slice panic (for begin > end) in a debug build of the eval. This
should just be a failed assertion, not a panic that takes out the whole
eval run!
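
For context, here is a standalone sketch of the failure mode and the shape of the fix. The input format, function name, and values are simplified assumptions for illustration, not the eval's actual code:

```rust
// Assumed format for this sketch: a code block's text starts and ends with the
// same citation line, and we want the content between them.
fn extract_content(text: &str, citation: &str, content_len: usize) -> Option<String> {
    let start_index = citation.len() + 1;

    // The old expression `content_len - (citation.len() + 1)` underflows and
    // panics in a debug build when the model's output is shorter than expected;
    // `saturating_sub` clamps to 0 instead.
    let end_index = content_len.saturating_sub(start_index);

    // Slicing with `start > end` panics in any build profile, so check the
    // range first and treat bad input as a recoverable failure.
    if start_index <= end_index && end_index <= text.len() {
        Some(text[start_index..end_index].to_string())
    } else {
        None
    }
}

fn main() {
    // Well-formed block: yields Some("fn demo() {}\n").
    let good = "src/lib.rs\nfn demo() {}\nsrc/lib.rs\n";
    println!("{:?}", extract_content(good, "src/lib.rs", good.len()));

    // Truncated block from a misbehaving model: yields None instead of panicking.
    let truncated = "src/lib.rs";
    println!("{:?}", extract_content(truncated, "src/lib.rs", truncated.len()));
}
```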

Release Notes:

- N/A
Richard Feldman 2025-04-30 18:13:45 -04:00 committed by GitHub
parent 92dd6b67c7
commit afeb3d4fd9

@@ -98,8 +98,17 @@ impl Example for CodeBlockCitations {
                     if let Some(content_len) = content_len {
                         // + 1 because there's a newline character after the citation.
-                        let content =
-                            &text[(citation.len() + 1)..content_len - (citation.len() + 1)];
+                        let start_index = citation.len() + 1;
+                        let end_index = content_len.saturating_sub(start_index);
+                        if cx
+                            .assert(
+                                start_index <= end_index,
+                                "Code block had a valid citation",
+                            )
+                            .is_ok()
+                        {
+                            let content = &text[start_index..end_index];
                             // deindent (trim the start of each line) because sometimes the model
                             // chooses to deindent its code snippets for the sake of readability,
@@ -129,8 +138,8 @@ impl Example for CodeBlockCitations {
                                 if let Some(end_col) = range.end.col {
                                     let last_line = snippet.lines().last().unwrap();
-                                    snippet = snippet
-                                        [..snippet.len() - last_line.len() + end_col as usize]
+                                    snippet = snippet[..snippet.len() - last_line.len()
+                                        + end_col as usize]
                                         .to_string();
                                 }
@@ -150,6 +159,7 @@ impl Example for CodeBlockCitations {
                                     }
                                 }
                             }
+                        }
                     } else {
                         cx.assert(
                             false,