More resilient eval (#32257)

Bubbles rate-limit information up the stack so that, when needed, callers higher up can retry after the duration the provider reports.
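
Below is a minimal sketch of what such a retry loop higher up the stack could look like once the rate-limit duration is surfaced. The `RateLimit(Duration)` variant mirrors the `LanguageModelCompletionError` mapping in the diff; the local enum, `run_eval_step`, and the retry policy are illustrative stand-ins rather than the actual eval code.

```rust
use std::time::Duration;

// Local stand-in for the error type in the diff; only the variants needed here.
#[derive(Debug)]
#[allow(dead_code)]
enum LanguageModelCompletionError {
    RateLimit(Duration),
    Other(String),
}

// Hypothetical single eval step; here it fails with a rate limit on the first attempts.
fn run_eval_step(attempt: u32) -> Result<String, LanguageModelCompletionError> {
    if attempt < 2 {
        Err(LanguageModelCompletionError::RateLimit(Duration::from_millis(50)))
    } else {
        Ok("done".to_string())
    }
}

// Retry loop: when the provider reports a rate limit, wait the surfaced duration
// and try again, up to a maximum number of attempts.
fn run_with_retries(max_attempts: u32) -> Result<String, LanguageModelCompletionError> {
    let mut attempt = 0;
    loop {
        match run_eval_step(attempt) {
            Ok(output) => return Ok(output),
            Err(LanguageModelCompletionError::RateLimit(wait)) if attempt + 1 < max_attempts => {
                std::thread::sleep(wait);
                attempt += 1;
            }
            Err(err) => return Err(err),
        }
    }
}

fn main() {
    match run_with_retries(5) {
        Ok(output) => println!("eval succeeded: {output}"),
        Err(err) => eprintln!("eval failed: {err:?}"),
    }
}
```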

Also caps the number of evals running concurrently, which further helps avoid hitting rate limits.
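
And a rough sketch of one way to cap concurrency: the diff does not include the eval crate's actual implementation, so the semaphore, the `MAX_CONCURRENT_EVALS` value, and the thread-based setup here are assumptions made for illustration.

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

// Hypothetical cap; the value actually used by the eval crate is not shown in this diff.
const MAX_CONCURRENT_EVALS: usize = 3;

// Minimal counting semaphore built from a Mutex and a Condvar.
struct Semaphore {
    permits: Mutex<usize>,
    available: Condvar,
}

impl Semaphore {
    fn new(permits: usize) -> Self {
        Self {
            permits: Mutex::new(permits),
            available: Condvar::new(),
        }
    }

    // Block until a permit is free, then take it.
    fn acquire(&self) {
        let mut permits = self.permits.lock().unwrap();
        while *permits == 0 {
            permits = self.available.wait(permits).unwrap();
        }
        *permits -= 1;
    }

    // Return a permit and wake one waiting eval.
    fn release(&self) {
        *self.permits.lock().unwrap() += 1;
        self.available.notify_one();
    }
}

fn main() {
    let semaphore = Arc::new(Semaphore::new(MAX_CONCURRENT_EVALS));
    let handles: Vec<_> = (0..10)
        .map(|eval_id| {
            let semaphore = Arc::clone(&semaphore);
            thread::spawn(move || {
                // At most MAX_CONCURRENT_EVALS evals get past this point at once.
                semaphore.acquire();
                println!("running eval {eval_id}");
                thread::sleep(Duration::from_millis(100)); // stand-in for the real eval work
                semaphore.release();
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
}
```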

Release Notes:

- N/A
Ben Brandt 2025-06-09 20:07:22 +02:00 committed by GitHub
parent fa54fa80d0
commit e4bd115a63
22 changed files with 147 additions and 56 deletions

@@ -387,22 +387,34 @@ impl AnthropicModel {
&self,
request: anthropic::Request,
cx: &AsyncApp,
) -> BoxFuture<'static, Result<BoxStream<'static, Result<anthropic::Event, AnthropicError>>>>
{
) -> BoxFuture<
'static,
Result<
BoxStream<'static, Result<anthropic::Event, AnthropicError>>,
LanguageModelCompletionError,
>,
> {
let http_client = self.http_client.clone();
let Ok((api_key, api_url)) = cx.read_entity(&self.state, |state, cx| {
let settings = &AllLanguageModelSettings::get_global(cx).anthropic;
(state.api_key.clone(), settings.api_url.clone())
}) else {
return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
return futures::future::ready(Err(anyhow!("App state dropped").into())).boxed();
};
async move {
let api_key = api_key.context("Missing Anthropic API Key")?;
let request =
anthropic::stream_completion(http_client.as_ref(), &api_url, &api_key, request);
request.await.context("failed to stream completion")
request.await.map_err(|err| match err {
AnthropicError::RateLimit(duration) => {
LanguageModelCompletionError::RateLimit(duration)
}
err @ (AnthropicError::ApiError(..) | AnthropicError::Other(..)) => {
LanguageModelCompletionError::Other(anthropic_err_to_anyhow(err))
}
})
}
.boxed()
}
@@ -473,6 +485,7 @@ impl LanguageModel for AnthropicModel {
'static,
Result<
BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
LanguageModelCompletionError,
>,
> {
let request = into_anthropic(
@@ -484,12 +497,7 @@ impl LanguageModel for AnthropicModel {
);
let request = self.stream_completion(request, cx);
let future = self.request_limiter.stream(async move {
let response = request
.await
.map_err(|err| match err.downcast::<AnthropicError>() {
Ok(anthropic_err) => anthropic_err_to_anyhow(anthropic_err),
Err(err) => anyhow!(err),
})?;
let response = request.await?;
Ok(AnthropicEventMapper::new().map_stream(response))
});
async move { Ok(future.await?.boxed()) }.boxed()

@@ -527,6 +527,7 @@ impl LanguageModel for BedrockModel {
'static,
Result<
BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
LanguageModelCompletionError,
>,
> {
let Ok(region) = cx.read_entity(&self.state, |state, _cx| {
@@ -539,16 +540,13 @@ impl LanguageModel for BedrockModel {
.or(settings_region)
.unwrap_or(String::from("us-east-1"))
}) else {
return async move {
anyhow::bail!("App State Dropped");
}
.boxed();
return async move { Err(anyhow::anyhow!("App State Dropped").into()) }.boxed();
};
let model_id = match self.model.cross_region_inference_id(&region) {
Ok(s) => s,
Err(e) => {
return async move { Err(e) }.boxed();
return async move { Err(e.into()) }.boxed();
}
};
@@ -560,7 +558,7 @@ impl LanguageModel for BedrockModel {
self.model.mode(),
) {
Ok(request) => request,
Err(err) => return futures::future::ready(Err(err)).boxed(),
Err(err) => return futures::future::ready(Err(err.into())).boxed(),
};
let owned_handle = self.handler.clone();

@@ -807,6 +807,7 @@ impl LanguageModel for CloudLanguageModel {
'static,
Result<
BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
LanguageModelCompletionError,
>,
> {
let thread_id = request.thread_id.clone();
@@ -848,7 +849,8 @@ impl LanguageModel for CloudLanguageModel {
mode,
provider: zed_llm_client::LanguageModelProvider::Anthropic,
model: request.model.clone(),
provider_request: serde_json::to_value(&request)?,
provider_request: serde_json::to_value(&request)
.map_err(|e| anyhow!(e))?,
},
)
.await
@@ -884,7 +886,7 @@ impl LanguageModel for CloudLanguageModel {
let client = self.client.clone();
let model = match open_ai::Model::from_id(&self.model.id.0) {
Ok(model) => model,
Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
Err(err) => return async move { Err(anyhow!(err).into()) }.boxed(),
};
let request = into_open_ai(request, &model, None);
let llm_api_token = self.llm_api_token.clone();
@@ -905,7 +907,8 @@ impl LanguageModel for CloudLanguageModel {
mode,
provider: zed_llm_client::LanguageModelProvider::OpenAi,
model: request.model.clone(),
provider_request: serde_json::to_value(&request)?,
provider_request: serde_json::to_value(&request)
.map_err(|e| anyhow!(e))?,
},
)
.await?;
@@ -944,7 +947,8 @@ impl LanguageModel for CloudLanguageModel {
mode,
provider: zed_llm_client::LanguageModelProvider::Google,
model: request.model.model_id.clone(),
provider_request: serde_json::to_value(&request)?,
provider_request: serde_json::to_value(&request)
.map_err(|e| anyhow!(e))?,
},
)
.await?;

@@ -265,13 +265,15 @@ impl LanguageModel for CopilotChatLanguageModel {
'static,
Result<
BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
LanguageModelCompletionError,
>,
> {
if let Some(message) = request.messages.last() {
if message.contents_empty() {
const EMPTY_PROMPT_MSG: &str =
"Empty prompts aren't allowed. Please provide a non-empty prompt.";
return futures::future::ready(Err(anyhow::anyhow!(EMPTY_PROMPT_MSG))).boxed();
return futures::future::ready(Err(anyhow::anyhow!(EMPTY_PROMPT_MSG).into()))
.boxed();
}
// Copilot Chat has a restriction that the final message must be from the user.
@@ -279,13 +281,13 @@ impl LanguageModel for CopilotChatLanguageModel {
// and provide a more helpful error message.
if !matches!(message.role, Role::User) {
const USER_ROLE_MSG: &str = "The final message must be from the user. To provide a system prompt, you must provide the system prompt followed by a user prompt.";
return futures::future::ready(Err(anyhow::anyhow!(USER_ROLE_MSG))).boxed();
return futures::future::ready(Err(anyhow::anyhow!(USER_ROLE_MSG).into())).boxed();
}
}
let copilot_request = match into_copilot_chat(&self.model, request) {
Ok(request) => request,
Err(err) => return futures::future::ready(Err(err)).boxed(),
Err(err) => return futures::future::ready(Err(err.into())).boxed(),
};
let is_streaming = copilot_request.stream;

@@ -348,6 +348,7 @@ impl LanguageModel for DeepSeekLanguageModel {
'static,
Result<
BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
LanguageModelCompletionError,
>,
> {
let request = into_deepseek(request, &self.model, self.max_output_tokens());

@@ -409,6 +409,7 @@ impl LanguageModel for GoogleLanguageModel {
'static,
Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
>,
LanguageModelCompletionError,
>,
> {
let request = into_google(

@@ -420,6 +420,7 @@ impl LanguageModel for LmStudioLanguageModel {
'static,
Result<
BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
LanguageModelCompletionError,
>,
> {
let request = self.to_lmstudio_request(request);

@@ -364,6 +364,7 @@ impl LanguageModel for MistralLanguageModel {
'static,
Result<
BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
LanguageModelCompletionError,
>,
> {
let request = into_mistral(

@@ -406,6 +406,7 @@ impl LanguageModel for OllamaLanguageModel {
'static,
Result<
BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
LanguageModelCompletionError,
>,
> {
let request = self.to_ollama_request(request);
@@ -415,7 +416,7 @@ impl LanguageModel for OllamaLanguageModel {
let settings = &AllLanguageModelSettings::get_global(cx).ollama;
settings.api_url.clone()
}) else {
return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
return futures::future::ready(Err(anyhow!("App state dropped").into())).boxed();
};
let future = self.request_limiter.stream(async move {

@@ -339,6 +339,7 @@ impl LanguageModel for OpenAiLanguageModel {
'static,
Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
>,
LanguageModelCompletionError,
>,
> {
let request = into_open_ai(request, &self.model, self.max_output_tokens());

@@ -367,6 +367,7 @@ impl LanguageModel for OpenRouterLanguageModel {
'static,
Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
>,
LanguageModelCompletionError,
>,
> {
let request = into_open_router(request, &self.model, self.max_output_tokens());