Use anyhow more idiomatically (#31052)
https://github.com/zed-industries/zed/issues/30972 brought up another case where our error context is not enough to track down the actual source of a problem: we get a generic top-level error with no inner error attached. The culprit was a top-level `.ok_or_else(|| anyhow!("failed to read HEAD SHA"))?;`. This PR reworks the way we use anyhow to reduce such issues (or at least make them simpler to bubble up in a later fix), and adopts a few more anyhow methods for better readability:

* `.ok_or_else(|| anyhow!("..."))`, `map_err`, and similar error-conversion/option-reporting calls are replaced with `context` and `with_context`
* in addition, various `anyhow!("failed to do ...")` messages are replaced with `.context("Doing ...")` to drop the parasitic `failed to` text
* `anyhow::ensure!` replaces `if ... { return Err(...); }` blocks
* `anyhow::bail!` replaces `return Err(anyhow!(...));`

(See the sketch after this list for the patterns side by side.)

Release Notes:

- N/A
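As a rough illustration of these patterns together, here is a minimal sketch; the `head_sha` helper and its inputs are hypothetical, invented for illustration, and not code from this change:

```rust
use anyhow::{Context as _, Result};
use std::collections::HashMap;

// Hypothetical helper for illustration only; not part of this PR.
fn head_sha(refs: &HashMap<String, String>, name: &str) -> Result<String> {
    // anyhow's `Context` is implemented for `Option`, so this replaces
    // `.ok_or_else(|| anyhow!("failed to read HEAD SHA"))?`.
    let sha = refs
        .get(name)
        .cloned()
        .with_context(|| format!("reading HEAD SHA for {name}"))?;

    // `ensure!` replaces `if sha.len() != 40 { return Err(anyhow!(...)); }`.
    anyhow::ensure!(sha.len() == 40, "malformed SHA: {sha}");

    // `bail!` replaces `return Err(anyhow!(...));`.
    if !sha.chars().all(|c| c.is_ascii_hexdigit()) {
        anyhow::bail!("non-hex SHA: {sha}");
    }
    Ok(sha)
}
```

A caller that adds its own `.context("Updating git status")` then gets a full chain (outer context, inner message, source error) instead of a single opaque top-level string.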
parent 1e51a7ac44 · commit 16366cf9f2

294 changed files with 2037 additions and 2610 deletions
```diff
@@ -403,7 +403,7 @@ impl AnthropicModel {
         };
 
         async move {
-            let api_key = api_key.ok_or_else(|| anyhow!("Missing Anthropic API Key"))?;
+            let api_key = api_key.context("Missing Anthropic API Key")?;
             let request =
                 anthropic::stream_completion(http_client.as_ref(), &api_url, &api_key, request);
             request.await.context("failed to stream completion")
```
```diff
@@ -365,10 +365,10 @@ struct BedrockModel {
 }
 
 impl BedrockModel {
-    fn get_or_init_client(&self, cx: &AsyncApp) -> Result<&BedrockClient, anyhow::Error> {
+    fn get_or_init_client(&self, cx: &AsyncApp) -> anyhow::Result<&BedrockClient> {
         self.client
             .get_or_try_init_blocking(|| {
-                let Ok((auth_method, credentials, endpoint, region, settings)) =
+                let (auth_method, credentials, endpoint, region, settings) =
                     cx.read_entity(&self.state, |state, _cx| {
                         let auth_method = state
                             .settings
@@ -390,10 +390,7 @@ impl BedrockModel {
                             region,
                             state.settings.clone(),
                         )
-                    })
-                else {
-                    return Err(anyhow!("App state dropped"));
-                };
+                    })?;
 
                 let mut config_builder = aws_config::defaults(BehaviorVersion::latest())
                     .stalled_stream_protection(StalledStreamProtectionConfig::disabled())
```
```diff
@@ -438,13 +435,11 @@ impl BedrockModel {
                 }
 
                 let config = self.handler.block_on(config_builder.load());
-                Ok(BedrockClient::new(&config))
+                anyhow::Ok(BedrockClient::new(&config))
             })
-            .map_err(|err| anyhow!("Failed to initialize Bedrock client: {err}"))?;
+            .context("initializing Bedrock client")?;
 
-        self.client
-            .get()
-            .ok_or_else(|| anyhow!("Bedrock client not initialized"))
+        self.client.get().context("Bedrock client not initialized")
     }
 
     fn stream_completion(
```
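The `anyhow::Ok(...)` above is doing real work: it pins the closure's error type to `anyhow::Error`, which a bare `Ok(...)` leaves ambiguous to inference. A minimal standalone sketch of the same shape (names invented, not Zed code):

```rust
use anyhow::Context as _;

fn parse_sample() -> anyhow::Result<i32> {
    // Without `anyhow::Ok`, the closure's `Result` error type would be
    // ambiguous; with it, `?` and the outer `.context` line up.
    let value = (|| {
        let parsed: i32 = "42".parse()?; // ParseIntError -> anyhow::Error via `?`
        anyhow::Ok(parsed)
    })()
    .context("parsing the sample value")?;
    Ok(value)
}
```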
```diff
@@ -544,7 +539,10 @@ impl LanguageModel for BedrockModel {
 
             region
         }) else {
-            return async move { Err(anyhow!("App State Dropped")) }.boxed();
+            return async move {
+                anyhow::bail!("App State Dropped");
+            }
+            .boxed();
         };
 
         let model_id = match self.model.cross_region_inference_id(&region) {
```
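For this early-return-from-an-async-path shape, a minimal standalone sketch, assuming the `futures` crate; the function and its inputs are invented for illustration:

```rust
use futures::{future::BoxFuture, FutureExt as _};

// Illustrative only: `bail!` inside the async block works because the block's
// output type unifies with the function's `anyhow::Result<String>`, and
// `.boxed()` erases the concrete future type in both branches.
fn resolve_region(region: Option<String>) -> BoxFuture<'static, anyhow::Result<String>> {
    let Some(region) = region else {
        return async move {
            anyhow::bail!("App State Dropped");
        }
        .boxed();
    };
    async move { Ok(region) }.boxed()
}
```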
```diff
@@ -720,7 +718,7 @@ pub fn into_bedrock(
             BedrockToolChoice::Any(BedrockAnyToolChoice::builder().build())
         }
         Some(LanguageModelToolChoice::None) => {
-            return Err(anyhow!("LanguageModelToolChoice::None is not supported"));
+            anyhow::bail!("LanguageModelToolChoice::None is not supported");
         }
     };
     let tool_config: BedrockToolConfig = BedrockToolConfig::builder()
```
```diff
@@ -615,7 +615,7 @@ impl CloudLanguageModel {
                     }
                 }
 
-                return Err(anyhow!("Forbidden"));
+                anyhow::bail!("Forbidden");
             } else if status.as_u16() >= 500 && status.as_u16() < 600 {
                 // If we encounter an error in the 500 range, retry after a delay.
                 // We've seen at least these in the wild from API providers:
@@ -626,10 +626,10 @@ impl CloudLanguageModel {
                 if retries_remaining == 0 {
                     let mut body = String::new();
                     response.body_mut().read_to_string(&mut body).await?;
-                    return Err(anyhow!(
+                    anyhow::bail!(
                         "cloud language model completion failed after {} retries with status {status}: {body}",
                         Self::MAX_RETRIES
-                    ));
+                    );
                 }
 
                 Timer::after(retry_delay).await;
```
```diff
@@ -251,7 +251,7 @@ impl DeepSeekLanguageModel {
         };
 
         let future = self.request_limiter.stream(async move {
-            let api_key = api_key.ok_or_else(|| anyhow!("Missing DeepSeek API Key"))?;
+            let api_key = api_key.context("Missing DeepSeek API Key")?;
             let request =
                 deepseek::stream_completion(http_client.as_ref(), &api_url, &api_key, request);
             let response = request.await?;
@@ -355,7 +355,7 @@ impl LanguageModel for DeepSeekLanguageModel {
                     response
                         .choices
                         .first()
-                        .ok_or_else(|| anyhow!("Empty response"))
+                        .context("Empty response")
                         .map(|choice| {
                             choice
                                 .delta
```
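Since `.context(...)` on an `Option` yields a `Result`, the subsequent `.map` in the chain above transforms the `Ok` value. Roughly, as a toy stand-in (not Zed code):

```rust
use anyhow::Context as _;

// Toy stand-in for the `.choices.first()` chain: `Option::context` turns
// `None` into an error, and `.map` then transforms the `Ok` value.
fn first_token_len(line: &str) -> anyhow::Result<usize> {
    line.split_whitespace()
        .next()
        .context("Empty response")
        .map(|token| token.len())
}
```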
```diff
@@ -279,7 +279,7 @@ impl GoogleLanguageModel {
         };
 
         async move {
-            let api_key = api_key.ok_or_else(|| anyhow!("Missing Google API key"))?;
+            let api_key = api_key.context("Missing Google API key")?;
             let request = google_ai::stream_generate_content(
                 http_client.as_ref(),
                 &api_url,
@@ -351,7 +351,7 @@ impl LanguageModel for GoogleLanguageModel {
         let api_url = settings.api_url.clone();
 
         async move {
-            let api_key = api_key.ok_or_else(|| anyhow!("Missing Google API key"))?;
+            let api_key = api_key.context("Missing Google API key")?;
             let response = google_ai::count_tokens(
                 http_client.as_ref(),
                 &api_url,
```
```diff
@@ -277,7 +277,7 @@ impl MistralLanguageModel {
         };
 
         let future = self.request_limiter.stream(async move {
-            let api_key = api_key.ok_or_else(|| anyhow!("Missing Mistral API Key"))?;
+            let api_key = api_key.context("Missing Mistral API Key")?;
             let request =
                 mistral::stream_completion(http_client.as_ref(), &api_url, &api_key, request);
             let response = request.await?;
```
```diff
@@ -265,7 +265,7 @@ impl OpenAiLanguageModel {
         };
 
         let future = self.request_limiter.stream(async move {
-            let api_key = api_key.ok_or_else(|| anyhow!("Missing OpenAI API Key"))?;
+            let api_key = api_key.context("Missing OpenAI API Key")?;
             let request = stream_completion(http_client.as_ref(), &api_url, &api_key, request);
             let response = request.await?;
             Ok(response)
```