{
start: Point<P>,
current: Point<P>,
contour_count: usize,
- base_scale: f32,
}
impl Path<Pixels> {
@@ -702,35 +707,25 @@ impl Path<Pixels> {
content_mask: Default::default(),
color: Default::default(),
contour_count: 0,
- base_scale: 1.0,
}
}
- /// Set the base scale of the path.
- pub fn scale(mut self, factor: f32) -> Self {
- self.base_scale = factor;
- self
- }
-
- /// Apply a scale to the path.
- pub(crate) fn apply_scale(&self, factor: f32) -> Path<ScaledPixels> {
+ /// Scale this path by the given factor.
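+ /// The result is in scaled (device) pixels; st coordinates are resolution-independent and pass through unchanged.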
+ pub fn scale(&self, factor: f32) -> Path<ScaledPixels> {
Path {
id: self.id,
order: self.order,
- bounds: self.bounds.scale(self.base_scale * factor),
- content_mask: self.content_mask.scale(self.base_scale * factor),
+ bounds: self.bounds.scale(factor),
+ content_mask: self.content_mask.scale(factor),
vertices: self
.vertices
.iter()
- .map(|vertex| vertex.scale(self.base_scale * factor))
+ .map(|vertex| vertex.scale(factor))
.collect(),
- start: self
- .start
- .map(|start| start.scale(self.base_scale * factor)),
- current: self.current.scale(self.base_scale * factor),
+ start: self.start.map(|start| start.scale(factor)),
+ current: self.current.scale(factor),
contour_count: self.contour_count,
color: self.color,
- base_scale: 1.0,
}
}
@@ -745,7 +740,10 @@ impl Path<Pixels> {
pub fn line_to(&mut self, to: Point<Pixels>) {
self.contour_count += 1;
if self.contour_count > 1 {
- self.push_triangle((self.start, self.current, to));
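+ // A constant st of (0., 1.) on all three vertices marks a plain fill
+ // triangle, in contrast to the curve triangles emitted by curve_to.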
+ self.push_triangle(
+ (self.start, self.current, to),
+ (point(0., 1.), point(0., 1.), point(0., 1.)),
+ );
}
self.current = to;
}
@@ -754,15 +752,25 @@ impl Path<Pixels> {
pub fn curve_to(&mut self, to: Point<Pixels>, ctrl: Point<Pixels>) {
self.contour_count += 1;
if self.contour_count > 1 {
- self.push_triangle((self.start, self.current, to));
+ self.push_triangle(
+ (self.start, self.current, to),
+ (point(0., 1.), point(0., 1.), point(0., 1.)),
+ );
}
- self.push_triangle((self.current, ctrl, to));
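+ // The triangle spanning current, ctrl, and to carries Loop/Blinn-style
+ // quadratic coordinates (0,0), (0.5,0), (1,1); the rasterizer derives
+ // curve coverage from these interpolated values.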
+ self.push_triangle(
+ (self.current, ctrl, to),
+ (point(0., 0.), point(0.5, 0.), point(1., 1.)),
+ );
self.current = to;
}
/// Push a triangle to the Path.
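+ /// `st` supplies one curve-space coordinate per vertex, which is
+ /// interpolated across the triangle during rasterization.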
- pub fn push_triangle(&mut self, xy: (Point<Pixels>, Point<Pixels>, Point<Pixels>)) {
+ pub fn push_triangle(
+ &mut self,
+ xy: (Point<Pixels>, Point<Pixels>, Point<Pixels>),
+ st: (Point<f32>, Point<f32>, Point<f32>),
+ ) {
self.bounds = self
.bounds
.union(&Bounds {
@@ -780,14 +788,17 @@ impl Path {
self.vertices.push(PathVertex {
xy_position: xy.0,
+ st_position: st.0,
content_mask: Default::default(),
});
self.vertices.push(PathVertex {
xy_position: xy.1,
+ st_position: st.1,
content_mask: Default::default(),
});
self.vertices.push(PathVertex {
xy_position: xy.2,
+ st_position: st.2,
content_mask: Default::default(),
});
}
@@ -803,6 +814,7 @@ impl From<Path<ScaledPixels>> for Primitive {
#[repr(C)]
pub(crate) struct PathVertex<P: Clone + Default + Debug> {
pub(crate) xy_position: Point<P>,
+ pub(crate) st_position: Point<f32>,
pub(crate) content_mask: ContentMask<P>,
}
@@ -810,6 +822,7 @@ impl PathVertex<Pixels> {
pub fn scale(&self, factor: f32) -> PathVertex<ScaledPixels> {
PathVertex {
xy_position: self.xy_position.scale(factor),
+ st_position: self.st_position,
content_mask: self.content_mask.scale(factor),
}
}
diff --git a/crates/gpui/src/window.rs b/crates/gpui/src/window.rs
index 8c01b8afcf..be3b753d6a 100644
--- a/crates/gpui/src/window.rs
+++ b/crates/gpui/src/window.rs
@@ -2633,7 +2633,7 @@ impl Window {
path.color = color.opacity(opacity);
self.next_frame
.scene
- .insert_primitive(path.apply_scale(scale_factor));
+ .insert_primitive(path.scale(scale_factor));
}
/// Paint an underline into the scene for the next frame at the current z-index.
diff --git a/crates/icons/src/icons.rs b/crates/icons/src/icons.rs
index 332e38b038..332a8d5791 100644
--- a/crates/icons/src/icons.rs
+++ b/crates/icons/src/icons.rs
@@ -20,6 +20,7 @@ pub enum IconName {
AiOpenAi,
AiOpenRouter,
AiVZero,
+ AiXAi,
AiZed,
ArrowCircle,
ArrowDown,
diff --git a/crates/language_model/src/language_model.rs b/crates/language_model/src/language_model.rs
index 81a0f7d8a1..6bd33fcdf5 100644
--- a/crates/language_model/src/language_model.rs
+++ b/crates/language_model/src/language_model.rs
@@ -116,6 +116,12 @@ pub enum LanguageModelCompletionError {
provider: LanguageModelProviderName,
message: String,
},
+ #[error("{message}")]
+ UpstreamProviderError {
+ message: String,
+ status: StatusCode,
+ retry_after: Option<Duration>,
+ },
#[error("HTTP response error from {provider}'s API: status {status_code} - {message:?}")]
HttpResponseError {
provider: LanguageModelProviderName,
@@ -178,6 +184,21 @@ pub enum LanguageModelCompletionError {
}
impl LanguageModelCompletionError {
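+ /// Extracts the upstream status code and human-readable message from an
+ /// `upstream_http_error` JSON payload, falling back to the raw payload
+ /// when no `message` field is present.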
+ fn parse_upstream_error_json(message: &str) -> Option<(StatusCode, String)> {
+ let error_json = serde_json::from_str::<serde_json::Value>(message).ok()?;
+ let upstream_status = error_json
+ .get("upstream_status")
+ .and_then(|v| v.as_u64())
+ .and_then(|status| u16::try_from(status).ok())
+ .and_then(|status| StatusCode::from_u16(status).ok())?;
+ let inner_message = error_json
+ .get("message")
+ .and_then(|v| v.as_str())
+ .unwrap_or(message)
+ .to_string();
+ Some((upstream_status, inner_message))
+ }
+
pub fn from_cloud_failure(
upstream_provider: LanguageModelProviderName,
code: String,
@@ -191,6 +212,18 @@ impl LanguageModelCompletionError {
Self::PromptTooLarge {
tokens: Some(tokens),
}
+ } else if code == "upstream_http_error" {
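+ // The cloud proxy reports upstream provider failures as a JSON payload;
+ // map them onto the same variants a direct HTTP error from the provider
+ // would produce.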
+ if let Some((upstream_status, inner_message)) =
+ Self::parse_upstream_error_json(&message)
+ {
+ return Self::from_http_status(
+ upstream_provider,
+ upstream_status,
+ inner_message,
+ retry_after,
+ );
+ }
+ anyhow!("completion request failed, code: {code}, message: {message}").into()
} else if let Some(status_code) = code
.strip_prefix("upstream_http_")
.and_then(|code| StatusCode::from_str(code).ok())
@@ -701,3 +734,104 @@ impl From<String> for LanguageModelProviderName {
Self(SharedString::from(value))
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_from_cloud_failure_with_upstream_http_error() {
+ let error = LanguageModelCompletionError::from_cloud_failure(
+ String::from("anthropic").into(),
+ "upstream_http_error".to_string(),
+ r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers. reset reason: connection timeout","upstream_status":503}"#.to_string(),
+ None,
+ );
+
+ match error {
+ LanguageModelCompletionError::ServerOverloaded { provider, .. } => {
+ assert_eq!(provider.0, "anthropic");
+ }
+ _ => panic!(
+ "Expected ServerOverloaded error for 503 status, got: {:?}",
+ error
+ ),
+ }
+
+ let error = LanguageModelCompletionError::from_cloud_failure(
+ String::from("anthropic").into(),
+ "upstream_http_error".to_string(),
+ r#"{"code":"upstream_http_error","message":"Internal server error","upstream_status":500}"#.to_string(),
+ None,
+ );
+
+ match error {
+ LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
+ assert_eq!(provider.0, "anthropic");
+ assert_eq!(message, "Internal server error");
+ }
+ _ => panic!(
+ "Expected ApiInternalServerError for 500 status, got: {:?}",
+ error
+ ),
+ }
+ }
+
+ #[test]
+ fn test_from_cloud_failure_with_standard_format() {
+ let error = LanguageModelCompletionError::from_cloud_failure(
+ String::from("anthropic").into(),
+ "upstream_http_503".to_string(),
+ "Service unavailable".to_string(),
+ None,
+ );
+
+ match error {
+ LanguageModelCompletionError::ServerOverloaded { provider, .. } => {
+ assert_eq!(provider.0, "anthropic");
+ }
+ _ => panic!("Expected ServerOverloaded error for upstream_http_503"),
+ }
+ }
+
+ #[test]
+ fn test_upstream_http_error_connection_timeout() {
+ let error = LanguageModelCompletionError::from_cloud_failure(
+ String::from("anthropic").into(),
+ "upstream_http_error".to_string(),
+ r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers. reset reason: connection timeout","upstream_status":503}"#.to_string(),
+ None,
+ );
+
+ match error {
+ LanguageModelCompletionError::ServerOverloaded { provider, .. } => {
+ assert_eq!(provider.0, "anthropic");
+ }
+ _ => panic!(
+ "Expected ServerOverloaded error for connection timeout with 503 status, got: {:?}",
+ error
+ ),
+ }
+
+ let error = LanguageModelCompletionError::from_cloud_failure(
+ String::from("anthropic").into(),
+ "upstream_http_error".to_string(),
+ r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers. reset reason: connection timeout","upstream_status":500}"#.to_string(),
+ None,
+ );
+
+ match error {
+ LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
+ assert_eq!(provider.0, "anthropic");
+ assert_eq!(
+ message,
+ "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers. reset reason: connection timeout"
+ );
+ }
+ _ => panic!(
+ "Expected ApiInternalServerError for connection timeout with 500 status, got: {:?}",
+ error
+ ),
+ }
+ }
+}
diff --git a/crates/language_models/Cargo.toml b/crates/language_models/Cargo.toml
index 514443ddec..e928df8a74 100644
--- a/crates/language_models/Cargo.toml
+++ b/crates/language_models/Cargo.toml
@@ -44,6 +44,7 @@ ollama = { workspace = true, features = ["schemars"] }
open_ai = { workspace = true, features = ["schemars"] }
open_router = { workspace = true, features = ["schemars"] }
vercel = { workspace = true, features = ["schemars"] }
+x_ai = { workspace = true, features = ["schemars"] }
partial-json-fixer.workspace = true
proto.workspace = true
release_channel.workspace = true
diff --git a/crates/language_models/src/language_models.rs b/crates/language_models/src/language_models.rs
index c7324732c9..192f5a5fae 100644
--- a/crates/language_models/src/language_models.rs
+++ b/crates/language_models/src/language_models.rs
@@ -20,6 +20,7 @@ use crate::provider::ollama::OllamaLanguageModelProvider;
use crate::provider::open_ai::OpenAiLanguageModelProvider;
use crate::provider::open_router::OpenRouterLanguageModelProvider;
use crate::provider::vercel::VercelLanguageModelProvider;
+use crate::provider::x_ai::XAiLanguageModelProvider;
pub use crate::settings::*;
pub fn init(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) {
@@ -81,5 +82,6 @@ fn register_language_model_providers(
VercelLanguageModelProvider::new(client.http_client(), cx),
cx,
);
+ registry.register_provider(XAiLanguageModelProvider::new(client.http_client(), cx), cx);
registry.register_provider(CopilotChatLanguageModelProvider::new(cx), cx);
}
diff --git a/crates/language_models/src/provider.rs b/crates/language_models/src/provider.rs
index 6bc93bd366..c717be7c90 100644
--- a/crates/language_models/src/provider.rs
+++ b/crates/language_models/src/provider.rs
@@ -10,3 +10,4 @@ pub mod ollama;
pub mod open_ai;
pub mod open_router;
pub mod vercel;
+pub mod x_ai;
diff --git a/crates/language_models/src/provider/cloud.rs b/crates/language_models/src/provider/cloud.rs
index 9b7fee228a..8d25af1a49 100644
--- a/crates/language_models/src/provider/cloud.rs
+++ b/crates/language_models/src/provider/cloud.rs
@@ -166,46 +166,9 @@ impl State {
}
let response = Self::fetch_models(client, llm_api_token, use_cloud).await?;
- cx.update(|cx| {
- this.update(cx, |this, cx| {
- let mut models = Vec::new();
-
- for model in response.models {
- models.push(Arc::new(model.clone()));
-
- // Right now we represent thinking variants of models as separate models on the client,
- // so we need to insert variants for any model that supports thinking.
- if model.supports_thinking {
- models.push(Arc::new(zed_llm_client::LanguageModel {
- id: zed_llm_client::LanguageModelId(
- format!("{}-thinking", model.id).into(),
- ),
- display_name: format!("{} Thinking", model.display_name),
- ..model
- }));
- }
- }
-
- this.default_model = models
- .iter()
- .find(|model| model.id == response.default_model)
- .cloned();
- this.default_fast_model = models
- .iter()
- .find(|model| model.id == response.default_fast_model)
- .cloned();
- this.recommended_models = response
- .recommended_models
- .iter()
- .filter_map(|id| models.iter().find(|model| &model.id == id))
- .cloned()
- .collect();
- this.models = models;
- cx.notify();
- })
- })??;
-
- anyhow::Ok(())
+ this.update(cx, |this, cx| {
+ this.update_models(response, cx);
+ })
})
.await
.context("failed to fetch Zed models")
@@ -216,12 +179,15 @@ impl State {
}),
_llm_token_subscription: cx.subscribe(
&refresh_llm_token_listener,
- |this, _listener, _event, cx| {
+ move |this, _listener, _event, cx| {
let client = this.client.clone();
let llm_api_token = this.llm_api_token.clone();
- cx.spawn(async move |_this, _cx| {
+ cx.spawn(async move |this, cx| {
llm_api_token.refresh(&client).await?;
- anyhow::Ok(())
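+ // After refreshing the token, refetch the model list so it
+ // reflects whatever the new token grants.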
+ let response = Self::fetch_models(client, llm_api_token, use_cloud).await?;
+ this.update(cx, |this, cx| {
+ this.update_models(response, cx);
+ })
})
.detach_and_log_err(cx);
},
@@ -264,6 +230,41 @@ impl State {
}));
}
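+ /// Applies a ListModelsResponse to this state, materializing separate
+ /// "-thinking" variants for models that support thinking.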
+ fn update_models(&mut self, response: ListModelsResponse, cx: &mut Context<Self>) {
+ let mut models = Vec::new();
+
+ for model in response.models {
+ models.push(Arc::new(model.clone()));
+
+ // Right now we represent thinking variants of models as separate models on the client,
+ // so we need to insert variants for any model that supports thinking.
+ if model.supports_thinking {
+ models.push(Arc::new(zed_llm_client::LanguageModel {
+ id: zed_llm_client::LanguageModelId(format!("{}-thinking", model.id).into()),
+ display_name: format!("{} Thinking", model.display_name),
+ ..model
+ }));
+ }
+ }
+
+ self.default_model = models
+ .iter()
+ .find(|model| model.id == response.default_model)
+ .cloned();
+ self.default_fast_model = models
+ .iter()
+ .find(|model| model.id == response.default_fast_model)
+ .cloned();
+ self.recommended_models = response
+ .recommended_models
+ .iter()
+ .filter_map(|id| models.iter().find(|model| &model.id == id))
+ .cloned()
+ .collect();
+ self.models = models;
+ cx.notify();
+ }
+
async fn fetch_models(
client: Arc<Client>,
llm_api_token: LlmApiToken,
@@ -653,8 +654,62 @@ struct ApiError {
headers: HeaderMap,
}
+/// Represents error responses from Zed's cloud API.
+///
+/// Example JSON for an upstream HTTP error:
+/// ```json
+/// {
+/// "code": "upstream_http_error",
+/// "message": "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout",
+/// "upstream_status": 503
+/// }
+/// ```
+#[derive(Debug, serde::Deserialize)]
+struct CloudApiError {
+ code: String,
+ message: String,
+ #[serde(default)]
+ #[serde(deserialize_with = "deserialize_optional_status_code")]
+ upstream_status: Option<StatusCode>,
+ #[serde(default)]
+ retry_after: Option<f64>,
+}
+
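+/// Deserializes `upstream_status`, which arrives as a bare integer such as
+/// 503; absent or null values become `None`.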
+fn deserialize_optional_status_code<'de, D>(deserializer: D) -> Result<Option<StatusCode>, D::Error>