Add feature flag to use cloud.zed.dev instead of llm.zed.dev (#34076)

This PR adds a new `zed-cloud` feature flag that can be used to send
traffic to `cloud.zed.dev` instead of `llm.zed.dev`.
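
For context, the flag type itself is declared in the `feature_flags` crate and is not part of the diff below; a minimal sketch of what that declaration presumably looks like (the trait shape here is assumed, not copied from the crate — only the flag name `zed-cloud` comes from this PR):

```rust
// Sketch only: assumed shape of the feature_flags trait; the real crate may differ.
pub trait FeatureFlag {
    /// Name the server uses to enable the flag for a given user.
    const NAME: &'static str;
}

/// The type imported in the diff as `feature_flags::ZedCloudFeatureFlag`.
pub struct ZedCloudFeatureFlag;

impl FeatureFlag for ZedCloudFeatureFlag {
    const NAME: &'static str = "zed-cloud";
}
```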

This is just so Zed staff can test the new infrastructure. When we're
ready for prime time, we'll reroute traffic on the server.
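
The host switch itself happens in the `http_client` helper that the diff threads `use_cloud` into. A rough sketch of the expected behavior (parameter names and the `String` return type are simplifications; the real `build_zed_llm_url` lives in Zed's http_client crate and returns a parsed URL, hence the `?` and `.as_ref()` in the diff):

```rust
// Sketch only: approximates what build_zed_llm_url is expected to do once it
// takes a use_cloud flag. Not the actual implementation.
fn build_zed_llm_url(path: &str, query: &[(&str, &str)], use_cloud: bool) -> String {
    // The flag only swaps the host; paths and query parameters are unchanged.
    let base = if use_cloud {
        "https://cloud.zed.dev"
    } else {
        "https://llm.zed.dev"
    };
    let mut url = format!("{base}{path}");
    if !query.is_empty() {
        let pairs: Vec<String> = query.iter().map(|(k, v)| format!("{k}={v}")).collect();
        url.push('?');
        url.push_str(&pairs.join("&"));
    }
    url
}
```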

Release Notes:

- N/A
Marshall Bowers · 2025-07-08 14:44:51 -04:00 · committed by GitHub
parent 01bdef130b
commit 1220049089
8 changed files with 70 additions and 12 deletions

@@ -2,6 +2,7 @@ use anthropic::AnthropicModelMode;
 use anyhow::{Context as _, Result, anyhow};
 use chrono::{DateTime, Utc};
 use client::{Client, ModelRequestUsage, UserStore, zed_urls};
+use feature_flags::{FeatureFlagAppExt as _, ZedCloudFeatureFlag};
 use futures::{
     AsyncBufReadExt, FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream,
 };
@@ -136,6 +137,7 @@ impl State {
         cx: &mut Context<Self>,
     ) -> Self {
         let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);
+        let use_cloud = cx.has_flag::<ZedCloudFeatureFlag>();
         Self {
             client: client.clone(),
@@ -163,7 +165,7 @@ impl State {
                     .await;
             }
-            let response = Self::fetch_models(client, llm_api_token).await?;
+            let response = Self::fetch_models(client, llm_api_token, use_cloud).await?;
             cx.update(|cx| {
                 this.update(cx, |this, cx| {
                     let mut models = Vec::new();
@@ -265,13 +267,18 @@ impl State {
     async fn fetch_models(
         client: Arc<Client>,
         llm_api_token: LlmApiToken,
+        use_cloud: bool,
     ) -> Result<ListModelsResponse> {
         let http_client = &client.http_client();
         let token = llm_api_token.acquire(&client).await?;
         let request = http_client::Request::builder()
             .method(Method::GET)
-            .uri(http_client.build_zed_llm_url("/models", &[])?.as_ref())
+            .uri(
+                http_client
+                    .build_zed_llm_url("/models", &[], use_cloud)?
+                    .as_ref(),
+            )
             .header("Authorization", format!("Bearer {token}"))
             .body(AsyncBody::empty())?;
         let mut response = http_client
@@ -535,6 +542,7 @@ impl CloudLanguageModel {
         llm_api_token: LlmApiToken,
         app_version: Option<SemanticVersion>,
         body: CompletionBody,
+        use_cloud: bool,
     ) -> Result<PerformLlmCompletionResponse> {
         let http_client = &client.http_client();
@@ -542,9 +550,11 @@
         let mut refreshed_token = false;
         loop {
-            let request_builder = http_client::Request::builder()
-                .method(Method::POST)
-                .uri(http_client.build_zed_llm_url("/completions", &[])?.as_ref());
+            let request_builder = http_client::Request::builder().method(Method::POST).uri(
+                http_client
+                    .build_zed_llm_url("/completions", &[], use_cloud)?
+                    .as_ref(),
+            );
             let request_builder = if let Some(app_version) = app_version {
                 request_builder.header(ZED_VERSION_HEADER_NAME, app_version.to_string())
             } else {
@@ -771,6 +781,7 @@ impl LanguageModel for CloudLanguageModel {
         let model_id = self.model.id.to_string();
         let generate_content_request =
             into_google(request, model_id.clone(), GoogleModelMode::Default);
+        let use_cloud = cx.has_flag::<ZedCloudFeatureFlag>();
         async move {
             let http_client = &client.http_client();
             let token = llm_api_token.acquire(&client).await?;
@@ -786,7 +797,7 @@ impl LanguageModel for CloudLanguageModel {
                 .method(Method::POST)
                 .uri(
                     http_client
-                        .build_zed_llm_url("/count_tokens", &[])?
+                        .build_zed_llm_url("/count_tokens", &[], use_cloud)?
                         .as_ref(),
                 )
                 .header("Content-Type", "application/json")
@@ -835,6 +846,9 @@ impl LanguageModel for CloudLanguageModel {
         let intent = request.intent;
         let mode = request.mode;
         let app_version = cx.update(|cx| AppVersion::global(cx)).ok();
+        let use_cloud = cx
+            .update(|cx| cx.has_flag::<ZedCloudFeatureFlag>())
+            .unwrap_or(false);
         match self.model.provider {
             zed_llm_client::LanguageModelProvider::Anthropic => {
                 let request = into_anthropic(
@@ -872,6 +886,7 @@ impl LanguageModel for CloudLanguageModel {
                         provider_request: serde_json::to_value(&request)
                             .map_err(|e| anyhow!(e))?,
                     },
+                    use_cloud,
                 )
                 .await
                 .map_err(|err| match err.downcast::<ApiError>() {
@@ -924,6 +939,7 @@ impl LanguageModel for CloudLanguageModel {
                         provider_request: serde_json::to_value(&request)
                             .map_err(|e| anyhow!(e))?,
                     },
+                    use_cloud,
                 )
                 .await?;
@@ -964,6 +980,7 @@ impl LanguageModel for CloudLanguageModel {
                         provider_request: serde_json::to_value(&request)
                             .map_err(|e| anyhow!(e))?,
                     },
+                    use_cloud,
                 )
                 .await?;