Allow AI interactions to be proxied through Zed's server so you don't need an API key (#7367)

Co-authored-by: Antonio <antonio@zed.dev>

Resurrected this from some assistant work I did in Spring of 2023.
- [x] Resurrect streaming responses
- [x] Use streaming responses to enable AI via Zed's servers by default
(but preserve API key option for now)
- [x] Simplify protobuf
- [x] Proxy to OpenAI on zed.dev
- [x] Proxy to Gemini on zed.dev
- [x] Improve UX for switching between OpenAI and Google models
- We currently disallow cycling when setting a custom model, but we need a
better solution to keep OpenAI models available while testing the Google
ones
- [x] Show remaining tokens correctly for Google models
- [x] Remove semantic index
- [x] Delete `ai` crate
- [x] CloudFront so we can block abuse
- [x] Rate-limiting (see the token-bucket sketch after this list)
- [x] Fix panic when using inline assistant
- [x] Double check the upgraded `AssistantSettings` are
backwards-compatible
- [x] Add hosted LLM interaction behind a `language-models` feature
flag.
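
On the rate-limiting item above: the limiter added in `crates/collab/src/rate_limiter.rs` is a per-user token bucket persisted to the new `rate_buckets` table. Here is a minimal standalone sketch of the bucket arithmetic, mirroring the `RateBucket` type in the diff below (the limit values in `main` are made up for illustration):

```rust
use chrono::{DateTime, Duration, Utc};

/// Minimal sketch of the token bucket used by the collab rate limiter.
struct RateBucket {
    capacity: usize,
    token_count: usize,
    refill_time_per_token: Duration,
    last_refill: DateTime<Utc>,
}

impl RateBucket {
    fn new(capacity: usize, refill_duration: Duration, now: DateTime<Utc>) -> Self {
        Self {
            capacity,
            token_count: capacity,
            // A full refill takes `refill_duration`, so each token
            // refills in an equal share of it.
            refill_time_per_token: refill_duration / capacity as i32,
            last_refill: now,
        }
    }

    /// Refill any tokens earned since the last call, then try to spend one.
    fn allow(&mut self, now: DateTime<Utc>) -> bool {
        let elapsed = now - self.last_refill;
        if elapsed >= self.refill_time_per_token {
            let new_tokens =
                elapsed.num_milliseconds() / self.refill_time_per_token.num_milliseconds();
            self.token_count = (self.token_count + new_tokens as usize).min(self.capacity);
            self.last_refill = now;
        }
        if self.token_count > 0 {
            self.token_count -= 1;
            true
        } else {
            false
        }
    }
}

fn main() {
    // Hypothetical limit: 2 requests, refilling fully over 2 seconds (1s per token).
    let start = Utc::now();
    let mut bucket = RateBucket::new(2, Duration::seconds(2), start);
    assert!(bucket.allow(start));
    assert!(bucket.allow(start));
    assert!(!bucket.allow(start)); // bucket empty: rate-limited
    assert!(bucket.allow(start + Duration::seconds(1))); // one token refilled
}
```

In the real limiter this state lives in a `DashMap` keyed by `(user, limit name)` and is flushed to the `rate_buckets` table every ten seconds, as shown in the `rate_limiter.rs` diff below.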

Release Notes:

- We are temporarily removing the semantic index in order to redesign it
from scratch.

---------

Co-authored-by: Antonio <antonio@zed.dev>
Co-authored-by: Antonio Scandurra <me@as-cii.com>
Co-authored-by: Thorsten <thorsten@zed.dev>
Co-authored-by: Max <max@zed.dev>
Nathan Sobo 2024-03-19 12:22:26 -06:00 committed by GitHub
parent 905a24079a
commit 8ae5a3b61a
87 changed files with 3647 additions and 8937 deletions


@@ -31,10 +31,12 @@ collections.workspace = true
dashmap = "5.4"
envy = "0.4.2"
futures.workspace = true
google_ai.workspace = true
hex.workspace = true
live_kit_server.workspace = true
log.workspace = true
nanoid = "0.4"
open_ai.workspace = true
parking_lot.workspace = true
prometheus = "0.13"
prost.workspace = true
@@ -80,7 +82,6 @@ git = { workspace = true, features = ["test-support"] }
gpui = { workspace = true, features = ["test-support"] }
indoc.workspace = true
language = { workspace = true, features = ["test-support"] }
lazy_static.workspace = true
live_kit_client = { workspace = true, features = ["test-support"] }
lsp = { workspace = true, features = ["test-support"] }
menu.workspace = true


@@ -379,6 +379,16 @@ CREATE TABLE extension_versions (
CREATE UNIQUE INDEX "index_extensions_external_id" ON "extensions" ("external_id");
CREATE INDEX "index_extensions_total_download_count" ON "extensions" ("total_download_count");
CREATE TABLE rate_buckets (
user_id INT NOT NULL,
rate_limit_name VARCHAR(255) NOT NULL,
token_count INT NOT NULL,
last_refill TIMESTAMP WITHOUT TIME ZONE NOT NULL,
PRIMARY KEY (user_id, rate_limit_name),
FOREIGN KEY (user_id) REFERENCES users(id)
);
CREATE INDEX idx_user_id_rate_limit ON rate_buckets (user_id, rate_limit_name);
CREATE TABLE hosted_projects (
id INTEGER PRIMARY KEY AUTOINCREMENT,
channel_id INTEGER NOT NULL REFERENCES channels(id),


@@ -0,0 +1,11 @@
CREATE TABLE IF NOT EXISTS rate_buckets (
user_id INT NOT NULL,
rate_limit_name VARCHAR(255) NOT NULL,
token_count INT NOT NULL,
last_refill TIMESTAMP WITHOUT TIME ZONE NOT NULL,
PRIMARY KEY (user_id, rate_limit_name),
CONSTRAINT fk_user
FOREIGN KEY (user_id) REFERENCES users(id)
);
CREATE INDEX idx_user_id_rate_limit ON rate_buckets (user_id, rate_limit_name);

crates/collab/src/ai.rs Normal file

@@ -0,0 +1,75 @@
use anyhow::{anyhow, Result};
use rpc::proto;
pub fn language_model_request_to_open_ai(
request: proto::CompleteWithLanguageModel,
) -> Result<open_ai::Request> {
Ok(open_ai::Request {
model: open_ai::Model::from_id(&request.model).unwrap_or(open_ai::Model::FourTurbo),
messages: request
.messages
.into_iter()
.map(|message| {
let role = proto::LanguageModelRole::from_i32(message.role)
.ok_or_else(|| anyhow!("invalid role {}", message.role))?;
Ok(open_ai::RequestMessage {
role: match role {
proto::LanguageModelRole::LanguageModelUser => open_ai::Role::User,
proto::LanguageModelRole::LanguageModelAssistant => {
open_ai::Role::Assistant
}
proto::LanguageModelRole::LanguageModelSystem => open_ai::Role::System,
},
content: message.content,
})
})
.collect::<Result<Vec<open_ai::RequestMessage>>>()?,
stream: true,
stop: request.stop,
temperature: request.temperature,
})
}
pub fn language_model_request_to_google_ai(
request: proto::CompleteWithLanguageModel,
) -> Result<google_ai::GenerateContentRequest> {
Ok(google_ai::GenerateContentRequest {
contents: request
.messages
.into_iter()
.map(language_model_request_message_to_google_ai)
.collect::<Result<Vec<_>>>()?,
generation_config: None,
safety_settings: None,
})
}
pub fn language_model_request_message_to_google_ai(
message: proto::LanguageModelRequestMessage,
) -> Result<google_ai::Content> {
let role = proto::LanguageModelRole::from_i32(message.role)
.ok_or_else(|| anyhow!("invalid role {}", message.role))?;
Ok(google_ai::Content {
parts: vec![google_ai::Part::TextPart(google_ai::TextPart {
text: message.content,
})],
role: match role {
proto::LanguageModelRole::LanguageModelUser => google_ai::Role::User,
proto::LanguageModelRole::LanguageModelAssistant => google_ai::Role::Model,
proto::LanguageModelRole::LanguageModelSystem => google_ai::Role::User,
},
})
}
pub fn count_tokens_request_to_google_ai(
request: proto::CountTokensWithLanguageModel,
) -> Result<google_ai::CountTokensRequest> {
Ok(google_ai::CountTokensRequest {
contents: request
.messages
.into_iter()
.map(language_model_request_message_to_google_ai)
.collect::<Result<Vec<_>>>()?,
})
}
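
These converters are pure mappings from the wire protocol into each provider's request type, so they are easy to exercise in isolation. A hedged sketch of driving the OpenAI converter by hand (field names follow their usages in this file; the model id, message values, and assertion are illustrative assumptions):

```rust
use anyhow::Result;
use rpc::proto;

// Sketch only: build a wire-protocol request by hand and convert it.
// prost-generated messages implement `Default`, hence the struct-update syntax.
fn example() -> Result<()> {
    let request = proto::CompleteWithLanguageModel {
        model: "gpt-4-turbo-preview".into(),
        messages: vec![proto::LanguageModelRequestMessage {
            role: proto::LanguageModelRole::LanguageModelUser as i32,
            content: "Write a haiku about Rust.".into(),
        }],
        stop: Vec::new(),
        temperature: 0.7,
        ..Default::default()
    };

    let open_ai_request = language_model_request_to_open_ai(request)?;
    assert!(open_ai_request.stream); // collab always streams completions back
    Ok(())
}
```

The Google path is symmetrical, except that system messages are folded into the user role, since Gemini only distinguishes user and model roles.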


@@ -1,6 +1,5 @@
use crate::{
db::{ExtensionMetadata, NewExtensionVersion},
executor::Executor,
AppState, Error, Result,
};
use anyhow::{anyhow, Context as _};
@@ -136,7 +135,7 @@ async fn download_extension(
const EXTENSION_FETCH_INTERVAL: Duration = Duration::from_secs(5 * 60);
const EXTENSION_DOWNLOAD_URL_LIFETIME: Duration = Duration::from_secs(3 * 60);
pub fn fetch_extensions_from_blob_store_periodically(app_state: Arc<AppState>, executor: Executor) {
pub fn fetch_extensions_from_blob_store_periodically(app_state: Arc<AppState>) {
let Some(blob_store_client) = app_state.blob_store_client.clone() else {
log::info!("no blob store client");
return;
@@ -146,6 +145,7 @@ pub fn fetch_extensions_from_blob_store_periodically(app_state: Arc<AppState>, e
return;
};
let executor = app_state.executor.clone();
executor.spawn_detached({
let executor = executor.clone();
async move {


@@ -10,6 +10,7 @@ pub mod hosted_projects;
pub mod messages;
pub mod notifications;
pub mod projects;
pub mod rate_buckets;
pub mod rooms;
pub mod servers;
pub mod users;


@@ -0,0 +1,58 @@
use super::*;
use crate::db::tables::rate_buckets;
use sea_orm::{ColumnTrait, EntityTrait, QueryFilter};
impl Database {
/// Saves the given rate buckets, updating the stored token count and
/// last-refill timestamp for each (user, rate limit name) pair on conflict.
pub async fn save_rate_buckets(&self, buckets: &[rate_buckets::Model]) -> Result<()> {
if buckets.is_empty() {
return Ok(());
}
self.transaction(|tx| async move {
rate_buckets::Entity::insert_many(buckets.iter().map(|bucket| {
rate_buckets::ActiveModel {
user_id: ActiveValue::Set(bucket.user_id),
rate_limit_name: ActiveValue::Set(bucket.rate_limit_name.clone()),
token_count: ActiveValue::Set(bucket.token_count),
last_refill: ActiveValue::Set(bucket.last_refill),
}
}))
.on_conflict(
OnConflict::columns([
rate_buckets::Column::UserId,
rate_buckets::Column::RateLimitName,
])
.update_columns([
rate_buckets::Column::TokenCount,
rate_buckets::Column::LastRefill,
])
.to_owned(),
)
.exec(&*tx)
.await?;
Ok(())
})
.await
}
/// Retrieves the rate limit for the given user and rate limit name.
pub async fn get_rate_bucket(
&self,
user_id: UserId,
rate_limit_name: &str,
) -> Result<Option<rate_buckets::Model>> {
self.transaction(|tx| async move {
let rate_limit = rate_buckets::Entity::find()
.filter(rate_buckets::Column::UserId.eq(user_id))
.filter(rate_buckets::Column::RateLimitName.eq(rate_limit_name))
.one(&*tx)
.await?;
Ok(rate_limit)
})
.await
}
}


@@ -22,6 +22,7 @@ pub mod observed_buffer_edits;
pub mod observed_channel_messages;
pub mod project;
pub mod project_collaborator;
pub mod rate_buckets;
pub mod room;
pub mod room_participant;
pub mod server;


@@ -0,0 +1,31 @@
use crate::db::UserId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "rate_buckets")]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false)]
pub user_id: UserId,
#[sea_orm(primary_key, auto_increment = false)]
pub rate_limit_name: String,
pub token_count: i32,
pub last_refill: DateTime,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
User,
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl ActiveModelBehavior for ActiveModel {}


@@ -1,8 +1,10 @@
pub mod ai;
pub mod api;
pub mod auth;
pub mod db;
pub mod env;
pub mod executor;
mod rate_limiter;
pub mod rpc;
#[cfg(test)]
@@ -13,6 +15,7 @@ use aws_config::{BehaviorVersion, Region};
use axum::{http::StatusCode, response::IntoResponse};
use db::{ChannelId, Database};
use executor::Executor;
pub use rate_limiter::*;
use serde::Deserialize;
use std::{path::PathBuf, sync::Arc};
use util::ResultExt;
@@ -126,6 +129,8 @@ pub struct Config {
pub blob_store_secret_key: Option<String>,
pub blob_store_bucket: Option<String>,
pub zed_environment: Arc<str>,
pub openai_api_key: Option<Arc<str>>,
pub google_ai_api_key: Option<Arc<str>>,
pub zed_client_checksum_seed: Option<String>,
pub slack_panics_webhook: Option<String>,
pub auto_join_channel_id: Option<ChannelId>,
@@ -147,12 +152,14 @@ pub struct AppState {
pub db: Arc<Database>,
pub live_kit_client: Option<Arc<dyn live_kit_server::api::Client>>,
pub blob_store_client: Option<aws_sdk_s3::Client>,
pub rate_limiter: Arc<RateLimiter>,
pub executor: Executor,
pub clickhouse_client: Option<clickhouse::Client>,
pub config: Config,
}
impl AppState {
pub async fn new(config: Config) -> Result<Arc<Self>> {
pub async fn new(config: Config, executor: Executor) -> Result<Arc<Self>> {
let mut db_options = db::ConnectOptions::new(config.database_url.clone());
db_options.max_connections(config.database_max_connections);
let mut db = Database::new(db_options, Executor::Production).await?;
@@ -173,10 +180,13 @@ impl AppState {
None
};
let db = Arc::new(db);
let this = Self {
db: Arc::new(db),
db: db.clone(),
live_kit_client,
blob_store_client: build_blob_store_client(&config).await.log_err(),
rate_limiter: Arc::new(RateLimiter::new(db)),
executor,
clickhouse_client: config
.clickhouse_url
.as_ref()


@@ -7,7 +7,7 @@ use axum::{
};
use collab::{
api::fetch_extensions_from_blob_store_periodically, db, env, executor::Executor, AppState,
Config, MigrateConfig, Result,
Config, MigrateConfig, RateLimiter, Result,
};
use db::Database;
use std::{
@@ -62,18 +62,27 @@ async fn main() -> Result<()> {
run_migrations().await?;
let state = AppState::new(config).await?;
let state = AppState::new(config, Executor::Production).await?;
let listener = TcpListener::bind(&format!("0.0.0.0:{}", state.config.http_port))
.expect("failed to bind TCP listener");
let epoch = state
.db
.create_server(&state.config.zed_environment)
.await?;
let rpc_server = collab::rpc::Server::new(epoch, state.clone());
rpc_server.start().await?;
fetch_extensions_from_blob_store_periodically(state.clone());
RateLimiter::save_periodically(state.rate_limiter.clone(), state.executor.clone());
let rpc_server = if is_collab {
let epoch = state
.db
.create_server(&state.config.zed_environment)
.await?;
let rpc_server =
collab::rpc::Server::new(epoch, state.clone(), Executor::Production);
let rpc_server = collab::rpc::Server::new(epoch, state.clone());
rpc_server.start().await?;
Some(rpc_server)
@@ -82,7 +91,7 @@ };
};
if is_api {
fetch_extensions_from_blob_store_periodically(state.clone(), Executor::Production);
fetch_extensions_from_blob_store_periodically(state.clone());
}
let mut app = collab::api::routes(rpc_server.clone(), state.clone());


@@ -0,0 +1,274 @@
use crate::{db::UserId, executor::Executor, Database, Error, Result};
use anyhow::anyhow;
use chrono::{DateTime, Duration, Utc};
use dashmap::{DashMap, DashSet};
use sea_orm::prelude::DateTimeUtc;
use std::sync::Arc;
use util::ResultExt;
pub trait RateLimit: 'static {
fn capacity() -> usize;
fn refill_duration() -> Duration;
fn db_name() -> &'static str;
}
/// Used to enforce per-user rate limits
pub struct RateLimiter {
buckets: DashMap<(UserId, String), RateBucket>,
dirty_buckets: DashSet<(UserId, String)>,
db: Arc<Database>,
}
impl RateLimiter {
pub fn new(db: Arc<Database>) -> Self {
RateLimiter {
buckets: DashMap::new(),
dirty_buckets: DashSet::new(),
db,
}
}
/// Spawns a new task that periodically saves rate limit data to the database.
pub fn save_periodically(rate_limiter: Arc<Self>, executor: Executor) {
const RATE_LIMITER_SAVE_INTERVAL: std::time::Duration = std::time::Duration::from_secs(10);
executor.clone().spawn_detached(async move {
loop {
executor.sleep(RATE_LIMITER_SAVE_INTERVAL).await;
rate_limiter.save().await.log_err();
}
});
}
/// Returns an error if the user has exceeded the specified `RateLimit`.
/// Attempts to read from the database if no cached RateBucket currently exists.
pub async fn check<T: RateLimit>(&self, user_id: UserId) -> Result<()> {
self.check_internal::<T>(user_id, Utc::now()).await
}
async fn check_internal<T: RateLimit>(&self, user_id: UserId, now: DateTimeUtc) -> Result<()> {
let bucket_key = (user_id, T::db_name().to_string());
// Attempt to fetch the bucket from the database if it hasn't been cached.
// For now, we keep buckets in memory for the lifetime of the process rather than expiring them,
// but this enforces limits across restarts so long as the database is reachable.
if !self.buckets.contains_key(&bucket_key) {
if let Some(bucket) = self.load_bucket::<T>(user_id).await.log_err().flatten() {
self.buckets.insert(bucket_key.clone(), bucket);
self.dirty_buckets.insert(bucket_key.clone());
}
}
let mut bucket = self
.buckets
.entry(bucket_key.clone())
.or_insert_with(|| RateBucket::new(T::capacity(), T::refill_duration(), now));
if bucket.value_mut().allow(now) {
self.dirty_buckets.insert(bucket_key);
Ok(())
} else {
Err(anyhow!("rate limit exceeded"))?
}
}
async fn load_bucket<K: RateLimit>(
&self,
user_id: UserId,
) -> Result<Option<RateBucket>, Error> {
Ok(self
.db
.get_rate_bucket(user_id, K::db_name())
.await?
.map(|saved_bucket| RateBucket {
capacity: K::capacity(),
refill_time_per_token: K::refill_duration(),
token_count: saved_bucket.token_count as usize,
last_refill: DateTime::from_naive_utc_and_offset(saved_bucket.last_refill, Utc),
}))
}
pub async fn save(&self) -> Result<()> {
let mut buckets = Vec::new();
self.dirty_buckets.retain(|key| {
if let Some(bucket) = self.buckets.get(&key) {
buckets.push(crate::db::rate_buckets::Model {
user_id: key.0,
rate_limit_name: key.1.clone(),
token_count: bucket.token_count as i32,
last_refill: bucket.last_refill.naive_utc(),
});
}
false
});
match self.db.save_rate_buckets(&buckets).await {
Ok(()) => Ok(()),
Err(err) => {
for bucket in buckets {
self.dirty_buckets
.insert((bucket.user_id, bucket.rate_limit_name));
}
Err(err)
}
}
}
}
#[derive(Clone)]
struct RateBucket {
capacity: usize,
token_count: usize,
refill_time_per_token: Duration,
last_refill: DateTimeUtc,
}
impl RateBucket {
fn new(capacity: usize, refill_duration: Duration, now: DateTimeUtc) -> Self {
RateBucket {
capacity,
token_count: capacity,
refill_time_per_token: refill_duration / capacity as i32,
last_refill: now,
}
}
fn allow(&mut self, now: DateTimeUtc) -> bool {
self.refill(now);
if self.token_count > 0 {
self.token_count -= 1;
true
} else {
false
}
}
fn refill(&mut self, now: DateTimeUtc) {
let elapsed = now - self.last_refill;
if elapsed >= self.refill_time_per_token {
let new_tokens =
elapsed.num_milliseconds() / self.refill_time_per_token.num_milliseconds();
self.token_count = (self.token_count + new_tokens as usize).min(self.capacity);
self.last_refill = now;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::db::{NewUserParams, TestDb};
use gpui::TestAppContext;
#[gpui::test]
async fn test_rate_limiter(cx: &mut TestAppContext) {
let test_db = TestDb::sqlite(cx.executor().clone());
let db = test_db.db().clone();
let user_1 = db
.create_user(
"user-1@zed.dev",
false,
NewUserParams {
github_login: "user-1".into(),
github_user_id: 1,
},
)
.await
.unwrap()
.user_id;
let user_2 = db
.create_user(
"user-2@zed.dev",
false,
NewUserParams {
github_login: "user-2".into(),
github_user_id: 2,
},
)
.await
.unwrap()
.user_id;
let mut now = Utc::now();
let rate_limiter = RateLimiter::new(db.clone());
// User 1 can access resource A two times before being rate-limited.
rate_limiter
.check_internal::<RateLimitA>(user_1, now)
.await
.unwrap();
rate_limiter
.check_internal::<RateLimitA>(user_1, now)
.await
.unwrap();
rate_limiter
.check_internal::<RateLimitA>(user_1, now)
.await
.unwrap_err();
// User 2 can access resource A and user 1 can access resource B.
rate_limiter
.check_internal::<RateLimitB>(user_2, now)
.await
.unwrap();
rate_limiter
.check_internal::<RateLimitB>(user_1, now)
.await
.unwrap();
// After one second, user 1 can make another request before being rate-limited again.
now += Duration::seconds(1);
rate_limiter
.check_internal::<RateLimitA>(user_1, now)
.await
.unwrap();
rate_limiter
.check_internal::<RateLimitA>(user_1, now)
.await
.unwrap_err();
rate_limiter.save().await.unwrap();
// Rate limits are reloaded from the database, so user 1 is still rate-limited
// for resource A.
let rate_limiter = RateLimiter::new(db.clone());
rate_limiter
.check_internal::<RateLimitA>(user_1, now)
.await
.unwrap_err();
}
struct RateLimitA;
impl RateLimit for RateLimitA {
fn capacity() -> usize {
2
}
fn refill_duration() -> Duration {
Duration::seconds(2)
}
fn db_name() -> &'static str {
"rate-limit-a"
}
}
struct RateLimitB;
impl RateLimit for RateLimitB {
fn capacity() -> usize {
10
}
fn refill_duration() -> Duration {
Duration::seconds(3)
}
fn db_name() -> &'static str {
"rate-limit-b"
}
}
}


@@ -9,9 +9,9 @@ use crate::{
User, UserId,
},
executor::Executor,
AppState, Error, Result,
AppState, Error, RateLimit, RateLimiter, Result,
};
use anyhow::anyhow;
use anyhow::{anyhow, Context as _};
use async_tungstenite::tungstenite::{
protocol::CloseFrame as TungsteniteCloseFrame, Message as TungsteniteMessage,
};
@@ -30,6 +30,8 @@ };
};
use collections::{HashMap, HashSet};
pub use connection_pool::{ConnectionPool, ZedVersion};
use core::fmt::{self, Debug, Formatter};
use futures::{
channel::oneshot,
future::{self, BoxFuture},
@@ -39,15 +41,14 @@ use futures::{
use prometheus::{register_int_gauge, IntGauge};
use rpc::{
proto::{
self, Ack, AnyTypedEnvelope, EntityMessage, EnvelopedMessage, LiveKitConnectionInfo,
RequestMessage, ShareProject, UpdateChannelBufferCollaborators,
self, Ack, AnyTypedEnvelope, EntityMessage, EnvelopedMessage, LanguageModelRole,
LiveKitConnectionInfo, RequestMessage, ShareProject, UpdateChannelBufferCollaborators,
},
Connection, ConnectionId, ErrorCode, ErrorCodeExt, ErrorExt, Peer, Receipt, TypedEnvelope,
};
use serde::{Serialize, Serializer};
use std::{
any::TypeId,
fmt,
future::Future,
marker::PhantomData,
mem,
@@ -64,7 +65,7 @@ use time::OffsetDateTime;
use tokio::sync::{watch, Semaphore};
use tower::ServiceBuilder;
use tracing::{field, info_span, instrument, Instrument};
use util::SemanticVersion;
use util::{http::IsahcHttpClient, SemanticVersion};
pub const RECONNECT_TIMEOUT: Duration = Duration::from_secs(30);
@@ -92,6 +93,18 @@ impl<R: RequestMessage> Response<R> {
}
}
struct StreamingResponse<R: RequestMessage> {
peer: Arc<Peer>,
receipt: Receipt<R>,
}
impl<R: RequestMessage> StreamingResponse<R> {
fn send(&self, payload: R::Response) -> Result<()> {
self.peer.respond(self.receipt, payload)?;
Ok(())
}
}
#[derive(Clone)]
struct Session {
user_id: UserId,
@@ -100,6 +113,8 @@ struct Session {
peer: Arc<Peer>,
connection_pool: Arc<parking_lot::Mutex<ConnectionPool>>,
live_kit_client: Option<Arc<dyn live_kit_server::api::Client>>,
http_client: IsahcHttpClient,
rate_limiter: Arc<RateLimiter>,
_executor: Executor,
}
@@ -124,8 +139,8 @@ impl Session {
}
}
impl fmt::Debug for Session {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
impl Debug for Session {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("Session")
.field("user_id", &self.user_id)
.field("connection_id", &self.connection_id)
@@ -148,7 +163,6 @@ pub struct Server {
peer: Arc<Peer>,
pub(crate) connection_pool: Arc<parking_lot::Mutex<ConnectionPool>>,
app_state: Arc<AppState>,
executor: Executor,
handlers: HashMap<TypeId, MessageHandler>,
teardown: watch::Sender<bool>,
}
@@ -175,12 +189,11 @@ where
}
impl Server {
pub fn new(id: ServerId, app_state: Arc<AppState>, executor: Executor) -> Arc<Self> {
pub fn new(id: ServerId, app_state: Arc<AppState>) -> Arc<Self> {
let mut server = Self {
id: parking_lot::Mutex::new(id),
peer: Peer::new(id.0 as u32),
app_state,
executor,
app_state: app_state.clone(),
connection_pool: Default::default(),
handlers: Default::default(),
teardown: watch::channel(false).0,
@@ -280,7 +293,30 @@ impl Server {
.add_message_handler(update_followers)
.add_request_handler(get_private_user_info)
.add_message_handler(acknowledge_channel_message)
.add_message_handler(acknowledge_buffer_version);
.add_message_handler(acknowledge_buffer_version)
.add_streaming_request_handler({
let app_state = app_state.clone();
move |request, response, session| {
complete_with_language_model(
request,
response,
session,
app_state.config.openai_api_key.clone(),
app_state.config.google_ai_api_key.clone(),
)
}
})
.add_request_handler({
let app_state = app_state.clone();
move |request, response, session| {
count_tokens_with_language_model(
request,
response,
session,
app_state.config.google_ai_api_key.clone(),
)
}
});
Arc::new(server)
}
@@ -289,12 +325,12 @@ impl Server {
let server_id = *self.id.lock();
let app_state = self.app_state.clone();
let peer = self.peer.clone();
let timeout = self.executor.sleep(CLEANUP_TIMEOUT);
let timeout = self.app_state.executor.sleep(CLEANUP_TIMEOUT);
let pool = self.connection_pool.clone();
let live_kit_client = self.app_state.live_kit_client.clone();
let span = info_span!("start server");
self.executor.spawn_detached(
self.app_state.executor.spawn_detached(
async move {
tracing::info!("waiting for cleanup timeout");
timeout.await;
@@ -536,6 +572,40 @@ impl Server {
})
}
fn add_streaming_request_handler<F, Fut, M>(&mut self, handler: F) -> &mut Self
where
F: 'static + Send + Sync + Fn(M, StreamingResponse<M>, Session) -> Fut,
Fut: Send + Future<Output = Result<()>>,
M: RequestMessage,
{
let handler = Arc::new(handler);
self.add_handler(move |envelope, session| {
let receipt = envelope.receipt();
let handler = handler.clone();
async move {
let peer = session.peer.clone();
let response = StreamingResponse {
peer: peer.clone(),
receipt,
};
match (handler)(envelope.payload, response, session).await {
Ok(()) => {
peer.end_stream(receipt)?;
Ok(())
}
Err(error) => {
let proto_err = match &error {
Error::Internal(err) => err.to_proto(),
_ => ErrorCode::Internal.message(format!("{}", error)).to_proto(),
};
peer.respond_with_error(receipt, proto_err)?;
Err(error)
}
}
}
})
}
#[allow(clippy::too_many_arguments)]
pub fn handle_connection(
self: &Arc<Self>,
@@ -569,6 +639,14 @@ impl Server {
tracing::Span::current().record("connection_id", format!("{}", connection_id));
tracing::info!("connection opened");
let http_client = match IsahcHttpClient::new() {
Ok(http_client) => http_client,
Err(error) => {
tracing::error!(?error, "failed to create HTTP client");
return;
}
};
let session = Session {
user_id,
connection_id,
@@ -576,7 +654,9 @@
peer: this.peer.clone(),
connection_pool: this.connection_pool.clone(),
live_kit_client: this.app_state.live_kit_client.clone(),
_executor: executor.clone()
http_client,
rate_limiter: this.app_state.rate_limiter.clone(),
_executor: executor.clone(),
};
if let Err(error) = this.send_initial_client_update(connection_id, user, zed_version, send_connection_id, &session).await {
@@ -3220,6 +3300,207 @@ async fn acknowledge_buffer_version(
Ok(())
}
struct CompleteWithLanguageModelRateLimit;
impl RateLimit for CompleteWithLanguageModelRateLimit {
fn capacity() -> usize {
std::env::var("COMPLETE_WITH_LANGUAGE_MODEL_RATE_LIMIT_PER_HOUR")
.ok()
.and_then(|v| v.parse().ok())
.unwrap_or(120) // Picked arbitrarily
}
fn refill_duration() -> chrono::Duration {
chrono::Duration::hours(1)
}
fn db_name() -> &'static str {
"complete-with-language-model"
}
}
async fn complete_with_language_model(
request: proto::CompleteWithLanguageModel,
response: StreamingResponse<proto::CompleteWithLanguageModel>,
session: Session,
open_ai_api_key: Option<Arc<str>>,
google_ai_api_key: Option<Arc<str>>,
) -> Result<()> {
authorize_access_to_language_models(&session).await?;
session
.rate_limiter
.check::<CompleteWithLanguageModelRateLimit>(session.user_id)
.await?;
if request.model.starts_with("gpt") {
let api_key =
open_ai_api_key.ok_or_else(|| anyhow!("no OpenAI API key configured on the server"))?;
complete_with_open_ai(request, response, session, api_key).await?;
} else if request.model.starts_with("gemini") {
let api_key = google_ai_api_key
.ok_or_else(|| anyhow!("no Google AI API key configured on the server"))?;
complete_with_google_ai(request, response, session, api_key).await?;
}
Ok(())
}
async fn complete_with_open_ai(
request: proto::CompleteWithLanguageModel,
response: StreamingResponse<proto::CompleteWithLanguageModel>,
session: Session,
api_key: Arc<str>,
) -> Result<()> {
const OPEN_AI_API_URL: &str = "https://api.openai.com/v1";
let mut completion_stream = open_ai::stream_completion(
&session.http_client,
OPEN_AI_API_URL,
&api_key,
crate::ai::language_model_request_to_open_ai(request)?,
)
.await
.context("open_ai::stream_completion request failed")?;
while let Some(event) = completion_stream.next().await {
let event = event?;
response.send(proto::LanguageModelResponse {
choices: event
.choices
.into_iter()
.map(|choice| proto::LanguageModelChoiceDelta {
index: choice.index,
delta: Some(proto::LanguageModelResponseMessage {
role: choice.delta.role.map(|role| match role {
open_ai::Role::User => LanguageModelRole::LanguageModelUser,
open_ai::Role::Assistant => LanguageModelRole::LanguageModelAssistant,
open_ai::Role::System => LanguageModelRole::LanguageModelSystem,
} as i32),
content: choice.delta.content,
}),
finish_reason: choice.finish_reason,
})
.collect(),
})?;
}
Ok(())
}
async fn complete_with_google_ai(
request: proto::CompleteWithLanguageModel,
response: StreamingResponse<proto::CompleteWithLanguageModel>,
session: Session,
api_key: Arc<str>,
) -> Result<()> {
let mut stream = google_ai::stream_generate_content(
&session.http_client,
google_ai::API_URL,
api_key.as_ref(),
crate::ai::language_model_request_to_google_ai(request)?,
)
.await
.context("google_ai::stream_generate_content request failed")?;
while let Some(event) = stream.next().await {
let event = event?;
response.send(proto::LanguageModelResponse {
choices: event
.candidates
.unwrap_or_default()
.into_iter()
.map(|candidate| proto::LanguageModelChoiceDelta {
index: candidate.index as u32,
delta: Some(proto::LanguageModelResponseMessage {
role: Some(match candidate.content.role {
google_ai::Role::User => LanguageModelRole::LanguageModelUser,
google_ai::Role::Model => LanguageModelRole::LanguageModelAssistant,
} as i32),
content: Some(
candidate
.content
.parts
.into_iter()
.filter_map(|part| match part {
google_ai::Part::TextPart(part) => Some(part.text),
google_ai::Part::InlineDataPart(_) => None,
})
.collect(),
),
}),
finish_reason: candidate.finish_reason.map(|reason| reason.to_string()),
})
.collect(),
})?;
}
Ok(())
}
struct CountTokensWithLanguageModelRateLimit;
impl RateLimit for CountTokensWithLanguageModelRateLimit {
fn capacity() -> usize {
std::env::var("COUNT_TOKENS_WITH_LANGUAGE_MODEL_RATE_LIMIT_PER_HOUR")
.ok()
.and_then(|v| v.parse().ok())
.unwrap_or(600) // Picked arbitrarily
}
fn refill_duration() -> chrono::Duration {
chrono::Duration::hours(1)
}
fn db_name() -> &'static str {
"count-tokens-with-language-model"
}
}
async fn count_tokens_with_language_model(
request: proto::CountTokensWithLanguageModel,
response: Response<proto::CountTokensWithLanguageModel>,
session: Session,
google_ai_api_key: Option<Arc<str>>,
) -> Result<()> {
authorize_access_to_language_models(&session).await?;
if !request.model.starts_with("gemini") {
return Err(anyhow!(
"counting tokens for model: {:?} is not supported",
request.model
))?;
}
session
.rate_limiter
.check::<CountTokensWithLanguageModelRateLimit>(session.user_id)
.await?;
let api_key = google_ai_api_key
.ok_or_else(|| anyhow!("no Google AI API key configured on the server"))?;
let tokens_response = google_ai::count_tokens(
&session.http_client,
google_ai::API_URL,
&api_key,
crate::ai::count_tokens_request_to_google_ai(request)?,
)
.await?;
response.send(proto::CountTokensResponse {
token_count: tokens_response.total_tokens as u32,
})?;
Ok(())
}
async fn authorize_access_to_language_models(session: &Session) -> Result<(), Error> {
let db = session.db().await;
let flags = db.get_user_flags(session.user_id).await?;
if flags.iter().any(|flag| flag == "language-models") {
Ok(())
} else {
Err(anyhow!("permission denied"))?
}
}
/// Start receiving chat updates for a channel
async fn join_channel_chat(
request: proto::JoinChannelChat,


@@ -2,7 +2,7 @@ use crate::{
db::{tests::TestDb, NewUserParams, UserId},
executor::Executor,
rpc::{Server, ZedVersion, CLEANUP_TIMEOUT, RECONNECT_TIMEOUT},
AppState, Config,
AppState, Config, RateLimiter,
};
use anyhow::anyhow;
use call::ActiveCall;
@@ -93,17 +93,14 @@
deterministic.clone(),
)
.unwrap();
let app_state = Self::build_app_state(&test_db, &live_kit_server).await;
let executor = Executor::Deterministic(deterministic.clone());
let app_state = Self::build_app_state(&test_db, &live_kit_server, executor.clone()).await;
let epoch = app_state
.db
.create_server(&app_state.config.zed_environment)
.await
.unwrap();
let server = Server::new(
epoch,
app_state.clone(),
Executor::Deterministic(deterministic.clone()),
);
let server = Server::new(epoch, app_state.clone());
server.start().await.unwrap();
// Advance clock to ensure the server's cleanup task is finished.
deterministic.advance_clock(CLEANUP_TIMEOUT);
@@ -482,12 +479,15 @@ impl TestServer {
pub async fn build_app_state(
test_db: &TestDb,
fake_server: &live_kit_client::TestServer,
live_kit_test_server: &live_kit_client::TestServer,
executor: Executor,
) -> Arc<AppState> {
Arc::new(AppState {
db: test_db.db().clone(),
live_kit_client: Some(Arc::new(fake_server.create_api_client())),
live_kit_client: Some(Arc::new(live_kit_test_server.create_api_client())),
blob_store_client: None,
rate_limiter: Arc::new(RateLimiter::new(test_db.db().clone())),
executor,
clickhouse_client: None,
config: Config {
http_port: 0,
@@ -506,6 +506,8 @@ impl TestServer {
blob_store_access_key: None,
blob_store_secret_key: None,
blob_store_bucket: None,
openai_api_key: None,
google_ai_api_key: None,
clickhouse_url: None,
clickhouse_user: None,
clickhouse_password: None,