Get collab2 green

Mikayla 2023-11-03 18:01:06 -07:00
parent c529343ba1
commit e1525e2b47
265 changed files with 64477 additions and 40 deletions

crates/collab2/src/api.rs

@@ -0,0 +1,184 @@
use crate::{
auth,
db::{User, UserId},
rpc, AppState, Error, Result,
};
use anyhow::anyhow;
use axum::{
body::Body,
extract::{Path, Query},
http::{self, Request, StatusCode},
middleware::{self, Next},
response::IntoResponse,
routing::{get, post},
Extension, Json, Router,
};
use axum_extra::response::ErasedJson;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tower::ServiceBuilder;
use tracing::instrument;
pub fn routes(rpc_server: Arc<rpc::Server>, state: Arc<AppState>) -> Router<Body> {
Router::new()
.route("/user", get(get_authenticated_user))
.route("/users/:id/access_tokens", post(create_access_token))
.route("/panic", post(trace_panic))
.route("/rpc_server_snapshot", get(get_rpc_server_snapshot))
.layer(
ServiceBuilder::new()
.layer(Extension(state))
.layer(Extension(rpc_server))
.layer(middleware::from_fn(validate_api_token)),
)
}
pub async fn validate_api_token<B>(req: Request<B>, next: Next<B>) -> impl IntoResponse {
let token = req
.headers()
.get(http::header::AUTHORIZATION)
.and_then(|header| header.to_str().ok())
.ok_or_else(|| {
Error::Http(
StatusCode::BAD_REQUEST,
"missing authorization header".to_string(),
)
})?
.strip_prefix("token ")
.ok_or_else(|| {
Error::Http(
StatusCode::BAD_REQUEST,
"invalid authorization header".to_string(),
)
})?;
let state = req.extensions().get::<Arc<AppState>>().unwrap();
if token != state.config.api_token {
Err(Error::Http(
StatusCode::UNAUTHORIZED,
"invalid authorization token".to_string(),
))?
}
Ok::<_, Error>(next.run(req).await)
}
#[derive(Debug, Deserialize)]
struct AuthenticatedUserParams {
github_user_id: Option<i32>,
github_login: String,
github_email: Option<String>,
}
#[derive(Debug, Serialize)]
struct AuthenticatedUserResponse {
user: User,
metrics_id: String,
}
async fn get_authenticated_user(
Query(params): Query<AuthenticatedUserParams>,
Extension(app): Extension<Arc<AppState>>,
) -> Result<Json<AuthenticatedUserResponse>> {
let user = app
.db
.get_or_create_user_by_github_account(
&params.github_login,
params.github_user_id,
params.github_email.as_deref(),
)
.await?
.ok_or_else(|| Error::Http(StatusCode::NOT_FOUND, "user not found".into()))?;
let metrics_id = app.db.get_user_metrics_id(user.id).await?;
return Ok(Json(AuthenticatedUserResponse { user, metrics_id }));
}
#[derive(Deserialize, Debug)]
struct CreateUserParams {
github_user_id: i32,
github_login: String,
email_address: String,
email_confirmation_code: Option<String>,
#[serde(default)]
admin: bool,
#[serde(default)]
invite_count: i32,
}
#[derive(Serialize, Debug)]
struct CreateUserResponse {
user: User,
signup_device_id: Option<String>,
metrics_id: String,
}
#[derive(Debug, Deserialize)]
struct Panic {
version: String,
text: String,
}
#[instrument(skip(panic))]
async fn trace_panic(panic: Json<Panic>) -> Result<()> {
tracing::error!(version = %panic.version, text = %panic.text, "panic report");
Ok(())
}
async fn get_rpc_server_snapshot(
Extension(rpc_server): Extension<Arc<rpc::Server>>,
) -> Result<ErasedJson> {
Ok(ErasedJson::pretty(rpc_server.snapshot().await))
}
#[derive(Deserialize)]
struct CreateAccessTokenQueryParams {
public_key: String,
impersonate: Option<String>,
}
#[derive(Serialize)]
struct CreateAccessTokenResponse {
user_id: UserId,
encrypted_access_token: String,
}
async fn create_access_token(
Path(user_id): Path<UserId>,
Query(params): Query<CreateAccessTokenQueryParams>,
Extension(app): Extension<Arc<AppState>>,
) -> Result<Json<CreateAccessTokenResponse>> {
let user = app
.db
.get_user_by_id(user_id)
.await?
.ok_or_else(|| anyhow!("user not found"))?;
let mut user_id = user.id;
if let Some(impersonate) = params.impersonate {
if user.admin {
if let Some(impersonated_user) = app.db.get_user_by_github_login(&impersonate).await? {
user_id = impersonated_user.id;
} else {
return Err(Error::Http(
StatusCode::UNPROCESSABLE_ENTITY,
format!("user {impersonate} does not exist"),
));
}
} else {
return Err(Error::Http(
StatusCode::UNAUTHORIZED,
"you do not have permission to impersonate other users".to_string(),
));
}
}
let access_token = auth::create_access_token(app.db.as_ref(), user_id).await?;
let encrypted_access_token =
auth::encrypt_access_token(&access_token, params.public_key.clone())?;
Ok(Json(CreateAccessTokenResponse {
user_id,
encrypted_access_token,
}))
}
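
A minimal client-side sketch of calling the access-token route above, assuming a reqwest client with its json feature enabled and invented values for the server address, API token, user id, and public key. The "Authorization: token ..." header and the public_key query parameter mirror validate_api_token and CreateAccessTokenQueryParams; nothing here is part of the file itself.

use serde::Deserialize;

#[derive(Deserialize)]
struct AccessTokenResponse {
    user_id: i32, // UserId serializes transparently as an i32
    encrypted_access_token: String,
}

async fn request_access_token() -> anyhow::Result<AccessTokenResponse> {
    // Hypothetical values; the real API token comes from the server's config.
    let server = "http://localhost:8080";
    let api_token = "secret-api-token";
    let user_id = 1;
    let public_key = "client-public-key";
    let response = reqwest::Client::new()
        .post(format!(
            "{server}/users/{user_id}/access_tokens?public_key={public_key}"
        ))
        .header("Authorization", format!("token {api_token}"))
        .send()
        .await?
        .error_for_status()?
        .json::<AccessTokenResponse>()
        .await?;
    Ok(response)
}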

crates/collab2/src/auth.rs

@@ -0,0 +1,151 @@
use crate::{
db::{self, AccessTokenId, Database, UserId},
AppState, Error, Result,
};
use anyhow::{anyhow, Context};
use axum::{
http::{self, Request, StatusCode},
middleware::Next,
response::IntoResponse,
};
use lazy_static::lazy_static;
use prometheus::{exponential_buckets, register_histogram, Histogram};
use rand::thread_rng;
use scrypt::{
password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
Scrypt,
};
use serde::{Deserialize, Serialize};
use std::{sync::Arc, time::Instant};
lazy_static! {
static ref METRIC_ACCESS_TOKEN_HASHING_TIME: Histogram = register_histogram!(
"access_token_hashing_time",
"time spent hashing access tokens",
exponential_buckets(10.0, 2.0, 10).unwrap(),
)
.unwrap();
}
pub async fn validate_header<B>(mut req: Request<B>, next: Next<B>) -> impl IntoResponse {
let mut auth_header = req
.headers()
.get(http::header::AUTHORIZATION)
.and_then(|header| header.to_str().ok())
.ok_or_else(|| {
Error::Http(
StatusCode::UNAUTHORIZED,
"missing authorization header".to_string(),
)
})?
.split_whitespace();
let user_id = UserId(auth_header.next().unwrap_or("").parse().map_err(|_| {
Error::Http(
StatusCode::BAD_REQUEST,
"missing user id in authorization header".to_string(),
)
})?);
let access_token = auth_header.next().ok_or_else(|| {
Error::Http(
StatusCode::BAD_REQUEST,
"missing access token in authorization header".to_string(),
)
})?;
let state = req.extensions().get::<Arc<AppState>>().unwrap();
let credentials_valid = if let Some(admin_token) = access_token.strip_prefix("ADMIN_TOKEN:") {
state.config.api_token == admin_token
} else {
verify_access_token(&access_token, user_id, &state.db)
.await
.unwrap_or(false)
};
if credentials_valid {
let user = state
.db
.get_user_by_id(user_id)
.await?
.ok_or_else(|| anyhow!("user {} not found", user_id))?;
req.extensions_mut().insert(user);
Ok::<_, Error>(next.run(req).await)
} else {
Err(Error::Http(
StatusCode::UNAUTHORIZED,
"invalid credentials".to_string(),
))
}
}
const MAX_ACCESS_TOKENS_TO_STORE: usize = 8;
#[derive(Serialize, Deserialize)]
struct AccessTokenJson {
version: usize,
id: AccessTokenId,
token: String,
}
pub async fn create_access_token(db: &db::Database, user_id: UserId) -> Result<String> {
const VERSION: usize = 1;
let access_token = rpc::auth::random_token();
let access_token_hash =
hash_access_token(&access_token).context("failed to hash access token")?;
let id = db
.create_access_token(user_id, &access_token_hash, MAX_ACCESS_TOKENS_TO_STORE)
.await?;
Ok(serde_json::to_string(&AccessTokenJson {
version: VERSION,
id,
token: access_token,
})?)
}
fn hash_access_token(token: &str) -> Result<String> {
// Avoid slow hashing in debug mode.
let params = if cfg!(debug_assertions) {
scrypt::Params::new(1, 1, 1).unwrap()
} else {
scrypt::Params::new(14, 8, 1).unwrap()
};
Ok(Scrypt
.hash_password(
token.as_bytes(),
None,
params,
&SaltString::generate(thread_rng()),
)
.map_err(anyhow::Error::new)?
.to_string())
}
pub fn encrypt_access_token(access_token: &str, public_key: String) -> Result<String> {
let native_app_public_key =
rpc::auth::PublicKey::try_from(public_key).context("failed to parse app public key")?;
let encrypted_access_token = native_app_public_key
.encrypt_string(access_token)
.context("failed to encrypt access token with public key")?;
Ok(encrypted_access_token)
}
pub async fn verify_access_token(token: &str, user_id: UserId, db: &Arc<Database>) -> Result<bool> {
let token: AccessTokenJson = serde_json::from_str(&token)?;
let db_token = db.get_access_token(token.id).await?;
if db_token.user_id != user_id {
return Err(anyhow!("no such access token"))?;
}
let db_hash = PasswordHash::new(&db_token.hash).map_err(anyhow::Error::new)?;
let t0 = Instant::now();
let is_valid = Scrypt
.verify_password(token.token.as_bytes(), &db_hash)
.is_ok();
let duration = t0.elapsed();
log::info!("hashed access token in {:?}", duration);
METRIC_ACCESS_TOKEN_HASHING_TIME.observe(duration.as_millis() as f64);
Ok(is_valid)
}
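
For reference, a small sketch of the Authorization header shapes that validate_header above accepts: the user id and the token separated by whitespace, where the token is either the compact JSON produced by create_access_token or the ADMIN_TOKEN: prefix followed by the server's API token. The concrete values below are invented.

fn example_authorization_headers() -> (String, String) {
    let user_id = 42;

    // Normal flow: the second whitespace-separated field is the serialized
    // AccessTokenJson; serde_json's compact output contains no whitespace,
    // so split_whitespace() leaves it in one piece.
    let access_token_json = r#"{"version":1,"id":7,"token":"random-token"}"#;
    let user_header = format!("{user_id} {access_token_json}");

    // Admin flow: everything after "ADMIN_TOKEN:" is compared to config.api_token.
    let admin_header = format!("{user_id} ADMIN_TOKEN:secret-api-token");

    (user_header, admin_header)
}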


@@ -0,0 +1,20 @@
use anyhow::anyhow;
use std::fs;
fn main() -> anyhow::Result<()> {
let env: toml::map::Map<String, toml::Value> = toml::de::from_str(
&fs::read_to_string("./.env.toml").map_err(|_| anyhow!("no .env.toml file found"))?,
)?;
for (key, value) in env {
let value = match value {
toml::Value::String(value) => value,
toml::Value::Integer(value) => value.to_string(),
toml::Value::Float(value) => value.to_string(),
_ => panic!("unsupported TOML value in .env.toml for key {}", key),
};
println!("export {}=\"{}\"", key, value);
}
Ok(())
}


@@ -0,0 +1,107 @@
use collab::{db, executor::Executor};
use db::{ConnectOptions, Database};
use serde::{de::DeserializeOwned, Deserialize};
use std::fmt::Write;
#[derive(Debug, Deserialize)]
struct GitHubUser {
id: i32,
login: String,
email: Option<String>,
}
#[tokio::main]
async fn main() {
let database_url = std::env::var("DATABASE_URL").expect("missing DATABASE_URL env var");
let db = Database::new(ConnectOptions::new(database_url), Executor::Production)
.await
.expect("failed to connect to postgres database");
let github_token = std::env::var("GITHUB_TOKEN").expect("missing GITHUB_TOKEN env var");
let client = reqwest::Client::new();
let mut current_user =
fetch_github::<GitHubUser>(&client, &github_token, "https://api.github.com/user").await;
current_user
.email
.get_or_insert_with(|| "placeholder@example.com".to_string());
let staff_users = fetch_github::<Vec<GitHubUser>>(
&client,
&github_token,
"https://api.github.com/orgs/zed-industries/teams/staff/members",
)
.await;
let mut zed_users = Vec::new();
zed_users.push((current_user, true));
zed_users.extend(staff_users.into_iter().map(|user| (user, true)));
let user_count = db
.get_all_users(0, 200)
.await
.expect("failed to load users from db")
.len();
if user_count < 100 {
let mut last_user_id = None;
for _ in 0..10 {
let mut uri = "https://api.github.com/users?per_page=100".to_string();
if let Some(last_user_id) = last_user_id {
write!(&mut uri, "&since={}", last_user_id).unwrap();
}
let users = fetch_github::<Vec<GitHubUser>>(&client, &github_token, &uri).await;
if let Some(last_user) = users.last() {
last_user_id = Some(last_user.id);
zed_users.extend(users.into_iter().map(|user| (user, false)));
} else {
break;
}
}
}
for (github_user, admin) in zed_users {
if db
.get_user_by_github_login(&github_user.login)
.await
.expect("failed to fetch user")
.is_none()
{
if admin {
db.create_user(
&format!("{}@zed.dev", github_user.login),
admin,
db::NewUserParams {
github_login: github_user.login,
github_user_id: github_user.id,
},
)
.await
.expect("failed to insert user");
} else {
db.get_or_create_user_by_github_account(
&github_user.login,
Some(github_user.id),
github_user.email.as_deref(),
)
.await
.expect("failed to insert user");
}
}
}
}
async fn fetch_github<T: DeserializeOwned>(
client: &reqwest::Client,
access_token: &str,
url: &str,
) -> T {
let response = client
.get(url)
.bearer_auth(&access_token)
.header("user-agent", "zed")
.send()
.await
.expect(&format!("failed to fetch '{}'", url));
response
.json()
.await
.expect(&format!("failed to deserialize github user from '{}'", url))
}
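
The fetch_github helper above is generic over the deserialized response type; a hedged example of reusing it against another public GitHub endpoint (the account name is purely illustrative):

async fn fetch_one_user(client: &reqwest::Client, github_token: &str) -> GitHubUser {
    // GET https://api.github.com/users/{login} returns a single user object
    // with the id, login, and optional email fields that GitHubUser expects.
    fetch_github::<GitHubUser>(client, github_token, "https://api.github.com/users/octocat").await
}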

crates/collab2/src/db.rs

@@ -0,0 +1,672 @@
#[cfg(test)]
pub mod tests;
#[cfg(test)]
pub use tests::TestDb;
mod ids;
mod queries;
mod tables;
use crate::{executor::Executor, Error, Result};
use anyhow::anyhow;
use collections::{BTreeMap, HashMap, HashSet};
use dashmap::DashMap;
use futures::StreamExt;
use rand::{prelude::StdRng, Rng, SeedableRng};
use rpc::{
proto::{self},
ConnectionId,
};
use sea_orm::{
entity::prelude::*,
sea_query::{Alias, Expr, OnConflict},
ActiveValue, Condition, ConnectionTrait, DatabaseConnection, DatabaseTransaction, DbErr,
FromQueryResult, IntoActiveModel, IsolationLevel, JoinType, QueryOrder, QuerySelect, Statement,
TransactionTrait,
};
use serde::{Deserialize, Serialize};
use sqlx::{
migrate::{Migrate, Migration, MigrationSource},
Connection,
};
use std::{
fmt::Write as _,
future::Future,
marker::PhantomData,
ops::{Deref, DerefMut},
path::Path,
rc::Rc,
sync::Arc,
time::Duration,
};
use tables::*;
use tokio::sync::{Mutex, OwnedMutexGuard};
pub use ids::*;
pub use sea_orm::ConnectOptions;
pub use tables::user::Model as User;
pub struct Database {
options: ConnectOptions,
pool: DatabaseConnection,
rooms: DashMap<RoomId, Arc<Mutex<()>>>,
rng: Mutex<StdRng>,
executor: Executor,
notification_kinds_by_id: HashMap<NotificationKindId, &'static str>,
notification_kinds_by_name: HashMap<String, NotificationKindId>,
#[cfg(test)]
runtime: Option<tokio::runtime::Runtime>,
}
// The `Database` type has so many methods that its impl blocks are split into
// separate files in the `queries` folder.
impl Database {
pub async fn new(options: ConnectOptions, executor: Executor) -> Result<Self> {
sqlx::any::install_default_drivers();
Ok(Self {
options: options.clone(),
pool: sea_orm::Database::connect(options).await?,
rooms: DashMap::with_capacity(16384),
rng: Mutex::new(StdRng::seed_from_u64(0)),
notification_kinds_by_id: HashMap::default(),
notification_kinds_by_name: HashMap::default(),
executor,
#[cfg(test)]
runtime: None,
})
}
#[cfg(test)]
pub fn reset(&self) {
self.rooms.clear();
}
pub async fn migrate(
&self,
migrations_path: &Path,
ignore_checksum_mismatch: bool,
) -> anyhow::Result<Vec<(Migration, Duration)>> {
let migrations = MigrationSource::resolve(migrations_path)
.await
.map_err(|err| anyhow!("failed to load migrations: {err:?}"))?;
let mut connection = sqlx::AnyConnection::connect(self.options.get_url()).await?;
connection.ensure_migrations_table().await?;
let applied_migrations: HashMap<_, _> = connection
.list_applied_migrations()
.await?
.into_iter()
.map(|m| (m.version, m))
.collect();
let mut new_migrations = Vec::new();
for migration in migrations {
match applied_migrations.get(&migration.version) {
Some(applied_migration) => {
if migration.checksum != applied_migration.checksum && !ignore_checksum_mismatch
{
Err(anyhow!(
"checksum mismatch for applied migration {}",
migration.description
))?;
}
}
None => {
let elapsed = connection.apply(&migration).await?;
new_migrations.push((migration, elapsed));
}
}
}
Ok(new_migrations)
}
pub async fn initialize_static_data(&mut self) -> Result<()> {
self.initialize_notification_kinds().await?;
Ok(())
}
pub async fn transaction<F, Fut, T>(&self, f: F) -> Result<T>
where
F: Send + Fn(TransactionHandle) -> Fut,
Fut: Send + Future<Output = Result<T>>,
{
let body = async {
let mut i = 0;
loop {
let (tx, result) = self.with_transaction(&f).await?;
match result {
Ok(result) => match tx.commit().await.map_err(Into::into) {
Ok(()) => return Ok(result),
Err(error) => {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
},
Err(error) => {
tx.rollback().await?;
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
i += 1;
}
};
self.run(body).await
}
async fn optional_room_transaction<F, Fut, T>(&self, f: F) -> Result<Option<RoomGuard<T>>>
where
F: Send + Fn(TransactionHandle) -> Fut,
Fut: Send + Future<Output = Result<Option<(RoomId, T)>>>,
{
let body = async {
let mut i = 0;
loop {
let (tx, result) = self.with_transaction(&f).await?;
match result {
Ok(Some((room_id, data))) => {
let lock = self.rooms.entry(room_id).or_default().clone();
let _guard = lock.lock_owned().await;
match tx.commit().await.map_err(Into::into) {
Ok(()) => {
return Ok(Some(RoomGuard {
data,
_guard,
_not_send: PhantomData,
}));
}
Err(error) => {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
}
Ok(None) => match tx.commit().await.map_err(Into::into) {
Ok(()) => return Ok(None),
Err(error) => {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
},
Err(error) => {
tx.rollback().await?;
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
i += 1;
}
};
self.run(body).await
}
async fn room_transaction<F, Fut, T>(&self, room_id: RoomId, f: F) -> Result<RoomGuard<T>>
where
F: Send + Fn(TransactionHandle) -> Fut,
Fut: Send + Future<Output = Result<T>>,
{
let body = async {
let mut i = 0;
loop {
let lock = self.rooms.entry(room_id).or_default().clone();
let _guard = lock.lock_owned().await;
let (tx, result) = self.with_transaction(&f).await?;
match result {
Ok(data) => match tx.commit().await.map_err(Into::into) {
Ok(()) => {
return Ok(RoomGuard {
data,
_guard,
_not_send: PhantomData,
});
}
Err(error) => {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
},
Err(error) => {
tx.rollback().await?;
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
i += 1;
}
};
self.run(body).await
}
async fn with_transaction<F, Fut, T>(&self, f: &F) -> Result<(DatabaseTransaction, Result<T>)>
where
F: Send + Fn(TransactionHandle) -> Fut,
Fut: Send + Future<Output = Result<T>>,
{
let tx = self
.pool
.begin_with_config(Some(IsolationLevel::Serializable), None)
.await?;
let mut tx = Arc::new(Some(tx));
let result = f(TransactionHandle(tx.clone())).await;
let Some(tx) = Arc::get_mut(&mut tx).and_then(|tx| tx.take()) else {
return Err(anyhow!(
"couldn't complete transaction because it's still in use"
))?;
};
Ok((tx, result))
}
async fn run<F, T>(&self, future: F) -> Result<T>
where
F: Future<Output = Result<T>>,
{
#[cfg(test)]
{
if let Executor::Deterministic(executor) = &self.executor {
executor.simulate_random_delay().await;
}
self.runtime.as_ref().unwrap().block_on(future)
}
#[cfg(not(test))]
{
future.await
}
}
async fn retry_on_serialization_error(&self, error: &Error, prev_attempt_count: u32) -> bool {
// If the error is due to a failure to serialize concurrent transactions, then retry
// this transaction after a delay. With each subsequent retry, double the delay duration.
// Also vary the delay randomly in order to ensure different database connections retry
// at different times.
if is_serialization_error(error) {
let base_delay = 4_u64 << prev_attempt_count.min(16);
let randomized_delay = base_delay as f32 * self.rng.lock().await.gen_range(0.5..=2.0);
log::info!(
"retrying transaction after serialization error. delay: {} ms.",
randomized_delay
);
self.executor
.sleep(Duration::from_millis(randomized_delay as u64))
.await;
true
} else {
false
}
}
}
fn is_serialization_error(error: &Error) -> bool {
const SERIALIZATION_FAILURE_CODE: &'static str = "40001";
match error {
Error::Database(
DbErr::Exec(sea_orm::RuntimeErr::SqlxError(error))
| DbErr::Query(sea_orm::RuntimeErr::SqlxError(error)),
) if error
.as_database_error()
.and_then(|error| error.code())
.as_deref()
== Some(SERIALIZATION_FAILURE_CODE) =>
{
true
}
_ => false,
}
}
pub struct TransactionHandle(Arc<Option<DatabaseTransaction>>);
impl Deref for TransactionHandle {
type Target = DatabaseTransaction;
fn deref(&self) -> &Self::Target {
self.0.as_ref().as_ref().unwrap()
}
}
pub struct RoomGuard<T> {
data: T,
_guard: OwnedMutexGuard<()>,
_not_send: PhantomData<Rc<()>>,
}
impl<T> Deref for RoomGuard<T> {
type Target = T;
fn deref(&self) -> &T {
&self.data
}
}
impl<T> DerefMut for RoomGuard<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.data
}
}
impl<T> RoomGuard<T> {
pub fn into_inner(self) -> T {
self.data
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Contact {
Accepted { user_id: UserId, busy: bool },
Outgoing { user_id: UserId },
Incoming { user_id: UserId },
}
impl Contact {
pub fn user_id(&self) -> UserId {
match self {
Contact::Accepted { user_id, .. } => *user_id,
Contact::Outgoing { user_id } => *user_id,
Contact::Incoming { user_id, .. } => *user_id,
}
}
}
pub type NotificationBatch = Vec<(UserId, proto::Notification)>;
pub struct CreatedChannelMessage {
pub message_id: MessageId,
pub participant_connection_ids: Vec<ConnectionId>,
pub channel_members: Vec<UserId>,
pub notifications: NotificationBatch,
}
#[derive(Clone, Debug, PartialEq, Eq, FromQueryResult, Serialize, Deserialize)]
pub struct Invite {
pub email_address: String,
pub email_confirmation_code: String,
}
#[derive(Clone, Debug, Deserialize)]
pub struct NewSignup {
pub email_address: String,
pub platform_mac: bool,
pub platform_windows: bool,
pub platform_linux: bool,
pub editor_features: Vec<String>,
pub programming_languages: Vec<String>,
pub device_id: Option<String>,
pub added_to_mailing_list: bool,
pub created_at: Option<DateTime>,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, FromQueryResult)]
pub struct WaitlistSummary {
pub count: i64,
pub linux_count: i64,
pub mac_count: i64,
pub windows_count: i64,
pub unknown_count: i64,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct NewUserParams {
pub github_login: String,
pub github_user_id: i32,
}
#[derive(Debug)]
pub struct NewUserResult {
pub user_id: UserId,
pub metrics_id: String,
pub inviting_user_id: Option<UserId>,
pub signup_device_id: Option<String>,
}
#[derive(Debug)]
pub struct MoveChannelResult {
pub participants_to_update: HashMap<UserId, ChannelsForUser>,
pub participants_to_remove: HashSet<UserId>,
pub moved_channels: HashSet<ChannelId>,
}
#[derive(Debug)]
pub struct RenameChannelResult {
pub channel: Channel,
pub participants_to_update: HashMap<UserId, Channel>,
}
#[derive(Debug)]
pub struct CreateChannelResult {
pub channel: Channel,
pub participants_to_update: Vec<(UserId, ChannelsForUser)>,
}
#[derive(Debug)]
pub struct SetChannelVisibilityResult {
pub participants_to_update: HashMap<UserId, ChannelsForUser>,
pub participants_to_remove: HashSet<UserId>,
pub channels_to_remove: Vec<ChannelId>,
}
#[derive(Debug)]
pub struct MembershipUpdated {
pub channel_id: ChannelId,
pub new_channels: ChannelsForUser,
pub removed_channels: Vec<ChannelId>,
}
#[derive(Debug)]
pub enum SetMemberRoleResult {
InviteUpdated(Channel),
MembershipUpdated(MembershipUpdated),
}
#[derive(Debug)]
pub struct InviteMemberResult {
pub channel: Channel,
pub notifications: NotificationBatch,
}
#[derive(Debug)]
pub struct RespondToChannelInvite {
pub membership_update: Option<MembershipUpdated>,
pub notifications: NotificationBatch,
}
#[derive(Debug)]
pub struct RemoveChannelMemberResult {
pub membership_update: MembershipUpdated,
pub notification_id: Option<NotificationId>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct Channel {
pub id: ChannelId,
pub name: String,
pub visibility: ChannelVisibility,
pub role: ChannelRole,
pub parent_path: Vec<ChannelId>,
}
impl Channel {
fn from_model(value: channel::Model, role: ChannelRole) -> Self {
Channel {
id: value.id,
visibility: value.visibility,
name: value.clone().name,
role,
parent_path: value.ancestors().collect(),
}
}
pub fn to_proto(&self) -> proto::Channel {
proto::Channel {
id: self.id.to_proto(),
name: self.name.clone(),
visibility: self.visibility.into(),
role: self.role.into(),
parent_path: self.parent_path.iter().map(|c| c.to_proto()).collect(),
}
}
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ChannelMember {
pub role: ChannelRole,
pub user_id: UserId,
pub kind: proto::channel_member::Kind,
}
impl ChannelMember {
pub fn to_proto(&self) -> proto::ChannelMember {
proto::ChannelMember {
role: self.role.into(),
user_id: self.user_id.to_proto(),
kind: self.kind.into(),
}
}
}
#[derive(Debug, PartialEq)]
pub struct ChannelsForUser {
pub channels: Vec<Channel>,
pub channel_participants: HashMap<ChannelId, Vec<UserId>>,
pub unseen_buffer_changes: Vec<proto::UnseenChannelBufferChange>,
pub channel_messages: Vec<proto::UnseenChannelMessage>,
}
#[derive(Debug)]
pub struct RejoinedChannelBuffer {
pub buffer: proto::RejoinedChannelBuffer,
pub old_connection_id: ConnectionId,
}
#[derive(Clone)]
pub struct JoinRoom {
pub room: proto::Room,
pub channel_id: Option<ChannelId>,
pub channel_members: Vec<UserId>,
}
pub struct RejoinedRoom {
pub room: proto::Room,
pub rejoined_projects: Vec<RejoinedProject>,
pub reshared_projects: Vec<ResharedProject>,
pub channel_id: Option<ChannelId>,
pub channel_members: Vec<UserId>,
}
pub struct ResharedProject {
pub id: ProjectId,
pub old_connection_id: ConnectionId,
pub collaborators: Vec<ProjectCollaborator>,
pub worktrees: Vec<proto::WorktreeMetadata>,
}
pub struct RejoinedProject {
pub id: ProjectId,
pub old_connection_id: ConnectionId,
pub collaborators: Vec<ProjectCollaborator>,
pub worktrees: Vec<RejoinedWorktree>,
pub language_servers: Vec<proto::LanguageServer>,
}
#[derive(Debug)]
pub struct RejoinedWorktree {
pub id: u64,
pub abs_path: String,
pub root_name: String,
pub visible: bool,
pub updated_entries: Vec<proto::Entry>,
pub removed_entries: Vec<u64>,
pub updated_repositories: Vec<proto::RepositoryEntry>,
pub removed_repositories: Vec<u64>,
pub diagnostic_summaries: Vec<proto::DiagnosticSummary>,
pub settings_files: Vec<WorktreeSettingsFile>,
pub scan_id: u64,
pub completed_scan_id: u64,
}
pub struct LeftRoom {
pub room: proto::Room,
pub channel_id: Option<ChannelId>,
pub channel_members: Vec<UserId>,
pub left_projects: HashMap<ProjectId, LeftProject>,
pub canceled_calls_to_user_ids: Vec<UserId>,
pub deleted: bool,
}
pub struct RefreshedRoom {
pub room: proto::Room,
pub channel_id: Option<ChannelId>,
pub channel_members: Vec<UserId>,
pub stale_participant_user_ids: Vec<UserId>,
pub canceled_calls_to_user_ids: Vec<UserId>,
}
pub struct RefreshedChannelBuffer {
pub connection_ids: Vec<ConnectionId>,
pub collaborators: Vec<proto::Collaborator>,
}
pub struct Project {
pub collaborators: Vec<ProjectCollaborator>,
pub worktrees: BTreeMap<u64, Worktree>,
pub language_servers: Vec<proto::LanguageServer>,
}
pub struct ProjectCollaborator {
pub connection_id: ConnectionId,
pub user_id: UserId,
pub replica_id: ReplicaId,
pub is_host: bool,
}
impl ProjectCollaborator {
pub fn to_proto(&self) -> proto::Collaborator {
proto::Collaborator {
peer_id: Some(self.connection_id.into()),
replica_id: self.replica_id.0 as u32,
user_id: self.user_id.to_proto(),
}
}
}
#[derive(Debug)]
pub struct LeftProject {
pub id: ProjectId,
pub host_user_id: UserId,
pub host_connection_id: ConnectionId,
pub connection_ids: Vec<ConnectionId>,
}
pub struct Worktree {
pub id: u64,
pub abs_path: String,
pub root_name: String,
pub visible: bool,
pub entries: Vec<proto::Entry>,
pub repository_entries: BTreeMap<u64, proto::RepositoryEntry>,
pub diagnostic_summaries: Vec<proto::DiagnosticSummary>,
pub settings_files: Vec<WorktreeSettingsFile>,
pub scan_id: u64,
pub completed_scan_id: u64,
}
#[derive(Debug)]
pub struct WorktreeSettingsFile {
pub path: String,
pub content: String,
}
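
As a rough illustration of the retry schedule in retry_on_serialization_error above: the base delay doubles with each attempt (4 ms, 8 ms, 16 ms, ...) and is then scaled by a random factor between 0.5 and 2.0. A sketch of the same arithmetic:

fn example_retry_delay_ranges() -> Vec<std::ops::RangeInclusive<u64>> {
    // For attempts 0..5, base_delay = 4 << attempt, jittered by 0.5x..=2.0x,
    // giving roughly 2..=8 ms, 4..=16 ms, 8..=32 ms, 16..=64 ms, 32..=128 ms.
    (0u32..5)
        .map(|attempt| {
            let base = 4_u64 << attempt.min(16);
            (base / 2)..=(base * 2)
        })
        .collect()
}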


@@ -0,0 +1,199 @@
use crate::Result;
use rpc::proto;
use sea_orm::{entity::prelude::*, DbErr};
use serde::{Deserialize, Serialize};
macro_rules! id_type {
($name:ident) => {
#[derive(
Clone,
Copy,
Debug,
Default,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Serialize,
Deserialize,
DeriveValueType,
)]
#[serde(transparent)]
pub struct $name(pub i32);
impl $name {
#[allow(unused)]
pub const MAX: Self = Self(i32::MAX);
#[allow(unused)]
pub fn from_proto(value: u64) -> Self {
Self(value as i32)
}
#[allow(unused)]
pub fn to_proto(self) -> u64 {
self.0 as u64
}
}
impl std::fmt::Display for $name {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
self.0.fmt(f)
}
}
impl sea_orm::TryFromU64 for $name {
fn try_from_u64(n: u64) -> Result<Self, DbErr> {
Ok(Self(n.try_into().map_err(|_| {
DbErr::ConvertFromU64(concat!(
"error converting ",
stringify!($name),
" to u64"
))
})?))
}
}
impl sea_orm::sea_query::Nullable for $name {
fn null() -> Value {
Value::Int(None)
}
}
};
}
id_type!(BufferId);
id_type!(AccessTokenId);
id_type!(ChannelChatParticipantId);
id_type!(ChannelId);
id_type!(ChannelMemberId);
id_type!(MessageId);
id_type!(ContactId);
id_type!(FollowerId);
id_type!(RoomId);
id_type!(RoomParticipantId);
id_type!(ProjectId);
id_type!(ProjectCollaboratorId);
id_type!(ReplicaId);
id_type!(ServerId);
id_type!(SignupId);
id_type!(UserId);
id_type!(ChannelBufferCollaboratorId);
id_type!(FlagId);
id_type!(NotificationId);
id_type!(NotificationKindId);
#[derive(Eq, PartialEq, Copy, Clone, Debug, EnumIter, DeriveActiveEnum, Default, Hash)]
#[sea_orm(rs_type = "String", db_type = "String(None)")]
pub enum ChannelRole {
#[sea_orm(string_value = "admin")]
Admin,
#[sea_orm(string_value = "member")]
#[default]
Member,
#[sea_orm(string_value = "guest")]
Guest,
#[sea_orm(string_value = "banned")]
Banned,
}
impl ChannelRole {
pub fn should_override(&self, other: Self) -> bool {
use ChannelRole::*;
match self {
Admin => matches!(other, Member | Banned | Guest),
Member => matches!(other, Banned | Guest),
Banned => matches!(other, Guest),
Guest => false,
}
}
pub fn max(&self, other: Self) -> Self {
if self.should_override(other) {
*self
} else {
other
}
}
pub fn can_see_all_descendants(&self) -> bool {
use ChannelRole::*;
match self {
Admin | Member => true,
Guest | Banned => false,
}
}
pub fn can_only_see_public_descendants(&self) -> bool {
use ChannelRole::*;
match self {
Guest => true,
Admin | Member | Banned => false,
}
}
}
impl From<proto::ChannelRole> for ChannelRole {
fn from(value: proto::ChannelRole) -> Self {
match value {
proto::ChannelRole::Admin => ChannelRole::Admin,
proto::ChannelRole::Member => ChannelRole::Member,
proto::ChannelRole::Guest => ChannelRole::Guest,
proto::ChannelRole::Banned => ChannelRole::Banned,
}
}
}
impl Into<proto::ChannelRole> for ChannelRole {
fn into(self) -> proto::ChannelRole {
match self {
ChannelRole::Admin => proto::ChannelRole::Admin,
ChannelRole::Member => proto::ChannelRole::Member,
ChannelRole::Guest => proto::ChannelRole::Guest,
ChannelRole::Banned => proto::ChannelRole::Banned,
}
}
}
impl Into<i32> for ChannelRole {
fn into(self) -> i32 {
let proto: proto::ChannelRole = self.into();
proto.into()
}
}
#[derive(Eq, PartialEq, Copy, Clone, Debug, EnumIter, DeriveActiveEnum, Default, Hash)]
#[sea_orm(rs_type = "String", db_type = "String(None)")]
pub enum ChannelVisibility {
#[sea_orm(string_value = "public")]
Public,
#[sea_orm(string_value = "members")]
#[default]
Members,
}
impl From<proto::ChannelVisibility> for ChannelVisibility {
fn from(value: proto::ChannelVisibility) -> Self {
match value {
proto::ChannelVisibility::Public => ChannelVisibility::Public,
proto::ChannelVisibility::Members => ChannelVisibility::Members,
}
}
}
impl Into<proto::ChannelVisibility> for ChannelVisibility {
fn into(self) -> proto::ChannelVisibility {
match self {
ChannelVisibility::Public => proto::ChannelVisibility::Public,
ChannelVisibility::Members => proto::ChannelVisibility::Members,
}
}
}
impl Into<i32> for ChannelVisibility {
fn into(self) -> i32 {
let proto: proto::ChannelVisibility = self.into();
proto.into()
}
}
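
A few assertions spelling out the role precedence encoded by should_override and max above (Admin > Member > Banned > Guest); this is just a sketch that assumes ChannelRole is in scope:

fn channel_role_precedence_examples() {
    // Admin wins over everything else; Member over Banned and Guest;
    // Banned only over Guest; Guest never overrides anyone.
    assert_eq!(ChannelRole::Admin.max(ChannelRole::Member), ChannelRole::Admin);
    assert_eq!(ChannelRole::Member.max(ChannelRole::Banned), ChannelRole::Member);
    assert_eq!(ChannelRole::Banned.max(ChannelRole::Guest), ChannelRole::Banned);
    assert_eq!(ChannelRole::Guest.max(ChannelRole::Admin), ChannelRole::Admin);
    assert!(!ChannelRole::Guest.should_override(ChannelRole::Banned));
}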


@@ -0,0 +1,12 @@
use super::*;
pub mod access_tokens;
pub mod buffers;
pub mod channels;
pub mod contacts;
pub mod messages;
pub mod notifications;
pub mod projects;
pub mod rooms;
pub mod servers;
pub mod users;


@@ -0,0 +1,54 @@
use super::*;
use sea_orm::sea_query::Query;
impl Database {
pub async fn create_access_token(
&self,
user_id: UserId,
access_token_hash: &str,
max_access_token_count: usize,
) -> Result<AccessTokenId> {
self.transaction(|tx| async {
let tx = tx;
let token = access_token::ActiveModel {
user_id: ActiveValue::set(user_id),
hash: ActiveValue::set(access_token_hash.into()),
..Default::default()
}
.insert(&*tx)
.await?;
access_token::Entity::delete_many()
.filter(
access_token::Column::Id.in_subquery(
Query::select()
.column(access_token::Column::Id)
.from(access_token::Entity)
.and_where(access_token::Column::UserId.eq(user_id))
.order_by(access_token::Column::Id, sea_orm::Order::Desc)
.limit(10000)
.offset(max_access_token_count as u64)
.to_owned(),
),
)
.exec(&*tx)
.await?;
Ok(token.id)
})
.await
}
pub async fn get_access_token(
&self,
access_token_id: AccessTokenId,
) -> Result<access_token::Model> {
self.transaction(|tx| async move {
Ok(access_token::Entity::find_by_id(access_token_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such access token"))?)
})
.await
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,353 @@
use super::*;
impl Database {
pub async fn get_contacts(&self, user_id: UserId) -> Result<Vec<Contact>> {
#[derive(Debug, FromQueryResult)]
struct ContactWithUserBusyStatuses {
user_id_a: UserId,
user_id_b: UserId,
a_to_b: bool,
accepted: bool,
user_a_busy: bool,
user_b_busy: bool,
}
self.transaction(|tx| async move {
let user_a_participant = Alias::new("user_a_participant");
let user_b_participant = Alias::new("user_b_participant");
let mut db_contacts = contact::Entity::find()
.column_as(
Expr::col((user_a_participant.clone(), room_participant::Column::Id))
.is_not_null(),
"user_a_busy",
)
.column_as(
Expr::col((user_b_participant.clone(), room_participant::Column::Id))
.is_not_null(),
"user_b_busy",
)
.filter(
contact::Column::UserIdA
.eq(user_id)
.or(contact::Column::UserIdB.eq(user_id)),
)
.join_as(
JoinType::LeftJoin,
contact::Relation::UserARoomParticipant.def(),
user_a_participant,
)
.join_as(
JoinType::LeftJoin,
contact::Relation::UserBRoomParticipant.def(),
user_b_participant,
)
.into_model::<ContactWithUserBusyStatuses>()
.stream(&*tx)
.await?;
let mut contacts = Vec::new();
while let Some(db_contact) = db_contacts.next().await {
let db_contact = db_contact?;
if db_contact.user_id_a == user_id {
if db_contact.accepted {
contacts.push(Contact::Accepted {
user_id: db_contact.user_id_b,
busy: db_contact.user_b_busy,
});
} else if db_contact.a_to_b {
contacts.push(Contact::Outgoing {
user_id: db_contact.user_id_b,
})
} else {
contacts.push(Contact::Incoming {
user_id: db_contact.user_id_b,
});
}
} else if db_contact.accepted {
contacts.push(Contact::Accepted {
user_id: db_contact.user_id_a,
busy: db_contact.user_a_busy,
});
} else if db_contact.a_to_b {
contacts.push(Contact::Incoming {
user_id: db_contact.user_id_a,
});
} else {
contacts.push(Contact::Outgoing {
user_id: db_contact.user_id_a,
});
}
}
contacts.sort_unstable_by_key(|contact| contact.user_id());
Ok(contacts)
})
.await
}
pub async fn is_user_busy(&self, user_id: UserId) -> Result<bool> {
self.transaction(|tx| async move {
let participant = room_participant::Entity::find()
.filter(room_participant::Column::UserId.eq(user_id))
.one(&*tx)
.await?;
Ok(participant.is_some())
})
.await
}
pub async fn has_contact(&self, user_id_1: UserId, user_id_2: UserId) -> Result<bool> {
self.transaction(|tx| async move {
let (id_a, id_b) = if user_id_1 < user_id_2 {
(user_id_1, user_id_2)
} else {
(user_id_2, user_id_1)
};
Ok(contact::Entity::find()
.filter(
contact::Column::UserIdA
.eq(id_a)
.and(contact::Column::UserIdB.eq(id_b))
.and(contact::Column::Accepted.eq(true)),
)
.one(&*tx)
.await?
.is_some())
})
.await
}
pub async fn send_contact_request(
&self,
sender_id: UserId,
receiver_id: UserId,
) -> Result<NotificationBatch> {
self.transaction(|tx| async move {
let (id_a, id_b, a_to_b) = if sender_id < receiver_id {
(sender_id, receiver_id, true)
} else {
(receiver_id, sender_id, false)
};
let rows_affected = contact::Entity::insert(contact::ActiveModel {
user_id_a: ActiveValue::set(id_a),
user_id_b: ActiveValue::set(id_b),
a_to_b: ActiveValue::set(a_to_b),
accepted: ActiveValue::set(false),
should_notify: ActiveValue::set(true),
..Default::default()
})
.on_conflict(
OnConflict::columns([contact::Column::UserIdA, contact::Column::UserIdB])
.values([
(contact::Column::Accepted, true.into()),
(contact::Column::ShouldNotify, false.into()),
])
.action_and_where(
contact::Column::Accepted.eq(false).and(
contact::Column::AToB
.eq(a_to_b)
.and(contact::Column::UserIdA.eq(id_b))
.or(contact::Column::AToB
.ne(a_to_b)
.and(contact::Column::UserIdA.eq(id_a))),
),
)
.to_owned(),
)
.exec_without_returning(&*tx)
.await?;
if rows_affected == 0 {
Err(anyhow!("contact already requested"))?;
}
Ok(self
.create_notification(
receiver_id,
rpc::Notification::ContactRequest {
sender_id: sender_id.to_proto(),
},
true,
&*tx,
)
.await?
.into_iter()
.collect())
})
.await
}
/// Returns a bool indicating whether the removed contact had originally accepted or not
///
/// Deletes the contact identified by the requester and responder ids, and then returns
/// whether the deleted contact had originally accepted or was a pending contact request.
///
/// # Arguments
///
/// * `requester_id` - The user that initiates this request
/// * `responder_id` - The user that will be removed
pub async fn remove_contact(
&self,
requester_id: UserId,
responder_id: UserId,
) -> Result<(bool, Option<NotificationId>)> {
self.transaction(|tx| async move {
let (id_a, id_b) = if responder_id < requester_id {
(responder_id, requester_id)
} else {
(requester_id, responder_id)
};
let contact = contact::Entity::find()
.filter(
contact::Column::UserIdA
.eq(id_a)
.and(contact::Column::UserIdB.eq(id_b)),
)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such contact"))?;
contact::Entity::delete_by_id(contact.id).exec(&*tx).await?;
let mut deleted_notification_id = None;
if !contact.accepted {
deleted_notification_id = self
.remove_notification(
responder_id,
rpc::Notification::ContactRequest {
sender_id: requester_id.to_proto(),
},
&*tx,
)
.await?;
}
Ok((contact.accepted, deleted_notification_id))
})
.await
}
pub async fn dismiss_contact_notification(
&self,
user_id: UserId,
contact_user_id: UserId,
) -> Result<()> {
self.transaction(|tx| async move {
let (id_a, id_b, a_to_b) = if user_id < contact_user_id {
(user_id, contact_user_id, true)
} else {
(contact_user_id, user_id, false)
};
let result = contact::Entity::update_many()
.set(contact::ActiveModel {
should_notify: ActiveValue::set(false),
..Default::default()
})
.filter(
contact::Column::UserIdA
.eq(id_a)
.and(contact::Column::UserIdB.eq(id_b))
.and(
contact::Column::AToB
.eq(a_to_b)
.and(contact::Column::Accepted.eq(true))
.or(contact::Column::AToB
.ne(a_to_b)
.and(contact::Column::Accepted.eq(false))),
),
)
.exec(&*tx)
.await?;
if result.rows_affected == 0 {
Err(anyhow!("no such contact request"))?
} else {
Ok(())
}
})
.await
}
pub async fn respond_to_contact_request(
&self,
responder_id: UserId,
requester_id: UserId,
accept: bool,
) -> Result<NotificationBatch> {
self.transaction(|tx| async move {
let (id_a, id_b, a_to_b) = if responder_id < requester_id {
(responder_id, requester_id, false)
} else {
(requester_id, responder_id, true)
};
let rows_affected = if accept {
let result = contact::Entity::update_many()
.set(contact::ActiveModel {
accepted: ActiveValue::set(true),
should_notify: ActiveValue::set(true),
..Default::default()
})
.filter(
contact::Column::UserIdA
.eq(id_a)
.and(contact::Column::UserIdB.eq(id_b))
.and(contact::Column::AToB.eq(a_to_b)),
)
.exec(&*tx)
.await?;
result.rows_affected
} else {
let result = contact::Entity::delete_many()
.filter(
contact::Column::UserIdA
.eq(id_a)
.and(contact::Column::UserIdB.eq(id_b))
.and(contact::Column::AToB.eq(a_to_b))
.and(contact::Column::Accepted.eq(false)),
)
.exec(&*tx)
.await?;
result.rows_affected
};
if rows_affected == 0 {
Err(anyhow!("no such contact request"))?
}
let mut notifications = Vec::new();
notifications.extend(
self.mark_notification_as_read_with_response(
responder_id,
&rpc::Notification::ContactRequest {
sender_id: requester_id.to_proto(),
},
accept,
&*tx,
)
.await?,
);
if accept {
notifications.extend(
self.create_notification(
requester_id,
rpc::Notification::ContactRequestAccepted {
responder_id: responder_id.to_proto(),
},
true,
&*tx,
)
.await?,
);
}
Ok(notifications)
})
.await
}
}
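
The contact queries above always store the pair with the smaller user id in user_id_a and use a_to_b to record which side initiated the request. A small sketch of that normalization, mirroring send_contact_request (the UserId values are made up):

fn normalize_contact_request(sender_id: UserId, receiver_id: UserId) -> (UserId, UserId, bool) {
    // a_to_b is true when the sender is the "a" side, i.e. the smaller id.
    if sender_id < receiver_id {
        (sender_id, receiver_id, true)
    } else {
        (receiver_id, sender_id, false)
    }
}

// For example, a request from UserId(5) to UserId(3) is stored as
// user_id_a = UserId(3), user_id_b = UserId(5), a_to_b = false.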


@@ -0,0 +1,505 @@
use super::*;
use rpc::Notification;
use sea_orm::TryInsertResult;
use time::OffsetDateTime;
impl Database {
pub async fn join_channel_chat(
&self,
channel_id: ChannelId,
connection_id: ConnectionId,
user_id: UserId,
) -> Result<()> {
self.transaction(|tx| async move {
let channel = self.get_channel_internal(channel_id, &*tx).await?;
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
channel_chat_participant::ActiveModel {
id: ActiveValue::NotSet,
channel_id: ActiveValue::Set(channel_id),
user_id: ActiveValue::Set(user_id),
connection_id: ActiveValue::Set(connection_id.id as i32),
connection_server_id: ActiveValue::Set(ServerId(connection_id.owner_id as i32)),
}
.insert(&*tx)
.await?;
Ok(())
})
.await
}
pub async fn channel_chat_connection_lost(
&self,
connection_id: ConnectionId,
tx: &DatabaseTransaction,
) -> Result<()> {
channel_chat_participant::Entity::delete_many()
.filter(
Condition::all()
.add(
channel_chat_participant::Column::ConnectionServerId
.eq(connection_id.owner_id),
)
.add(channel_chat_participant::Column::ConnectionId.eq(connection_id.id)),
)
.exec(tx)
.await?;
Ok(())
}
pub async fn leave_channel_chat(
&self,
channel_id: ChannelId,
connection_id: ConnectionId,
_user_id: UserId,
) -> Result<()> {
self.transaction(|tx| async move {
channel_chat_participant::Entity::delete_many()
.filter(
Condition::all()
.add(
channel_chat_participant::Column::ConnectionServerId
.eq(connection_id.owner_id),
)
.add(channel_chat_participant::Column::ConnectionId.eq(connection_id.id))
.add(channel_chat_participant::Column::ChannelId.eq(channel_id)),
)
.exec(&*tx)
.await?;
Ok(())
})
.await
}
pub async fn get_channel_messages(
&self,
channel_id: ChannelId,
user_id: UserId,
count: usize,
before_message_id: Option<MessageId>,
) -> Result<Vec<proto::ChannelMessage>> {
self.transaction(|tx| async move {
let channel = self.get_channel_internal(channel_id, &*tx).await?;
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
let mut condition =
Condition::all().add(channel_message::Column::ChannelId.eq(channel_id));
if let Some(before_message_id) = before_message_id {
condition = condition.add(channel_message::Column::Id.lt(before_message_id));
}
let rows = channel_message::Entity::find()
.filter(condition)
.order_by_desc(channel_message::Column::Id)
.limit(count as u64)
.all(&*tx)
.await?;
self.load_channel_messages(rows, &*tx).await
})
.await
}
pub async fn get_channel_messages_by_id(
&self,
user_id: UserId,
message_ids: &[MessageId],
) -> Result<Vec<proto::ChannelMessage>> {
self.transaction(|tx| async move {
let rows = channel_message::Entity::find()
.filter(channel_message::Column::Id.is_in(message_ids.iter().copied()))
.order_by_desc(channel_message::Column::Id)
.all(&*tx)
.await?;
let mut channels = HashMap::<ChannelId, channel::Model>::default();
for row in &rows {
channels.insert(
row.channel_id,
self.get_channel_internal(row.channel_id, &*tx).await?,
);
}
for (_, channel) in channels {
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
}
let messages = self.load_channel_messages(rows, &*tx).await?;
Ok(messages)
})
.await
}
async fn load_channel_messages(
&self,
rows: Vec<channel_message::Model>,
tx: &DatabaseTransaction,
) -> Result<Vec<proto::ChannelMessage>> {
let mut messages = rows
.into_iter()
.map(|row| {
let nonce = row.nonce.as_u64_pair();
proto::ChannelMessage {
id: row.id.to_proto(),
sender_id: row.sender_id.to_proto(),
body: row.body,
timestamp: row.sent_at.assume_utc().unix_timestamp() as u64,
mentions: vec![],
nonce: Some(proto::Nonce {
upper_half: nonce.0,
lower_half: nonce.1,
}),
}
})
.collect::<Vec<_>>();
messages.reverse();
let mut mentions = channel_message_mention::Entity::find()
.filter(channel_message_mention::Column::MessageId.is_in(messages.iter().map(|m| m.id)))
.order_by_asc(channel_message_mention::Column::MessageId)
.order_by_asc(channel_message_mention::Column::StartOffset)
.stream(&*tx)
.await?;
let mut message_ix = 0;
while let Some(mention) = mentions.next().await {
let mention = mention?;
let message_id = mention.message_id.to_proto();
while let Some(message) = messages.get_mut(message_ix) {
if message.id < message_id {
message_ix += 1;
} else {
if message.id == message_id {
message.mentions.push(proto::ChatMention {
range: Some(proto::Range {
start: mention.start_offset as u64,
end: mention.end_offset as u64,
}),
user_id: mention.user_id.to_proto(),
});
}
break;
}
}
}
Ok(messages)
}
pub async fn create_channel_message(
&self,
channel_id: ChannelId,
user_id: UserId,
body: &str,
mentions: &[proto::ChatMention],
timestamp: OffsetDateTime,
nonce: u128,
) -> Result<CreatedChannelMessage> {
self.transaction(|tx| async move {
let channel = self.get_channel_internal(channel_id, &*tx).await?;
self.check_user_is_channel_participant(&channel, user_id, &*tx)
.await?;
let mut rows = channel_chat_participant::Entity::find()
.filter(channel_chat_participant::Column::ChannelId.eq(channel_id))
.stream(&*tx)
.await?;
let mut is_participant = false;
let mut participant_connection_ids = Vec::new();
let mut participant_user_ids = Vec::new();
while let Some(row) = rows.next().await {
let row = row?;
if row.user_id == user_id {
is_participant = true;
}
participant_user_ids.push(row.user_id);
participant_connection_ids.push(row.connection());
}
drop(rows);
if !is_participant {
Err(anyhow!("not a chat participant"))?;
}
let timestamp = timestamp.to_offset(time::UtcOffset::UTC);
let timestamp = time::PrimitiveDateTime::new(timestamp.date(), timestamp.time());
let result = channel_message::Entity::insert(channel_message::ActiveModel {
channel_id: ActiveValue::Set(channel_id),
sender_id: ActiveValue::Set(user_id),
body: ActiveValue::Set(body.to_string()),
sent_at: ActiveValue::Set(timestamp),
nonce: ActiveValue::Set(Uuid::from_u128(nonce)),
id: ActiveValue::NotSet,
})
.on_conflict(
OnConflict::columns([
channel_message::Column::SenderId,
channel_message::Column::Nonce,
])
.do_nothing()
.to_owned(),
)
.do_nothing()
.exec(&*tx)
.await?;
let message_id;
let mut notifications = Vec::new();
match result {
TryInsertResult::Inserted(result) => {
message_id = result.last_insert_id;
let mentioned_user_ids =
mentions.iter().map(|m| m.user_id).collect::<HashSet<_>>();
let mentions = mentions
.iter()
.filter_map(|mention| {
let range = mention.range.as_ref()?;
if !body.is_char_boundary(range.start as usize)
|| !body.is_char_boundary(range.end as usize)
{
return None;
}
Some(channel_message_mention::ActiveModel {
message_id: ActiveValue::Set(message_id),
start_offset: ActiveValue::Set(range.start as i32),
end_offset: ActiveValue::Set(range.end as i32),
user_id: ActiveValue::Set(UserId::from_proto(mention.user_id)),
})
})
.collect::<Vec<_>>();
if !mentions.is_empty() {
channel_message_mention::Entity::insert_many(mentions)
.exec(&*tx)
.await?;
}
for mentioned_user in mentioned_user_ids {
notifications.extend(
self.create_notification(
UserId::from_proto(mentioned_user),
rpc::Notification::ChannelMessageMention {
message_id: message_id.to_proto(),
sender_id: user_id.to_proto(),
channel_id: channel_id.to_proto(),
},
false,
&*tx,
)
.await?,
);
}
self.observe_channel_message_internal(channel_id, user_id, message_id, &*tx)
.await?;
}
_ => {
message_id = channel_message::Entity::find()
.filter(channel_message::Column::Nonce.eq(Uuid::from_u128(nonce)))
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("failed to insert message"))?
.id;
}
}
let mut channel_members = self.get_channel_participants(&channel, &*tx).await?;
channel_members.retain(|member| !participant_user_ids.contains(member));
Ok(CreatedChannelMessage {
message_id,
participant_connection_ids,
channel_members,
notifications,
})
})
.await
}
pub async fn observe_channel_message(
&self,
channel_id: ChannelId,
user_id: UserId,
message_id: MessageId,
) -> Result<NotificationBatch> {
self.transaction(|tx| async move {
self.observe_channel_message_internal(channel_id, user_id, message_id, &*tx)
.await?;
let mut batch = NotificationBatch::default();
batch.extend(
self.mark_notification_as_read(
user_id,
&Notification::ChannelMessageMention {
message_id: message_id.to_proto(),
sender_id: Default::default(),
channel_id: Default::default(),
},
&*tx,
)
.await?,
);
Ok(batch)
})
.await
}
async fn observe_channel_message_internal(
&self,
channel_id: ChannelId,
user_id: UserId,
message_id: MessageId,
tx: &DatabaseTransaction,
) -> Result<()> {
observed_channel_messages::Entity::insert(observed_channel_messages::ActiveModel {
user_id: ActiveValue::Set(user_id),
channel_id: ActiveValue::Set(channel_id),
channel_message_id: ActiveValue::Set(message_id),
})
.on_conflict(
OnConflict::columns([
observed_channel_messages::Column::ChannelId,
observed_channel_messages::Column::UserId,
])
.update_column(observed_channel_messages::Column::ChannelMessageId)
.action_cond_where(observed_channel_messages::Column::ChannelMessageId.lt(message_id))
.to_owned(),
)
// TODO: Try to upgrade SeaORM so we don't have to do this hack around their bug
.exec_without_returning(&*tx)
.await?;
Ok(())
}
pub async fn unseen_channel_messages(
&self,
user_id: UserId,
channel_ids: &[ChannelId],
tx: &DatabaseTransaction,
) -> Result<Vec<proto::UnseenChannelMessage>> {
let mut observed_messages_by_channel_id = HashMap::default();
let mut rows = observed_channel_messages::Entity::find()
.filter(observed_channel_messages::Column::UserId.eq(user_id))
.filter(observed_channel_messages::Column::ChannelId.is_in(channel_ids.iter().copied()))
.stream(&*tx)
.await?;
while let Some(row) = rows.next().await {
let row = row?;
observed_messages_by_channel_id.insert(row.channel_id, row);
}
drop(rows);
let mut values = String::new();
for id in channel_ids {
if !values.is_empty() {
values.push_str(", ");
}
write!(&mut values, "({})", id).unwrap();
}
if values.is_empty() {
return Ok(Default::default());
}
let sql = format!(
r#"
SELECT
*
FROM (
SELECT
*,
row_number() OVER (
PARTITION BY channel_id
ORDER BY id DESC
) as row_number
FROM channel_messages
WHERE
channel_id in ({values})
) AS messages
WHERE
row_number = 1
"#,
);
let stmt = Statement::from_string(self.pool.get_database_backend(), sql);
let last_messages = channel_message::Model::find_by_statement(stmt)
.all(&*tx)
.await?;
let mut changes = Vec::new();
for last_message in last_messages {
if let Some(observed_message) =
observed_messages_by_channel_id.get(&last_message.channel_id)
{
if observed_message.channel_message_id == last_message.id {
continue;
}
}
changes.push(proto::UnseenChannelMessage {
channel_id: last_message.channel_id.to_proto(),
message_id: last_message.id.to_proto(),
});
}
Ok(changes)
}
pub async fn remove_channel_message(
&self,
channel_id: ChannelId,
message_id: MessageId,
user_id: UserId,
) -> Result<Vec<ConnectionId>> {
self.transaction(|tx| async move {
let mut rows = channel_chat_participant::Entity::find()
.filter(channel_chat_participant::Column::ChannelId.eq(channel_id))
.stream(&*tx)
.await?;
let mut is_participant = false;
let mut participant_connection_ids = Vec::new();
while let Some(row) = rows.next().await {
let row = row?;
if row.user_id == user_id {
is_participant = true;
}
participant_connection_ids.push(row.connection());
}
drop(rows);
if !is_participant {
Err(anyhow!("not a chat participant"))?;
}
let result = channel_message::Entity::delete_by_id(message_id)
.filter(channel_message::Column::SenderId.eq(user_id))
.exec(&*tx)
.await?;
if result.rows_affected == 0 {
let channel = self.get_channel_internal(channel_id, &*tx).await?;
if self
.check_user_is_channel_admin(&channel, user_id, &*tx)
.await
.is_ok()
{
let result = channel_message::Entity::delete_by_id(message_id)
.exec(&*tx)
.await?;
if result.rows_affected == 0 {
Err(anyhow!("no such message"))?;
}
} else {
Err(anyhow!("operation could not be completed"))?;
}
}
Ok(participant_connection_ids)
})
.await
}
}


@@ -0,0 +1,262 @@
use super::*;
use rpc::Notification;
impl Database {
pub async fn initialize_notification_kinds(&mut self) -> Result<()> {
notification_kind::Entity::insert_many(Notification::all_variant_names().iter().map(
|kind| notification_kind::ActiveModel {
name: ActiveValue::Set(kind.to_string()),
..Default::default()
},
))
.on_conflict(OnConflict::new().do_nothing().to_owned())
.exec_without_returning(&self.pool)
.await?;
let mut rows = notification_kind::Entity::find().stream(&self.pool).await?;
while let Some(row) = rows.next().await {
let row = row?;
self.notification_kinds_by_name.insert(row.name, row.id);
}
for name in Notification::all_variant_names() {
if let Some(id) = self.notification_kinds_by_name.get(*name).copied() {
self.notification_kinds_by_id.insert(id, name);
}
}
Ok(())
}
pub async fn get_notifications(
&self,
recipient_id: UserId,
limit: usize,
before_id: Option<NotificationId>,
) -> Result<Vec<proto::Notification>> {
self.transaction(|tx| async move {
let mut result = Vec::new();
let mut condition =
Condition::all().add(notification::Column::RecipientId.eq(recipient_id));
if let Some(before_id) = before_id {
condition = condition.add(notification::Column::Id.lt(before_id));
}
let mut rows = notification::Entity::find()
.filter(condition)
.order_by_desc(notification::Column::Id)
.limit(limit as u64)
.stream(&*tx)
.await?;
while let Some(row) = rows.next().await {
let row = row?;
let kind = row.kind;
if let Some(proto) = model_to_proto(self, row) {
result.push(proto);
} else {
log::warn!("unknown notification kind {:?}", kind);
}
}
result.reverse();
Ok(result)
})
.await
}
/// Create a notification. If `avoid_duplicates` is set to true, then avoid
/// creating a new notification if the given recipient already has an
/// unread notification with the given kind and entity id.
pub async fn create_notification(
&self,
recipient_id: UserId,
notification: Notification,
avoid_duplicates: bool,
tx: &DatabaseTransaction,
) -> Result<Option<(UserId, proto::Notification)>> {
if avoid_duplicates {
if self
.find_notification(recipient_id, &notification, tx)
.await?
.is_some()
{
return Ok(None);
}
}
let proto = notification.to_proto();
let kind = notification_kind_from_proto(self, &proto)?;
let model = notification::ActiveModel {
recipient_id: ActiveValue::Set(recipient_id),
kind: ActiveValue::Set(kind),
entity_id: ActiveValue::Set(proto.entity_id.map(|id| id as i32)),
content: ActiveValue::Set(proto.content.clone()),
..Default::default()
}
.save(&*tx)
.await?;
Ok(Some((
recipient_id,
proto::Notification {
id: model.id.as_ref().to_proto(),
kind: proto.kind,
timestamp: model.created_at.as_ref().assume_utc().unix_timestamp() as u64,
is_read: false,
response: None,
content: proto.content,
entity_id: proto.entity_id,
},
)))
}
/// Remove an unread notification with the given recipient, kind and
/// entity id.
pub async fn remove_notification(
&self,
recipient_id: UserId,
notification: Notification,
tx: &DatabaseTransaction,
) -> Result<Option<NotificationId>> {
let id = self
.find_notification(recipient_id, &notification, tx)
.await?;
if let Some(id) = id {
notification::Entity::delete_by_id(id).exec(tx).await?;
}
Ok(id)
}
/// Populate the response for the notification with the given kind and
/// entity id.
pub async fn mark_notification_as_read_with_response(
&self,
recipient_id: UserId,
notification: &Notification,
response: bool,
tx: &DatabaseTransaction,
) -> Result<Option<(UserId, proto::Notification)>> {
self.mark_notification_as_read_internal(recipient_id, notification, Some(response), tx)
.await
}
pub async fn mark_notification_as_read(
&self,
recipient_id: UserId,
notification: &Notification,
tx: &DatabaseTransaction,
) -> Result<Option<(UserId, proto::Notification)>> {
self.mark_notification_as_read_internal(recipient_id, notification, None, tx)
.await
}
pub async fn mark_notification_as_read_by_id(
&self,
recipient_id: UserId,
notification_id: NotificationId,
) -> Result<NotificationBatch> {
self.transaction(|tx| async move {
let row = notification::Entity::update(notification::ActiveModel {
id: ActiveValue::Unchanged(notification_id),
recipient_id: ActiveValue::Unchanged(recipient_id),
is_read: ActiveValue::Set(true),
..Default::default()
})
.exec(&*tx)
.await?;
Ok(model_to_proto(self, row)
.map(|notification| (recipient_id, notification))
.into_iter()
.collect())
})
.await
}
async fn mark_notification_as_read_internal(
&self,
recipient_id: UserId,
notification: &Notification,
response: Option<bool>,
tx: &DatabaseTransaction,
) -> Result<Option<(UserId, proto::Notification)>> {
if let Some(id) = self
.find_notification(recipient_id, notification, &*tx)
.await?
{
let row = notification::Entity::update(notification::ActiveModel {
id: ActiveValue::Unchanged(id),
recipient_id: ActiveValue::Unchanged(recipient_id),
is_read: ActiveValue::Set(true),
response: if let Some(response) = response {
ActiveValue::Set(Some(response))
} else {
ActiveValue::NotSet
},
..Default::default()
})
.exec(tx)
.await?;
Ok(model_to_proto(self, row).map(|notification| (recipient_id, notification)))
} else {
Ok(None)
}
}
/// Find an unread notification by its recipient, kind and entity id.
async fn find_notification(
&self,
recipient_id: UserId,
notification: &Notification,
tx: &DatabaseTransaction,
) -> Result<Option<NotificationId>> {
let proto = notification.to_proto();
let kind = notification_kind_from_proto(self, &proto)?;
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryIds {
Id,
}
Ok(notification::Entity::find()
.select_only()
.column(notification::Column::Id)
.filter(
Condition::all()
.add(notification::Column::RecipientId.eq(recipient_id))
.add(notification::Column::IsRead.eq(false))
.add(notification::Column::Kind.eq(kind))
.add(if proto.entity_id.is_some() {
notification::Column::EntityId.eq(proto.entity_id)
} else {
notification::Column::EntityId.is_null()
}),
)
.into_values::<_, QueryIds>()
.one(&*tx)
.await?)
}
}
fn model_to_proto(this: &Database, row: notification::Model) -> Option<proto::Notification> {
let kind = this.notification_kinds_by_id.get(&row.kind)?;
Some(proto::Notification {
id: row.id.to_proto(),
kind: kind.to_string(),
timestamp: row.created_at.assume_utc().unix_timestamp() as u64,
is_read: row.is_read,
response: row.response,
content: row.content,
entity_id: row.entity_id.map(|id| id as u64),
})
}
fn notification_kind_from_proto(
this: &Database,
proto: &proto::Notification,
) -> Result<NotificationKindId> {
Ok(this
.notification_kinds_by_name
.get(&proto.kind)
.copied()
.ok_or_else(|| anyhow!("invalid notification kind {:?}", proto.kind))?)
}

View file

@ -0,0 +1,960 @@
use super::*;
impl Database {
pub async fn project_count_excluding_admins(&self) -> Result<usize> {
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryAs {
Count,
}
self.transaction(|tx| async move {
Ok(project::Entity::find()
.select_only()
.column_as(project::Column::Id.count(), QueryAs::Count)
.inner_join(user::Entity)
.filter(user::Column::Admin.eq(false))
.into_values::<_, QueryAs>()
.one(&*tx)
.await?
.unwrap_or(0i64) as usize)
})
.await
}
pub async fn share_project(
&self,
room_id: RoomId,
connection: ConnectionId,
worktrees: &[proto::WorktreeMetadata],
) -> Result<RoomGuard<(ProjectId, proto::Room)>> {
self.room_transaction(room_id, |tx| async move {
let participant = room_participant::Entity::find()
.filter(
Condition::all()
.add(
room_participant::Column::AnsweringConnectionId
.eq(connection.id as i32),
)
.add(
room_participant::Column::AnsweringConnectionServerId
.eq(connection.owner_id as i32),
),
)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("could not find participant"))?;
if participant.room_id != room_id {
return Err(anyhow!("shared project on unexpected room"))?;
}
let project = project::ActiveModel {
room_id: ActiveValue::set(participant.room_id),
host_user_id: ActiveValue::set(participant.user_id),
host_connection_id: ActiveValue::set(Some(connection.id as i32)),
host_connection_server_id: ActiveValue::set(Some(ServerId(
connection.owner_id as i32,
))),
..Default::default()
}
.insert(&*tx)
.await?;
if !worktrees.is_empty() {
worktree::Entity::insert_many(worktrees.iter().map(|worktree| {
worktree::ActiveModel {
id: ActiveValue::set(worktree.id as i64),
project_id: ActiveValue::set(project.id),
abs_path: ActiveValue::set(worktree.abs_path.clone()),
root_name: ActiveValue::set(worktree.root_name.clone()),
visible: ActiveValue::set(worktree.visible),
scan_id: ActiveValue::set(0),
completed_scan_id: ActiveValue::set(0),
}
}))
.exec(&*tx)
.await?;
}
project_collaborator::ActiveModel {
project_id: ActiveValue::set(project.id),
connection_id: ActiveValue::set(connection.id as i32),
connection_server_id: ActiveValue::set(ServerId(connection.owner_id as i32)),
user_id: ActiveValue::set(participant.user_id),
replica_id: ActiveValue::set(ReplicaId(0)),
is_host: ActiveValue::set(true),
..Default::default()
}
.insert(&*tx)
.await?;
let room = self.get_room(room_id, &tx).await?;
Ok((project.id, room))
})
.await
}
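// Illustrative call sketch (the `session`/`request` names and the way the returned
// RoomGuard is unwrapped are hypothetical):
//
//     let guard = db
//         .share_project(room_id, session.connection_id, &request.worktrees)
//         .await?;
//     // guard holds the room lock and wraps (ProjectId, proto::Room) for the
//     // caller to broadcast to the other participants.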
pub async fn unshare_project(
&self,
project_id: ProjectId,
connection: ConnectionId,
) -> Result<RoomGuard<(proto::Room, Vec<ConnectionId>)>> {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let guest_connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("project not found"))?;
if project.host_connection()? == connection {
project::Entity::delete(project.into_active_model())
.exec(&*tx)
.await?;
let room = self.get_room(room_id, &tx).await?;
Ok((room, guest_connection_ids))
} else {
Err(anyhow!("cannot unshare a project hosted by another user"))?
}
})
.await
}
pub async fn update_project(
&self,
project_id: ProjectId,
connection: ConnectionId,
worktrees: &[proto::WorktreeMetadata],
) -> Result<RoomGuard<(proto::Room, Vec<ConnectionId>)>> {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let project = project::Entity::find_by_id(project_id)
.filter(
Condition::all()
.add(project::Column::HostConnectionId.eq(connection.id as i32))
.add(
project::Column::HostConnectionServerId.eq(connection.owner_id as i32),
),
)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
self.update_project_worktrees(project.id, worktrees, &tx)
.await?;
let guest_connection_ids = self.project_guest_connection_ids(project.id, &tx).await?;
let room = self.get_room(project.room_id, &tx).await?;
Ok((room, guest_connection_ids))
})
.await
}
pub(in crate::db) async fn update_project_worktrees(
&self,
project_id: ProjectId,
worktrees: &[proto::WorktreeMetadata],
tx: &DatabaseTransaction,
) -> Result<()> {
if !worktrees.is_empty() {
worktree::Entity::insert_many(worktrees.iter().map(|worktree| worktree::ActiveModel {
id: ActiveValue::set(worktree.id as i64),
project_id: ActiveValue::set(project_id),
abs_path: ActiveValue::set(worktree.abs_path.clone()),
root_name: ActiveValue::set(worktree.root_name.clone()),
visible: ActiveValue::set(worktree.visible),
scan_id: ActiveValue::set(0),
completed_scan_id: ActiveValue::set(0),
}))
.on_conflict(
OnConflict::columns([worktree::Column::ProjectId, worktree::Column::Id])
.update_column(worktree::Column::RootName)
.to_owned(),
)
.exec(&*tx)
.await?;
}
worktree::Entity::delete_many()
.filter(worktree::Column::ProjectId.eq(project_id).and(
worktree::Column::Id.is_not_in(worktrees.iter().map(|worktree| worktree.id as i64)),
))
.exec(&*tx)
.await?;
Ok(())
}
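// In short, `update_project_worktrees` upserts every worktree in the given list
// (refreshing `RootName` on conflict) and then deletes any stored worktree whose id
// is absent from that list, so the table converges on the host's current set.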
pub async fn update_worktree(
&self,
update: &proto::UpdateWorktree,
connection: ConnectionId,
) -> Result<RoomGuard<Vec<ConnectionId>>> {
let project_id = ProjectId::from_proto(update.project_id);
let worktree_id = update.worktree_id as i64;
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
// Ensure the update comes from the host.
let _project = project::Entity::find_by_id(project_id)
.filter(
Condition::all()
.add(project::Column::HostConnectionId.eq(connection.id as i32))
.add(
project::Column::HostConnectionServerId.eq(connection.owner_id as i32),
),
)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
// Update metadata.
worktree::Entity::update(worktree::ActiveModel {
id: ActiveValue::set(worktree_id),
project_id: ActiveValue::set(project_id),
root_name: ActiveValue::set(update.root_name.clone()),
scan_id: ActiveValue::set(update.scan_id as i64),
completed_scan_id: if update.is_last_update {
ActiveValue::set(update.scan_id as i64)
} else {
ActiveValue::default()
},
abs_path: ActiveValue::set(update.abs_path.clone()),
..Default::default()
})
.exec(&*tx)
.await?;
if !update.updated_entries.is_empty() {
worktree_entry::Entity::insert_many(update.updated_entries.iter().map(|entry| {
let mtime = entry.mtime.clone().unwrap_or_default();
worktree_entry::ActiveModel {
project_id: ActiveValue::set(project_id),
worktree_id: ActiveValue::set(worktree_id),
id: ActiveValue::set(entry.id as i64),
is_dir: ActiveValue::set(entry.is_dir),
path: ActiveValue::set(entry.path.clone()),
inode: ActiveValue::set(entry.inode as i64),
mtime_seconds: ActiveValue::set(mtime.seconds as i64),
mtime_nanos: ActiveValue::set(mtime.nanos as i32),
is_symlink: ActiveValue::set(entry.is_symlink),
is_ignored: ActiveValue::set(entry.is_ignored),
is_external: ActiveValue::set(entry.is_external),
git_status: ActiveValue::set(entry.git_status.map(|status| status as i64)),
is_deleted: ActiveValue::set(false),
scan_id: ActiveValue::set(update.scan_id as i64),
}
}))
.on_conflict(
OnConflict::columns([
worktree_entry::Column::ProjectId,
worktree_entry::Column::WorktreeId,
worktree_entry::Column::Id,
])
.update_columns([
worktree_entry::Column::IsDir,
worktree_entry::Column::Path,
worktree_entry::Column::Inode,
worktree_entry::Column::MtimeSeconds,
worktree_entry::Column::MtimeNanos,
worktree_entry::Column::IsSymlink,
worktree_entry::Column::IsIgnored,
worktree_entry::Column::GitStatus,
worktree_entry::Column::ScanId,
])
.to_owned(),
)
.exec(&*tx)
.await?;
}
if !update.removed_entries.is_empty() {
worktree_entry::Entity::update_many()
.filter(
worktree_entry::Column::ProjectId
.eq(project_id)
.and(worktree_entry::Column::WorktreeId.eq(worktree_id))
.and(
worktree_entry::Column::Id
.is_in(update.removed_entries.iter().map(|id| *id as i64)),
),
)
.set(worktree_entry::ActiveModel {
is_deleted: ActiveValue::Set(true),
scan_id: ActiveValue::Set(update.scan_id as i64),
..Default::default()
})
.exec(&*tx)
.await?;
}
if !update.updated_repositories.is_empty() {
worktree_repository::Entity::insert_many(update.updated_repositories.iter().map(
|repository| worktree_repository::ActiveModel {
project_id: ActiveValue::set(project_id),
worktree_id: ActiveValue::set(worktree_id),
work_directory_id: ActiveValue::set(repository.work_directory_id as i64),
scan_id: ActiveValue::set(update.scan_id as i64),
branch: ActiveValue::set(repository.branch.clone()),
is_deleted: ActiveValue::set(false),
},
))
.on_conflict(
OnConflict::columns([
worktree_repository::Column::ProjectId,
worktree_repository::Column::WorktreeId,
worktree_repository::Column::WorkDirectoryId,
])
.update_columns([
worktree_repository::Column::ScanId,
worktree_repository::Column::Branch,
])
.to_owned(),
)
.exec(&*tx)
.await?;
}
if !update.removed_repositories.is_empty() {
worktree_repository::Entity::update_many()
.filter(
worktree_repository::Column::ProjectId
.eq(project_id)
.and(worktree_repository::Column::WorktreeId.eq(worktree_id))
.and(
worktree_repository::Column::WorkDirectoryId
.is_in(update.removed_repositories.iter().map(|id| *id as i64)),
),
)
.set(worktree_repository::ActiveModel {
is_deleted: ActiveValue::Set(true),
scan_id: ActiveValue::Set(update.scan_id as i64),
..Default::default()
})
.exec(&*tx)
.await?;
}
let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
Ok(connection_ids)
})
.await
}
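// Illustrative sketch of the message this method consumes (field values are
// hypothetical; only the fields referenced above are shown):
//
//     proto::UpdateWorktree {
//         project_id, worktree_id, root_name, abs_path, scan_id,
//         is_last_update: true,
//         updated_entries,       // upserted into worktree_entry
//         removed_entries,       // soft-deleted (is_deleted = true)
//         updated_repositories,  // upserted into worktree_repository
//         removed_repositories,  // soft-deleted
//         ..
//     }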
pub async fn update_diagnostic_summary(
&self,
update: &proto::UpdateDiagnosticSummary,
connection: ConnectionId,
) -> Result<RoomGuard<Vec<ConnectionId>>> {
let project_id = ProjectId::from_proto(update.project_id);
let worktree_id = update.worktree_id as i64;
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let summary = update
.summary
.as_ref()
.ok_or_else(|| anyhow!("invalid summary"))?;
// Ensure the update comes from the host.
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
if project.host_connection()? != connection {
return Err(anyhow!("can't update a project hosted by someone else"))?;
}
// Update summary.
worktree_diagnostic_summary::Entity::insert(worktree_diagnostic_summary::ActiveModel {
project_id: ActiveValue::set(project_id),
worktree_id: ActiveValue::set(worktree_id),
path: ActiveValue::set(summary.path.clone()),
language_server_id: ActiveValue::set(summary.language_server_id as i64),
error_count: ActiveValue::set(summary.error_count as i32),
warning_count: ActiveValue::set(summary.warning_count as i32),
..Default::default()
})
.on_conflict(
OnConflict::columns([
worktree_diagnostic_summary::Column::ProjectId,
worktree_diagnostic_summary::Column::WorktreeId,
worktree_diagnostic_summary::Column::Path,
])
.update_columns([
worktree_diagnostic_summary::Column::LanguageServerId,
worktree_diagnostic_summary::Column::ErrorCount,
worktree_diagnostic_summary::Column::WarningCount,
])
.to_owned(),
)
.exec(&*tx)
.await?;
let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
Ok(connection_ids)
})
.await
}
pub async fn start_language_server(
&self,
update: &proto::StartLanguageServer,
connection: ConnectionId,
) -> Result<RoomGuard<Vec<ConnectionId>>> {
let project_id = ProjectId::from_proto(update.project_id);
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let server = update
.server
.as_ref()
.ok_or_else(|| anyhow!("invalid language server"))?;
// Ensure the update comes from the host.
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
if project.host_connection()? != connection {
return Err(anyhow!("can't update a project hosted by someone else"))?;
}
// Add the newly-started language server.
language_server::Entity::insert(language_server::ActiveModel {
project_id: ActiveValue::set(project_id),
id: ActiveValue::set(server.id as i64),
name: ActiveValue::set(server.name.clone()),
..Default::default()
})
.on_conflict(
OnConflict::columns([
language_server::Column::ProjectId,
language_server::Column::Id,
])
.update_column(language_server::Column::Name)
.to_owned(),
)
.exec(&*tx)
.await?;
let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
Ok(connection_ids)
})
.await
}
pub async fn update_worktree_settings(
&self,
update: &proto::UpdateWorktreeSettings,
connection: ConnectionId,
) -> Result<RoomGuard<Vec<ConnectionId>>> {
let project_id = ProjectId::from_proto(update.project_id);
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
// Ensure the update comes from the host.
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
if project.host_connection()? != connection {
return Err(anyhow!("can't update a project hosted by someone else"))?;
}
if let Some(content) = &update.content {
worktree_settings_file::Entity::insert(worktree_settings_file::ActiveModel {
project_id: ActiveValue::Set(project_id),
worktree_id: ActiveValue::Set(update.worktree_id as i64),
path: ActiveValue::Set(update.path.clone()),
content: ActiveValue::Set(content.clone()),
})
.on_conflict(
OnConflict::columns([
worktree_settings_file::Column::ProjectId,
worktree_settings_file::Column::WorktreeId,
worktree_settings_file::Column::Path,
])
.update_column(worktree_settings_file::Column::Content)
.to_owned(),
)
.exec(&*tx)
.await?;
} else {
worktree_settings_file::Entity::delete(worktree_settings_file::ActiveModel {
project_id: ActiveValue::Set(project_id),
worktree_id: ActiveValue::Set(update.worktree_id as i64),
path: ActiveValue::Set(update.path.clone()),
..Default::default()
})
.exec(&*tx)
.await?;
}
let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
Ok(connection_ids)
})
.await
}
pub async fn join_project(
&self,
project_id: ProjectId,
connection: ConnectionId,
) -> Result<RoomGuard<(Project, ReplicaId)>> {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let participant = room_participant::Entity::find()
.filter(
Condition::all()
.add(
room_participant::Column::AnsweringConnectionId
.eq(connection.id as i32),
)
.add(
room_participant::Column::AnsweringConnectionServerId
.eq(connection.owner_id as i32),
),
)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("must join a room first"))?;
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
if project.room_id != participant.room_id {
return Err(anyhow!("no such project"))?;
}
let mut collaborators = project
.find_related(project_collaborator::Entity)
.all(&*tx)
.await?;
let replica_ids = collaborators
.iter()
.map(|c| c.replica_id)
.collect::<HashSet<_>>();
let mut replica_id = ReplicaId(1);
while replica_ids.contains(&replica_id) {
replica_id.0 += 1;
}
let new_collaborator = project_collaborator::ActiveModel {
project_id: ActiveValue::set(project_id),
connection_id: ActiveValue::set(connection.id as i32),
connection_server_id: ActiveValue::set(ServerId(connection.owner_id as i32)),
user_id: ActiveValue::set(participant.user_id),
replica_id: ActiveValue::set(replica_id),
is_host: ActiveValue::set(false),
..Default::default()
}
.insert(&*tx)
.await?;
collaborators.push(new_collaborator);
let db_worktrees = project.find_related(worktree::Entity).all(&*tx).await?;
let mut worktrees = db_worktrees
.into_iter()
.map(|db_worktree| {
(
db_worktree.id as u64,
Worktree {
id: db_worktree.id as u64,
abs_path: db_worktree.abs_path,
root_name: db_worktree.root_name,
visible: db_worktree.visible,
entries: Default::default(),
repository_entries: Default::default(),
diagnostic_summaries: Default::default(),
settings_files: Default::default(),
scan_id: db_worktree.scan_id as u64,
completed_scan_id: db_worktree.completed_scan_id as u64,
},
)
})
.collect::<BTreeMap<_, _>>();
// Populate worktree entries.
{
let mut db_entries = worktree_entry::Entity::find()
.filter(
Condition::all()
.add(worktree_entry::Column::ProjectId.eq(project_id))
.add(worktree_entry::Column::IsDeleted.eq(false)),
)
.stream(&*tx)
.await?;
while let Some(db_entry) = db_entries.next().await {
let db_entry = db_entry?;
if let Some(worktree) = worktrees.get_mut(&(db_entry.worktree_id as u64)) {
worktree.entries.push(proto::Entry {
id: db_entry.id as u64,
is_dir: db_entry.is_dir,
path: db_entry.path,
inode: db_entry.inode as u64,
mtime: Some(proto::Timestamp {
seconds: db_entry.mtime_seconds as u64,
nanos: db_entry.mtime_nanos as u32,
}),
is_symlink: db_entry.is_symlink,
is_ignored: db_entry.is_ignored,
is_external: db_entry.is_external,
git_status: db_entry.git_status.map(|status| status as i32),
});
}
}
}
// Populate repository entries.
{
let mut db_repository_entries = worktree_repository::Entity::find()
.filter(
Condition::all()
.add(worktree_repository::Column::ProjectId.eq(project_id))
.add(worktree_repository::Column::IsDeleted.eq(false)),
)
.stream(&*tx)
.await?;
while let Some(db_repository_entry) = db_repository_entries.next().await {
let db_repository_entry = db_repository_entry?;
if let Some(worktree) =
worktrees.get_mut(&(db_repository_entry.worktree_id as u64))
{
worktree.repository_entries.insert(
db_repository_entry.work_directory_id as u64,
proto::RepositoryEntry {
work_directory_id: db_repository_entry.work_directory_id as u64,
branch: db_repository_entry.branch,
},
);
}
}
}
// Populate worktree diagnostic summaries.
{
let mut db_summaries = worktree_diagnostic_summary::Entity::find()
.filter(worktree_diagnostic_summary::Column::ProjectId.eq(project_id))
.stream(&*tx)
.await?;
while let Some(db_summary) = db_summaries.next().await {
let db_summary = db_summary?;
if let Some(worktree) = worktrees.get_mut(&(db_summary.worktree_id as u64)) {
worktree
.diagnostic_summaries
.push(proto::DiagnosticSummary {
path: db_summary.path,
language_server_id: db_summary.language_server_id as u64,
error_count: db_summary.error_count as u32,
warning_count: db_summary.warning_count as u32,
});
}
}
}
// Populate worktree settings files.
{
let mut db_settings_files = worktree_settings_file::Entity::find()
.filter(worktree_settings_file::Column::ProjectId.eq(project_id))
.stream(&*tx)
.await?;
while let Some(db_settings_file) = db_settings_files.next().await {
let db_settings_file = db_settings_file?;
if let Some(worktree) =
worktrees.get_mut(&(db_settings_file.worktree_id as u64))
{
worktree.settings_files.push(WorktreeSettingsFile {
path: db_settings_file.path,
content: db_settings_file.content,
});
}
}
}
// Populate language servers.
let language_servers = project
.find_related(language_server::Entity)
.all(&*tx)
.await?;
let project = Project {
collaborators: collaborators
.into_iter()
.map(|collaborator| ProjectCollaborator {
connection_id: collaborator.connection(),
user_id: collaborator.user_id,
replica_id: collaborator.replica_id,
is_host: collaborator.is_host,
})
.collect(),
worktrees,
language_servers: language_servers
.into_iter()
.map(|language_server| proto::LanguageServer {
id: language_server.id as u64,
name: language_server.name,
})
.collect(),
};
Ok((project, replica_id))
})
.await
}
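// Replica ids follow the convention visible above: the host is always ReplicaId(0)
// (see `share_project`), and each joining guest takes the smallest unused id
// starting from ReplicaId(1).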
pub async fn leave_project(
&self,
project_id: ProjectId,
connection: ConnectionId,
) -> Result<RoomGuard<(proto::Room, LeftProject)>> {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let result = project_collaborator::Entity::delete_many()
.filter(
Condition::all()
.add(project_collaborator::Column::ProjectId.eq(project_id))
.add(project_collaborator::Column::ConnectionId.eq(connection.id as i32))
.add(
project_collaborator::Column::ConnectionServerId
.eq(connection.owner_id as i32),
),
)
.exec(&*tx)
.await?;
if result.rows_affected == 0 {
Err(anyhow!("not a collaborator on this project"))?;
}
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
let collaborators = project
.find_related(project_collaborator::Entity)
.all(&*tx)
.await?;
let connection_ids = collaborators
.into_iter()
.map(|collaborator| collaborator.connection())
.collect();
follower::Entity::delete_many()
.filter(
Condition::any()
.add(
Condition::all()
.add(follower::Column::ProjectId.eq(Some(project_id)))
.add(
follower::Column::LeaderConnectionServerId
.eq(connection.owner_id),
)
.add(follower::Column::LeaderConnectionId.eq(connection.id)),
)
.add(
Condition::all()
.add(follower::Column::ProjectId.eq(Some(project_id)))
.add(
follower::Column::FollowerConnectionServerId
.eq(connection.owner_id),
)
.add(follower::Column::FollowerConnectionId.eq(connection.id)),
),
)
.exec(&*tx)
.await?;
let room = self.get_room(project.room_id, &tx).await?;
let left_project = LeftProject {
id: project_id,
host_user_id: project.host_user_id,
host_connection_id: project.host_connection()?,
connection_ids,
};
Ok((room, left_project))
})
.await
}
pub async fn project_collaborators(
&self,
project_id: ProjectId,
connection_id: ConnectionId,
) -> Result<RoomGuard<Vec<ProjectCollaborator>>> {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let collaborators = project_collaborator::Entity::find()
.filter(project_collaborator::Column::ProjectId.eq(project_id))
.all(&*tx)
.await?
.into_iter()
.map(|collaborator| ProjectCollaborator {
connection_id: collaborator.connection(),
user_id: collaborator.user_id,
replica_id: collaborator.replica_id,
is_host: collaborator.is_host,
})
.collect::<Vec<_>>();
if collaborators
.iter()
.any(|collaborator| collaborator.connection_id == connection_id)
{
Ok(collaborators)
} else {
Err(anyhow!("no such project"))?
}
})
.await
}
pub async fn project_connection_ids(
&self,
project_id: ProjectId,
connection_id: ConnectionId,
) -> Result<RoomGuard<HashSet<ConnectionId>>> {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let mut collaborators = project_collaborator::Entity::find()
.filter(project_collaborator::Column::ProjectId.eq(project_id))
.stream(&*tx)
.await?;
let mut connection_ids = HashSet::default();
while let Some(collaborator) = collaborators.next().await {
let collaborator = collaborator?;
connection_ids.insert(collaborator.connection());
}
if connection_ids.contains(&connection_id) {
Ok(connection_ids)
} else {
Err(anyhow!("no such project"))?
}
})
.await
}
async fn project_guest_connection_ids(
&self,
project_id: ProjectId,
tx: &DatabaseTransaction,
) -> Result<Vec<ConnectionId>> {
let mut collaborators = project_collaborator::Entity::find()
.filter(
project_collaborator::Column::ProjectId
.eq(project_id)
.and(project_collaborator::Column::IsHost.eq(false)),
)
.stream(tx)
.await?;
let mut guest_connection_ids = Vec::new();
while let Some(collaborator) = collaborators.next().await {
let collaborator = collaborator?;
guest_connection_ids.push(collaborator.connection());
}
Ok(guest_connection_ids)
}
pub async fn room_id_for_project(&self, project_id: ProjectId) -> Result<RoomId> {
self.transaction(|tx| async move {
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("project {} not found", project_id))?;
Ok(project.room_id)
})
.await
}
pub async fn check_room_participants(
&self,
room_id: RoomId,
leader_id: ConnectionId,
follower_id: ConnectionId,
) -> Result<()> {
self.transaction(|tx| async move {
use room_participant::Column;
let count = room_participant::Entity::find()
.filter(
Condition::all().add(Column::RoomId.eq(room_id)).add(
Condition::any()
.add(Column::AnsweringConnectionId.eq(leader_id.id as i32).and(
Column::AnsweringConnectionServerId.eq(leader_id.owner_id as i32),
))
.add(Column::AnsweringConnectionId.eq(follower_id.id as i32).and(
Column::AnsweringConnectionServerId.eq(follower_id.owner_id as i32),
)),
),
)
.count(&*tx)
.await?;
if count < 2 {
Err(anyhow!("not room participants"))?;
}
Ok(())
})
.await
}
pub async fn follow(
&self,
room_id: RoomId,
project_id: ProjectId,
leader_connection: ConnectionId,
follower_connection: ConnectionId,
) -> Result<RoomGuard<proto::Room>> {
self.room_transaction(room_id, |tx| async move {
follower::ActiveModel {
room_id: ActiveValue::set(room_id),
project_id: ActiveValue::set(project_id),
leader_connection_server_id: ActiveValue::set(ServerId(
leader_connection.owner_id as i32,
)),
leader_connection_id: ActiveValue::set(leader_connection.id as i32),
follower_connection_server_id: ActiveValue::set(ServerId(
follower_connection.owner_id as i32,
)),
follower_connection_id: ActiveValue::set(follower_connection.id as i32),
..Default::default()
}
.insert(&*tx)
.await?;
let room = self.get_room(room_id, &*tx).await?;
Ok(room)
})
.await
}
pub async fn unfollow(
&self,
room_id: RoomId,
project_id: ProjectId,
leader_connection: ConnectionId,
follower_connection: ConnectionId,
) -> Result<RoomGuard<proto::Room>> {
self.room_transaction(room_id, |tx| async move {
follower::Entity::delete_many()
.filter(
Condition::all()
.add(follower::Column::RoomId.eq(room_id))
.add(follower::Column::ProjectId.eq(project_id))
.add(
follower::Column::LeaderConnectionServerId
.eq(leader_connection.owner_id),
)
.add(follower::Column::LeaderConnectionId.eq(leader_connection.id))
.add(
follower::Column::FollowerConnectionServerId
.eq(follower_connection.owner_id),
)
.add(follower::Column::FollowerConnectionId.eq(follower_connection.id)),
)
.exec(&*tx)
.await?;
let room = self.get_room(room_id, &*tx).await?;
Ok(room)
})
.await
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,99 @@
use super::*;
impl Database {
pub async fn create_server(&self, environment: &str) -> Result<ServerId> {
self.transaction(|tx| async move {
let server = server::ActiveModel {
environment: ActiveValue::set(environment.into()),
..Default::default()
}
.insert(&*tx)
.await?;
Ok(server.id)
})
.await
}
pub async fn stale_server_resource_ids(
&self,
environment: &str,
new_server_id: ServerId,
) -> Result<(Vec<RoomId>, Vec<ChannelId>)> {
self.transaction(|tx| async move {
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryRoomIds {
RoomId,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryChannelIds {
ChannelId,
}
let stale_server_epochs = self
.stale_server_ids(environment, new_server_id, &tx)
.await?;
let room_ids = room_participant::Entity::find()
.select_only()
.column(room_participant::Column::RoomId)
.distinct()
.filter(
room_participant::Column::AnsweringConnectionServerId
.is_in(stale_server_epochs.iter().copied()),
)
.into_values::<_, QueryRoomIds>()
.all(&*tx)
.await?;
let channel_ids = channel_buffer_collaborator::Entity::find()
.select_only()
.column(channel_buffer_collaborator::Column::ChannelId)
.distinct()
.filter(
channel_buffer_collaborator::Column::ConnectionServerId
.is_in(stale_server_epochs.iter().copied()),
)
.into_values::<_, QueryChannelIds>()
.all(&*tx)
.await?;
Ok((room_ids, channel_ids))
})
.await
}
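// Illustrative rollover sketch (hypothetical caller; the "production" environment
// name is made up): on deploy, a new server row is created, resources still attached
// to older servers in the same environment are collected for cleanup, and the stale
// rows are finally removed:
//
//     let server_id = db.create_server("production").await?;
//     let (room_ids, channel_ids) = db
//         .stale_server_resource_ids("production", server_id)
//         .await?;
//     // ...refresh or tear down those rooms and channel buffers...
//     db.delete_stale_servers("production", server_id).await?;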
pub async fn delete_stale_servers(
&self,
environment: &str,
new_server_id: ServerId,
) -> Result<()> {
self.transaction(|tx| async move {
server::Entity::delete_many()
.filter(
Condition::all()
.add(server::Column::Environment.eq(environment))
.add(server::Column::Id.ne(new_server_id)),
)
.exec(&*tx)
.await?;
Ok(())
})
.await
}
async fn stale_server_ids(
&self,
environment: &str,
new_server_id: ServerId,
tx: &DatabaseTransaction,
) -> Result<Vec<ServerId>> {
let stale_servers = server::Entity::find()
.filter(
Condition::all()
.add(server::Column::Environment.eq(environment))
.add(server::Column::Id.ne(new_server_id)),
)
.all(&*tx)
.await?;
Ok(stale_servers.into_iter().map(|server| server.id).collect())
}
}

View file

@ -0,0 +1,259 @@
use super::*;
impl Database {
pub async fn create_user(
&self,
email_address: &str,
admin: bool,
params: NewUserParams,
) -> Result<NewUserResult> {
self.transaction(|tx| async {
let tx = tx;
let user = user::Entity::insert(user::ActiveModel {
email_address: ActiveValue::set(Some(email_address.into())),
github_login: ActiveValue::set(params.github_login.clone()),
github_user_id: ActiveValue::set(Some(params.github_user_id)),
admin: ActiveValue::set(admin),
metrics_id: ActiveValue::set(Uuid::new_v4()),
..Default::default()
})
.on_conflict(
OnConflict::column(user::Column::GithubLogin)
.update_column(user::Column::GithubLogin)
.to_owned(),
)
.exec_with_returning(&*tx)
.await?;
Ok(NewUserResult {
user_id: user.id,
metrics_id: user.metrics_id.to_string(),
signup_device_id: None,
inviting_user_id: None,
})
})
.await
}
pub async fn get_user_by_id(&self, id: UserId) -> Result<Option<user::Model>> {
self.transaction(|tx| async move { Ok(user::Entity::find_by_id(id).one(&*tx).await?) })
.await
}
pub async fn get_users_by_ids(&self, ids: Vec<UserId>) -> Result<Vec<user::Model>> {
self.transaction(|tx| async {
let tx = tx;
Ok(user::Entity::find()
.filter(user::Column::Id.is_in(ids.iter().copied()))
.all(&*tx)
.await?)
})
.await
}
pub async fn get_user_by_github_login(&self, github_login: &str) -> Result<Option<User>> {
self.transaction(|tx| async move {
Ok(user::Entity::find()
.filter(user::Column::GithubLogin.eq(github_login))
.one(&*tx)
.await?)
})
.await
}
pub async fn get_or_create_user_by_github_account(
&self,
github_login: &str,
github_user_id: Option<i32>,
github_email: Option<&str>,
) -> Result<Option<User>> {
self.transaction(|tx| async move {
let tx = &*tx;
if let Some(github_user_id) = github_user_id {
if let Some(user_by_github_user_id) = user::Entity::find()
.filter(user::Column::GithubUserId.eq(github_user_id))
.one(tx)
.await?
{
let mut user_by_github_user_id = user_by_github_user_id.into_active_model();
user_by_github_user_id.github_login = ActiveValue::set(github_login.into());
Ok(Some(user_by_github_user_id.update(tx).await?))
} else if let Some(user_by_github_login) = user::Entity::find()
.filter(user::Column::GithubLogin.eq(github_login))
.one(tx)
.await?
{
let mut user_by_github_login = user_by_github_login.into_active_model();
user_by_github_login.github_user_id = ActiveValue::set(Some(github_user_id));
Ok(Some(user_by_github_login.update(tx).await?))
} else {
let user = user::Entity::insert(user::ActiveModel {
email_address: ActiveValue::set(github_email.map(|email| email.into())),
github_login: ActiveValue::set(github_login.into()),
github_user_id: ActiveValue::set(Some(github_user_id)),
admin: ActiveValue::set(false),
invite_count: ActiveValue::set(0),
invite_code: ActiveValue::set(None),
metrics_id: ActiveValue::set(Uuid::new_v4()),
..Default::default()
})
.exec_with_returning(&*tx)
.await?;
Ok(Some(user))
}
} else {
Ok(user::Entity::find()
.filter(user::Column::GithubLogin.eq(github_login))
.one(tx)
.await?)
}
})
.await
}
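// To summarize the branches above: when a GitHub user id is supplied, an existing
// row with that id has its login refreshed, an existing row matching only the login
// gets its GitHub id backfilled, and otherwise a fresh non-admin user is inserted;
// without an id the lookup is read-only by login.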
pub async fn get_all_users(&self, page: u32, limit: u32) -> Result<Vec<User>> {
self.transaction(|tx| async move {
Ok(user::Entity::find()
.order_by_asc(user::Column::GithubLogin)
.limit(limit as u64)
.offset(page as u64 * limit as u64)
.all(&*tx)
.await?)
})
.await
}
pub async fn get_user_metrics_id(&self, id: UserId) -> Result<String> {
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryAs {
MetricsId,
}
self.transaction(|tx| async move {
let metrics_id: Uuid = user::Entity::find_by_id(id)
.select_only()
.column(user::Column::MetricsId)
.into_values::<_, QueryAs>()
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("could not find user"))?;
Ok(metrics_id.to_string())
})
.await
}
pub async fn set_user_connected_once(&self, id: UserId, connected_once: bool) -> Result<()> {
self.transaction(|tx| async move {
user::Entity::update_many()
.filter(user::Column::Id.eq(id))
.set(user::ActiveModel {
connected_once: ActiveValue::set(connected_once),
..Default::default()
})
.exec(&*tx)
.await?;
Ok(())
})
.await
}
pub async fn destroy_user(&self, id: UserId) -> Result<()> {
self.transaction(|tx| async move {
access_token::Entity::delete_many()
.filter(access_token::Column::UserId.eq(id))
.exec(&*tx)
.await?;
user::Entity::delete_by_id(id).exec(&*tx).await?;
Ok(())
})
.await
}
pub async fn fuzzy_search_users(&self, name_query: &str, limit: u32) -> Result<Vec<User>> {
self.transaction(|tx| async {
let tx = tx;
let like_string = Self::fuzzy_like_string(name_query);
let query = "
SELECT users.*
FROM users
WHERE github_login ILIKE $1
ORDER BY github_login <-> $2
LIMIT $3
";
Ok(user::Entity::find()
.from_raw_sql(Statement::from_sql_and_values(
self.pool.get_database_backend(),
query,
vec![like_string.into(), name_query.into(), limit.into()],
))
.all(&*tx)
.await?)
})
.await
}
pub fn fuzzy_like_string(string: &str) -> String {
let mut result = String::with_capacity(string.len() * 2 + 1);
for c in string.chars() {
if c.is_alphanumeric() {
result.push('%');
result.push(c);
}
}
result.push('%');
result
}
pub async fn create_user_flag(&self, flag: &str) -> Result<FlagId> {
self.transaction(|tx| async move {
let flag = feature_flag::Entity::insert(feature_flag::ActiveModel {
flag: ActiveValue::set(flag.to_string()),
..Default::default()
})
.exec(&*tx)
.await?
.last_insert_id;
Ok(flag)
})
.await
}
pub async fn add_user_flag(&self, user: UserId, flag: FlagId) -> Result<()> {
self.transaction(|tx| async move {
user_feature::Entity::insert(user_feature::ActiveModel {
user_id: ActiveValue::set(user),
feature_id: ActiveValue::set(flag),
})
.exec(&*tx)
.await?;
Ok(())
})
.await
}
pub async fn get_user_flags(&self, user: UserId) -> Result<Vec<String>> {
self.transaction(|tx| async move {
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryAs {
Flag,
}
let flags = user::Model {
id: user,
..Default::default()
}
.find_linked(user::UserFlags)
.select_only()
.column(feature_flag::Column::Flag)
.into_values::<_, QueryAs>()
.all(&*tx)
.await?;
Ok(flags)
})
.await
}
}
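#[cfg(test)]
mod fuzzy_like_string_tests {
    // Minimal sketch exercising the pure helper above; the module name and the
    // chosen inputs are illustrative.
    use super::*;

    #[test]
    fn builds_ilike_pattern() {
        // Each alphanumeric character is preceded by '%', everything else is
        // dropped, and a trailing '%' is appended.
        assert_eq!(Database::fuzzy_like_string("zed"), "%z%e%d%");
        assert_eq!(Database::fuzzy_like_string("the dog"), "%t%h%e%d%o%g%");
        assert_eq!(Database::fuzzy_like_string(""), "%");
    }
}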

View file

@ -0,0 +1,32 @@
pub mod access_token;
pub mod buffer;
pub mod buffer_operation;
pub mod buffer_snapshot;
pub mod channel;
pub mod channel_buffer_collaborator;
pub mod channel_chat_participant;
pub mod channel_member;
pub mod channel_message;
pub mod channel_message_mention;
pub mod contact;
pub mod feature_flag;
pub mod follower;
pub mod language_server;
pub mod notification;
pub mod notification_kind;
pub mod observed_buffer_edits;
pub mod observed_channel_messages;
pub mod project;
pub mod project_collaborator;
pub mod room;
pub mod room_participant;
pub mod server;
pub mod signup;
pub mod user;
pub mod user_feature;
pub mod worktree;
pub mod worktree_diagnostic_summary;
pub mod worktree_entry;
pub mod worktree_repository;
pub mod worktree_repository_statuses;
pub mod worktree_settings_file;

View file

@ -0,0 +1,29 @@
use crate::db::{AccessTokenId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "access_tokens")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: AccessTokenId,
pub user_id: UserId,
pub hash: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
User,
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,45 @@
use crate::db::{BufferId, ChannelId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "buffers")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: BufferId,
pub epoch: i32,
pub channel_id: ChannelId,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::buffer_operation::Entity")]
Operations,
#[sea_orm(has_many = "super::buffer_snapshot::Entity")]
Snapshots,
#[sea_orm(
belongs_to = "super::channel::Entity",
from = "Column::ChannelId",
to = "super::channel::Column::Id"
)]
Channel,
}
impl Related<super::buffer_operation::Entity> for Entity {
fn to() -> RelationDef {
Relation::Operations.def()
}
}
impl Related<super::buffer_snapshot::Entity> for Entity {
fn to() -> RelationDef {
Relation::Snapshots.def()
}
}
impl Related<super::channel::Entity> for Entity {
fn to() -> RelationDef {
Relation::Channel.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,34 @@
use crate::db::BufferId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "buffer_operations")]
pub struct Model {
#[sea_orm(primary_key)]
pub buffer_id: BufferId,
#[sea_orm(primary_key)]
pub epoch: i32,
#[sea_orm(primary_key)]
pub lamport_timestamp: i32,
#[sea_orm(primary_key)]
pub replica_id: i32,
pub value: Vec<u8>,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::buffer::Entity",
from = "Column::BufferId",
to = "super::buffer::Column::Id"
)]
Buffer,
}
impl Related<super::buffer::Entity> for Entity {
fn to() -> RelationDef {
Relation::Buffer.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,31 @@
use crate::db::BufferId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "buffer_snapshots")]
pub struct Model {
#[sea_orm(primary_key)]
pub buffer_id: BufferId,
#[sea_orm(primary_key)]
pub epoch: i32,
pub text: String,
pub operation_serialization_version: i32,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::buffer::Entity",
from = "Column::BufferId",
to = "super::buffer::Column::Id"
)]
Buffer,
}
impl Related<super::buffer::Entity> for Entity {
fn to() -> RelationDef {
Relation::Buffer.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,79 @@
use crate::db::{ChannelId, ChannelVisibility};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "channels")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ChannelId,
pub name: String,
pub visibility: ChannelVisibility,
pub parent_path: String,
}
impl Model {
pub fn parent_id(&self) -> Option<ChannelId> {
self.ancestors().last()
}
pub fn ancestors(&self) -> impl Iterator<Item = ChannelId> + '_ {
self.parent_path
.trim_end_matches('/')
.split('/')
.filter_map(|id| Some(ChannelId::from_proto(id.parse().ok()?)))
}
pub fn ancestors_including_self(&self) -> impl Iterator<Item = ChannelId> + '_ {
self.ancestors().chain(Some(self.id))
}
pub fn path(&self) -> String {
format!("{}{}/", self.parent_path, self.id)
}
}
impl ActiveModelBehavior for ActiveModel {}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_one = "super::room::Entity")]
Room,
#[sea_orm(has_one = "super::buffer::Entity")]
Buffer,
#[sea_orm(has_many = "super::channel_member::Entity")]
Member,
#[sea_orm(has_many = "super::channel_buffer_collaborator::Entity")]
BufferCollaborators,
#[sea_orm(has_many = "super::channel_chat_participant::Entity")]
ChatParticipants,
}
impl Related<super::channel_member::Entity> for Entity {
fn to() -> RelationDef {
Relation::Member.def()
}
}
impl Related<super::room::Entity> for Entity {
fn to() -> RelationDef {
Relation::Room.def()
}
}
impl Related<super::buffer::Entity> for Entity {
fn to() -> RelationDef {
Relation::Buffer.def()
}
}
impl Related<super::channel_buffer_collaborator::Entity> for Entity {
fn to() -> RelationDef {
Relation::BufferCollaborators.def()
}
}
impl Related<super::channel_chat_participant::Entity> for Entity {
fn to() -> RelationDef {
Relation::ChatParticipants.def()
}
}
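#[cfg(test)]
mod path_tests {
    // Minimal sketch of the parent-path helpers above; it assumes ChannelId's
    // Display and equality behave like the crate's other numeric id newtypes.
    use super::*;

    #[test]
    fn ancestors_and_path() {
        let channel = Model {
            id: ChannelId::from_proto(9),
            parent_path: "5/7/".to_string(),
            ..Default::default()
        };
        assert_eq!(
            channel.ancestors().collect::<Vec<_>>(),
            vec![ChannelId::from_proto(5), ChannelId::from_proto(7)]
        );
        assert_eq!(channel.parent_id(), Some(ChannelId::from_proto(7)));
        assert_eq!(channel.path(), "5/7/9/");
    }
}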

View file

@ -0,0 +1,43 @@
use crate::db::{ChannelBufferCollaboratorId, ChannelId, ReplicaId, ServerId, UserId};
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "channel_buffer_collaborators")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ChannelBufferCollaboratorId,
pub channel_id: ChannelId,
pub connection_id: i32,
pub connection_server_id: ServerId,
pub connection_lost: bool,
pub user_id: UserId,
pub replica_id: ReplicaId,
}
impl Model {
pub fn connection(&self) -> ConnectionId {
ConnectionId {
owner_id: self.connection_server_id.0 as u32,
id: self.connection_id as u32,
}
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::channel::Entity",
from = "Column::ChannelId",
to = "super::channel::Column::Id"
)]
Channel,
}
impl Related<super::channel::Entity> for Entity {
fn to() -> RelationDef {
Relation::Channel.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,41 @@
use crate::db::{ChannelChatParticipantId, ChannelId, ServerId, UserId};
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "channel_chat_participants")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ChannelChatParticipantId,
pub channel_id: ChannelId,
pub user_id: UserId,
pub connection_id: i32,
pub connection_server_id: ServerId,
}
impl Model {
pub fn connection(&self) -> ConnectionId {
ConnectionId {
owner_id: self.connection_server_id.0 as u32,
id: self.connection_id as u32,
}
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::channel::Entity",
from = "Column::ChannelId",
to = "super::channel::Column::Id"
)]
Channel,
}
impl Related<super::channel::Entity> for Entity {
fn to() -> RelationDef {
Relation::Channel.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,59 @@
use crate::db::{channel_member, ChannelId, ChannelMemberId, ChannelRole, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "channel_members")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ChannelMemberId,
pub channel_id: ChannelId,
pub user_id: UserId,
pub accepted: bool,
pub role: ChannelRole,
}
impl ActiveModelBehavior for ActiveModel {}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::channel::Entity",
from = "Column::ChannelId",
to = "super::channel::Column::Id"
)]
Channel,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
User,
}
impl Related<super::channel::Entity> for Entity {
fn to() -> RelationDef {
Relation::Channel.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
#[derive(Debug)]
pub struct UserToChannel;
impl Linked for UserToChannel {
type FromEntity = super::user::Entity;
type ToEntity = super::channel::Entity;
fn link(&self) -> Vec<RelationDef> {
vec![
channel_member::Relation::User.def().rev(),
channel_member::Relation::Channel.def(),
]
}
}

View file

@ -0,0 +1,45 @@
use crate::db::{ChannelId, MessageId, UserId};
use sea_orm::entity::prelude::*;
use time::PrimitiveDateTime;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "channel_messages")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: MessageId,
pub channel_id: ChannelId,
pub sender_id: UserId,
pub body: String,
pub sent_at: PrimitiveDateTime,
pub nonce: Uuid,
}
impl ActiveModelBehavior for ActiveModel {}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::channel::Entity",
from = "Column::ChannelId",
to = "super::channel::Column::Id"
)]
Channel,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::SenderId",
to = "super::user::Column::Id"
)]
Sender,
}
impl Related<super::channel::Entity> for Entity {
fn to() -> RelationDef {
Relation::Channel.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::Sender.def()
}
}

View file

@ -0,0 +1,43 @@
use crate::db::{MessageId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "channel_message_mentions")]
pub struct Model {
#[sea_orm(primary_key)]
pub message_id: MessageId,
#[sea_orm(primary_key)]
pub start_offset: i32,
pub end_offset: i32,
pub user_id: UserId,
}
impl ActiveModelBehavior for ActiveModel {}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::channel_message::Entity",
from = "Column::MessageId",
to = "super::channel_message::Column::Id"
)]
Message,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
MentionedUser,
}
impl Related<super::channel_message::Entity> for Entity {
fn to() -> RelationDef {
Relation::Message.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::MentionedUser.def()
}
}

View file

@ -0,0 +1,32 @@
use crate::db::{ContactId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "contacts")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ContactId,
pub user_id_a: UserId,
pub user_id_b: UserId,
pub a_to_b: bool,
pub should_notify: bool,
pub accepted: bool,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::room_participant::Entity",
from = "Column::UserIdA",
to = "super::room_participant::Column::UserId"
)]
UserARoomParticipant,
#[sea_orm(
belongs_to = "super::room_participant::Entity",
from = "Column::UserIdB",
to = "super::room_participant::Column::UserId"
)]
UserBRoomParticipant,
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,40 @@
use sea_orm::entity::prelude::*;
use crate::db::FlagId;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "feature_flags")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: FlagId,
pub flag: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::user_feature::Entity")]
UserFeature,
}
impl Related<super::user_feature::Entity> for Entity {
fn to() -> RelationDef {
Relation::UserFeature.def()
}
}
impl ActiveModelBehavior for ActiveModel {}
pub struct FlaggedUsers;
impl Linked for FlaggedUsers {
type FromEntity = Entity;
type ToEntity = super::user::Entity;
fn link(&self) -> Vec<RelationDef> {
vec![
super::user_feature::Relation::Flag.def().rev(),
super::user_feature::Relation::User.def(),
]
}
}

View file

@ -0,0 +1,50 @@
use crate::db::{FollowerId, ProjectId, RoomId, ServerId};
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "followers")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: FollowerId,
pub room_id: RoomId,
pub project_id: ProjectId,
pub leader_connection_server_id: ServerId,
pub leader_connection_id: i32,
pub follower_connection_server_id: ServerId,
pub follower_connection_id: i32,
}
impl Model {
pub fn leader_connection(&self) -> ConnectionId {
ConnectionId {
owner_id: self.leader_connection_server_id.0 as u32,
id: self.leader_connection_id as u32,
}
}
pub fn follower_connection(&self) -> ConnectionId {
ConnectionId {
owner_id: self.follower_connection_server_id.0 as u32,
id: self.follower_connection_id as u32,
}
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::room::Entity",
from = "Column::RoomId",
to = "super::room::Column::Id"
)]
Room,
}
impl Related<super::room::Entity> for Entity {
fn to() -> RelationDef {
Relation::Room.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,30 @@
use crate::db::ProjectId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "language_servers")]
pub struct Model {
#[sea_orm(primary_key)]
pub project_id: ProjectId,
#[sea_orm(primary_key)]
pub id: i64,
pub name: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::project::Entity",
from = "Column::ProjectId",
to = "super::project::Column::Id"
)]
Project,
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::Project.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,29 @@
use crate::db::{NotificationId, NotificationKindId, UserId};
use sea_orm::entity::prelude::*;
use time::PrimitiveDateTime;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "notifications")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: NotificationId,
pub created_at: PrimitiveDateTime,
pub recipient_id: UserId,
pub kind: NotificationKindId,
pub entity_id: Option<i32>,
pub content: String,
pub is_read: bool,
pub response: Option<bool>,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::RecipientId",
to = "super::user::Column::Id"
)]
Recipient,
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,15 @@
use crate::db::NotificationKindId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "notification_kinds")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: NotificationKindId,
pub name: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,43 @@
use crate::db::{BufferId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "observed_buffer_edits")]
pub struct Model {
#[sea_orm(primary_key)]
pub user_id: UserId,
pub buffer_id: BufferId,
pub epoch: i32,
pub lamport_timestamp: i32,
pub replica_id: i32,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::buffer::Entity",
from = "Column::BufferId",
to = "super::buffer::Column::Id"
)]
Buffer,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
User,
}
impl Related<super::buffer::Entity> for Entity {
fn to() -> RelationDef {
Relation::Buffer.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,41 @@
use crate::db::{ChannelId, MessageId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "observed_channel_messages")]
pub struct Model {
#[sea_orm(primary_key)]
pub user_id: UserId,
pub channel_id: ChannelId,
pub channel_message_id: MessageId,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::channel::Entity",
from = "Column::ChannelId",
to = "super::channel::Column::Id"
)]
Channel,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
User,
}
impl Related<super::channel::Entity> for Entity {
fn to() -> RelationDef {
Relation::Channel.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,84 @@
use crate::db::{ProjectId, Result, RoomId, ServerId, UserId};
use anyhow::anyhow;
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "projects")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ProjectId,
pub room_id: RoomId,
pub host_user_id: UserId,
pub host_connection_id: Option<i32>,
pub host_connection_server_id: Option<ServerId>,
}
impl Model {
pub fn host_connection(&self) -> Result<ConnectionId> {
let host_connection_server_id = self
.host_connection_server_id
.ok_or_else(|| anyhow!("empty host_connection_server_id"))?;
let host_connection_id = self
.host_connection_id
.ok_or_else(|| anyhow!("empty host_connection_id"))?;
Ok(ConnectionId {
owner_id: host_connection_server_id.0 as u32,
id: host_connection_id as u32,
})
}
}
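// Example (values hypothetical): a row with host_connection_server_id = Some(ServerId(2))
// and host_connection_id = Some(7) maps to ConnectionId { owner_id: 2, id: 7 }; if either
// column is NULL, host_connection() returns an error instead.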
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::HostUserId",
to = "super::user::Column::Id"
)]
HostUser,
#[sea_orm(
belongs_to = "super::room::Entity",
from = "Column::RoomId",
to = "super::room::Column::Id"
)]
Room,
#[sea_orm(has_many = "super::worktree::Entity")]
Worktrees,
#[sea_orm(has_many = "super::project_collaborator::Entity")]
Collaborators,
#[sea_orm(has_many = "super::language_server::Entity")]
LanguageServers,
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::HostUser.def()
}
}
impl Related<super::room::Entity> for Entity {
fn to() -> RelationDef {
Relation::Room.def()
}
}
impl Related<super::worktree::Entity> for Entity {
fn to() -> RelationDef {
Relation::Worktrees.def()
}
}
impl Related<super::project_collaborator::Entity> for Entity {
fn to() -> RelationDef {
Relation::Collaborators.def()
}
}
impl Related<super::language_server::Entity> for Entity {
fn to() -> RelationDef {
Relation::LanguageServers.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,43 @@
use crate::db::{ProjectCollaboratorId, ProjectId, ReplicaId, ServerId, UserId};
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "project_collaborators")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ProjectCollaboratorId,
pub project_id: ProjectId,
pub connection_id: i32,
pub connection_server_id: ServerId,
pub user_id: UserId,
pub replica_id: ReplicaId,
pub is_host: bool,
}
impl Model {
pub fn connection(&self) -> ConnectionId {
ConnectionId {
owner_id: self.connection_server_id.0 as u32,
id: self.connection_id as u32,
}
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::project::Entity",
from = "Column::ProjectId",
to = "super::project::Column::Id"
)]
Project,
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::Project.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,54 @@
use crate::db::{ChannelId, RoomId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Default, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "rooms")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: RoomId,
pub live_kit_room: String,
pub channel_id: Option<ChannelId>,
pub enviroment: Option<String>, // field name keeps this spelling to match the underlying column
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::room_participant::Entity")]
RoomParticipant,
#[sea_orm(has_many = "super::project::Entity")]
Project,
#[sea_orm(has_many = "super::follower::Entity")]
Follower,
#[sea_orm(
belongs_to = "super::channel::Entity",
from = "Column::ChannelId",
to = "super::channel::Column::Id"
)]
Channel,
}
impl Related<super::room_participant::Entity> for Entity {
fn to() -> RelationDef {
Relation::RoomParticipant.def()
}
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::Project.def()
}
}
impl Related<super::follower::Entity> for Entity {
fn to() -> RelationDef {
Relation::Follower.def()
}
}
impl Related<super::channel::Entity> for Entity {
fn to() -> RelationDef {
Relation::Channel.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,61 @@
use crate::db::{ProjectId, RoomId, RoomParticipantId, ServerId, UserId};
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "room_participants")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: RoomParticipantId,
pub room_id: RoomId,
pub user_id: UserId,
pub answering_connection_id: Option<i32>,
pub answering_connection_server_id: Option<ServerId>,
pub answering_connection_lost: bool,
pub location_kind: Option<i32>,
pub location_project_id: Option<ProjectId>,
pub initial_project_id: Option<ProjectId>,
pub calling_user_id: UserId,
pub calling_connection_id: i32,
pub calling_connection_server_id: Option<ServerId>,
pub participant_index: Option<i32>,
}
impl Model {
pub fn answering_connection(&self) -> Option<ConnectionId> {
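// Both the answering connection id and its owning server id must be present;
// the `?` operators below yield None when either column is NULL.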
Some(ConnectionId {
owner_id: self.answering_connection_server_id?.0 as u32,
id: self.answering_connection_id? as u32,
})
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
User,
#[sea_orm(
belongs_to = "super::room::Entity",
from = "Column::RoomId",
to = "super::room::Column::Id"
)]
Room,
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl Related<super::room::Entity> for Entity {
fn to() -> RelationDef {
Relation::Room.def()
}
}
impl ActiveModelBehavior for ActiveModel {}
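A hedged sketch (illustration only) of how the answering-connection columns are typically combined to find participants that still have a live connection; the helper name is an assumption.

// Hypothetical helper: connected participants are those whose answering
// connection columns are populated and not marked as lost.
fn connected_participants(participants: &[Model]) -> Vec<ConnectionId> {
    participants
        .iter()
        .filter(|participant| !participant.answering_connection_lost)
        .filter_map(|participant| participant.answering_connection())
        .collect()
}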

View file

@ -0,0 +1,15 @@
use crate::db::ServerId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "servers")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ServerId,
pub environment: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,28 @@
use crate::db::{SignupId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "signups")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: SignupId,
pub email_address: String,
pub email_confirmation_code: String,
pub email_confirmation_sent: bool,
pub created_at: DateTime,
pub device_id: Option<String>,
pub user_id: Option<UserId>,
pub inviting_user_id: Option<UserId>,
pub platform_mac: bool,
pub platform_linux: bool,
pub platform_windows: bool,
pub platform_unknown: bool,
pub editor_features: Option<Vec<String>>,
pub programming_languages: Option<Vec<String>>,
pub added_to_mailing_list: bool,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,80 @@
use crate::db::UserId;
use sea_orm::entity::prelude::*;
use serde::Serialize;
#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel, Serialize)]
#[sea_orm(table_name = "users")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: UserId,
pub github_login: String,
pub github_user_id: Option<i32>,
pub email_address: Option<String>,
pub admin: bool,
pub invite_code: Option<String>,
pub invite_count: i32,
pub inviter_id: Option<UserId>,
pub connected_once: bool,
pub metrics_id: Uuid,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::access_token::Entity")]
AccessToken,
#[sea_orm(has_one = "super::room_participant::Entity")]
RoomParticipant,
#[sea_orm(has_many = "super::project::Entity")]
HostedProjects,
#[sea_orm(has_many = "super::channel_member::Entity")]
ChannelMemberships,
#[sea_orm(has_many = "super::user_feature::Entity")]
UserFeatures,
}
impl Related<super::access_token::Entity> for Entity {
fn to() -> RelationDef {
Relation::AccessToken.def()
}
}
impl Related<super::room_participant::Entity> for Entity {
fn to() -> RelationDef {
Relation::RoomParticipant.def()
}
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::HostedProjects.def()
}
}
impl Related<super::channel_member::Entity> for Entity {
fn to() -> RelationDef {
Relation::ChannelMemberships.def()
}
}
impl Related<super::user_feature::Entity> for Entity {
fn to() -> RelationDef {
Relation::UserFeatures.def()
}
}
impl ActiveModelBehavior for ActiveModel {}
pub struct UserFlags;
impl Linked for UserFlags {
type FromEntity = Entity;
type ToEntity = super::feature_flag::Entity;
fn link(&self) -> Vec<RelationDef> {
vec![
super::user_feature::Relation::User.def().rev(),
super::user_feature::Relation::Flag.def(),
]
}
}
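A minimal sketch (not part of this commit) showing how the UserFlags link above can be traversed with sea-orm's `find_linked`; the helper name and db handle are assumptions for illustration.

use sea_orm::{ConnectionTrait, DbErr, ModelTrait};

// Hypothetical query: walk users -> user_features -> feature_flags in one go.
async fn flags_for_user(
    db: &impl ConnectionTrait,
    user: &Model,
) -> Result<Vec<super::feature_flag::Model>, DbErr> {
    user.find_linked(UserFlags).all(db).await
}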

View file

@ -0,0 +1,42 @@
use sea_orm::entity::prelude::*;
use crate::db::{FlagId, UserId};
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "user_features")]
pub struct Model {
#[sea_orm(primary_key)]
pub user_id: UserId,
#[sea_orm(primary_key)]
pub feature_id: FlagId,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::feature_flag::Entity",
from = "Column::FeatureId",
to = "super::feature_flag::Column::Id"
)]
Flag,
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
User,
}
impl Related<super::feature_flag::Entity> for Entity {
fn to() -> RelationDef {
Relation::Flag.def()
}
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl ActiveModelBehavior for ActiveModel {}
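Because the table has a composite primary key, lookups take a tuple of both key columns; a hedged sketch (illustration only, assuming the id newtypes implement the required sea-orm value traits):

use sea_orm::{ConnectionTrait, DbErr, EntityTrait};

// Hypothetical check: does a given user have a given feature flag enabled?
async fn user_has_flag(
    db: &impl ConnectionTrait,
    user_id: UserId,
    flag_id: FlagId,
) -> Result<bool, DbErr> {
    Ok(Entity::find_by_id((user_id, flag_id))
        .one(db)
        .await?
        .is_some())
}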

View file

@ -0,0 +1,36 @@
use crate::db::ProjectId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktrees")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: i64,
#[sea_orm(primary_key)]
pub project_id: ProjectId,
pub abs_path: String,
pub root_name: String,
pub visible: bool,
/// The last scan for which we've observed entries. It may be in progress.
pub scan_id: i64,
/// The last scan that fully completed.
pub completed_scan_id: i64,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::project::Entity",
from = "Column::ProjectId",
to = "super::project::Column::Id"
)]
Project,
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::Project.def()
}
}
impl ActiveModelBehavior for ActiveModel {}
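The two scan columns distinguish an in-progress scan from a finished one; a one-line sketch of the check this enables (illustration only, helper name assumed):

// Hypothetical helper: the latest observed scan is complete once it has also
// been recorded as the last fully completed scan.
fn scan_is_complete(worktree: &Model) -> bool {
    worktree.completed_scan_id >= worktree.scan_id
}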

View file

@ -0,0 +1,21 @@
use crate::db::ProjectId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktree_diagnostic_summaries")]
pub struct Model {
#[sea_orm(primary_key)]
pub project_id: ProjectId,
#[sea_orm(primary_key)]
pub worktree_id: i64,
#[sea_orm(primary_key)]
pub path: String,
pub language_server_id: i64,
pub error_count: i32,
pub warning_count: i32,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,29 @@
use crate::db::ProjectId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktree_entries")]
pub struct Model {
#[sea_orm(primary_key)]
pub project_id: ProjectId,
#[sea_orm(primary_key)]
pub worktree_id: i64,
#[sea_orm(primary_key)]
pub id: i64,
pub is_dir: bool,
pub path: String,
pub inode: i64,
pub mtime_seconds: i64,
pub mtime_nanos: i32,
pub git_status: Option<i64>,
pub is_symlink: bool,
pub is_ignored: bool,
pub is_external: bool,
pub is_deleted: bool,
pub scan_id: i64,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,21 @@
use crate::db::ProjectId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktree_repositories")]
pub struct Model {
#[sea_orm(primary_key)]
pub project_id: ProjectId,
#[sea_orm(primary_key)]
pub worktree_id: i64,
#[sea_orm(primary_key)]
pub work_directory_id: i64,
pub scan_id: i64,
pub branch: Option<String>,
pub is_deleted: bool,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,23 @@
use crate::db::ProjectId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktree_repository_statuses")]
pub struct Model {
#[sea_orm(primary_key)]
pub project_id: ProjectId,
#[sea_orm(primary_key)]
pub worktree_id: i64,
#[sea_orm(primary_key)]
pub work_directory_id: i64,
#[sea_orm(primary_key)]
pub repo_path: String,
pub status: i64,
pub scan_id: i64,
pub is_deleted: bool,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,19 @@
use crate::db::ProjectId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktree_settings_files")]
pub struct Model {
#[sea_orm(primary_key)]
pub project_id: ProjectId,
#[sea_orm(primary_key)]
pub worktree_id: i64,
#[sea_orm(primary_key)]
pub path: String,
pub content: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -0,0 +1,187 @@
mod buffer_tests;
mod channel_tests;
mod db_tests;
mod feature_flag_tests;
mod message_tests;
use super::*;
use gpui::BackgroundExecutor;
use parking_lot::Mutex;
use sea_orm::ConnectionTrait;
use sqlx::migrate::MigrateDatabase;
use std::sync::{
atomic::{AtomicI32, AtomicU32, Ordering::SeqCst},
Arc,
};
const TEST_RELEASE_CHANNEL: &str = "test";
pub struct TestDb {
pub db: Option<Arc<Database>>,
pub connection: Option<sqlx::AnyConnection>,
}
impl TestDb {
pub fn sqlite(background: BackgroundExecutor) -> Self {
let url = "sqlite::memory:".to_string();
let runtime = tokio::runtime::Builder::new_current_thread()
.enable_io()
.enable_time()
.build()
.unwrap();
let mut db = runtime.block_on(async {
let mut options = ConnectOptions::new(url);
options.max_connections(5);
let mut db = Database::new(options, Executor::Deterministic(background))
.await
.unwrap();
let sql = include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/migrations.sqlite/20221109000000_test_schema.sql"
));
db.pool
.execute(sea_orm::Statement::from_string(
db.pool.get_database_backend(),
sql,
))
.await
.unwrap();
db.initialize_notification_kinds().await.unwrap();
db
});
db.runtime = Some(runtime);
Self {
db: Some(Arc::new(db)),
connection: None,
}
}
pub fn postgres(background: BackgroundExecutor) -> Self {
static LOCK: Mutex<()> = Mutex::new(());
let _guard = LOCK.lock();
let mut rng = StdRng::from_entropy();
let url = format!(
"postgres://postgres@localhost/zed-test-{}",
rng.gen::<u128>()
);
let runtime = tokio::runtime::Builder::new_current_thread()
.enable_io()
.enable_time()
.build()
.unwrap();
let mut db = runtime.block_on(async {
sqlx::Postgres::create_database(&url)
.await
.expect("failed to create test db");
let mut options = ConnectOptions::new(url);
options
.max_connections(5)
.idle_timeout(Duration::from_secs(0));
let mut db = Database::new(options, Executor::Deterministic(background))
.await
.unwrap();
let migrations_path = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations");
db.migrate(Path::new(migrations_path), false).await.unwrap();
db.initialize_notification_kinds().await.unwrap();
db
});
db.runtime = Some(runtime);
Self {
db: Some(Arc::new(db)),
connection: None,
}
}
pub fn db(&self) -> &Arc<Database> {
self.db.as_ref().unwrap()
}
}
#[macro_export]
macro_rules! test_both_dbs {
($test_name:ident, $postgres_test_name:ident, $sqlite_test_name:ident) => {
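// Expands into two #[gpui::test] functions that run the same test body
// against a Postgres-backed and a SQLite-backed TestDb.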
#[gpui::test]
async fn $postgres_test_name(cx: &mut gpui::TestAppContext) {
let test_db = crate::db::TestDb::postgres(cx.executor().clone());
$test_name(test_db.db()).await;
}
#[gpui::test]
async fn $sqlite_test_name(cx: &mut gpui::TestAppContext) {
let test_db = crate::db::TestDb::sqlite(cx.executor().clone());
$test_name(test_db.db()).await;
}
};
}
impl Drop for TestDb {
fn drop(&mut self) {
let db = self.db.take().unwrap();
if let sea_orm::DatabaseBackend::Postgres = db.pool.get_database_backend() {
db.runtime.as_ref().unwrap().block_on(async {
use util::ResultExt;
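// Disconnect any backends still attached to the test database so that
// drop_database below can succeed.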
let query = "
SELECT pg_terminate_backend(pg_stat_activity.pid)
FROM pg_stat_activity
WHERE
pg_stat_activity.datname = current_database() AND
pid <> pg_backend_pid();
";
db.pool
.execute(sea_orm::Statement::from_string(
db.pool.get_database_backend(),
query,
))
.await
.log_err();
sqlx::Postgres::drop_database(db.options.get_url())
.await
.log_err();
})
}
}
}
fn channel_tree(channels: &[(ChannelId, &[ChannelId], &'static str, ChannelRole)]) -> Vec<Channel> {
channels
.iter()
.map(|(id, parent_path, name, role)| Channel {
id: *id,
name: name.to_string(),
visibility: ChannelVisibility::Members,
role: *role,
parent_path: parent_path.to_vec(),
})
.collect()
}
static GITHUB_USER_ID: AtomicI32 = AtomicI32::new(5);
async fn new_test_user(db: &Arc<Database>, email: &str) -> UserId {
db.create_user(
email,
false,
NewUserParams {
github_login: email[0..email.find("@").unwrap()].to_string(),
github_user_id: GITHUB_USER_ID.fetch_add(1, SeqCst),
},
)
.await
.unwrap()
.user_id
}
static TEST_CONNECTION_ID: AtomicU32 = AtomicU32::new(1);
fn new_test_connection(server: ServerId) -> ConnectionId {
ConnectionId {
id: TEST_CONNECTION_ID.fetch_add(1, SeqCst),
owner_id: server.0 as u32,
}
}

View file

@ -0,0 +1,506 @@
use super::*;
use crate::test_both_dbs;
use language::proto::{self, serialize_version};
use text::Buffer;
test_both_dbs!(
test_channel_buffers,
test_channel_buffers_postgres,
test_channel_buffers_sqlite
);
async fn test_channel_buffers(db: &Arc<Database>) {
let a_id = db
.create_user(
"user_a@example.com",
false,
NewUserParams {
github_login: "user_a".into(),
github_user_id: 101,
},
)
.await
.unwrap()
.user_id;
let b_id = db
.create_user(
"user_b@example.com",
false,
NewUserParams {
github_login: "user_b".into(),
github_user_id: 102,
},
)
.await
.unwrap()
.user_id;
// This user will not be a part of the channel
let c_id = db
.create_user(
"user_c@example.com",
false,
NewUserParams {
github_login: "user_c".into(),
github_user_id: 102,
},
)
.await
.unwrap()
.user_id;
let owner_id = db.create_server("production").await.unwrap().0 as u32;
let zed_id = db.create_root_channel("zed", a_id).await.unwrap();
db.invite_channel_member(zed_id, b_id, a_id, ChannelRole::Member)
.await
.unwrap();
db.respond_to_channel_invite(zed_id, b_id, true)
.await
.unwrap();
let connection_id_a = ConnectionId { owner_id, id: 1 };
let _ = db
.join_channel_buffer(zed_id, a_id, connection_id_a)
.await
.unwrap();
let mut buffer_a = Buffer::new(0, 0, "".to_string());
let mut operations = Vec::new();
operations.push(buffer_a.edit([(0..0, "hello world")]));
operations.push(buffer_a.edit([(5..5, ", cruel")]));
operations.push(buffer_a.edit([(0..5, "goodbye")]));
operations.push(buffer_a.undo().unwrap().1);
assert_eq!(buffer_a.text(), "hello, cruel world");
let operations = operations
.into_iter()
.map(|op| proto::serialize_operation(&language::Operation::Buffer(op)))
.collect::<Vec<_>>();
db.update_channel_buffer(zed_id, a_id, &operations)
.await
.unwrap();
let connection_id_b = ConnectionId { owner_id, id: 2 };
let buffer_response_b = db
.join_channel_buffer(zed_id, b_id, connection_id_b)
.await
.unwrap();
let mut buffer_b = Buffer::new(0, 0, buffer_response_b.base_text);
buffer_b
.apply_ops(buffer_response_b.operations.into_iter().map(|operation| {
let operation = proto::deserialize_operation(operation).unwrap();
if let language::Operation::Buffer(operation) = operation {
operation
} else {
unreachable!()
}
}))
.unwrap();
assert_eq!(buffer_b.text(), "hello, cruel world");
// Ensure that C fails to open the buffer
assert!(db
.join_channel_buffer(zed_id, c_id, ConnectionId { owner_id, id: 3 })
.await
.is_err());
// Ensure that both collaborators have shown up
assert_eq!(
buffer_response_b.collaborators,
&[
rpc::proto::Collaborator {
user_id: a_id.to_proto(),
peer_id: Some(rpc::proto::PeerId { id: 1, owner_id }),
replica_id: 0,
},
rpc::proto::Collaborator {
user_id: b_id.to_proto(),
peer_id: Some(rpc::proto::PeerId { id: 2, owner_id }),
replica_id: 1,
}
]
);
// Ensure that get_channel_buffer_collaborators works
let zed_collaborators = db.get_channel_buffer_collaborators(zed_id).await.unwrap();
assert_eq!(zed_collaborators, &[a_id, b_id]);
let left_buffer = db
.leave_channel_buffer(zed_id, connection_id_b)
.await
.unwrap();
assert_eq!(left_buffer.connections, &[connection_id_a],);
let cargo_id = db.create_root_channel("cargo", a_id).await.unwrap();
let _ = db
.join_channel_buffer(cargo_id, a_id, connection_id_a)
.await
.unwrap();
db.leave_channel_buffers(connection_id_a).await.unwrap();
let zed_collaborators = db.get_channel_buffer_collaborators(zed_id).await.unwrap();
let cargo_collaborators = db.get_channel_buffer_collaborators(cargo_id).await.unwrap();
assert_eq!(zed_collaborators, &[]);
assert_eq!(cargo_collaborators, &[]);
// When everyone has left the channel, the operations are collapsed into
// a new base text.
let buffer_response_b = db
.join_channel_buffer(zed_id, b_id, connection_id_b)
.await
.unwrap();
assert_eq!(buffer_response_b.base_text, "hello, cruel world");
assert_eq!(buffer_response_b.operations, &[]);
}
test_both_dbs!(
test_channel_buffers_last_operations,
test_channel_buffers_last_operations_postgres,
test_channel_buffers_last_operations_sqlite
);
async fn test_channel_buffers_last_operations(db: &Database) {
let user_id = db
.create_user(
"user_a@example.com",
false,
NewUserParams {
github_login: "user_a".into(),
github_user_id: 101,
},
)
.await
.unwrap()
.user_id;
let observer_id = db
.create_user(
"user_b@example.com",
false,
NewUserParams {
github_login: "user_b".into(),
github_user_id: 102,
},
)
.await
.unwrap()
.user_id;
let owner_id = db.create_server("production").await.unwrap().0 as u32;
let connection_id = ConnectionId {
owner_id,
id: user_id.0 as u32,
};
let mut buffers = Vec::new();
let mut text_buffers = Vec::new();
for i in 0..3 {
let channel = db
.create_root_channel(&format!("channel-{i}"), user_id)
.await
.unwrap();
db.invite_channel_member(channel, observer_id, user_id, ChannelRole::Member)
.await
.unwrap();
db.respond_to_channel_invite(channel, observer_id, true)
.await
.unwrap();
db.join_channel_buffer(channel, user_id, connection_id)
.await
.unwrap();
buffers.push(
db.transaction(|tx| async move { db.get_channel_buffer(channel, &*tx).await })
.await
.unwrap(),
);
text_buffers.push(Buffer::new(0, 0, "".to_string()));
}
let operations = db
.transaction(|tx| {
let buffers = &buffers;
async move {
db.get_latest_operations_for_buffers([buffers[0].id, buffers[2].id], &*tx)
.await
}
})
.await
.unwrap();
assert!(operations.is_empty());
update_buffer(
buffers[0].channel_id,
user_id,
db,
vec![
text_buffers[0].edit([(0..0, "a")]),
text_buffers[0].edit([(0..0, "b")]),
text_buffers[0].edit([(0..0, "c")]),
],
)
.await;
update_buffer(
buffers[1].channel_id,
user_id,
db,
vec![
text_buffers[1].edit([(0..0, "d")]),
text_buffers[1].edit([(1..1, "e")]),
text_buffers[1].edit([(2..2, "f")]),
],
)
.await;
// cause buffer 1's epoch to increment.
db.leave_channel_buffer(buffers[1].channel_id, connection_id)
.await
.unwrap();
db.join_channel_buffer(buffers[1].channel_id, user_id, connection_id)
.await
.unwrap();
text_buffers[1] = Buffer::new(1, 0, "def".to_string());
update_buffer(
buffers[1].channel_id,
user_id,
db,
vec![
text_buffers[1].edit([(0..0, "g")]),
text_buffers[1].edit([(0..0, "h")]),
],
)
.await;
update_buffer(
buffers[2].channel_id,
user_id,
db,
vec![text_buffers[2].edit([(0..0, "i")])],
)
.await;
let operations = db
.transaction(|tx| {
let buffers = &buffers;
async move {
db.get_latest_operations_for_buffers([buffers[1].id, buffers[2].id], &*tx)
.await
}
})
.await
.unwrap();
assert_operations(
&operations,
&[
(buffers[1].id, 1, &text_buffers[1]),
(buffers[2].id, 0, &text_buffers[2]),
],
);
let operations = db
.transaction(|tx| {
let buffers = &buffers;
async move {
db.get_latest_operations_for_buffers([buffers[0].id, buffers[1].id], &*tx)
.await
}
})
.await
.unwrap();
assert_operations(
&operations,
&[
(buffers[0].id, 0, &text_buffers[0]),
(buffers[1].id, 1, &text_buffers[1]),
],
);
let buffer_changes = db
.transaction(|tx| {
let buffers = &buffers;
async move {
db.unseen_channel_buffer_changes(
observer_id,
&[
buffers[0].channel_id,
buffers[1].channel_id,
buffers[2].channel_id,
],
&*tx,
)
.await
}
})
.await
.unwrap();
pretty_assertions::assert_eq!(
buffer_changes,
[
rpc::proto::UnseenChannelBufferChange {
channel_id: buffers[0].channel_id.to_proto(),
epoch: 0,
version: serialize_version(&text_buffers[0].version()),
},
rpc::proto::UnseenChannelBufferChange {
channel_id: buffers[1].channel_id.to_proto(),
epoch: 1,
version: serialize_version(&text_buffers[1].version())
.into_iter()
.filter(|vector| vector.replica_id
== buffer_changes[1].version.first().unwrap().replica_id)
.collect::<Vec<_>>(),
},
rpc::proto::UnseenChannelBufferChange {
channel_id: buffers[2].channel_id.to_proto(),
epoch: 0,
version: serialize_version(&text_buffers[2].version()),
},
]
);
db.observe_buffer_version(
buffers[1].id,
observer_id,
1,
serialize_version(&text_buffers[1].version()).as_slice(),
)
.await
.unwrap();
let buffer_changes = db
.transaction(|tx| {
let buffers = &buffers;
async move {
db.unseen_channel_buffer_changes(
observer_id,
&[
buffers[0].channel_id,
buffers[1].channel_id,
buffers[2].channel_id,
],
&*tx,
)
.await
}
})
.await
.unwrap();
assert_eq!(
buffer_changes,
[
rpc::proto::UnseenChannelBufferChange {
channel_id: buffers[0].channel_id.to_proto(),
epoch: 0,
version: serialize_version(&text_buffers[0].version()),
},
rpc::proto::UnseenChannelBufferChange {
channel_id: buffers[2].channel_id.to_proto(),
epoch: 0,
version: serialize_version(&text_buffers[2].version()),
},
]
);
// Observe an earlier version of the buffer.
db.observe_buffer_version(
buffers[1].id,
observer_id,
1,
&[rpc::proto::VectorClockEntry {
replica_id: 0,
timestamp: 0,
}],
)
.await
.unwrap();
let buffer_changes = db
.transaction(|tx| {
let buffers = &buffers;
async move {
db.unseen_channel_buffer_changes(
observer_id,
&[
buffers[0].channel_id,
buffers[1].channel_id,
buffers[2].channel_id,
],
&*tx,
)
.await
}
})
.await
.unwrap();
assert_eq!(
buffer_changes,
[
rpc::proto::UnseenChannelBufferChange {
channel_id: buffers[0].channel_id.to_proto(),
epoch: 0,
version: serialize_version(&text_buffers[0].version()),
},
rpc::proto::UnseenChannelBufferChange {
channel_id: buffers[2].channel_id.to_proto(),
epoch: 0,
version: serialize_version(&text_buffers[2].version()),
},
]
);
}
async fn update_buffer(
channel_id: ChannelId,
user_id: UserId,
db: &Database,
operations: Vec<text::Operation>,
) {
let operations = operations
.into_iter()
.map(|op| proto::serialize_operation(&language::Operation::Buffer(op)))
.collect::<Vec<_>>();
db.update_channel_buffer(channel_id, user_id, &operations)
.await
.unwrap();
}
fn assert_operations(
operations: &[buffer_operation::Model],
expected: &[(BufferId, i32, &text::Buffer)],
) {
let actual = operations
.iter()
.map(|op| buffer_operation::Model {
buffer_id: op.buffer_id,
epoch: op.epoch,
lamport_timestamp: op.lamport_timestamp,
replica_id: op.replica_id,
value: vec![],
})
.collect::<Vec<_>>();
let expected = expected
.iter()
.map(|(buffer_id, epoch, buffer)| buffer_operation::Model {
buffer_id: *buffer_id,
epoch: *epoch,
lamport_timestamp: buffer.lamport_clock.value as i32 - 1,
replica_id: buffer.replica_id() as i32,
value: vec![],
})
.collect::<Vec<_>>();
assert_eq!(actual, expected, "unexpected operations")
}

View file

@ -0,0 +1,819 @@
use crate::{
db::{
tests::{channel_tree, new_test_connection, new_test_user, TEST_RELEASE_CHANNEL},
Channel, ChannelId, ChannelRole, Database, NewUserParams, RoomId,
},
test_both_dbs,
};
use rpc::{
proto::{self},
ConnectionId,
};
use std::sync::Arc;
test_both_dbs!(test_channels, test_channels_postgres, test_channels_sqlite);
async fn test_channels(db: &Arc<Database>) {
let a_id = new_test_user(db, "user1@example.com").await;
let b_id = new_test_user(db, "user2@example.com").await;
let zed_id = db.create_root_channel("zed", a_id).await.unwrap();
// Make sure that people cannot read channels they haven't been invited to
assert!(db.get_channel(zed_id, b_id).await.is_err());
db.invite_channel_member(zed_id, b_id, a_id, ChannelRole::Member)
.await
.unwrap();
db.respond_to_channel_invite(zed_id, b_id, true)
.await
.unwrap();
let crdb_id = db.create_sub_channel("crdb", zed_id, a_id).await.unwrap();
let livestreaming_id = db
.create_sub_channel("livestreaming", zed_id, a_id)
.await
.unwrap();
let replace_id = db
.create_sub_channel("replace", zed_id, a_id)
.await
.unwrap();
let mut members = db
.transaction(|tx| async move {
let channel = db.get_channel_internal(replace_id, &*tx).await?;
Ok(db.get_channel_participants(&channel, &*tx).await?)
})
.await
.unwrap();
members.sort();
assert_eq!(members, &[a_id, b_id]);
let rust_id = db.create_root_channel("rust", a_id).await.unwrap();
let cargo_id = db.create_sub_channel("cargo", rust_id, a_id).await.unwrap();
let cargo_ra_id = db
.create_sub_channel("cargo-ra", cargo_id, a_id)
.await
.unwrap();
let result = db.get_channels_for_user(a_id).await.unwrap();
assert_eq!(
result.channels,
channel_tree(&[
(zed_id, &[], "zed", ChannelRole::Admin),
(crdb_id, &[zed_id], "crdb", ChannelRole::Admin),
(
livestreaming_id,
&[zed_id],
"livestreaming",
ChannelRole::Admin
),
(replace_id, &[zed_id], "replace", ChannelRole::Admin),
(rust_id, &[], "rust", ChannelRole::Admin),
(cargo_id, &[rust_id], "cargo", ChannelRole::Admin),
(
cargo_ra_id,
&[rust_id, cargo_id],
"cargo-ra",
ChannelRole::Admin
)
],)
);
let result = db.get_channels_for_user(b_id).await.unwrap();
assert_eq!(
result.channels,
channel_tree(&[
(zed_id, &[], "zed", ChannelRole::Member),
(crdb_id, &[zed_id], "crdb", ChannelRole::Member),
(
livestreaming_id,
&[zed_id],
"livestreaming",
ChannelRole::Member
),
(replace_id, &[zed_id], "replace", ChannelRole::Member)
],)
);
// Update member permissions
let set_subchannel_admin = db
.set_channel_member_role(crdb_id, a_id, b_id, ChannelRole::Admin)
.await;
assert!(set_subchannel_admin.is_err());
let set_channel_admin = db
.set_channel_member_role(zed_id, a_id, b_id, ChannelRole::Admin)
.await;
assert!(set_channel_admin.is_ok());
let result = db.get_channels_for_user(b_id).await.unwrap();
assert_eq!(
result.channels,
channel_tree(&[
(zed_id, &[], "zed", ChannelRole::Admin),
(crdb_id, &[zed_id], "crdb", ChannelRole::Admin),
(
livestreaming_id,
&[zed_id],
"livestreaming",
ChannelRole::Admin
),
(replace_id, &[zed_id], "replace", ChannelRole::Admin)
],)
);
// Remove a single channel
db.delete_channel(crdb_id, a_id).await.unwrap();
assert!(db.get_channel(crdb_id, a_id).await.is_err());
// Remove a channel tree
let (mut channel_ids, user_ids) = db.delete_channel(rust_id, a_id).await.unwrap();
channel_ids.sort();
assert_eq!(channel_ids, &[rust_id, cargo_id, cargo_ra_id]);
assert_eq!(user_ids, &[a_id]);
assert!(db.get_channel(rust_id, a_id).await.is_err());
assert!(db.get_channel(cargo_id, a_id).await.is_err());
assert!(db.get_channel(cargo_ra_id, a_id).await.is_err());
}
test_both_dbs!(
test_joining_channels,
test_joining_channels_postgres,
test_joining_channels_sqlite
);
async fn test_joining_channels(db: &Arc<Database>) {
let owner_id = db.create_server("test").await.unwrap().0 as u32;
let user_1 = new_test_user(db, "user1@example.com").await;
let user_2 = new_test_user(db, "user2@example.com").await;
let channel_1 = db.create_root_channel("channel_1", user_1).await.unwrap();
// can join a room with membership to its channel
let (joined_room, _, _) = db
.join_channel(
channel_1,
user_1,
ConnectionId { owner_id, id: 1 },
TEST_RELEASE_CHANNEL,
)
.await
.unwrap();
assert_eq!(joined_room.room.participants.len(), 1);
let room_id = RoomId::from_proto(joined_room.room.id);
drop(joined_room);
// cannot join a room without membership to its channel
assert!(db
.join_room(
room_id,
user_2,
ConnectionId { owner_id, id: 1 },
TEST_RELEASE_CHANNEL
)
.await
.is_err());
}
test_both_dbs!(
test_channel_invites,
test_channel_invites_postgres,
test_channel_invites_sqlite
);
async fn test_channel_invites(db: &Arc<Database>) {
db.create_server("test").await.unwrap();
let user_1 = new_test_user(db, "user1@example.com").await;
let user_2 = new_test_user(db, "user2@example.com").await;
let user_3 = new_test_user(db, "user3@example.com").await;
let channel_1_1 = db.create_root_channel("channel_1", user_1).await.unwrap();
let channel_1_2 = db.create_root_channel("channel_2", user_1).await.unwrap();
db.invite_channel_member(channel_1_1, user_2, user_1, ChannelRole::Member)
.await
.unwrap();
db.invite_channel_member(channel_1_2, user_2, user_1, ChannelRole::Member)
.await
.unwrap();
db.invite_channel_member(channel_1_1, user_3, user_1, ChannelRole::Admin)
.await
.unwrap();
let user_2_invites = db
.get_channel_invites_for_user(user_2) // -> [channel_1_1, channel_1_2]
.await
.unwrap()
.into_iter()
.map(|channel| channel.id)
.collect::<Vec<_>>();
assert_eq!(user_2_invites, &[channel_1_1, channel_1_2]);
let user_3_invites = db
.get_channel_invites_for_user(user_3) // -> [channel_1_1]
.await
.unwrap()
.into_iter()
.map(|channel| channel.id)
.collect::<Vec<_>>();
assert_eq!(user_3_invites, &[channel_1_1]);
let mut members = db
.get_channel_participant_details(channel_1_1, user_1)
.await
.unwrap();
members.sort_by_key(|member| member.user_id);
assert_eq!(
members,
&[
proto::ChannelMember {
user_id: user_1.to_proto(),
kind: proto::channel_member::Kind::Member.into(),
role: proto::ChannelRole::Admin.into(),
},
proto::ChannelMember {
user_id: user_2.to_proto(),
kind: proto::channel_member::Kind::Invitee.into(),
role: proto::ChannelRole::Member.into(),
},
proto::ChannelMember {
user_id: user_3.to_proto(),
kind: proto::channel_member::Kind::Invitee.into(),
role: proto::ChannelRole::Admin.into(),
},
]
);
db.respond_to_channel_invite(channel_1_1, user_2, true)
.await
.unwrap();
let channel_1_3 = db
.create_sub_channel("channel_3", channel_1_1, user_1)
.await
.unwrap();
let members = db
.get_channel_participant_details(channel_1_3, user_1)
.await
.unwrap();
assert_eq!(
members,
&[
proto::ChannelMember {
user_id: user_1.to_proto(),
kind: proto::channel_member::Kind::AncestorMember.into(),
role: proto::ChannelRole::Admin.into(),
},
proto::ChannelMember {
user_id: user_2.to_proto(),
kind: proto::channel_member::Kind::AncestorMember.into(),
role: proto::ChannelRole::Member.into(),
},
]
);
}
test_both_dbs!(
test_channel_renames,
test_channel_renames_postgres,
test_channel_renames_sqlite
);
async fn test_channel_renames(db: &Arc<Database>) {
db.create_server("test").await.unwrap();
let user_1 = db
.create_user(
"user1@example.com",
false,
NewUserParams {
github_login: "user1".into(),
github_user_id: 5,
},
)
.await
.unwrap()
.user_id;
let user_2 = db
.create_user(
"user2@example.com",
false,
NewUserParams {
github_login: "user2".into(),
github_user_id: 6,
},
)
.await
.unwrap()
.user_id;
let zed_id = db.create_root_channel("zed", user_1).await.unwrap();
db.rename_channel(zed_id, user_1, "#zed-archive")
.await
.unwrap();
let channel = db.get_channel(zed_id, user_1).await.unwrap();
assert_eq!(channel.name, "zed-archive");
let non_permissioned_rename = db.rename_channel(zed_id, user_2, "hacked-lol").await;
assert!(non_permissioned_rename.is_err());
let bad_name_rename = db.rename_channel(zed_id, user_1, "#").await;
assert!(bad_name_rename.is_err())
}
test_both_dbs!(
test_db_channel_moving,
test_channels_moving_postgres,
test_channels_moving_sqlite
);
async fn test_db_channel_moving(db: &Arc<Database>) {
let a_id = db
.create_user(
"user1@example.com",
false,
NewUserParams {
github_login: "user1".into(),
github_user_id: 5,
},
)
.await
.unwrap()
.user_id;
let zed_id = db.create_root_channel("zed", a_id).await.unwrap();
let crdb_id = db.create_sub_channel("crdb", zed_id, a_id).await.unwrap();
let gpui2_id = db.create_sub_channel("gpui2", zed_id, a_id).await.unwrap();
let livestreaming_id = db
.create_sub_channel("livestreaming", crdb_id, a_id)
.await
.unwrap();
let livestreaming_dag_id = db
.create_sub_channel("livestreaming_dag", livestreaming_id, a_id)
.await
.unwrap();
// ========================================================================
// sanity check
// Initial DAG:
// /- gpui2
// zed -- crdb - livestreaming - livestreaming_dag
let result = db.get_channels_for_user(a_id).await.unwrap();
assert_channel_tree(
result.channels,
&[
(zed_id, &[]),
(crdb_id, &[zed_id]),
(livestreaming_id, &[zed_id, crdb_id]),
(livestreaming_dag_id, &[zed_id, crdb_id, livestreaming_id]),
(gpui2_id, &[zed_id]),
],
);
}
test_both_dbs!(
test_db_channel_moving_bugs,
test_db_channel_moving_bugs_postgres,
test_db_channel_moving_bugs_sqlite
);
async fn test_db_channel_moving_bugs(db: &Arc<Database>) {
let user_id = db
.create_user(
"user1@example.com",
false,
NewUserParams {
github_login: "user1".into(),
github_user_id: 5,
},
)
.await
.unwrap()
.user_id;
let zed_id = db.create_root_channel("zed", user_id).await.unwrap();
let projects_id = db
.create_sub_channel("projects", zed_id, user_id)
.await
.unwrap();
let livestreaming_id = db
.create_sub_channel("livestreaming", projects_id, user_id)
.await
.unwrap();
// DAG is: zed - projects - livestreaming
// Move to same parent should be a no-op
assert!(db
.move_channel(projects_id, Some(zed_id), user_id)
.await
.unwrap()
.is_none());
let result = db.get_channels_for_user(user_id).await.unwrap();
assert_channel_tree(
result.channels,
&[
(zed_id, &[]),
(projects_id, &[zed_id]),
(livestreaming_id, &[zed_id, projects_id]),
],
);
// Move the project channel to the root
db.move_channel(projects_id, None, user_id).await.unwrap();
let result = db.get_channels_for_user(user_id).await.unwrap();
assert_channel_tree(
result.channels,
&[
(zed_id, &[]),
(projects_id, &[]),
(livestreaming_id, &[projects_id]),
],
);
}
test_both_dbs!(
test_user_is_channel_participant,
test_user_is_channel_participant_postgres,
test_user_is_channel_participant_sqlite
);
async fn test_user_is_channel_participant(db: &Arc<Database>) {
let admin = new_test_user(db, "admin@example.com").await;
let member = new_test_user(db, "member@example.com").await;
let guest = new_test_user(db, "guest@example.com").await;
let zed_channel = db.create_root_channel("zed", admin).await.unwrap();
let active_channel_id = db
.create_sub_channel("active", zed_channel, admin)
.await
.unwrap();
let vim_channel_id = db
.create_sub_channel("vim", active_channel_id, admin)
.await
.unwrap();
db.set_channel_visibility(vim_channel_id, crate::db::ChannelVisibility::Public, admin)
.await
.unwrap();
db.invite_channel_member(active_channel_id, member, admin, ChannelRole::Member)
.await
.unwrap();
db.invite_channel_member(vim_channel_id, guest, admin, ChannelRole::Guest)
.await
.unwrap();
db.respond_to_channel_invite(active_channel_id, member, true)
.await
.unwrap();
db.transaction(|tx| async move {
db.check_user_is_channel_participant(
&db.get_channel_internal(vim_channel_id, &*tx).await?,
admin,
&*tx,
)
.await
})
.await
.unwrap();
db.transaction(|tx| async move {
db.check_user_is_channel_participant(
&db.get_channel_internal(vim_channel_id, &*tx).await?,
member,
&*tx,
)
.await
})
.await
.unwrap();
let mut members = db
.get_channel_participant_details(vim_channel_id, admin)
.await
.unwrap();
members.sort_by_key(|member| member.user_id);
assert_eq!(
members,
&[
proto::ChannelMember {
user_id: admin.to_proto(),
kind: proto::channel_member::Kind::AncestorMember.into(),
role: proto::ChannelRole::Admin.into(),
},
proto::ChannelMember {
user_id: member.to_proto(),
kind: proto::channel_member::Kind::AncestorMember.into(),
role: proto::ChannelRole::Member.into(),
},
proto::ChannelMember {
user_id: guest.to_proto(),
kind: proto::channel_member::Kind::Invitee.into(),
role: proto::ChannelRole::Guest.into(),
},
]
);
db.respond_to_channel_invite(vim_channel_id, guest, true)
.await
.unwrap();
db.transaction(|tx| async move {
db.check_user_is_channel_participant(
&db.get_channel_internal(vim_channel_id, &*tx).await?,
guest,
&*tx,
)
.await
})
.await
.unwrap();
let channels = db.get_channels_for_user(guest).await.unwrap().channels;
assert_channel_tree(channels, &[(vim_channel_id, &[])]);
let channels = db.get_channels_for_user(member).await.unwrap().channels;
assert_channel_tree(
channels,
&[
(active_channel_id, &[]),
(vim_channel_id, &[active_channel_id]),
],
);
db.set_channel_member_role(vim_channel_id, admin, guest, ChannelRole::Banned)
.await
.unwrap();
assert!(db
.transaction(|tx| async move {
db.check_user_is_channel_participant(
&db.get_channel_internal(vim_channel_id, &*tx).await.unwrap(),
guest,
&*tx,
)
.await
})
.await
.is_err());
let mut members = db
.get_channel_participant_details(vim_channel_id, admin)
.await
.unwrap();
members.sort_by_key(|member| member.user_id);
assert_eq!(
members,
&[
proto::ChannelMember {
user_id: admin.to_proto(),
kind: proto::channel_member::Kind::AncestorMember.into(),
role: proto::ChannelRole::Admin.into(),
},
proto::ChannelMember {
user_id: member.to_proto(),
kind: proto::channel_member::Kind::AncestorMember.into(),
role: proto::ChannelRole::Member.into(),
},
proto::ChannelMember {
user_id: guest.to_proto(),
kind: proto::channel_member::Kind::Member.into(),
role: proto::ChannelRole::Banned.into(),
},
]
);
db.remove_channel_member(vim_channel_id, guest, admin)
.await
.unwrap();
db.set_channel_visibility(zed_channel, crate::db::ChannelVisibility::Public, admin)
.await
.unwrap();
db.invite_channel_member(zed_channel, guest, admin, ChannelRole::Guest)
.await
.unwrap();
// currently people invited to parent channels are not shown here
let mut members = db
.get_channel_participant_details(vim_channel_id, admin)
.await
.unwrap();
members.sort_by_key(|member| member.user_id);
assert_eq!(
members,
&[
proto::ChannelMember {
user_id: admin.to_proto(),
kind: proto::channel_member::Kind::AncestorMember.into(),
role: proto::ChannelRole::Admin.into(),
},
proto::ChannelMember {
user_id: member.to_proto(),
kind: proto::channel_member::Kind::AncestorMember.into(),
role: proto::ChannelRole::Member.into(),
},
]
);
db.respond_to_channel_invite(zed_channel, guest, true)
.await
.unwrap();
db.transaction(|tx| async move {
db.check_user_is_channel_participant(
&db.get_channel_internal(zed_channel, &*tx).await.unwrap(),
guest,
&*tx,
)
.await
})
.await
.unwrap();
assert!(db
.transaction(|tx| async move {
db.check_user_is_channel_participant(
&db.get_channel_internal(active_channel_id, &*tx)
.await
.unwrap(),
guest,
&*tx,
)
.await
})
.await
.is_err(),);
db.transaction(|tx| async move {
db.check_user_is_channel_participant(
&db.get_channel_internal(vim_channel_id, &*tx).await.unwrap(),
guest,
&*tx,
)
.await
})
.await
.unwrap();
let mut members = db
.get_channel_participant_details(vim_channel_id, admin)
.await
.unwrap();
members.sort_by_key(|member| member.user_id);
assert_eq!(
members,
&[
proto::ChannelMember {
user_id: admin.to_proto(),
kind: proto::channel_member::Kind::AncestorMember.into(),
role: proto::ChannelRole::Admin.into(),
},
proto::ChannelMember {
user_id: member.to_proto(),
kind: proto::channel_member::Kind::AncestorMember.into(),
role: proto::ChannelRole::Member.into(),
},
proto::ChannelMember {
user_id: guest.to_proto(),
kind: proto::channel_member::Kind::AncestorMember.into(),
role: proto::ChannelRole::Guest.into(),
},
]
);
let channels = db.get_channels_for_user(guest).await.unwrap().channels;
assert_channel_tree(
channels,
&[(zed_channel, &[]), (vim_channel_id, &[zed_channel])],
)
}
test_both_dbs!(
test_user_joins_correct_channel,
test_user_joins_correct_channel_postgres,
test_user_joins_correct_channel_sqlite
);
async fn test_user_joins_correct_channel(db: &Arc<Database>) {
let admin = new_test_user(db, "admin@example.com").await;
let zed_channel = db.create_root_channel("zed", admin).await.unwrap();
let active_channel = db
.create_sub_channel("active", zed_channel, admin)
.await
.unwrap();
let vim_channel = db
.create_sub_channel("vim", active_channel, admin)
.await
.unwrap();
let vim2_channel = db
.create_sub_channel("vim2", vim_channel, admin)
.await
.unwrap();
db.set_channel_visibility(zed_channel, crate::db::ChannelVisibility::Public, admin)
.await
.unwrap();
db.set_channel_visibility(vim_channel, crate::db::ChannelVisibility::Public, admin)
.await
.unwrap();
db.set_channel_visibility(vim2_channel, crate::db::ChannelVisibility::Public, admin)
.await
.unwrap();
let most_public = db
.transaction(|tx| async move {
Ok(db
.public_ancestors_including_self(
&db.get_channel_internal(vim_channel, &*tx).await.unwrap(),
&tx,
)
.await?
.first()
.cloned())
})
.await
.unwrap()
.unwrap()
.id;
assert_eq!(most_public, zed_channel)
}
test_both_dbs!(
test_guest_access,
test_guest_access_postgres,
test_guest_access_sqlite
);
async fn test_guest_access(db: &Arc<Database>) {
let server = db.create_server("test").await.unwrap();
let admin = new_test_user(db, "admin@example.com").await;
let guest = new_test_user(db, "guest@example.com").await;
let guest_connection = new_test_connection(server);
let zed_channel = db.create_root_channel("zed", admin).await.unwrap();
db.set_channel_visibility(zed_channel, crate::db::ChannelVisibility::Public, admin)
.await
.unwrap();
assert!(db
.join_channel_chat(zed_channel, guest_connection, guest)
.await
.is_err());
db.join_channel(zed_channel, guest, guest_connection, TEST_RELEASE_CHANNEL)
.await
.unwrap();
assert!(db
.join_channel_chat(zed_channel, guest_connection, guest)
.await
.is_ok())
}
#[track_caller]
fn assert_channel_tree(actual: Vec<Channel>, expected: &[(ChannelId, &[ChannelId])]) {
let actual = actual
.iter()
.map(|channel| (channel.id, channel.parent_path.as_slice()))
.collect::<Vec<_>>();
pretty_assertions::assert_eq!(
actual,
expected.to_vec(),
"wrong channel ids and parent paths"
);
}

View file

@ -0,0 +1,633 @@
use super::*;
use crate::test_both_dbs;
use gpui::TestAppContext;
use pretty_assertions::{assert_eq, assert_ne};
use std::sync::Arc;
use tests::TestDb;
test_both_dbs!(
test_get_users,
test_get_users_by_ids_postgres,
test_get_users_by_ids_sqlite
);
async fn test_get_users(db: &Arc<Database>) {
let mut user_ids = Vec::new();
let mut user_metric_ids = Vec::new();
for i in 1..=4 {
let user = db
.create_user(
&format!("user{i}@example.com"),
false,
NewUserParams {
github_login: format!("user{i}"),
github_user_id: i,
},
)
.await
.unwrap();
user_ids.push(user.user_id);
user_metric_ids.push(user.metrics_id);
}
assert_eq!(
db.get_users_by_ids(user_ids.clone()).await.unwrap(),
vec![
User {
id: user_ids[0],
github_login: "user1".to_string(),
github_user_id: Some(1),
email_address: Some("user1@example.com".to_string()),
admin: false,
metrics_id: user_metric_ids[0].parse().unwrap(),
..Default::default()
},
User {
id: user_ids[1],
github_login: "user2".to_string(),
github_user_id: Some(2),
email_address: Some("user2@example.com".to_string()),
admin: false,
metrics_id: user_metric_ids[1].parse().unwrap(),
..Default::default()
},
User {
id: user_ids[2],
github_login: "user3".to_string(),
github_user_id: Some(3),
email_address: Some("user3@example.com".to_string()),
admin: false,
metrics_id: user_metric_ids[2].parse().unwrap(),
..Default::default()
},
User {
id: user_ids[3],
github_login: "user4".to_string(),
github_user_id: Some(4),
email_address: Some("user4@example.com".to_string()),
admin: false,
metrics_id: user_metric_ids[3].parse().unwrap(),
..Default::default()
}
]
);
}
test_both_dbs!(
test_get_or_create_user_by_github_account,
test_get_or_create_user_by_github_account_postgres,
test_get_or_create_user_by_github_account_sqlite
);
async fn test_get_or_create_user_by_github_account(db: &Arc<Database>) {
let user_id1 = db
.create_user(
"user1@example.com",
false,
NewUserParams {
github_login: "login1".into(),
github_user_id: 101,
},
)
.await
.unwrap()
.user_id;
let user_id2 = db
.create_user(
"user2@example.com",
false,
NewUserParams {
github_login: "login2".into(),
github_user_id: 102,
},
)
.await
.unwrap()
.user_id;
let user = db
.get_or_create_user_by_github_account("login1", None, None)
.await
.unwrap()
.unwrap();
assert_eq!(user.id, user_id1);
assert_eq!(&user.github_login, "login1");
assert_eq!(user.github_user_id, Some(101));
assert!(db
.get_or_create_user_by_github_account("non-existent-login", None, None)
.await
.unwrap()
.is_none());
let user = db
.get_or_create_user_by_github_account("the-new-login2", Some(102), None)
.await
.unwrap()
.unwrap();
assert_eq!(user.id, user_id2);
assert_eq!(&user.github_login, "the-new-login2");
assert_eq!(user.github_user_id, Some(102));
let user = db
.get_or_create_user_by_github_account("login3", Some(103), Some("user3@example.com"))
.await
.unwrap()
.unwrap();
assert_eq!(&user.github_login, "login3");
assert_eq!(user.github_user_id, Some(103));
assert_eq!(user.email_address, Some("user3@example.com".into()));
}
test_both_dbs!(
test_create_access_tokens,
test_create_access_tokens_postgres,
test_create_access_tokens_sqlite
);
async fn test_create_access_tokens(db: &Arc<Database>) {
let user = db
.create_user(
"u1@example.com",
false,
NewUserParams {
github_login: "u1".into(),
github_user_id: 1,
},
)
.await
.unwrap()
.user_id;
let token_1 = db.create_access_token(user, "h1", 2).await.unwrap();
let token_2 = db.create_access_token(user, "h2", 2).await.unwrap();
assert_eq!(
db.get_access_token(token_1).await.unwrap(),
access_token::Model {
id: token_1,
user_id: user,
hash: "h1".into(),
}
);
assert_eq!(
db.get_access_token(token_2).await.unwrap(),
access_token::Model {
id: token_2,
user_id: user,
hash: "h2".into()
}
);
let token_3 = db.create_access_token(user, "h3", 2).await.unwrap();
assert_eq!(
db.get_access_token(token_3).await.unwrap(),
access_token::Model {
id: token_3,
user_id: user,
hash: "h3".into()
}
);
assert_eq!(
db.get_access_token(token_2).await.unwrap(),
access_token::Model {
id: token_2,
user_id: user,
hash: "h2".into()
}
);
assert!(db.get_access_token(token_1).await.is_err());
let token_4 = db.create_access_token(user, "h4", 2).await.unwrap();
assert_eq!(
db.get_access_token(token_4).await.unwrap(),
access_token::Model {
id: token_4,
user_id: user,
hash: "h4".into()
}
);
assert_eq!(
db.get_access_token(token_3).await.unwrap(),
access_token::Model {
id: token_3,
user_id: user,
hash: "h3".into()
}
);
assert!(db.get_access_token(token_2).await.is_err());
assert!(db.get_access_token(token_1).await.is_err());
}
test_both_dbs!(
test_add_contacts,
test_add_contacts_postgres,
test_add_contacts_sqlite
);
async fn test_add_contacts(db: &Arc<Database>) {
let mut user_ids = Vec::new();
for i in 0..3 {
user_ids.push(
db.create_user(
&format!("user{i}@example.com"),
false,
NewUserParams {
github_login: format!("user{i}"),
github_user_id: i,
},
)
.await
.unwrap()
.user_id,
);
}
let user_1 = user_ids[0];
let user_2 = user_ids[1];
let user_3 = user_ids[2];
// User starts with no contacts
assert_eq!(db.get_contacts(user_1).await.unwrap(), &[]);
// User requests a contact. Both users see the pending request.
db.send_contact_request(user_1, user_2).await.unwrap();
assert!(!db.has_contact(user_1, user_2).await.unwrap());
assert!(!db.has_contact(user_2, user_1).await.unwrap());
assert_eq!(
db.get_contacts(user_1).await.unwrap(),
&[Contact::Outgoing { user_id: user_2 }],
);
assert_eq!(
db.get_contacts(user_2).await.unwrap(),
&[Contact::Incoming { user_id: user_1 }]
);
// User 2 dismisses the contact request notification without accepting or rejecting.
// We shouldn't notify them again.
db.dismiss_contact_notification(user_1, user_2)
.await
.unwrap_err();
db.dismiss_contact_notification(user_2, user_1)
.await
.unwrap();
assert_eq!(
db.get_contacts(user_2).await.unwrap(),
&[Contact::Incoming { user_id: user_1 }]
);
// User can't accept their own contact request
db.respond_to_contact_request(user_1, user_2, true)
.await
.unwrap_err();
// User accepts a contact request. Both users see the contact.
db.respond_to_contact_request(user_2, user_1, true)
.await
.unwrap();
assert_eq!(
db.get_contacts(user_1).await.unwrap(),
&[Contact::Accepted {
user_id: user_2,
busy: false,
}],
);
assert!(db.has_contact(user_1, user_2).await.unwrap());
assert!(db.has_contact(user_2, user_1).await.unwrap());
assert_eq!(
db.get_contacts(user_2).await.unwrap(),
&[Contact::Accepted {
user_id: user_1,
busy: false,
}]
);
// Users cannot re-request existing contacts.
db.send_contact_request(user_1, user_2).await.unwrap_err();
db.send_contact_request(user_2, user_1).await.unwrap_err();
// Users can't dismiss notifications of them accepting other users' requests.
db.dismiss_contact_notification(user_2, user_1)
.await
.unwrap_err();
assert_eq!(
db.get_contacts(user_1).await.unwrap(),
&[Contact::Accepted {
user_id: user_2,
busy: false,
}]
);
// Users can dismiss notifications of other users accepting their requests.
db.dismiss_contact_notification(user_1, user_2)
.await
.unwrap();
assert_eq!(
db.get_contacts(user_1).await.unwrap(),
&[Contact::Accepted {
user_id: user_2,
busy: false,
}]
);
// Users send each other concurrent contact requests and
// see that they are immediately accepted.
db.send_contact_request(user_1, user_3).await.unwrap();
db.send_contact_request(user_3, user_1).await.unwrap();
assert_eq!(
db.get_contacts(user_1).await.unwrap(),
&[
Contact::Accepted {
user_id: user_2,
busy: false,
},
Contact::Accepted {
user_id: user_3,
busy: false,
}
]
);
assert_eq!(
db.get_contacts(user_3).await.unwrap(),
&[Contact::Accepted {
user_id: user_1,
busy: false,
}],
);
// User declines a contact request. Both users see that it is gone.
db.send_contact_request(user_2, user_3).await.unwrap();
db.respond_to_contact_request(user_3, user_2, false)
.await
.unwrap();
assert!(!db.has_contact(user_2, user_3).await.unwrap());
assert!(!db.has_contact(user_3, user_2).await.unwrap());
assert_eq!(
db.get_contacts(user_2).await.unwrap(),
&[Contact::Accepted {
user_id: user_1,
busy: false,
}]
);
assert_eq!(
db.get_contacts(user_3).await.unwrap(),
&[Contact::Accepted {
user_id: user_1,
busy: false,
}],
);
}
test_both_dbs!(
test_metrics_id,
test_metrics_id_postgres,
test_metrics_id_sqlite
);
async fn test_metrics_id(db: &Arc<Database>) {
let NewUserResult {
user_id: user1,
metrics_id: metrics_id1,
..
} = db
.create_user(
"person1@example.com",
false,
NewUserParams {
github_login: "person1".into(),
github_user_id: 101,
},
)
.await
.unwrap();
let NewUserResult {
user_id: user2,
metrics_id: metrics_id2,
..
} = db
.create_user(
"person2@example.com",
false,
NewUserParams {
github_login: "person2".into(),
github_user_id: 102,
},
)
.await
.unwrap();
assert_eq!(db.get_user_metrics_id(user1).await.unwrap(), metrics_id1);
assert_eq!(db.get_user_metrics_id(user2).await.unwrap(), metrics_id2);
assert_eq!(metrics_id1.len(), 36);
assert_eq!(metrics_id2.len(), 36);
assert_ne!(metrics_id1, metrics_id2);
}
test_both_dbs!(
test_project_count,
test_project_count_postgres,
test_project_count_sqlite
);
async fn test_project_count(db: &Arc<Database>) {
let owner_id = db.create_server("test").await.unwrap().0 as u32;
let user1 = db
.create_user(
&format!("admin@example.com"),
true,
NewUserParams {
github_login: "admin".into(),
github_user_id: 0,
},
)
.await
.unwrap();
let user2 = db
.create_user(
&format!("user@example.com"),
false,
NewUserParams {
github_login: "user".into(),
github_user_id: 1,
},
)
.await
.unwrap();
let room_id = RoomId::from_proto(
db.create_room(user1.user_id, ConnectionId { owner_id, id: 0 }, "", "dev")
.await
.unwrap()
.id,
);
db.call(
room_id,
user1.user_id,
ConnectionId { owner_id, id: 0 },
user2.user_id,
None,
)
.await
.unwrap();
db.join_room(
room_id,
user2.user_id,
ConnectionId { owner_id, id: 1 },
"dev",
)
.await
.unwrap();
assert_eq!(db.project_count_excluding_admins().await.unwrap(), 0);
db.share_project(room_id, ConnectionId { owner_id, id: 1 }, &[])
.await
.unwrap();
assert_eq!(db.project_count_excluding_admins().await.unwrap(), 1);
db.share_project(room_id, ConnectionId { owner_id, id: 1 }, &[])
.await
.unwrap();
assert_eq!(db.project_count_excluding_admins().await.unwrap(), 2);
// Projects shared by admins aren't counted.
db.share_project(room_id, ConnectionId { owner_id, id: 0 }, &[])
.await
.unwrap();
assert_eq!(db.project_count_excluding_admins().await.unwrap(), 2);
db.leave_room(ConnectionId { owner_id, id: 1 })
.await
.unwrap();
assert_eq!(db.project_count_excluding_admins().await.unwrap(), 0);
}
#[test]
fn test_fuzzy_like_string() {
assert_eq!(Database::fuzzy_like_string("abcd"), "%a%b%c%d%");
assert_eq!(Database::fuzzy_like_string("x y"), "%x%y%");
assert_eq!(Database::fuzzy_like_string(" z "), "%z%");
}
#[gpui::test]
async fn test_fuzzy_search_users(cx: &mut TestAppContext) {
let test_db = TestDb::postgres(cx.executor().clone());
let db = test_db.db();
for (i, github_login) in [
"California",
"colorado",
"oregon",
"washington",
"florida",
"delaware",
"rhode-island",
]
.into_iter()
.enumerate()
{
db.create_user(
&format!("{github_login}@example.com"),
false,
NewUserParams {
github_login: github_login.into(),
github_user_id: i as i32,
},
)
.await
.unwrap();
}
assert_eq!(
fuzzy_search_user_names(db, "clr").await,
&["colorado", "California"]
);
assert_eq!(
fuzzy_search_user_names(db, "ro").await,
&["rhode-island", "colorado", "oregon"],
);
async fn fuzzy_search_user_names(db: &Database, query: &str) -> Vec<String> {
db.fuzzy_search_users(query, 10)
.await
.unwrap()
.into_iter()
.map(|user| user.github_login)
.collect::<Vec<_>>()
}
}
test_both_dbs!(
test_non_matching_release_channels,
test_non_matching_release_channels_postgres,
test_non_matching_release_channels_sqlite
);
async fn test_non_matching_release_channels(db: &Arc<Database>) {
let owner_id = db.create_server("test").await.unwrap().0 as u32;
let user1 = db
.create_user(
&format!("admin@example.com"),
true,
NewUserParams {
github_login: "admin".into(),
github_user_id: 0,
},
)
.await
.unwrap();
let user2 = db
.create_user(
&format!("user@example.com"),
false,
NewUserParams {
github_login: "user".into(),
github_user_id: 1,
},
)
.await
.unwrap();
let room = db
.create_room(
user1.user_id,
ConnectionId { owner_id, id: 0 },
"",
"stable",
)
.await
.unwrap();
db.call(
RoomId::from_proto(room.id),
user1.user_id,
ConnectionId { owner_id, id: 0 },
user2.user_id,
None,
)
.await
.unwrap();
// User attempts to join from preview
let result = db
.join_room(
RoomId::from_proto(room.id),
user2.user_id,
ConnectionId { owner_id, id: 1 },
"preview",
)
.await;
assert!(result.is_err());
// User switches to stable
let result = db
.join_room(
RoomId::from_proto(room.id),
user2.user_id,
ConnectionId { owner_id, id: 1 },
"stable",
)
.await;
assert!(result.is_ok())
}

View file

@ -0,0 +1,58 @@
use crate::{
db::{Database, NewUserParams},
test_both_dbs,
};
use std::sync::Arc;
test_both_dbs!(
test_get_user_flags,
test_get_user_flags_postgres,
test_get_user_flags_sqlite
);
async fn test_get_user_flags(db: &Arc<Database>) {
let user_1 = db
.create_user(
&format!("user1@example.com"),
false,
NewUserParams {
github_login: format!("user1"),
github_user_id: 1,
},
)
.await
.unwrap()
.user_id;
let user_2 = db
.create_user(
&format!("user2@example.com"),
false,
NewUserParams {
github_login: format!("user2"),
github_user_id: 2,
},
)
.await
.unwrap()
.user_id;
const CHANNELS_ALPHA: &str = "channels-alpha";
const NEW_SEARCH: &str = "new-search";
let channels_flag = db.create_user_flag(CHANNELS_ALPHA).await.unwrap();
let search_flag = db.create_user_flag(NEW_SEARCH).await.unwrap();
db.add_user_flag(user_1, channels_flag).await.unwrap();
db.add_user_flag(user_1, search_flag).await.unwrap();
db.add_user_flag(user_2, channels_flag).await.unwrap();
let mut user_1_flags = db.get_user_flags(user_1).await.unwrap();
user_1_flags.sort();
assert_eq!(user_1_flags, &[CHANNELS_ALPHA, NEW_SEARCH]);
let mut user_2_flags = db.get_user_flags(user_2).await.unwrap();
user_2_flags.sort();
assert_eq!(user_2_flags, &[CHANNELS_ALPHA]);
}

View file

@ -0,0 +1,454 @@
use super::new_test_user;
use crate::{
db::{ChannelRole, Database, MessageId},
test_both_dbs,
};
use channel::mentions_to_proto;
use std::sync::Arc;
use time::OffsetDateTime;
test_both_dbs!(
test_channel_message_retrieval,
test_channel_message_retrieval_postgres,
test_channel_message_retrieval_sqlite
);
async fn test_channel_message_retrieval(db: &Arc<Database>) {
let user = new_test_user(db, "user@example.com").await;
let result = db.create_channel("channel", None, user).await.unwrap();
let owner_id = db.create_server("test").await.unwrap().0 as u32;
db.join_channel_chat(
result.channel.id,
rpc::ConnectionId { owner_id, id: 0 },
user,
)
.await
.unwrap();
let mut all_messages = Vec::new();
for i in 0..10 {
all_messages.push(
db.create_channel_message(
result.channel.id,
user,
&i.to_string(),
&[],
OffsetDateTime::now_utc(),
i,
)
.await
.unwrap()
.message_id
.to_proto(),
);
}
let messages = db
.get_channel_messages(result.channel.id, user, 3, None)
.await
.unwrap()
.into_iter()
.map(|message| message.id)
.collect::<Vec<_>>();
assert_eq!(messages, &all_messages[7..10]);
let messages = db
.get_channel_messages(
result.channel.id,
user,
4,
Some(MessageId::from_proto(all_messages[6])),
)
.await
.unwrap()
.into_iter()
.map(|message| message.id)
.collect::<Vec<_>>();
assert_eq!(messages, &all_messages[2..6]);
}
test_both_dbs!(
test_channel_message_nonces,
test_channel_message_nonces_postgres,
test_channel_message_nonces_sqlite
);
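// Verifies that message nonces provide idempotency scoped to the sender: the
// same user reusing a nonce gets back the original message id, while a
// different user reusing that nonce creates a new message.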
async fn test_channel_message_nonces(db: &Arc<Database>) {
let user_a = new_test_user(db, "user_a@example.com").await;
let user_b = new_test_user(db, "user_b@example.com").await;
let user_c = new_test_user(db, "user_c@example.com").await;
let channel = db.create_root_channel("channel", user_a).await.unwrap();
db.invite_channel_member(channel, user_b, user_a, ChannelRole::Member)
.await
.unwrap();
db.invite_channel_member(channel, user_c, user_a, ChannelRole::Member)
.await
.unwrap();
db.respond_to_channel_invite(channel, user_b, true)
.await
.unwrap();
db.respond_to_channel_invite(channel, user_c, true)
.await
.unwrap();
let owner_id = db.create_server("test").await.unwrap().0 as u32;
db.join_channel_chat(channel, rpc::ConnectionId { owner_id, id: 0 }, user_a)
.await
.unwrap();
db.join_channel_chat(channel, rpc::ConnectionId { owner_id, id: 1 }, user_b)
.await
.unwrap();
// As user A, create messages that re-use the same nonces. The requests
// succeed, but return the same ids.
let id1 = db
.create_channel_message(
channel,
user_a,
"hi @user_b",
&mentions_to_proto(&[(3..10, user_b.to_proto())]),
OffsetDateTime::now_utc(),
100,
)
.await
.unwrap()
.message_id;
let id2 = db
.create_channel_message(
channel,
user_a,
"hello, fellow users",
&mentions_to_proto(&[]),
OffsetDateTime::now_utc(),
200,
)
.await
.unwrap()
.message_id;
let id3 = db
.create_channel_message(
channel,
user_a,
"bye @user_c (same nonce as first message)",
&mentions_to_proto(&[(4..11, user_c.to_proto())]),
OffsetDateTime::now_utc(),
100,
)
.await
.unwrap()
.message_id;
let id4 = db
.create_channel_message(
channel,
user_a,
"omg (same nonce as second message)",
&mentions_to_proto(&[]),
OffsetDateTime::now_utc(),
200,
)
.await
.unwrap()
.message_id;
// As a different user, reuse one of the same nonces. This request succeeds
// and returns a different id.
let id5 = db
.create_channel_message(
channel,
user_b,
"omg @user_a (same nonce as user_a's first message)",
&mentions_to_proto(&[(4..11, user_a.to_proto())]),
OffsetDateTime::now_utc(),
100,
)
.await
.unwrap()
.message_id;
assert_ne!(id1, id2);
assert_eq!(id1, id3);
assert_eq!(id2, id4);
assert_ne!(id5, id1);
let messages = db
.get_channel_messages(channel, user_a, 5, None)
.await
.unwrap()
.into_iter()
.map(|m| (m.id, m.body, m.mentions))
.collect::<Vec<_>>();
assert_eq!(
messages,
&[
(
id1.to_proto(),
"hi @user_b".into(),
mentions_to_proto(&[(3..10, user_b.to_proto())]),
),
(
id2.to_proto(),
"hello, fellow users".into(),
mentions_to_proto(&[])
),
(
id5.to_proto(),
"omg @user_a (same nonce as user_a's first message)".into(),
mentions_to_proto(&[(4..11, user_a.to_proto())]),
),
]
);
}
test_both_dbs!(
test_unseen_channel_messages,
test_unseen_channel_messages_postgres,
test_unseen_channel_messages_sqlite
);
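// Verifies unseen-message tracking: `unseen_channel_messages` reports the
// latest message id for each channel the observer has not caught up with, and
// observing an older message never regresses the observed state.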
async fn test_unseen_channel_messages(db: &Arc<Database>) {
let user = new_test_user(db, "user_a@example.com").await;
let observer = new_test_user(db, "user_b@example.com").await;
let channel_1 = db.create_root_channel("channel", user).await.unwrap();
let channel_2 = db.create_root_channel("channel-2", user).await.unwrap();
db.invite_channel_member(channel_1, observer, user, ChannelRole::Member)
.await
.unwrap();
db.invite_channel_member(channel_2, observer, user, ChannelRole::Member)
.await
.unwrap();
db.respond_to_channel_invite(channel_1, observer, true)
.await
.unwrap();
db.respond_to_channel_invite(channel_2, observer, true)
.await
.unwrap();
let owner_id = db.create_server("test").await.unwrap().0 as u32;
let user_connection_id = rpc::ConnectionId { owner_id, id: 0 };
db.join_channel_chat(channel_1, user_connection_id, user)
.await
.unwrap();
let _ = db
.create_channel_message(channel_1, user, "1_1", &[], OffsetDateTime::now_utc(), 1)
.await
.unwrap();
let second_message = db
.create_channel_message(channel_1, user, "1_2", &[], OffsetDateTime::now_utc(), 2)
.await
.unwrap()
.message_id;
let third_message = db
.create_channel_message(channel_1, user, "1_3", &[], OffsetDateTime::now_utc(), 3)
.await
.unwrap()
.message_id;
db.join_channel_chat(channel_2, user_connection_id, user)
.await
.unwrap();
let fourth_message = db
.create_channel_message(channel_2, user, "2_1", &[], OffsetDateTime::now_utc(), 4)
.await
.unwrap()
.message_id;
// Check that observer has new messages
let unseen_messages = db
.transaction(|tx| async move {
db.unseen_channel_messages(observer, &[channel_1, channel_2], &*tx)
.await
})
.await
.unwrap();
assert_eq!(
unseen_messages,
[
rpc::proto::UnseenChannelMessage {
channel_id: channel_1.to_proto(),
message_id: third_message.to_proto(),
},
rpc::proto::UnseenChannelMessage {
channel_id: channel_2.to_proto(),
message_id: fourth_message.to_proto(),
},
]
);
// Observe the second message
db.observe_channel_message(channel_1, observer, second_message)
.await
.unwrap();
// Make sure the observer still has a new message
let unseen_messages = db
.transaction(|tx| async move {
db.unseen_channel_messages(observer, &[channel_1, channel_2], &*tx)
.await
})
.await
.unwrap();
assert_eq!(
unseen_messages,
[
rpc::proto::UnseenChannelMessage {
channel_id: channel_1.to_proto(),
message_id: third_message.to_proto(),
},
rpc::proto::UnseenChannelMessage {
channel_id: channel_2.to_proto(),
message_id: fourth_message.to_proto(),
},
]
);
// Observe the third message
db.observe_channel_message(channel_1, observer, third_message)
.await
.unwrap();
// Make sure the observer does not have a new message
let unseen_messages = db
.transaction(|tx| async move {
db.unseen_channel_messages(observer, &[channel_1, channel_2], &*tx)
.await
})
.await
.unwrap();
assert_eq!(
unseen_messages,
[rpc::proto::UnseenChannelMessage {
channel_id: channel_2.to_proto(),
message_id: fourth_message.to_proto(),
}]
);
// Observe the second message again, should not regress our observed state
db.observe_channel_message(channel_1, observer, second_message)
.await
.unwrap();
// Make sure the observer does not have a new message
let unseen_messages = db
.transaction(|tx| async move {
db.unseen_channel_messages(observer, &[channel_1, channel_2], &*tx)
.await
})
.await
.unwrap();
assert_eq!(
unseen_messages,
[rpc::proto::UnseenChannelMessage {
channel_id: channel_2.to_proto(),
message_id: fourth_message.to_proto(),
}]
);
}
test_both_dbs!(
test_channel_message_mentions,
test_channel_message_mentions_postgres,
test_channel_message_mentions_sqlite
);
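// Verifies that mention ranges are persisted with each message and returned
// verbatim by `get_channel_messages`.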
async fn test_channel_message_mentions(db: &Arc<Database>) {
let user_a = new_test_user(db, "user_a@example.com").await;
let user_b = new_test_user(db, "user_b@example.com").await;
let user_c = new_test_user(db, "user_c@example.com").await;
let channel = db
.create_channel("channel", None, user_a)
.await
.unwrap()
.channel
.id;
db.invite_channel_member(channel, user_b, user_a, ChannelRole::Member)
.await
.unwrap();
db.respond_to_channel_invite(channel, user_b, true)
.await
.unwrap();
let owner_id = db.create_server("test").await.unwrap().0 as u32;
let connection_id = rpc::ConnectionId { owner_id, id: 0 };
db.join_channel_chat(channel, connection_id, user_a)
.await
.unwrap();
db.create_channel_message(
channel,
user_a,
"hi @user_b and @user_c",
&mentions_to_proto(&[(3..10, user_b.to_proto()), (15..22, user_c.to_proto())]),
OffsetDateTime::now_utc(),
1,
)
.await
.unwrap();
db.create_channel_message(
channel,
user_a,
"bye @user_c",
&mentions_to_proto(&[(4..11, user_c.to_proto())]),
OffsetDateTime::now_utc(),
2,
)
.await
.unwrap();
db.create_channel_message(
channel,
user_a,
"umm",
&mentions_to_proto(&[]),
OffsetDateTime::now_utc(),
3,
)
.await
.unwrap();
db.create_channel_message(
channel,
user_a,
"@user_b, stop.",
&mentions_to_proto(&[(0..7, user_b.to_proto())]),
OffsetDateTime::now_utc(),
4,
)
.await
.unwrap();
let messages = db
.get_channel_messages(channel, user_b, 5, None)
.await
.unwrap()
.into_iter()
.map(|m| (m.body, m.mentions))
.collect::<Vec<_>>();
assert_eq!(
&messages,
&[
(
"hi @user_b and @user_c".into(),
mentions_to_proto(&[(3..10, user_b.to_proto()), (15..22, user_c.to_proto())]),
),
(
"bye @user_c".into(),
mentions_to_proto(&[(4..11, user_c.to_proto())]),
),
("umm".into(), mentions_to_proto(&[]),),
(
"@user_b, stop.".into(),
mentions_to_proto(&[(0..7, user_b.to_proto())]),
),
]
);
}

20
crates/collab2/src/env.rs Normal file
View file

@ -0,0 +1,20 @@
use anyhow::anyhow;
use std::fs;
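// Loads `./.env.toml` from the working directory and exports each entry as an
// environment variable via `std::env::set_var`. String, integer, and float
// values are converted to strings; any other TOML value type panics.
//
// A minimal sketch of what such a file might contain (the key names below are
// illustrative assumptions, not an authoritative list):
//
//     DATABASE_URL = "postgres://postgres@localhost/zed"
//     HTTP_PORT = 8080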
pub fn load_dotenv() -> anyhow::Result<()> {
let env: toml::map::Map<String, toml::Value> = toml::de::from_str(
&fs::read_to_string("./.env.toml").map_err(|_| anyhow!("no .env.toml file found"))?,
)?;
for (key, value) in env {
let value = match value {
toml::Value::String(value) => value,
toml::Value::Integer(value) => value.to_string(),
toml::Value::Float(value) => value.to_string(),
_ => panic!("unsupported TOML value in .env.toml for key {}", key),
};
std::env::set_var(key, value);
}
Ok(())
}

View file

@ -0,0 +1,29 @@
// Allow tide Results to accept context like other Results do when
// using anyhow.
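// A sketch of intended usage (the call below is hypothetical):
//     load_user(id).await.context("failed to load user")?;
// mirroring anyhow's `Context::context`, but on `tide::Result`.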
pub trait TideResultExt {
fn context<C>(self, cx: C) -> Self
where
C: std::fmt::Display + Send + Sync + 'static;
fn with_context<C, F>(self, f: F) -> Self
where
C: std::fmt::Display + Send + Sync + 'static,
F: FnOnce() -> C;
}
impl<T> TideResultExt for tide::Result<T> {
fn context<C>(self, cx: C) -> Self
where
C: std::fmt::Display + Send + Sync + 'static,
{
self.map_err(|e| tide::Error::new(e.status(), e.into_inner().context(cx)))
}
fn with_context<C, F>(self, f: F) -> Self
where
C: std::fmt::Display + Send + Sync + 'static,
F: FnOnce() -> C,
{
self.map_err(|e| tide::Error::new(e.status(), e.into_inner().context(f())))
}
}

View file

@ -0,0 +1,47 @@
use std::{future::Future, time::Duration};
#[cfg(test)]
use gpui::BackgroundExecutor;
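// Runtime abstraction used to spawn background work and sleep: production
// code runs on tokio, while tests use GPUI's deterministic
// `BackgroundExecutor` so scheduling and timers can be controlled.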
#[derive(Clone)]
pub enum Executor {
Production,
#[cfg(test)]
Deterministic(BackgroundExecutor),
}
impl Executor {
pub fn spawn_detached<F>(&self, future: F)
where
F: 'static + Send + Future<Output = ()>,
{
match self {
Executor::Production => {
tokio::spawn(future);
}
#[cfg(test)]
Executor::Deterministic(background) => {
background.spawn(future).detach();
}
}
}
pub fn sleep(&self, duration: Duration) -> impl Future<Output = ()> {
let this = self.clone();
async move {
match this {
Executor::Production => tokio::time::sleep(duration).await,
#[cfg(test)]
Executor::Deterministic(background) => background.timer(duration).await,
}
}
}
pub fn record_backtrace(&self) {
match self {
Executor::Production => {}
#[cfg(test)]
Executor::Deterministic(background) => background.record_backtrace(),
}
}
}

147
crates/collab2/src/lib.rs Normal file
View file

@ -0,0 +1,147 @@
pub mod api;
pub mod auth;
pub mod db;
pub mod env;
pub mod executor;
pub mod rpc;
#[cfg(test)]
mod tests;
use axum::{http::StatusCode, response::IntoResponse};
use db::Database;
use executor::Executor;
use serde::Deserialize;
use std::{path::PathBuf, sync::Arc};
pub type Result<T, E = Error> = std::result::Result<T, E>;
pub enum Error {
Http(StatusCode, String),
Database(sea_orm::error::DbErr),
Internal(anyhow::Error),
}
impl From<anyhow::Error> for Error {
fn from(error: anyhow::Error) -> Self {
Self::Internal(error)
}
}
impl From<sea_orm::error::DbErr> for Error {
fn from(error: sea_orm::error::DbErr) -> Self {
Self::Database(error)
}
}
impl From<axum::Error> for Error {
fn from(error: axum::Error) -> Self {
Self::Internal(error.into())
}
}
impl From<hyper::Error> for Error {
fn from(error: hyper::Error) -> Self {
Self::Internal(error.into())
}
}
impl From<serde_json::Error> for Error {
fn from(error: serde_json::Error) -> Self {
Self::Internal(error.into())
}
}
impl IntoResponse for Error {
fn into_response(self) -> axum::response::Response {
match self {
Error::Http(code, message) => (code, message).into_response(),
Error::Database(error) => {
(StatusCode::INTERNAL_SERVER_ERROR, format!("{}", &error)).into_response()
}
Error::Internal(error) => {
(StatusCode::INTERNAL_SERVER_ERROR, format!("{}", &error)).into_response()
}
}
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Error::Http(code, message) => (code, message).fmt(f),
Error::Database(error) => error.fmt(f),
Error::Internal(error) => error.fmt(f),
}
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Error::Http(code, message) => write!(f, "{code}: {message}"),
Error::Database(error) => error.fmt(f),
Error::Internal(error) => error.fmt(f),
}
}
}
impl std::error::Error for Error {}
#[derive(Default, Deserialize)]
pub struct Config {
pub http_port: u16,
pub database_url: String,
pub database_max_connections: u32,
pub api_token: String,
pub invite_link_prefix: String,
pub live_kit_server: Option<String>,
pub live_kit_key: Option<String>,
pub live_kit_secret: Option<String>,
pub rust_log: Option<String>,
pub log_json: Option<bool>,
pub zed_environment: String,
}
#[derive(Default, Deserialize)]
pub struct MigrateConfig {
pub database_url: String,
pub migrations_path: Option<PathBuf>,
}
pub struct AppState {
pub db: Arc<Database>,
pub live_kit_client: Option<Arc<dyn live_kit_server::api::Client>>,
pub config: Config,
}
impl AppState {
pub async fn new(config: Config) -> Result<Arc<Self>> {
let mut db_options = db::ConnectOptions::new(config.database_url.clone());
db_options.max_connections(config.database_max_connections);
let mut db = Database::new(db_options, Executor::Production).await?;
db.initialize_notification_kinds().await?;
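// A LiveKit client is only constructed when the server URL, key, and secret
// are all present in the config; otherwise it is left as `None`.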
let live_kit_client = if let Some(((server, key), secret)) = config
.live_kit_server
.as_ref()
.zip(config.live_kit_key.as_ref())
.zip(config.live_kit_secret.as_ref())
{
Some(Arc::new(live_kit_server::api::LiveKitClient::new(
server.clone(),
key.clone(),
secret.clone(),
)) as Arc<dyn live_kit_server::api::Client>)
} else {
None
};
let this = Self {
db: Arc::new(db),
live_kit_client,
config,
};
Ok(Arc::new(this))
}
}

139
crates/collab2/src/main.rs Normal file
View file

@ -0,0 +1,139 @@
use anyhow::anyhow;
use axum::{routing::get, Extension, Router};
use collab2::{db, env, executor::Executor, AppState, Config, MigrateConfig, Result};
use db::Database;
use std::{
env::args,
net::{SocketAddr, TcpListener},
path::Path,
sync::Arc,
};
use tokio::signal::unix::SignalKind;
use tracing_log::LogTracer;
use tracing_subscriber::{filter::EnvFilter, fmt::format::JsonFields, Layer};
use util::ResultExt;
const VERSION: &str = env!("CARGO_PKG_VERSION");
#[tokio::main]
async fn main() -> Result<()> {
if let Err(error) = env::load_dotenv() {
eprintln!(
"error loading .env.toml (this is expected in production): {}",
error
);
}
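// The binary exposes three subcommands: `version` prints the build version,
// `migrate` runs pending database migrations and exits, and `serve` starts
// the HTTP API and RPC server.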
match args().skip(1).next().as_deref() {
Some("version") => {
println!("collab v{VERSION}");
}
Some("migrate") => {
let config = envy::from_env::<MigrateConfig>().expect("error loading config");
let mut db_options = db::ConnectOptions::new(config.database_url.clone());
db_options.max_connections(5);
let db = Database::new(db_options, Executor::Production).await?;
let migrations_path = config
.migrations_path
.as_deref()
.unwrap_or_else(|| Path::new(concat!(env!("CARGO_MANIFEST_DIR"), "/migrations")));
let migrations = db.migrate(migrations_path, false).await?;
for (migration, duration) in migrations {
println!(
"Ran {} {} {:?}",
migration.version, migration.description, duration
);
}
return Ok(());
}
Some("serve") => {
let config = envy::from_env::<Config>().expect("error loading config");
init_tracing(&config);
let state = AppState::new(config).await?;
let listener = TcpListener::bind(&format!("0.0.0.0:{}", state.config.http_port))
.expect("failed to bind TCP listener");
let epoch = state
.db
.create_server(&state.config.zed_environment)
.await?;
let rpc_server = collab2::rpc::Server::new(epoch, state.clone(), Executor::Production);
rpc_server.start().await?;
let app = collab2::api::routes(rpc_server.clone(), state.clone())
.merge(collab2::rpc::routes(rpc_server.clone()))
.merge(
Router::new()
.route("/", get(handle_root))
.route("/healthz", get(handle_liveness_probe))
.layer(Extension(state.clone())),
);
axum::Server::from_tcp(listener)?
.serve(app.into_make_service_with_connect_info::<SocketAddr>())
.with_graceful_shutdown(async move {
let mut sigterm = tokio::signal::unix::signal(SignalKind::terminate())
.expect("failed to listen for interrupt signal");
let mut sigint = tokio::signal::unix::signal(SignalKind::interrupt())
.expect("failed to listen for interrupt signal");
let sigterm = sigterm.recv();
let sigint = sigint.recv();
futures::pin_mut!(sigterm, sigint);
futures::future::select(sigterm, sigint).await;
tracing::info!("Received interrupt signal");
rpc_server.teardown();
})
.await?;
}
_ => {
Err(anyhow!("usage: collab <version | migrate | serve>"))?;
}
}
Ok(())
}
async fn handle_root() -> String {
format!("collab v{VERSION}")
}
async fn handle_liveness_probe(Extension(state): Extension<Arc<AppState>>) -> Result<String> {
state.db.get_all_users(0, 1).await?;
Ok("ok".to_string())
}
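// Tracing is only initialized when `rust_log` is set in the config; its value
// is parsed with `tracing_subscriber::EnvFilter`, so a directive like the
// hypothetical `info,collab2=debug` selects per-crate log levels.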
pub fn init_tracing(config: &Config) -> Option<()> {
use std::str::FromStr;
use tracing_subscriber::layer::SubscriberExt;
let rust_log = config.rust_log.clone()?;
LogTracer::init().log_err()?;
let subscriber = tracing_subscriber::Registry::default()
.with(if config.log_json.unwrap_or(false) {
Box::new(
tracing_subscriber::fmt::layer()
.fmt_fields(JsonFields::default())
.event_format(
tracing_subscriber::fmt::format()
.json()
.flatten_event(true)
.with_span_list(true),
),
) as Box<dyn Layer<_> + Send + Sync>
} else {
Box::new(
tracing_subscriber::fmt::layer()
.event_format(tracing_subscriber::fmt::format().pretty()),
)
})
.with(EnvFilter::from_str(rust_log.as_str()).log_err()?);
tracing::subscriber::set_global_default(subscriber).unwrap();
None
}

3500
crates/collab2/src/rpc.rs Normal file

File diff suppressed because it is too large

View file

@ -0,0 +1,98 @@
use crate::db::UserId;
use anyhow::{anyhow, Result};
use collections::{BTreeMap, HashSet};
use rpc::ConnectionId;
use serde::Serialize;
use tracing::instrument;
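// In-memory index of open RPC connections and the users that own them, used
// to answer queries like `user_connection_ids` and `is_user_online`.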
#[derive(Default, Serialize)]
pub struct ConnectionPool {
connections: BTreeMap<ConnectionId, Connection>,
connected_users: BTreeMap<UserId, ConnectedUser>,
}
#[derive(Default, Serialize)]
struct ConnectedUser {
connection_ids: HashSet<ConnectionId>,
}
#[derive(Serialize)]
pub struct Connection {
pub user_id: UserId,
pub admin: bool,
}
impl ConnectionPool {
pub fn reset(&mut self) {
self.connections.clear();
self.connected_users.clear();
}
#[instrument(skip(self))]
pub fn add_connection(&mut self, connection_id: ConnectionId, user_id: UserId, admin: bool) {
self.connections
.insert(connection_id, Connection { user_id, admin });
let connected_user = self.connected_users.entry(user_id).or_default();
connected_user.connection_ids.insert(connection_id);
}
#[instrument(skip(self))]
pub fn remove_connection(&mut self, connection_id: ConnectionId) -> Result<()> {
let connection = self
.connections
.get_mut(&connection_id)
.ok_or_else(|| anyhow!("no such connection"))?;
let user_id = connection.user_id;
let connected_user = self.connected_users.get_mut(&user_id).unwrap();
connected_user.connection_ids.remove(&connection_id);
if connected_user.connection_ids.is_empty() {
self.connected_users.remove(&user_id);
}
self.connections.remove(&connection_id).unwrap();
Ok(())
}
pub fn connections(&self) -> impl Iterator<Item = &Connection> {
self.connections.values()
}
pub fn user_connection_ids(&self, user_id: UserId) -> impl Iterator<Item = ConnectionId> + '_ {
self.connected_users
.get(&user_id)
.into_iter()
.flat_map(|state| &state.connection_ids)
.copied()
}
pub fn is_user_online(&self, user_id: UserId) -> bool {
!self
.connected_users
.get(&user_id)
.unwrap_or(&Default::default())
.connection_ids
.is_empty()
}
#[cfg(test)]
pub fn check_invariants(&self) {
for (connection_id, connection) in &self.connections {
assert!(self
.connected_users
.get(&connection.user_id)
.unwrap()
.connection_ids
.contains(connection_id));
}
for (user_id, state) in &self.connected_users {
for connection_id in &state.connection_ids {
assert_eq!(
self.connections.get(connection_id).unwrap().user_id,
*user_id
);
}
}
}
}

View file

@ -0,0 +1,47 @@
use call::Room;
use gpui::{Model, TestAppContext};
mod channel_buffer_tests;
mod channel_message_tests;
mod channel_tests;
mod following_tests;
mod integration_tests;
mod notification_tests;
mod random_channel_buffer_tests;
mod random_project_collaboration_tests;
mod randomized_test_helpers;
mod test_server;
pub use crate as collab2;
pub use randomized_test_helpers::{
run_randomized_test, save_randomized_test_plan, RandomizedTest, TestError, UserTestPlan,
};
pub use test_server::{TestClient, TestServer};
#[derive(Debug, Eq, PartialEq)]
struct RoomParticipants {
remote: Vec<String>,
pending: Vec<String>,
}
fn room_participants(room: &Model<Room>, cx: &mut TestAppContext) -> RoomParticipants {
room.read_with(cx, |room, _| {
let mut remote = room
.remote_participants()
.iter()
.map(|(_, participant)| participant.user.github_login.clone())
.collect::<Vec<_>>();
let mut pending = room
.pending_participants()
.iter()
.map(|user| user.github_login.clone())
.collect::<Vec<_>>();
remote.sort();
pending.sort();
RoomParticipants { remote, pending }
})
}
fn channel_id(room: &Model<Room>, cx: &mut TestAppContext) -> Option<u64> {
cx.read(|cx| room.read(cx).channel_id())
}

View file

@ -0,0 +1,871 @@
use crate::{
rpc::{CLEANUP_TIMEOUT, RECONNECT_TIMEOUT},
tests::TestServer,
};
use client::{Collaborator, UserId};
use collections::HashMap;
use futures::future;
use gpui::{BackgroundExecutor, Model, TestAppContext};
use rpc::{proto::PeerId, RECEIVE_TIMEOUT};
#[gpui::test]
async fn test_core_channel_buffers(
executor: BackgroundExecutor,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
) {
let mut server = TestServer::start(executor.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
let channel_id = server
.make_channel("zed", None, (&client_a, cx_a), &mut [(&client_b, cx_b)])
.await;
// Client A joins the channel buffer
let channel_buffer_a = client_a
.channel_store()
.update(cx_a, |store, cx| store.open_channel_buffer(channel_id, cx))
.await
.unwrap();
// Client A edits the buffer
let buffer_a = channel_buffer_a.read_with(cx_a, |buffer, _| buffer.buffer());
buffer_a.update(cx_a, |buffer, cx| {
buffer.edit([(0..0, "hello world")], None, cx)
});
buffer_a.update(cx_a, |buffer, cx| {
buffer.edit([(5..5, ", cruel")], None, cx)
});
buffer_a.update(cx_a, |buffer, cx| {
buffer.edit([(0..5, "goodbye")], None, cx)
});
buffer_a.update(cx_a, |buffer, cx| buffer.undo(cx));
assert_eq!(buffer_text(&buffer_a, cx_a), "hello, cruel world");
executor.run_until_parked();
// Client B joins the channel buffer
let channel_buffer_b = client_b
.channel_store()
.update(cx_b, |store, cx| store.open_channel_buffer(channel_id, cx))
.await
.unwrap();
channel_buffer_b.read_with(cx_b, |buffer, _| {
assert_collaborators(
buffer.collaborators(),
&[client_a.user_id(), client_b.user_id()],
);
});
// Client B sees the correct text, and then edits it
let buffer_b = channel_buffer_b.read_with(cx_b, |buffer, _| buffer.buffer());
assert_eq!(
buffer_b.read_with(cx_b, |buffer, _| buffer.remote_id()),
buffer_a.read_with(cx_a, |buffer, _| buffer.remote_id())
);
assert_eq!(buffer_text(&buffer_b, cx_b), "hello, cruel world");
buffer_b.update(cx_b, |buffer, cx| {
buffer.edit([(7..12, "beautiful")], None, cx)
});
// Both A and B see the new edit
executor.run_until_parked();
assert_eq!(buffer_text(&buffer_a, cx_a), "hello, beautiful world");
assert_eq!(buffer_text(&buffer_b, cx_b), "hello, beautiful world");
// Client A closes the channel buffer.
cx_a.update(|_| drop(channel_buffer_a));
executor.run_until_parked();
// Client B sees that client A is gone from the channel buffer.
channel_buffer_b.read_with(cx_b, |buffer, _| {
assert_collaborators(&buffer.collaborators(), &[client_b.user_id()]);
});
// Client A rejoins the channel buffer
let _channel_buffer_a = client_a
.channel_store()
.update(cx_a, |store, cx| store.open_channel_buffer(channel_id, cx))
.await
.unwrap();
executor.run_until_parked();
// Sanity check: make sure we saw A rejoining
channel_buffer_b.read_with(cx_b, |buffer, _| {
assert_collaborators(
&buffer.collaborators(),
&[client_a.user_id(), client_b.user_id()],
);
});
// Client A loses connection.
server.forbid_connections();
server.disconnect_client(client_a.peer_id().unwrap());
executor.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
// Client B observes A disconnect
channel_buffer_b.read_with(cx_b, |buffer, _| {
assert_collaborators(&buffer.collaborators(), &[client_b.user_id()]);
});
// TODO:
// - Test synchronizing offline updates, what happens to A's channel buffer when A disconnects
// - Test interaction with channel deletion while buffer is open
}
// todo!("collab_ui")
// #[gpui::test]
// async fn test_channel_notes_participant_indices(
// executor: BackgroundExecutor,
// mut cx_a: &mut TestAppContext,
// mut cx_b: &mut TestAppContext,
// cx_c: &mut TestAppContext,
// ) {
// let mut server = TestServer::start(&executor).await;
// let client_a = server.create_client(cx_a, "user_a").await;
// let client_b = server.create_client(cx_b, "user_b").await;
// let client_c = server.create_client(cx_c, "user_c").await;
// let active_call_a = cx_a.read(ActiveCall::global);
// let active_call_b = cx_b.read(ActiveCall::global);
// cx_a.update(editor::init);
// cx_b.update(editor::init);
// cx_c.update(editor::init);
// let channel_id = server
// .make_channel(
// "the-channel",
// None,
// (&client_a, cx_a),
// &mut [(&client_b, cx_b), (&client_c, cx_c)],
// )
// .await;
// client_a
// .fs()
// .insert_tree("/root", json!({"file.txt": "123"}))
// .await;
// let (project_a, worktree_id_a) = client_a.build_local_project("/root", cx_a).await;
// let project_b = client_b.build_empty_local_project(cx_b);
// let project_c = client_c.build_empty_local_project(cx_c);
// let workspace_a = client_a.build_workspace(&project_a, cx_a).root(cx_a);
// let workspace_b = client_b.build_workspace(&project_b, cx_b).root(cx_b);
// let workspace_c = client_c.build_workspace(&project_c, cx_c).root(cx_c);
// // Clients A, B, and C open the channel notes
// let channel_view_a = cx_a
// .update(|cx| ChannelView::open(channel_id, workspace_a.clone(), cx))
// .await
// .unwrap();
// let channel_view_b = cx_b
// .update(|cx| ChannelView::open(channel_id, workspace_b.clone(), cx))
// .await
// .unwrap();
// let channel_view_c = cx_c
// .update(|cx| ChannelView::open(channel_id, workspace_c.clone(), cx))
// .await
// .unwrap();
// // Clients A, B, and C all insert and select some text
// channel_view_a.update(cx_a, |notes, cx| {
// notes.editor.update(cx, |editor, cx| {
// editor.insert("a", cx);
// editor.change_selections(None, cx, |selections| {
// selections.select_ranges(vec![0..1]);
// });
// });
// });
// executor.run_until_parked();
// channel_view_b.update(cx_b, |notes, cx| {
// notes.editor.update(cx, |editor, cx| {
// editor.move_down(&Default::default(), cx);
// editor.insert("b", cx);
// editor.change_selections(None, cx, |selections| {
// selections.select_ranges(vec![1..2]);
// });
// });
// });
// executor.run_until_parked();
// channel_view_c.update(cx_c, |notes, cx| {
// notes.editor.update(cx, |editor, cx| {
// editor.move_down(&Default::default(), cx);
// editor.insert("c", cx);
// editor.change_selections(None, cx, |selections| {
// selections.select_ranges(vec![2..3]);
// });
// });
// });
// // Client A sees clients B and C without assigned colors, because they aren't
// // in a call together.
// executor.run_until_parked();
// channel_view_a.update(cx_a, |notes, cx| {
// notes.editor.update(cx, |editor, cx| {
// assert_remote_selections(editor, &[(None, 1..2), (None, 2..3)], cx);
// });
// });
// // Clients A and B join the same call.
// for (call, cx) in [(&active_call_a, &mut cx_a), (&active_call_b, &mut cx_b)] {
// call.update(*cx, |call, cx| call.join_channel(channel_id, cx))
// .await
// .unwrap();
// }
// // Clients A and B see each other with two different assigned colors. Client C
// // still doesn't have a color.
// executor.run_until_parked();
// channel_view_a.update(cx_a, |notes, cx| {
// notes.editor.update(cx, |editor, cx| {
// assert_remote_selections(
// editor,
// &[(Some(ParticipantIndex(1)), 1..2), (None, 2..3)],
// cx,
// );
// });
// });
// channel_view_b.update(cx_b, |notes, cx| {
// notes.editor.update(cx, |editor, cx| {
// assert_remote_selections(
// editor,
// &[(Some(ParticipantIndex(0)), 0..1), (None, 2..3)],
// cx,
// );
// });
// });
// // Client A shares a project, and client B joins.
// let project_id = active_call_a
// .update(cx_a, |call, cx| call.share_project(project_a.clone(), cx))
// .await
// .unwrap();
// let project_b = client_b.build_remote_project(project_id, cx_b).await;
// let workspace_b = client_b.build_workspace(&project_b, cx_b).root(cx_b);
// // Clients A and B open the same file.
// let editor_a = workspace_a
// .update(cx_a, |workspace, cx| {
// workspace.open_path((worktree_id_a, "file.txt"), None, true, cx)
// })
// .await
// .unwrap()
// .downcast::<Editor>()
// .unwrap();
// let editor_b = workspace_b
// .update(cx_b, |workspace, cx| {
// workspace.open_path((worktree_id_a, "file.txt"), None, true, cx)
// })
// .await
// .unwrap()
// .downcast::<Editor>()
// .unwrap();
// editor_a.update(cx_a, |editor, cx| {
// editor.change_selections(None, cx, |selections| {
// selections.select_ranges(vec![0..1]);
// });
// });
// editor_b.update(cx_b, |editor, cx| {
// editor.change_selections(None, cx, |selections| {
// selections.select_ranges(vec![2..3]);
// });
// });
// executor.run_until_parked();
// // Clients A and B see each other with the same colors as in the channel notes.
// editor_a.update(cx_a, |editor, cx| {
// assert_remote_selections(editor, &[(Some(ParticipantIndex(1)), 2..3)], cx);
// });
// editor_b.update(cx_b, |editor, cx| {
// assert_remote_selections(editor, &[(Some(ParticipantIndex(0)), 0..1)], cx);
// });
// }
//todo!(editor)
// #[track_caller]
// fn assert_remote_selections(
// editor: &mut Editor,
// expected_selections: &[(Option<ParticipantIndex>, Range<usize>)],
// cx: &mut ViewContext<Editor>,
// ) {
// let snapshot = editor.snapshot(cx);
// let range = Anchor::min()..Anchor::max();
// let remote_selections = snapshot
// .remote_selections_in_range(&range, editor.collaboration_hub().unwrap(), cx)
// .map(|s| {
// let start = s.selection.start.to_offset(&snapshot.buffer_snapshot);
// let end = s.selection.end.to_offset(&snapshot.buffer_snapshot);
// (s.participant_index, start..end)
// })
// .collect::<Vec<_>>();
// assert_eq!(
// remote_selections, expected_selections,
// "incorrect remote selections"
// );
// }
#[gpui::test]
async fn test_multiple_handles_to_channel_buffer(
deterministic: BackgroundExecutor,
cx_a: &mut TestAppContext,
) {
let mut server = TestServer::start(deterministic.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let channel_id = server
.make_channel("the-channel", None, (&client_a, cx_a), &mut [])
.await;
let channel_buffer_1 = client_a
.channel_store()
.update(cx_a, |store, cx| store.open_channel_buffer(channel_id, cx));
let channel_buffer_2 = client_a
.channel_store()
.update(cx_a, |store, cx| store.open_channel_buffer(channel_id, cx));
let channel_buffer_3 = client_a
.channel_store()
.update(cx_a, |store, cx| store.open_channel_buffer(channel_id, cx));
// All concurrent tasks for opening a channel buffer return the same model handle.
let (channel_buffer, channel_buffer_2, channel_buffer_3) =
future::try_join3(channel_buffer_1, channel_buffer_2, channel_buffer_3)
.await
.unwrap();
let channel_buffer_model_id = channel_buffer.entity_id();
assert_eq!(channel_buffer, channel_buffer_2);
assert_eq!(channel_buffer, channel_buffer_3);
channel_buffer.update(cx_a, |buffer, cx| {
buffer.buffer().update(cx, |buffer, cx| {
buffer.edit([(0..0, "hello")], None, cx);
})
});
deterministic.run_until_parked();
cx_a.update(|_| {
drop(channel_buffer);
drop(channel_buffer_2);
drop(channel_buffer_3);
});
deterministic.run_until_parked();
// The channel buffer can be reopened after dropping it.
let channel_buffer = client_a
.channel_store()
.update(cx_a, |store, cx| store.open_channel_buffer(channel_id, cx))
.await
.unwrap();
assert_ne!(channel_buffer.entity_id(), channel_buffer_model_id);
channel_buffer.update(cx_a, |buffer, cx| {
buffer.buffer().update(cx, |buffer, _| {
assert_eq!(buffer.text(), "hello");
})
});
}
#[gpui::test]
async fn test_channel_buffer_disconnect(
deterministic: BackgroundExecutor,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
) {
let mut server = TestServer::start(deterministic.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
let channel_id = server
.make_channel(
"the-channel",
None,
(&client_a, cx_a),
&mut [(&client_b, cx_b)],
)
.await;
let channel_buffer_a = client_a
.channel_store()
.update(cx_a, |store, cx| store.open_channel_buffer(channel_id, cx))
.await
.unwrap();
let channel_buffer_b = client_b
.channel_store()
.update(cx_b, |store, cx| store.open_channel_buffer(channel_id, cx))
.await
.unwrap();
server.forbid_connections();
server.disconnect_client(client_a.peer_id().unwrap());
deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
channel_buffer_a.update(cx_a, |buffer, cx| {
assert_eq!(buffer.channel(cx).unwrap().name, "the-channel");
assert!(!buffer.is_connected());
});
deterministic.run_until_parked();
server.allow_connections();
deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
deterministic.run_until_parked();
client_a
.channel_store()
.update(cx_a, |channel_store, _| {
channel_store.remove_channel(channel_id)
})
.await
.unwrap();
deterministic.run_until_parked();
// Channel buffer observed the deletion
channel_buffer_b.update(cx_b, |buffer, cx| {
assert!(buffer.channel(cx).is_none());
assert!(!buffer.is_connected());
});
}
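// Exercises offline editing of a channel buffer: while client A is
// disconnected, both clients keep editing, and on reconnect the buffers
// converge to the same text with matching collaborator lists.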
#[gpui::test]
async fn test_rejoin_channel_buffer(
deterministic: BackgroundExecutor,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
) {
let mut server = TestServer::start(deterministic.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
let channel_id = server
.make_channel(
"the-channel",
None,
(&client_a, cx_a),
&mut [(&client_b, cx_b)],
)
.await;
let channel_buffer_a = client_a
.channel_store()
.update(cx_a, |store, cx| store.open_channel_buffer(channel_id, cx))
.await
.unwrap();
let channel_buffer_b = client_b
.channel_store()
.update(cx_b, |store, cx| store.open_channel_buffer(channel_id, cx))
.await
.unwrap();
channel_buffer_a.update(cx_a, |buffer, cx| {
buffer.buffer().update(cx, |buffer, cx| {
buffer.edit([(0..0, "1")], None, cx);
})
});
deterministic.run_until_parked();
// Client A disconnects.
server.forbid_connections();
server.disconnect_client(client_a.peer_id().unwrap());
// Both clients make an edit.
channel_buffer_a.update(cx_a, |buffer, cx| {
buffer.buffer().update(cx, |buffer, cx| {
buffer.edit([(1..1, "2")], None, cx);
})
});
channel_buffer_b.update(cx_b, |buffer, cx| {
buffer.buffer().update(cx, |buffer, cx| {
buffer.edit([(0..0, "0")], None, cx);
})
});
// Both clients see their own edit.
deterministic.run_until_parked();
channel_buffer_a.read_with(cx_a, |buffer, cx| {
assert_eq!(buffer.buffer().read(cx).text(), "12");
});
channel_buffer_b.read_with(cx_b, |buffer, cx| {
assert_eq!(buffer.buffer().read(cx).text(), "01");
});
// Client A reconnects. Both clients see each other's edits, and see
// the same collaborators.
server.allow_connections();
deterministic.advance_clock(RECEIVE_TIMEOUT);
channel_buffer_a.read_with(cx_a, |buffer, cx| {
assert_eq!(buffer.buffer().read(cx).text(), "012");
});
channel_buffer_b.read_with(cx_b, |buffer, cx| {
assert_eq!(buffer.buffer().read(cx).text(), "012");
});
channel_buffer_a.read_with(cx_a, |buffer_a, _| {
channel_buffer_b.read_with(cx_b, |buffer_b, _| {
assert_eq!(buffer_a.collaborators(), buffer_b.collaborators());
});
});
}
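// Like the rejoin test above, but restarts the entire server while edits are
// pending, and verifies that a client that cannot reconnect (client C) is
// dropped from the collaborator list.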
#[gpui::test]
async fn test_channel_buffers_and_server_restarts(
deterministic: BackgroundExecutor,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
cx_c: &mut TestAppContext,
) {
let mut server = TestServer::start(deterministic.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
let client_c = server.create_client(cx_c, "user_c").await;
let channel_id = server
.make_channel(
"the-channel",
None,
(&client_a, cx_a),
&mut [(&client_b, cx_b), (&client_c, cx_c)],
)
.await;
let channel_buffer_a = client_a
.channel_store()
.update(cx_a, |store, cx| store.open_channel_buffer(channel_id, cx))
.await
.unwrap();
let channel_buffer_b = client_b
.channel_store()
.update(cx_b, |store, cx| store.open_channel_buffer(channel_id, cx))
.await
.unwrap();
let _channel_buffer_c = client_c
.channel_store()
.update(cx_c, |store, cx| store.open_channel_buffer(channel_id, cx))
.await
.unwrap();
channel_buffer_a.update(cx_a, |buffer, cx| {
buffer.buffer().update(cx, |buffer, cx| {
buffer.edit([(0..0, "1")], None, cx);
})
});
deterministic.run_until_parked();
// Client C can't reconnect.
client_c.override_establish_connection(|_, cx| cx.spawn(|_| future::pending()));
// Server stops.
server.reset().await;
deterministic.advance_clock(RECEIVE_TIMEOUT);
// While the server is down, both clients make an edit.
channel_buffer_a.update(cx_a, |buffer, cx| {
buffer.buffer().update(cx, |buffer, cx| {
buffer.edit([(1..1, "2")], None, cx);
})
});
channel_buffer_b.update(cx_b, |buffer, cx| {
buffer.buffer().update(cx, |buffer, cx| {
buffer.edit([(0..0, "0")], None, cx);
})
});
// Server restarts.
server.start().await.unwrap();
deterministic.advance_clock(CLEANUP_TIMEOUT);
// Clients reconnect. Clients A and B see each other's edits, and see
// that client C has disconnected.
channel_buffer_a.read_with(cx_a, |buffer, cx| {
assert_eq!(buffer.buffer().read(cx).text(), "012");
});
channel_buffer_b.read_with(cx_b, |buffer, cx| {
assert_eq!(buffer.buffer().read(cx).text(), "012");
});
channel_buffer_a.read_with(cx_a, |buffer_a, _| {
channel_buffer_b.read_with(cx_b, |buffer_b, _| {
assert_collaborators(
buffer_a.collaborators(),
&[client_a.user_id(), client_b.user_id()],
);
assert_eq!(buffer_a.collaborators(), buffer_b.collaborators());
});
});
}
//todo!(collab_ui)
// #[gpui::test(iterations = 10)]
// async fn test_following_to_channel_notes_without_a_shared_project(
// deterministic: BackgroundExecutor,
// mut cx_a: &mut TestAppContext,
// mut cx_b: &mut TestAppContext,
// mut cx_c: &mut TestAppContext,
// ) {
// let mut server = TestServer::start(&deterministic).await;
// let client_a = server.create_client(cx_a, "user_a").await;
// let client_b = server.create_client(cx_b, "user_b").await;
// let client_c = server.create_client(cx_c, "user_c").await;
// cx_a.update(editor::init);
// cx_b.update(editor::init);
// cx_c.update(editor::init);
// cx_a.update(collab_ui::channel_view::init);
// cx_b.update(collab_ui::channel_view::init);
// cx_c.update(collab_ui::channel_view::init);
// let channel_1_id = server
// .make_channel(
// "channel-1",
// None,
// (&client_a, cx_a),
// &mut [(&client_b, cx_b), (&client_c, cx_c)],
// )
// .await;
// let channel_2_id = server
// .make_channel(
// "channel-2",
// None,
// (&client_a, cx_a),
// &mut [(&client_b, cx_b), (&client_c, cx_c)],
// )
// .await;
// // Clients A, B, and C join a channel.
// let active_call_a = cx_a.read(ActiveCall::global);
// let active_call_b = cx_b.read(ActiveCall::global);
// let active_call_c = cx_c.read(ActiveCall::global);
// for (call, cx) in [
// (&active_call_a, &mut cx_a),
// (&active_call_b, &mut cx_b),
// (&active_call_c, &mut cx_c),
// ] {
// call.update(*cx, |call, cx| call.join_channel(channel_1_id, cx))
// .await
// .unwrap();
// }
// deterministic.run_until_parked();
// // Clients A, B, and C all open their own unshared projects.
// client_a.fs().insert_tree("/a", json!({})).await;
// client_b.fs().insert_tree("/b", json!({})).await;
// client_c.fs().insert_tree("/c", json!({})).await;
// let (project_a, _) = client_a.build_local_project("/a", cx_a).await;
// let (project_b, _) = client_b.build_local_project("/b", cx_b).await;
// let (project_c, _) = client_b.build_local_project("/c", cx_c).await;
// let workspace_a = client_a.build_workspace(&project_a, cx_a).root(cx_a);
// let workspace_b = client_b.build_workspace(&project_b, cx_b).root(cx_b);
// let _workspace_c = client_c.build_workspace(&project_c, cx_c).root(cx_c);
// active_call_a
// .update(cx_a, |call, cx| call.set_location(Some(&project_a), cx))
// .await
// .unwrap();
// // Client A opens the notes for channel 1.
// let channel_view_1_a = cx_a
// .update(|cx| ChannelView::open(channel_1_id, workspace_a.clone(), cx))
// .await
// .unwrap();
// channel_view_1_a.update(cx_a, |notes, cx| {
// assert_eq!(notes.channel(cx).unwrap().name, "channel-1");
// notes.editor.update(cx, |editor, cx| {
// editor.insert("Hello from A.", cx);
// editor.change_selections(None, cx, |selections| {
// selections.select_ranges(vec![3..4]);
// });
// });
// });
// // Client B follows client A.
// workspace_b
// .update(cx_b, |workspace, cx| {
// workspace.follow(client_a.peer_id().unwrap(), cx).unwrap()
// })
// .await
// .unwrap();
// // Client B is taken to the notes for channel 1, with the same
// // text selected as client A.
// deterministic.run_until_parked();
// let channel_view_1_b = workspace_b.read_with(cx_b, |workspace, cx| {
// assert_eq!(
// workspace.leader_for_pane(workspace.active_pane()),
// Some(client_a.peer_id().unwrap())
// );
// workspace
// .active_item(cx)
// .expect("no active item")
// .downcast::<ChannelView>()
// .expect("active item is not a channel view")
// });
// channel_view_1_b.read_with(cx_b, |notes, cx| {
// assert_eq!(notes.channel(cx).unwrap().name, "channel-1");
// let editor = notes.editor.read(cx);
// assert_eq!(editor.text(cx), "Hello from A.");
// assert_eq!(editor.selections.ranges::<usize>(cx), &[3..4]);
// });
// // Client A opens the notes for channel 2.
// let channel_view_2_a = cx_a
// .update(|cx| ChannelView::open(channel_2_id, workspace_a.clone(), cx))
// .await
// .unwrap();
// channel_view_2_a.read_with(cx_a, |notes, cx| {
// assert_eq!(notes.channel(cx).unwrap().name, "channel-2");
// });
// // Client B is taken to the notes for channel 2.
// deterministic.run_until_parked();
// let channel_view_2_b = workspace_b.read_with(cx_b, |workspace, cx| {
// assert_eq!(
// workspace.leader_for_pane(workspace.active_pane()),
// Some(client_a.peer_id().unwrap())
// );
// workspace
// .active_item(cx)
// .expect("no active item")
// .downcast::<ChannelView>()
// .expect("active item is not a channel view")
// });
// channel_view_2_b.read_with(cx_b, |notes, cx| {
// assert_eq!(notes.channel(cx).unwrap().name, "channel-2");
// });
// }
//todo!(collab_ui)
// #[gpui::test]
// async fn test_channel_buffer_changes(
// deterministic: BackgroundExecutor,
// cx_a: &mut TestAppContext,
// cx_b: &mut TestAppContext,
// ) {
// let mut server = TestServer::start(&deterministic).await;
// let client_a = server.create_client(cx_a, "user_a").await;
// let client_b = server.create_client(cx_b, "user_b").await;
// let channel_id = server
// .make_channel(
// "the-channel",
// None,
// (&client_a, cx_a),
// &mut [(&client_b, cx_b)],
// )
// .await;
// let channel_buffer_a = client_a
// .channel_store()
// .update(cx_a, |store, cx| store.open_channel_buffer(channel_id, cx))
// .await
// .unwrap();
// // Client A makes an edit, and client B should see that the note has changed.
// channel_buffer_a.update(cx_a, |buffer, cx| {
// buffer.buffer().update(cx, |buffer, cx| {
// buffer.edit([(0..0, "1")], None, cx);
// })
// });
// deterministic.run_until_parked();
// let has_buffer_changed = cx_b.update(|cx| {
// client_b
// .channel_store()
// .read(cx)
// .has_channel_buffer_changed(channel_id)
// .unwrap()
// });
// assert!(has_buffer_changed);
// // Opening the buffer should clear the changed flag.
// let project_b = client_b.build_empty_local_project(cx_b);
// let workspace_b = client_b.build_workspace(&project_b, cx_b).root(cx_b);
// let channel_view_b = cx_b
// .update(|cx| ChannelView::open(channel_id, workspace_b.clone(), cx))
// .await
// .unwrap();
// deterministic.run_until_parked();
// let has_buffer_changed = cx_b.update(|cx| {
// client_b
// .channel_store()
// .read(cx)
// .has_channel_buffer_changed(channel_id)
// .unwrap()
// });
// assert!(!has_buffer_changed);
// // Editing the channel while the buffer is open should not show that the buffer has changed.
// channel_buffer_a.update(cx_a, |buffer, cx| {
// buffer.buffer().update(cx, |buffer, cx| {
// buffer.edit([(0..0, "2")], None, cx);
// })
// });
// deterministic.run_until_parked();
// let has_buffer_changed = cx_b.read(|cx| {
// client_b
// .channel_store()
// .read(cx)
// .has_channel_buffer_changed(channel_id)
// .unwrap()
// });
// assert!(!has_buffer_changed);
// deterministic.advance_clock(ACKNOWLEDGE_DEBOUNCE_INTERVAL);
// // Test that the server is tracking things correctly, and we retain our 'not changed'
// // state across a disconnect
// server.simulate_long_connection_interruption(client_b.peer_id().unwrap(), &deterministic);
// let has_buffer_changed = cx_b.read(|cx| {
// client_b
// .channel_store()
// .read(cx)
// .has_channel_buffer_changed(channel_id)
// .unwrap()
// });
// assert!(!has_buffer_changed);
// // Closing the buffer should re-enable change tracking
// cx_b.update(|cx| {
// workspace_b.update(cx, |workspace, cx| {
// workspace.close_all_items_and_panes(&Default::default(), cx)
// });
// drop(channel_view_b)
// });
// deterministic.run_until_parked();
// channel_buffer_a.update(cx_a, |buffer, cx| {
// buffer.buffer().update(cx, |buffer, cx| {
// buffer.edit([(0..0, "3")], None, cx);
// })
// });
// deterministic.run_until_parked();
// let has_buffer_changed = cx_b.read(|cx| {
// client_b
// .channel_store()
// .read(cx)
// .has_channel_buffer_changed(channel_id)
// .unwrap()
// });
// assert!(has_buffer_changed);
// }
#[track_caller]
fn assert_collaborators(collaborators: &HashMap<PeerId, Collaborator>, ids: &[Option<UserId>]) {
let mut user_ids = collaborators
.values()
.map(|collaborator| collaborator.user_id)
.collect::<Vec<_>>();
user_ids.sort();
assert_eq!(
user_ids,
ids.into_iter().map(|id| id.unwrap()).collect::<Vec<_>>()
);
}
fn buffer_text(channel_buffer: &Model<language::Buffer>, cx: &mut TestAppContext) -> String {
channel_buffer.read_with(cx, |buffer, _| buffer.text())
}

View file

@ -0,0 +1,408 @@
use crate::{rpc::RECONNECT_TIMEOUT, tests::TestServer};
use channel::{ChannelChat, ChannelMessageId};
use gpui::{BackgroundExecutor, Model, TestAppContext};
// todo!(notifications)
// #[gpui::test]
// async fn test_basic_channel_messages(
// executor: BackgroundExecutor,
// mut cx_a: &mut TestAppContext,
// mut cx_b: &mut TestAppContext,
// mut cx_c: &mut TestAppContext,
// ) {
// let mut server = TestServer::start(executor.clone()).await;
// let client_a = server.create_client(cx_a, "user_a").await;
// let client_b = server.create_client(cx_b, "user_b").await;
// let client_c = server.create_client(cx_c, "user_c").await;
// let channel_id = server
// .make_channel(
// "the-channel",
// None,
// (&client_a, cx_a),
// &mut [(&client_b, cx_b), (&client_c, cx_c)],
// )
// .await;
// let channel_chat_a = client_a
// .channel_store()
// .update(cx_a, |store, cx| store.open_channel_chat(channel_id, cx))
// .await
// .unwrap();
// let channel_chat_b = client_b
// .channel_store()
// .update(cx_b, |store, cx| store.open_channel_chat(channel_id, cx))
// .await
// .unwrap();
// let message_id = channel_chat_a
// .update(cx_a, |c, cx| {
// c.send_message(
// MessageParams {
// text: "hi @user_c!".into(),
// mentions: vec![(3..10, client_c.id())],
// },
// cx,
// )
// .unwrap()
// })
// .await
// .unwrap();
// channel_chat_a
// .update(cx_a, |c, cx| c.send_message("two".into(), cx).unwrap())
// .await
// .unwrap();
// executor.run_until_parked();
// channel_chat_b
// .update(cx_b, |c, cx| c.send_message("three".into(), cx).unwrap())
// .await
// .unwrap();
// executor.run_until_parked();
// let channel_chat_c = client_c
// .channel_store()
// .update(cx_c, |store, cx| store.open_channel_chat(channel_id, cx))
// .await
// .unwrap();
// for (chat, cx) in [
// (&channel_chat_a, &mut cx_a),
// (&channel_chat_b, &mut cx_b),
// (&channel_chat_c, &mut cx_c),
// ] {
// chat.update(*cx, |c, _| {
// assert_eq!(
// c.messages()
// .iter()
// .map(|m| (m.body.as_str(), m.mentions.as_slice()))
// .collect::<Vec<_>>(),
// vec![
// ("hi @user_c!", [(3..10, client_c.id())].as_slice()),
// ("two", &[]),
// ("three", &[])
// ],
// "results for user {}",
// c.client().id(),
// );
// });
// }
// client_c.notification_store().update(cx_c, |store, _| {
// assert_eq!(store.notification_count(), 2);
// assert_eq!(store.unread_notification_count(), 1);
// assert_eq!(
// store.notification_at(0).unwrap().notification,
// Notification::ChannelMessageMention {
// message_id,
// sender_id: client_a.id(),
// channel_id,
// }
// );
// assert_eq!(
// store.notification_at(1).unwrap().notification,
// Notification::ChannelInvitation {
// channel_id,
// channel_name: "the-channel".to_string(),
// inviter_id: client_a.id()
// }
// );
// });
// }
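// Verifies that messages a client sent while disconnected fail locally but
// are resent on reconnect, ending up ordered after the messages the server
// received in the meantime.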
#[gpui::test]
async fn test_rejoin_channel_chat(
executor: BackgroundExecutor,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
) {
let mut server = TestServer::start(executor.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
let channel_id = server
.make_channel(
"the-channel",
None,
(&client_a, cx_a),
&mut [(&client_b, cx_b)],
)
.await;
let channel_chat_a = client_a
.channel_store()
.update(cx_a, |store, cx| store.open_channel_chat(channel_id, cx))
.await
.unwrap();
let channel_chat_b = client_b
.channel_store()
.update(cx_b, |store, cx| store.open_channel_chat(channel_id, cx))
.await
.unwrap();
channel_chat_a
.update(cx_a, |c, cx| c.send_message("one".into(), cx).unwrap())
.await
.unwrap();
channel_chat_b
.update(cx_b, |c, cx| c.send_message("two".into(), cx).unwrap())
.await
.unwrap();
server.forbid_connections();
server.disconnect_client(client_a.peer_id().unwrap());
// While client A is disconnected, clients A and B both send new messages.
channel_chat_a
.update(cx_a, |c, cx| c.send_message("three".into(), cx).unwrap())
.await
.unwrap_err();
channel_chat_a
.update(cx_a, |c, cx| c.send_message("four".into(), cx).unwrap())
.await
.unwrap_err();
channel_chat_b
.update(cx_b, |c, cx| c.send_message("five".into(), cx).unwrap())
.await
.unwrap();
channel_chat_b
.update(cx_b, |c, cx| c.send_message("six".into(), cx).unwrap())
.await
.unwrap();
// Client A reconnects.
server.allow_connections();
executor.advance_clock(RECONNECT_TIMEOUT);
// Client A fetches the messages that were sent while they were disconnected
// and resends their own messages which failed to send.
let expected_messages = &["one", "two", "five", "six", "three", "four"];
assert_messages(&channel_chat_a, expected_messages, cx_a);
assert_messages(&channel_chat_b, expected_messages, cx_b);
}
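// Verifies that removing a message hides it from connected clients and that a
// client joining the chat afterwards never sees it.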
#[gpui::test]
async fn test_remove_channel_message(
executor: BackgroundExecutor,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
cx_c: &mut TestAppContext,
) {
let mut server = TestServer::start(executor.clone()).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
let client_c = server.create_client(cx_c, "user_c").await;
let channel_id = server
.make_channel(
"the-channel",
None,
(&client_a, cx_a),
&mut [(&client_b, cx_b), (&client_c, cx_c)],
)
.await;
let channel_chat_a = client_a
.channel_store()
.update(cx_a, |store, cx| store.open_channel_chat(channel_id, cx))
.await
.unwrap();
let channel_chat_b = client_b
.channel_store()
.update(cx_b, |store, cx| store.open_channel_chat(channel_id, cx))
.await
.unwrap();
// Client A sends some messages.
channel_chat_a
.update(cx_a, |c, cx| c.send_message("one".into(), cx).unwrap())
.await
.unwrap();
channel_chat_a
.update(cx_a, |c, cx| c.send_message("two".into(), cx).unwrap())
.await
.unwrap();
channel_chat_a
.update(cx_a, |c, cx| c.send_message("three".into(), cx).unwrap())
.await
.unwrap();
// Clients A and B see all of the messages.
executor.run_until_parked();
let expected_messages = &["one", "two", "three"];
assert_messages(&channel_chat_a, expected_messages, cx_a);
assert_messages(&channel_chat_b, expected_messages, cx_b);
// Client A deletes one of their messages.
channel_chat_a
.update(cx_a, |c, cx| {
let ChannelMessageId::Saved(id) = c.message(1).id else {
panic!("message not saved")
};
c.remove_message(id, cx)
})
.await
.unwrap();
// Client B sees that the message is gone.
executor.run_until_parked();
let expected_messages = &["one", "three"];
assert_messages(&channel_chat_a, expected_messages, cx_a);
assert_messages(&channel_chat_b, expected_messages, cx_b);
// Client C joins the channel chat, and does not see the deleted message.
let channel_chat_c = client_c
.channel_store()
.update(cx_c, |store, cx| store.open_channel_chat(channel_id, cx))
.await
.unwrap();
assert_messages(&channel_chat_c, expected_messages, cx_c);
}
#[track_caller]
fn assert_messages(chat: &Model<ChannelChat>, messages: &[&str], cx: &mut TestAppContext) {
// todo!(don't directly borrow here)
assert_eq!(
chat.read_with(cx, |chat, _| {
chat.messages()
.iter()
.map(|m| m.body.clone())
.collect::<Vec<_>>()
}),
messages
);
}
//todo!(collab_ui)
// #[gpui::test]
// async fn test_channel_message_changes(
// executor: BackgroundExecutor,
// cx_a: &mut TestAppContext,
// cx_b: &mut TestAppContext,
// ) {
// let mut server = TestServer::start(&executor).await;
// let client_a = server.create_client(cx_a, "user_a").await;
// let client_b = server.create_client(cx_b, "user_b").await;
// let channel_id = server
// .make_channel(
// "the-channel",
// None,
// (&client_a, cx_a),
// &mut [(&client_b, cx_b)],
// )
// .await;
// // Client A sends a message, client B should see that there is a new message.
// let channel_chat_a = client_a
// .channel_store()
// .update(cx_a, |store, cx| store.open_channel_chat(channel_id, cx))
// .await
// .unwrap();
// channel_chat_a
// .update(cx_a, |c, cx| c.send_message("one".into(), cx).unwrap())
// .await
// .unwrap();
// executor.run_until_parked();
// let b_has_messages = cx_b.read_with(|cx| {
// client_b
// .channel_store()
// .read(cx)
// .has_new_messages(channel_id)
// .unwrap()
// });
// assert!(b_has_messages);
// // Opening the chat should clear the changed flag.
// cx_b.update(|cx| {
// collab_ui::init(&client_b.app_state, cx);
// });
// let project_b = client_b.build_empty_local_project(cx_b);
// let workspace_b = client_b.build_workspace(&project_b, cx_b).root(cx_b);
// let chat_panel_b = workspace_b.update(cx_b, |workspace, cx| ChatPanel::new(workspace, cx));
// chat_panel_b
// .update(cx_b, |chat_panel, cx| {
// chat_panel.set_active(true, cx);
// chat_panel.select_channel(channel_id, None, cx)
// })
// .await
// .unwrap();
// executor.run_until_parked();
// let b_has_messages = cx_b.read_with(|cx| {
// client_b
// .channel_store()
// .read(cx)
// .has_new_messages(channel_id)
// .unwrap()
// });
// assert!(!b_has_messages);
// // Sending a message while the chat is open should not change the flag.
// channel_chat_a
// .update(cx_a, |c, cx| c.send_message("two".into(), cx).unwrap())
// .await
// .unwrap();
// executor.run_until_parked();
// let b_has_messages = cx_b.read_with(|cx| {
// client_b
// .channel_store()
// .read(cx)
// .has_new_messages(channel_id)
// .unwrap()
// });
// assert!(!b_has_messages);
// // Sending a message while the chat is closed should change the flag.
// chat_panel_b.update(cx_b, |chat_panel, cx| {
// chat_panel.set_active(false, cx);
// });
// // With the panel inactive, the next message should set the flag again.
// channel_chat_a
// .update(cx_a, |c, cx| c.send_message("three".into(), cx).unwrap())
// .await
// .unwrap();
// executor.run_until_parked();
// let b_has_messages = cx_b.read_with(|cx| {
// client_b
// .channel_store()
// .read(cx)
// .has_new_messages(channel_id)
// .unwrap()
// });
// assert!(b_has_messages);
// // Closing the chat should re-enable change tracking
// cx_b.update(|_| drop(chat_panel_b));
// channel_chat_a
// .update(cx_a, |c, cx| c.send_message("four".into(), cx).unwrap())
// .await
// .unwrap();
// executor.run_until_parked();
// let b_has_messages = cx_b.read_with(|cx| {
// client_b
// .channel_store()
// .read(cx)
// .has_new_messages(channel_id)
// .unwrap()
// });
// assert!(b_has_messages);
// }

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,160 @@
//todo!(notifications)
// use crate::tests::TestServer;
// use gpui::{executor::Deterministic, TestAppContext};
// use notifications::NotificationEvent;
// use parking_lot::Mutex;
// use rpc::{proto, Notification};
// use std::sync::Arc;
// #[gpui::test]
// async fn test_notifications(
// deterministic: Arc<Deterministic>,
// cx_a: &mut TestAppContext,
// cx_b: &mut TestAppContext,
// ) {
// deterministic.forbid_parking();
// let mut server = TestServer::start(&deterministic).await;
// let client_a = server.create_client(cx_a, "user_a").await;
// let client_b = server.create_client(cx_b, "user_b").await;
// let notification_events_a = Arc::new(Mutex::new(Vec::new()));
// let notification_events_b = Arc::new(Mutex::new(Vec::new()));
// client_a.notification_store().update(cx_a, |_, cx| {
// let events = notification_events_a.clone();
// cx.subscribe(&cx.handle(), move |_, _, event, _| {
// events.lock().push(event.clone());
// })
// .detach()
// });
// client_b.notification_store().update(cx_b, |_, cx| {
// let events = notification_events_b.clone();
// cx.subscribe(&cx.handle(), move |_, _, event, _| {
// events.lock().push(event.clone());
// })
// .detach()
// });
// // Client A sends a contact request to client B.
// client_a
// .user_store()
// .update(cx_a, |store, cx| store.request_contact(client_b.id(), cx))
// .await
// .unwrap();
// // Client B receives a contact request notification and responds to the
// // request, accepting it.
// deterministic.run_until_parked();
// client_b.notification_store().update(cx_b, |store, cx| {
// assert_eq!(store.notification_count(), 1);
// assert_eq!(store.unread_notification_count(), 1);
// let entry = store.notification_at(0).unwrap();
// assert_eq!(
// entry.notification,
// Notification::ContactRequest {
// sender_id: client_a.id()
// }
// );
// assert!(!entry.is_read);
// assert_eq!(
// &notification_events_b.lock()[0..],
// &[
// NotificationEvent::NewNotification {
// entry: entry.clone(),
// },
// NotificationEvent::NotificationsUpdated {
// old_range: 0..0,
// new_count: 1
// }
// ]
// );
// store.respond_to_notification(entry.notification.clone(), true, cx);
// });
// // Client B sees the notification is now read, and that they responded.
// deterministic.run_until_parked();
// client_b.notification_store().read_with(cx_b, |store, _| {
// assert_eq!(store.notification_count(), 1);
// assert_eq!(store.unread_notification_count(), 0);
// let entry = store.notification_at(0).unwrap();
// assert!(entry.is_read);
// assert_eq!(entry.response, Some(true));
// assert_eq!(
// &notification_events_b.lock()[2..],
// &[
// NotificationEvent::NotificationRead {
// entry: entry.clone(),
// },
// NotificationEvent::NotificationsUpdated {
// old_range: 0..1,
// new_count: 1
// }
// ]
// );
// });
// // Client A receives a notification that client B accepted their request.
// client_a.notification_store().read_with(cx_a, |store, _| {
// assert_eq!(store.notification_count(), 1);
// assert_eq!(store.unread_notification_count(), 1);
// let entry = store.notification_at(0).unwrap();
// assert_eq!(
// entry.notification,
// Notification::ContactRequestAccepted {
// responder_id: client_b.id()
// }
// );
// assert!(!entry.is_read);
// });
// // Client A creates a channel and invites client B to be a member.
// let channel_id = client_a
// .channel_store()
// .update(cx_a, |store, cx| {
// store.create_channel("the-channel", None, cx)
// })
// .await
// .unwrap();
// client_a
// .channel_store()
// .update(cx_a, |store, cx| {
// store.invite_member(channel_id, client_b.id(), proto::ChannelRole::Member, cx)
// })
// .await
// .unwrap();
// // Client B receives a channel invitation notification and responds to the
// // invitation, accepting it.
// deterministic.run_until_parked();
// client_b.notification_store().update(cx_b, |store, cx| {
// assert_eq!(store.notification_count(), 2);
// assert_eq!(store.unread_notification_count(), 1);
// let entry = store.notification_at(0).unwrap();
// assert_eq!(
// entry.notification,
// Notification::ChannelInvitation {
// channel_id,
// channel_name: "the-channel".to_string(),
// inviter_id: client_a.id()
// }
// );
// assert!(!entry.is_read);
// store.respond_to_notification(entry.notification.clone(), true, cx);
// });
// // Client B sees the notification is now read, and that they responded.
// deterministic.run_until_parked();
// client_b.notification_store().read_with(cx_b, |store, _| {
// assert_eq!(store.notification_count(), 2);
// assert_eq!(store.unread_notification_count(), 0);
// let entry = store.notification_at(0).unwrap();
// assert!(entry.is_read);
// assert_eq!(entry.response, Some(true));
// });
// }

View file

@ -0,0 +1,296 @@
use crate::db::ChannelRole;
use super::{run_randomized_test, RandomizedTest, TestClient, TestError, TestServer, UserTestPlan};
use anyhow::Result;
use async_trait::async_trait;
use gpui::{BackgroundExecutor, TestAppContext};
use rand::prelude::*;
use serde_derive::{Deserialize, Serialize};
use std::{
ops::{Deref, DerefMut, Range},
rc::Rc,
sync::Arc,
};
use text::Bias;
#[gpui::test(
iterations = 100,
on_failure = "crate::tests::save_randomized_test_plan"
)]
async fn test_random_channel_buffers(
cx: &mut TestAppContext,
executor: BackgroundExecutor,
rng: StdRng,
) {
run_randomized_test::<RandomChannelBufferTest>(cx, executor, rng).await;
}
struct RandomChannelBufferTest;
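// One randomized action against a channel's notes buffer. Operations derive
// Serialize/Deserialize so a failing run's plan can be saved and replayed.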
#[derive(Clone, Serialize, Deserialize)]
enum ChannelBufferOperation {
JoinChannelNotes {
channel_name: String,
},
LeaveChannelNotes {
channel_name: String,
},
EditChannelNotes {
channel_name: String,
edits: Vec<(Range<usize>, Arc<str>)>,
},
Noop,
}
const CHANNEL_COUNT: usize = 3;
#[async_trait(?Send)]
impl RandomizedTest for RandomChannelBufferTest {
type Operation = ChannelBufferOperation;
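    // Seed the database with CHANNEL_COUNT root channels owned by the first
    // user, and make every other user a member of each.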
async fn initialize(server: &mut TestServer, users: &[UserTestPlan]) {
let db = &server.app_state.db;
for ix in 0..CHANNEL_COUNT {
let id = db
.create_root_channel(&format!("channel-{ix}"), users[0].user_id)
.await
.unwrap();
for user in &users[1..] {
db.invite_channel_member(id, user.user_id, users[0].user_id, ChannelRole::Member)
.await
.unwrap();
db.respond_to_channel_invite(id, user.user_id, true)
.await
.unwrap();
}
}
}
fn generate_operation(
client: &TestClient,
rng: &mut StdRng,
_: &mut UserTestPlan,
cx: &TestAppContext,
) -> ChannelBufferOperation {
let channel_store = client.channel_store().clone();
let mut channel_buffers = client.channel_buffers();
// When signed out, we can't do anything unless a channel buffer is
// already open.
if channel_buffers.deref_mut().is_empty()
&& channel_store.read_with(cx, |store, _| store.channel_count() == 0)
{
return ChannelBufferOperation::Noop;
}
loop {
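            // Weighted choice: ~30% join the notes of a channel with no open
            // buffer, ~11% close an open buffer, otherwise edit an open buffer.
            // Arms whose preconditions fail fall through and we re-roll.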
match rng.gen_range(0..100_u32) {
0..=29 => {
let channel_name = client.channel_store().read_with(cx, |store, cx| {
store.ordered_channels().find_map(|(_, channel)| {
if store.has_open_channel_buffer(channel.id, cx) {
None
} else {
Some(channel.name.clone())
}
})
});
if let Some(channel_name) = channel_name {
break ChannelBufferOperation::JoinChannelNotes { channel_name };
}
}
30..=40 => {
if let Some(buffer) = channel_buffers.deref().iter().choose(rng) {
let channel_name =
buffer.read_with(cx, |b, cx| b.channel(cx).unwrap().name.clone());
break ChannelBufferOperation::LeaveChannelNotes { channel_name };
}
}
_ => {
if let Some(buffer) = channel_buffers.deref().iter().choose(rng) {
break buffer.read_with(cx, |b, cx| {
let channel_name = b.channel(cx).unwrap().name.clone();
let edits = b
.buffer()
.read_with(cx, |buffer, _| buffer.get_random_edits(rng, 3));
ChannelBufferOperation::EditChannelNotes {
channel_name,
edits,
}
});
}
}
}
}
}
async fn apply_operation(
client: &TestClient,
operation: ChannelBufferOperation,
cx: &mut TestAppContext,
) -> Result<(), TestError> {
match operation {
ChannelBufferOperation::JoinChannelNotes { channel_name } => {
let buffer = client.channel_store().update(cx, |store, cx| {
let channel_id = store
.ordered_channels()
.find(|(_, c)| c.name == channel_name)
.unwrap()
.1
.id;
if store.has_open_channel_buffer(channel_id, cx) {
Err(TestError::Inapplicable)
} else {
Ok(store.open_channel_buffer(channel_id, cx))
}
})?;
log::info!(
"{}: opening notes for channel {channel_name}",
client.username
);
client.channel_buffers().deref_mut().insert(buffer.await?);
}
ChannelBufferOperation::LeaveChannelNotes { channel_name } => {
let buffer = cx.update(|cx| {
let mut left_buffer = Err(TestError::Inapplicable);
client.channel_buffers().deref_mut().retain(|buffer| {
if buffer.read(cx).channel(cx).unwrap().name == channel_name {
left_buffer = Ok(buffer.clone());
false
} else {
true
}
});
left_buffer
})?;
log::info!(
"{}: closing notes for channel {channel_name}",
client.username
);
cx.update(|_| drop(buffer));
}
ChannelBufferOperation::EditChannelNotes {
channel_name,
edits,
} => {
let channel_buffer = cx
.read(|cx| {
client
.channel_buffers()
.deref()
.iter()
.find(|buffer| {
buffer.read(cx).channel(cx).unwrap().name == channel_name
})
.cloned()
})
.ok_or_else(|| TestError::Inapplicable)?;
log::info!(
"{}: editing notes for channel {channel_name} with {:?}",
client.username,
edits
);
channel_buffer.update(cx, |buffer, cx| {
let buffer = buffer.buffer();
buffer.update(cx, |buffer, cx| {
let snapshot = buffer.snapshot();
buffer.edit(
edits.into_iter().map(|(range, text)| {
let start = snapshot.clip_offset(range.start, Bias::Left);
let end = snapshot.clip_offset(range.end, Bias::Right);
(start..end, text)
}),
None,
cx,
);
});
});
}
ChannelBufferOperation::Noop => Err(TestError::Inapplicable)?,
}
Ok(())
}
async fn on_client_added(client: &Rc<TestClient>, cx: &mut TestAppContext) {
let channel_store = client.channel_store();
while channel_store.read_with(cx, |store, _| store.channel_count() == 0) {
// todo!(notifications)
// channel_store.next_notification(cx).await;
}
}
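    // Once the system quiesces, check convergence: disconnected buffers are
    // dropped first, then every client's copy of each channel buffer must have
    // identical text, and all clients must agree with the server about who is
    // collaborating in each buffer.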
async fn on_quiesce(server: &mut TestServer, clients: &mut [(Rc<TestClient>, TestAppContext)]) {
let channels = server.app_state.db.all_channels().await.unwrap();
for (client, client_cx) in clients.iter_mut() {
client_cx.update(|cx| {
client
.channel_buffers()
.deref_mut()
.retain(|b| b.read(cx).is_connected());
});
}
for (channel_id, channel_name) in channels {
let mut prev_text: Option<(u64, String)> = None;
let mut collaborator_user_ids = server
.app_state
.db
.get_channel_buffer_collaborators(channel_id)
.await
.unwrap()
.into_iter()
.map(|id| id.to_proto())
.collect::<Vec<_>>();
collaborator_user_ids.sort();
for (client, client_cx) in clients.iter() {
let user_id = client.user_id().unwrap();
client_cx.read(|cx| {
if let Some(channel_buffer) = client
.channel_buffers()
.deref()
.iter()
.find(|b| b.read(cx).channel_id == channel_id.to_proto())
{
let channel_buffer = channel_buffer.read(cx);
// Assert that channel buffer's text matches other clients' copies.
let text = channel_buffer.buffer().read(cx).text();
if let Some((prev_user_id, prev_text)) = &prev_text {
assert_eq!(
&text,
prev_text,
"client {user_id} has different text than client {prev_user_id} for channel {channel_name}",
);
} else {
prev_text = Some((user_id, text.clone()));
}
// Assert that all clients and the server agree about who is present in the
// channel buffer.
let collaborators = channel_buffer.collaborators();
let mut user_ids =
collaborators.values().map(|c| c.user_id).collect::<Vec<_>>();
user_ids.sort();
assert_eq!(
user_ids,
collaborator_user_ids,
"client {user_id} has different user ids for channel {channel_name} than the server",
);
}
});
}
}
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,677 @@
use crate::{
db::{self, NewUserParams, UserId},
rpc::{CLEANUP_TIMEOUT, RECONNECT_TIMEOUT},
tests::{TestClient, TestServer},
};
use async_trait::async_trait;
use futures::StreamExt;
use gpui::{BackgroundExecutor, Task, TestAppContext};
use parking_lot::Mutex;
use rand::prelude::*;
use rpc::RECEIVE_TIMEOUT;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use settings::SettingsStore;
use std::{
env,
path::PathBuf,
rc::Rc,
sync::{
atomic::{AtomicBool, Ordering::SeqCst},
Arc,
},
};
lazy_static::lazy_static! {
static ref PLAN_LOAD_PATH: Option<PathBuf> = path_env_var("LOAD_PLAN");
static ref PLAN_SAVE_PATH: Option<PathBuf> = path_env_var("SAVE_PLAN");
static ref MAX_PEERS: usize = env::var("MAX_PEERS")
.map(|i| i.parse().expect("invalid `MAX_PEERS` variable"))
.unwrap_or(3);
static ref MAX_OPERATIONS: usize = env::var("OPERATIONS")
.map(|i| i.parse().expect("invalid `OPERATIONS` variable"))
.unwrap_or(10);
}
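// The statics above are driven by environment variables: LOAD_PLAN and SAVE_PLAN
// point at a JSON test plan to replay or record, MAX_PEERS caps the number of
// simulated users (default 3), and OPERATIONS caps the schedule length (default 10).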
static LOADED_PLAN_JSON: Mutex<Option<Vec<u8>>> = Mutex::new(None);
static LAST_PLAN: Mutex<Option<Box<dyn Send + FnOnce() -> Vec<u8>>>> = Mutex::new(None);
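// A replayable schedule of server and client operations for one randomized run,
// along with per-user state and which risky behaviors (restarts, disconnects,
// reconnects) are allowed.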
struct TestPlan<T: RandomizedTest> {
rng: StdRng,
replay: bool,
stored_operations: Vec<(StoredOperation<T::Operation>, Arc<AtomicBool>)>,
max_operations: usize,
operation_ix: usize,
users: Vec<UserTestPlan>,
next_batch_id: usize,
allow_server_restarts: bool,
allow_client_reconnection: bool,
allow_client_disconnection: bool,
}
pub struct UserTestPlan {
pub user_id: UserId,
pub username: String,
pub allow_client_reconnection: bool,
pub allow_client_disconnection: bool,
next_root_id: usize,
operation_ix: usize,
online: bool,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(untagged)]
enum StoredOperation<T> {
Server(ServerOperation),
Client {
user_id: UserId,
batch_id: usize,
operation: T,
},
}
#[derive(Clone, Debug, Serialize, Deserialize)]
enum ServerOperation {
AddConnection {
user_id: UserId,
},
RemoveConnection {
user_id: UserId,
},
BounceConnection {
user_id: UserId,
},
RestartServer,
MutateClients {
batch_id: usize,
#[serde(skip_serializing)]
#[serde(skip_deserializing)]
user_ids: Vec<UserId>,
quiesce: bool,
},
}
pub enum TestError {
Inapplicable,
Other(anyhow::Error),
}
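// Implemented by each randomized collaboration test: how to seed the server,
// generate and apply per-client operations, and verify invariants on quiesce.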
#[async_trait(?Send)]
pub trait RandomizedTest: 'static + Sized {
type Operation: Send + Clone + Serialize + DeserializeOwned;
fn generate_operation(
client: &TestClient,
rng: &mut StdRng,
plan: &mut UserTestPlan,
cx: &TestAppContext,
) -> Self::Operation;
async fn apply_operation(
client: &TestClient,
operation: Self::Operation,
cx: &mut TestAppContext,
) -> Result<(), TestError>;
async fn initialize(server: &mut TestServer, users: &[UserTestPlan]);
async fn on_client_added(client: &Rc<TestClient>, cx: &mut TestAppContext);
    async fn on_quiesce(server: &mut TestServer, clients: &mut [(Rc<TestClient>, TestAppContext)]);
}
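// Drives a full randomized run: starts a test server, applies scheduled server
// operations (adding, removing, and bouncing clients; restarting the server),
// waits for the per-client tasks to finish, runs the test's final quiesce
// checks, and optionally saves the plan for later replay.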
pub async fn run_randomized_test<T: RandomizedTest>(
cx: &mut TestAppContext,
executor: BackgroundExecutor,
rng: StdRng,
) {
let mut server = TestServer::start(executor.clone()).await;
let plan = TestPlan::<T>::new(&mut server, rng).await;
LAST_PLAN.lock().replace({
let plan = plan.clone();
Box::new(move || plan.lock().serialize())
});
let mut clients = Vec::new();
let mut client_tasks = Vec::new();
let mut operation_channels = Vec::new();
loop {
let Some((next_operation, applied)) = plan.lock().next_server_operation(&clients) else {
break;
};
applied.store(true, SeqCst);
let did_apply = TestPlan::apply_server_operation(
plan.clone(),
executor.clone(),
&mut server,
&mut clients,
&mut client_tasks,
&mut operation_channels,
next_operation,
cx,
)
.await;
if !did_apply {
applied.store(false, SeqCst);
}
}
drop(operation_channels);
executor.start_waiting();
futures::future::join_all(client_tasks).await;
executor.finish_waiting();
executor.run_until_parked();
T::on_quiesce(&mut server, &mut clients).await;
for (client, cx) in clients {
cx.update(|cx| {
let store = cx.remove_global::<SettingsStore>();
cx.clear_globals();
cx.set_global(store);
drop(client);
});
}
executor.run_until_parked();
if let Some(path) = &*PLAN_SAVE_PATH {
eprintln!("saved test plan to path {:?}", path);
std::fs::write(path, plan.lock().serialize()).unwrap();
}
}
pub fn save_randomized_test_plan() {
if let Some(serialize_plan) = LAST_PLAN.lock().take() {
if let Some(path) = &*PLAN_SAVE_PATH {
eprintln!("saved test plan to path {:?}", path);
std::fs::write(path, serialize_plan()).unwrap();
}
}
}
impl<T: RandomizedTest> TestPlan<T> {
pub async fn new(server: &mut TestServer, mut rng: StdRng) -> Arc<Mutex<Self>> {
let allow_server_restarts = rng.gen_bool(0.7);
let allow_client_reconnection = rng.gen_bool(0.7);
let allow_client_disconnection = rng.gen_bool(0.1);
let mut users = Vec::new();
for ix in 0..*MAX_PEERS {
let username = format!("user-{}", ix + 1);
let user_id = server
.app_state
.db
.create_user(
&format!("{username}@example.com"),
false,
NewUserParams {
github_login: username.clone(),
github_user_id: ix as i32,
},
)
.await
.unwrap()
.user_id;
users.push(UserTestPlan {
user_id,
username,
online: false,
next_root_id: 0,
operation_ix: 0,
allow_client_disconnection,
allow_client_reconnection,
});
}
T::initialize(server, &users).await;
let plan = Arc::new(Mutex::new(Self {
replay: false,
allow_server_restarts,
allow_client_reconnection,
allow_client_disconnection,
stored_operations: Vec::new(),
operation_ix: 0,
next_batch_id: 0,
max_operations: *MAX_OPERATIONS,
users,
rng,
}));
if let Some(path) = &*PLAN_LOAD_PATH {
let json = LOADED_PLAN_JSON
.lock()
.get_or_insert_with(|| {
eprintln!("loaded test plan from path {:?}", path);
std::fs::read(path).unwrap()
})
.clone();
plan.lock().deserialize(json);
}
plan
}
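    // Loads a recorded plan for replay. MutateClients entries recover their
    // user_ids from the client operations that follow them in the file, since
    // that field is skipped during serialization.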
fn deserialize(&mut self, json: Vec<u8>) {
let stored_operations: Vec<StoredOperation<T::Operation>> =
serde_json::from_slice(&json).unwrap();
self.replay = true;
self.stored_operations = stored_operations
.iter()
.cloned()
.enumerate()
.map(|(i, mut operation)| {
let did_apply = Arc::new(AtomicBool::new(false));
if let StoredOperation::Server(ServerOperation::MutateClients {
batch_id: current_batch_id,
user_ids,
..
}) = &mut operation
{
assert!(user_ids.is_empty());
user_ids.extend(stored_operations[i + 1..].iter().filter_map(|operation| {
if let StoredOperation::Client {
user_id, batch_id, ..
} = operation
{
if batch_id == current_batch_id {
return Some(user_id);
}
}
None
}));
user_ids.sort_unstable();
}
(operation, did_apply)
})
.collect()
}
fn serialize(&mut self) -> Vec<u8> {
// Format each operation as one line
let mut json = Vec::new();
json.push(b'[');
for (operation, applied) in &self.stored_operations {
if !applied.load(SeqCst) {
continue;
}
if json.len() > 1 {
json.push(b',');
}
json.extend_from_slice(b"\n ");
serde_json::to_writer(&mut json, operation).unwrap();
}
json.extend_from_slice(b"\n]\n");
json
}
fn next_server_operation(
&mut self,
clients: &[(Rc<TestClient>, TestAppContext)],
) -> Option<(ServerOperation, Arc<AtomicBool>)> {
if self.replay {
while let Some(stored_operation) = self.stored_operations.get(self.operation_ix) {
self.operation_ix += 1;
if let (StoredOperation::Server(operation), applied) = stored_operation {
return Some((operation.clone(), applied.clone()));
}
}
None
} else {
let operation = self.generate_server_operation(clients)?;
let applied = Arc::new(AtomicBool::new(false));
self.stored_operations
.push((StoredOperation::Server(operation.clone()), applied.clone()));
Some((operation, applied))
}
}
fn next_client_operation(
&mut self,
client: &TestClient,
current_batch_id: usize,
cx: &TestAppContext,
) -> Option<(T::Operation, Arc<AtomicBool>)> {
let current_user_id = client.current_user_id(cx);
let user_ix = self
.users
.iter()
.position(|user| user.user_id == current_user_id)
.unwrap();
let user_plan = &mut self.users[user_ix];
if self.replay {
while let Some(stored_operation) = self.stored_operations.get(user_plan.operation_ix) {
user_plan.operation_ix += 1;
if let (
StoredOperation::Client {
user_id, operation, ..
},
applied,
) = stored_operation
{
if user_id == &current_user_id {
return Some((operation.clone(), applied.clone()));
}
}
}
None
} else {
if self.operation_ix == self.max_operations {
return None;
}
self.operation_ix += 1;
let operation = T::generate_operation(
client,
&mut self.rng,
self.users
.iter_mut()
.find(|user| user.user_id == current_user_id)
.unwrap(),
cx,
);
let applied = Arc::new(AtomicBool::new(false));
self.stored_operations.push((
StoredOperation::Client {
user_id: current_user_id,
batch_id: current_batch_id,
operation: operation.clone(),
},
applied.clone(),
));
Some((operation, applied))
}
}
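    // Roughly: 30% add a connection, 5% remove a client, 5% bounce a client,
    // 5% restart the server, and the remaining 55% dispatch a batch of client
    // operations; rolls whose guards fail fall through to mutating clients or
    // are retried.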
fn generate_server_operation(
&mut self,
clients: &[(Rc<TestClient>, TestAppContext)],
) -> Option<ServerOperation> {
if self.operation_ix == self.max_operations {
return None;
}
Some(loop {
break match self.rng.gen_range(0..100) {
0..=29 if clients.len() < self.users.len() => {
let user = self
.users
.iter()
.filter(|u| !u.online)
.choose(&mut self.rng)
.unwrap();
self.operation_ix += 1;
ServerOperation::AddConnection {
user_id: user.user_id,
}
}
30..=34 if clients.len() > 1 && self.allow_client_disconnection => {
let (client, cx) = &clients[self.rng.gen_range(0..clients.len())];
let user_id = client.current_user_id(cx);
self.operation_ix += 1;
ServerOperation::RemoveConnection { user_id }
}
35..=39 if clients.len() > 1 && self.allow_client_reconnection => {
let (client, cx) = &clients[self.rng.gen_range(0..clients.len())];
let user_id = client.current_user_id(cx);
self.operation_ix += 1;
ServerOperation::BounceConnection { user_id }
}
40..=44 if self.allow_server_restarts && clients.len() > 1 => {
self.operation_ix += 1;
ServerOperation::RestartServer
}
_ if !clients.is_empty() => {
let count = self
.rng
.gen_range(1..10)
.min(self.max_operations - self.operation_ix);
let batch_id = util::post_inc(&mut self.next_batch_id);
let mut user_ids = (0..count)
.map(|_| {
let ix = self.rng.gen_range(0..clients.len());
let (client, cx) = &clients[ix];
client.current_user_id(cx)
})
.collect::<Vec<_>>();
user_ids.sort_unstable();
ServerOperation::MutateClients {
user_ids,
batch_id,
quiesce: self.rng.gen_bool(0.7),
}
}
_ => continue,
};
})
}
async fn apply_server_operation(
plan: Arc<Mutex<Self>>,
deterministic: BackgroundExecutor,
server: &mut TestServer,
clients: &mut Vec<(Rc<TestClient>, TestAppContext)>,
client_tasks: &mut Vec<Task<()>>,
operation_channels: &mut Vec<futures::channel::mpsc::UnboundedSender<usize>>,
operation: ServerOperation,
cx: &mut TestAppContext,
) -> bool {
match operation {
ServerOperation::AddConnection { user_id } => {
let username;
{
let mut plan = plan.lock();
let user = plan.user(user_id);
if user.online {
return false;
}
user.online = true;
username = user.username.clone();
};
log::info!("adding new connection for {}", username);
let mut client_cx = cx.new_app();
let (operation_tx, operation_rx) = futures::channel::mpsc::unbounded();
let client = Rc::new(server.create_client(&mut client_cx, &username).await);
operation_channels.push(operation_tx);
clients.push((client.clone(), client_cx.clone()));
let foreground_executor = client_cx.foreground_executor().clone();
let simulate_client =
Self::simulate_client(plan.clone(), client, operation_rx, client_cx);
client_tasks.push(foreground_executor.spawn(simulate_client));
log::info!("added connection for {}", username);
}
ServerOperation::RemoveConnection {
user_id: removed_user_id,
} => {
log::info!("simulating full disconnection of user {}", removed_user_id);
let client_ix = clients
.iter()
.position(|(client, cx)| client.current_user_id(cx) == removed_user_id);
let Some(client_ix) = client_ix else {
return false;
};
let user_connection_ids = server
.connection_pool
.lock()
.user_connection_ids(removed_user_id)
.collect::<Vec<_>>();
assert_eq!(user_connection_ids.len(), 1);
let removed_peer_id = user_connection_ids[0].into();
let (client, client_cx) = clients.remove(client_ix);
let client_task = client_tasks.remove(client_ix);
operation_channels.remove(client_ix);
server.forbid_connections();
server.disconnect_client(removed_peer_id);
deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
deterministic.start_waiting();
log::info!("waiting for user {} to exit...", removed_user_id);
client_task.await;
deterministic.finish_waiting();
server.allow_connections();
for project in client.remote_projects().iter() {
project.read_with(&client_cx, |project, _| {
assert!(
project.is_read_only(),
"project {:?} should be read only",
project.remote_id()
)
});
}
for (client, cx) in clients {
let contacts = server
.app_state
.db
.get_contacts(client.current_user_id(cx))
.await
.unwrap();
let pool = server.connection_pool.lock();
for contact in contacts {
if let db::Contact::Accepted { user_id, busy, .. } = contact {
if user_id == removed_user_id {
assert!(!pool.is_user_online(user_id));
assert!(!busy);
}
}
}
}
log::info!("{} removed", client.username);
plan.lock().user(removed_user_id).online = false;
client_cx.update(|cx| {
cx.clear_globals();
drop(client);
});
}
ServerOperation::BounceConnection { user_id } => {
log::info!("simulating temporary disconnection of user {}", user_id);
let user_connection_ids = server
.connection_pool
.lock()
.user_connection_ids(user_id)
.collect::<Vec<_>>();
if user_connection_ids.is_empty() {
return false;
}
assert_eq!(user_connection_ids.len(), 1);
let peer_id = user_connection_ids[0].into();
server.disconnect_client(peer_id);
deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
}
ServerOperation::RestartServer => {
log::info!("simulating server restart");
server.reset().await;
deterministic.advance_clock(RECEIVE_TIMEOUT);
server.start().await.unwrap();
deterministic.advance_clock(CLEANUP_TIMEOUT);
let environment = &server.app_state.config.zed_environment;
let (stale_room_ids, _) = server
.app_state
.db
.stale_server_resource_ids(environment, server.id())
.await
.unwrap();
assert_eq!(stale_room_ids, vec![]);
}
ServerOperation::MutateClients {
user_ids,
batch_id,
quiesce,
} => {
let mut applied = false;
for user_id in user_ids {
let client_ix = clients
.iter()
.position(|(client, cx)| client.current_user_id(cx) == user_id);
let Some(client_ix) = client_ix else { continue };
applied = true;
if let Err(err) = operation_channels[client_ix].unbounded_send(batch_id) {
log::error!("error signaling user {user_id}: {err}");
}
}
if quiesce && applied {
deterministic.run_until_parked();
T::on_quiesce(server, clients).await;
}
return applied;
}
}
true
}
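    // Per-client task: runs the test's on_client_added hook, then for each batch
    // id received from the scheduler pulls this user's next operation from the
    // plan, applies it, and records whether it actually applied.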
async fn simulate_client(
plan: Arc<Mutex<Self>>,
client: Rc<TestClient>,
mut operation_rx: futures::channel::mpsc::UnboundedReceiver<usize>,
mut cx: TestAppContext,
) {
T::on_client_added(&client, &mut cx).await;
while let Some(batch_id) = operation_rx.next().await {
let Some((operation, applied)) =
plan.lock().next_client_operation(&client, batch_id, &cx)
else {
break;
};
applied.store(true, SeqCst);
match T::apply_operation(&client, operation, &mut cx).await {
Ok(()) => {}
Err(TestError::Inapplicable) => {
applied.store(false, SeqCst);
log::info!("skipped operation");
}
Err(TestError::Other(error)) => {
log::error!("{} error: {}", client.username, error);
}
}
cx.executor().simulate_random_delay().await;
}
log::info!("{}: done", client.username);
}
fn user(&mut self, user_id: UserId) -> &mut UserTestPlan {
self.users
.iter_mut()
.find(|user| user.user_id == user_id)
.unwrap()
}
}
impl UserTestPlan {
pub fn next_root_dir_name(&mut self) -> String {
let user_id = self.user_id;
let root_id = util::post_inc(&mut self.next_root_id);
format!("dir-{user_id}-{root_id}")
}
}
impl From<anyhow::Error> for TestError {
fn from(value: anyhow::Error) -> Self {
Self::Other(value)
}
}
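// Reads a path from the given environment variable, resolving relative paths
// against the workspace root (two directories above this crate's manifest).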
fn path_env_var(name: &str) -> Option<PathBuf> {
let value = env::var(name).ok()?;
let mut path = PathBuf::from(value);
if path.is_relative() {
let mut abs_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
abs_path.pop();
abs_path.pop();
abs_path.push(path);
path = abs_path
}
Some(path)
}

View file

@ -0,0 +1,624 @@
use crate::{
db::{tests::TestDb, NewUserParams, UserId},
executor::Executor,
rpc::{Server, CLEANUP_TIMEOUT, RECONNECT_TIMEOUT},
AppState,
};
use anyhow::anyhow;
use call::ActiveCall;
use channel::{ChannelBuffer, ChannelStore};
use client::{
self, proto::PeerId, Client, Connection, Credentials, EstablishConnectionError, UserStore,
};
use collections::{HashMap, HashSet};
use fs::FakeFs;
use futures::{channel::oneshot, StreamExt as _};
use gpui::{BackgroundExecutor, Context, Model, TestAppContext, WindowHandle};
use language::LanguageRegistry;
use node_runtime::FakeNodeRuntime;
use parking_lot::Mutex;
use project::{Project, WorktreeId};
use rpc::{proto::ChannelRole, RECEIVE_TIMEOUT};
use settings::SettingsStore;
use std::{
cell::{Ref, RefCell, RefMut},
env,
ops::{Deref, DerefMut},
path::Path,
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst},
Arc,
},
};
use util::http::FakeHttpClient;
use workspace::{Workspace, WorkspaceStore};
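// An in-process collab server backed by a test database and a fake LiveKit
// server. Individual connections can be killed, and new connections can be
// forbidden, to simulate network failures.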
pub struct TestServer {
pub app_state: Arc<AppState>,
pub test_live_kit_server: Arc<live_kit_client::TestServer>,
server: Arc<Server>,
connection_killers: Arc<Mutex<HashMap<PeerId, Arc<AtomicBool>>>>,
forbid_connections: Arc<AtomicBool>,
_test_db: TestDb,
}
pub struct TestClient {
pub username: String,
pub app_state: Arc<workspace::AppState>,
channel_store: Model<ChannelStore>,
// todo!(notifications)
// notification_store: Model<NotificationStore>,
state: RefCell<TestClientState>,
}
#[derive(Default)]
struct TestClientState {
local_projects: Vec<Model<Project>>,
remote_projects: Vec<Model<Project>>,
buffers: HashMap<Model<Project>, HashSet<Model<language::Buffer>>>,
channel_buffers: HashSet<Model<ChannelBuffer>>,
}
pub struct ContactsSummary {
pub current: Vec<String>,
pub outgoing_requests: Vec<String>,
pub incoming_requests: Vec<String>,
}
impl TestServer {
pub async fn start(deterministic: BackgroundExecutor) -> Self {
static NEXT_LIVE_KIT_SERVER_ID: AtomicUsize = AtomicUsize::new(0);
let use_postgres = env::var("USE_POSTGRES").ok();
let use_postgres = use_postgres.as_deref();
let test_db = if use_postgres == Some("true") || use_postgres == Some("1") {
TestDb::postgres(deterministic.clone())
} else {
TestDb::sqlite(deterministic.clone())
};
let live_kit_server_id = NEXT_LIVE_KIT_SERVER_ID.fetch_add(1, SeqCst);
let live_kit_server = live_kit_client::TestServer::create(
format!("http://livekit.{}.test", live_kit_server_id),
format!("devkey-{}", live_kit_server_id),
format!("secret-{}", live_kit_server_id),
deterministic.clone(),
)
.unwrap();
let app_state = Self::build_app_state(&test_db, &live_kit_server).await;
let epoch = app_state
.db
.create_server(&app_state.config.zed_environment)
.await
.unwrap();
let server = Server::new(
epoch,
app_state.clone(),
Executor::Deterministic(deterministic.clone()),
);
server.start().await.unwrap();
// Advance clock to ensure the server's cleanup task is finished.
deterministic.advance_clock(CLEANUP_TIMEOUT);
Self {
app_state,
server,
connection_killers: Default::default(),
forbid_connections: Default::default(),
_test_db: test_db,
test_live_kit_server: live_kit_server,
}
}
pub async fn reset(&self) {
self.app_state.db.reset();
let epoch = self
.app_state
.db
.create_server(&self.app_state.config.zed_environment)
.await
.unwrap();
self.server.reset(epoch);
}
pub async fn create_client(&mut self, cx: &mut TestAppContext, name: &str) -> TestClient {
cx.update(|cx| {
if cx.has_global::<SettingsStore>() {
panic!("Same cx used to create two test clients")
}
let settings = SettingsStore::test(cx);
cx.set_global(settings);
});
let http = FakeHttpClient::with_404_response();
let user_id = if let Ok(Some(user)) = self.app_state.db.get_user_by_github_login(name).await
{
user.id
} else {
self.app_state
.db
.create_user(
&format!("{name}@example.com"),
false,
NewUserParams {
github_login: name.into(),
github_user_id: 0,
},
)
.await
.expect("creating user failed")
.user_id
};
let client_name = name.to_string();
let mut client = cx.read(|cx| Client::new(http.clone(), cx));
let server = self.server.clone();
let db = self.app_state.db.clone();
let connection_killers = self.connection_killers.clone();
let forbid_connections = self.forbid_connections.clone();
Arc::get_mut(&mut client)
.unwrap()
.set_id(user_id.to_proto())
.override_authenticate(move |cx| {
cx.spawn(|_| async move {
let access_token = "the-token".to_string();
Ok(Credentials {
user_id: user_id.to_proto(),
access_token,
})
})
})
.override_establish_connection(move |credentials, cx| {
assert_eq!(credentials.user_id, user_id.0 as u64);
assert_eq!(credentials.access_token, "the-token");
let server = server.clone();
let db = db.clone();
let connection_killers = connection_killers.clone();
let forbid_connections = forbid_connections.clone();
let client_name = client_name.clone();
cx.spawn(move |cx| async move {
if forbid_connections.load(SeqCst) {
Err(EstablishConnectionError::other(anyhow!(
"server is forbidding connections"
)))
} else {
let (client_conn, server_conn, killed) =
Connection::in_memory(cx.background_executor().clone());
let (connection_id_tx, connection_id_rx) = oneshot::channel();
let user = db
.get_user_by_id(user_id)
.await
.expect("retrieving user failed")
.unwrap();
cx.background_executor()
.spawn(server.handle_connection(
server_conn,
client_name,
user,
Some(connection_id_tx),
Executor::Deterministic(cx.background_executor().clone()),
))
.detach();
let connection_id = connection_id_rx.await.unwrap();
connection_killers
.lock()
.insert(connection_id.into(), killed);
Ok(client_conn)
}
})
});
let fs = FakeFs::new(cx.executor().clone());
let user_store = cx.build_model(|cx| UserStore::new(client.clone(), http, cx));
let workspace_store = cx.build_model(|cx| WorkspaceStore::new(client.clone(), cx));
let mut language_registry = LanguageRegistry::test();
language_registry.set_executor(cx.executor().clone());
let app_state = Arc::new(workspace::AppState {
client: client.clone(),
user_store: user_store.clone(),
workspace_store,
languages: Arc::new(language_registry),
fs: fs.clone(),
build_window_options: |_, _, _| Default::default(),
initialize_workspace: |_, _, _, _| gpui::Task::ready(Ok(())),
node_runtime: FakeNodeRuntime::new(),
});
cx.update(|cx| {
theme::init(cx);
Project::init(&client, cx);
client::init(&client, cx);
language::init(cx);
editor::init_settings(cx);
workspace::init(app_state.clone(), cx);
audio::init((), cx);
call::init(client.clone(), user_store.clone(), cx);
channel::init(&client, user_store.clone(), cx);
//todo(notifications)
// notifications::init(client.clone(), user_store, cx);
});
client
.authenticate_and_connect(false, &cx.to_async())
.await
.unwrap();
let client = TestClient {
app_state,
username: name.to_string(),
channel_store: cx.read(ChannelStore::global).clone(),
// todo!(notifications)
// notification_store: cx.read(NotificationStore::global).clone(),
state: Default::default(),
};
client.wait_for_current_user(cx).await;
client
}
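    // Severs the in-memory connection for this peer by flipping its kill flag.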
pub fn disconnect_client(&self, peer_id: PeerId) {
self.connection_killers
.lock()
.remove(&peer_id)
.unwrap()
.store(true, SeqCst);
}
//todo!(workspace)
#[allow(dead_code)]
pub fn simulate_long_connection_interruption(
&self,
peer_id: PeerId,
deterministic: BackgroundExecutor,
) {
self.forbid_connections();
self.disconnect_client(peer_id);
deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
self.allow_connections();
deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
deterministic.run_until_parked();
}
pub fn forbid_connections(&self) {
self.forbid_connections.store(true, SeqCst);
}
pub fn allow_connections(&self) {
self.forbid_connections.store(false, SeqCst);
}
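    // Makes every pair of clients mutual contacts by sending and accepting
    // contact requests between them.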
pub async fn make_contacts(&self, clients: &mut [(&TestClient, &mut TestAppContext)]) {
for ix in 1..clients.len() {
let (left, right) = clients.split_at_mut(ix);
let (client_a, cx_a) = left.last_mut().unwrap();
for (client_b, cx_b) in right {
client_a
.app_state
.user_store
.update(*cx_a, |store, cx| {
store.request_contact(client_b.user_id().unwrap(), cx)
})
.await
.unwrap();
cx_a.executor().run_until_parked();
client_b
.app_state
.user_store
.update(*cx_b, |store, cx| {
store.respond_to_contact_request(client_a.user_id().unwrap(), true, cx)
})
.await
.unwrap();
}
}
}
pub async fn make_channel(
&self,
channel: &str,
parent: Option<u64>,
admin: (&TestClient, &mut TestAppContext),
members: &mut [(&TestClient, &mut TestAppContext)],
) -> u64 {
let (_, admin_cx) = admin;
let channel_id = admin_cx
.read(ChannelStore::global)
.update(admin_cx, |channel_store, cx| {
channel_store.create_channel(channel, parent, cx)
})
.await
.unwrap();
for (member_client, member_cx) in members {
admin_cx
.read(ChannelStore::global)
.update(admin_cx, |channel_store, cx| {
channel_store.invite_member(
channel_id,
member_client.user_id().unwrap(),
ChannelRole::Member,
cx,
)
})
.await
.unwrap();
admin_cx.executor().run_until_parked();
member_cx
.read(ChannelStore::global)
.update(*member_cx, |channels, cx| {
channels.respond_to_channel_invite(channel_id, true, cx)
})
.await
.unwrap();
}
channel_id
}
pub async fn make_channel_tree(
&self,
channels: &[(&str, Option<&str>)],
creator: (&TestClient, &mut TestAppContext),
) -> Vec<u64> {
let mut observed_channels = HashMap::default();
let mut result = Vec::new();
for (channel, parent) in channels {
let id;
if let Some(parent) = parent {
if let Some(parent_id) = observed_channels.get(parent) {
id = self
.make_channel(channel, Some(*parent_id), (creator.0, creator.1), &mut [])
.await;
} else {
panic!(
"Edge {}->{} referenced before {} was created",
parent, channel, parent
)
}
} else {
id = self
.make_channel(channel, None, (creator.0, creator.1), &mut [])
.await;
}
observed_channels.insert(channel, id);
result.push(id);
}
result
}
pub async fn create_room(&self, clients: &mut [(&TestClient, &mut TestAppContext)]) {
self.make_contacts(clients).await;
let (left, right) = clients.split_at_mut(1);
let (_client_a, cx_a) = &mut left[0];
let active_call_a = cx_a.read(ActiveCall::global);
for (client_b, cx_b) in right {
let user_id_b = client_b.current_user_id(*cx_b).to_proto();
active_call_a
.update(*cx_a, |call, cx| call.invite(user_id_b, None, cx))
.await
.unwrap();
cx_b.executor().run_until_parked();
let active_call_b = cx_b.read(ActiveCall::global);
active_call_b
.update(*cx_b, |call, cx| call.accept_incoming(cx))
.await
.unwrap();
}
}
pub async fn build_app_state(
test_db: &TestDb,
fake_server: &live_kit_client::TestServer,
) -> Arc<AppState> {
Arc::new(AppState {
db: test_db.db().clone(),
live_kit_client: Some(Arc::new(fake_server.create_api_client())),
config: Default::default(),
})
}
}
impl Deref for TestServer {
type Target = Server;
fn deref(&self) -> &Self::Target {
&self.server
}
}
impl Drop for TestServer {
fn drop(&mut self) {
self.server.teardown();
self.test_live_kit_server.teardown().unwrap();
}
}
impl Deref for TestClient {
type Target = Arc<Client>;
fn deref(&self) -> &Self::Target {
&self.app_state.client
}
}
impl TestClient {
pub fn fs(&self) -> &FakeFs {
self.app_state.fs.as_fake()
}
pub fn channel_store(&self) -> &Model<ChannelStore> {
&self.channel_store
}
// todo!(notifications)
// pub fn notification_store(&self) -> &Model<NotificationStore> {
// &self.notification_store
// }
pub fn user_store(&self) -> &Model<UserStore> {
&self.app_state.user_store
}
pub fn language_registry(&self) -> &Arc<LanguageRegistry> {
&self.app_state.languages
}
pub fn client(&self) -> &Arc<Client> {
&self.app_state.client
}
pub fn current_user_id(&self, cx: &TestAppContext) -> UserId {
UserId::from_proto(
self.app_state
.user_store
.read_with(cx, |user_store, _| user_store.current_user().unwrap().id),
)
}
pub async fn wait_for_current_user(&self, cx: &TestAppContext) {
let mut authed_user = self
.app_state
.user_store
.read_with(cx, |user_store, _| user_store.watch_current_user());
while authed_user.next().await.unwrap().is_none() {}
}
pub async fn clear_contacts(&self, cx: &mut TestAppContext) {
self.app_state
.user_store
.update(cx, |store, _| store.clear_contacts())
.await;
}
pub fn local_projects<'a>(&'a self) -> impl Deref<Target = Vec<Model<Project>>> + 'a {
Ref::map(self.state.borrow(), |state| &state.local_projects)
}
pub fn remote_projects<'a>(&'a self) -> impl Deref<Target = Vec<Model<Project>>> + 'a {
Ref::map(self.state.borrow(), |state| &state.remote_projects)
}
pub fn local_projects_mut<'a>(&'a self) -> impl DerefMut<Target = Vec<Model<Project>>> + 'a {
RefMut::map(self.state.borrow_mut(), |state| &mut state.local_projects)
}
pub fn remote_projects_mut<'a>(&'a self) -> impl DerefMut<Target = Vec<Model<Project>>> + 'a {
RefMut::map(self.state.borrow_mut(), |state| &mut state.remote_projects)
}
pub fn buffers_for_project<'a>(
&'a self,
project: &Model<Project>,
) -> impl DerefMut<Target = HashSet<Model<language::Buffer>>> + 'a {
RefMut::map(self.state.borrow_mut(), |state| {
state.buffers.entry(project.clone()).or_default()
})
}
pub fn buffers<'a>(
&'a self,
) -> impl DerefMut<Target = HashMap<Model<Project>, HashSet<Model<language::Buffer>>>> + 'a
{
RefMut::map(self.state.borrow_mut(), |state| &mut state.buffers)
}
pub fn channel_buffers<'a>(
&'a self,
) -> impl DerefMut<Target = HashSet<Model<ChannelBuffer>>> + 'a {
RefMut::map(self.state.borrow_mut(), |state| &mut state.channel_buffers)
}
pub fn summarize_contacts(&self, cx: &TestAppContext) -> ContactsSummary {
self.app_state
.user_store
.read_with(cx, |store, _| ContactsSummary {
current: store
.contacts()
.iter()
.map(|contact| contact.user.github_login.clone())
.collect(),
outgoing_requests: store
.outgoing_contact_requests()
.iter()
.map(|user| user.github_login.clone())
.collect(),
incoming_requests: store
.incoming_contact_requests()
.iter()
.map(|user| user.github_login.clone())
.collect(),
})
}
pub async fn build_local_project(
&self,
root_path: impl AsRef<Path>,
cx: &mut TestAppContext,
) -> (Model<Project>, WorktreeId) {
let project = self.build_empty_local_project(cx);
let (worktree, _) = project
.update(cx, |p, cx| {
p.find_or_create_local_worktree(root_path, true, cx)
})
.await
.unwrap();
worktree
.read_with(cx, |tree, _| tree.as_local().unwrap().scan_complete())
.await;
(project, worktree.read_with(cx, |tree, _| tree.id()))
}
pub fn build_empty_local_project(&self, cx: &mut TestAppContext) -> Model<Project> {
cx.update(|cx| {
Project::local(
self.client().clone(),
self.app_state.node_runtime.clone(),
self.app_state.user_store.clone(),
self.app_state.languages.clone(),
self.app_state.fs.clone(),
cx,
)
})
}
pub async fn build_remote_project(
&self,
host_project_id: u64,
guest_cx: &mut TestAppContext,
) -> Model<Project> {
let active_call = guest_cx.read(ActiveCall::global);
let room = active_call.read_with(guest_cx, |call, _| call.room().unwrap().clone());
room.update(guest_cx, |room, cx| {
room.join_project(
host_project_id,
self.app_state.languages.clone(),
self.app_state.fs.clone(),
cx,
)
})
.await
.unwrap()
}
//todo(workspace)
#[allow(dead_code)]
pub fn build_workspace(
&self,
project: &Model<Project>,
cx: &mut TestAppContext,
) -> WindowHandle<Workspace> {
cx.add_window(|cx| Workspace::new(0, project.clone(), self.app_state.clone(), cx))
}
}
impl Drop for TestClient {
fn drop(&mut self) {
self.app_state.client.teardown();
}
}