Merge branch 'main' into randomized-tests-operation-script

Max Brunsfeld 2023-04-03 13:09:25 -07:00
commit c960277349
483 changed files with 32884 additions and 10315 deletions

View file

@ -78,6 +78,7 @@ pub async fn validate_api_token<B>(req: Request<B>, next: Next<B>) -> impl IntoResponse {
struct AuthenticatedUserParams {
github_user_id: Option<i32>,
github_login: String,
github_email: Option<String>,
}
#[derive(Debug, Serialize)]
@ -92,7 +93,11 @@ async fn get_authenticated_user(
) -> Result<Json<AuthenticatedUserResponse>> {
let user = app
.db
.get_user_by_github_account(&params.github_login, params.github_user_id)
.get_or_create_user_by_github_account(
&params.github_login,
params.github_user_id,
params.github_email.as_deref(),
)
.await?
.ok_or_else(|| Error::Http(StatusCode::NOT_FOUND, "user not found".into()))?;
let metrics_id = app.db.get_user_metrics_id(user.id).await?;
@ -297,11 +302,7 @@ async fn create_access_token(
let mut user_id = user.id;
if let Some(impersonate) = params.impersonate {
if user.admin {
if let Some(impersonated_user) = app
.db
.get_user_by_github_account(&impersonate, None)
.await?
{
if let Some(impersonated_user) = app.db.get_user_by_github_login(&impersonate).await? {
user_id = impersonated_user.id;
} else {
return Err(Error::Http(

View file

@ -1,5 +1,5 @@
use crate::{
db::{self, UserId},
db::{self, AccessTokenId, Database, UserId},
AppState, Error, Result,
};
use anyhow::{anyhow, Context};
@ -8,12 +8,24 @@ use axum::{
middleware::Next,
response::IntoResponse,
};
use lazy_static::lazy_static;
use prometheus::{exponential_buckets, register_histogram, Histogram};
use rand::thread_rng;
use scrypt::{
password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
Scrypt,
};
use std::sync::Arc;
use serde::{Deserialize, Serialize};
use std::{sync::Arc, time::Instant};
lazy_static! {
static ref METRIC_ACCESS_TOKEN_HASHING_TIME: Histogram = register_histogram!(
"access_token_hashing_time",
"time spent hashing access tokens",
exponential_buckets(10.0, 2.0, 10).unwrap(),
)
.unwrap();
}
pub async fn validate_header<B>(mut req: Request<B>, next: Next<B>) -> impl IntoResponse {
let mut auth_header = req
@ -42,20 +54,14 @@ pub async fn validate_header<B>(mut req: Request<B>, next: Next<B>) -> impl IntoResponse {
)
})?;
let mut credentials_valid = false;
let state = req.extensions().get::<Arc<AppState>>().unwrap();
if let Some(admin_token) = access_token.strip_prefix("ADMIN_TOKEN:") {
if state.config.api_token == admin_token {
credentials_valid = true;
}
let credentials_valid = if let Some(admin_token) = access_token.strip_prefix("ADMIN_TOKEN:") {
state.config.api_token == admin_token
} else {
for password_hash in state.db.get_access_token_hashes(user_id).await? {
if verify_access_token(access_token, &password_hash)? {
credentials_valid = true;
break;
}
}
}
verify_access_token(&access_token, user_id, &state.db)
.await
.unwrap_or(false)
};
if credentials_valid {
let user = state
@ -75,13 +81,26 @@ pub async fn validate_header<B>(mut req: Request<B>, next: Next<B>) -> impl IntoResponse {
const MAX_ACCESS_TOKENS_TO_STORE: usize = 8;
#[derive(Serialize, Deserialize)]
struct AccessTokenJson {
version: usize,
id: AccessTokenId,
token: String,
}
pub async fn create_access_token(db: &db::Database, user_id: UserId) -> Result<String> {
const VERSION: usize = 1;
let access_token = rpc::auth::random_token();
let access_token_hash =
hash_access_token(&access_token).context("failed to hash access token")?;
db.create_access_token_hash(user_id, &access_token_hash, MAX_ACCESS_TOKENS_TO_STORE)
let id = db
.create_access_token(user_id, &access_token_hash, MAX_ACCESS_TOKENS_TO_STORE)
.await?;
Ok(access_token)
Ok(serde_json::to_string(&AccessTokenJson {
version: VERSION,
id,
token: access_token,
})?)
}
fn hash_access_token(token: &str) -> Result<String> {
@ -89,7 +108,7 @@ fn hash_access_token(token: &str) -> Result<String> {
let params = if cfg!(debug_assertions) {
scrypt::Params::new(1, 1, 1).unwrap()
} else {
scrypt::Params::recommended()
scrypt::Params::new(14, 8, 1).unwrap()
};
Ok(Scrypt
@ -112,7 +131,21 @@ pub fn encrypt_access_token(access_token: &str, public_key: String) -> Result<String> {
Ok(encrypted_access_token)
}
pub fn verify_access_token(token: &str, hash: &str) -> Result<bool> {
let hash = PasswordHash::new(hash).map_err(anyhow::Error::new)?;
Ok(Scrypt.verify_password(token.as_bytes(), &hash).is_ok())
pub async fn verify_access_token(token: &str, user_id: UserId, db: &Arc<Database>) -> Result<bool> {
let token: AccessTokenJson = serde_json::from_str(&token)?;
let db_token = db.get_access_token(token.id).await?;
if db_token.user_id != user_id {
return Err(anyhow!("no such access token"))?;
}
let db_hash = PasswordHash::new(&db_token.hash).map_err(anyhow::Error::new)?;
let t0 = Instant::now();
let is_valid = Scrypt
.verify_password(token.token.as_bytes(), &db_hash)
.is_ok();
let duration = t0.elapsed();
log::info!("hashed access token in {:?}", duration);
METRIC_ACCESS_TOKEN_HASHING_TIME.observe(duration.as_millis() as f64);
Ok(is_valid)
}
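// Illustrative sketch, not part of this commit: the string returned by
// `create_access_token` above is now a JSON envelope (version, database id, raw token)
// rather than a bare random token, which lets `verify_access_token` look up a single
// hash by id instead of scanning every stored hash. A plain i64 stands in for the
// `AccessTokenId` newtype here; everything else mirrors the struct shown above.
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct AccessTokenJson {
    version: usize,
    id: i64,
    token: String,
}

fn main() -> serde_json::Result<()> {
    // What the server hands back to the client after hashing and storing the token.
    let issued = AccessTokenJson {
        version: 1,
        id: 42,
        token: "some-random-token".into(),
    };
    let credential = serde_json::to_string(&issued)?;

    // What `verify_access_token` does with the incoming credential before the scrypt check.
    let parsed: AccessTokenJson = serde_json::from_str(&credential)?;
    assert_eq!(parsed, issued);
    println!("token id {} carries format version {}", parsed.id, parsed.version);
    Ok(())
}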

View file

@ -1,4 +1,4 @@
use collab::db;
use collab::{db, executor::Executor};
use db::{ConnectOptions, Database};
use serde::{de::DeserializeOwned, Deserialize};
use std::fmt::Write;
@ -13,7 +13,7 @@ struct GitHubUser {
#[tokio::main]
async fn main() {
let database_url = std::env::var("DATABASE_URL").expect("missing DATABASE_URL env var");
let db = Database::new(ConnectOptions::new(database_url))
let db = Database::new(ConnectOptions::new(database_url), Executor::Production)
.await
.expect("failed to connect to postgres database");
let github_token = std::env::var("GITHUB_TOKEN").expect("missing GITHUB_TOKEN env var");
@ -59,7 +59,7 @@ async fn main() {
for (github_user, admin) in zed_users {
if db
.get_user_by_github_account(&github_user.login, Some(github_user.id))
.get_user_by_github_login(&github_user.login)
.await
.expect("failed to fetch user")
.is_none()

View file

@ -1,5 +1,6 @@
mod access_token;
mod contact;
mod follower;
mod language_server;
mod project;
mod project_collaborator;
@ -14,6 +15,7 @@ mod worktree;
mod worktree_diagnostic_summary;
mod worktree_entry;
use crate::executor::Executor;
use crate::{Error, Result};
use anyhow::anyhow;
use collections::{BTreeMap, HashMap, HashSet};
@ -21,6 +23,8 @@ pub use contact::Contact;
use dashmap::DashMap;
use futures::StreamExt;
use hyper::StatusCode;
use rand::prelude::StdRng;
use rand::{Rng, SeedableRng};
use rpc::{proto, ConnectionId};
use sea_orm::Condition;
pub use sea_orm::ConnectOptions;
@ -45,20 +49,20 @@ pub struct Database {
options: ConnectOptions,
pool: DatabaseConnection,
rooms: DashMap<RoomId, Arc<Mutex<()>>>,
#[cfg(test)]
background: Option<std::sync::Arc<gpui::executor::Background>>,
rng: Mutex<StdRng>,
executor: Executor,
#[cfg(test)]
runtime: Option<tokio::runtime::Runtime>,
}
impl Database {
pub async fn new(options: ConnectOptions) -> Result<Self> {
pub async fn new(options: ConnectOptions, executor: Executor) -> Result<Self> {
Ok(Self {
options: options.clone(),
pool: sea_orm::Database::connect(options).await?,
rooms: DashMap::with_capacity(16384),
#[cfg(test)]
background: None,
rng: Mutex::new(StdRng::seed_from_u64(0)),
executor,
#[cfg(test)]
runtime: None,
})
@ -157,7 +161,7 @@ impl Database {
room_id: RoomId,
new_server_id: ServerId,
) -> Result<RoomGuard<RefreshedRoom>> {
self.room_transaction(|tx| async move {
self.room_transaction(room_id, |tx| async move {
let stale_participant_filter = Condition::all()
.add(room_participant::Column::RoomId.eq(room_id))
.add(room_participant::Column::AnsweringConnectionId.is_not_null())
@ -190,17 +194,18 @@ impl Database {
.filter(room_participant::Column::RoomId.eq(room_id))
.exec(&*tx)
.await?;
project::Entity::delete_many()
.filter(project::Column::RoomId.eq(room_id))
.exec(&*tx)
.await?;
room::Entity::delete_by_id(room_id).exec(&*tx).await?;
}
Ok((
room_id,
RefreshedRoom {
room,
stale_participant_user_ids,
canceled_calls_to_user_ids,
},
))
Ok(RefreshedRoom {
room,
stale_participant_user_ids,
canceled_calls_to_user_ids,
})
})
.await
}
@ -293,10 +298,21 @@ impl Database {
.await
}
pub async fn get_user_by_github_account(
pub async fn get_user_by_github_login(&self, github_login: &str) -> Result<Option<User>> {
self.transaction(|tx| async move {
Ok(user::Entity::find()
.filter(user::Column::GithubLogin.eq(github_login))
.one(&*tx)
.await?)
})
.await
}
pub async fn get_or_create_user_by_github_account(
&self,
github_login: &str,
github_user_id: Option<i32>,
github_email: Option<&str>,
) -> Result<Option<User>> {
self.transaction(|tx| async move {
let tx = &*tx;
@ -318,7 +334,19 @@ impl Database {
user_by_github_login.github_user_id = ActiveValue::set(Some(github_user_id));
Ok(Some(user_by_github_login.update(tx).await?))
} else {
Ok(None)
let user = user::Entity::insert(user::ActiveModel {
email_address: ActiveValue::set(github_email.map(|email| email.into())),
github_login: ActiveValue::set(github_login.into()),
github_user_id: ActiveValue::set(Some(github_user_id)),
admin: ActiveValue::set(false),
invite_count: ActiveValue::set(0),
invite_code: ActiveValue::set(None),
metrics_id: ActiveValue::set(Uuid::new_v4()),
..Default::default()
})
.exec_with_returning(&*tx)
.await?;
Ok(Some(user))
}
} else {
Ok(user::Entity::find()
@ -1129,18 +1157,16 @@ impl Database {
user_id: UserId,
connection: ConnectionId,
live_kit_room: &str,
) -> Result<RoomGuard<proto::Room>> {
self.room_transaction(|tx| async move {
) -> Result<proto::Room> {
self.transaction(|tx| async move {
let room = room::ActiveModel {
live_kit_room: ActiveValue::set(live_kit_room.into()),
..Default::default()
}
.insert(&*tx)
.await?;
let room_id = room.id;
room_participant::ActiveModel {
room_id: ActiveValue::set(room_id),
room_id: ActiveValue::set(room.id),
user_id: ActiveValue::set(user_id),
answering_connection_id: ActiveValue::set(Some(connection.id as i32)),
answering_connection_server_id: ActiveValue::set(Some(ServerId(
@ -1157,8 +1183,8 @@ impl Database {
.insert(&*tx)
.await?;
let room = self.get_room(room_id, &tx).await?;
Ok((room_id, room))
let room = self.get_room(room.id, &tx).await?;
Ok(room)
})
.await
}
@ -1171,7 +1197,7 @@ impl Database {
called_user_id: UserId,
initial_project_id: Option<ProjectId>,
) -> Result<RoomGuard<(proto::Room, proto::IncomingCall)>> {
self.room_transaction(|tx| async move {
self.room_transaction(room_id, |tx| async move {
room_participant::ActiveModel {
room_id: ActiveValue::set(room_id),
user_id: ActiveValue::set(called_user_id),
@ -1190,7 +1216,7 @@ impl Database {
let room = self.get_room(room_id, &tx).await?;
let incoming_call = Self::build_incoming_call(&room, called_user_id)
.ok_or_else(|| anyhow!("failed to build incoming call"))?;
Ok((room_id, (room, incoming_call)))
Ok((room, incoming_call))
})
.await
}
@ -1200,7 +1226,7 @@ impl Database {
room_id: RoomId,
called_user_id: UserId,
) -> Result<RoomGuard<proto::Room>> {
self.room_transaction(|tx| async move {
self.room_transaction(room_id, |tx| async move {
room_participant::Entity::delete_many()
.filter(
room_participant::Column::RoomId
@ -1210,7 +1236,7 @@ impl Database {
.exec(&*tx)
.await?;
let room = self.get_room(room_id, &tx).await?;
Ok((room_id, room))
Ok(room)
})
.await
}
@ -1257,7 +1283,7 @@ impl Database {
calling_connection: ConnectionId,
called_user_id: UserId,
) -> Result<RoomGuard<proto::Room>> {
self.room_transaction(|tx| async move {
self.room_transaction(room_id, |tx| async move {
let participant = room_participant::Entity::find()
.filter(
Condition::all()
@ -1276,14 +1302,13 @@ impl Database {
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no call to cancel"))?;
let room_id = participant.room_id;
room_participant::Entity::delete(participant.into_active_model())
.exec(&*tx)
.await?;
let room = self.get_room(room_id, &tx).await?;
Ok((room_id, room))
Ok(room)
})
.await
}
@ -1294,7 +1319,7 @@ impl Database {
user_id: UserId,
connection: ConnectionId,
) -> Result<RoomGuard<proto::Room>> {
self.room_transaction(|tx| async move {
self.room_transaction(room_id, |tx| async move {
let result = room_participant::Entity::update_many()
.filter(
Condition::all()
@ -1316,7 +1341,7 @@ impl Database {
Err(anyhow!("room does not exist or was already joined"))?
} else {
let room = self.get_room(room_id, &tx).await?;
Ok((room_id, room))
Ok(room)
}
})
.await
@ -1328,9 +1353,9 @@ impl Database {
user_id: UserId,
connection: ConnectionId,
) -> Result<RoomGuard<RejoinedRoom>> {
self.room_transaction(|tx| async {
let room_id = RoomId::from_proto(rejoin_room.id);
self.room_transaction(room_id, |tx| async {
let tx = tx;
let room_id = RoomId::from_proto(rejoin_room.id);
let participant_update = room_participant::Entity::update_many()
.filter(
Condition::all()
@ -1549,14 +1574,11 @@ impl Database {
}
let room = self.get_room(room_id, &tx).await?;
Ok((
room_id,
RejoinedRoom {
room,
rejoined_projects,
reshared_projects,
},
))
Ok(RejoinedRoom {
room,
rejoined_projects,
reshared_projects,
})
})
.await
}
@ -1717,13 +1739,75 @@ impl Database {
.await
}
pub async fn follow(
&self,
project_id: ProjectId,
leader_connection: ConnectionId,
follower_connection: ConnectionId,
) -> Result<RoomGuard<proto::Room>> {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
follower::ActiveModel {
room_id: ActiveValue::set(room_id),
project_id: ActiveValue::set(project_id),
leader_connection_server_id: ActiveValue::set(ServerId(
leader_connection.owner_id as i32,
)),
leader_connection_id: ActiveValue::set(leader_connection.id as i32),
follower_connection_server_id: ActiveValue::set(ServerId(
follower_connection.owner_id as i32,
)),
follower_connection_id: ActiveValue::set(follower_connection.id as i32),
..Default::default()
}
.insert(&*tx)
.await?;
let room = self.get_room(room_id, &*tx).await?;
Ok(room)
})
.await
}
pub async fn unfollow(
&self,
project_id: ProjectId,
leader_connection: ConnectionId,
follower_connection: ConnectionId,
) -> Result<RoomGuard<proto::Room>> {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
follower::Entity::delete_many()
.filter(
Condition::all()
.add(follower::Column::ProjectId.eq(project_id))
.add(
follower::Column::LeaderConnectionServerId
.eq(leader_connection.owner_id),
)
.add(follower::Column::LeaderConnectionId.eq(leader_connection.id))
.add(
follower::Column::FollowerConnectionServerId
.eq(follower_connection.owner_id),
)
.add(follower::Column::FollowerConnectionId.eq(follower_connection.id)),
)
.exec(&*tx)
.await?;
let room = self.get_room(room_id, &*tx).await?;
Ok(room)
})
.await
}
pub async fn update_room_participant_location(
&self,
room_id: RoomId,
connection: ConnectionId,
location: proto::ParticipantLocation,
) -> Result<RoomGuard<proto::Room>> {
self.room_transaction(|tx| async {
self.room_transaction(room_id, |tx| async {
let tx = tx;
let location_kind;
let location_project_id;
@ -1769,7 +1853,7 @@ impl Database {
if result.rows_affected == 1 {
let room = self.get_room(room_id, &tx).await?;
Ok((room_id, room))
Ok(room)
} else {
Err(anyhow!("could not update room participant location"))?
}
@ -1926,12 +2010,25 @@ impl Database {
}
}
}
drop(db_projects);
let mut db_followers = db_room.find_related(follower::Entity).stream(tx).await?;
let mut followers = Vec::new();
while let Some(db_follower) = db_followers.next().await {
let db_follower = db_follower?;
followers.push(proto::Follower {
leader_id: Some(db_follower.leader_connection().into()),
follower_id: Some(db_follower.follower_connection().into()),
project_id: db_follower.project_id.to_proto(),
});
}
Ok(proto::Room {
id: db_room.id.to_proto(),
live_kit_room: db_room.live_kit_room,
participants: participants.into_values().collect(),
pending_participants,
followers,
})
}
@ -1963,7 +2060,7 @@ impl Database {
connection: ConnectionId,
worktrees: &[proto::WorktreeMetadata],
) -> Result<RoomGuard<(ProjectId, proto::Room)>> {
self.room_transaction(|tx| async move {
self.room_transaction(room_id, |tx| async move {
let participant = room_participant::Entity::find()
.filter(
Condition::all()
@ -2024,7 +2121,7 @@ impl Database {
.await?;
let room = self.get_room(room_id, &tx).await?;
Ok((room_id, (project.id, room)))
Ok((project.id, room))
})
.await
}
@ -2034,7 +2131,8 @@ impl Database {
project_id: ProjectId,
connection: ConnectionId,
) -> Result<RoomGuard<(proto::Room, Vec<ConnectionId>)>> {
self.room_transaction(|tx| async move {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let guest_connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
let project = project::Entity::find_by_id(project_id)
@ -2042,12 +2140,11 @@ impl Database {
.await?
.ok_or_else(|| anyhow!("project not found"))?;
if project.host_connection()? == connection {
let room_id = project.room_id;
project::Entity::delete(project.into_active_model())
.exec(&*tx)
.await?;
let room = self.get_room(room_id, &tx).await?;
Ok((room_id, (room, guest_connection_ids)))
Ok((room, guest_connection_ids))
} else {
Err(anyhow!("cannot unshare a project hosted by another user"))?
}
@ -2061,7 +2158,8 @@ impl Database {
connection: ConnectionId,
worktrees: &[proto::WorktreeMetadata],
) -> Result<RoomGuard<(proto::Room, Vec<ConnectionId>)>> {
self.room_transaction(|tx| async move {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let project = project::Entity::find_by_id(project_id)
.filter(
Condition::all()
@ -2079,7 +2177,7 @@ impl Database {
let guest_connection_ids = self.project_guest_connection_ids(project.id, &tx).await?;
let room = self.get_room(project.room_id, &tx).await?;
Ok((project.room_id, (room, guest_connection_ids)))
Ok((room, guest_connection_ids))
})
.await
}
@ -2124,12 +2222,12 @@ impl Database {
update: &proto::UpdateWorktree,
connection: ConnectionId,
) -> Result<RoomGuard<Vec<ConnectionId>>> {
self.room_transaction(|tx| async move {
let project_id = ProjectId::from_proto(update.project_id);
let worktree_id = update.worktree_id as i64;
let project_id = ProjectId::from_proto(update.project_id);
let worktree_id = update.worktree_id as i64;
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
// Ensure the update comes from the host.
let project = project::Entity::find_by_id(project_id)
let _project = project::Entity::find_by_id(project_id)
.filter(
Condition::all()
.add(project::Column::HostConnectionId.eq(connection.id as i32))
@ -2140,7 +2238,6 @@ impl Database {
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
let room_id = project.room_id;
// Update metadata.
worktree::Entity::update(worktree::ActiveModel {
@ -2220,7 +2317,7 @@ impl Database {
}
let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
Ok((room_id, connection_ids))
Ok(connection_ids)
})
.await
}
@ -2230,9 +2327,10 @@ impl Database {
update: &proto::UpdateDiagnosticSummary,
connection: ConnectionId,
) -> Result<RoomGuard<Vec<ConnectionId>>> {
self.room_transaction(|tx| async move {
let project_id = ProjectId::from_proto(update.project_id);
let worktree_id = update.worktree_id as i64;
let project_id = ProjectId::from_proto(update.project_id);
let worktree_id = update.worktree_id as i64;
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let summary = update
.summary
.as_ref()
@ -2274,7 +2372,7 @@ impl Database {
.await?;
let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
Ok((project.room_id, connection_ids))
Ok(connection_ids)
})
.await
}
@ -2284,8 +2382,9 @@ impl Database {
update: &proto::StartLanguageServer,
connection: ConnectionId,
) -> Result<RoomGuard<Vec<ConnectionId>>> {
self.room_transaction(|tx| async move {
let project_id = ProjectId::from_proto(update.project_id);
let project_id = ProjectId::from_proto(update.project_id);
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let server = update
.server
.as_ref()
@ -2319,7 +2418,7 @@ impl Database {
.await?;
let connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
Ok((project.room_id, connection_ids))
Ok(connection_ids)
})
.await
}
@ -2329,7 +2428,8 @@ impl Database {
project_id: ProjectId,
connection: ConnectionId,
) -> Result<RoomGuard<(Project, ReplicaId)>> {
self.room_transaction(|tx| async move {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let participant = room_participant::Entity::find()
.filter(
Condition::all()
@ -2455,7 +2555,6 @@ impl Database {
.all(&*tx)
.await?;
let room_id = project.room_id;
let project = Project {
collaborators: collaborators
.into_iter()
@ -2475,7 +2574,7 @@ impl Database {
})
.collect(),
};
Ok((room_id, (project, replica_id as ReplicaId)))
Ok((project, replica_id as ReplicaId))
})
.await
}
@ -2484,8 +2583,9 @@ impl Database {
&self,
project_id: ProjectId,
connection: ConnectionId,
) -> Result<RoomGuard<LeftProject>> {
self.room_transaction(|tx| async move {
) -> Result<RoomGuard<(proto::Room, LeftProject)>> {
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let result = project_collaborator::Entity::delete_many()
.filter(
Condition::all()
@ -2515,13 +2615,39 @@ impl Database {
.map(|collaborator| collaborator.connection())
.collect();
follower::Entity::delete_many()
.filter(
Condition::any()
.add(
Condition::all()
.add(follower::Column::ProjectId.eq(project_id))
.add(
follower::Column::LeaderConnectionServerId
.eq(connection.owner_id),
)
.add(follower::Column::LeaderConnectionId.eq(connection.id)),
)
.add(
Condition::all()
.add(follower::Column::ProjectId.eq(project_id))
.add(
follower::Column::FollowerConnectionServerId
.eq(connection.owner_id),
)
.add(follower::Column::FollowerConnectionId.eq(connection.id)),
),
)
.exec(&*tx)
.await?;
let room = self.get_room(project.room_id, &tx).await?;
let left_project = LeftProject {
id: project_id,
host_user_id: project.host_user_id,
host_connection_id: project.host_connection()?,
connection_ids,
};
Ok((project.room_id, left_project))
Ok((room, left_project))
})
.await
}
@ -2531,11 +2657,8 @@ impl Database {
project_id: ProjectId,
connection_id: ConnectionId,
) -> Result<RoomGuard<Vec<ProjectCollaborator>>> {
self.room_transaction(|tx| async move {
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let collaborators = project_collaborator::Entity::find()
.filter(project_collaborator::Column::ProjectId.eq(project_id))
.all(&*tx)
@ -2553,7 +2676,7 @@ impl Database {
.iter()
.any(|collaborator| collaborator.connection_id == connection_id)
{
Ok((project.room_id, collaborators))
Ok(collaborators)
} else {
Err(anyhow!("no such project"))?
}
@ -2566,11 +2689,8 @@ impl Database {
project_id: ProjectId,
connection_id: ConnectionId,
) -> Result<RoomGuard<HashSet<ConnectionId>>> {
self.room_transaction(|tx| async move {
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
let room_id = self.room_id_for_project(project_id).await?;
self.room_transaction(room_id, |tx| async move {
let mut collaborators = project_collaborator::Entity::find()
.filter(project_collaborator::Column::ProjectId.eq(project_id))
.stream(&*tx)
@ -2583,7 +2703,7 @@ impl Database {
}
if connection_ids.contains(&connection_id) {
Ok((project.room_id, connection_ids))
Ok(connection_ids)
} else {
Err(anyhow!("no such project"))?
}
@ -2613,18 +2733,29 @@ impl Database {
Ok(guest_connection_ids)
}
async fn room_id_for_project(&self, project_id: ProjectId) -> Result<RoomId> {
self.transaction(|tx| async move {
let project = project::Entity::find_by_id(project_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("project {} not found", project_id))?;
Ok(project.room_id)
})
.await
}
// access tokens
pub async fn create_access_token_hash(
pub async fn create_access_token(
&self,
user_id: UserId,
access_token_hash: &str,
max_access_token_count: usize,
) -> Result<()> {
) -> Result<AccessTokenId> {
self.transaction(|tx| async {
let tx = tx;
access_token::ActiveModel {
let token = access_token::ActiveModel {
user_id: ActiveValue::set(user_id),
hash: ActiveValue::set(access_token_hash.into()),
..Default::default()
@ -2647,26 +2778,20 @@ impl Database {
)
.exec(&*tx)
.await?;
Ok(())
Ok(token.id)
})
.await
}
pub async fn get_access_token_hashes(&self, user_id: UserId) -> Result<Vec<String>> {
#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
enum QueryAs {
Hash,
}
pub async fn get_access_token(
&self,
access_token_id: AccessTokenId,
) -> Result<access_token::Model> {
self.transaction(|tx| async move {
Ok(access_token::Entity::find()
.select_only()
.column(access_token::Column::Hash)
.filter(access_token::Column::UserId.eq(user_id))
.order_by_desc(access_token::Column::Id)
.into_values::<_, QueryAs>()
.all(&*tx)
.await?)
Ok(access_token::Entity::find_by_id(access_token_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such access token"))?)
})
.await
}
@ -2677,30 +2802,26 @@ impl Database {
Fut: Send + Future<Output = Result<T>>,
{
let body = async {
let mut i = 0;
loop {
let (tx, result) = self.with_transaction(&f).await?;
match result {
Ok(result) => {
match tx.commit().await.map_err(Into::into) {
Ok(()) => return Ok(result),
Err(error) => {
if is_serialization_error(&error) {
// Retry (don't break the loop)
} else {
return Err(error);
}
Ok(result) => match tx.commit().await.map_err(Into::into) {
Ok(()) => return Ok(result),
Err(error) => {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
},
Err(error) => {
tx.rollback().await?;
if is_serialization_error(&error) {
// Retry (don't break the loop)
} else {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
i += 1;
}
};
@ -2713,6 +2834,7 @@ impl Database {
Fut: Send + Future<Output = Result<Option<(RoomId, T)>>>,
{
let body = async {
let mut i = 0;
loop {
let (tx, result) = self.with_transaction(&f).await?;
match result {
@ -2728,56 +2850,72 @@ impl Database {
}));
}
Err(error) => {
if is_serialization_error(&error) {
// Retry (don't break the loop)
} else {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
}
Ok(None) => {
match tx.commit().await.map_err(Into::into) {
Ok(()) => return Ok(None),
Err(error) => {
if is_serialization_error(&error) {
// Retry (don't break the loop)
} else {
return Err(error);
}
Ok(None) => match tx.commit().await.map_err(Into::into) {
Ok(()) => return Ok(None),
Err(error) => {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
},
Err(error) => {
tx.rollback().await?;
if is_serialization_error(&error) {
// Retry (don't break the loop)
} else {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
i += 1;
}
};
self.run(body).await
}
async fn room_transaction<F, Fut, T>(&self, f: F) -> Result<RoomGuard<T>>
async fn room_transaction<F, Fut, T>(&self, room_id: RoomId, f: F) -> Result<RoomGuard<T>>
where
F: Send + Fn(TransactionHandle) -> Fut,
Fut: Send + Future<Output = Result<(RoomId, T)>>,
Fut: Send + Future<Output = Result<T>>,
{
let data = self
.optional_room_transaction(move |tx| {
let future = f(tx);
async {
let data = future.await?;
Ok(Some(data))
let body = async {
let mut i = 0;
loop {
let lock = self.rooms.entry(room_id).or_default().clone();
let _guard = lock.lock_owned().await;
let (tx, result) = self.with_transaction(&f).await?;
match result {
Ok(data) => match tx.commit().await.map_err(Into::into) {
Ok(()) => {
return Ok(RoomGuard {
data,
_guard,
_not_send: PhantomData,
});
}
Err(error) => {
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
},
Err(error) => {
tx.rollback().await?;
if !self.retry_on_serialization_error(&error, i).await {
return Err(error);
}
}
}
})
.await?;
Ok(data.unwrap())
i += 1;
}
};
self.run(body).await
}
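// Illustrative sketch, not part of this commit: how `room_transaction` serializes work
// per room. Each room id maps to an Arc<tokio::sync::Mutex<()>> in a DashMap, and
// `lock_owned` yields a guard that can be stored (as in `RoomGuard` above) so the room
// stays locked until the caller drops the result. The `RoomLocks` name is an assumption.
use dashmap::DashMap;
use std::sync::Arc;
use tokio::sync::{Mutex, OwnedMutexGuard};

#[derive(Default)]
struct RoomLocks {
    rooms: DashMap<u64, Arc<Mutex<()>>>,
}

impl RoomLocks {
    async fn lock_room(&self, room_id: u64) -> OwnedMutexGuard<()> {
        // Clone the Arc out of the map entry, then take an owned guard on it.
        let lock = self.rooms.entry(room_id).or_default().clone();
        lock.lock_owned().await
    }
}

#[tokio::main]
async fn main() {
    let locks = RoomLocks::default();
    let _guard = locks.lock_room(1).await;
    // A second `lock_room(1)` would wait here until `_guard` is dropped;
    // other room ids are unaffected.
}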
async fn with_transaction<F, Fut, T>(&self, f: &F) -> Result<(DatabaseTransaction, Result<T>)>
@ -2799,14 +2937,14 @@ impl Database {
Ok((tx, result))
}
async fn run<F, T>(&self, future: F) -> T
async fn run<F, T>(&self, future: F) -> Result<T>
where
F: Future<Output = T>,
F: Future<Output = Result<T>>,
{
#[cfg(test)]
{
if let Some(background) = self.background.as_ref() {
background.simulate_random_delay().await;
if let Executor::Deterministic(executor) = &self.executor {
executor.simulate_random_delay().await;
}
self.runtime.as_ref().unwrap().block_on(future)
@ -2817,6 +2955,27 @@ impl Database {
future.await
}
}
async fn retry_on_serialization_error(&self, error: &Error, prev_attempt_count: u32) -> bool {
// If the error is due to a failure to serialize concurrent transactions, then retry
// this transaction after a delay. With each subsequent retry, double the delay duration.
// Also vary the delay randomly in order to ensure different database connections retry
// at different times.
if is_serialization_error(error) {
let base_delay = 4_u64 << prev_attempt_count.min(16);
let randomized_delay = base_delay as f32 * self.rng.lock().await.gen_range(0.5..=2.0);
log::info!(
"retrying transaction after serialization error. delay: {} ms.",
randomized_delay
);
self.executor
.sleep(Duration::from_millis(randomized_delay as u64))
.await;
true
} else {
false
}
}
}
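// Illustrative sketch, not part of this commit: the delay schedule implemented by
// `retry_on_serialization_error` above, a 4 ms base doubled per attempt (capped at
// attempt 16) and jittered by a random factor in 0.5..=2.0 so concurrent connections
// don't retry in lockstep.
use rand::Rng;

fn backoff_ms(prev_attempt_count: u32, rng: &mut impl Rng) -> u64 {
    let base_delay = 4_u64 << prev_attempt_count.min(16);
    (base_delay as f32 * rng.gen_range(0.5..=2.0)) as u64
}

fn main() {
    let mut rng = rand::thread_rng();
    for attempt in 0u32..5 {
        println!("attempt {attempt}: retry after ~{} ms", backoff_ms(attempt, &mut rng));
    }
}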
fn is_serialization_error(error: &Error) -> bool {
@ -3011,6 +3170,7 @@ macro_rules! id_type {
id_type!(AccessTokenId);
id_type!(ContactId);
id_type!(FollowerId);
id_type!(RoomId);
id_type!(RoomParticipantId);
id_type!(ProjectId);
@ -3117,7 +3277,6 @@ mod test {
use gpui::executor::Background;
use lazy_static::lazy_static;
use parking_lot::Mutex;
use rand::prelude::*;
use sea_orm::ConnectionTrait;
use sqlx::migrate::MigrateDatabase;
use std::sync::Arc;
@ -3139,7 +3298,9 @@ mod test {
let mut db = runtime.block_on(async {
let mut options = ConnectOptions::new(url);
options.max_connections(5);
let db = Database::new(options).await.unwrap();
let db = Database::new(options, Executor::Deterministic(background))
.await
.unwrap();
let sql = include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/migrations.sqlite/20221109000000_test_schema.sql"
@ -3154,7 +3315,6 @@ mod test {
db
});
db.background = Some(background);
db.runtime = Some(runtime);
Self {
@ -3188,13 +3348,14 @@ mod test {
options
.max_connections(5)
.idle_timeout(Duration::from_secs(0));
let db = Database::new(options).await.unwrap();
let db = Database::new(options, Executor::Deterministic(background))
.await
.unwrap();
let migrations_path = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations");
db.migrate(Path::new(migrations_path), false).await.unwrap();
db
});
db.background = Some(background);
db.runtime = Some(runtime);
Self {

View file

@ -0,0 +1,51 @@
use super::{FollowerId, ProjectId, RoomId, ServerId};
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
use serde::Serialize;
#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel, Serialize)]
#[sea_orm(table_name = "followers")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: FollowerId,
pub room_id: RoomId,
pub project_id: ProjectId,
pub leader_connection_server_id: ServerId,
pub leader_connection_id: i32,
pub follower_connection_server_id: ServerId,
pub follower_connection_id: i32,
}
impl Model {
pub fn leader_connection(&self) -> ConnectionId {
ConnectionId {
owner_id: self.leader_connection_server_id.0 as u32,
id: self.leader_connection_id as u32,
}
}
pub fn follower_connection(&self) -> ConnectionId {
ConnectionId {
owner_id: self.follower_connection_server_id.0 as u32,
id: self.follower_connection_id as u32,
}
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::room::Entity",
from = "Column::RoomId",
to = "super::room::Column::Id"
)]
Room,
}
impl Related<super::room::Entity> for Entity {
fn to() -> RelationDef {
Relation::Room.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -15,6 +15,8 @@ pub enum Relation {
RoomParticipant,
#[sea_orm(has_many = "super::project::Entity")]
Project,
#[sea_orm(has_many = "super::follower::Entity")]
Follower,
}
impl Related<super::room_participant::Entity> for Entity {
@ -29,4 +31,10 @@ impl Related<super::project::Entity> for Entity {
}
}
impl Related<super::follower::Entity> for Entity {
fn to() -> RelationDef {
Relation::Follower.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View file

@ -92,8 +92,8 @@ test_both_dbs!(
);
test_both_dbs!(
test_get_user_by_github_account_postgres,
test_get_user_by_github_account_sqlite,
test_get_or_create_user_by_github_account_postgres,
test_get_or_create_user_by_github_account_sqlite,
db,
{
let user_id1 = db
@ -124,7 +124,7 @@ test_both_dbs!(
.user_id;
let user = db
.get_user_by_github_account("login1", None)
.get_or_create_user_by_github_account("login1", None, None)
.await
.unwrap()
.unwrap();
@ -133,19 +133,28 @@ test_both_dbs!(
assert_eq!(user.github_user_id, Some(101));
assert!(db
.get_user_by_github_account("non-existent-login", None)
.get_or_create_user_by_github_account("non-existent-login", None, None)
.await
.unwrap()
.is_none());
let user = db
.get_user_by_github_account("the-new-login2", Some(102))
.get_or_create_user_by_github_account("the-new-login2", Some(102), None)
.await
.unwrap()
.unwrap();
assert_eq!(user.id, user_id2);
assert_eq!(&user.github_login, "the-new-login2");
assert_eq!(user.github_user_id, Some(102));
let user = db
.get_or_create_user_by_github_account("login3", Some(103), Some("user3@example.com"))
.await
.unwrap()
.unwrap();
assert_eq!(&user.github_login, "login3");
assert_eq!(user.github_user_id, Some(103));
assert_eq!(user.email_address, Some("user3@example.com".into()));
}
);
@ -168,30 +177,63 @@ test_both_dbs!(
.unwrap()
.user_id;
db.create_access_token_hash(user, "h1", 3).await.unwrap();
db.create_access_token_hash(user, "h2", 3).await.unwrap();
let token_1 = db.create_access_token(user, "h1", 2).await.unwrap();
let token_2 = db.create_access_token(user, "h2", 2).await.unwrap();
assert_eq!(
db.get_access_token_hashes(user).await.unwrap(),
&["h2".to_string(), "h1".to_string()]
db.get_access_token(token_1).await.unwrap(),
access_token::Model {
id: token_1,
user_id: user,
hash: "h1".into(),
}
);
assert_eq!(
db.get_access_token(token_2).await.unwrap(),
access_token::Model {
id: token_2,
user_id: user,
hash: "h2".into()
}
);
db.create_access_token_hash(user, "h3", 3).await.unwrap();
let token_3 = db.create_access_token(user, "h3", 2).await.unwrap();
assert_eq!(
db.get_access_token_hashes(user).await.unwrap(),
&["h3".to_string(), "h2".to_string(), "h1".to_string(),]
db.get_access_token(token_3).await.unwrap(),
access_token::Model {
id: token_3,
user_id: user,
hash: "h3".into()
}
);
assert_eq!(
db.get_access_token(token_2).await.unwrap(),
access_token::Model {
id: token_2,
user_id: user,
hash: "h2".into()
}
);
assert!(db.get_access_token(token_1).await.is_err());
db.create_access_token_hash(user, "h4", 3).await.unwrap();
let token_4 = db.create_access_token(user, "h4", 2).await.unwrap();
assert_eq!(
db.get_access_token_hashes(user).await.unwrap(),
&["h4".to_string(), "h3".to_string(), "h2".to_string(),]
db.get_access_token(token_4).await.unwrap(),
access_token::Model {
id: token_4,
user_id: user,
hash: "h4".into()
}
);
db.create_access_token_hash(user, "h5", 3).await.unwrap();
assert_eq!(
db.get_access_token_hashes(user).await.unwrap(),
&["h5".to_string(), "h4".to_string(), "h3".to_string()]
db.get_access_token(token_3).await.unwrap(),
access_token::Model {
id: token_3,
user_id: user,
hash: "h3".into()
}
);
assert!(db.get_access_token(token_2).await.is_err());
assert!(db.get_access_token(token_1).await.is_err());
}
);

View file

@ -10,6 +10,7 @@ mod tests;
use axum::{http::StatusCode, response::IntoResponse};
use db::Database;
use executor::Executor;
use serde::Deserialize;
use std::{path::PathBuf, sync::Arc};
@ -91,6 +92,7 @@ impl std::error::Error for Error {}
pub struct Config {
pub http_port: u16,
pub database_url: String,
pub database_max_connections: u32,
pub api_token: String,
pub invite_link_prefix: String,
pub live_kit_server: Option<String>,
@ -116,8 +118,8 @@ pub struct AppState {
impl AppState {
pub async fn new(config: Config) -> Result<Arc<Self>> {
let mut db_options = db::ConnectOptions::new(config.database_url.clone());
db_options.max_connections(5);
let db = Database::new(db_options).await?;
db_options.max_connections(config.database_max_connections);
let db = Database::new(db_options, Executor::Production).await?;
let live_kit_client = if let Some(((server, key), secret)) = config
.live_kit_server
.as_ref()

View file

@ -1,11 +1,12 @@
use anyhow::anyhow;
use axum::{routing::get, Router};
use axum::{routing::get, Extension, Router};
use collab::{db, env, executor::Executor, AppState, Config, MigrateConfig, Result};
use db::Database;
use std::{
env::args,
net::{SocketAddr, TcpListener},
path::Path,
sync::Arc,
};
use tokio::signal::unix::SignalKind;
use tracing_log::LogTracer;
@ -31,7 +32,7 @@ async fn main() -> Result<()> {
let config = envy::from_env::<MigrateConfig>().expect("error loading config");
let mut db_options = db::ConnectOptions::new(config.database_url.clone());
db_options.max_connections(5);
let db = Database::new(db_options).await?;
let db = Database::new(db_options, Executor::Production).await?;
let migrations_path = config
.migrations_path
@ -66,7 +67,12 @@ async fn main() -> Result<()> {
let app = collab::api::routes(rpc_server.clone(), state.clone())
.merge(collab::rpc::routes(rpc_server.clone()))
.merge(Router::new().route("/", get(handle_root)));
.merge(
Router::new()
.route("/", get(handle_root))
.route("/healthz", get(handle_liveness_probe))
.layer(Extension(state.clone())),
);
axum::Server::from_tcp(listener)?
.serve(app.into_make_service_with_connect_info::<SocketAddr>())
@ -95,6 +101,11 @@ async fn handle_root() -> String {
format!("collab v{VERSION}")
}
async fn handle_liveness_probe(Extension(state): Extension<Arc<AppState>>) -> Result<String> {
state.db.get_all_users(0, 1).await?;
Ok("ok".to_string())
}
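// Illustrative sketch, not part of this commit: how the new `/healthz` route above is
// wired up with an axum Extension layer so the handler can reach the shared state.
// `AppState` is reduced to a placeholder here; the real handler runs a cheap query
// (`state.db.get_all_users(0, 1)`) to prove the database is still reachable.
use axum::{routing::get, Extension, Router};
use std::sync::Arc;

struct AppState;

async fn handle_liveness_probe(Extension(_state): Extension<Arc<AppState>>) -> String {
    // Placeholder for the database check performed by the real handler.
    "ok".to_string()
}

fn routes(state: Arc<AppState>) -> Router {
    Router::new()
        .route("/healthz", get(handle_liveness_probe))
        .layer(Extension(state))
}

fn main() {
    let _app = routes(Arc::new(AppState));
    println!("router with /healthz constructed");
}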
pub fn init_tracing(config: &Config) -> Option<()> {
use std::str::FromStr;
use tracing_subscriber::layer::SubscriberExt;

View file

@ -53,11 +53,11 @@ use std::{
},
time::Duration,
};
use tokio::sync::watch;
use tokio::sync::{watch, Semaphore};
use tower::ServiceBuilder;
use tracing::{info_span, instrument, Instrument};
pub const RECONNECT_TIMEOUT: Duration = Duration::from_secs(5);
pub const RECONNECT_TIMEOUT: Duration = Duration::from_secs(30);
pub const CLEANUP_TIMEOUT: Duration = Duration::from_secs(10);
lazy_static! {
@ -186,7 +186,7 @@ impl Server {
.add_request_handler(create_room)
.add_request_handler(join_room)
.add_request_handler(rejoin_room)
.add_message_handler(leave_room)
.add_request_handler(leave_room)
.add_request_handler(call)
.add_request_handler(cancel_call)
.add_message_handler(decline_call)
@ -270,8 +270,11 @@ impl Server {
let mut live_kit_room = String::new();
let mut delete_live_kit_room = false;
if let Ok(mut refreshed_room) =
app_state.db.refresh_room(room_id, server_id).await
if let Some(mut refreshed_room) = app_state
.db
.refresh_room(room_id, server_id)
.await
.trace_err()
{
tracing::info!(
room_id = room_id.0,
@ -539,8 +542,13 @@ impl Server {
// This arrangement ensures we will attempt to process earlier messages first, but fall
// back to processing messages that arrived later in the spirit of making progress.
let mut foreground_message_handlers = FuturesUnordered::new();
let concurrent_handlers = Arc::new(Semaphore::new(256));
loop {
let next_message = incoming_rx.next().fuse();
let next_message = async {
let permit = concurrent_handlers.clone().acquire_owned().await.unwrap();
let message = incoming_rx.next().await;
(permit, message)
}.fuse();
futures::pin_mut!(next_message);
futures::select_biased! {
_ = teardown.changed().fuse() => return Ok(()),
@ -551,7 +559,8 @@ impl Server {
break;
}
_ = foreground_message_handlers.next() => {}
message = next_message => {
next_message = next_message => {
let (permit, message) = next_message;
if let Some(message) = message {
let type_name = message.payload_type_name();
let span = tracing::info_span!("receive message", %user_id, %login, %connection_id, %address, type_name);
@ -561,7 +570,10 @@ impl Server {
let handle_message = (handler)(message, session.clone());
drop(span_enter);
let handle_message = handle_message.instrument(span);
let handle_message = async move {
handle_message.await;
drop(permit);
}.instrument(span);
if is_background {
executor.spawn_detached(handle_message);
} else {
@ -1090,8 +1102,14 @@ async fn rejoin_room(
Ok(())
}
async fn leave_room(_message: proto::LeaveRoom, session: Session) -> Result<()> {
leave_room_for_session(&session).await
async fn leave_room(
_: proto::LeaveRoom,
response: Response<proto::LeaveRoom>,
session: Session,
) -> Result<()> {
leave_room_for_session(&session).await?;
response.send(proto::Ack {})?;
Ok(())
}
async fn call(
@ -1312,6 +1330,7 @@ async fn join_project(
.filter(|collaborator| collaborator.connection_id != session.connection_id)
.map(|collaborator| collaborator.to_proto())
.collect::<Vec<_>>();
let worktrees = project
.worktrees
.iter()
@ -1404,7 +1423,7 @@ async fn leave_project(request: proto::LeaveProject, session: Session) -> Result
let sender_id = session.connection_id;
let project_id = ProjectId::from_proto(request.project_id);
let project = session
let (room, project) = &*session
.db()
.await
.leave_project(project_id, sender_id)
@ -1415,7 +1434,9 @@ async fn leave_project(request: proto::LeaveProject, session: Session) -> Result
host_connection_id = %project.host_connection_id,
"leave project"
);
project_left(&project, &session);
room_updated(&room, &session.peer);
Ok(())
}
@ -1724,6 +1745,7 @@ async fn follow(
.ok_or_else(|| anyhow!("invalid leader id"))?
.into();
let follower_id = session.connection_id;
{
let project_connection_ids = session
.db()
@ -1744,6 +1766,14 @@ async fn follow(
.views
.retain(|view| view.leader_id != Some(follower_id.into()));
response.send(response_payload)?;
let room = session
.db()
.await
.follow(project_id, leader_id, follower_id)
.await?;
room_updated(&room, &session.peer);
Ok(())
}
@ -1753,17 +1783,29 @@ async fn unfollow(request: proto::Unfollow, session: Session) -> Result<()> {
.leader_id
.ok_or_else(|| anyhow!("invalid leader id"))?
.into();
let project_connection_ids = session
let follower_id = session.connection_id;
if !session
.db()
.await
.project_connection_ids(project_id, session.connection_id)
.await?;
if !project_connection_ids.contains(&leader_id) {
.await?
.contains(&leader_id)
{
Err(anyhow!("no such peer"))?;
}
session
.peer
.forward_send(session.connection_id, leader_id, request)?;
let room = session
.db()
.await
.unfollow(project_id, leader_id, follower_id)
.await?;
room_updated(&room, &session.peer);
Ok(())
}
@ -1833,7 +1875,7 @@ async fn fuzzy_search_users(
1 | 2 => session
.db()
.await
.get_user_by_github_account(&query, None)
.get_user_by_github_login(&query)
.await?
.into_iter()
.collect(),

View file

@ -7,15 +7,12 @@ use crate::{
use anyhow::anyhow;
use call::ActiveCall;
use client::{
self, proto::PeerId, test::FakeHttpClient, Client, Connection, Credentials,
EstablishConnectionError, UserStore,
self, proto::PeerId, Client, Connection, Credentials, EstablishConnectionError, UserStore,
};
use collections::{HashMap, HashSet};
use fs::FakeFs;
use futures::{channel::oneshot, StreamExt as _};
use gpui::{
executor::Deterministic, test::EmptyView, ModelHandle, Task, TestAppContext, ViewHandle,
};
use gpui::{executor::Deterministic, test::EmptyView, ModelHandle, TestAppContext, ViewHandle};
use language::LanguageRegistry;
use parking_lot::Mutex;
use project::{Project, WorktreeId};
@ -31,6 +28,7 @@ use std::{
},
};
use theme::ThemeRegistry;
use util::http::FakeHttpClient;
use workspace::Workspace;
mod integration_tests;
@ -105,11 +103,7 @@ impl TestServer {
});
let http = FakeHttpClient::with_404_response();
let user_id = if let Ok(Some(user)) = self
.app_state
.db
.get_user_by_github_account(name, None)
.await
let user_id = if let Ok(Some(user)) = self.app_state.db.get_user_by_github_login(name).await
{
user.id
} else {
@ -193,12 +187,13 @@ impl TestServer {
let app_state = Arc::new(workspace::AppState {
client: client.clone(),
user_store: user_store.clone(),
languages: Arc::new(LanguageRegistry::new(Task::ready(()))),
languages: Arc::new(LanguageRegistry::test()),
themes: ThemeRegistry::new((), cx.font_cache()),
fs: fs.clone(),
build_window_options: |_, _, _| Default::default(),
initialize_workspace: |_, _, _| unimplemented!(),
dock_default_item_factory: |_, _| unimplemented!(),
dock_default_item_factory: |_, _| None,
background_actions: || &[],
});
Project::init(&client);
@ -468,15 +463,7 @@ impl TestClient {
cx: &mut TestAppContext,
) -> ViewHandle<Workspace> {
let (_, root_view) = cx.add_window(|_| EmptyView);
cx.add_view(&root_view, |cx| {
Workspace::new(
Default::default(),
0,
project.clone(),
|_, _| unimplemented!(),
cx,
)
})
cx.add_view(&root_view, |cx| Workspace::test_new(project.clone(), cx))
}
}

View file

@ -274,10 +274,14 @@ async fn test_basic_calls(
}
// User A leaves the room.
active_call_a.update(cx_a, |call, cx| {
call.hang_up(cx).unwrap();
assert!(call.room().is_none());
});
active_call_a
.update(cx_a, |call, cx| {
let hang_up = call.hang_up(cx);
assert!(call.room().is_none());
hang_up
})
.await
.unwrap();
deterministic.run_until_parked();
assert_eq!(
room_participants(&room_a, cx_a),
@ -557,6 +561,7 @@ async fn test_room_uniqueness(
// Client C can successfully call client B after client B leaves the room.
active_call_b
.update(cx_b, |call, cx| call.hang_up(cx))
.await
.unwrap();
deterministic.run_until_parked();
active_call_c
@ -733,6 +738,14 @@ async fn test_server_restarts(
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let client_a = server.create_client(cx_a, "user_a").await;
client_a
.fs
.insert_tree("/a", json!({ "a.txt": "a-contents" }))
.await;
// Invite client B to collaborate on a project
let (project_a, _) = client_a.build_local_project("/a", cx_a).await;
let client_b = server.create_client(cx_b, "user_b").await;
let client_c = server.create_client(cx_c, "user_c").await;
let client_d = server.create_client(cx_d, "user_d").await;
@ -753,19 +766,19 @@ async fn test_server_restarts(
// User A calls users B, C, and D.
active_call_a
.update(cx_a, |call, cx| {
call.invite(client_b.user_id().unwrap(), None, cx)
call.invite(client_b.user_id().unwrap(), Some(project_a.clone()), cx)
})
.await
.unwrap();
active_call_a
.update(cx_a, |call, cx| {
call.invite(client_c.user_id().unwrap(), None, cx)
call.invite(client_c.user_id().unwrap(), Some(project_a.clone()), cx)
})
.await
.unwrap();
active_call_a
.update(cx_a, |call, cx| {
call.invite(client_d.user_id().unwrap(), None, cx)
call.invite(client_d.user_id().unwrap(), Some(project_a.clone()), cx)
})
.await
.unwrap();
@ -821,7 +834,7 @@ async fn test_server_restarts(
// Users A and B reconnect to the call. User C has troubles reconnecting, so it leaves the room.
client_c.override_establish_connection(|_, cx| cx.spawn(|_| future::pending()));
deterministic.advance_clock(RECEIVE_TIMEOUT);
deterministic.advance_clock(RECONNECT_TIMEOUT);
assert_eq!(
room_participants(&room_a, cx_a),
RoomParticipants {
@ -928,6 +941,7 @@ async fn test_server_restarts(
// User D hangs up.
active_call_d
.update(cx_d, |call, cx| call.hang_up(cx))
.await
.unwrap();
deterministic.run_until_parked();
assert_eq!(
@ -993,7 +1007,7 @@ async fn test_server_restarts(
client_a.override_establish_connection(|_, cx| cx.spawn(|_| future::pending()));
client_b.override_establish_connection(|_, cx| cx.spawn(|_| future::pending()));
client_c.override_establish_connection(|_, cx| cx.spawn(|_| future::pending()));
deterministic.advance_clock(RECEIVE_TIMEOUT);
deterministic.advance_clock(RECONNECT_TIMEOUT);
assert_eq!(
room_participants(&room_a, cx_a),
RoomParticipants {
@ -1083,7 +1097,7 @@ async fn test_calls_on_multiple_connections(
assert!(incoming_call_b2.next().await.unwrap().is_none());
// User B disconnects the client that is not on the call. Everything should be fine.
client_b1.disconnect(&cx_b1.to_async()).unwrap();
client_b1.disconnect(&cx_b1.to_async());
deterministic.advance_clock(RECEIVE_TIMEOUT);
client_b1
.authenticate_and_connect(false, &cx_b1.to_async())
@ -1091,7 +1105,10 @@ async fn test_calls_on_multiple_connections(
.unwrap();
// User B hangs up, and user A calls them again.
active_call_b2.update(cx_b2, |call, cx| call.hang_up(cx).unwrap());
active_call_b2
.update(cx_b2, |call, cx| call.hang_up(cx))
.await
.unwrap();
deterministic.run_until_parked();
active_call_a
.update(cx_a, |call, cx| {
@ -1126,7 +1143,10 @@ async fn test_calls_on_multiple_connections(
assert!(incoming_call_b2.next().await.unwrap().is_some());
// User A hangs up, causing both connections to stop ringing.
active_call_a.update(cx_a, |call, cx| call.hang_up(cx).unwrap());
active_call_a
.update(cx_a, |call, cx| call.hang_up(cx))
.await
.unwrap();
deterministic.run_until_parked();
assert!(incoming_call_b1.next().await.unwrap().is_none());
assert!(incoming_call_b2.next().await.unwrap().is_none());
@ -1363,7 +1383,10 @@ async fn test_unshare_project(
.unwrap();
// When client B leaves the room, the project becomes read-only.
active_call_b.update(cx_b, |call, cx| call.hang_up(cx).unwrap());
active_call_b
.update(cx_b, |call, cx| call.hang_up(cx))
.await
.unwrap();
deterministic.run_until_parked();
assert!(project_b.read_with(cx_b, |project, _| project.is_read_only()));
@ -1392,7 +1415,10 @@ async fn test_unshare_project(
.unwrap();
// When client A (the host) leaves the room, the project gets unshared and guests are notified.
active_call_a.update(cx_a, |call, cx| call.hang_up(cx).unwrap());
active_call_a
.update(cx_a, |call, cx| call.hang_up(cx))
.await
.unwrap();
deterministic.run_until_parked();
project_a.read_with(cx_a, |project, _| assert!(!project.is_shared()));
project_c2.read_with(cx_c, |project, _| {
@ -1441,15 +1467,7 @@ async fn test_host_disconnect(
deterministic.run_until_parked();
assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared()));
let (_, workspace_b) = cx_b.add_window(|cx| {
Workspace::new(
Default::default(),
0,
project_b.clone(),
|_, _| unimplemented!(),
cx,
)
});
let (_, workspace_b) = cx_b.add_window(|cx| Workspace::test_new(project_b.clone(), cx));
let editor_b = workspace_b
.update(cx_b, |workspace, cx| {
workspace.open_path((worktree_id, "b.txt"), None, true, cx)
@ -1726,10 +1744,6 @@ async fn test_project_reconnect(
vec![
"a.txt",
"b.txt",
"subdir1",
"subdir1/c.txt",
"subdir1/d.txt",
"subdir1/e.txt",
"subdir2",
"subdir2/f.txt",
"subdir2/g.txt",
@ -1762,10 +1776,6 @@ async fn test_project_reconnect(
vec![
"a.txt",
"b.txt",
"subdir1",
"subdir1/c.txt",
"subdir1/d.txt",
"subdir1/e.txt",
"subdir2",
"subdir2/f.txt",
"subdir2/g.txt",
@ -1857,10 +1867,6 @@ async fn test_project_reconnect(
vec![
"a.txt",
"b.txt",
"subdir1",
"subdir1/c.txt",
"subdir1/d.txt",
"subdir1/e.txt",
"subdir2",
"subdir2/f.txt",
"subdir2/g.txt",
@ -2244,7 +2250,9 @@ async fn test_propagate_saves_and_fs_changes(
});
// Edit the buffer as the host and concurrently save as guest B.
let save_b = project_b.update(cx_b, |project, cx| project.save_buffer(buffer_b.clone(), cx));
let save_b = project_b.update(cx_b, |project, cx| {
project.save_buffer(buffer_b.clone(), cx)
});
buffer_a.update(cx_a, |buf, cx| buf.edit([(0..0, "hi-a, ")], None, cx));
save_b.await.unwrap();
assert_eq!(
@ -2917,7 +2925,10 @@ async fn test_buffer_conflict_after_save(
assert!(!buf.has_conflict());
});
project_b.update(cx_b, |project, cx| project.save_buffer(buffer_b.clone(), cx))
project_b
.update(cx_b, |project, cx| {
project.save_buffer(buffer_b.clone(), cx)
})
.await
.unwrap();
cx_a.foreground().forbid_parking();
@ -3222,7 +3233,7 @@ async fn test_leaving_project(
buffer_b2.read_with(cx_b, |buffer, _| assert_eq!(buffer.text(), "a-contents"));
// Drop client B's connection and ensure client A and client C observe client B leaving.
client_b.disconnect(&cx_b.to_async()).unwrap();
client_b.disconnect(&cx_b.to_async());
deterministic.advance_clock(RECONNECT_TIMEOUT);
project_a.read_with(cx_a, |project, _| {
assert_eq!(project.collaborators().len(), 1);
@ -3879,9 +3890,11 @@ async fn test_formatting_buffer(
})
.await
.unwrap();
// The edits from the LSP are applied, and a final newline is added.
assert_eq!(
buffer_b.read_with(cx_b, |buffer, _| buffer.text()),
"let honey = \"two\""
"let honey = \"two\"\n"
);
// Ensure buffer can be formatted using an external command. Notice how the
@ -4691,15 +4704,7 @@ async fn test_collaborating_with_code_actions(
// Join the project as client B.
let project_b = client_b.build_remote_project(project_id, cx_b).await;
let (_window_b, workspace_b) = cx_b.add_window(|cx| {
Workspace::new(
Default::default(),
0,
project_b.clone(),
|_, _| unimplemented!(),
cx,
)
});
let (_window_b, workspace_b) = cx_b.add_window(|cx| Workspace::test_new(project_b.clone(), cx));
let editor_b = workspace_b
.update(cx_b, |workspace, cx| {
workspace.open_path((worktree_id, "main.rs"), None, true, cx)
@ -4922,15 +4927,7 @@ async fn test_collaborating_with_renames(
.unwrap();
let project_b = client_b.build_remote_project(project_id, cx_b).await;
let (_window_b, workspace_b) = cx_b.add_window(|cx| {
Workspace::new(
Default::default(),
0,
project_b.clone(),
|_, _| unimplemented!(),
cx,
)
});
let (_window_b, workspace_b) = cx_b.add_window(|cx| Workspace::test_new(project_b.clone(), cx));
let editor_b = workspace_b
.update(cx_b, |workspace, cx| {
workspace.open_path((worktree_id, "one.rs"), None, true, cx)
@ -5464,7 +5461,10 @@ async fn test_contacts(
[("user_b".to_string(), "online", "busy")]
);
active_call_a.update(cx_a, |call, cx| call.hang_up(cx).unwrap());
active_call_a
.update(cx_a, |call, cx| call.hang_up(cx))
.await
.unwrap();
deterministic.run_until_parked();
assert_eq!(
contacts(&client_a, cx_a),
@ -5767,7 +5767,7 @@ async fn test_contact_requests(
.is_empty());
async fn disconnect_and_reconnect(client: &TestClient, cx: &mut TestAppContext) {
client.disconnect(&cx.to_async()).unwrap();
client.disconnect(&cx.to_async());
client.clear_contacts(cx).await;
client
.authenticate_and_connect(false, &cx.to_async())
@ -5777,10 +5777,12 @@ async fn test_contact_requests(
}
#[gpui::test(iterations = 10)]
async fn test_following(
async fn test_basic_following(
deterministic: Arc<Deterministic>,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
cx_c: &mut TestAppContext,
cx_d: &mut TestAppContext,
) {
deterministic.forbid_parking();
cx_a.update(editor::init);
@ -5789,8 +5791,15 @@ async fn test_following(
let mut server = TestServer::start(&deterministic).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
let client_c = server.create_client(cx_c, "user_c").await;
let client_d = server.create_client(cx_d, "user_d").await;
server
.create_room(&mut [(&client_a, cx_a), (&client_b, cx_b)])
.create_room(&mut [
(&client_a, cx_a),
(&client_b, cx_b),
(&client_c, cx_c),
(&client_d, cx_d),
])
.await;
let active_call_a = cx_a.read(ActiveCall::global);
let active_call_b = cx_b.read(ActiveCall::global);
@ -5822,8 +5831,10 @@ async fn test_following(
.await
.unwrap();
// Client A opens some editors.
let workspace_a = client_a.build_workspace(&project_a, cx_a);
let workspace_b = client_b.build_workspace(&project_b, cx_b);
// Client A opens some editors.
let pane_a = workspace_a.read_with(cx_a, |workspace, _| workspace.active_pane().clone());
let editor_a1 = workspace_a
.update(cx_a, |workspace, cx| {
@ -5843,7 +5854,6 @@ async fn test_following(
.unwrap();
// Client B opens an editor.
let workspace_b = client_b.build_workspace(&project_b, cx_b);
let editor_b1 = workspace_b
.update(cx_b, |workspace, cx| {
workspace.open_path((worktree_id, "1.txt"), None, true, cx)
@ -5853,29 +5863,184 @@ async fn test_following(
.downcast::<Editor>()
.unwrap();
let client_a_id = project_b.read_with(cx_b, |project, _| {
project.collaborators().values().next().unwrap().peer_id
});
let client_b_id = project_a.read_with(cx_a, |project, _| {
project.collaborators().values().next().unwrap().peer_id
});
let peer_id_a = client_a.peer_id().unwrap();
let peer_id_b = client_b.peer_id().unwrap();
let peer_id_c = client_c.peer_id().unwrap();
let peer_id_d = client_d.peer_id().unwrap();
// When client B starts following client A, all visible view states are replicated to client B.
// Client A updates their selections in those editors
editor_a1.update(cx_a, |editor, cx| {
editor.change_selections(None, cx, |s| s.select_ranges([0..1]))
});
editor_a2.update(cx_a, |editor, cx| {
editor.change_selections(None, cx, |s| s.select_ranges([2..3]))
});
// When client B starts following client A, all visible view states are replicated to client B.
workspace_b
.update(cx_b, |workspace, cx| {
workspace
.toggle_follow(&ToggleFollow(client_a_id), cx)
.toggle_follow(&ToggleFollow(peer_id_a), cx)
.unwrap()
})
.await
.unwrap();
cx_c.foreground().run_until_parked();
let active_call_c = cx_c.read(ActiveCall::global);
let project_c = client_c.build_remote_project(project_id, cx_c).await;
let workspace_c = client_c.build_workspace(&project_c, cx_c);
active_call_c
.update(cx_c, |call, cx| call.set_location(Some(&project_c), cx))
.await
.unwrap();
drop(project_c);
// Client C also follows client A.
workspace_c
.update(cx_c, |workspace, cx| {
workspace
.toggle_follow(&ToggleFollow(peer_id_a), cx)
.unwrap()
})
.await
.unwrap();
cx_d.foreground().run_until_parked();
let active_call_d = cx_d.read(ActiveCall::global);
let project_d = client_d.build_remote_project(project_id, cx_d).await;
let workspace_d = client_d.build_workspace(&project_d, cx_d);
active_call_d
.update(cx_d, |call, cx| call.set_location(Some(&project_d), cx))
.await
.unwrap();
drop(project_d);
// All clients see that clients B and C are following client A.
cx_c.foreground().run_until_parked();
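// Note: followers_for(leader, project_id) is compared with assert_eq against a
// fixed slice, so it evidently returns followers in a deterministic order rather
// than as a set; the same pattern is used for all follower checks below.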
for (name, active_call, cx) in [
("A", &active_call_a, &cx_a),
("B", &active_call_b, &cx_b),
("C", &active_call_c, &cx_c),
("D", &active_call_d, &cx_d),
] {
active_call.read_with(*cx, |call, cx| {
let room = call.room().unwrap().read(cx);
assert_eq!(
room.followers_for(peer_id_a, project_id),
&[peer_id_b, peer_id_c],
"checking followers for A as {name}"
);
});
}
// Client C unfollows client A.
workspace_c.update(cx_c, |workspace, cx| {
workspace.toggle_follow(&ToggleFollow(peer_id_a), cx);
});
// All clients see that client B is following client A.
cx_c.foreground().run_until_parked();
for (name, active_call, cx) in [
("A", &active_call_a, &cx_a),
("B", &active_call_b, &cx_b),
("C", &active_call_c, &cx_c),
("D", &active_call_d, &cx_d),
] {
active_call.read_with(*cx, |call, cx| {
let room = call.room().unwrap().read(cx);
assert_eq!(
room.followers_for(peer_id_a, project_id),
&[peer_id_b],
"checking followers for A as {name}"
);
});
}
// Client C re-follows client A.
workspace_c.update(cx_c, |workspace, cx| {
workspace.toggle_follow(&ToggleFollow(peer_id_a), cx);
});
// All clients see that clients B and C are following client A.
cx_c.foreground().run_until_parked();
for (name, active_call, cx) in [
("A", &active_call_a, &cx_a),
("B", &active_call_b, &cx_b),
("C", &active_call_c, &cx_c),
("D", &active_call_d, &cx_d),
] {
active_call.read_with(*cx, |call, cx| {
let room = call.room().unwrap().read(cx);
assert_eq!(
room.followers_for(peer_id_a, project_id),
&[peer_id_b, peer_id_c],
"checking followers for A as {name}"
);
});
}
// Client D follows client C.
workspace_d
.update(cx_d, |workspace, cx| {
workspace
.toggle_follow(&ToggleFollow(peer_id_c), cx)
.unwrap()
})
.await
.unwrap();
// All clients see that D is following C
cx_d.foreground().run_until_parked();
for (name, active_call, cx) in [
("A", &active_call_a, &cx_a),
("B", &active_call_b, &cx_b),
("C", &active_call_c, &cx_c),
("D", &active_call_d, &cx_d),
] {
active_call.read_with(*cx, |call, cx| {
let room = call.room().unwrap().read(cx);
assert_eq!(
room.followers_for(peer_id_c, project_id),
&[peer_id_d],
"checking followers for C as {name}"
);
});
}
// Client C closes the project.
cx_c.drop_last(workspace_c);
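// drop_last presumably drops the last strong handle to workspace_c, simulating
// client C closing their workspace window.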
// Clients A and B see that client B is following A, and client C is not present in the followers.
cx_c.foreground().run_until_parked();
for (name, active_call, cx) in [("A", &active_call_a, &cx_a), ("B", &active_call_b, &cx_b)] {
active_call.read_with(*cx, |call, cx| {
let room = call.room().unwrap().read(cx);
assert_eq!(
room.followers_for(peer_id_a, project_id),
&[peer_id_b],
"checking followers for A as {name}"
);
});
}
// All clients see that no one is following client C
for (name, active_call, cx) in [
("A", &active_call_a, &cx_a),
("B", &active_call_b, &cx_b),
("C", &active_call_c, &cx_c),
("D", &active_call_d, &cx_d),
] {
active_call.read_with(*cx, |call, cx| {
let room = call.room().unwrap().read(cx);
assert_eq!(
room.followers_for(peer_id_c, project_id),
&[],
"checking followers for C as {name}"
);
});
}
let editor_b2 = workspace_b.read_with(cx_b, |workspace, cx| {
workspace
.active_item(cx)
@@ -6028,14 +6193,14 @@ async fn test_following(
workspace_a
.update(cx_a, |workspace, cx| {
workspace
.toggle_follow(&ToggleFollow(client_b_id), cx)
.toggle_follow(&ToggleFollow(peer_id_b), cx)
.unwrap()
})
.await
.unwrap();
assert_eq!(
workspace_a.read_with(cx_a, |workspace, _| workspace.leader_for_pane(&pane_a)),
Some(client_b_id)
Some(peer_id_b)
);
assert_eq!(
workspace_a.read_with(cx_a, |workspace, cx| workspace
@@ -6107,7 +6272,7 @@ async fn test_following(
);
// Following interrupts when client B disconnects.
client_b.disconnect(&cx_b.to_async()).unwrap();
client_b.disconnect(&cx_b.to_async());
deterministic.advance_clock(RECONNECT_TIMEOUT);
assert_eq!(
workspace_a.read_with(cx_a, |workspace, _| workspace.leader_for_pane(&pane_a)),
@@ -6115,6 +6280,99 @@ async fn test_following(
);
}
#[gpui::test(iterations = 10)]
async fn test_join_call_after_screen_was_shared(
deterministic: Arc<Deterministic>,
cx_a: &mut TestAppContext,
cx_b: &mut TestAppContext,
) {
deterministic.forbid_parking();
let mut server = TestServer::start(&deterministic).await;
let client_a = server.create_client(cx_a, "user_a").await;
let client_b = server.create_client(cx_b, "user_b").await;
server
.make_contacts(&mut [(&client_a, cx_a), (&client_b, cx_b)])
.await;
let active_call_a = cx_a.read(ActiveCall::global);
let active_call_b = cx_b.read(ActiveCall::global);
// Call user B from client A.
active_call_a
.update(cx_a, |call, cx| {
call.invite(client_b.user_id().unwrap(), None, cx)
})
.await
.unwrap();
let room_a = active_call_a.read_with(cx_a, |call, _| call.room().unwrap().clone());
deterministic.run_until_parked();
assert_eq!(
room_participants(&room_a, cx_a),
RoomParticipants {
remote: Default::default(),
pending: vec!["user_b".to_string()]
}
);
// User B receives the call.
let mut incoming_call_b = active_call_b.read_with(cx_b, |call, _| call.incoming());
let call_b = incoming_call_b.next().await.unwrap().unwrap();
assert_eq!(call_b.calling_user.github_login, "user_a");
// User A shares their screen
let display = MacOSDisplay::new();
active_call_a
.update(cx_a, |call, cx| {
call.room().unwrap().update(cx, |room, cx| {
room.set_display_sources(vec![display.clone()]);
room.share_screen(cx)
})
})
.await
.unwrap();
client_b.user_store.update(cx_b, |user_store, _| {
user_store.clear_cache();
});
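// Clearing B's cached user data here presumably forces participant metadata to be
// re-fetched on join, so the test exercises joining a room in which a screen-share
// track was published before the join.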
// User B joins the room
active_call_b
.update(cx_b, |call, cx| call.accept_incoming(cx))
.await
.unwrap();
let room_b = active_call_b.read_with(cx_b, |call, _| call.room().unwrap().clone());
assert!(incoming_call_b.next().await.unwrap().is_none());
deterministic.run_until_parked();
assert_eq!(
room_participants(&room_a, cx_a),
RoomParticipants {
remote: vec!["user_b".to_string()],
pending: vec![],
}
);
assert_eq!(
room_participants(&room_b, cx_b),
RoomParticipants {
remote: vec!["user_a".to_string()],
pending: vec![],
}
);
// Ensure User B sees User A's screenshare.
room_b.read_with(cx_b, |room, _| {
assert_eq!(
room.remote_participants()
.get(&client_a.user_id().unwrap())
.unwrap()
.tracks
.len(),
1
);
});
}
#[gpui::test]
async fn test_following_tab_order(
deterministic: Arc<Deterministic>,

View file

@@ -554,7 +554,7 @@ async fn apply_client_operation(
}
log::info!("{}: hanging up", client.username);
active_call.update(cx, |call, cx| call.hang_up(cx))?;
active_call.update(cx, |call, cx| call.hang_up(cx)).await?;
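// As in the integration tests above, hang_up now appears to be asynchronous; the
// returned future is awaited before propagating any error with `?`.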
}
ClientOperation::InviteContactToCall { user_id } => {