Remove dev servers (#19638)

TODO:

- [ ] Check that workspace migration worked
- [ ] Add server migrations and make sure SeaORM files are in sync (maybe?)

Release Notes:

- N/A

---------

Co-authored-by: Conrad <conrad@zed.dev>
Co-authored-by: Conrad Irwin <conrad.irwin@gmail.com>
Mikayla Maki 2024-10-24 11:14:03 -07:00 committed by GitHub
parent b5f816dde5
commit 02718284ef
55 changed files with 391 additions and 5024 deletions


@@ -86,7 +86,6 @@ client = { workspace = true, features = ["test-support"] }
collab_ui = { workspace = true, features = ["test-support"] }
collections = { workspace = true, features = ["test-support"] }
ctor.workspace = true
dev_server_projects.workspace = true
editor = { workspace = true, features = ["test-support"] }
env_logger.workspace = true
file_finder.workspace = true
@@ -94,7 +93,6 @@ fs = { workspace = true, features = ["test-support"] }
git = { workspace = true, features = ["test-support"] }
git_hosting_providers.workspace = true
gpui = { workspace = true, features = ["test-support"] }
headless.workspace = true
hyper.workspace = true
indoc.workspace = true
language = { workspace = true, features = ["test-support"] }


@@ -1,5 +1,5 @@
use crate::{
db::{self, dev_server, AccessTokenId, Database, DevServerId, UserId},
db::{self, AccessTokenId, Database, UserId},
rpc::Principal,
AppState, Error, Result,
};
@@ -44,19 +44,10 @@ pub async fn validate_header<B>(mut req: Request<B>, next: Next<B>) -> impl Into
let first = auth_header.next().unwrap_or("");
if first == "dev-server-token" {
let dev_server_token = auth_header.next().ok_or_else(|| {
Error::http(
StatusCode::BAD_REQUEST,
"missing dev-server-token token in authorization header".to_string(),
)
})?;
let dev_server = verify_dev_server_token(dev_server_token, &state.db)
.await
.map_err(|e| Error::http(StatusCode::UNAUTHORIZED, format!("{}", e)))?;
req.extensions_mut()
.insert(Principal::DevServer(dev_server));
return Ok::<_, Error>(next.run(req).await);
Err(Error::http(
StatusCode::UNAUTHORIZED,
"Dev servers were removed in Zed 0.157 please upgrade to SSH remoting".to_string(),
))?;
}
let user_id = UserId(first.parse().map_err(|_| {
@@ -240,41 +231,6 @@ pub async fn verify_access_token(
})
}
pub fn generate_dev_server_token(id: usize, access_token: String) -> String {
format!("{}.{}", id, access_token)
}
pub async fn verify_dev_server_token(
dev_server_token: &str,
db: &Arc<Database>,
) -> anyhow::Result<dev_server::Model> {
let (id, token) = split_dev_server_token(dev_server_token)?;
let token_hash = hash_access_token(token);
let server = db.get_dev_server(id).await?;
if server
.hashed_token
.as_bytes()
.ct_eq(token_hash.as_ref())
.into()
{
Ok(server)
} else {
Err(anyhow!("wrong token for dev server"))
}
}
// a dev_server_token has the format <id>.<base64>. This is to make them
// relatively easy to copy/paste around.
pub fn split_dev_server_token(dev_server_token: &str) -> anyhow::Result<(DevServerId, &str)> {
let mut parts = dev_server_token.splitn(2, '.');
let id = DevServerId(parts.next().unwrap_or_default().parse()?);
let token = parts
.next()
.ok_or_else(|| anyhow!("invalid dev server token format"))?;
Ok((id, token))
}
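
For reference, a minimal self-contained sketch (not part of this diff) of the `<id>.<base64>` round trip that the removed `generate_dev_server_token`/`split_dev_server_token` helpers implemented. The `generate_token`/`split_token` names below are hypothetical stand-ins that only mirror the deleted code above:

```rust
// Hypothetical illustration of the removed dev server token format: "<id>.<base64>".
fn generate_token(id: u64, access_token: &str) -> String {
    format!("{}.{}", id, access_token)
}

// Split at the first '.' only, so the remainder is returned untouched.
fn split_token(token: &str) -> Option<(u64, &str)> {
    let mut parts = token.splitn(2, '.');
    let id = parts.next()?.parse().ok()?;
    Some((id, parts.next()?))
}

fn main() {
    let token = generate_token(42, "c2VjcmV0");
    assert_eq!(token, "42.c2VjcmV0");
    assert_eq!(split_token(&token), Some((42, "c2VjcmV0")));
}
```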
#[cfg(test)]
mod test {
use rand::thread_rng;


@@ -726,7 +726,6 @@ pub struct Project {
pub collaborators: Vec<ProjectCollaborator>,
pub worktrees: BTreeMap<u64, Worktree>,
pub language_servers: Vec<proto::LanguageServer>,
pub dev_server_project_id: Option<DevServerProjectId>,
}
pub struct ProjectCollaborator {


@@ -79,7 +79,6 @@ id_type!(ChannelChatParticipantId);
id_type!(ChannelId);
id_type!(ChannelMemberId);
id_type!(ContactId);
id_type!(DevServerId);
id_type!(ExtensionId);
id_type!(FlagId);
id_type!(FollowerId);
@@ -89,7 +88,6 @@ id_type!(NotificationId);
id_type!(NotificationKindId);
id_type!(ProjectCollaboratorId);
id_type!(ProjectId);
id_type!(DevServerProjectId);
id_type!(ReplicaId);
id_type!(RoomId);
id_type!(RoomParticipantId);
@@ -277,12 +275,6 @@ impl From<ChannelVisibility> for i32 {
}
}
#[derive(Copy, Clone, Debug, Serialize, PartialEq)]
pub enum PrincipalId {
UserId(UserId),
DevServerId(DevServerId),
}
/// Indicate whether a [Buffer] has permissions to edit.
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum Capability {


@@ -8,8 +8,6 @@ pub mod buffers;
pub mod channels;
pub mod contacts;
pub mod contributors;
pub mod dev_server_projects;
pub mod dev_servers;
pub mod embeddings;
pub mod extensions;
pub mod hosted_projects;


@@ -1,365 +1 @@
use anyhow::anyhow;
use rpc::{
proto::{self},
ConnectionId,
};
use sea_orm::{
ActiveModelTrait, ActiveValue, ColumnTrait, Condition, DatabaseTransaction, EntityTrait,
IntoActiveModel, ModelTrait, QueryFilter,
};
use crate::db::ProjectId;
use super::{
dev_server, dev_server_project, project, project_collaborator, worktree, Database, DevServerId,
DevServerProjectId, RejoinedProject, ResharedProject, ServerId, UserId,
};
impl Database {
pub async fn get_dev_server_project(
&self,
dev_server_project_id: DevServerProjectId,
) -> crate::Result<dev_server_project::Model> {
self.transaction(|tx| async move {
Ok(
dev_server_project::Entity::find_by_id(dev_server_project_id)
.one(&*tx)
.await?
.ok_or_else(|| {
anyhow!("no dev server project with id {}", dev_server_project_id)
})?,
)
})
.await
}
pub async fn get_projects_for_dev_server(
&self,
dev_server_id: DevServerId,
) -> crate::Result<Vec<proto::DevServerProject>> {
self.transaction(|tx| async move {
self.get_projects_for_dev_server_internal(dev_server_id, &tx)
.await
})
.await
}
pub async fn get_projects_for_dev_server_internal(
&self,
dev_server_id: DevServerId,
tx: &DatabaseTransaction,
) -> crate::Result<Vec<proto::DevServerProject>> {
let servers = dev_server_project::Entity::find()
.filter(dev_server_project::Column::DevServerId.eq(dev_server_id))
.find_also_related(project::Entity)
.all(tx)
.await?;
Ok(servers
.into_iter()
.map(|(dev_server_project, project)| dev_server_project.to_proto(project))
.collect())
}
pub async fn dev_server_project_ids_for_user(
&self,
user_id: UserId,
tx: &DatabaseTransaction,
) -> crate::Result<Vec<DevServerProjectId>> {
let dev_servers = dev_server::Entity::find()
.filter(dev_server::Column::UserId.eq(user_id))
.find_with_related(dev_server_project::Entity)
.all(tx)
.await?;
Ok(dev_servers
.into_iter()
.flat_map(|(_, projects)| projects.into_iter().map(|p| p.id))
.collect())
}
pub async fn owner_for_dev_server_project(
&self,
dev_server_project_id: DevServerProjectId,
tx: &DatabaseTransaction,
) -> crate::Result<UserId> {
let dev_server = dev_server_project::Entity::find_by_id(dev_server_project_id)
.find_also_related(dev_server::Entity)
.one(tx)
.await?
.and_then(|(_, dev_server)| dev_server)
.ok_or_else(|| anyhow!("no dev server project"))?;
Ok(dev_server.user_id)
}
pub async fn get_stale_dev_server_projects(
&self,
connection: ConnectionId,
) -> crate::Result<Vec<ProjectId>> {
self.transaction(|tx| async move {
let projects = project::Entity::find()
.filter(
Condition::all()
.add(project::Column::HostConnectionId.eq(connection.id))
.add(project::Column::HostConnectionServerId.eq(connection.owner_id)),
)
.all(&*tx)
.await?;
Ok(projects.into_iter().map(|p| p.id).collect())
})
.await
}
pub async fn create_dev_server_project(
&self,
dev_server_id: DevServerId,
path: &str,
user_id: UserId,
) -> crate::Result<(dev_server_project::Model, proto::DevServerProjectsUpdate)> {
self.transaction(|tx| async move {
let dev_server = dev_server::Entity::find_by_id(dev_server_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no dev server with id {}", dev_server_id))?;
if dev_server.user_id != user_id {
return Err(anyhow!("not your dev server"))?;
}
let project = dev_server_project::Entity::insert(dev_server_project::ActiveModel {
id: ActiveValue::NotSet,
dev_server_id: ActiveValue::Set(dev_server_id),
paths: ActiveValue::Set(dev_server_project::JSONPaths(vec![path.to_string()])),
})
.exec_with_returning(&*tx)
.await?;
let status = self
.dev_server_projects_update_internal(user_id, &tx)
.await?;
Ok((project, status))
})
.await
}
pub async fn update_dev_server_project(
&self,
id: DevServerProjectId,
paths: &[String],
user_id: UserId,
) -> crate::Result<(dev_server_project::Model, proto::DevServerProjectsUpdate)> {
self.transaction(move |tx| async move {
let paths = paths.to_owned();
let Some((project, Some(dev_server))) = dev_server_project::Entity::find_by_id(id)
.find_also_related(dev_server::Entity)
.one(&*tx)
.await?
else {
return Err(anyhow!("no such dev server project"))?;
};
if dev_server.user_id != user_id {
return Err(anyhow!("not your dev server"))?;
}
let mut project = project.into_active_model();
project.paths = ActiveValue::Set(dev_server_project::JSONPaths(paths));
let project = project.update(&*tx).await?;
let status = self
.dev_server_projects_update_internal(user_id, &tx)
.await?;
Ok((project, status))
})
.await
}
pub async fn delete_dev_server_project(
&self,
dev_server_project_id: DevServerProjectId,
dev_server_id: DevServerId,
user_id: UserId,
) -> crate::Result<(Vec<proto::DevServerProject>, proto::DevServerProjectsUpdate)> {
self.transaction(|tx| async move {
project::Entity::delete_many()
.filter(project::Column::DevServerProjectId.eq(dev_server_project_id))
.exec(&*tx)
.await?;
let result = dev_server_project::Entity::delete_by_id(dev_server_project_id)
.exec(&*tx)
.await?;
if result.rows_affected != 1 {
return Err(anyhow!(
"no dev server project with id {}",
dev_server_project_id
))?;
}
let status = self
.dev_server_projects_update_internal(user_id, &tx)
.await?;
let projects = self
.get_projects_for_dev_server_internal(dev_server_id, &tx)
.await?;
Ok((projects, status))
})
.await
}
pub async fn share_dev_server_project(
&self,
dev_server_project_id: DevServerProjectId,
dev_server_id: DevServerId,
connection: ConnectionId,
worktrees: &[proto::WorktreeMetadata],
) -> crate::Result<(
proto::DevServerProject,
UserId,
proto::DevServerProjectsUpdate,
)> {
self.transaction(|tx| async move {
let dev_server = dev_server::Entity::find_by_id(dev_server_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no dev server with id {}", dev_server_id))?;
let dev_server_project = dev_server_project::Entity::find_by_id(dev_server_project_id)
.one(&*tx)
.await?
.ok_or_else(|| {
anyhow!("no dev server project with id {}", dev_server_project_id)
})?;
if dev_server_project.dev_server_id != dev_server_id {
return Err(anyhow!("dev server project shared from wrong server"))?;
}
let project = project::ActiveModel {
room_id: ActiveValue::Set(None),
host_user_id: ActiveValue::Set(None),
host_connection_id: ActiveValue::set(Some(connection.id as i32)),
host_connection_server_id: ActiveValue::set(Some(ServerId(
connection.owner_id as i32,
))),
id: ActiveValue::NotSet,
hosted_project_id: ActiveValue::Set(None),
dev_server_project_id: ActiveValue::Set(Some(dev_server_project_id)),
}
.insert(&*tx)
.await?;
if !worktrees.is_empty() {
worktree::Entity::insert_many(worktrees.iter().map(|worktree| {
worktree::ActiveModel {
id: ActiveValue::set(worktree.id as i64),
project_id: ActiveValue::set(project.id),
abs_path: ActiveValue::set(worktree.abs_path.clone()),
root_name: ActiveValue::set(worktree.root_name.clone()),
visible: ActiveValue::set(worktree.visible),
scan_id: ActiveValue::set(0),
completed_scan_id: ActiveValue::set(0),
}
}))
.exec(&*tx)
.await?;
}
let status = self
.dev_server_projects_update_internal(dev_server.user_id, &tx)
.await?;
Ok((
dev_server_project.to_proto(Some(project)),
dev_server.user_id,
status,
))
})
.await
}
pub async fn reshare_dev_server_projects(
&self,
reshared_projects: &Vec<proto::UpdateProject>,
dev_server_id: DevServerId,
connection: ConnectionId,
) -> crate::Result<Vec<ResharedProject>> {
self.transaction(|tx| async move {
let mut ret = Vec::new();
for reshared_project in reshared_projects {
let project_id = ProjectId::from_proto(reshared_project.project_id);
let (project, dev_server_project) = project::Entity::find_by_id(project_id)
.find_also_related(dev_server_project::Entity)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("project does not exist"))?;
if dev_server_project.map(|rp| rp.dev_server_id) != Some(dev_server_id) {
return Err(anyhow!("dev server project reshared from wrong server"))?;
}
let Ok(old_connection_id) = project.host_connection() else {
return Err(anyhow!("dev server project was not shared"))?;
};
project::Entity::update(project::ActiveModel {
id: ActiveValue::set(project_id),
host_connection_id: ActiveValue::set(Some(connection.id as i32)),
host_connection_server_id: ActiveValue::set(Some(ServerId(
connection.owner_id as i32,
))),
..Default::default()
})
.exec(&*tx)
.await?;
let collaborators = project
.find_related(project_collaborator::Entity)
.all(&*tx)
.await?;
self.update_project_worktrees(project_id, &reshared_project.worktrees, &tx)
.await?;
ret.push(super::ResharedProject {
id: project_id,
old_connection_id,
collaborators: collaborators
.iter()
.map(|collaborator| super::ProjectCollaborator {
connection_id: collaborator.connection(),
user_id: collaborator.user_id,
replica_id: collaborator.replica_id,
is_host: collaborator.is_host,
})
.collect(),
worktrees: reshared_project.worktrees.clone(),
});
}
Ok(ret)
})
.await
}
pub async fn rejoin_dev_server_projects(
&self,
rejoined_projects: &Vec<proto::RejoinProject>,
user_id: UserId,
connection_id: ConnectionId,
) -> crate::Result<Vec<RejoinedProject>> {
self.transaction(|tx| async move {
let mut ret = Vec::new();
for rejoined_project in rejoined_projects {
if let Some(project) = self
.rejoin_project_internal(&tx, rejoined_project, user_id, connection_id)
.await?
{
ret.push(project);
}
}
Ok(ret)
})
.await
}
}


@@ -1,222 +1 @@
use rpc::proto;
use sea_orm::{
ActiveValue, ColumnTrait, DatabaseTransaction, EntityTrait, IntoActiveModel, QueryFilter,
};
use super::{dev_server, dev_server_project, Database, DevServerId, UserId};
impl Database {
pub async fn get_dev_server(
&self,
dev_server_id: DevServerId,
) -> crate::Result<dev_server::Model> {
self.transaction(|tx| async move {
Ok(dev_server::Entity::find_by_id(dev_server_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow::anyhow!("no dev server with id {}", dev_server_id))?)
})
.await
}
pub async fn get_dev_server_for_user(
&self,
dev_server_id: DevServerId,
user_id: UserId,
) -> crate::Result<dev_server::Model> {
self.transaction(|tx| async move {
let server = dev_server::Entity::find_by_id(dev_server_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow::anyhow!("no dev server with id {}", dev_server_id))?;
if server.user_id != user_id {
return Err(anyhow::anyhow!(
"dev server {} is not owned by user {}",
dev_server_id,
user_id
))?;
}
Ok(server)
})
.await
}
pub async fn get_dev_servers(&self, user_id: UserId) -> crate::Result<Vec<dev_server::Model>> {
self.transaction(|tx| async move {
Ok(dev_server::Entity::find()
.filter(dev_server::Column::UserId.eq(user_id))
.all(&*tx)
.await?)
})
.await
}
pub async fn dev_server_projects_update(
&self,
user_id: UserId,
) -> crate::Result<proto::DevServerProjectsUpdate> {
self.transaction(|tx| async move {
self.dev_server_projects_update_internal(user_id, &tx).await
})
.await
}
pub async fn dev_server_projects_update_internal(
&self,
user_id: UserId,
tx: &DatabaseTransaction,
) -> crate::Result<proto::DevServerProjectsUpdate> {
let dev_servers = dev_server::Entity::find()
.filter(dev_server::Column::UserId.eq(user_id))
.all(tx)
.await?;
let dev_server_projects = dev_server_project::Entity::find()
.filter(
dev_server_project::Column::DevServerId
.is_in(dev_servers.iter().map(|d| d.id).collect::<Vec<_>>()),
)
.find_also_related(super::project::Entity)
.all(tx)
.await?;
Ok(proto::DevServerProjectsUpdate {
dev_servers: dev_servers
.into_iter()
.map(|d| d.to_proto(proto::DevServerStatus::Offline))
.collect(),
dev_server_projects: dev_server_projects
.into_iter()
.map(|(dev_server_project, project)| dev_server_project.to_proto(project))
.collect(),
})
}
pub async fn create_dev_server(
&self,
name: &str,
ssh_connection_string: Option<&str>,
hashed_access_token: &str,
user_id: UserId,
) -> crate::Result<(dev_server::Model, proto::DevServerProjectsUpdate)> {
self.transaction(|tx| async move {
if name.trim().is_empty() {
return Err(anyhow::anyhow!(proto::ErrorCode::Forbidden))?;
}
let dev_server = dev_server::Entity::insert(dev_server::ActiveModel {
id: ActiveValue::NotSet,
hashed_token: ActiveValue::Set(hashed_access_token.to_string()),
name: ActiveValue::Set(name.trim().to_string()),
user_id: ActiveValue::Set(user_id),
ssh_connection_string: ActiveValue::Set(
ssh_connection_string.map(ToOwned::to_owned),
),
})
.exec_with_returning(&*tx)
.await?;
let dev_server_projects = self
.dev_server_projects_update_internal(user_id, &tx)
.await?;
Ok((dev_server, dev_server_projects))
})
.await
}
pub async fn update_dev_server_token(
&self,
id: DevServerId,
hashed_token: &str,
user_id: UserId,
) -> crate::Result<proto::DevServerProjectsUpdate> {
self.transaction(|tx| async move {
let Some(dev_server) = dev_server::Entity::find_by_id(id).one(&*tx).await? else {
return Err(anyhow::anyhow!("no dev server with id {}", id))?;
};
if dev_server.user_id != user_id {
return Err(anyhow::anyhow!(proto::ErrorCode::Forbidden))?;
}
dev_server::Entity::update(dev_server::ActiveModel {
hashed_token: ActiveValue::Set(hashed_token.to_string()),
..dev_server.clone().into_active_model()
})
.exec(&*tx)
.await?;
let dev_server_projects = self
.dev_server_projects_update_internal(user_id, &tx)
.await?;
Ok(dev_server_projects)
})
.await
}
pub async fn rename_dev_server(
&self,
id: DevServerId,
name: &str,
ssh_connection_string: Option<&str>,
user_id: UserId,
) -> crate::Result<proto::DevServerProjectsUpdate> {
self.transaction(|tx| async move {
let Some(dev_server) = dev_server::Entity::find_by_id(id).one(&*tx).await? else {
return Err(anyhow::anyhow!("no dev server with id {}", id))?;
};
if dev_server.user_id != user_id || name.trim().is_empty() {
return Err(anyhow::anyhow!(proto::ErrorCode::Forbidden))?;
}
dev_server::Entity::update(dev_server::ActiveModel {
name: ActiveValue::Set(name.trim().to_string()),
ssh_connection_string: ActiveValue::Set(
ssh_connection_string.map(ToOwned::to_owned),
),
..dev_server.clone().into_active_model()
})
.exec(&*tx)
.await?;
let dev_server_projects = self
.dev_server_projects_update_internal(user_id, &tx)
.await?;
Ok(dev_server_projects)
})
.await
}
pub async fn delete_dev_server(
&self,
id: DevServerId,
user_id: UserId,
) -> crate::Result<proto::DevServerProjectsUpdate> {
self.transaction(|tx| async move {
let Some(dev_server) = dev_server::Entity::find_by_id(id).one(&*tx).await? else {
return Err(anyhow::anyhow!("no dev server with id {}", id))?;
};
if dev_server.user_id != user_id {
return Err(anyhow::anyhow!(proto::ErrorCode::Forbidden))?;
}
dev_server_project::Entity::delete_many()
.filter(dev_server_project::Column::DevServerId.eq(id))
.exec(&*tx)
.await?;
dev_server::Entity::delete(dev_server.into_active_model())
.exec(&*tx)
.await?;
let dev_server_projects = self
.dev_server_projects_update_internal(user_id, &tx)
.await?;
Ok(dev_server_projects)
})
.await
}
}


@@ -32,7 +32,6 @@ impl Database {
connection: ConnectionId,
worktrees: &[proto::WorktreeMetadata],
is_ssh_project: bool,
dev_server_project_id: Option<DevServerProjectId>,
) -> Result<TransactionGuard<(ProjectId, proto::Room)>> {
self.room_transaction(room_id, |tx| async move {
let participant = room_participant::Entity::find()
@@ -61,38 +60,6 @@ impl Database {
return Err(anyhow!("guests cannot share projects"))?;
}
if let Some(dev_server_project_id) = dev_server_project_id {
let project = project::Entity::find()
.filter(project::Column::DevServerProjectId.eq(Some(dev_server_project_id)))
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no remote project"))?;
let (_, dev_server) = dev_server_project::Entity::find_by_id(dev_server_project_id)
.find_also_related(dev_server::Entity)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no dev_server_project"))?;
if !dev_server.is_some_and(|dev_server| dev_server.user_id == participant.user_id) {
return Err(anyhow!("not your dev server"))?;
}
if project.room_id.is_some() {
return Err(anyhow!("project already shared"))?;
};
let project = project::Entity::update(project::ActiveModel {
room_id: ActiveValue::Set(Some(room_id)),
..project.into_active_model()
})
.exec(&*tx)
.await?;
let room = self.get_room(room_id, &tx).await?;
return Ok((project.id, room));
}
let project = project::ActiveModel {
room_id: ActiveValue::set(Some(participant.room_id)),
host_user_id: ActiveValue::set(Some(participant.user_id)),
@@ -102,7 +69,6 @@ impl Database {
))),
id: ActiveValue::NotSet,
hosted_project_id: ActiveValue::Set(None),
dev_server_project_id: ActiveValue::Set(None),
}
.insert(&*tx)
.await?;
@@ -156,7 +122,6 @@ impl Database {
&self,
project_id: ProjectId,
connection: ConnectionId,
user_id: Option<UserId>,
) -> Result<TransactionGuard<(bool, Option<proto::Room>, Vec<ConnectionId>)>> {
self.project_transaction(project_id, |tx| async move {
let guest_connection_ids = self.project_guest_connection_ids(project_id, &tx).await?;
@@ -172,25 +137,6 @@ impl Database {
if project.host_connection()? == connection {
return Ok((true, room, guest_connection_ids));
}
if let Some(dev_server_project_id) = project.dev_server_project_id {
if let Some(user_id) = user_id {
if user_id
!= self
.owner_for_dev_server_project(dev_server_project_id, &tx)
.await?
{
Err(anyhow!("cannot unshare a project hosted by another user"))?
}
project::Entity::update(project::ActiveModel {
room_id: ActiveValue::Set(None),
..project.into_active_model()
})
.exec(&*tx)
.await?;
return Ok((false, room, guest_connection_ids));
}
}
Err(anyhow!("cannot unshare a project hosted by another user"))?
})
.await
@@ -633,17 +579,6 @@ impl Database {
.await
}
pub async fn find_dev_server_project(&self, id: DevServerProjectId) -> Result<project::Model> {
self.transaction(|tx| async move {
Ok(project::Entity::find()
.filter(project::Column::DevServerProjectId.eq(id))
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?)
})
.await
}
/// Adds the given connection to the specified project
/// in the current room.
pub async fn join_project(
@@ -654,13 +589,7 @@ impl Database {
) -> Result<TransactionGuard<(Project, ReplicaId)>> {
self.project_transaction(project_id, |tx| async move {
let (project, role) = self
.access_project(
project_id,
connection,
PrincipalId::UserId(user_id),
Capability::ReadOnly,
&tx,
)
.access_project(project_id, connection, Capability::ReadOnly, &tx)
.await?;
self.join_project_internal(project, user_id, connection, role, &tx)
.await
@@ -851,7 +780,6 @@ impl Database {
worktree_id: None,
})
.collect(),
dev_server_project_id: project.dev_server_project_id,
};
Ok((project, replica_id as ReplicaId))
}
@@ -1007,29 +935,14 @@ impl Database {
&self,
project_id: ProjectId,
connection_id: ConnectionId,
principal_id: PrincipalId,
capability: Capability,
tx: &DatabaseTransaction,
) -> Result<(project::Model, ChannelRole)> {
let (mut project, dev_server_project) = project::Entity::find_by_id(project_id)
.find_also_related(dev_server_project::Entity)
let project = project::Entity::find_by_id(project_id)
.one(tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
let user_id = match principal_id {
PrincipalId::DevServerId(_) => {
if project
.host_connection()
.is_ok_and(|connection| connection == connection_id)
{
return Ok((project, ChannelRole::Admin));
}
return Err(anyhow!("not the project host"))?;
}
PrincipalId::UserId(user_id) => user_id,
};
let role_from_room = if let Some(room_id) = project.room_id {
room_participant::Entity::find()
.filter(room_participant::Column::RoomId.eq(room_id))
@@ -1040,34 +953,8 @@ impl Database {
} else {
None
};
let role_from_dev_server = if let Some(dev_server_project) = dev_server_project {
let dev_server = dev_server::Entity::find_by_id(dev_server_project.dev_server_id)
.one(tx)
.await?
.ok_or_else(|| anyhow!("no such channel"))?;
if user_id == dev_server.user_id {
// If the user left the room "uncleanly" they may rejoin the
// remote project before leave_room runs. IN that case kick
// the project out of the room pre-emptively.
if role_from_room.is_none() {
project = project::Entity::update(project::ActiveModel {
room_id: ActiveValue::Set(None),
..project.into_active_model()
})
.exec(tx)
.await?;
}
Some(ChannelRole::Admin)
} else {
None
}
} else {
None
};
let role = role_from_dev_server
.or(role_from_room)
.unwrap_or(ChannelRole::Banned);
let role = role_from_room.unwrap_or(ChannelRole::Banned);
match capability {
Capability::ReadWrite => {
@@ -1090,17 +977,10 @@ impl Database {
&self,
project_id: ProjectId,
connection_id: ConnectionId,
user_id: UserId,
) -> Result<ConnectionId> {
self.project_transaction(project_id, |tx| async move {
let (project, _) = self
.access_project(
project_id,
connection_id,
PrincipalId::UserId(user_id),
Capability::ReadOnly,
&tx,
)
.access_project(project_id, connection_id, Capability::ReadOnly, &tx)
.await?;
project.host_connection()
})
@@ -1113,17 +993,10 @@ impl Database {
&self,
project_id: ProjectId,
connection_id: ConnectionId,
user_id: UserId,
) -> Result<ConnectionId> {
self.project_transaction(project_id, |tx| async move {
let (project, _) = self
.access_project(
project_id,
connection_id,
PrincipalId::UserId(user_id),
Capability::ReadWrite,
&tx,
)
.access_project(project_id, connection_id, Capability::ReadWrite, &tx)
.await?;
project.host_connection()
})
@@ -1131,47 +1004,16 @@ impl Database {
.map(|guard| guard.into_inner())
}
/// Returns the host connection for a request to join a shared project.
pub async fn host_for_owner_project_request(
&self,
project_id: ProjectId,
_connection_id: ConnectionId,
user_id: UserId,
) -> Result<ConnectionId> {
self.project_transaction(project_id, |tx| async move {
let (project, dev_server_project) = project::Entity::find_by_id(project_id)
.find_also_related(dev_server_project::Entity)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such project"))?;
let Some(dev_server_project) = dev_server_project else {
return Err(anyhow!("not a dev server project"))?;
};
let dev_server = dev_server::Entity::find_by_id(dev_server_project.dev_server_id)
.one(&*tx)
.await?
.ok_or_else(|| anyhow!("no such dev server"))?;
if dev_server.user_id != user_id {
return Err(anyhow!("not your project"))?;
}
project.host_connection()
})
.await
.map(|guard| guard.into_inner())
}
pub async fn connections_for_buffer_update(
&self,
project_id: ProjectId,
principal_id: PrincipalId,
connection_id: ConnectionId,
capability: Capability,
) -> Result<TransactionGuard<(ConnectionId, Vec<ConnectionId>)>> {
self.project_transaction(project_id, |tx| async move {
// Authorize
let (project, _) = self
.access_project(project_id, connection_id, principal_id, capability, &tx)
.access_project(project_id, connection_id, capability, &tx)
.await?;
let host_connection_id = project.host_connection()?;


@@ -858,25 +858,6 @@ impl Database {
.all(&*tx)
.await?;
// if any project in the room has a remote-project-id that belongs to a dev server that this user owns.
let dev_server_projects_for_user = self
.dev_server_project_ids_for_user(leaving_participant.user_id, &tx)
.await?;
let dev_server_projects_to_unshare = project::Entity::find()
.filter(
Condition::all()
.add(project::Column::RoomId.eq(room_id))
.add(
project::Column::DevServerProjectId
.is_in(dev_server_projects_for_user.clone()),
),
)
.all(&*tx)
.await?
.into_iter()
.map(|project| project.id)
.collect::<HashSet<_>>();
let mut left_projects = HashMap::default();
let mut collaborators = project_collaborator::Entity::find()
.filter(project_collaborator::Column::ProjectId.is_in(project_ids))
@@ -899,9 +880,7 @@ impl Database {
left_project.connection_ids.push(collaborator_connection_id);
}
if (collaborator.is_host && collaborator.connection() == connection)
|| dev_server_projects_to_unshare.contains(&collaborator.project_id)
{
if collaborator.is_host && collaborator.connection() == connection {
left_project.should_unshare = true;
}
}
@@ -944,17 +923,6 @@ impl Database {
.exec(&*tx)
.await?;
if !dev_server_projects_to_unshare.is_empty() {
project::Entity::update_many()
.filter(project::Column::Id.is_in(dev_server_projects_to_unshare))
.set(project::ActiveModel {
room_id: ActiveValue::Set(None),
..Default::default()
})
.exec(&*tx)
.await?;
}
let (channel, room) = self.get_channel_room(room_id, &tx).await?;
let deleted = if room.participants.is_empty() {
let result = room::Entity::delete_by_id(room_id).exec(&*tx).await?;
@@ -1323,26 +1291,6 @@ impl Database {
project.worktree_root_names.push(db_worktree.root_name);
}
}
} else if let Some(dev_server_project_id) = db_project.dev_server_project_id {
let host = self
.owner_for_dev_server_project(dev_server_project_id, tx)
.await?;
if let Some((_, participant)) = participants
.iter_mut()
.find(|(_, v)| v.user_id == host.to_proto())
{
participant.projects.push(proto::ParticipantProject {
id: db_project.id.to_proto(),
worktree_root_names: Default::default(),
});
let project = participant.projects.last_mut().unwrap();
for db_worktree in db_worktrees {
if db_worktree.visible {
project.worktree_root_names.push(db_worktree.root_name);
}
}
}
}
}


@@ -13,8 +13,6 @@ pub mod channel_message;
pub mod channel_message_mention;
pub mod contact;
pub mod contributor;
pub mod dev_server;
pub mod dev_server_project;
pub mod embedding;
pub mod extension;
pub mod extension_version;


@@ -1,39 +0,0 @@
use crate::db::{DevServerId, UserId};
use rpc::proto;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "dev_servers")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: DevServerId,
pub name: String,
pub user_id: UserId,
pub hashed_token: String,
pub ssh_connection_string: Option<String>,
}
impl ActiveModelBehavior for ActiveModel {}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::dev_server_project::Entity")]
RemoteProject,
}
impl Related<super::dev_server_project::Entity> for Entity {
fn to() -> RelationDef {
Relation::RemoteProject.def()
}
}
impl Model {
pub fn to_proto(&self, status: proto::DevServerStatus) -> proto::DevServer {
proto::DevServer {
dev_server_id: self.id.to_proto(),
name: self.name.clone(),
status: status as i32,
ssh_connection_string: self.ssh_connection_string.clone(),
}
}
}


@@ -1,59 +0,0 @@
use super::project;
use crate::db::{DevServerId, DevServerProjectId};
use rpc::proto;
use sea_orm::{entity::prelude::*, FromJsonQueryResult};
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "dev_server_projects")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: DevServerProjectId,
pub dev_server_id: DevServerId,
pub paths: JSONPaths,
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, FromJsonQueryResult)]
pub struct JSONPaths(pub Vec<String>);
impl ActiveModelBehavior for ActiveModel {}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_one = "super::project::Entity")]
Project,
#[sea_orm(
belongs_to = "super::dev_server::Entity",
from = "Column::DevServerId",
to = "super::dev_server::Column::Id"
)]
DevServer,
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::Project.def()
}
}
impl Related<super::dev_server::Entity> for Entity {
fn to() -> RelationDef {
Relation::DevServer.def()
}
}
impl Model {
pub fn to_proto(&self, project: Option<project::Model>) -> proto::DevServerProject {
proto::DevServerProject {
id: self.id.to_proto(),
project_id: project.map(|p| p.id.to_proto()),
dev_server_id: self.dev_server_id.to_proto(),
path: self.paths().first().cloned().unwrap_or_default(),
paths: self.paths().clone(),
}
}
pub fn paths(&self) -> &Vec<String> {
&self.paths.0
}
}


@@ -1,4 +1,4 @@
use crate::db::{DevServerProjectId, HostedProjectId, ProjectId, Result, RoomId, ServerId, UserId};
use crate::db::{HostedProjectId, ProjectId, Result, RoomId, ServerId, UserId};
use anyhow::anyhow;
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
@@ -13,7 +13,6 @@ pub struct Model {
pub host_connection_id: Option<i32>,
pub host_connection_server_id: Option<ServerId>,
pub hosted_project_id: Option<HostedProjectId>,
pub dev_server_project_id: Option<DevServerProjectId>,
}
impl Model {
@@ -57,12 +56,6 @@ pub enum Relation {
to = "super::hosted_project::Column::Id"
)]
HostedProject,
#[sea_orm(
belongs_to = "super::dev_server_project::Entity",
from = "Column::DevServerProjectId",
to = "super::dev_server_project::Column::Id"
)]
RemoteProject,
}
impl Related<super::user::Entity> for Entity {
@@ -101,10 +94,4 @@ impl Related<super::hosted_project::Entity> for Entity {
}
}
impl Related<super::dev_server_project::Entity> for Entity {
fn to() -> RelationDef {
Relation::RemoteProject.def()
}
}
impl ActiveModelBehavior for ActiveModel {}


@@ -540,18 +540,18 @@ async fn test_project_count(db: &Arc<Database>) {
.unwrap();
assert_eq!(db.project_count_excluding_admins().await.unwrap(), 0);
db.share_project(room_id, ConnectionId { owner_id, id: 1 }, &[], false, None)
db.share_project(room_id, ConnectionId { owner_id, id: 1 }, &[], false)
.await
.unwrap();
assert_eq!(db.project_count_excluding_admins().await.unwrap(), 1);
db.share_project(room_id, ConnectionId { owner_id, id: 1 }, &[], false, None)
db.share_project(room_id, ConnectionId { owner_id, id: 1 }, &[], false)
.await
.unwrap();
assert_eq!(db.project_count_excluding_admins().await.unwrap(), 2);
// Projects shared by admins aren't counted.
db.share_project(room_id, ConnectionId { owner_id, id: 0 }, &[], false, None)
db.share_project(room_id, ConnectionId { owner_id, id: 0 }, &[], false)
.await
.unwrap();
assert_eq!(db.project_count_excluding_admins().await.unwrap(), 2);

File diff suppressed because it is too large.


@@ -1,7 +1,7 @@
use crate::db::{ChannelId, ChannelRole, DevServerId, PrincipalId, UserId};
use crate::db::{ChannelId, ChannelRole, UserId};
use anyhow::{anyhow, Result};
use collections::{BTreeMap, HashMap, HashSet};
use rpc::{proto, ConnectionId};
use rpc::ConnectionId;
use semantic_version::SemanticVersion;
use serde::Serialize;
use std::fmt;
@@ -11,9 +11,7 @@ use tracing::instrument;
pub struct ConnectionPool {
connections: BTreeMap<ConnectionId, Connection>,
connected_users: BTreeMap<UserId, ConnectedPrincipal>,
connected_dev_servers: BTreeMap<DevServerId, ConnectionId>,
channels: ChannelPool,
offline_dev_servers: HashSet<DevServerId>,
}
#[derive(Default, Serialize)]
@@ -32,13 +30,13 @@ impl fmt::Display for ZedVersion {
impl ZedVersion {
pub fn can_collaborate(&self) -> bool {
self.0 >= SemanticVersion::new(0, 151, 0)
self.0 >= SemanticVersion::new(0, 157, 0)
}
}
#[derive(Serialize)]
pub struct Connection {
pub principal_id: PrincipalId,
pub user_id: UserId,
pub admin: bool,
pub zed_version: ZedVersion,
}
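
The hunk above also bumps `ZedVersion::can_collaborate` so that only clients from 0.157 onward (the release in which dev servers were removed, per the new auth error earlier in this diff) may collaborate. A small self-contained sketch of what that gate implies, using stand-in types rather than collab's real `SemanticVersion`/`ZedVersion`:

```rust
// Stand-in types (assumptions, not collab's real definitions) to show the effect
// of requiring Zed >= 0.157.0 for collaboration.
#[derive(PartialEq, PartialOrd)]
struct SemanticVersion(u32, u32, u32);

impl SemanticVersion {
    fn new(major: u32, minor: u32, patch: u32) -> Self {
        SemanticVersion(major, minor, patch)
    }
}

struct ZedVersion(SemanticVersion);

impl ZedVersion {
    // Mirrors the updated check in this hunk.
    fn can_collaborate(&self) -> bool {
        self.0 >= SemanticVersion::new(0, 157, 0)
    }
}

fn main() {
    // A pre-removal client is refused...
    assert!(!ZedVersion(SemanticVersion::new(0, 156, 2)).can_collaborate());
    // ...while 0.157.0 and later can still collaborate.
    assert!(ZedVersion(SemanticVersion::new(0, 157, 0)).can_collaborate());
}
```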
@@ -47,7 +45,6 @@ impl ConnectionPool {
pub fn reset(&mut self) {
self.connections.clear();
self.connected_users.clear();
self.connected_dev_servers.clear();
self.channels.clear();
}
@@ -66,7 +63,7 @@ impl ConnectionPool {
self.connections.insert(
connection_id,
Connection {
principal_id: PrincipalId::UserId(user_id),
user_id,
admin,
zed_version,
},
@@ -75,25 +72,6 @@ impl ConnectionPool {
connected_user.connection_ids.insert(connection_id);
}
pub fn add_dev_server(
&mut self,
connection_id: ConnectionId,
dev_server_id: DevServerId,
zed_version: ZedVersion,
) {
self.connections.insert(
connection_id,
Connection {
principal_id: PrincipalId::DevServerId(dev_server_id),
admin: false,
zed_version,
},
);
self.connected_dev_servers
.insert(dev_server_id, connection_id);
}
#[instrument(skip(self))]
pub fn remove_connection(&mut self, connection_id: ConnectionId) -> Result<()> {
let connection = self
@@ -101,28 +79,18 @@ impl ConnectionPool {
.get_mut(&connection_id)
.ok_or_else(|| anyhow!("no such connection"))?;
match connection.principal_id {
PrincipalId::UserId(user_id) => {
let connected_user = self.connected_users.get_mut(&user_id).unwrap();
connected_user.connection_ids.remove(&connection_id);
if connected_user.connection_ids.is_empty() {
self.connected_users.remove(&user_id);
self.channels.remove_user(&user_id);
}
}
PrincipalId::DevServerId(dev_server_id) => {
self.connected_dev_servers.remove(&dev_server_id);
self.offline_dev_servers.remove(&dev_server_id);
}
}
let user_id = connection.user_id;
let connected_user = self.connected_users.get_mut(&user_id).unwrap();
connected_user.connection_ids.remove(&connection_id);
if connected_user.connection_ids.is_empty() {
self.connected_users.remove(&user_id);
self.channels.remove_user(&user_id);
};
self.connections.remove(&connection_id).unwrap();
Ok(())
}
pub fn set_dev_server_offline(&mut self, dev_server_id: DevServerId) {
self.offline_dev_servers.insert(dev_server_id);
}
pub fn connections(&self) -> impl Iterator<Item = &Connection> {
self.connections.values()
}
@@ -147,42 +115,6 @@ impl ConnectionPool {
.copied()
}
pub fn dev_server_status(&self, dev_server_id: DevServerId) -> proto::DevServerStatus {
if self.dev_server_connection_id(dev_server_id).is_some()
&& !self.offline_dev_servers.contains(&dev_server_id)
{
proto::DevServerStatus::Online
} else {
proto::DevServerStatus::Offline
}
}
pub fn dev_server_connection_id(&self, dev_server_id: DevServerId) -> Option<ConnectionId> {
self.connected_dev_servers.get(&dev_server_id).copied()
}
pub fn online_dev_server_connection_id(
&self,
dev_server_id: DevServerId,
) -> Result<ConnectionId> {
match self.connected_dev_servers.get(&dev_server_id) {
Some(cid) => Ok(*cid),
None => Err(anyhow!(proto::ErrorCode::DevServerOffline)),
}
}
pub fn dev_server_connection_id_supporting(
&self,
dev_server_id: DevServerId,
required: ZedVersion,
) -> Result<ConnectionId> {
match self.connected_dev_servers.get(&dev_server_id) {
Some(cid) if self.connections[cid].zed_version >= required => Ok(*cid),
Some(_) => Err(anyhow!(proto::ErrorCode::RemoteUpgradeRequired)),
None => Err(anyhow!(proto::ErrorCode::DevServerOffline)),
}
}
pub fn channel_user_ids(
&self,
channel_id: ChannelId,
@@ -227,39 +159,22 @@ impl ConnectionPool {
#[cfg(test)]
pub fn check_invariants(&self) {
for (connection_id, connection) in &self.connections {
match &connection.principal_id {
PrincipalId::UserId(user_id) => {
assert!(self
.connected_users
.get(user_id)
.unwrap()
.connection_ids
.contains(connection_id));
}
PrincipalId::DevServerId(dev_server_id) => {
assert_eq!(
self.connected_dev_servers.get(dev_server_id).unwrap(),
connection_id
);
}
}
assert!(self
.connected_users
.get(&connection.user_id)
.unwrap()
.connection_ids
.contains(connection_id));
}
for (user_id, state) in &self.connected_users {
for connection_id in &state.connection_ids {
assert_eq!(
self.connections.get(connection_id).unwrap().principal_id,
PrincipalId::UserId(*user_id)
self.connections.get(connection_id).unwrap().user_id,
*user_id
);
}
}
for (dev_server_id, connection_id) in &self.connected_dev_servers {
assert_eq!(
self.connections.get(connection_id).unwrap().principal_id,
PrincipalId::DevServerId(*dev_server_id)
);
}
}
}


@@ -8,7 +8,6 @@ mod channel_buffer_tests;
mod channel_guest_tests;
mod channel_message_tests;
mod channel_tests;
mod dev_server_tests;
mod editor_tests;
mod following_tests;
mod integration_tests;


@@ -1,643 +0,0 @@
use std::{path::Path, sync::Arc};
use call::ActiveCall;
use editor::Editor;
use fs::Fs;
use gpui::{TestAppContext, VisualTestContext, WindowHandle};
use rpc::{proto::DevServerStatus, ErrorCode, ErrorExt};
use serde_json::json;
use workspace::{AppState, Workspace};
use crate::tests::{following_tests::join_channel, TestServer};
use super::TestClient;
#[gpui::test]
async fn test_dev_server(cx: &mut gpui::TestAppContext, cx2: &mut gpui::TestAppContext) {
let (server, client) = TestServer::start1(cx).await;
let store = cx.update(|cx| dev_server_projects::Store::global(cx).clone());
let resp = store
.update(cx, |store, cx| {
store.create_dev_server("server-1".to_string(), None, cx)
})
.await
.unwrap();
store.update(cx, |store, _| {
assert_eq!(store.dev_servers().len(), 1);
assert_eq!(store.dev_servers()[0].name, "server-1");
assert_eq!(store.dev_servers()[0].status, DevServerStatus::Offline);
});
let dev_server = server.create_dev_server(resp.access_token, cx2).await;
cx.executor().run_until_parked();
store.update(cx, |store, _| {
assert_eq!(store.dev_servers()[0].status, DevServerStatus::Online);
});
dev_server
.fs()
.insert_tree(
"/remote",
json!({
"1.txt": "remote\nremote\nremote",
"2.js": "function two() { return 2; }",
"3.rs": "mod test",
}),
)
.await;
store
.update(cx, |store, cx| {
store.create_dev_server_project(
client::DevServerId(resp.dev_server_id),
"/remote".to_string(),
cx,
)
})
.await
.unwrap();
cx.executor().run_until_parked();
let remote_workspace = store
.update(cx, |store, cx| {
let projects = store.dev_server_projects();
assert_eq!(projects.len(), 1);
assert_eq!(projects[0].paths, vec!["/remote"]);
workspace::join_dev_server_project(
projects[0].id,
projects[0].project_id.unwrap(),
client.app_state.clone(),
None,
cx,
)
})
.await
.unwrap();
cx.executor().run_until_parked();
let cx = VisualTestContext::from_window(remote_workspace.into(), cx).as_mut();
cx.simulate_keystrokes("cmd-p 1 enter");
let editor = remote_workspace
.update(cx, |ws, cx| {
ws.active_item_as::<Editor>(cx).unwrap().clone()
})
.unwrap();
editor.update(cx, |ed, cx| {
assert_eq!(ed.text(cx).to_string(), "remote\nremote\nremote");
});
cx.simulate_input("wow!");
cx.simulate_keystrokes("cmd-s");
let content = dev_server
.fs()
.load(Path::new("/remote/1.txt"))
.await
.unwrap();
assert_eq!(content, "wow!remote\nremote\nremote\n");
}
#[gpui::test]
async fn test_dev_server_env_files(
cx1: &mut gpui::TestAppContext,
cx2: &mut gpui::TestAppContext,
cx3: &mut gpui::TestAppContext,
) {
let (server, client1, client2, channel_id) = TestServer::start2(cx1, cx2).await;
let (_dev_server, remote_workspace) =
create_dev_server_project(&server, client1.app_state.clone(), cx1, cx3).await;
cx1.executor().run_until_parked();
let cx1 = VisualTestContext::from_window(remote_workspace.into(), cx1).as_mut();
cx1.simulate_keystrokes("cmd-p . e enter");
let editor = remote_workspace
.update(cx1, |ws, cx| {
ws.active_item_as::<Editor>(cx).unwrap().clone()
})
.unwrap();
editor.update(cx1, |ed, cx| {
assert_eq!(ed.text(cx).to_string(), "SECRET");
});
cx1.update(|cx| {
workspace::join_channel(
channel_id,
client1.app_state.clone(),
Some(remote_workspace),
cx,
)
})
.await
.unwrap();
cx1.executor().run_until_parked();
remote_workspace
.update(cx1, |ws, cx| {
assert!(ws.project().read(cx).is_shared());
})
.unwrap();
join_channel(channel_id, &client2, cx2).await.unwrap();
cx2.executor().run_until_parked();
let (workspace2, cx2) = client2.active_workspace(cx2);
let editor = workspace2.update(cx2, |ws, cx| {
ws.active_item_as::<Editor>(cx).unwrap().clone()
});
// TODO: it'd be nice to hide .env files from other people
editor.update(cx2, |ed, cx| {
assert_eq!(ed.text(cx).to_string(), "SECRET");
});
}
async fn create_dev_server_project(
server: &TestServer,
client_app_state: Arc<AppState>,
cx: &mut TestAppContext,
cx_devserver: &mut TestAppContext,
) -> (TestClient, WindowHandle<Workspace>) {
let store = cx.update(|cx| dev_server_projects::Store::global(cx).clone());
let resp = store
.update(cx, |store, cx| {
store.create_dev_server("server-1".to_string(), None, cx)
})
.await
.unwrap();
let dev_server = server
.create_dev_server(resp.access_token, cx_devserver)
.await;
cx.executor().run_until_parked();
dev_server
.fs()
.insert_tree(
"/remote",
json!({
"1.txt": "remote\nremote\nremote",
".env": "SECRET",
}),
)
.await;
store
.update(cx, |store, cx| {
store.create_dev_server_project(
client::DevServerId(resp.dev_server_id),
"/remote".to_string(),
cx,
)
})
.await
.unwrap();
cx.executor().run_until_parked();
let workspace = store
.update(cx, |store, cx| {
let projects = store.dev_server_projects();
assert_eq!(projects.len(), 1);
assert_eq!(projects[0].paths, vec!["/remote"]);
workspace::join_dev_server_project(
projects[0].id,
projects[0].project_id.unwrap(),
client_app_state,
None,
cx,
)
})
.await
.unwrap();
cx.executor().run_until_parked();
(dev_server, workspace)
}
#[gpui::test]
async fn test_dev_server_leave_room(
cx1: &mut gpui::TestAppContext,
cx2: &mut gpui::TestAppContext,
cx3: &mut gpui::TestAppContext,
) {
let (server, client1, client2, channel_id) = TestServer::start2(cx1, cx2).await;
let (_dev_server, remote_workspace) =
create_dev_server_project(&server, client1.app_state.clone(), cx1, cx3).await;
cx1.update(|cx| {
workspace::join_channel(
channel_id,
client1.app_state.clone(),
Some(remote_workspace),
cx,
)
})
.await
.unwrap();
cx1.executor().run_until_parked();
remote_workspace
.update(cx1, |ws, cx| {
assert!(ws.project().read(cx).is_shared());
})
.unwrap();
join_channel(channel_id, &client2, cx2).await.unwrap();
cx2.executor().run_until_parked();
cx1.update(|cx| ActiveCall::global(cx).update(cx, |active_call, cx| active_call.hang_up(cx)))
.await
.unwrap();
cx1.executor().run_until_parked();
let (workspace, cx2) = client2.active_workspace(cx2);
cx2.update(|cx| assert!(workspace.read(cx).project().read(cx).is_disconnected(cx)));
}
#[gpui::test]
async fn test_dev_server_delete(
cx1: &mut gpui::TestAppContext,
cx2: &mut gpui::TestAppContext,
cx3: &mut gpui::TestAppContext,
) {
let (server, client1, client2, channel_id) = TestServer::start2(cx1, cx2).await;
let (_dev_server, remote_workspace) =
create_dev_server_project(&server, client1.app_state.clone(), cx1, cx3).await;
cx1.update(|cx| {
workspace::join_channel(
channel_id,
client1.app_state.clone(),
Some(remote_workspace),
cx,
)
})
.await
.unwrap();
cx1.executor().run_until_parked();
remote_workspace
.update(cx1, |ws, cx| {
assert!(ws.project().read(cx).is_shared());
})
.unwrap();
join_channel(channel_id, &client2, cx2).await.unwrap();
cx2.executor().run_until_parked();
cx1.update(|cx| {
dev_server_projects::Store::global(cx).update(cx, |store, cx| {
store.delete_dev_server_project(store.dev_server_projects().first().unwrap().id, cx)
})
})
.await
.unwrap();
cx1.executor().run_until_parked();
let (workspace, cx2) = client2.active_workspace(cx2);
cx2.update(|cx| assert!(workspace.read(cx).project().read(cx).is_disconnected(cx)));
cx1.update(|cx| {
dev_server_projects::Store::global(cx).update(cx, |store, _| {
assert_eq!(store.dev_server_projects().len(), 0);
})
})
}
#[gpui::test]
async fn test_dev_server_rename(
cx1: &mut gpui::TestAppContext,
cx2: &mut gpui::TestAppContext,
cx3: &mut gpui::TestAppContext,
) {
let (server, client1, client2, channel_id) = TestServer::start2(cx1, cx2).await;
let (_dev_server, remote_workspace) =
create_dev_server_project(&server, client1.app_state.clone(), cx1, cx3).await;
cx1.update(|cx| {
workspace::join_channel(
channel_id,
client1.app_state.clone(),
Some(remote_workspace),
cx,
)
})
.await
.unwrap();
cx1.executor().run_until_parked();
remote_workspace
.update(cx1, |ws, cx| {
assert!(ws.project().read(cx).is_shared());
})
.unwrap();
join_channel(channel_id, &client2, cx2).await.unwrap();
cx2.executor().run_until_parked();
cx1.update(|cx| {
dev_server_projects::Store::global(cx).update(cx, |store, cx| {
store.rename_dev_server(
store.dev_servers().first().unwrap().id,
"name-edited".to_string(),
None,
cx,
)
})
})
.await
.unwrap();
cx1.executor().run_until_parked();
cx1.update(|cx| {
dev_server_projects::Store::global(cx).update(cx, |store, _| {
assert_eq!(store.dev_servers().first().unwrap().name, "name-edited");
})
})
}
#[gpui::test]
async fn test_dev_server_refresh_access_token(
cx1: &mut gpui::TestAppContext,
cx2: &mut gpui::TestAppContext,
cx3: &mut gpui::TestAppContext,
cx4: &mut gpui::TestAppContext,
) {
let (server, client1, client2, channel_id) = TestServer::start2(cx1, cx2).await;
let (_dev_server, remote_workspace) =
create_dev_server_project(&server, client1.app_state.clone(), cx1, cx3).await;
cx1.update(|cx| {
workspace::join_channel(
channel_id,
client1.app_state.clone(),
Some(remote_workspace),
cx,
)
})
.await
.unwrap();
cx1.executor().run_until_parked();
remote_workspace
.update(cx1, |ws, cx| {
assert!(ws.project().read(cx).is_shared());
})
.unwrap();
join_channel(channel_id, &client2, cx2).await.unwrap();
cx2.executor().run_until_parked();
// Regenerate the access token
let new_token_response = cx1
.update(|cx| {
dev_server_projects::Store::global(cx).update(cx, |store, cx| {
store.regenerate_dev_server_token(store.dev_servers().first().unwrap().id, cx)
})
})
.await
.unwrap();
cx1.executor().run_until_parked();
// Assert that the other client was disconnected
let (workspace, cx2) = client2.active_workspace(cx2);
cx2.update(|cx| assert!(workspace.read(cx).project().read(cx).is_disconnected(cx)));
// Assert that the owner of the dev server does not see the dev server as online anymore
let (workspace, cx1) = client1.active_workspace(cx1);
cx1.update(|cx| {
assert!(workspace.read(cx).project().read(cx).is_disconnected(cx));
dev_server_projects::Store::global(cx).update(cx, |store, _| {
assert_eq!(
store.dev_servers().first().unwrap().status,
DevServerStatus::Offline
);
})
});
// Reconnect the dev server with the new token
let _dev_server = server
.create_dev_server(new_token_response.access_token, cx4)
.await;
cx1.executor().run_until_parked();
// Assert that the dev server is online again
cx1.update(|cx| {
dev_server_projects::Store::global(cx).update(cx, |store, _| {
assert_eq!(store.dev_servers().len(), 1);
assert_eq!(
store.dev_servers().first().unwrap().status,
DevServerStatus::Online
);
})
});
}
#[gpui::test]
async fn test_dev_server_reconnect(
cx1: &mut gpui::TestAppContext,
cx2: &mut gpui::TestAppContext,
cx3: &mut gpui::TestAppContext,
) {
let (mut server, client1) = TestServer::start1(cx1).await;
let channel_id = server
.make_channel("test", None, (&client1, cx1), &mut [])
.await;
let (_dev_server, remote_workspace) =
create_dev_server_project(&server, client1.app_state.clone(), cx1, cx3).await;
cx1.update(|cx| {
workspace::join_channel(
channel_id,
client1.app_state.clone(),
Some(remote_workspace),
cx,
)
})
.await
.unwrap();
cx1.executor().run_until_parked();
remote_workspace
.update(cx1, |ws, cx| {
assert!(ws.project().read(cx).is_shared());
})
.unwrap();
drop(client1);
let client2 = server.create_client(cx2, "user_a").await;
let store = cx2.update(|cx| dev_server_projects::Store::global(cx).clone());
store
.update(cx2, |store, cx| {
let projects = store.dev_server_projects();
workspace::join_dev_server_project(
projects[0].id,
projects[0].project_id.unwrap(),
client2.app_state.clone(),
None,
cx,
)
})
.await
.unwrap();
}
#[gpui::test]
async fn test_dev_server_restart(cx1: &mut gpui::TestAppContext, cx2: &mut gpui::TestAppContext) {
let (server, client1) = TestServer::start1(cx1).await;
let (_dev_server, remote_workspace) =
create_dev_server_project(&server, client1.app_state.clone(), cx1, cx2).await;
let cx = VisualTestContext::from_window(remote_workspace.into(), cx1).as_mut();
server.reset().await;
cx.run_until_parked();
cx.simulate_keystrokes("cmd-p 1 enter");
remote_workspace
.update(cx, |ws, cx| {
ws.active_item_as::<Editor>(cx)
.unwrap()
.update(cx, |ed, cx| {
assert_eq!(ed.text(cx).to_string(), "remote\nremote\nremote");
})
})
.unwrap();
}
#[gpui::test]
async fn test_create_dev_server_project_path_validation(
cx1: &mut gpui::TestAppContext,
cx2: &mut gpui::TestAppContext,
cx3: &mut gpui::TestAppContext,
) {
let (server, client1) = TestServer::start1(cx1).await;
let _channel_id = server
.make_channel("test", None, (&client1, cx1), &mut [])
.await;
// Creating a project with a path that does exist should not fail
let (_dev_server, _) =
create_dev_server_project(&server, client1.app_state.clone(), cx1, cx2).await;
cx1.executor().run_until_parked();
let store = cx1.update(|cx| dev_server_projects::Store::global(cx).clone());
let resp = store
.update(cx1, |store, cx| {
store.create_dev_server("server-2".to_string(), None, cx)
})
.await
.unwrap();
cx1.executor().run_until_parked();
let _dev_server = server.create_dev_server(resp.access_token, cx3).await;
cx1.executor().run_until_parked();
// Creating a remote project with a path that does not exist should fail
let result = store
.update(cx1, |store, cx| {
store.create_dev_server_project(
client::DevServerId(resp.dev_server_id),
"/notfound".to_string(),
cx,
)
})
.await;
cx1.executor().run_until_parked();
let error = result.unwrap_err();
assert!(matches!(
error.error_code(),
ErrorCode::DevServerProjectPathDoesNotExist
));
}
#[gpui::test]
async fn test_save_as_remote(cx1: &mut gpui::TestAppContext, cx2: &mut gpui::TestAppContext) {
let (server, client1) = TestServer::start1(cx1).await;
// Creating a project with a path that does exist should not fail
let (dev_server, remote_workspace) =
create_dev_server_project(&server, client1.app_state.clone(), cx1, cx2).await;
let mut cx = VisualTestContext::from_window(remote_workspace.into(), cx1);
cx.simulate_keystrokes("cmd-p 1 enter");
cx.simulate_keystrokes("cmd-shift-s");
cx.simulate_input("2.txt");
cx.simulate_keystrokes("enter");
cx.executor().run_until_parked();
let title = remote_workspace
.update(&mut cx, |ws, cx| {
let active_item = ws.active_item(cx).unwrap();
active_item.tab_description(0, cx).unwrap()
})
.unwrap();
assert_eq!(title, "2.txt");
let path = Path::new("/remote/2.txt");
assert_eq!(
dev_server.fs().load(path).await.unwrap(),
"remote\nremote\nremote"
);
}
#[gpui::test]
async fn test_new_file_remote(cx1: &mut gpui::TestAppContext, cx2: &mut gpui::TestAppContext) {
let (server, client1) = TestServer::start1(cx1).await;
// Creating a project with a path that does exist should not fail
let (dev_server, remote_workspace) =
create_dev_server_project(&server, client1.app_state.clone(), cx1, cx2).await;
let mut cx = VisualTestContext::from_window(remote_workspace.into(), cx1);
cx.simulate_keystrokes("cmd-n");
cx.simulate_input("new!");
cx.simulate_keystrokes("cmd-shift-s");
cx.simulate_input("2.txt");
cx.simulate_keystrokes("enter");
cx.executor().run_until_parked();
let title = remote_workspace
.update(&mut cx, |ws, cx| {
ws.active_item(cx).unwrap().tab_description(0, cx).unwrap()
})
.unwrap();
assert_eq!(title, "2.txt");
let path = Path::new("/remote/2.txt");
assert_eq!(dev_server.fs().load(path).await.unwrap(), "new!");
}


@@ -1,5 +1,4 @@
use crate::{
auth::split_dev_server_token,
db::{tests::TestDb, NewUserParams, UserId},
executor::Executor,
rpc::{Principal, Server, ZedVersion, CLEANUP_TIMEOUT, RECONNECT_TIMEOUT},
@@ -204,7 +203,7 @@ impl TestServer {
.override_authenticate(move |cx| {
cx.spawn(|_| async move {
let access_token = "the-token".to_string();
Ok(Credentials::User {
Ok(Credentials {
user_id: user_id.to_proto(),
access_token,
})
@@ -213,7 +212,7 @@ impl TestServer {
.override_establish_connection(move |credentials, cx| {
assert_eq!(
credentials,
&Credentials::User {
&Credentials {
user_id: user_id.0 as u64,
access_token: "the-token".into()
}
@@ -297,7 +296,6 @@ impl TestServer {
collab_ui::init(&app_state, cx);
file_finder::init(cx);
menu::init();
dev_server_projects::init(client.clone(), cx);
settings::KeymapFile::load_asset(os_keymap, cx).unwrap();
language_model::LanguageModelRegistry::test(cx);
assistant::context_store::init(&client.clone().into());
@@ -319,135 +317,6 @@ impl TestServer {
client
}
pub async fn create_dev_server(
&self,
access_token: String,
cx: &mut TestAppContext,
) -> TestClient {
cx.update(|cx| {
if cx.has_global::<SettingsStore>() {
panic!("Same cx used to create two test clients")
}
let settings = SettingsStore::test(cx);
cx.set_global(settings);
release_channel::init(SemanticVersion::default(), cx);
client::init_settings(cx);
});
let (dev_server_id, _) = split_dev_server_token(&access_token).unwrap();
let clock = Arc::new(FakeSystemClock::default());
let http = FakeHttpClient::with_404_response();
let mut client = cx.update(|cx| Client::new(clock, http.clone(), cx));
let server = self.server.clone();
let db = self.app_state.db.clone();
let connection_killers = self.connection_killers.clone();
let forbid_connections = self.forbid_connections.clone();
Arc::get_mut(&mut client)
.unwrap()
.set_id(1)
.set_dev_server_token(client::DevServerToken(access_token.clone()))
.override_establish_connection(move |credentials, cx| {
assert_eq!(
credentials,
&Credentials::DevServer {
token: client::DevServerToken(access_token.to_string())
}
);
let server = server.clone();
let db = db.clone();
let connection_killers = connection_killers.clone();
let forbid_connections = forbid_connections.clone();
cx.spawn(move |cx| async move {
if forbid_connections.load(SeqCst) {
Err(EstablishConnectionError::other(anyhow!(
"server is forbidding connections"
)))
} else {
let (client_conn, server_conn, killed) =
Connection::in_memory(cx.background_executor().clone());
let (connection_id_tx, connection_id_rx) = oneshot::channel();
let dev_server = db
.get_dev_server(dev_server_id)
.await
.expect("retrieving dev_server failed");
cx.background_executor()
.spawn(server.handle_connection(
server_conn,
"dev-server".to_string(),
Principal::DevServer(dev_server),
ZedVersion(SemanticVersion::new(1, 0, 0)),
None,
Some(connection_id_tx),
Executor::Deterministic(cx.background_executor().clone()),
))
.detach();
let connection_id = connection_id_rx.await.map_err(|e| {
EstablishConnectionError::Other(anyhow!(
"{} (is server shutting down?)",
e
))
})?;
connection_killers
.lock()
.insert(connection_id.into(), killed);
Ok(client_conn)
}
})
});
let fs = FakeFs::new(cx.executor());
let user_store = cx.new_model(|cx| UserStore::new(client.clone(), cx));
let workspace_store = cx.new_model(|cx| WorkspaceStore::new(client.clone(), cx));
let language_registry = Arc::new(LanguageRegistry::test(cx.executor()));
let session = cx.new_model(|cx| AppSession::new(Session::test(), cx));
let app_state = Arc::new(workspace::AppState {
client: client.clone(),
user_store: user_store.clone(),
workspace_store,
languages: language_registry,
fs: fs.clone(),
build_window_options: |_, _| Default::default(),
node_runtime: NodeRuntime::unavailable(),
session,
});
cx.update(|cx| {
theme::init(theme::LoadThemes::JustBase, cx);
Project::init(&client, cx);
client::init(&client, cx);
language::init(cx);
editor::init(cx);
workspace::init(app_state.clone(), cx);
call::init(client.clone(), user_store.clone(), cx);
channel::init(&client, user_store.clone(), cx);
notifications::init(client.clone(), user_store, cx);
collab_ui::init(&app_state, cx);
file_finder::init(cx);
menu::init();
headless::init(
client.clone(),
headless::AppState {
languages: app_state.languages.clone(),
user_store: app_state.user_store.clone(),
fs: fs.clone(),
node_runtime: app_state.node_runtime.clone(),
},
cx,
)
})
.await
.unwrap();
TestClient {
app_state,
username: "dev-server".to_string(),
channel_store: cx.read(ChannelStore::global).clone(),
notification_store: cx.read(NotificationStore::global).clone(),
state: Default::default(),
}
}
pub fn disconnect_client(&self, peer_id: PeerId) {
self.connection_killers
.lock()