Get zed.dev working with new collab backend

Co-Authored-By: Antonio Scandurra <me@as-cii.com>

parent: be040b60b7
commit: 2adb9fe472

10 changed files with 6632 additions and 6624 deletions
Cargo.lock (generated, 29 changes)
@@ -463,9 +463,11 @@ checksum = "f523b4e98ba6897ae90994bc18423d9877c54f9047b06a00ddc8122a957b1c70"
 dependencies = [
  "async-trait",
  "axum-core",
+ "base64 0.13.0",
  "bitflags",
  "bytes 1.0.1",
  "futures-util",
+ "headers",
  "http",
  "http-body",
  "hyper",
@@ -478,8 +480,10 @@ dependencies = [
  "serde",
  "serde_json",
  "serde_urlencoded",
+ "sha-1 0.10.0",
  "sync_wrapper",
  "tokio",
+ "tokio-tungstenite",
  "tower",
  "tower-http",
  "tower-layer",
@@ -2111,6 +2115,31 @@ dependencies = [
  "hashbrown 0.11.2",
 ]
 
+[[package]]
+name = "headers"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d"
+dependencies = [
+ "base64 0.13.0",
+ "bitflags",
+ "bytes 1.0.1",
+ "headers-core",
+ "http",
+ "httpdate",
+ "mime",
+ "sha-1 0.10.0",
+]
+
+[[package]]
+name = "headers-core"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429"
+dependencies = [
+ "http",
+]
+
 [[package]]
 name = "heck"
 version = "0.3.3"

@@ -11,7 +11,7 @@ use async_tungstenite::tungstenite::{
     error::Error as WebsocketError,
     http::{Request, StatusCode},
 };
-use futures::{future::LocalBoxFuture, FutureExt, StreamExt};
+use futures::{future::LocalBoxFuture, FutureExt, SinkExt, StreamExt, TryStreamExt};
 use gpui::{
     actions, AnyModelHandle, AnyViewHandle, AnyWeakModelHandle, AnyWeakViewHandle, AsyncAppContext,
     Entity, ModelContext, ModelHandle, MutableAppContext, Task, View, ViewContext, ViewHandle,
@@ -774,7 +774,7 @@ impl Client {
                 "Authorization",
                 format!("{} {}", credentials.user_id, credentials.access_token),
             )
-            .header("X-Zed-Protocol-Version", rpc::PROTOCOL_VERSION);
+            .header("x-zed-protocol-version", rpc::PROTOCOL_VERSION);
 
         let http = self.http.clone();
         cx.background().spawn(async move {
@@ -817,13 +817,21 @@ impl Client {
                     let request = request.uri(rpc_url.as_str()).body(())?;
                     let (stream, _) =
                         async_tungstenite::async_tls::client_async_tls(request, stream).await?;
-                    Ok(Connection::new(stream))
+                    Ok(Connection::new(
+                        stream
+                            .map_err(|error| anyhow!(error))
+                            .sink_map_err(|error| anyhow!(error)),
+                    ))
                 }
                 "http" => {
                     rpc_url.set_scheme("ws").unwrap();
                     let request = request.uri(rpc_url.as_str()).body(())?;
                     let (stream, _) = async_tungstenite::client_async(request, stream).await?;
-                    Ok(Connection::new(stream))
+                    Ok(Connection::new(
+                        stream
+                            .map_err(|error| anyhow!(error))
+                            .sink_map_err(|error| anyhow!(error)),
+                    ))
                 }
                 _ => Err(anyhow!("invalid rpc url: {}", rpc_url))?,
             }

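Note: Connection now carries anyhow::Error on both its sink and stream halves (see the conn.rs hunks below), so each transport erases its concrete WebsocketError at the call site with TryStreamExt::map_err and SinkExt::sink_map_err. A minimal sketch of that adapter, assuming Connection::new is bounded on anyhow::Error; erase_errors is a hypothetical helper, not zed code:

    use async_tungstenite::tungstenite::{Error as WebsocketError, Message};
    use futures::{Sink, SinkExt, Stream, TryStreamExt};

    // Erase a transport's concrete websocket error type to anyhow::Error so
    // one Connection type can wrap TLS, plain-TCP, and in-memory streams.
    fn erase_errors<S>(
        stream: S,
    ) -> impl Sink<Message, Error = anyhow::Error> + Stream<Item = Result<Message, anyhow::Error>>
    where
        S: Sink<Message, Error = WebsocketError>
            + Stream<Item = Result<Message, WebsocketError>>,
    {
        stream
            .map_err(|error| anyhow::anyhow!(error))
            .sink_map_err(|error| anyhow::anyhow!(error))
    }
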
@@ -20,12 +20,13 @@ util = { path = "../util" }
 anyhow = "1.0.40"
 async-trait = "0.1.50"
 async-tungstenite = "0.16"
-axum = { version = "0.5", features = ["json"] }
+axum = { version = "0.5", features = ["json", "headers", "ws"] }
 base64 = "0.13"
 envy = "0.4.2"
 env_logger = "0.8"
 futures = "0.3"
 json_env_logger = "0.1"
+lazy_static = "1.4"
 lipsum = { version = "0.8", optional = true }
 log = { version = "0.4.16", features = ["kv_unstable_serde"] }
 parking_lot = "0.11.1"

@@ -20,9 +20,11 @@ use tower::ServiceBuilder;
 pub fn routes(state: Arc<AppState>) -> Router<Body> {
     Router::new()
         .route("/users", get(get_users).post(create_user))
-        .route("/users/:id", put(update_user).delete(destroy_user))
-        .route("/users/:gh_login", get(get_user))
-        .route("/users/:gh_login/access_tokens", post(create_access_token))
+        .route(
+            "/users/:id",
+            put(update_user).delete(destroy_user).get(get_user),
+        )
+        .route("/users/:id/access_tokens", post(create_access_token))
         .layer(
             ServiceBuilder::new()
                 .layer(Extension(state))

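Note: axum 0.5 routers reject two registrations whose paths differ only in the parameter name ("/users/:id" vs "/users/:gh_login"), which is presumably why get_user is folded into the single "/users/:id" method router above. A sketch of the pattern with hypothetical handlers:

    use axum::{extract::Path, routing::put, Router};

    async fn get_user(Path(id): Path<String>) -> String {
        format!("get user {}", id)
    }

    async fn update_user(Path(id): Path<String>) -> String {
        format!("update user {}", id)
    }

    async fn destroy_user(Path(id): Path<String>) -> String {
        format!("destroy user {}", id)
    }

    // All three verbs share one MethodRouter, so the path (and its
    // parameter name) is registered exactly once.
    fn user_routes() -> Router {
        Router::new().route(
            "/users/:id",
            put(update_user).delete(destroy_user).get(get_user),
        )
    }
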
@@ -14,7 +14,7 @@ use scrypt::{
     Scrypt,
 };
 
-pub async fn validate_header<B>(req: Request<B>, next: Next<B>) -> impl IntoResponse {
+pub async fn validate_header<B>(mut req: Request<B>, next: Next<B>) -> impl IntoResponse {
     let mut auth_header = req
         .headers()
         .get(http::header::AUTHORIZATION)
@@ -50,14 +50,15 @@ pub async fn validate_header<B>(req: Request<B>, next: Next<B>) -> impl IntoResponse {
         }
     }
 
-    if !credentials_valid {
+    if credentials_valid {
+        req.extensions_mut().insert(user_id);
+        Ok::<_, Error>(next.run(req).await)
+    } else {
         Err(Error::Http(
             StatusCode::UNAUTHORIZED,
             "invalid credentials".to_string(),
-        ))?;
+        ))
     }
-
-    Ok::<_, Error>(next.run(req).await)
 }
 
 const MAX_ACCESS_TOKENS_TO_STORE: usize = 8;

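Note: on success the middleware now threads the authenticated user through the request instead of just passing the request along; `req` becomes `mut` so the user id can be inserted into the request extensions, where downstream handlers retrieve it. A self-contained sketch of that flow (UserId and the handler are hypothetical stand-ins, not the collab source):

    use axum::{
        http::{Request, StatusCode},
        middleware::Next,
        response::IntoResponse,
        Extension,
    };

    #[derive(Clone, Copy)]
    struct UserId(i32);

    // Middleware: validate, then stash the id in the request extensions.
    async fn require_user<B>(
        mut req: Request<B>,
        next: Next<B>,
    ) -> Result<impl IntoResponse, StatusCode> {
        let credentials_valid = true; // stand-in for the real hash check
        if credentials_valid {
            req.extensions_mut().insert(UserId(42));
            Ok(next.run(req).await)
        } else {
            Err(StatusCode::UNAUTHORIZED)
        }
    }

    // Downstream handler: read the id back with the Extension extractor.
    async fn whoami(Extension(UserId(id)): Extension<UserId>) -> String {
        format!("user {}", id)
    }
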
@@ -1,26 +1,10 @@
 use anyhow::Context;
 use anyhow::Result;
 use async_trait::async_trait;
-use futures::executor::block_on;
 use serde::Serialize;
 pub use sqlx::postgres::PgPoolOptions as DbOptions;
 use sqlx::{types::Uuid, FromRow};
 use time::OffsetDateTime;
-use tokio::task::yield_now;
 
-macro_rules! test_support {
-    ($self:ident, { $($token:tt)* }) => {{
-        let body = async {
-            $($token)*
-        };
-        if $self.test_mode {
-            yield_now().await;
-            block_on(body)
-        } else {
-            body.await
-        }
-    }};
-}
-
 #[async_trait]
 pub trait Db: Send + Sync {
@@ -78,7 +62,6 @@ pub trait Db: Send + Sync {
 
 pub struct PostgresDb {
     pool: sqlx::PgPool,
-    test_mode: bool,
 }
 
 impl PostgresDb {
@@ -88,10 +71,7 @@ impl PostgresDb {
             .connect(url)
             .await
             .context("failed to connect to postgres database")?;
-        Ok(Self {
-            pool,
-            test_mode: false,
-        })
+        Ok(Self { pool })
     }
 }
 
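Note: every hunk below strips the same wrapper. The removed test_support! shim existed so each query future could be driven to completion on the current thread in test mode, presumably because tests did not run on a runtime that could await sqlx directly; with the new collab backend every method simply awaits. A simplified stand-in for what the macro expanded to, not the zed source:

    // In test mode the body ran under a thread-local block_on instead of
    // the caller's executor (the real macro also called tokio's yield_now).
    async fn run_query(test_mode: bool) -> anyhow::Result<u32> {
        let body = async { Ok::<u32, anyhow::Error>(42) };
        if test_mode {
            futures::executor::block_on(body) // old test-mode path
        } else {
            body.await // now the only path
        }
    }
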
@@ -100,27 +80,23 @@ impl Db for PostgresDb {
     // users
 
     async fn create_user(&self, github_login: &str, admin: bool) -> Result<UserId> {
-        test_support!(self, {
-            let query = "
+        let query = "
             INSERT INTO users (github_login, admin)
             VALUES ($1, $2)
             ON CONFLICT (github_login) DO UPDATE SET github_login = excluded.github_login
             RETURNING id
         ";
         Ok(sqlx::query_scalar(query)
             .bind(github_login)
             .bind(admin)
             .fetch_one(&self.pool)
             .await
             .map(UserId)?)
-        })
     }
 
     async fn get_all_users(&self) -> Result<Vec<User>> {
-        test_support!(self, {
-            let query = "SELECT * FROM users ORDER BY github_login ASC";
-            Ok(sqlx::query_as(query).fetch_all(&self.pool).await?)
-        })
+        let query = "SELECT * FROM users ORDER BY github_login ASC";
+        Ok(sqlx::query_as(query).fetch_all(&self.pool).await?)
     }
 
     async fn get_user_by_id(&self, id: UserId) -> Result<Option<User>> {
@@ -130,57 +106,49 @@ impl Db for PostgresDb {
 
     async fn get_users_by_ids(&self, ids: Vec<UserId>) -> Result<Vec<User>> {
         let ids = ids.into_iter().map(|id| id.0).collect::<Vec<_>>();
-        test_support!(self, {
-            let query = "
+        let query = "
             SELECT users.*
             FROM users
             WHERE users.id = ANY ($1)
         ";
 
         Ok(sqlx::query_as(query)
             .bind(&ids)
             .fetch_all(&self.pool)
             .await?)
-        })
     }
 
     async fn get_user_by_github_login(&self, github_login: &str) -> Result<Option<User>> {
-        test_support!(self, {
-            let query = "SELECT * FROM users WHERE github_login = $1 LIMIT 1";
-            Ok(sqlx::query_as(query)
-                .bind(github_login)
-                .fetch_optional(&self.pool)
-                .await?)
-        })
+        let query = "SELECT * FROM users WHERE github_login = $1 LIMIT 1";
+        Ok(sqlx::query_as(query)
+            .bind(github_login)
+            .fetch_optional(&self.pool)
+            .await?)
     }
 
     async fn set_user_is_admin(&self, id: UserId, is_admin: bool) -> Result<()> {
-        test_support!(self, {
-            let query = "UPDATE users SET admin = $1 WHERE id = $2";
-            Ok(sqlx::query(query)
-                .bind(is_admin)
-                .bind(id.0)
-                .execute(&self.pool)
-                .await
-                .map(drop)?)
-        })
+        let query = "UPDATE users SET admin = $1 WHERE id = $2";
+        Ok(sqlx::query(query)
+            .bind(is_admin)
+            .bind(id.0)
+            .execute(&self.pool)
+            .await
+            .map(drop)?)
     }
 
     async fn destroy_user(&self, id: UserId) -> Result<()> {
-        test_support!(self, {
-            let query = "DELETE FROM access_tokens WHERE user_id = $1;";
-            sqlx::query(query)
-                .bind(id.0)
-                .execute(&self.pool)
-                .await
-                .map(drop)?;
-            let query = "DELETE FROM users WHERE id = $1;";
-            Ok(sqlx::query(query)
-                .bind(id.0)
-                .execute(&self.pool)
-                .await
-                .map(drop)?)
-        })
+        let query = "DELETE FROM access_tokens WHERE user_id = $1;";
+        sqlx::query(query)
+            .bind(id.0)
+            .execute(&self.pool)
+            .await
+            .map(drop)?;
+        let query = "DELETE FROM users WHERE id = $1;";
+        Ok(sqlx::query(query)
+            .bind(id.0)
+            .execute(&self.pool)
+            .await
+            .map(drop)?)
     }
 
     // access tokens
@@ -191,12 +159,11 @@ impl Db for PostgresDb {
         access_token_hash: &str,
         max_access_token_count: usize,
     ) -> Result<()> {
-        test_support!(self, {
-            let insert_query = "
+        let insert_query = "
             INSERT INTO access_tokens (user_id, hash)
             VALUES ($1, $2);
         ";
         let cleanup_query = "
             DELETE FROM access_tokens
             WHERE id IN (
                 SELECT id from access_tokens
@@ -206,35 +173,32 @@ impl Db for PostgresDb {
             )
         ";
 
         let mut tx = self.pool.begin().await?;
         sqlx::query(insert_query)
             .bind(user_id.0)
             .bind(access_token_hash)
             .execute(&mut tx)
             .await?;
         sqlx::query(cleanup_query)
             .bind(user_id.0)
             .bind(access_token_hash)
             .bind(max_access_token_count as u32)
             .execute(&mut tx)
             .await?;
         Ok(tx.commit().await?)
-        })
     }
 
     async fn get_access_token_hashes(&self, user_id: UserId) -> Result<Vec<String>> {
-        test_support!(self, {
-            let query = "
+        let query = "
             SELECT hash
             FROM access_tokens
             WHERE user_id = $1
             ORDER BY id DESC
         ";
         Ok(sqlx::query_scalar(query)
             .bind(user_id.0)
             .fetch_all(&self.pool)
             .await?)
-        })
     }
 
     // orgs
@@ -242,94 +206,83 @@ impl Db for PostgresDb {
     #[allow(unused)] // Help rust-analyzer
     #[cfg(any(test, feature = "seed-support"))]
     async fn find_org_by_slug(&self, slug: &str) -> Result<Option<Org>> {
-        test_support!(self, {
-            let query = "
+        let query = "
             SELECT *
             FROM orgs
             WHERE slug = $1
         ";
         Ok(sqlx::query_as(query)
             .bind(slug)
             .fetch_optional(&self.pool)
             .await?)
-        })
     }
 
     #[cfg(any(test, feature = "seed-support"))]
     async fn create_org(&self, name: &str, slug: &str) -> Result<OrgId> {
-        test_support!(self, {
-            let query = "
+        let query = "
             INSERT INTO orgs (name, slug)
             VALUES ($1, $2)
             RETURNING id
         ";
         Ok(sqlx::query_scalar(query)
             .bind(name)
             .bind(slug)
             .fetch_one(&self.pool)
             .await
             .map(OrgId)?)
-        })
     }
 
     #[cfg(any(test, feature = "seed-support"))]
     async fn add_org_member(&self, org_id: OrgId, user_id: UserId, is_admin: bool) -> Result<()> {
-        test_support!(self, {
-            let query = "
+        let query = "
             INSERT INTO org_memberships (org_id, user_id, admin)
             VALUES ($1, $2, $3)
             ON CONFLICT DO NOTHING
         ";
         Ok(sqlx::query(query)
             .bind(org_id.0)
             .bind(user_id.0)
             .bind(is_admin)
             .execute(&self.pool)
             .await
             .map(drop)?)
-        })
     }
 
     // channels
 
     #[cfg(any(test, feature = "seed-support"))]
     async fn create_org_channel(&self, org_id: OrgId, name: &str) -> Result<ChannelId> {
-        test_support!(self, {
-            let query = "
+        let query = "
             INSERT INTO channels (owner_id, owner_is_user, name)
             VALUES ($1, false, $2)
             RETURNING id
         ";
         Ok(sqlx::query_scalar(query)
             .bind(org_id.0)
             .bind(name)
             .fetch_one(&self.pool)
             .await
             .map(ChannelId)?)
-        })
     }
 
     #[allow(unused)] // Help rust-analyzer
     #[cfg(any(test, feature = "seed-support"))]
     async fn get_org_channels(&self, org_id: OrgId) -> Result<Vec<Channel>> {
-        test_support!(self, {
-            let query = "
+        let query = "
             SELECT *
             FROM channels
             WHERE
                 channels.owner_is_user = false AND
                 channels.owner_id = $1
         ";
         Ok(sqlx::query_as(query)
             .bind(org_id.0)
             .fetch_all(&self.pool)
             .await?)
-        })
     }
 
     async fn get_accessible_channels(&self, user_id: UserId) -> Result<Vec<Channel>> {
-        test_support!(self, {
-            let query = "
+        let query = "
             SELECT
                 channels.*
             FROM
@@ -338,11 +291,10 @@ impl Db for PostgresDb {
                 channel_memberships.user_id = $1 AND
                 channel_memberships.channel_id = channels.id
         ";
         Ok(sqlx::query_as(query)
             .bind(user_id.0)
             .fetch_all(&self.pool)
             .await?)
-        })
     }
 
     async fn can_user_access_channel(
@@ -350,20 +302,18 @@ impl Db for PostgresDb {
         user_id: UserId,
         channel_id: ChannelId,
     ) -> Result<bool> {
-        test_support!(self, {
-            let query = "
+        let query = "
             SELECT id
             FROM channel_memberships
             WHERE user_id = $1 AND channel_id = $2
             LIMIT 1
         ";
         Ok(sqlx::query_scalar::<_, i32>(query)
             .bind(user_id.0)
             .bind(channel_id.0)
             .fetch_optional(&self.pool)
             .await
             .map(|e| e.is_some())?)
-        })
     }
 
     #[cfg(any(test, feature = "seed-support"))]
@@ -373,20 +323,18 @@ impl Db for PostgresDb {
         user_id: UserId,
         is_admin: bool,
     ) -> Result<()> {
-        test_support!(self, {
-            let query = "
+        let query = "
             INSERT INTO channel_memberships (channel_id, user_id, admin)
             VALUES ($1, $2, $3)
             ON CONFLICT DO NOTHING
         ";
         Ok(sqlx::query(query)
             .bind(channel_id.0)
             .bind(user_id.0)
             .bind(is_admin)
             .execute(&self.pool)
             .await
             .map(drop)?)
-        })
     }
 
     // messages
@@ -399,23 +347,21 @@ impl Db for PostgresDb {
         timestamp: OffsetDateTime,
         nonce: u128,
     ) -> Result<MessageId> {
-        test_support!(self, {
-            let query = "
+        let query = "
             INSERT INTO channel_messages (channel_id, sender_id, body, sent_at, nonce)
             VALUES ($1, $2, $3, $4, $5)
             ON CONFLICT (nonce) DO UPDATE SET nonce = excluded.nonce
             RETURNING id
         ";
         Ok(sqlx::query_scalar(query)
             .bind(channel_id.0)
             .bind(sender_id.0)
             .bind(body)
             .bind(timestamp)
             .bind(Uuid::from_u128(nonce))
             .fetch_one(&self.pool)
             .await
             .map(MessageId)?)
-        })
     }
 
     async fn get_channel_messages(
@@ -424,8 +370,7 @@ impl Db for PostgresDb {
         count: usize,
         before_id: Option<MessageId>,
     ) -> Result<Vec<ChannelMessage>> {
-        test_support!(self, {
-            let query = r#"
+        let query = r#"
             SELECT * FROM (
                 SELECT
                     id, channel_id, sender_id, body, sent_at AT TIME ZONE 'UTC' as sent_at, nonce
@@ -439,35 +384,32 @@ impl Db for PostgresDb {
             ) as recent_messages
             ORDER BY id ASC
         "#;
         Ok(sqlx::query_as(query)
             .bind(channel_id.0)
             .bind(before_id.unwrap_or(MessageId::MAX))
             .bind(count as i64)
             .fetch_all(&self.pool)
             .await?)
-        })
     }
 
     #[cfg(test)]
     async fn teardown(&self, name: &str, url: &str) {
         use util::ResultExt;
 
-        test_support!(self, {
-            let query = "
+        let query = "
             SELECT pg_terminate_backend(pg_stat_activity.pid)
             FROM pg_stat_activity
             WHERE pg_stat_activity.datname = '{}' AND pid <> pg_backend_pid();
         ";
         sqlx::query(query)
             .bind(name)
             .execute(&self.pool)
             .await
             .log_err();
         self.pool.close().await;
         <sqlx::Postgres as sqlx::migrate::MigrateDatabase>::drop_database(url)
             .await
             .log_err();
-        })
     }
 }
 
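Note: get_channel_messages pages backwards with a keyset predicate: take the newest `count` rows strictly below `before_id` (defaulting to MessageId::MAX when there is no cursor), then re-sort ascending for display. The same query shape as a standalone sketch, with a hypothetical column set:

    use sqlx::PgPool;

    // Hypothetical row type mirroring the query above.
    #[derive(sqlx::FromRow)]
    struct Message {
        id: i64,
        body: String,
    }

    // Keyset pagination: newest `count` rows below the cursor, flipped back
    // into ascending order by the outer SELECT.
    async fn page_messages(
        pool: &PgPool,
        channel_id: i32,
        before_id: Option<i64>,
        count: i64,
    ) -> sqlx::Result<Vec<Message>> {
        let query = "
            SELECT * FROM (
                SELECT id, body
                FROM channel_messages
                WHERE channel_id = $1 AND id < $2
                ORDER BY id DESC
                LIMIT $3
            ) AS recent_messages
            ORDER BY id ASC
        ";
        sqlx::query_as(query)
            .bind(channel_id)
            .bind(before_id.unwrap_or(i64::MAX))
            .bind(count)
            .fetch_all(pool)
            .await
    }
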
@@ -705,12 +647,11 @@ pub mod tests {
         let name = format!("zed-test-{}", rng.gen::<u128>());
         let url = format!("postgres://postgres@localhost/{}", name);
         let migrations_path = Path::new(concat!(env!("CARGO_MANIFEST_DIR"), "/migrations"));
-        let db = block_on(async {
+        let db = futures::executor::block_on(async {
             Postgres::create_database(&url)
                 .await
                 .expect("failed to create test db");
-            let mut db = PostgresDb::new(&url, 5).await.unwrap();
-            db.test_mode = true;
+            let db = PostgresDb::new(&url, 5).await.unwrap();
             let migrator = Migrator::new(migrations_path).await.unwrap();
             migrator.run(&db.pool).await.unwrap();
             db
@@ -738,7 +679,7 @@ pub mod tests {
     impl Drop for TestDb {
         fn drop(&mut self) {
             if let Some(db) = self.db.take() {
-                block_on(db.teardown(&self.name, &self.url));
+                futures::executor::block_on(db.teardown(&self.name, &self.url));
             }
         }
     }
 
@@ -4,12 +4,14 @@ mod db;
 mod env;
 mod rpc;
 
-use ::rpc::Peer;
 use axum::{body::Body, http::StatusCode, response::IntoResponse, Router};
 use db::{Db, PostgresDb};
 
 use serde::Deserialize;
-use std::{net::TcpListener, sync::Arc};
+use std::{
+    net::{SocketAddr, TcpListener},
+    sync::Arc,
+};
 
 #[derive(Default, Deserialize)]
 pub struct Config {
@@ -56,17 +58,17 @@ async fn main() -> Result<()> {
         .expect("failed to bind TCP listener");
 
     let app = Router::<Body>::new()
-        .merge(api::routes(state))
-        .merge(rpc::routes(Peer::new()));
+        .merge(api::routes(state.clone()))
+        .merge(rpc::routes(state));
 
     axum::Server::from_tcp(listener)?
-        .serve(app.into_make_service())
+        .serve(app.into_make_service_with_connect_info::<SocketAddr>())
         .await?;
 
     Ok(())
 }
 
-pub type Result<T> = std::result::Result<T, Error>;
+pub type Result<T, E = Error> = std::result::Result<T, E>;
 
 pub enum Error {
     Http(StatusCode, String),

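Note: into_make_service_with_connect_info::<SocketAddr>() records each connection's peer address so handlers can read it back with the ConnectInfo extractor, which a WebSocket RPC server typically wants for logging and connection bookkeeping. A minimal sketch with a hypothetical /addr route:

    use std::net::SocketAddr;

    use axum::{extract::ConnectInfo, routing::get, Router};

    // ConnectInfo is only available because the app is served with
    // into_make_service_with_connect_info::<SocketAddr>() below.
    async fn peer_addr(ConnectInfo(addr): ConnectInfo<SocketAddr>) -> String {
        format!("connected from {}", addr)
    }

    #[tokio::main]
    async fn main() {
        let app = Router::new().route("/addr", get(peer_addr));
        axum::Server::bind(&"0.0.0.0:8080".parse().unwrap())
            .serve(app.into_make_service_with_connect_info::<SocketAddr>())
            .await
            .unwrap();
    }
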
crates/collab/src/rpc.rs: 12761 changes (diff suppressed because it is too large)
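Note: the rpc.rs diff is suppressed above, but with axum's "ws" feature enabled and rpc::routes now taking the app state, the route most likely centers on a WebSocket upgrade handler. A purely hypothetical sketch of that shape (route path, handler names, and the echo loop are stand-ins for the real Peer plumbing):

    use axum::{
        extract::ws::{Message, WebSocket, WebSocketUpgrade},
        response::IntoResponse,
        routing::get,
        Router,
    };

    async fn upgrade(ws: WebSocketUpgrade) -> impl IntoResponse {
        ws.on_upgrade(handle_socket)
    }

    async fn handle_socket(mut socket: WebSocket) {
        // Echo loop standing in for handing the socket to the RPC peer.
        while let Some(Ok(Message::Binary(bytes))) = socket.recv().await {
            if socket.send(Message::Binary(bytes)).await.is_err() {
                break;
            }
        }
    }

    fn routes() -> Router {
        Router::new().route("/rpc", get(upgrade))
    }
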
@@ -1,14 +1,14 @@
-use async_tungstenite::tungstenite::{Error as WebSocketError, Message as WebSocketMessage};
+use async_tungstenite::tungstenite::Message as WebSocketMessage;
 use futures::{SinkExt as _, StreamExt as _};
 
 pub struct Connection {
     pub(crate) tx:
-        Box<dyn 'static + Send + Unpin + futures::Sink<WebSocketMessage, Error = WebSocketError>>,
+        Box<dyn 'static + Send + Unpin + futures::Sink<WebSocketMessage, Error = anyhow::Error>>,
     pub(crate) rx: Box<
         dyn 'static
             + Send
             + Unpin
-            + futures::Stream<Item = Result<WebSocketMessage, WebSocketError>>,
+            + futures::Stream<Item = Result<WebSocketMessage, anyhow::Error>>,
     >,
 }
 
@@ -18,8 +18,8 @@ impl Connection {
         S: 'static
             + Send
             + Unpin
-            + futures::Sink<WebSocketMessage, Error = WebSocketError>
-            + futures::Stream<Item = Result<WebSocketMessage, WebSocketError>>,
+            + futures::Sink<WebSocketMessage, Error = anyhow::Error>
+            + futures::Stream<Item = Result<WebSocketMessage, anyhow::Error>>,
     {
         let (tx, rx) = stream.split();
         Self {
@@ -28,7 +28,7 @@ impl Connection {
         }
     }
 
-    pub async fn send(&mut self, message: WebSocketMessage) -> Result<(), WebSocketError> {
+    pub async fn send(&mut self, message: WebSocketMessage) -> Result<(), anyhow::Error> {
         self.tx.send(message).await
     }
 
@@ -54,40 +54,37 @@ impl Connection {
         killed: Arc<AtomicBool>,
         executor: Arc<gpui::executor::Background>,
     ) -> (
-        Box<dyn Send + Unpin + futures::Sink<WebSocketMessage, Error = WebSocketError>>,
-        Box<
-            dyn Send + Unpin + futures::Stream<Item = Result<WebSocketMessage, WebSocketError>>,
-        >,
+        Box<dyn Send + Unpin + futures::Sink<WebSocketMessage, Error = anyhow::Error>>,
+        Box<dyn Send + Unpin + futures::Stream<Item = Result<WebSocketMessage, anyhow::Error>>>,
     ) {
+        use anyhow::anyhow;
         use futures::channel::mpsc;
         use std::io::{Error, ErrorKind};
 
         let (tx, rx) = mpsc::unbounded::<WebSocketMessage>();
 
-        let tx = tx
-            .sink_map_err(|e| WebSocketError::from(Error::new(ErrorKind::Other, e)))
-            .with({
-                let killed = killed.clone();
-                let executor = Arc::downgrade(&executor);
-                move |msg| {
-                    let killed = killed.clone();
-                    let executor = executor.clone();
-                    Box::pin(async move {
-                        if let Some(executor) = executor.upgrade() {
-                            executor.simulate_random_delay().await;
-                        }
+        let tx = tx.sink_map_err(|error| anyhow!(error)).with({
+            let killed = killed.clone();
+            let executor = Arc::downgrade(&executor);
+            move |msg| {
+                let killed = killed.clone();
+                let executor = executor.clone();
+                Box::pin(async move {
+                    if let Some(executor) = executor.upgrade() {
+                        executor.simulate_random_delay().await;
+                    }
 
                     // Writes to a half-open TCP connection will error.
                     if killed.load(SeqCst) {
                         std::io::Result::Err(
                             Error::new(ErrorKind::Other, "connection lost").into(),
                         )?;
                     }
 
                     Ok(msg)
                 })
            }
        });
 
        let rx = rx.then({
            let killed = killed.clone();
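Note: the in_memory rewrite above leans on SinkExt::with, which runs an async transform over every outgoing item; that is where the test transport injects simulated delays and half-open-connection failures. Boxing the future with Box::pin keeps the combined sink Unpin so send() still works. A runnable sketch of the combinator (the uppercasing transform is an illustrative stand-in):

    use anyhow::anyhow;
    use futures::{channel::mpsc, SinkExt, StreamExt};

    fn main() -> anyhow::Result<()> {
        futures::executor::block_on(async {
            let (tx, mut rx) = mpsc::unbounded::<String>();

            // `with` maps each item through an async closure before it
            // reaches the underlying sink; a test transport can await
            // delays or return an error here.
            let mut tx = tx.sink_map_err(|error| anyhow!(error)).with(|msg: String| {
                Box::pin(async move { Ok::<_, anyhow::Error>(msg.to_uppercase()) })
            });

            tx.send("ping".to_string()).await?;
            drop(tx);
            assert_eq!(rx.next().await, Some("PING".to_string()));
            Ok(())
        })
    }
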
@@ -1,6 +1,6 @@
 use super::{ConnectionId, PeerId, TypedEnvelope};
-use anyhow::Result;
-use async_tungstenite::tungstenite::{Error as WebSocketError, Message as WebSocketMessage};
+use anyhow::{anyhow, Result};
+use async_tungstenite::tungstenite::Message as WebSocketMessage;
 use futures::{SinkExt as _, StreamExt as _};
 use prost::Message as _;
 use std::any::{Any, TypeId};
@@ -318,9 +318,9 @@ impl<S> MessageStream<S> {
 
 impl<S> MessageStream<S>
 where
-    S: futures::Sink<WebSocketMessage, Error = WebSocketError> + Unpin,
+    S: futures::Sink<WebSocketMessage, Error = anyhow::Error> + Unpin,
 {
-    pub async fn write(&mut self, message: Message) -> Result<(), WebSocketError> {
+    pub async fn write(&mut self, message: Message) -> Result<(), anyhow::Error> {
         #[cfg(any(test, feature = "test-support"))]
         const COMPRESSION_LEVEL: i32 = -7;
 
@@ -357,9 +357,9 @@ where
 
 impl<S> MessageStream<S>
 where
-    S: futures::Stream<Item = Result<WebSocketMessage, WebSocketError>> + Unpin,
+    S: futures::Stream<Item = Result<WebSocketMessage, anyhow::Error>> + Unpin,
 {
-    pub async fn read(&mut self) -> Result<Message, WebSocketError> {
+    pub async fn read(&mut self) -> Result<Message, anyhow::Error> {
         while let Some(bytes) = self.stream.next().await {
             match bytes? {
                 WebSocketMessage::Binary(bytes) => {
@@ -375,7 +375,7 @@ where
                 _ => {}
             }
         }
-        Err(WebSocketError::ConnectionClosed)
+        Err(anyhow!("connection closed"))
     }
 }