Merge branch 'main' into panels
commit 146809eef0
183 changed files with 10202 additions and 5720 deletions
@@ -3,7 +3,7 @@ authors = ["Nathan Sobo <nathan@zed.dev>"]
default-run = "collab"
edition = "2021"
name = "collab"
-version = "0.12.0"
+version = "0.12.4"
publish = false

[[bin]]
@@ -51,7 +51,7 @@ tokio = { version = "1", features = ["full"] }
tokio-tungstenite = "0.17"
tonic = "0.6"
tower = "0.4"
-toml = "0.5.8"
+toml.workspace = true
tracing = "0.1.34"
tracing-log = "0.1.3"
tracing-subscriber = { version = "0.3.11", features = ["env-filter", "json"] }
@@ -86,8 +86,8 @@ CREATE TABLE "worktree_repositories" (
    "project_id" INTEGER NOT NULL,
    "worktree_id" INTEGER NOT NULL,
    "work_directory_id" INTEGER NOT NULL,
    "scan_id" INTEGER NOT NULL,
    "branch" VARCHAR,
    "scan_id" INTEGER NOT NULL,
    "is_deleted" BOOL NOT NULL,
    PRIMARY KEY(project_id, worktree_id, work_directory_id),
    FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE,
@@ -96,6 +96,23 @@ CREATE TABLE "worktree_repositories" (
CREATE INDEX "index_worktree_repositories_on_project_id" ON "worktree_repositories" ("project_id");
CREATE INDEX "index_worktree_repositories_on_project_id_and_worktree_id" ON "worktree_repositories" ("project_id", "worktree_id");

CREATE TABLE "worktree_repository_statuses" (
    "project_id" INTEGER NOT NULL,
    "worktree_id" INTEGER NOT NULL,
    "work_directory_id" INTEGER NOT NULL,
    "repo_path" VARCHAR NOT NULL,
    "status" INTEGER NOT NULL,
    "scan_id" INTEGER NOT NULL,
    "is_deleted" BOOL NOT NULL,
    PRIMARY KEY(project_id, worktree_id, work_directory_id, repo_path),
    FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE,
    FOREIGN KEY(project_id, worktree_id, work_directory_id) REFERENCES worktree_entries (project_id, worktree_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_repository_statuses_on_project_id" ON "worktree_repository_statuses" ("project_id");
CREATE INDEX "index_worktree_repository_statuses_on_project_id_and_worktree_id" ON "worktree_repository_statuses" ("project_id", "worktree_id");
CREATE INDEX "index_worktree_repository_statuses_on_project_id_and_worktree_id_and_work_directory_id" ON "worktree_repository_statuses" ("project_id", "worktree_id", "work_directory_id");


CREATE TABLE "worktree_diagnostic_summaries" (
    "project_id" INTEGER NOT NULL,
    "worktree_id" INTEGER NOT NULL,
@@ -0,0 +1,15 @@
CREATE TABLE "worktree_repository_statuses" (
    "project_id" INTEGER NOT NULL,
    "worktree_id" INT8 NOT NULL,
    "work_directory_id" INT8 NOT NULL,
    "repo_path" VARCHAR NOT NULL,
    "status" INT8 NOT NULL,
    "scan_id" INT8 NOT NULL,
    "is_deleted" BOOL NOT NULL,
    PRIMARY KEY(project_id, worktree_id, work_directory_id, repo_path),
    FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE,
    FOREIGN KEY(project_id, worktree_id, work_directory_id) REFERENCES worktree_entries (project_id, worktree_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_wt_repos_statuses_on_project_id" ON "worktree_repository_statuses" ("project_id");
CREATE INDEX "index_wt_repos_statuses_on_project_id_and_wt_id" ON "worktree_repository_statuses" ("project_id", "worktree_id");
CREATE INDEX "index_wt_repos_statuses_on_project_id_and_wt_id_and_wd_id" ON "worktree_repository_statuses" ("project_id", "worktree_id", "work_directory_id");
@@ -15,6 +15,7 @@ mod worktree;
mod worktree_diagnostic_summary;
mod worktree_entry;
mod worktree_repository;
+mod worktree_repository_statuses;

use crate::executor::Executor;
use crate::{Error, Result};
@@ -1513,6 +1514,7 @@ impl Database {
        let mut db_entries = worktree_entry::Entity::find()
            .filter(
                Condition::all()
                    .add(worktree_entry::Column::ProjectId.eq(project.id))
                    .add(worktree_entry::Column::WorktreeId.eq(worktree.id))
                    .add(entry_filter),
            )

@@ -1552,6 +1554,7 @@ impl Database {
        let mut db_repositories = worktree_repository::Entity::find()
            .filter(
                Condition::all()
                    .add(worktree_repository::Column::ProjectId.eq(project.id))
                    .add(worktree_repository::Column::WorktreeId.eq(worktree.id))
                    .add(repository_entry_filter),
            )
@@ -1568,6 +1571,54 @@ impl Database {
                    worktree.updated_repositories.push(proto::RepositoryEntry {
                        work_directory_id: db_repository.work_directory_id as u64,
                        branch: db_repository.branch,
                        removed_repo_paths: Default::default(),
                        updated_statuses: Default::default(),
                    });
                }
            }
        }

        // Repository Status Entries
        for repository in worktree.updated_repositories.iter_mut() {
            let repository_status_entry_filter =
                if let Some(rejoined_worktree) = rejoined_worktree {
                    worktree_repository_statuses::Column::ScanId
                        .gt(rejoined_worktree.scan_id)
                } else {
                    worktree_repository_statuses::Column::IsDeleted.eq(false)
                };

            let mut db_repository_statuses =
                worktree_repository_statuses::Entity::find()
                    .filter(
                        Condition::all()
                            .add(
                                worktree_repository_statuses::Column::ProjectId
                                    .eq(project.id),
                            )
                            .add(
                                worktree_repository_statuses::Column::WorktreeId
                                    .eq(worktree.id),
                            )
                            .add(
                                worktree_repository_statuses::Column::WorkDirectoryId
                                    .eq(repository.work_directory_id),
                            )
                            .add(repository_status_entry_filter),
                    )
                    .stream(&*tx)
                    .await?;

            while let Some(db_status_entry) = db_repository_statuses.next().await {
                let db_status_entry = db_status_entry?;
                if db_status_entry.is_deleted {
                    repository
                        .removed_repo_paths
                        .push(db_status_entry.repo_path);
                } else {
                    repository.updated_statuses.push(proto::StatusEntry {
                        repo_path: db_status_entry.repo_path,
                        status: db_status_entry.status as i32,
                    });
                }
            }
@@ -2395,6 +2446,68 @@ impl Database {
            )
            .exec(&*tx)
            .await?;

            for repository in update.updated_repositories.iter() {
                if !repository.updated_statuses.is_empty() {
                    worktree_repository_statuses::Entity::insert_many(
                        repository.updated_statuses.iter().map(|status_entry| {
                            worktree_repository_statuses::ActiveModel {
                                project_id: ActiveValue::set(project_id),
                                worktree_id: ActiveValue::set(worktree_id),
                                work_directory_id: ActiveValue::set(
                                    repository.work_directory_id as i64,
                                ),
                                repo_path: ActiveValue::set(status_entry.repo_path.clone()),
                                status: ActiveValue::set(status_entry.status as i64),
                                scan_id: ActiveValue::set(update.scan_id as i64),
                                is_deleted: ActiveValue::set(false),
                            }
                        }),
                    )
                    .on_conflict(
                        OnConflict::columns([
                            worktree_repository_statuses::Column::ProjectId,
                            worktree_repository_statuses::Column::WorktreeId,
                            worktree_repository_statuses::Column::WorkDirectoryId,
                            worktree_repository_statuses::Column::RepoPath,
                        ])
                        .update_columns([
                            worktree_repository_statuses::Column::ScanId,
                            worktree_repository_statuses::Column::Status,
                            worktree_repository_statuses::Column::IsDeleted,
                        ])
                        .to_owned(),
                    )
                    .exec(&*tx)
                    .await?;
                }

                if !repository.removed_repo_paths.is_empty() {
                    worktree_repository_statuses::Entity::update_many()
                        .filter(
                            worktree_repository_statuses::Column::ProjectId
                                .eq(project_id)
                                .and(
                                    worktree_repository_statuses::Column::WorktreeId
                                        .eq(worktree_id),
                                )
                                .and(
                                    worktree_repository_statuses::Column::WorkDirectoryId
                                        .eq(repository.work_directory_id as i64),
                                )
                                .and(worktree_repository_statuses::Column::RepoPath.is_in(
                                    repository.removed_repo_paths.iter().map(String::as_str),
                                )),
                        )
                        .set(worktree_repository_statuses::ActiveModel {
                            is_deleted: ActiveValue::Set(true),
                            scan_id: ActiveValue::Set(update.scan_id as i64),
                            ..Default::default()
                        })
                        .exec(&*tx)
                        .await?;
                }
            }
        }

        if !update.removed_repositories.is_empty() {
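The insert_many / on_conflict chain above is SeaORM's upsert builder: OnConflict::columns names the key columns and update_columns lists what to overwrite when a row already exists, mirroring SQL's ON CONFLICT ... DO UPDATE. A minimal, self-contained sketch of the same pattern against a hypothetical status_entry entity (illustrative only, not part of this change):

use sea_orm::entity::prelude::*;
use sea_orm::sea_query::OnConflict;
use sea_orm::{ActiveValue, DatabaseConnection, DbErr};

mod status_entry {
    use sea_orm::entity::prelude::*;

    // Hypothetical table: one row per repo_path, with mutable status/scan_id columns.
    #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
    #[sea_orm(table_name = "status_entries")]
    pub struct Model {
        #[sea_orm(primary_key, auto_increment = false)]
        pub repo_path: String,
        pub status: i64,
        pub scan_id: i64,
    }

    #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
    pub enum Relation {}

    impl ActiveModelBehavior for ActiveModel {}
}

async fn upsert_statuses(
    db: &DatabaseConnection,
    entries: &[(String, i64)],
    scan_id: i64,
) -> Result<(), DbErr> {
    status_entry::Entity::insert_many(entries.iter().map(|(path, status)| {
        status_entry::ActiveModel {
            repo_path: ActiveValue::set(path.clone()),
            status: ActiveValue::set(*status),
            scan_id: ActiveValue::set(scan_id),
        }
    }))
    // On a primary-key collision, overwrite the mutable columns instead of failing.
    .on_conflict(
        OnConflict::column(status_entry::Column::RepoPath)
            .update_columns([
                status_entry::Column::Status,
                status_entry::Column::ScanId,
            ])
            .to_owned(),
    )
    .exec(db)
    .await?;
    Ok(())
}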
@@ -2645,10 +2758,42 @@ impl Database {
                if let Some(worktree) =
                    worktrees.get_mut(&(db_repository_entry.worktree_id as u64))
                {
-                   worktree.repository_entries.push(proto::RepositoryEntry {
-                       work_directory_id: db_repository_entry.work_directory_id as u64,
-                       branch: db_repository_entry.branch,
-                   });
+                   worktree.repository_entries.insert(
+                       db_repository_entry.work_directory_id as u64,
+                       proto::RepositoryEntry {
+                           work_directory_id: db_repository_entry.work_directory_id as u64,
+                           branch: db_repository_entry.branch,
+                           removed_repo_paths: Default::default(),
+                           updated_statuses: Default::default(),
+                       },
+                   );
                }
            }
        }

        {
            let mut db_status_entries = worktree_repository_statuses::Entity::find()
                .filter(
                    Condition::all()
                        .add(worktree_repository_statuses::Column::ProjectId.eq(project_id))
                        .add(worktree_repository_statuses::Column::IsDeleted.eq(false)),
                )
                .stream(&*tx)
                .await?;

            while let Some(db_status_entry) = db_status_entries.next().await {
                let db_status_entry = db_status_entry?;
                if let Some(worktree) = worktrees.get_mut(&(db_status_entry.worktree_id as u64))
                {
                    if let Some(repository_entry) = worktree
                        .repository_entries
                        .get_mut(&(db_status_entry.work_directory_id as u64))
                    {
                        repository_entry.updated_statuses.push(proto::StatusEntry {
                            repo_path: db_status_entry.repo_path,
                            status: db_status_entry.status as i32,
                        });
                    }
                }
            }
        }
@@ -3390,7 +3535,7 @@ pub struct Worktree {
    pub root_name: String,
    pub visible: bool,
    pub entries: Vec<proto::Entry>,
-   pub repository_entries: Vec<proto::RepositoryEntry>,
+   pub repository_entries: BTreeMap<u64, proto::RepositoryEntry>,
    pub diagnostic_summaries: Vec<proto::DiagnosticSummary>,
    pub scan_id: u64,
    pub completed_scan_id: u64,
crates/collab/src/db/worktree_repository_statuses.rs (new file, 23 lines)
@@ -0,0 +1,23 @@
use super::ProjectId;
use sea_orm::entity::prelude::*;

#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktree_repository_statuses")]
pub struct Model {
    #[sea_orm(primary_key)]
    pub project_id: ProjectId,
    #[sea_orm(primary_key)]
    pub worktree_id: i64,
    #[sea_orm(primary_key)]
    pub work_directory_id: i64,
    #[sea_orm(primary_key)]
    pub repo_path: String,
    pub status: i64,
    pub scan_id: i64,
    pub is_deleted: bool,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}

impl ActiveModelBehavior for ActiveModel {}
@@ -51,7 +51,7 @@ use std::{
        atomic::{AtomicBool, Ordering::SeqCst},
        Arc,
    },
-   time::Duration,
+   time::{Duration, Instant},
};
use tokio::sync::{watch, Semaphore};
use tower::ServiceBuilder;
@@ -397,10 +397,16 @@ impl Server {
                "message received"
            );
        });
+       let start_time = Instant::now();
        let future = (handler)(*envelope, session);
        async move {
-           if let Err(error) = future.await {
-               tracing::error!(%error, "error handling message");
+           let result = future.await;
+           let duration_ms = start_time.elapsed().as_micros() as f64 / 1000.0;
+           match result {
+               Err(error) => {
+                   tracing::error!(%error, ?duration_ms, "error handling message")
+               }
+               Ok(()) => tracing::info!(?duration_ms, "finished handling message"),
+           }
        }
        .instrument(span)
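For reference, a minimal standalone sketch of the timing introduced above, with a stand-in handler in place of the server's message future (the real code reports it through tracing spans). Instant::elapsed yields a Duration, and dividing its whole microseconds by 1000 gives fractional milliseconds:

use std::time::Instant;

// Stand-in for the real RPC handler future; only the timing pattern matters here.
fn handle_message() -> Result<(), String> {
    Ok(())
}

fn main() {
    let start_time = Instant::now();
    let result = handle_message();
    // Same conversion as above: whole microseconds -> fractional milliseconds.
    let duration_ms = start_time.elapsed().as_micros() as f64 / 1000.0;
    match result {
        Err(error) => eprintln!("error handling message ({duration_ms} ms): {error}"),
        Ok(()) => println!("finished handling message in {duration_ms} ms"),
    }
}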
@@ -1385,7 +1391,7 @@ async fn join_project(
        removed_entries: Default::default(),
        scan_id: worktree.scan_id,
        is_last_update: worktree.scan_id == worktree.completed_scan_id,
-       updated_repositories: worktree.repository_entries,
+       updated_repositories: worktree.repository_entries.into_values().collect(),
        removed_repositories: Default::default(),
    };
    for update in proto::split_worktree_update(message, MAX_CHUNK_SIZE) {
@@ -19,7 +19,7 @@ use gpui::{
use language::LanguageRegistry;
use parking_lot::Mutex;
use project::{Project, WorktreeId};
-use settings::Settings;
+use settings::SettingsStore;
use std::{
    cell::{Ref, RefCell, RefMut},
    env,

@@ -30,7 +30,6 @@ use std::{
        Arc,
    },
};
use theme::ThemeRegistry;
use util::http::FakeHttpClient;
use workspace::Workspace;
@@ -102,7 +101,7 @@ impl TestServer {

    async fn create_client(&mut self, cx: &mut TestAppContext, name: &str) -> TestClient {
        cx.update(|cx| {
-           cx.set_global(Settings::test(cx));
+           cx.set_global(SettingsStore::test(cx));
        });

        let http = FakeHttpClient::with_404_response();
@@ -191,15 +190,18 @@ impl TestServer {
            client: client.clone(),
            user_store: user_store.clone(),
            languages: Arc::new(LanguageRegistry::test()),
            themes: ThemeRegistry::new((), cx.font_cache()),
            fs: fs.clone(),
            build_window_options: |_, _, _| Default::default(),
            initialize_workspace: |_, _, _, _| unimplemented!(),
            background_actions: || &[],
        });

        Project::init(&client);
        cx.update(|cx| {
            theme::init((), cx);
            Project::init(&client, cx);
            client::init(&client, cx);
            language::init(cx);
            editor::init_settings(cx);
            workspace::init(app_state.clone(), cx);
            call::init(client.clone(), user_store.clone(), cx);
        });
@@ -10,7 +10,7 @@ use editor::{
    ConfirmRename, Editor, ExcerptRange, MultiBuffer, Redo, Rename, ToOffset, ToggleCodeActions,
    Undo,
};
-use fs::{FakeFs, Fs as _, LineEnding, RemoveOptions};
+use fs::{repository::GitFileStatus, FakeFs, Fs as _, LineEnding, RemoveOptions};
use futures::StreamExt as _;
use gpui::{
    executor::Deterministic, geometry::vector::vec2f, test::EmptyView, AppContext, ModelHandle,

@@ -18,6 +18,7 @@ use gpui::{
};
use indoc::indoc;
use language::{
    language_settings::{AllLanguageSettings, Formatter},
    tree_sitter_rust, Anchor, Diagnostic, DiagnosticEntry, FakeLspAdapter, Language,
    LanguageConfig, OffsetRangeExt, Point, Rope,
};

@@ -26,7 +27,7 @@ use lsp::LanguageServerId;
use project::{search::SearchQuery, DiagnosticSummary, HoverBlockKind, Project, ProjectPath};
use rand::prelude::*;
use serde_json::json;
-use settings::{Formatter, Settings};
+use settings::SettingsStore;
use std::{
    cell::{Cell, RefCell},
    env, future, mem,
@@ -1438,7 +1439,6 @@ async fn test_host_disconnect(
    cx_b: &mut TestAppContext,
    cx_c: &mut TestAppContext,
) {
-   cx_b.update(editor::init);
    deterministic.forbid_parking();
    let mut server = TestServer::start(&deterministic).await;
    let client_a = server.create_client(cx_a, "user_a").await;

@@ -1448,6 +1448,8 @@ async fn test_host_disconnect(
        .create_room(&mut [(&client_a, cx_a), (&client_b, cx_b), (&client_c, cx_c)])
        .await;

+   cx_b.update(editor::init);

    client_a
        .fs
        .insert_tree(

@@ -1545,7 +1547,6 @@ async fn test_project_reconnect(
    cx_a: &mut TestAppContext,
    cx_b: &mut TestAppContext,
) {
-   cx_b.update(editor::init);
    deterministic.forbid_parking();
    let mut server = TestServer::start(&deterministic).await;
    let client_a = server.create_client(cx_a, "user_a").await;

@@ -1554,6 +1555,8 @@ async fn test_project_reconnect(
        .create_room(&mut [(&client_a, cx_a), (&client_b, cx_b)])
        .await;

+   cx_b.update(editor::init);

    client_a
        .fs
        .insert_tree(
@@ -2434,7 +2437,7 @@ async fn test_git_diff_base_change(
    buffer_local_a.read_with(cx_a, |buffer, _| {
        assert_eq!(buffer.diff_base(), Some(diff_base.as_ref()));
        git::diff::assert_hunks(
-           buffer.snapshot().git_diff_hunks_in_row_range(0..4, false),
+           buffer.snapshot().git_diff_hunks_in_row_range(0..4),
            &buffer,
            &diff_base,
            &[(1..2, "", "two\n")],

@@ -2454,7 +2457,7 @@ async fn test_git_diff_base_change(
    buffer_remote_a.read_with(cx_b, |buffer, _| {
        assert_eq!(buffer.diff_base(), Some(diff_base.as_ref()));
        git::diff::assert_hunks(
-           buffer.snapshot().git_diff_hunks_in_row_range(0..4, false),
+           buffer.snapshot().git_diff_hunks_in_row_range(0..4),
            &buffer,
            &diff_base,
            &[(1..2, "", "two\n")],

@@ -2478,7 +2481,7 @@ async fn test_git_diff_base_change(
        assert_eq!(buffer.diff_base(), Some(new_diff_base.as_ref()));

        git::diff::assert_hunks(
-           buffer.snapshot().git_diff_hunks_in_row_range(0..4, false),
+           buffer.snapshot().git_diff_hunks_in_row_range(0..4),
            &buffer,
            &diff_base,
            &[(2..3, "", "three\n")],

@@ -2489,7 +2492,7 @@ async fn test_git_diff_base_change(
    buffer_remote_a.read_with(cx_b, |buffer, _| {
        assert_eq!(buffer.diff_base(), Some(new_diff_base.as_ref()));
        git::diff::assert_hunks(
-           buffer.snapshot().git_diff_hunks_in_row_range(0..4, false),
+           buffer.snapshot().git_diff_hunks_in_row_range(0..4),
            &buffer,
            &diff_base,
            &[(2..3, "", "three\n")],

@@ -2532,7 +2535,7 @@ async fn test_git_diff_base_change(
    buffer_local_b.read_with(cx_a, |buffer, _| {
        assert_eq!(buffer.diff_base(), Some(diff_base.as_ref()));
        git::diff::assert_hunks(
-           buffer.snapshot().git_diff_hunks_in_row_range(0..4, false),
+           buffer.snapshot().git_diff_hunks_in_row_range(0..4),
            &buffer,
            &diff_base,
            &[(1..2, "", "two\n")],

@@ -2552,7 +2555,7 @@ async fn test_git_diff_base_change(
    buffer_remote_b.read_with(cx_b, |buffer, _| {
        assert_eq!(buffer.diff_base(), Some(diff_base.as_ref()));
        git::diff::assert_hunks(
-           buffer.snapshot().git_diff_hunks_in_row_range(0..4, false),
+           buffer.snapshot().git_diff_hunks_in_row_range(0..4),
            &buffer,
            &diff_base,
            &[(1..2, "", "two\n")],

@@ -2580,12 +2583,12 @@ async fn test_git_diff_base_change(
            "{:?}",
            buffer
                .snapshot()
-               .git_diff_hunks_in_row_range(0..4, false)
+               .git_diff_hunks_in_row_range(0..4)
                .collect::<Vec<_>>()
        );

        git::diff::assert_hunks(
-           buffer.snapshot().git_diff_hunks_in_row_range(0..4, false),
+           buffer.snapshot().git_diff_hunks_in_row_range(0..4),
            &buffer,
            &diff_base,
            &[(2..3, "", "three\n")],

@@ -2596,7 +2599,7 @@ async fn test_git_diff_base_change(
    buffer_remote_b.read_with(cx_b, |buffer, _| {
        assert_eq!(buffer.diff_base(), Some(new_diff_base.as_ref()));
        git::diff::assert_hunks(
-           buffer.snapshot().git_diff_hunks_in_row_range(0..4, false),
+           buffer.snapshot().git_diff_hunks_in_row_range(0..4),
            &buffer,
            &diff_base,
            &[(2..3, "", "three\n")],
@@ -2690,6 +2693,154 @@ async fn test_git_branch_name(
    });
}

#[gpui::test]
async fn test_git_status_sync(
    deterministic: Arc<Deterministic>,
    cx_a: &mut TestAppContext,
    cx_b: &mut TestAppContext,
    cx_c: &mut TestAppContext,
) {
    deterministic.forbid_parking();
    let mut server = TestServer::start(&deterministic).await;
    let client_a = server.create_client(cx_a, "user_a").await;
    let client_b = server.create_client(cx_b, "user_b").await;
    let client_c = server.create_client(cx_c, "user_c").await;
    server
        .create_room(&mut [(&client_a, cx_a), (&client_b, cx_b), (&client_c, cx_c)])
        .await;
    let active_call_a = cx_a.read(ActiveCall::global);

    client_a
        .fs
        .insert_tree(
            "/dir",
            json!({
                ".git": {},
                "a.txt": "a",
                "b.txt": "b",
            }),
        )
        .await;

    const A_TXT: &'static str = "a.txt";
    const B_TXT: &'static str = "b.txt";

    client_a
        .fs
        .as_fake()
        .set_status_for_repo(
            Path::new("/dir/.git"),
            &[
                (&Path::new(A_TXT), GitFileStatus::Added),
                (&Path::new(B_TXT), GitFileStatus::Added),
            ],
        )
        .await;

    let (project_local, _worktree_id) = client_a.build_local_project("/dir", cx_a).await;
    let project_id = active_call_a
        .update(cx_a, |call, cx| {
            call.share_project(project_local.clone(), cx)
        })
        .await
        .unwrap();

    let project_remote = client_b.build_remote_project(project_id, cx_b).await;

    // Wait for it to catch up to the new status
    deterministic.run_until_parked();

    #[track_caller]
    fn assert_status(
        file: &impl AsRef<Path>,
        status: Option<GitFileStatus>,
        project: &Project,
        cx: &AppContext,
    ) {
        let file = file.as_ref();
        let worktrees = project.visible_worktrees(cx).collect::<Vec<_>>();
        assert_eq!(worktrees.len(), 1);
        let worktree = worktrees[0].clone();
        let snapshot = worktree.read(cx).snapshot();
        let root_entry = snapshot.root_git_entry().unwrap();
        assert_eq!(root_entry.status_for_file(&snapshot, file), status);
    }

    // Smoke test status reading
    project_local.read_with(cx_a, |project, cx| {
        assert_status(&Path::new(A_TXT), Some(GitFileStatus::Added), project, cx);
        assert_status(&Path::new(B_TXT), Some(GitFileStatus::Added), project, cx);
    });
    project_remote.read_with(cx_b, |project, cx| {
        assert_status(&Path::new(A_TXT), Some(GitFileStatus::Added), project, cx);
        assert_status(&Path::new(B_TXT), Some(GitFileStatus::Added), project, cx);
    });

    client_a
        .fs
        .as_fake()
        .set_status_for_repo(
            Path::new("/dir/.git"),
            &[
                (&Path::new(A_TXT), GitFileStatus::Modified),
                (&Path::new(B_TXT), GitFileStatus::Modified),
            ],
        )
        .await;

    // Wait for buffer_local_a to receive it
    deterministic.run_until_parked();

    // Smoke test status reading
    project_local.read_with(cx_a, |project, cx| {
        assert_status(
            &Path::new(A_TXT),
            Some(GitFileStatus::Modified),
            project,
            cx,
        );
        assert_status(
            &Path::new(B_TXT),
            Some(GitFileStatus::Modified),
            project,
            cx,
        );
    });
    project_remote.read_with(cx_b, |project, cx| {
        assert_status(
            &Path::new(A_TXT),
            Some(GitFileStatus::Modified),
            project,
            cx,
        );
        assert_status(
            &Path::new(B_TXT),
            Some(GitFileStatus::Modified),
            project,
            cx,
        );
    });

    // And synchronization while joining
    let project_remote_c = client_c.build_remote_project(project_id, cx_c).await;
    deterministic.run_until_parked();

    project_remote_c.read_with(cx_c, |project, cx| {
        assert_status(
            &Path::new(A_TXT),
            Some(GitFileStatus::Modified),
            project,
            cx,
        );
        assert_status(
            &Path::new(B_TXT),
            Some(GitFileStatus::Modified),
            project,
            cx,
        );
    });
}

#[gpui::test(iterations = 10)]
async fn test_fs_operations(
    deterministic: Arc<Deterministic>,
@@ -4219,10 +4370,12 @@ async fn test_formatting_buffer(
    // Ensure buffer can be formatted using an external command. Notice how the
    // host's configuration is honored as opposed to using the guest's settings.
    cx_a.update(|cx| {
-       cx.update_global(|settings: &mut Settings, _| {
-           settings.editor_defaults.formatter = Some(Formatter::External {
-               command: "awk".to_string(),
-               arguments: vec!["{sub(/two/,\"{buffer_path}\")}1".to_string()],
+       cx.update_global(|store: &mut SettingsStore, cx| {
+           store.update_user_settings::<AllLanguageSettings>(cx, |file| {
+               file.defaults.formatter = Some(Formatter::External {
+                   command: "awk".into(),
+                   arguments: vec!["{sub(/two/,\"{buffer_path}\")}1".to_string()].into(),
+               });
            });
        });
    });
@@ -4989,7 +5142,6 @@ async fn test_collaborating_with_code_actions(
    cx_b: &mut TestAppContext,
) {
    deterministic.forbid_parking();
-   cx_b.update(editor::init);
    let mut server = TestServer::start(&deterministic).await;
    let client_a = server.create_client(cx_a, "user_a").await;
    let client_b = server.create_client(cx_b, "user_b").await;

@@ -4998,6 +5150,8 @@ async fn test_collaborating_with_code_actions(
        .await;
    let active_call_a = cx_a.read(ActiveCall::global);

+   cx_b.update(editor::init);

    // Set up a fake language server.
    let mut language = Language::new(
        LanguageConfig {

@@ -5202,7 +5356,6 @@ async fn test_collaborating_with_renames(
    cx_b: &mut TestAppContext,
) {
    deterministic.forbid_parking();
-   cx_b.update(editor::init);
    let mut server = TestServer::start(&deterministic).await;
    let client_a = server.create_client(cx_a, "user_a").await;
    let client_b = server.create_client(cx_b, "user_b").await;

@@ -5211,6 +5364,8 @@ async fn test_collaborating_with_renames(
        .await;
    let active_call_a = cx_a.read(ActiveCall::global);

+   cx_b.update(editor::init);

    // Set up a fake language server.
    let mut language = Language::new(
        LanguageConfig {

@@ -5392,8 +5547,6 @@ async fn test_language_server_statuses(
    cx_b: &mut TestAppContext,
) {
    deterministic.forbid_parking();

-   cx_b.update(editor::init);
    let mut server = TestServer::start(&deterministic).await;
    let client_a = server.create_client(cx_a, "user_a").await;
    let client_b = server.create_client(cx_b, "user_b").await;

@@ -5402,6 +5555,8 @@ async fn test_language_server_statuses(
        .await;
    let active_call_a = cx_a.read(ActiveCall::global);

+   cx_b.update(editor::init);

    // Set up a fake language server.
    let mut language = Language::new(
        LanguageConfig {
@@ -6109,8 +6264,6 @@ async fn test_basic_following(
    cx_d: &mut TestAppContext,
) {
    deterministic.forbid_parking();
-   cx_a.update(editor::init);
-   cx_b.update(editor::init);

    let mut server = TestServer::start(&deterministic).await;
    let client_a = server.create_client(cx_a, "user_a").await;

@@ -6128,6 +6281,9 @@ async fn test_basic_following(
    let active_call_a = cx_a.read(ActiveCall::global);
    let active_call_b = cx_b.read(ActiveCall::global);

+   cx_a.update(editor::init);
+   cx_b.update(editor::init);

    client_a
        .fs
        .insert_tree(

@@ -6706,9 +6862,6 @@ async fn test_following_tab_order(
    cx_a: &mut TestAppContext,
    cx_b: &mut TestAppContext,
) {
-   cx_a.update(editor::init);
-   cx_b.update(editor::init);

    let mut server = TestServer::start(&deterministic).await;
    let client_a = server.create_client(cx_a, "user_a").await;
    let client_b = server.create_client(cx_b, "user_b").await;

@@ -6718,6 +6871,9 @@ async fn test_following_tab_order(
    let active_call_a = cx_a.read(ActiveCall::global);
    let active_call_b = cx_b.read(ActiveCall::global);

+   cx_a.update(editor::init);
+   cx_b.update(editor::init);

    client_a
        .fs
        .insert_tree(

@@ -6828,9 +6984,6 @@ async fn test_peers_following_each_other(
    cx_b: &mut TestAppContext,
) {
    deterministic.forbid_parking();
-   cx_a.update(editor::init);
-   cx_b.update(editor::init);

    let mut server = TestServer::start(&deterministic).await;
    let client_a = server.create_client(cx_a, "user_a").await;
    let client_b = server.create_client(cx_b, "user_b").await;

@@ -6840,6 +6993,9 @@ async fn test_peers_following_each_other(
    let active_call_a = cx_a.read(ActiveCall::global);
    let active_call_b = cx_b.read(ActiveCall::global);

+   cx_a.update(editor::init);
+   cx_b.update(editor::init);

    // Client A shares a project.
    client_a
        .fs
@@ -6999,8 +7155,6 @@ async fn test_auto_unfollowing(
    cx_b: &mut TestAppContext,
) {
    deterministic.forbid_parking();
-   cx_a.update(editor::init);
-   cx_b.update(editor::init);

    // 2 clients connect to a server.
    let mut server = TestServer::start(&deterministic).await;

@@ -7012,6 +7166,9 @@ async fn test_auto_unfollowing(
    let active_call_a = cx_a.read(ActiveCall::global);
    let active_call_b = cx_b.read(ActiveCall::global);

+   cx_a.update(editor::init);
+   cx_b.update(editor::init);

    // Client A shares a project.
    client_a
        .fs

@@ -7166,8 +7323,6 @@ async fn test_peers_simultaneously_following_each_other(
    cx_b: &mut TestAppContext,
) {
    deterministic.forbid_parking();
-   cx_a.update(editor::init);
-   cx_b.update(editor::init);

    let mut server = TestServer::start(&deterministic).await;
    let client_a = server.create_client(cx_a, "user_a").await;

@@ -7177,6 +7332,9 @@ async fn test_peers_simultaneously_following_each_other(
        .await;
    let active_call_a = cx_a.read(ActiveCall::global);

+   cx_a.update(editor::init);
+   cx_b.update(editor::init);

    client_a.fs.insert_tree("/a", json!({})).await;
    let (project_a, _) = client_a.build_local_project("/a", cx_a).await;
    let workspace_a = client_a.build_workspace(&project_a, cx_a);
@@ -8,19 +8,20 @@ use call::ActiveCall;
use client::RECEIVE_TIMEOUT;
use collections::BTreeMap;
use editor::Bias;
-use fs::{FakeFs, Fs as _};
+use fs::{repository::GitFileStatus, FakeFs, Fs as _};
use futures::StreamExt as _;
use gpui::{executor::Deterministic, ModelHandle, Task, TestAppContext};
use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16};
use lsp::FakeLanguageServer;
use parking_lot::Mutex;
use pretty_assertions::assert_eq;
use project::{search::SearchQuery, Project, ProjectPath};
use rand::{
    distributions::{Alphanumeric, DistString},
    prelude::*,
};
use serde::{Deserialize, Serialize};
-use settings::Settings;
+use settings::SettingsStore;
use std::{
    env,
    ops::Range,

@@ -148,8 +149,9 @@ async fn test_random_collaboration(

    for (client, mut cx) in clients {
        cx.update(|cx| {
+           let store = cx.remove_global::<SettingsStore>();
            cx.clear_globals();
-           cx.set_global(Settings::test(cx));
+           cx.set_global(store);
            drop(client);
        });
    }
@@ -763,53 +765,85 @@ async fn apply_client_operation(
                }
            }

            ClientOperation::WriteGitIndex {
                repo_path,
                contents,
            } => {
                if !client.fs.directories().contains(&repo_path) {
                    return Err(TestError::Inapplicable);
                }

                log::info!(
                    "{}: writing git index for repo {:?}: {:?}",
                    client.username,
            ClientOperation::GitOperation { operation } => match operation {
                GitOperation::WriteGitIndex {
                    repo_path,
                    contents
                );
                    contents,
                } => {
                    if !client.fs.directories().contains(&repo_path) {
                        return Err(TestError::Inapplicable);
                    }

                let dot_git_dir = repo_path.join(".git");
                let contents = contents
                    .iter()
                    .map(|(path, contents)| (path.as_path(), contents.clone()))
                    .collect::<Vec<_>>();
                if client.fs.metadata(&dot_git_dir).await?.is_none() {
                    client.fs.create_dir(&dot_git_dir).await?;
                    log::info!(
                        "{}: writing git index for repo {:?}: {:?}",
                        client.username,
                        repo_path,
                        contents
                    );

                    let dot_git_dir = repo_path.join(".git");
                    let contents = contents
                        .iter()
                        .map(|(path, contents)| (path.as_path(), contents.clone()))
                        .collect::<Vec<_>>();
                    if client.fs.metadata(&dot_git_dir).await?.is_none() {
                        client.fs.create_dir(&dot_git_dir).await?;
                    }
                    client.fs.set_index_for_repo(&dot_git_dir, &contents).await;
                }
                client.fs.set_index_for_repo(&dot_git_dir, &contents).await;
            }

            ClientOperation::WriteGitBranch {
                repo_path,
                new_branch,
            } => {
                if !client.fs.directories().contains(&repo_path) {
                    return Err(TestError::Inapplicable);
                }

                log::info!(
                    "{}: writing git branch for repo {:?}: {:?}",
                    client.username,
                GitOperation::WriteGitBranch {
                    repo_path,
                    new_branch
                );
                    new_branch,
                } => {
                    if !client.fs.directories().contains(&repo_path) {
                        return Err(TestError::Inapplicable);
                    }

                let dot_git_dir = repo_path.join(".git");
                if client.fs.metadata(&dot_git_dir).await?.is_none() {
                    client.fs.create_dir(&dot_git_dir).await?;
                    log::info!(
                        "{}: writing git branch for repo {:?}: {:?}",
                        client.username,
                        repo_path,
                        new_branch
                    );

                    let dot_git_dir = repo_path.join(".git");
                    if client.fs.metadata(&dot_git_dir).await?.is_none() {
                        client.fs.create_dir(&dot_git_dir).await?;
                    }
                    client.fs.set_branch_name(&dot_git_dir, new_branch).await;
                }
                client.fs.set_branch_name(&dot_git_dir, new_branch).await;
            }
                GitOperation::WriteGitStatuses {
                    repo_path,
                    statuses,
                } => {
                    if !client.fs.directories().contains(&repo_path) {
                        return Err(TestError::Inapplicable);
                    }

                    log::info!(
                        "{}: writing git statuses for repo {:?}: {:?}",
                        client.username,
                        repo_path,
                        statuses
                    );

                    let dot_git_dir = repo_path.join(".git");

                    let statuses = statuses
                        .iter()
                        .map(|(path, val)| (path.as_path(), val.clone()))
                        .collect::<Vec<_>>();

                    if client.fs.metadata(&dot_git_dir).await?.is_none() {
                        client.fs.create_dir(&dot_git_dir).await?;
                    }

                    client
                        .fs
                        .set_status_for_repo(&dot_git_dir, statuses.as_slice())
                        .await;
                }
            },
        }
        Ok(())
    }
@@ -1178,6 +1212,13 @@ enum ClientOperation {
        is_dir: bool,
        content: String,
    },
    GitOperation {
        operation: GitOperation,
    },
}

#[derive(Clone, Debug, Serialize, Deserialize)]
enum GitOperation {
    WriteGitIndex {
        repo_path: PathBuf,
        contents: Vec<(PathBuf, String)>,

@@ -1186,6 +1227,10 @@ enum ClientOperation {
        repo_path: PathBuf,
        new_branch: Option<String>,
    },
    WriteGitStatuses {
        repo_path: PathBuf,
        statuses: Vec<(PathBuf, GitFileStatus)>,
    },
}

#[derive(Clone, Debug, Serialize, Deserialize)]
@@ -1698,57 +1743,10 @@ impl TestPlan {
                    }
                }

                // Update a git index
                91..=93 => {
                    let repo_path = client
                        .fs
                        .directories()
                        .into_iter()
                        .choose(&mut self.rng)
                        .unwrap()
                        .clone();

                    let mut file_paths = client
                        .fs
                        .files()
                        .into_iter()
                        .filter(|path| path.starts_with(&repo_path))
                        .collect::<Vec<_>>();
                    let count = self.rng.gen_range(0..=file_paths.len());
                    file_paths.shuffle(&mut self.rng);
                    file_paths.truncate(count);

                    let mut contents = Vec::new();
                    for abs_child_file_path in &file_paths {
                        let child_file_path = abs_child_file_path
                            .strip_prefix(&repo_path)
                            .unwrap()
                            .to_path_buf();
                        let new_base = Alphanumeric.sample_string(&mut self.rng, 16);
                        contents.push((child_file_path, new_base));
                    }

                    break ClientOperation::WriteGitIndex {
                        repo_path,
                        contents,
                    };
                }

                // Update a git branch
                94..=95 => {
                    let repo_path = client
                        .fs
                        .directories()
                        .choose(&mut self.rng)
                        .unwrap()
                        .clone();

                    let new_branch = (self.rng.gen_range(0..10) > 3)
                        .then(|| Alphanumeric.sample_string(&mut self.rng, 8));

                    break ClientOperation::WriteGitBranch {
                        repo_path,
                        new_branch,
                // Update a git related action
                91..=95 => {
                    break ClientOperation::GitOperation {
                        operation: self.generate_git_operation(client),
                    };
                }

@@ -1786,6 +1784,86 @@ impl TestPlan {
        })
    }

    fn generate_git_operation(&mut self, client: &TestClient) -> GitOperation {
        fn generate_file_paths(
            repo_path: &Path,
            rng: &mut StdRng,
            client: &TestClient,
        ) -> Vec<PathBuf> {
            let mut paths = client
                .fs
                .files()
                .into_iter()
                .filter(|path| path.starts_with(repo_path))
                .collect::<Vec<_>>();

            let count = rng.gen_range(0..=paths.len());
            paths.shuffle(rng);
            paths.truncate(count);

            paths
                .iter()
                .map(|path| path.strip_prefix(repo_path).unwrap().to_path_buf())
                .collect::<Vec<_>>()
        }

        let repo_path = client
            .fs
            .directories()
            .choose(&mut self.rng)
            .unwrap()
            .clone();

        match self.rng.gen_range(0..100_u32) {
            0..=25 => {
                let file_paths = generate_file_paths(&repo_path, &mut self.rng, client);

                let contents = file_paths
                    .into_iter()
                    .map(|path| (path, Alphanumeric.sample_string(&mut self.rng, 16)))
                    .collect();

                GitOperation::WriteGitIndex {
                    repo_path,
                    contents,
                }
            }
            26..=63 => {
                let new_branch = (self.rng.gen_range(0..10) > 3)
                    .then(|| Alphanumeric.sample_string(&mut self.rng, 8));

                GitOperation::WriteGitBranch {
                    repo_path,
                    new_branch,
                }
            }
            64..=100 => {
                let file_paths = generate_file_paths(&repo_path, &mut self.rng, client);

                let statuses = file_paths
                    .into_iter()
                    .map(|paths| {
                        (
                            paths,
                            match self.rng.gen_range(0..3_u32) {
                                0 => GitFileStatus::Added,
                                1 => GitFileStatus::Modified,
                                2 => GitFileStatus::Conflict,
                                _ => unreachable!(),
                            },
                        )
                    })
                    .collect::<Vec<_>>();

                GitOperation::WriteGitStatuses {
                    repo_path,
                    statuses,
                }
            }
            _ => unreachable!(),
        }
    }

    fn next_root_dir_name(&mut self, user_id: UserId) -> String {
        let user_ix = self
            .users
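For reference, a small standalone sketch of the two rand idioms the test plan relies on: range-based weighted choice and Alphanumeric random strings. The seed and the 26/38/36 split below are illustrative only, not the plan's exact odds.

use rand::distributions::{Alphanumeric, DistString};
use rand::{rngs::StdRng, Rng, SeedableRng};

fn main() {
    let mut rng = StdRng::seed_from_u64(0);
    // Weighted choice over 0..100, like `match self.rng.gen_range(0..100_u32)` above.
    let op = match rng.gen_range(0..100_u32) {
        0..=25 => "write git index",
        26..=63 => "write git branch",
        _ => "write git statuses",
    };
    // Random 16-character contents, like `Alphanumeric.sample_string(&mut self.rng, 16)`.
    let contents = Alphanumeric.sample_string(&mut rng, 16);
    println!("{op}: {contents}");
}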