Figured out a good schema for the pane serialization stuff
commit 5505a776e6 (parent 46ff0885f0)
9 changed files with 156 additions and 65 deletions
Cargo.lock (generated, 1 addition)
@@ -1560,6 +1560,7 @@ dependencies = [
  "rusqlite_migration",
  "serde",
  "serde_rusqlite",
+ "settings",
  "tempdir",
 ]
crates/db/Cargo.toml
@@ -13,6 +13,7 @@ test-support = []
 [dependencies]
 collections = { path = "../collections" }
 gpui = { path = "../gpui" }
+settings = { path = "../settings" }
 anyhow = "1.0.57"
 async-trait = "0.1"
 lazy_static = "1.4.0"
crates/db/examples/serialize-pane.rs (new file, 27 additions)
@@ -0,0 +1,27 @@
+use std::{fs::File, path::Path, thread::sleep, time::Duration};
+
+const TEST_FILE: &'static str = "test-db.db";
+
+fn main() -> anyhow::Result<()> {
+    let db = db::Db::open_in_memory();
+    if db.real().is_none() {
+        return Err(anyhow::anyhow!("Migrations failed"));
+    }
+    let file = Path::new(TEST_FILE);
+
+    let f = File::create(file)?;
+    drop(f);
+
+    let workspace = db.make_new_workspace();
+
+    db.update_worktree_roots(&workspace.workspace_id, &["/tmp"]);
+
+    db.save_pane_splits(center_pane_group);
+    db.save_dock_pane();
+
+    db.write_to(file).ok();
+
+    println!("Wrote database!");
+
+    Ok(())
+}
crates/db/examples (existing example)
@@ -15,13 +15,13 @@ fn main() -> anyhow::Result<()> {
     db.write_kvp("test", "1")?;
     db.write_kvp("test-2", "2")?;
 
-    let workspace_1 = db.workspace_for_worktree_roots(&[]);
-    let workspace_2 = db.workspace_for_worktree_roots(&[]);
-    let workspace_3 = db.workspace_for_worktree_roots(&[]);
-    let workspace_4 = db.workspace_for_worktree_roots(&[]);
-    let workspace_5 = db.workspace_for_worktree_roots(&[]);
-    let workspace_6 = db.workspace_for_worktree_roots(&[]);
-    let workspace_7 = db.workspace_for_worktree_roots(&[]);
+    let workspace_1 = db.make_new_workspace();
+    let workspace_2 = db.make_new_workspace();
+    let workspace_3 = db.make_new_workspace();
+    let workspace_4 = db.make_new_workspace();
+    let workspace_5 = db.make_new_workspace();
+    let workspace_6 = db.make_new_workspace();
+    let workspace_7 = db.make_new_workspace();
 
     // Order scrambled + sleeps added because sqlite only has 1 second resolution on
     // their timestamps
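The sleeps in that example matter because workspace recency comes from the timestamp column, and SQLite's CURRENT_TIMESTAMP only ticks once per second. As a minimal sketch of the kind of recency query that implies (recent_workspaces exists in the crate but its SQL is not shown in this diff, so the query below is an assumption):

    use rusqlite::Connection;

    // Hypothetical sketch: order workspaces by their timestamp column.
    // Two workspaces touched within the same second can come back in either
    // order, which is why the example sleeps between writes.
    fn recent_workspace_ids(conn: &Connection, limit: usize) -> rusqlite::Result<Vec<i64>> {
        let mut stmt =
            conn.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")?;
        let ids = stmt
            .query_map([limit as i64], |row| row.get(0))?
            .collect::<Result<Vec<_>, _>>()?;
        Ok(ids)
    }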
crates/db/src/items.rs
@@ -46,15 +46,8 @@ use super::Db;
 // Items
 // Sidebars
 
+// Things I'm doing: finding about nullability for foreign keys
 pub(crate) const ITEMS_M_1: &str = "
-CREATE TABLE items(
-    workspace_id INTEGER,
-    item_id INTEGER,
-    kind TEXT NOT NULL,
-    PRIMARY KEY (workspace_id, item_id)
-    FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id)
-) STRICT;
-
 CREATE TABLE project_searches(
     workspace_id INTEGER,
     item_id INTEGER,
crates/db/src/kvp.rs
@@ -4,10 +4,14 @@ use rusqlite::OptionalExtension;
 use super::Db;
 
 pub(crate) const KVP_M_1: &str = "
+BEGIN TRANSACTION;
+
 CREATE TABLE kv_store(
     key TEXT PRIMARY KEY,
     value TEXT NOT NULL
 ) STRICT;
+
+COMMIT;
 ";
 
 impl Db {
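For context, kv_store is the simplest table in this migration set. A rough sketch of reads and writes against it with rusqlite follows; write_kvp is real (the example above calls it), but its body and the read counterpart are not shown in this hunk, so the exact SQL is an assumption:

    use rusqlite::{Connection, OptionalExtension};

    // Sketch only: upsert a key into kv_store.
    fn write_kvp(conn: &Connection, key: &str, value: &str) -> rusqlite::Result<()> {
        conn.execute(
            "INSERT OR REPLACE INTO kv_store(key, value) VALUES (?, ?)",
            [key, value],
        )?;
        Ok(())
    }

    // Hypothetical read counterpart. OptionalExtension (imported by kvp.rs)
    // turns the 'no rows' error into Ok(None).
    fn read_kvp(conn: &Connection, key: &str) -> rusqlite::Result<Option<String>> {
        conn.query_row("SELECT value FROM kv_store WHERE key = ?", [key], |row| {
            row.get(0)
        })
        .optional()
    }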
crates/db/src/migrations.rs
@@ -1,7 +1,7 @@
 use rusqlite_migration::{Migrations, M};
 
 // use crate::items::ITEMS_M_1;
-use crate::{kvp::KVP_M_1, WORKSPACE_M_1};
+use crate::{kvp::KVP_M_1, pane::PANE_M_1, WORKSPACE_M_1};
 
 // This must be ordered by development time! Only ever add new migrations to the end!!
 // Bad things will probably happen if you don't monotonically edit this vec!!!!
@@ -10,6 +10,7 @@ use crate::{kvp::KVP_M_1, WORKSPACE_M_1};
 lazy_static::lazy_static! {
     pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![
         M::up(KVP_M_1),
-        M::up(WORKSPACE_M_1)
+        M::up(WORKSPACE_M_1),
+        M::up(PANE_M_1)
     ]);
 }
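For reference, applying the MIGRATIONS list to a connection with rusqlite_migration looks roughly like this; the crate's real open path is not shown in this diff, so treat the function as a sketch:

    use rusqlite::Connection;

    use crate::migrations::MIGRATIONS;

    // Sketch: bring a fresh connection up to date. rusqlite_migration tracks
    // the applied version in SQLite's user_version pragma, so to_latest only
    // runs migrations that have not been applied yet, in order.
    fn open_and_migrate_in_memory() -> anyhow::Result<Connection> {
        let mut conn = Connection::open_in_memory()?;
        MIGRATIONS.to_latest(&mut conn)?;
        Ok(conn)
    }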
crates/db/src/pane.rs
@@ -1,41 +1,23 @@
 use gpui::Axis;
+use settings::DockAnchor;
 
 use crate::{items::ItemId, workspace::WorkspaceId};
 
 use super::Db;
 
-pub(crate) const PANE_M_1: &str = "
-CREATE TABLE pane_groups(
-    workspace_id INTEGER,
-    group_id INTEGER,
-    axis STRING NOT NULL, -- 'Vertical' / 'Horizontal'
-    PRIMARY KEY (workspace_id, group_id)
-) STRICT;
-
-CREATE TABLE pane_group_children(
-    workspace_id INTEGER,
-    group_id INTEGER,
-    child_pane_id INTEGER, -- Nullable
-    child_group_id INTEGER, -- Nullable
-    index INTEGER,
-    PRIMARY KEY (workspace_id, group_id)
-) STRICT;
-
-CREATE TABLE pane_items(
-    workspace_id INTEGER,
-    pane_id INTEGER,
-    item_id INTEGER, -- Array
-    index INTEGER,
-    KEY (workspace_id, pane_id)
-) STRICT;
-
-ALTER TABLE WORKSPACE
-ADD THESE COLS:
-center_group INTEGER NOT NULL,
-dock_pane INTEGER NOT NULL,
--- FOREIGN KEY(center_group) REFERENCES pane_groups(group_id)
--- FOREIGN KEY(dock_pane) REFERENCES pane_items(pane_id)
-";
+// We have an many-branched, unbalanced tree with three types:
+// Pane Groups
+// Panes
+// Items
+
+// The root is always a Pane Group
+// Pane Groups can have 0 (or more) Panes and/or Pane Groups as children
+// Panes can have 0 or more items as children
+// Panes can be their own root
+// Items cannot have children
+// References pointing down is hard (SQL doesn't like arrays)
+// References pointing up is easy (1-1 item / parent relationship) but is harder to query
+//
 
 #[derive(Debug, PartialEq, Eq, Copy, Clone)]
 pub struct PaneId {
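The comments above describe the shape being serialized. Purely as an illustration (these exact types are not in the diff, and the crate's real SerializedPaneGroup/SerializedPane differ), the tree looks roughly like this:

    use gpui::Axis;

    use crate::items::ItemId;

    // Illustrative only; not the crate's real types. The root is a pane group,
    // groups hold groups and/or panes along an axis, panes hold item ids, and
    // items are leaves. Because SQL rows are unordered, each child also has to
    // carry its position (the idx columns in the schema added in the next hunk),
    // and children point up at their parent rather than parents listing children.
    enum PaneTree {
        Group { axis: Axis, children: Vec<PaneTree> },
        Pane { items: Vec<ItemId> },
    }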
@@ -93,6 +75,71 @@ pub struct SerializedPane {
     children: Vec<ItemId>,
 }
 
+pub(crate) const PANE_M_1: &str = "
+BEGIN TRANSACTION;
+
+CREATE TABLE dock_panes(
+    dock_pane_id INTEGER PRIMARY KEY,
+    workspace_id INTEGER NOT NULL,
+    anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded'
+    shown INTEGER NOT NULL, -- Boolean
+    FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
+) STRICT;
+
+CREATE TABLE pane_groups(
+    group_id INTEGER PRIMARY KEY,
+    workspace_id INTEGER NOT NULL,
+    parent_group INTEGER, -- NULL indicates that this is a root node
+    axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal'
+    FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
+    FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE
+) STRICT;
+
+CREATE TABLE grouped_panes(
+    pane_id INTEGER PRIMARY KEY,
+    workspace_id INTEGER NOT NULL,
+    group_id INTEGER NOT NULL,
+    idx INTEGER NOT NULL,
+    FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
+    FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
+) STRICT;
+
+CREATE TABLE items(
+    item_id INTEGER PRIMARY KEY,
+    workspace_id INTEGER NOT NULL,
+    kind TEXT NOT NULL,
+    FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
+) STRICT;
+
+CREATE TABLE group_items(
+    workspace_id INTEGER NOT NULL,
+    pane_id INTEGER NOT NULL,
+    item_id INTEGER NOT NULL,
+    idx INTEGER NOT NULL,
+    PRIMARY KEY (workspace_id, pane_id, item_id)
+    FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
+    FOREIGN KEY(pane_id) REFERENCES grouped_panes(pane_id) ON DELETE CASCADE,
+    FOREIGN KEY(item_id) REFERENCES items(item_id) ON DELETE CASCADE
+) STRICT;
+
+CREATE TABLE dock_items(
+    workspace_id INTEGER NOT NULL,
+    dock_pane_id INTEGER NOT NULL,
+    item_id INTEGER NOT NULL,
+    idx INTEGER NOT NULL,
+    PRIMARY KEY (workspace_id, dock_pane_id, item_id)
+    FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
+    FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE,
+    FOREIGN KEY(item_id) REFERENCES items(item_id) ON DELETE CASCADE
+) STRICT;
+
+COMMIT;
+";
+
+struct SerializedDockPane {
+    //Cols
+}
+
 impl Db {
     pub(crate) fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup {
         let axis = self.get_pane_group_axis(pane_group_id);
@@ -147,5 +194,7 @@ impl Db {
         unimplemented!();
     }
 
-    pub fn save_pane(&self, pane: SerializedPane) {}
+    fn save_dock_pane() {}
 }
+
+mod tests {}
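Since save_dock_pane is still a stub here, the following is a sketch of what writing a dock pane and its items could look like against the tables PANE_M_1 creates. The SQL matches the schema above, but the function shape and the idea of passing item kinds directly are assumptions, not the crate's eventual API:

    use rusqlite::{params, Connection};

    // Sketch only: persist one dock pane and its items for a workspace, using
    // the dock_panes / items / dock_items tables defined in PANE_M_1 above.
    fn save_dock_pane_sketch(
        conn: &Connection,
        workspace_id: i64,
        anchor_position: &str, // 'Bottom' / 'Right' / 'Expanded'
        shown: bool,
        item_kinds: &[&str],
    ) -> rusqlite::Result<()> {
        conn.execute(
            "INSERT INTO dock_panes(workspace_id, anchor_position, shown) VALUES (?, ?, ?)",
            params![workspace_id, anchor_position, shown],
        )?;
        let dock_pane_id = conn.last_insert_rowid();

        for (idx, kind) in item_kinds.iter().enumerate() {
            conn.execute(
                "INSERT INTO items(workspace_id, kind) VALUES (?, ?)",
                params![workspace_id, kind],
            )?;
            let item_id = conn.last_insert_rowid();
            conn.execute(
                "INSERT INTO dock_items(workspace_id, dock_pane_id, item_id, idx) VALUES (?, ?, ?, ?)",
                params![workspace_id, dock_pane_id, item_id, idx as i64],
            )?;
        }
        Ok(())
    }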
crates/db/src/workspace.rs
@@ -13,10 +13,15 @@ use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup};
 
 use super::Db;
 
+// If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging
+// you might want to update some of the parsing code as well, I've left the variations in but commented
+// out
 pub(crate) const WORKSPACE_M_1: &str = "
+BEGIN TRANSACTION;
+
 CREATE TABLE workspaces(
-    workspace_id INTEGER PRIMARY KEY AUTOINCREMENT,
-    timestamp TEXT DEFAULT CURRENT_TIMESTAMP
+    workspace_id INTEGER PRIMARY KEY,
+    timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL
 ) STRICT;
 
 CREATE TABLE worktree_roots(
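The BLOB column exists because worktree roots are stored as raw OS-path bytes rather than lossy UTF-8. In isolation, the round trip the rest of this file relies on looks like this (Unix-only sketch mirroring the as_bytes/from_bytes calls further down):

    use std::ffi::OsStr;
    use std::os::unix::ffi::OsStrExt;
    use std::path::{Path, PathBuf};

    // Paths go into the BLOB column as their raw bytes and come back out the
    // same way, so non-UTF-8 paths survive. Switching the column to TEXT, as
    // the new comment suggests for debugging, would need the to_string_lossy
    // variants that are left commented out below.
    fn path_to_blob(path: &Path) -> &[u8] {
        path.as_os_str().as_bytes()
    }

    fn blob_to_path(bytes: &[u8]) -> PathBuf {
        PathBuf::from(OsStr::from_bytes(bytes))
    }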
@@ -25,16 +30,13 @@ CREATE TABLE worktree_roots(
     FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
     PRIMARY KEY(worktree_root, workspace_id)
 ) STRICT;
+
+COMMIT;
 ";
 
 #[derive(Debug, PartialEq, Eq, Copy, Clone, Default)]
 pub struct WorkspaceId(i64);
 
-struct WorkspaceRow {
-    pub center_group_id: PaneGroupId,
-    pub dock_pane_id: PaneId,
-}
-
 #[derive(Default, Debug)]
 pub struct SerializedWorkspace {
     pub workspace_id: WorkspaceId,
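A sketch of what creating a workspace row and attaching roots looks like under this schema; make_new_workspace and update_worktree_roots are the real entry points, so the raw SQL below is only there to make the table relationship concrete (the worktree_roots INSERT is the same statement used later in this file):

    use rusqlite::{params, Connection};
    use std::os::unix::ffi::OsStrExt;
    use std::path::Path;

    // Sketch: one workspaces row, then one worktree_roots row per root. The
    // ON DELETE CASCADE on worktree_roots means dropping the workspace row
    // removes its roots too.
    fn new_workspace_with_roots(conn: &Connection, roots: &[&Path]) -> rusqlite::Result<i64> {
        conn.execute("INSERT INTO workspaces DEFAULT VALUES", [])?;
        let workspace_id = conn.last_insert_rowid();
        for root in roots {
            conn.execute(
                "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)",
                params![workspace_id, root.as_os_str().as_bytes()],
            )?;
        }
        Ok(workspace_id)
    }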
@@ -72,7 +74,7 @@ impl Db {
         }
     }
 
-    fn make_new_workspace(&self) -> SerializedWorkspace {
+    pub fn make_new_workspace(&self) -> SerializedWorkspace {
         self.real()
             .map(|db| {
                 let lock = db.connection.lock();
@@ -140,6 +142,8 @@ impl Db {
 
             for root in worktree_roots {
                 let path = root.as_ref().as_os_str().as_bytes();
+                // If you need to debug this, here's the string parsing:
+                // let path = root.as_ref().to_string_lossy().to_string();
 
                 tx.execute(
                     "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)",
@@ -162,6 +166,7 @@ impl Db {
             match logic(&mut lock, worktree_roots, workspace_id) {
                 Ok(_) => {}
                 Err(err) => {
+                    dbg!(&err);
                     log::error!(
                         "Failed to update the worktree roots for {:?}, roots: {:?}, error: {}",
                         workspace_id,
@@ -222,6 +227,9 @@ impl Db {
                 .query_map([workspace_id.0], |row| {
                     let row = row.get::<_, Vec<u8>>(0)?;
                     Ok(PathBuf::from(OsStr::from_bytes(&row)).into())
+                    // If you need to debug this, here's the string parsing:
+                    // let row = row.get::<_, String>(0)?;
+                    // Ok(PathBuf::from(row).into())
                 })?
                 .collect::<Result<Vec<_>, rusqlite::Error>>()?;
             result.push((workspace_id, roots))
@@ -260,6 +268,7 @@ where
 where
     P: AsRef<Path> + Debug,
 {
+    // Short circuit if we can
     if worktree_roots.len() == 0 {
         return Ok(None);
     }
@@ -297,7 +306,7 @@ where
     // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out:
     //  - We start with a join of this table on itself, generating every possible
     //    pair of ((path, ID), (path, ID)), and filtering the join down to just the
-    //    *overlapping but incorrect* workspace IDs. For this small data set,
+    //    *overlapping but non-matching* workspace IDs. For this small data set,
     //    this would look like:
     //
     //     wt1.ID wt1.PATH | wt2.ID wt2.PATH
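The comments above describe an exact-set match: a workspace qualifies only if its root set is exactly the set being queried. The sketch below is not the query used here (the real one is built around the self-join those comments walk through), just a compact way to state the same condition for reference; roots are BLOBs in practice, so real code binds byte parameters instead of the string literals shown:

    use rusqlite::{Connection, OptionalExtension};

    // Reference sketch, not the production query: find the workspace whose
    // root set is exactly {'/tmp', '/tmp2'}. Every row must be one of the
    // requested roots, and the row count must equal the requested count.
    // (worktree_root is really a BLOB; literals here are for illustration.)
    fn workspace_with_exact_roots(conn: &Connection) -> rusqlite::Result<Option<i64>> {
        conn.query_row(
            "SELECT workspace_id FROM worktree_roots
             GROUP BY workspace_id
             HAVING SUM(worktree_root NOT IN ('/tmp', '/tmp2')) = 0
                AND COUNT(*) = 2",
            [],
            |row| row.get(0),
        )
        .optional()
    }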
@@ -349,6 +358,8 @@ where
 
     for i in 0..worktree_roots.len() {
         let path = &worktree_roots[i].as_ref().as_os_str().as_bytes();
+        // If you need to debug this, here's the string parsing:
+        // let path = &worktree_roots[i].as_ref().to_string_lossy().to_string()
         stmt.raw_bind_parameter(i + 1, path)?
     }
     // No -1, because SQLite is 1 based
@@ -402,22 +413,26 @@ mod tests {
 
         assert_eq!(None, db.workspace_id::<String>(&[]));
 
-        db.make_new_workspace();
+        db.make_new_workspace(); //ID 1
+        db.make_new_workspace(); //ID 2
         db.update_worktree_roots(&WorkspaceId(1), &["/tmp", "/tmp2"]);
 
         // Sanity check
-        assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp", "/tmp2"]));
+        assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1)));
 
         db.update_worktree_roots::<String>(&WorkspaceId(1), &[]);
 
-        // Make sure DB doesn't consider 'no worktrees' to be a query it can answer
-        assert_eq!(None, db.workspace_id::<String>(&[]));
+        // Make sure 'no worktrees' fails correctly. returning [1, 2] from this
+        // call would be semantically correct (as those are the workspaces that
+        // don't have roots) but I'd prefer that this API to either return exactly one
+        // workspace, and None otherwise
+        assert_eq!(db.workspace_id::<String>(&[]), None,);
 
-        assert_eq!(Some(WorkspaceId(1)), db.last_workspace_id());
+        assert_eq!(db.last_workspace_id(), Some(WorkspaceId(1)));
 
         assert_eq!(
-            &(WorkspaceId(1), vec![]),
-            db.recent_workspaces(1).get(0).unwrap()
+            db.recent_workspaces(2),
+            vec![(WorkspaceId(1), vec![]), (WorkspaceId(2), vec![]),],
        )
     }
 