Refined sqlez, implemented 60% of workspace serialization sql

This commit is contained in:
Mikayla Maki 2022-11-04 13:22:35 -07:00
parent 6b214acbc4
commit 0186289420
11 changed files with 569 additions and 433 deletions

View file

@@ -1,5 +1,4 @@
pub mod kvp;
mod migrations;
pub mod workspace;
use std::fs;
@@ -11,8 +10,9 @@ use indoc::indoc;
use kvp::KVP_MIGRATION;
use sqlez::connection::Connection;
use sqlez::thread_safe_connection::ThreadSafeConnection;
use workspace::items::ITEM_MIGRATIONS;
use workspace::pane::PANE_MIGRATIONS;
pub use workspace::*;
#[derive(Clone)]
@@ -35,32 +35,21 @@ impl Db {
.expect("Should be able to create the database directory");
let db_path = current_db_dir.join(Path::new("db.sqlite"));
Db(
ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true)
.with_initialize_query(indoc! {"
PRAGMA journal_mode=WAL;
PRAGMA synchronous=NORMAL;
PRAGMA foreign_keys=TRUE;
PRAGMA case_sensitive_like=TRUE;
"})
.with_migrations(&[KVP_MIGRATION, WORKSPACES_MIGRATION, PANE_MIGRATIONS]),
)
}
pub fn persisting(&self) -> bool {
self.persistent()
Db(initialize_connection(ThreadSafeConnection::new(
db_path.to_string_lossy().as_ref(),
true,
)))
}
/// Open an in-memory database for testing and as a fallback.
pub fn open_in_memory(db_name: &str) -> Self {
Db(ThreadSafeConnection::new(db_name, false)
.with_initialize_query(indoc! {"
PRAGMA journal_mode=WAL;
PRAGMA synchronous=NORMAL;
PRAGMA foreign_keys=TRUE;
PRAGMA case_sensitive_like=TRUE;
"})
.with_migrations(&[KVP_MIGRATION, WORKSPACES_MIGRATION, PANE_MIGRATIONS]))
Db(initialize_connection(ThreadSafeConnection::new(
db_name, false,
)))
}
pub fn persisting(&self) -> bool {
self.persistent()
}
pub fn write_to<P: AsRef<Path>>(&self, dest: P) -> Result<()> {
@@ -68,3 +57,18 @@ impl Db {
self.backup_main(&destination)
}
}
fn initialize_connection(conn: ThreadSafeConnection) -> ThreadSafeConnection {
conn.with_initialize_query(indoc! {"
PRAGMA journal_mode=WAL;
PRAGMA synchronous=NORMAL;
PRAGMA foreign_keys=TRUE;
PRAGMA case_sensitive_like=TRUE;
"})
.with_migrations(&[
KVP_MIGRATION,
WORKSPACES_MIGRATION,
PANE_MIGRATIONS,
ITEM_MIGRATIONS,
])
}
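Both constructors now funnel through this helper, so file-backed and in-memory databases get identical pragmas and migrations. A minimal verification sketch, assuming the initialize query runs before the first prepared statement and that row::<i64>() is available as it is used elsewhere in this diff:

#[cfg(test)]
mod initialization_tests {
    use super::Db;

    #[test]
    fn pragmas_apply_to_in_memory_connections() {
        let db = Db::open_in_memory("pragma_check");
        // PRAGMA foreign_keys reports 1 once enforcement is switched on.
        let enabled = db
            .prepare("PRAGMA foreign_keys")
            .unwrap()
            .row::<i64>()
            .unwrap();
        assert_eq!(enabled, 1);
    }
}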

View file

@@ -1,14 +0,0 @@
// // use crate::items::ITEMS_M_1;
// use crate::{kvp::KVP_M_1, pane::PANE_M_1, WORKSPACES_MIGRATION};
// // This must be ordered by development time! Only ever add new migrations to the end!!
// // Bad things will probably happen if you don't monotonically edit this vec!!!!
// // And no re-ordering ever!!!!!!!!!! The results of these migrations are on the user's
// // file system and so everything we do here is locked in _f_o_r_e_v_e_r_.
// lazy_static::lazy_static! {
// pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![
// M::up(KVP_M_1),
// M::up(WORKSPACE_M_1),
// M::up(PANE_M_1)
// ]);
// }
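The deleted file above kept one globally ordered migration vec; this commit replaces it with per-domain sqlez Migrations that the connection runs via with_migrations. A sketch of the replacement pattern, assuming Migration::new takes a domain name plus an append-only slice of SQL steps, as the ITEM_MIGRATIONS definition later in this diff suggests:

use sqlez::migrations::Migration;

// Hypothetical domain, for illustration only. The old rule still applies
// per domain: never reorder or edit shipped steps, only append new ones.
pub(crate) const EXAMPLE_MIGRATION: Migration = Migration::new(
    "example",
    &["CREATE TABLE example(id INTEGER PRIMARY KEY) STRICT;"],
);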

View file

@@ -1,4 +1,4 @@
mod items;
pub(crate) mod items;
pub mod model;
pub(crate) mod pane;
@@ -58,8 +58,14 @@ impl Db {
.flatten()?;
Some(SerializedWorkspace {
dock_pane: self.get_dock_pane(&workspace_id)?,
center_group: self.get_center_group(&workspace_id),
dock_pane: self
.get_dock_pane(&workspace_id)
.context("Getting dock pane")
.log_err()?,
center_group: self
.get_center_group(&workspace_id)
.context("Getting center group")
.log_err()?,
dock_anchor,
dock_visible,
})
@@ -70,231 +76,152 @@ impl Db {
pub fn save_workspace<P: AsRef<Path>>(
&self,
worktree_roots: &[P],
workspace: SerializedWorkspace,
old_roots: Option<&[P]>,
workspace: &SerializedWorkspace,
) {
let workspace_id: WorkspaceId = worktree_roots.into();
self.with_savepoint("update_worktrees", |conn| {
self.with_savepoint("update_worktrees", || {
if let Some(old_roots) = old_roots {
let old_id: WorkspaceId = old_roots.into();
self.prepare("DELETE FROM WORKSPACES WHERE workspace_id = ?")?
.with_bindings(&old_id)?
.exec()?;
}
// Delete any previous workspaces with the same roots. This cascades to all
// other tables that are based on the same roots set.
// Insert new workspace into workspaces table if none were found
self.prepare(indoc!{"
DELETE FROM workspaces WHERE workspace_id = ?1;
INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?1, ?, ?)"})?
self.prepare("DELETE FROM workspaces WHERE workspace_id = ?;")?
.with_bindings(&workspace_id)?
.exec()?;
self.prepare(
"INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?, ?, ?)",
)?
.with_bindings((&workspace_id, workspace.dock_anchor, workspace.dock_visible))?
.exec()?;
// Save center pane group and dock pane
Self::save_center_group(&workspace_id, &workspace.center_group, conn)?;
Self::save_dock_pane(&workspace_id, &workspace.dock_pane, conn)?;
self.save_center_group(&workspace_id, &workspace.center_group)?;
self.save_dock_pane(&workspace_id, &workspace.dock_pane)?;
Ok(())
})
.with_context(|| format!("Update workspace with roots {:?}", worktree_roots.iter().map(|p| p.as_ref()).collect::<Vec<_>>()))
.with_context(|| {
format!(
"Update workspace with roots {:?}",
worktree_roots
.iter()
.map(|p| p.as_ref())
.collect::<Vec<_>>()
)
})
.log_err();
}
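save_workspace is a delete-then-insert inside a savepoint: any row under the new roots is removed first, and passing old_roots also drops the row the workspace previously lived under, with the ON DELETE CASCADE foreign keys sweeping out its pane groups, panes, and items. A usage sketch for the rename case, using this file's existing imports and illustrative paths:

// Hypothetical helper showing the rename path; mirrors the tests below.
fn move_workspace(db: &Db, workspace: &SerializedWorkspace) {
    db.save_workspace(
        &["/projects/app", "/projects/lib"], // roots to save under now
        Some(&["/projects/app"]),            // roots it was saved under before
        workspace,
    );
}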
/// Returns the worktree roots of previous workspaces, sorted by last modified
pub fn recent_workspaces(&self, limit: usize) -> Vec<Vec<PathBuf>> {
iife!({
Ok::<_, anyhow::Error>(self.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")?
// TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html
Ok::<_, anyhow::Error>(
self.prepare(
"SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?",
)?
.with_bindings(limit)?
.rows::<WorkspaceId>()?
.into_iter().map(|id| id.0)
.collect::<Vec<Vec<PathBuf>>>())
}).log_err().unwrap_or_default()
.into_iter()
.map(|id| id.paths())
.collect::<Vec<Vec<PathBuf>>>(),
)
})
.log_err()
.unwrap_or_default()
}
}
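recent_workspaces orders rows by the workspaces table's timestamp column and maps each stored id back to its root paths via WorkspaceId::paths. A small usage sketch, with the limit chosen arbitrarily:

// Returns the roots of the most recently saved workspace, if any.
fn most_recent_roots(db: &Db) -> Option<Vec<std::path::PathBuf>> {
    // Entries arrive newest-first; an empty Vec means nothing is stored yet.
    db.recent_workspaces(10).into_iter().next()
}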
#[cfg(test)]
mod tests {
use crate::{
model::{
DockAnchor::{Bottom, Expanded, Right},
SerializedWorkspace,
},
Db,
};
// use std::{path::PathBuf, thread::sleep, time::Duration};
#[test]
fn test_basic_functionality() {
env_logger::init();
// use crate::Db;
let db = Db::open_in_memory("test_basic_functionality");
// use super::WorkspaceId;
let workspace_1 = SerializedWorkspace {
dock_anchor: Bottom,
dock_visible: true,
center_group: Default::default(),
dock_pane: Default::default(),
};
// #[test]
// fn test_workspace_saving() {
// env_logger::init();
// let db = Db::open_in_memory("test_new_worktrees_for_roots");
let workspace_2 = SerializedWorkspace {
dock_anchor: Expanded,
dock_visible: false,
center_group: Default::default(),
dock_pane: Default::default(),
};
// // Test nothing returned with no roots at first
// assert_eq!(db.workspace_for_roots::<String>(&[]), None);
let workspace_3 = SerializedWorkspace {
dock_anchor: Right,
dock_visible: true,
center_group: Default::default(),
dock_pane: Default::default(),
};
// // Test creation
// let workspace_1 = db.workspace_for_roots::<String>(&[]);
// assert_eq!(workspace_1.workspace_id, WorkspaceId(1));
db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_1);
db.save_workspace(&["/tmp"], None, &workspace_2);
// // Ensure the timestamps are different
// sleep(Duration::from_secs(1));
// db.make_new_workspace::<String>(&[]);
db.write_to("test.db").unwrap();
// // Test pulling another value from recent workspaces
// let workspace_2 = db.workspace_for_roots::<String>(&[]);
// assert_eq!(workspace_2.workspace_id, WorkspaceId(2));
// Test that paths are treated as a set
assert_eq!(
db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
workspace_1
);
assert_eq!(
db.workspace_for_roots(&["/tmp2", "/tmp"]).unwrap(),
workspace_1
);
// // Ensure the timestamps are different
// sleep(Duration::from_secs(1));
// Make sure that other keys work
assert_eq!(db.workspace_for_roots(&["/tmp"]).unwrap(), workspace_2);
assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None);
// // Test creating a new workspace that doesn't exist already
// let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]);
// assert_eq!(workspace_3.workspace_id, WorkspaceId(3));
// Test 'mutate' case of updating a pre-existing id
db.save_workspace(&["/tmp", "/tmp2"], Some(&["/tmp", "/tmp2"]), &workspace_2);
assert_eq!(
db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
workspace_2
);
// // Make sure it's in the recent workspaces....
// let workspace_3 = db.workspace_for_roots::<String>(&[]);
// assert_eq!(workspace_3.workspace_id, WorkspaceId(3));
// Test other mechanism for mutating
db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_3);
assert_eq!(
db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
workspace_3
);
// // And that it can be pulled out again
// let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]);
// assert_eq!(workspace_3.workspace_id, WorkspaceId(3));
// }
// #[test]
// fn test_empty_worktrees() {
// let db = Db::open_in_memory("test_empty_worktrees");
// assert_eq!(None, db.workspace::<String>(&[]));
// db.make_new_workspace::<String>(&[]); //ID 1
// db.make_new_workspace::<String>(&[]); //ID 2
// db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]);
// // Sanity check
// assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(1));
// db.update_worktrees::<String>(&WorkspaceId(1), &[]);
// // Make sure 'no worktrees' fails correctly. returning [1, 2] from this
// // call would be semantically correct (as those are the workspaces that
// // don't have roots) but I'd prefer that this API to either return exactly one
// // workspace, and None otherwise
// assert_eq!(db.workspace::<String>(&[]), None,);
// assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(1));
// assert_eq!(
// db.recent_workspaces(2),
// vec![Vec::<PathBuf>::new(), Vec::<PathBuf>::new()],
// )
// }
// #[test]
// fn test_more_workspace_ids() {
// let data = &[
// (WorkspaceId(1), vec!["/tmp1"]),
// (WorkspaceId(2), vec!["/tmp1", "/tmp2"]),
// (WorkspaceId(3), vec!["/tmp1", "/tmp2", "/tmp3"]),
// (WorkspaceId(4), vec!["/tmp2", "/tmp3"]),
// (WorkspaceId(5), vec!["/tmp2", "/tmp3", "/tmp4"]),
// (WorkspaceId(6), vec!["/tmp2", "/tmp4"]),
// (WorkspaceId(7), vec!["/tmp2"]),
// ];
// let db = Db::open_in_memory("test_more_workspace_ids");
// for (workspace_id, entries) in data {
// db.make_new_workspace::<String>(&[]);
// db.update_worktrees(workspace_id, entries);
// }
// assert_eq!(WorkspaceId(1), db.workspace(&["/tmp1"]).unwrap().0);
// assert_eq!(db.workspace(&["/tmp1", "/tmp2"]).unwrap().0, WorkspaceId(2));
// assert_eq!(
// db.workspace(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().0,
// WorkspaceId(3)
// );
// assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(4));
// assert_eq!(
// db.workspace(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().0,
// WorkspaceId(5)
// );
// assert_eq!(db.workspace(&["/tmp2", "/tmp4"]).unwrap().0, WorkspaceId(6));
// assert_eq!(db.workspace(&["/tmp2"]).unwrap().0, WorkspaceId(7));
// assert_eq!(db.workspace(&["/tmp1", "/tmp5"]), None);
// assert_eq!(db.workspace(&["/tmp5"]), None);
// assert_eq!(db.workspace(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None);
// }
// #[test]
// fn test_detect_workspace_id() {
// let data = &[
// (WorkspaceId(1), vec!["/tmp"]),
// (WorkspaceId(2), vec!["/tmp", "/tmp2"]),
// (WorkspaceId(3), vec!["/tmp", "/tmp2", "/tmp3"]),
// ];
// let db = Db::open_in_memory("test_detect_workspace_id");
// for (workspace_id, entries) in data {
// db.make_new_workspace::<String>(&[]);
// db.update_worktrees(workspace_id, entries);
// }
// assert_eq!(db.workspace(&["/tmp2"]), None);
// assert_eq!(db.workspace(&["/tmp2", "/tmp3"]), None);
// assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1));
// assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(2));
// assert_eq!(
// db.workspace(&["/tmp", "/tmp2", "/tmp3"]).unwrap().0,
// WorkspaceId(3)
// );
// }
// #[test]
// fn test_tricky_overlapping_updates() {
// // DB state:
// // (/tree) -> ID: 1
// // (/tree, /tree2) -> ID: 2
// // (/tree2, /tree3) -> ID: 3
// // -> User updates 2 to: (/tree2, /tree3)
// // DB state:
// // (/tree) -> ID: 1
// // (/tree2, /tree3) -> ID: 2
// // Get rid of 3 for garbage collection
// let data = &[
// (WorkspaceId(1), vec!["/tmp"]),
// (WorkspaceId(2), vec!["/tmp", "/tmp2"]),
// (WorkspaceId(3), vec!["/tmp2", "/tmp3"]),
// ];
// let db = Db::open_in_memory("test_tricky_overlapping_update");
// // Load in the test data
// for (workspace_id, entries) in data {
// db.make_new_workspace::<String>(&[]);
// db.update_worktrees(workspace_id, entries);
// }
// sleep(Duration::from_secs(1));
// // Execute the update
// db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]);
// // Make sure that workspace 3 doesn't exist
// assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(2));
// // And that workspace 1 was untouched
// assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1));
// // And that workspace 2 is no longer registered under these roots
// assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None);
// assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(2));
// let recent_workspaces = db.recent_workspaces(10);
// assert_eq!(
// recent_workspaces.get(0).unwrap(),
// &vec![PathBuf::from("/tmp2"), PathBuf::from("/tmp3")]
// );
// assert_eq!(
// recent_workspaces.get(1).unwrap(),
// &vec![PathBuf::from("/tmp")]
// );
// }
// Make sure that updating paths differently also works
db.save_workspace(
&["/tmp3", "/tmp4", "/tmp2"],
Some(&["/tmp", "/tmp2"]),
&workspace_3,
);
assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None);
assert_eq!(
db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"])
.unwrap(),
workspace_3
);
}
}

View file

@@ -1,13 +1,11 @@
// use std::{
// ffi::OsStr,
// fmt::Display,
// hash::Hash,
// os::unix::prelude::OsStrExt,
// path::{Path, PathBuf},
// sync::Arc,
// };
use anyhow::{Context, Result};
use indoc::indoc;
use sqlez::migrations::Migration;
// use anyhow::Result;
use crate::{
model::{ItemId, PaneId, SerializedItem, SerializedItemKind, WorkspaceId},
Db,
};
// use collections::HashSet;
// use rusqlite::{named_params, params, types::FromSql};
@@ -65,45 +63,61 @@
// ) STRICT;
// ";
// enum SerializedItemKind {
// Editor,
// Diagnostics,
// ProjectSearch,
// Terminal,
// }
pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new(
"item",
&[indoc! {"
CREATE TABLE items(
item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique
workspace_id BLOB NOT NULL,
pane_id INTEGER NOT NULL,
kind TEXT NOT NULL,
position INTEGER NOT NULL,
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE
PRIMARY KEY(item_id, workspace_id)
) STRICT;
"}],
);
// struct SerializedItemRow {
// kind: SerializedItemKind,
// item_id: usize,
// path: Option<Arc<Path>>,
// query: Option<String>,
// }
impl Db {
pub(crate) fn get_items(&self, pane_id: PaneId) -> Result<Vec<SerializedItem>> {
Ok(self
.prepare(indoc! {"
SELECT item_id, kind FROM items
WHERE pane_id = ?
ORDER BY position"})?
.with_bindings(pane_id)?
.rows::<(ItemId, SerializedItemKind)>()?
.into_iter()
.map(|(item_id, kind)| match kind {
SerializedItemKind::Terminal => SerializedItem::Terminal { item_id },
_ => unimplemented!(),
})
.collect())
}
// #[derive(Debug, PartialEq, Eq)]
// pub enum SerializedItem {
// Editor { item_id: usize, path: Arc<Path> },
// Diagnostics { item_id: usize },
// ProjectSearch { item_id: usize, query: String },
// Terminal { item_id: usize },
// }
pub(crate) fn save_items(
&self,
workspace_id: &WorkspaceId,
pane_id: PaneId,
items: &[SerializedItem],
) -> Result<()> {
let mut delete_old = self
.prepare("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? AND item_id = ?")
.context("Preparing deletion")?;
let mut insert_new = self.prepare(
"INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)",
).context("Preparing insertion")?;
for (position, item) in items.iter().enumerate() {
delete_old
.with_bindings((workspace_id, pane_id, item.item_id()))?
.exec()?;
// impl SerializedItem {
// pub fn item_id(&self) -> usize {
// match self {
// SerializedItem::Editor { item_id, .. } => *item_id,
// SerializedItem::Diagnostics { item_id } => *item_id,
// SerializedItem::ProjectSearch { item_id, .. } => *item_id,
// SerializedItem::Terminal { item_id } => *item_id,
// }
// }
// }
insert_new
.with_bindings((item.item_id(), workspace_id, pane_id, item.kind(), position))?
.exec()?;
}
// impl Db {
// pub fn get_item(&self, item_id: ItemId) -> SerializedItem {
// unimplemented!()
// }
// pub fn save_item(&self, workspace_id: WorkspaceId, item: &SerializedItem) {}
// pub fn close_item(&self, item_id: ItemId) {}
// }
Ok(())
}
}
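save_items keys each row on (workspace_id, pane_id, item_id) and records the enumeration index as position, which get_items then uses to restore pane order. A round-trip sketch under those assumptions, relying on this file's existing imports and hypothetical ids:

fn items_round_trip(db: &Db, workspace_id: &WorkspaceId, pane_id: PaneId) -> Result<()> {
    let items = vec![
        SerializedItem::Terminal { item_id: 10 },
        SerializedItem::Terminal { item_id: 11 },
    ];
    db.save_items(workspace_id, pane_id, &items)?;
    // Order survives because get_items sorts on the position column.
    assert_eq!(db.get_items(pane_id)?, items);
    Ok(())
}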

View file

@@ -1,4 +1,7 @@
use std::path::{Path, PathBuf};
use std::{
path::{Path, PathBuf},
sync::Arc,
};
use anyhow::{bail, Result};
@@ -8,8 +11,14 @@ use sqlez::{
statement::Statement,
};
#[derive(Debug, PartialEq, Eq, Clone)]
pub(crate) struct WorkspaceId(pub(crate) Vec<PathBuf>);
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct WorkspaceId(Vec<PathBuf>);
impl WorkspaceId {
pub fn paths(self) -> Vec<PathBuf> {
self.0
}
}
impl<P: AsRef<Path>, T: IntoIterator<Item = P>> From<T> for WorkspaceId {
fn from(iterator: T) -> Self {
@@ -74,7 +83,7 @@ impl Column for DockAnchor {
pub(crate) type WorkspaceRow = (WorkspaceId, DockAnchor, bool);
#[derive(Debug)]
#[derive(Debug, PartialEq, Eq)]
pub struct SerializedWorkspace {
pub dock_anchor: DockAnchor,
pub dock_visible: bool,
@@ -82,19 +91,134 @@ pub struct SerializedWorkspace {
pub dock_pane: SerializedPane,
}
#[derive(Debug, PartialEq, Eq)]
#[derive(Debug, PartialEq, Eq, Default)]
pub struct SerializedPaneGroup {
axis: Axis,
children: Vec<SerializedPaneGroup>,
}
#[derive(Debug)]
pub struct SerializedPane {
_children: Vec<SerializedItem>,
impl SerializedPaneGroup {
pub fn new() -> Self {
SerializedPaneGroup {
axis: Axis::Horizontal,
children: Vec::new(),
}
}
}
#[derive(Debug)]
pub enum SerializedItemKind {}
#[derive(Debug, PartialEq, Eq, Default)]
pub struct SerializedPane {
pub(crate) children: Vec<SerializedItem>,
}
#[derive(Debug)]
pub enum SerializedItem {}
impl SerializedPane {
pub fn new(children: Vec<SerializedItem>) -> Self {
SerializedPane { children }
}
}
pub type GroupId = i64;
pub type PaneId = i64;
pub type ItemId = usize;
pub(crate) enum SerializedItemKind {
Editor,
Diagnostics,
ProjectSearch,
Terminal,
}
impl Bind for SerializedItemKind {
fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
match self {
SerializedItemKind::Editor => "Editor",
SerializedItemKind::Diagnostics => "Diagnostics",
SerializedItemKind::ProjectSearch => "ProjectSearch",
SerializedItemKind::Terminal => "Terminal",
}
.bind(statement, start_index)
}
}
impl Column for SerializedItemKind {
fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
String::column(statement, start_index).and_then(|(kind_text, next_index)| {
Ok((
match kind_text.as_ref() {
"Editor" => SerializedItemKind::Editor,
"Diagnostics" => SerializedItemKind::Diagnostics,
"ProjectSearch" => SerializedItemKind::ProjectSearch,
"Terminal" => SerializedItemKind::Terminal,
_ => bail!("Stored serialized item kind is incorrect"),
},
next_index,
))
})
}
}
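Bind writes the enum as its TEXT name and Column parses the name back, the same pattern the hunk header above shows for DockAnchor. A round-trip sketch in the style of the test at the bottom of this file, assuming with_bindings accepts any Bind value and using sqlez::connection::Connection as imported in the tests module below:

fn kind_round_trips() {
    let db = Connection::open_memory("kind_round_trips");
    db.exec("CREATE TABLE kind_test(kind TEXT);").unwrap();
    db.prepare("INSERT INTO kind_test(kind) VALUES (?)")
        .unwrap()
        .with_bindings(SerializedItemKind::Terminal)
        .unwrap()
        .exec()
        .unwrap();
    let kind = db
        .prepare("SELECT kind FROM kind_test")
        .unwrap()
        .row::<SerializedItemKind>()
        .unwrap();
    // matches! avoids needing PartialEq on SerializedItemKind.
    assert!(matches!(kind, SerializedItemKind::Terminal));
}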
#[derive(Debug, PartialEq, Eq)]
pub enum SerializedItem {
Editor { item_id: usize, path: Arc<Path> },
Diagnostics { item_id: usize },
ProjectSearch { item_id: usize, query: String },
Terminal { item_id: usize },
}
impl SerializedItem {
pub fn item_id(&self) -> usize {
match self {
SerializedItem::Editor { item_id, .. } => *item_id,
SerializedItem::Diagnostics { item_id } => *item_id,
SerializedItem::ProjectSearch { item_id, .. } => *item_id,
SerializedItem::Terminal { item_id } => *item_id,
}
}
pub(crate) fn kind(&self) -> SerializedItemKind {
match self {
SerializedItem::Editor { .. } => SerializedItemKind::Editor,
SerializedItem::Diagnostics { .. } => SerializedItemKind::Diagnostics,
SerializedItem::ProjectSearch { .. } => SerializedItemKind::ProjectSearch,
SerializedItem::Terminal { .. } => SerializedItemKind::Terminal,
}
}
}
#[cfg(test)]
mod tests {
use sqlez::connection::Connection;
use crate::model::DockAnchor;
use super::WorkspaceId;
#[test]
fn test_workspace_round_trips() {
let db = Connection::open_memory("workspace_id_round_trips");
db.exec(indoc::indoc! {"
CREATE TABLE workspace_id_test(
workspace_id BLOB,
dock_anchor TEXT
);"})
.unwrap();
let workspace_id: WorkspaceId = WorkspaceId::from(&["/test2", "/test1"]);
db.prepare("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)")
.unwrap()
.with_bindings((&workspace_id, DockAnchor::Bottom))
.unwrap()
.exec()
.unwrap();
assert_eq!(
db.prepare("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1")
.unwrap()
.row::<(WorkspaceId, DockAnchor)>()
.unwrap(),
(WorkspaceId::from(&["/test1", "/test2"]), DockAnchor::Bottom)
);
}
}
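The round trip above reads the roots back sorted even though they were inserted in reverse order, which implies the From conversion normalizes the path set before storing it; that is also what makes the "paths are treated as a set" assertions in the workspace tests hold. A hedged sketch of that normalization, using this file's Path and PathBuf imports (the crate's actual From body is elided by a hunk boundary earlier in this file):

// Illustrative only: sort the roots so differently ordered inputs
// produce the same stored id.
fn normalized_roots<P: AsRef<Path>>(roots: &[P]) -> Vec<PathBuf> {
    let mut roots: Vec<PathBuf> = roots.iter().map(|p| p.as_ref().to_path_buf()).collect();
    roots.sort();
    roots
}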

View file

@@ -1,8 +1,9 @@
use anyhow::Result;
use anyhow::{Context, Result};
use indoc::indoc;
use sqlez::{connection::Connection, migrations::Migration};
use sqlez::migrations::Migration;
use util::unzip_option;
use crate::model::SerializedPane;
use crate::model::{GroupId, PaneId, SerializedPane};
use super::{
model::{SerializedPaneGroup, WorkspaceId},
@@ -19,79 +20,31 @@ pub(crate) const PANE_MIGRATIONS: Migration = Migration::new(
axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal'
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE
PRIMARY KEY(group_id, workspace_id)
) STRICT;
CREATE TABLE panes(
pane_id INTEGER PRIMARY KEY,
workspace_id BLOB NOT NULL,
group_id INTEGER, -- If null, this is a dock pane
idx INTEGER NOT NULL,
position INTEGER, -- If null, this is a dock pane
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
PRIMARY KEY(pane_id, workspace_id)
) STRICT;
CREATE TABLE items(
item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique
pane_id INTEGER NOT NULL,
workspace_id BLOB NOT NULL,
kind TEXT NOT NULL,
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE
PRIMARY KEY(item_id, workspace_id)
) STRICT;
"}],
);
impl Db {
pub(crate) fn get_center_group(&self, _workspace: &WorkspaceId) -> SerializedPaneGroup {
unimplemented!()
pub(crate) fn get_center_group(
&self,
_workspace_id: &WorkspaceId,
) -> Result<SerializedPaneGroup> {
Ok(SerializedPaneGroup::new())
}
pub(crate) fn _get_pane_group(&self, _workspace: &WorkspaceId) -> SerializedPaneGroup {
unimplemented!()
// let axis = self.get_pane_group_axis(pane_group_id);
// let mut children: Vec<(usize, PaneGroupChild)> = Vec::new();
// for child_row in self.get_pane_group_children(pane_group_id) {
// if let Some(child_pane_id) = child_row.child_pane_id {
// children.push((
// child_row.index,
// PaneGroupChild::Pane(self.get_pane(PaneId {
// workspace_id: pane_group_id.workspace_id,
// pane_id: child_pane_id,
// })),
// ));
// } else if let Some(child_group_id) = child_row.child_group_id {
// children.push((
// child_row.index,
// PaneGroupChild::Group(self.get_pane_group(PaneGroupId {
// workspace_id: pane_group_id.workspace_id,
// group_id: child_group_id,
// })),
// ));
// }
// }
// children.sort_by_key(|(index, _)| *index);
// SerializedPaneGroup {
// group_id: pane_group_id,
// axis,
// children: children.into_iter().map(|(_, child)| child).collect(),
// }
}
// fn _get_pane_group_children(
// &self,
// _pane_group_id: PaneGroupId,
// ) -> impl Iterator<Item = PaneGroupChildRow> {
// Vec::new().into_iter()
// }
pub(crate) fn save_center_group(
_workspace: &WorkspaceId,
&self,
_workspace_id: &WorkspaceId,
_center_pane_group: &SerializedPaneGroup,
_connection: &Connection,
) -> Result<()> {
// Delete the center pane group for this workspace and any of its children
// Generate new pane group IDs as we go through
@@ -99,51 +52,86 @@ impl Db {
Ok(())
}
pub(crate) fn get_dock_pane(&self, _workspace: &WorkspaceId) -> Option<SerializedPane> {
unimplemented!()
pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result<SerializedPane> {
let pane_id = self
.prepare(indoc! {"
SELECT pane_id FROM panes
WHERE workspace_id = ? AND group_id IS NULL AND position IS NULL"})?
.with_bindings(workspace_id)?
.row::<PaneId>()?;
Ok(SerializedPane::new(
self.get_items(pane_id).context("Reading items")?,
))
}
pub(crate) fn save_dock_pane(
_workspace: &WorkspaceId,
_dock_pane: &SerializedPane,
_connection: &Connection,
&self,
workspace: &WorkspaceId,
dock_pane: &SerializedPane,
) -> Result<()> {
// iife!({
// self.prepare(
// "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);",
// )?
// .with_bindings(dock_pane.to_row(workspace))?
// .insert()
// })
// .log_err();
Ok(())
self.save_pane(workspace, dock_pane, None)
}
pub(crate) fn save_pane(
&self,
workspace_id: &WorkspaceId,
pane: &SerializedPane,
parent: Option<(GroupId, usize)>,
) -> Result<()> {
let (parent_id, order) = unzip_option(parent);
let pane_id = self
.prepare("INSERT INTO panes(workspace_id, group_id, position) VALUES (?, ?, ?)")?
.with_bindings((workspace_id, parent_id, order))?
.insert()? as PaneId;
self.save_items(workspace_id, pane_id, &pane.children)
.context("Saving items")
}
}
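save_pane flattens the optional parent into two independently nullable columns via util::unzip_option, so a dock pane, which has no parent, stores NULL for both group_id and position, exactly the shape get_dock_pane queries for. The body of unzip_option is not shown in this diff; a sketch of what it presumably does:

// Presumed shape, inferred from the call site above.
fn unzip_option_sketch<T, U>(option: Option<(T, U)>) -> (Option<T>, Option<U>) {
    match option {
        Some((t, u)) => (Some(t), Some(u)),
        None => (None, None),
    }
}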
#[cfg(test)]
mod tests {
// use crate::{items::ItemId, pane::SerializedPane, Db, DockAnchor};
use crate::{
model::{SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace},
Db,
};
// use super::{PaneGroupChild, SerializedDockPane, SerializedPaneGroup};
fn default_workspace(
dock_pane: SerializedPane,
center_group: SerializedPaneGroup,
) -> SerializedWorkspace {
SerializedWorkspace {
dock_anchor: crate::model::DockAnchor::Right,
dock_visible: false,
center_group,
dock_pane,
}
}
// #[test]
// fn test_basic_dock_pane() {
// let db = Db::open_in_memory("basic_dock_pane");
#[test]
fn test_basic_dock_pane() {
let db = Db::open_in_memory("basic_dock_pane");
// let workspace = db.workspace_for_roots(&["/tmp"]);
let dock_pane = crate::model::SerializedPane {
children: vec![
SerializedItem::Terminal { item_id: 1 },
SerializedItem::Terminal { item_id: 4 },
SerializedItem::Terminal { item_id: 2 },
SerializedItem::Terminal { item_id: 3 },
],
};
// let dock_pane = SerializedDockPane {
// anchor_position: DockAnchor::Expanded,
// visible: true,
// };
let workspace = default_workspace(dock_pane, SerializedPaneGroup::new());
// db.save_dock_pane(&workspace.workspace_id, &dock_pane);
db.save_workspace(&["/tmp"], None, &workspace);
// let new_workspace = db.workspace_for_roots(&["/tmp"]);
let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap();
// assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane);
// }
assert_eq!(workspace.dock_pane, new_workspace.dock_pane);
}
// #[test]
// fn test_dock_simple_split() {