WIP: fixing up behavior of workspace initialization
commit b9cbd4084e
parent 5505a776e6
5 changed files with 317 additions and 222 deletions
@@ -1,5 +1,8 @@
 use std::{fs::File, path::Path, thread::sleep, time::Duration};
 
+use db::pane::SerializedDockPane;
+use settings::DockAnchor;
+
 const TEST_FILE: &'static str = "test-db.db";
 
 fn main() -> anyhow::Result<()> {
@@ -12,12 +15,17 @@ fn main() -> anyhow::Result<()> {
     let f = File::create(file)?;
     drop(f);
 
-    let workspace = db.make_new_workspace();
+    let workspace = db.make_new_workspace::<String>(&[]);
 
-    db.update_worktree_roots(&workspace.workspace_id, &["/tmp"]);
+    db.update_worktrees(&workspace.workspace_id, &["/tmp"]);
 
-    db.save_pane_splits(center_pane_group);
-    db.save_dock_pane();
+    db.save_dock_pane(SerializedDockPane {
+        workspace: workspace.workspace_id,
+        anchor_position: DockAnchor::Expanded,
+        shown: true,
+    });
+
+    let new_workspace = db.workspace_for_roots(&["/tmp"]);
 
     db.write_to(file).ok();
 
@@ -15,29 +15,29 @@ fn main() -> anyhow::Result<()> {
     db.write_kvp("test", "1")?;
     db.write_kvp("test-2", "2")?;
 
-    let workspace_1 = db.make_new_workspace();
-    let workspace_2 = db.make_new_workspace();
-    let workspace_3 = db.make_new_workspace();
-    let workspace_4 = db.make_new_workspace();
-    let workspace_5 = db.make_new_workspace();
-    let workspace_6 = db.make_new_workspace();
-    let workspace_7 = db.make_new_workspace();
+    let workspace_1 = db.make_new_workspace::<String>(&[]);
+    let workspace_2 = db.make_new_workspace::<String>(&[]);
+    let workspace_3 = db.make_new_workspace::<String>(&[]);
+    let workspace_4 = db.make_new_workspace::<String>(&[]);
+    let workspace_5 = db.make_new_workspace::<String>(&[]);
+    let workspace_6 = db.make_new_workspace::<String>(&[]);
+    let workspace_7 = db.make_new_workspace::<String>(&[]);
 
     // Order scrambled + sleeps added because sqlite only has 1 second resolution on
     // their timestamps
-    db.update_worktree_roots(&workspace_7.workspace_id, &["/tmp2"]);
+    db.update_worktrees(&workspace_7.workspace_id, &["/tmp2"]);
     sleep(Duration::from_secs(1));
-    db.update_worktree_roots(&workspace_1.workspace_id, &["/tmp1"]);
+    db.update_worktrees(&workspace_1.workspace_id, &["/tmp1"]);
     sleep(Duration::from_secs(1));
-    db.update_worktree_roots(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]);
+    db.update_worktrees(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]);
     sleep(Duration::from_secs(1));
-    db.update_worktree_roots(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]);
+    db.update_worktrees(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]);
     sleep(Duration::from_secs(1));
-    db.update_worktree_roots(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]);
+    db.update_worktrees(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]);
     sleep(Duration::from_secs(1));
-    db.update_worktree_roots(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]);
+    db.update_worktrees(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]);
     sleep(Duration::from_secs(1));
-    db.update_worktree_roots(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]);
+    db.update_worktrees(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]);
 
     db.write_to(file).ok();
 
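The sleeps in the example above exist because SQLite's CURRENT_TIMESTAMP only has one-second resolution, so workspaces touched within the same second cannot be ordered by recency. A standalone sketch of that behaviour, assuming the rusqlite crate that the `Connection`/`params!` usage elsewhere in this diff appears to come from; the table shape here is only illustrative, the real schema lives in the migrations not shown on this page:

```rust
// Sketch, not part of the commit: two inserts in the same second get the
// same CURRENT_TIMESTAMP value, which is what the sleeps work around.
use rusqlite::Connection;

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE workspaces(workspace_id INTEGER PRIMARY KEY,
                                 timestamp TEXT DEFAULT CURRENT_TIMESTAMP);",
    )?;

    // Two rows inserted back to back, well within one second...
    conn.execute("INSERT INTO workspaces DEFAULT VALUES", [])?;
    conn.execute("INSERT INTO workspaces DEFAULT VALUES", [])?;

    // ...end up with identical timestamps, so ordering by recency is ambiguous.
    let distinct: i64 =
        conn.query_row("SELECT COUNT(DISTINCT timestamp) FROM workspaces", [], |r| r.get(0))?;
    println!("distinct timestamps: {}", distinct); // almost certainly 1

    Ok(())
}
```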
@@ -1,8 +1,8 @@
-mod items;
-mod kvp;
+pub mod items;
+pub mod kvp;
 mod migrations;
-mod pane;
-mod workspace;
+pub mod pane;
+pub mod workspace;
 
 use std::fs;
 use std::path::{Path, PathBuf};
@@ -136,8 +136,11 @@ CREATE TABLE dock_items(
 COMMIT;
 ";
 
-struct SerializedDockPane {
-    //Cols
+#[derive(Default, Debug)]
+pub struct SerializedDockPane {
+    pub workspace: WorkspaceId,
+    pub anchor_position: DockAnchor,
+    pub shown: bool,
 }
 
 impl Db {
@@ -194,7 +197,35 @@ impl Db {
         unimplemented!();
     }
 
-    fn save_dock_pane() {}
+    pub fn get_dock_pane(&self, workspace: WorkspaceId) -> Option<SerializedDockPane> {
+        unimplemented!()
+    }
+
+    pub fn save_dock_pane(&self, dock_pane: SerializedDockPane) {}
 }
 
-mod tests {}
+#[cfg(test)]
+mod tests {
+    use settings::DockAnchor;
+
+    use crate::Db;
+
+    use super::SerializedDockPane;
+
+    #[test]
+    fn test_basic_dock_pane() {
+        let db = Db::open_in_memory();
+
+        let workspace = db.make_new_workspace::<String>(&[]);
+
+        db.update_worktrees(&workspace.workspace_id, &["/tmp"]);
+
+        db.save_dock_pane(SerializedDockPane {
+            workspace: workspace.workspace_id,
+            anchor_position: DockAnchor::Expanded,
+            shown: true,
+        });
+
+        let new_workspace = db.workspace_for_roots(&["/tmp"]);
+    }
+}
@@ -9,7 +9,7 @@ use std::{
     sync::Arc,
 };
 
-use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup};
+use crate::pane::SerializedDockPane;
 
 use super::Db;
 
@@ -41,16 +41,16 @@ pub struct WorkspaceId(i64);
 pub struct SerializedWorkspace {
     pub workspace_id: WorkspaceId,
     // pub center_group: SerializedPaneGroup,
-    // pub dock_pane: Option<SerializedPane>,
+    pub dock_pane: Option<SerializedDockPane>,
 }
 
 impl Db {
     /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, return the
     /// the last workspace id
-    pub fn workspace_for_worktree_roots(
-        &self,
-        worktree_roots: &[Arc<Path>],
-    ) -> SerializedWorkspace {
+    pub fn workspace_for_roots<P>(&self, worktree_roots: &[P]) -> SerializedWorkspace
+    where
+        P: AsRef<Path> + Debug,
+    {
         // Find the workspace id which is uniquely identified by this set of paths
         // return it if found
         let mut workspace_id = self.workspace_id(worktree_roots);
@@ -59,31 +59,50 @@ impl Db {
         }
 
         if let Some(workspace_id) = workspace_id {
-            // TODO
-            // let workspace_row = self.get_workspace_row(workspace_id);
-            // let center_group = self.get_pane_group(workspace_row.center_group_id);
-            // let dock_pane = self.get_pane(workspace_row.dock_pane_id);
-
             SerializedWorkspace {
                 workspace_id,
-                // center_group,
-                // dock_pane: Some(dock_pane),
+                dock_pane: self.get_dock_pane(workspace_id),
             }
         } else {
-            self.make_new_workspace()
+            self.make_new_workspace(worktree_roots)
         }
     }
 
-    pub fn make_new_workspace(&self) -> SerializedWorkspace {
+    pub fn make_new_workspace<P>(&self, worktree_roots: &[P]) -> SerializedWorkspace
+    where
+        P: AsRef<Path> + Debug,
+    {
+        fn logic<P>(
+            connection: &mut Connection,
+            worktree_roots: &[P],
+        ) -> Result<SerializedWorkspace>
+        where
+            P: AsRef<Path> + Debug,
+        {
+            let tx = connection.transaction()?;
+            tx.execute("INSERT INTO workspaces DEFAULT VALUES", [])?;
+
+            let id = WorkspaceId(tx.last_insert_rowid());
+
+            update_worktree_roots(&tx, &id, worktree_roots)?;
+
+            Ok(SerializedWorkspace {
+                workspace_id: id,
+                dock_pane: None,
+            })
+        }
+
         self.real()
             .map(|db| {
-                let lock = db.connection.lock();
+                let mut lock = db.connection.lock();
+
                 // No need to waste the memory caching this, should happen rarely.
-                match lock.execute("INSERT INTO workspaces DEFAULT VALUES", []) {
-                    Ok(_) => SerializedWorkspace {
-                        workspace_id: WorkspaceId(lock.last_insert_rowid()),
-                    },
-                    Err(_) => Default::default(),
+                match logic(&mut lock, worktree_roots) {
+                    Ok(serialized_workspace) => serialized_workspace,
+                    Err(err) => {
+                        log::error!("Failed to insert new workspace into DB: {}", err);
+                        Default::default()
+                    }
                 }
             })
             .unwrap_or_default()
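The inner `logic` helper above exists so that all of the fallible SQLite calls can use `?` and be logged in one place. The transaction pattern it leans on, sketched in isolation with rusqlite (an assumption about the underlying driver, based on the `Connection`/`params!` usage in this diff; the one-column table is only illustrative):

```rust
// Standalone sketch, not part of the commit: insert a row inside a transaction
// and read back its generated rowid, mirroring the first steps of `logic`.
use rusqlite::{Connection, Result};

fn main() -> Result<()> {
    let mut connection = Connection::open_in_memory()?;
    connection.execute_batch("CREATE TABLE workspaces(workspace_id INTEGER PRIMARY KEY);")?;

    let tx = connection.transaction()?;
    tx.execute("INSERT INTO workspaces DEFAULT VALUES", [])?;
    let id = tx.last_insert_rowid(); // rowid assigned to the row inserted in this transaction

    // rusqlite rolls an uncommitted transaction back when it is dropped,
    // so the commit is what makes the new row durable.
    tx.commit()?;

    println!("new workspace_id: {}", id);
    Ok(())
}
```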
@@ -97,7 +116,13 @@ impl Db {
             .map(|db| {
                 let lock = db.connection.lock();
 
-                get_workspace_id(worktree_roots, &lock)
+                match get_workspace_id(worktree_roots, &lock) {
+                    Ok(workspace_id) => workspace_id,
+                    Err(err) => {
+                        log::error!("Failed ot get workspace_id: {}", err);
+                        None
+                    }
+                }
             })
             .unwrap_or(None)
     }
@@ -109,61 +134,16 @@ impl Db {
     /// Updates the open paths for the given workspace id. Will garbage collect items from
     /// any workspace ids which are no replaced by the new workspace id. Updates the timestamps
     /// in the workspace id table
-    pub fn update_worktree_roots<P>(&self, workspace_id: &WorkspaceId, worktree_roots: &[P])
+    pub fn update_worktrees<P>(&self, workspace_id: &WorkspaceId, worktree_roots: &[P])
     where
         P: AsRef<Path> + Debug,
     {
-        fn logic<P>(
-            connection: &mut Connection,
-            worktree_roots: &[P],
-            workspace_id: &WorkspaceId,
-        ) -> Result<()>
-        where
-            P: AsRef<Path> + Debug,
-        {
-            let tx = connection.transaction()?;
-            {
-                // Lookup any old WorkspaceIds which have the same set of roots, and delete them.
-                let preexisting_id = get_workspace_id(worktree_roots, &tx);
-                if let Some(preexisting_id) = preexisting_id {
-                    if preexisting_id != *workspace_id {
-                        // Should also delete fields in other tables with cascading updates
-                        tx.execute(
-                            "DELETE FROM workspaces WHERE workspace_id = ?",
-                            [preexisting_id.0],
-                        )?;
-                    }
-                }
-
-                tx.execute(
-                    "DELETE FROM worktree_roots WHERE workspace_id = ?",
-                    [workspace_id.0],
-                )?;
-
-                for root in worktree_roots {
-                    let path = root.as_ref().as_os_str().as_bytes();
-                    // If you need to debug this, here's the string parsing:
-                    // let path = root.as_ref().to_string_lossy().to_string();
-
-                    tx.execute(
-                        "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)",
-                        params![workspace_id.0, path],
-                    )?;
-                }
-
-                tx.execute(
-                    "UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?",
-                    [workspace_id.0],
-                )?;
-            }
-            tx.commit()?;
-            Ok(())
-        }
-
         self.real().map(|db| {
             let mut lock = db.connection.lock();
 
-            match logic(&mut lock, worktree_roots, workspace_id) {
+            let tx = lock.transaction();
+
+            match tx.map(|tx| update_worktree_roots(&tx, workspace_id, worktree_roots)) {
                 Ok(_) => {}
                 Err(err) => {
                     dbg!(&err);
@@ -257,86 +237,130 @@ impl Db {
         }
     }
 
-fn get_workspace_id<P>(worktree_roots: &[P], connection: &Connection) -> Option<WorkspaceId>
+fn update_worktree_roots<P>(
+    connection: &Connection,
+    workspace_id: &WorkspaceId,
+    worktree_roots: &[P],
+) -> Result<()>
 where
     P: AsRef<Path> + Debug,
 {
-    fn logic<P>(
-        worktree_roots: &[P],
-        connection: &Connection,
-    ) -> Result<Option<WorkspaceId>, anyhow::Error>
-    where
-        P: AsRef<Path> + Debug,
-    {
+    // Lookup any old WorkspaceIds which have the same set of roots, and delete them.
+    let preexisting_id = get_workspace_id(worktree_roots, &connection)?;
+    if let Some(preexisting_id) = preexisting_id {
+        if preexisting_id != *workspace_id {
+            // Should also delete fields in other tables with cascading updates
+            connection.execute(
+                "DELETE FROM workspaces WHERE workspace_id = ?",
+                [preexisting_id.0],
+            )?;
+        }
+    }
+
+    connection.execute(
+        "DELETE FROM worktree_roots WHERE workspace_id = ?",
+        [workspace_id.0],
+    )?;
+
+    for root in worktree_roots {
+        let path = root.as_ref().as_os_str().as_bytes();
+        // If you need to debug this, here's the string parsing:
+        // let path = root.as_ref().to_string_lossy().to_string();
+
+        connection.execute(
+            "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)",
+            params![workspace_id.0, path],
+        )?;
+    }
+
+    connection.execute(
+        "UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?",
+        [workspace_id.0],
+    )?;
+
+    Ok(())
+}
+
+fn get_workspace_id<P>(worktree_roots: &[P], connection: &Connection) -> Result<Option<WorkspaceId>>
+where
+    P: AsRef<Path> + Debug,
+{
+    // fn logic<P>(
+    //     worktree_roots: &[P],
+    //     connection: &Connection,
+    // ) -> Result<Option<WorkspaceId>, anyhow::Error>
+    // where
+    //     P: AsRef<Path> + Debug,
+    // {
     // Short circuit if we can
     if worktree_roots.len() == 0 {
         return Ok(None);
     }
 
     // Prepare the array binding string. SQL doesn't have syntax for this, so
     // we have to do it ourselves.
     let mut array_binding_stmt = "(".to_string();
     for i in 0..worktree_roots.len() {
         // This uses ?NNN for numbered placeholder syntax
         array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based
         if i < worktree_roots.len() - 1 {
             array_binding_stmt.push(',');
             array_binding_stmt.push(' ');
         }
     }
     array_binding_stmt.push(')');
     // Any workspace can have multiple independent paths, and these paths
     // can overlap in the database. Take this test data for example:
     //
     // [/tmp, /tmp2] -> 1
     // [/tmp] -> 2
     // [/tmp2, /tmp3] -> 3
     //
     // This would be stred in the database like so:
     //
     // ID PATH
     // 1  /tmp
     // 1  /tmp2
     // 2  /tmp
     // 3  /tmp2
     // 3  /tmp3
     //
     // Note how both /tmp and /tmp2 are associated with multiple workspace IDs.
     // So, given an array of worktree roots, how can we find the exactly matching ID?
     // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out:
     // - We start with a join of this table on itself, generating every possible
     //   pair of ((path, ID), (path, ID)), and filtering the join down to just the
     //   *overlapping but non-matching* workspace IDs. For this small data set,
     //   this would look like:
     //
     //   wt1.ID wt1.PATH | wt2.ID wt2.PATH
     //   3      /tmp3      3      /tmp2
     //
     // - Moving one SELECT out, we use the first pair's ID column to invert the selection,
     //   meaning we now have a list of all the entries for our array, minus overlapping sets,
     //   but including *subsets* of our worktree roots:
     //
     //   ID PATH
     //   1  /tmp
     //   1  /tmp2
     //   2  /tmp
     //
     // - To trim out the subsets, we can to exploit the PRIMARY KEY constraint that there are no
     //   duplicate entries in this table. Using a GROUP BY and a COUNT we can find the subsets of
     //   our keys:
     //
     //   ID num_matching
     //   1  2
     //   2  1
     //
     // - And with one final WHERE num_matching = $num_of_worktree_roots, we're done! We've found the
     //   matching ID correctly :D
     //
     // Note: due to limitations in SQLite's query binding, we have to generate the prepared
     // statement with string substitution (the {array_bind}) below, and then bind the
     // parameters by number.
     let query = format!(
         r#"
 SELECT workspace_id
 FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots
 WHERE worktree_root in {array_bind} AND workspace_id NOT IN
@@ -347,50 +371,50 @@ where
 GROUP BY workspace_id)
 WHERE num_matching = ?
     "#,
         array_bind = array_binding_stmt
     );
 
     // This will only be called on start up and when root workspaces change, no need to waste memory
     // caching it.
     let mut stmt = connection.prepare(&query)?;
     // Make sure we bound the parameters correctly
     debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count());
 
     for i in 0..worktree_roots.len() {
         let path = &worktree_roots[i].as_ref().as_os_str().as_bytes();
         // If you need to debug this, here's the string parsing:
         // let path = &worktree_roots[i].as_ref().to_string_lossy().to_string()
         stmt.raw_bind_parameter(i + 1, path)?
     }
     // No -1, because SQLite is 1 based
     stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?;
 
     let mut rows = stmt.raw_query();
     let row = rows.next();
     let result = if let Ok(Some(row)) = row {
         Ok(Some(WorkspaceId(row.get(0)?)))
     } else {
         Ok(None)
     };
 
     // Ensure that this query only returns one row. The PRIMARY KEY constraint should catch this case
     // but this is here to catch if someone refactors that constraint out.
     debug_assert!(matches!(rows.next(), Ok(None)));
 
     result
-    }
-    match logic(worktree_roots, connection) {
-        Ok(result) => result,
-        Err(err) => {
-            log::error!(
-                "Failed to get the workspace ID for paths {:?}, err: {}",
-                worktree_roots,
-                err
-            );
-            None
-        }
-    }
-
+    // }
+
+    // match logic(worktree_roots, connection) {
+    //     Ok(result) => result,
+    //     Err(err) => {
+    //         log::error!(
+    //             "Failed to get the workspace ID for paths {:?}, err: {}",
+    //             worktree_roots,
+    //             err
+    //         );
+    //         None
+    //     }
+    // }
 }
 
 #[cfg(test)]
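For readers tracing the query above: the `?NNN` placeholder list is built by plain string concatenation because SQLite has no array-binding syntax, and the one extra parameter after the roots is bound to the number of roots for the `WHERE num_matching = ?` check. A minimal, self-contained sketch of just that step (not part of the commit; plain std Rust, no database involved):

```rust
// Sketch of the placeholder construction used by get_workspace_id above.
// For three roots this prints "(?1, ?2, ?3)", and the count parameter sits at
// index worktree_roots.len() + 1, which is why the real code binds it without a -1.
fn main() {
    let worktree_roots = ["/tmp", "/tmp2", "/tmp3"];

    let mut array_binding_stmt = "(".to_string();
    for i in 0..worktree_roots.len() {
        array_binding_stmt.push_str(&format!("?{}", i + 1)); // sqlite is 1-based
        if i < worktree_roots.len() - 1 {
            array_binding_stmt.push(',');
            array_binding_stmt.push(' ');
        }
    }
    array_binding_stmt.push(')');

    println!("{}", array_binding_stmt); // (?1, ?2, ?3)
    println!("num_matching parameter index: ?{}", worktree_roots.len() + 1); // ?4
}
```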
@@ -407,20 +431,52 @@ mod tests {
 
     use super::WorkspaceId;
 
+    #[test]
+    fn test_worktree_for_roots() {
+        let db = Db::open_in_memory();
+
+        // Test creation in 0 case
+        let workspace_1 = db.workspace_for_roots::<String>(&[]);
+        assert_eq!(workspace_1.workspace_id, WorkspaceId(1));
+
+        // Test pulling from recent workspaces
+        let workspace_1 = db.workspace_for_roots::<String>(&[]);
+        assert_eq!(workspace_1.workspace_id, WorkspaceId(1));
+
+        sleep(Duration::from_secs(1));
+        db.make_new_workspace::<String>(&[]);
+
+        // Test pulling another value from recent workspaces
+        let workspace_2 = db.workspace_for_roots::<String>(&[]);
+        assert_eq!(workspace_2.workspace_id, WorkspaceId(2));
+
+        // Test creating a new workspace that doesn't exist already
+        let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]);
+        assert_eq!(workspace_3.workspace_id, WorkspaceId(3));
+
+        // Make sure it's in the recent workspaces....
+        let workspace_3 = db.workspace_for_roots::<String>(&[]);
+        assert_eq!(workspace_3.workspace_id, WorkspaceId(3));
+
+        // And that it can be pulled out again
+        let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]);
+        assert_eq!(workspace_3.workspace_id, WorkspaceId(3));
+    }
+
     #[test]
     fn test_empty_worktrees() {
         let db = Db::open_in_memory();
 
         assert_eq!(None, db.workspace_id::<String>(&[]));
 
-        db.make_new_workspace(); //ID 1
-        db.make_new_workspace(); //ID 2
-        db.update_worktree_roots(&WorkspaceId(1), &["/tmp", "/tmp2"]);
+        db.make_new_workspace::<String>(&[]); //ID 1
+        db.make_new_workspace::<String>(&[]); //ID 2
+        db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]);
 
         // Sanity check
         assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1)));
 
-        db.update_worktree_roots::<String>(&WorkspaceId(1), &[]);
+        db.update_worktrees::<String>(&WorkspaceId(1), &[]);
 
         // Make sure 'no worktrees' fails correctly. returning [1, 2] from this
         // call would be semantically correct (as those are the workspaces that
@@ -451,8 +507,8 @@ mod tests {
         let db = Db::open_in_memory();
 
         for (workspace_id, entries) in data {
-            db.make_new_workspace();
-            db.update_worktree_roots(workspace_id, entries);
+            db.make_new_workspace::<String>(&[]);
+            db.update_worktrees(workspace_id, entries);
         }
 
         assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp1"]));
@@ -485,8 +541,8 @@ mod tests {
         let db = Db::open_in_memory();
 
         for (workspace_id, entries) in data {
-            db.make_new_workspace();
-            db.update_worktree_roots(workspace_id, entries);
+            db.make_new_workspace::<String>(&[]);
+            db.update_worktrees(workspace_id, entries);
         }
 
         assert_eq!(db.workspace_id(&["/tmp2"]), None);
@@ -527,15 +583,15 @@ mod tests {
 
         // Load in the test data
         for (workspace_id, entries) in data {
-            db.make_new_workspace();
-            db.update_worktree_roots(workspace_id, entries);
+            db.make_new_workspace::<String>(&[]);
+            db.update_worktrees(workspace_id, entries);
         }
 
         // Make sure the timestamp updates
         sleep(Duration::from_secs(1));
 
         // Execute the update
-        db.update_worktree_roots(&WorkspaceId(2), &["/tmp2", "/tmp3"]);
+        db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]);
 
         // Make sure that workspace 3 doesn't exist
         assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), Some(WorkspaceId(2)));