Moved docks to a better position

Mikayla Maki 2022-11-02 15:20:47 -07:00
parent 685bc9fed3
commit 19aac6a57f
4 changed files with 226 additions and 154 deletions

View file

@@ -1,6 +1,6 @@
use std::{fs::File, path::Path};
use db::pane::{DockAnchor, SerializedDockPane};
use db::{pane::SerializedDockPane, DockAnchor};
const TEST_FILE: &'static str = "test-db.db";

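The hunk above swaps this file's imports so DockAnchor is pulled from the db crate root instead of db::pane. A minimal sketch of a call site using the new paths; it relies only on derives visible elsewhere in this commit (DockAnchor's #[default] Bottom variant and SerializedDockPane's derived Default):

    use db::{pane::SerializedDockPane, DockAnchor};

    fn main() {
        // DockAnchor now lives at (or is re-exported from) the crate root,
        // so it no longer needs to be named through db::pane.
        let anchor = DockAnchor::default();
        assert_eq!(anchor, DockAnchor::Bottom);

        // SerializedDockPane stays in db::pane and derives Default in this
        // commit, so an empty value can be built without naming its fields.
        let _dock_pane = SerializedDockPane::default();
    }
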
View file

@@ -67,8 +67,7 @@
#[derive(Debug, PartialEq, Eq)]
pub struct ItemId {
workspace_id: usize,
item_id: usize,
pub item_id: usize,
}
// enum SerializedItemKind {

View file

@@ -1,4 +1,3 @@
use anyhow::bail;
use gpui::Axis;
use indoc::indoc;
use sqlez::{
@@ -8,7 +7,7 @@ use sqlez::{
};
use util::{iife, ResultExt};
use crate::{items::ItemId, workspace::WorkspaceId};
use crate::{items::ItemId, workspace::WorkspaceId, DockAnchor};
use super::Db;
@@ -33,14 +32,15 @@ CREATE TABLE panes(
FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
) STRICT;
CREATE TABLE dock_panes(
pane_id INTEGER PRIMARY KEY,
workspace_id INTEGER NOT NULL,
anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded'
visible INTEGER NOT NULL, -- Boolean
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE
) STRICT;
-- MOVE TO WORKSPACE TABLE
-- CREATE TABLE dock_panes(
--     pane_id INTEGER PRIMARY KEY,
--     workspace_id INTEGER NOT NULL,
--     anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded'
--     visible INTEGER NOT NULL, -- Boolean
--     FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
--     FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE
-- ) STRICT;
CREATE TABLE items(
item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique
@@ -77,36 +77,34 @@ pub struct PaneId {
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct PaneGroupId {
workspace_id: WorkspaceId,
group_id: usize,
}
impl PaneGroupId {
pub fn root(workspace_id: WorkspaceId) -> Self {
Self {
workspace_id,
group_id: 0,
// group_id: 0,
}
}
}
#[derive(Debug, PartialEq, Eq)]
#[derive(Debug, PartialEq, Eq, Default)]
pub struct SerializedPaneGroup {
group_id: PaneGroupId,
axis: Axis,
children: Vec<PaneGroupChild>,
}
impl SerializedPaneGroup {
pub fn empty_root(workspace_id: WorkspaceId) -> Self {
pub fn empty_root(_workspace_id: WorkspaceId) -> Self {
Self {
group_id: PaneGroupId::root(workspace_id),
// group_id: PaneGroupId::root(workspace_id),
axis: Default::default(),
children: Default::default(),
}
}
}
struct PaneGroupChildRow {
struct _PaneGroupChildRow {
child_pane_id: Option<usize>,
child_group_id: Option<usize>,
index: usize,
@@ -120,47 +118,11 @@ pub enum PaneGroupChild {
#[derive(Debug, PartialEq, Eq)]
pub struct SerializedPane {
pane_id: PaneId,
children: Vec<ItemId>,
items: Vec<ItemId>,
}
//********* CURRENTLY IN USE TYPES: *********
#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)]
pub enum DockAnchor {
#[default]
Bottom,
Right,
Expanded,
}
impl Bind for DockAnchor {
fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
match self {
DockAnchor::Bottom => "Bottom",
DockAnchor::Right => "Right",
DockAnchor::Expanded => "Expanded",
}
.bind(statement, start_index)
}
}
impl Column for DockAnchor {
fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
String::column(statement, start_index).and_then(|(anchor_text, next_index)| {
Ok((
match anchor_text.as_ref() {
"Bottom" => DockAnchor::Bottom,
"Right" => DockAnchor::Right,
"Expanded" => DockAnchor::Expanded,
_ => bail!("Stored dock anchor is incorrect"),
},
next_index,
))
})
}
}
#[derive(Default, Debug, PartialEq, Eq)]
pub struct SerializedDockPane {
pub anchor_position: DockAnchor,
@@ -227,56 +189,64 @@ impl Column for DockRow {
}
impl Db {
pub fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup {
let axis = self.get_pane_group_axis(pane_group_id);
let mut children: Vec<(usize, PaneGroupChild)> = Vec::new();
for child_row in self.get_pane_group_children(pane_group_id) {
if let Some(child_pane_id) = child_row.child_pane_id {
children.push((
child_row.index,
PaneGroupChild::Pane(self.get_pane(PaneId {
workspace_id: pane_group_id.workspace_id,
pane_id: child_pane_id,
})),
));
} else if let Some(child_group_id) = child_row.child_group_id {
children.push((
child_row.index,
PaneGroupChild::Group(self.get_pane_group(PaneGroupId {
workspace_id: pane_group_id.workspace_id,
group_id: child_group_id,
})),
));
}
}
children.sort_by_key(|(index, _)| *index);
SerializedPaneGroup {
group_id: pane_group_id,
axis,
children: children.into_iter().map(|(_, child)| child).collect(),
}
pub fn get_center_group(&self, _workspace: WorkspaceId) -> SerializedPaneGroup {
unimplemented!()
}
fn get_pane_group_children(
pub fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup {
unimplemented!()
// let axis = self.get_pane_group_axis(pane_group_id);
// let mut children: Vec<(usize, PaneGroupChild)> = Vec::new();
// for child_row in self.get_pane_group_children(pane_group_id) {
// if let Some(child_pane_id) = child_row.child_pane_id {
// children.push((
// child_row.index,
// PaneGroupChild::Pane(self.get_pane(PaneId {
// workspace_id: pane_group_id.workspace_id,
// pane_id: child_pane_id,
// })),
// ));
// } else if let Some(child_group_id) = child_row.child_group_id {
// children.push((
// child_row.index,
// PaneGroupChild::Group(self.get_pane_group(PaneGroupId {
// workspace_id: pane_group_id.workspace_id,
// group_id: child_group_id,
// })),
// ));
// }
// }
// children.sort_by_key(|(index, _)| *index);
// SerializedPaneGroup {
// group_id: pane_group_id,
// axis,
// children: children.into_iter().map(|(_, child)| child).collect(),
// }
}
fn _get_pane_group_children(
&self,
_pane_group_id: PaneGroupId,
) -> impl Iterator<Item = PaneGroupChildRow> {
) -> impl Iterator<Item = _PaneGroupChildRow> {
Vec::new().into_iter()
}
fn get_pane_group_axis(&self, _pane_group_id: PaneGroupId) -> Axis {
fn _get_pane_group_axis(&self, _pane_group_id: PaneGroupId) -> Axis {
unimplemented!();
}
pub fn save_pane_splits(&self, _center_pane_group: SerializedPaneGroup) {
pub fn save_pane_splits(
&self,
_workspace: &WorkspaceId,
_center_pane_group: &SerializedPaneGroup,
) {
// Delete the center pane group for this workspace and any of its children
// Generate new pane group IDs as we go through
// insert them
// Items garbage collect themselves when dropped
}
pub(crate) fn get_pane(&self, _pane_id: PaneId) -> SerializedPane {
pub(crate) fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane {
unimplemented!();
}
@@ -305,9 +275,9 @@ impl Db {
#[cfg(test)]
mod tests {
use crate::{pane::SerializedPane, Db};
use crate::{items::ItemId, pane::SerializedPane, Db, DockAnchor};
use super::{DockAnchor, SerializedDockPane};
use super::{PaneGroupChild, SerializedDockPane, SerializedPaneGroup};
#[test]
fn test_basic_dock_pane() {
@@ -333,18 +303,18 @@ mod tests {
let workspace = db.workspace_for_roots(&["/tmp"]);
let center_pane = SerializedPane {
pane_id: crate::pane::PaneId {
workspace_id: workspace.workspace_id,
pane_id: 1,
},
children: vec![],
// Pane group -> Pane -> 10 , 20
let center_pane = SerializedPaneGroup {
axis: gpui::Axis::Horizontal,
children: vec![PaneGroupChild::Pane(SerializedPane {
items: vec![ItemId { item_id: 10 }, ItemId { item_id: 20 }],
})],
};
db.save_dock_pane(&workspace.workspace_id, &dock_pane);
db.save_pane_splits(&workspace.workspace_id, &center_pane);
let new_workspace = db.workspace_for_roots(&["/tmp"]);
assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane);
assert_eq!(new_workspace.center_group, center_pane);
}
}

View file

@@ -1,4 +1,4 @@
use anyhow::Result;
use anyhow::{bail, Result};
use std::{
ffi::OsStr,
@@ -16,7 +16,7 @@ use sqlez::{
statement::Statement,
};
use crate::pane::SerializedDockPane;
use crate::pane::{SerializedDockPane, SerializedPaneGroup};
use super::Db;
@@ -28,7 +28,11 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new(
&[indoc! {"
CREATE TABLE workspaces(
workspace_id INTEGER PRIMARY KEY,
center_pane_group INTEGER NOT NULL,
dock_anchor TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded'
dock_visible INTEGER NOT NULL, -- Boolean
timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
FOREIGN KEY(center_pane_group) REFERENCES pane_groups(group_id)
) STRICT;
CREATE TABLE worktree_roots(
@@ -54,10 +58,71 @@ impl Column for WorkspaceId {
}
}
#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)]
pub enum DockAnchor {
#[default]
Bottom,
Right,
Expanded,
}
impl Bind for DockAnchor {
fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
match self {
DockAnchor::Bottom => "Bottom",
DockAnchor::Right => "Right",
DockAnchor::Expanded => "Expanded",
}
.bind(statement, start_index)
}
}
impl Column for DockAnchor {
fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
String::column(statement, start_index).and_then(|(anchor_text, next_index)| {
Ok((
match anchor_text.as_ref() {
"Bottom" => DockAnchor::Bottom,
"Right" => DockAnchor::Right,
"Expanded" => DockAnchor::Expanded,
_ => bail!("Stored dock anchor is incorrect"),
},
next_index,
))
})
}
}
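With the Bind and Column impls above (moved here from pane.rs), DockAnchor round-trips through SQLite as plain TEXT. A minimal sketch of that round trip, written against the same sqlez calls used elsewhere in this file (prepare, with_bindings, exec, maybe_row); the free-function form, the &Db receiver, the exact call chaining, and the scratch anchors table are assumptions made for the example, not code from this commit:

    fn dock_anchor_round_trip(db: &Db) -> anyhow::Result<Option<DockAnchor>> {
        // Throwaway table used only to exercise the Bind/Column pair.
        db.prepare("CREATE TABLE IF NOT EXISTS anchors(anchor TEXT)")?
            .exec()?;

        // Bind writes the enum as one of 'Bottom' / 'Right' / 'Expanded'.
        db.prepare("INSERT INTO anchors(anchor) VALUES (?)")?
            .with_bindings(DockAnchor::Expanded)?
            .exec()?;

        // Column parses the stored text back, bailing on anything unexpected.
        db.prepare("SELECT anchor FROM anchors LIMIT 1")?
            .maybe_row::<DockAnchor>()
    }
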
#[derive(Debug, PartialEq, Eq)]
struct WorkspaceRow {
pub workspace_id: WorkspaceId,
pub dock_anchor: DockAnchor,
pub dock_visible: bool,
}
impl Column for WorkspaceRow {
fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
<(WorkspaceId, DockAnchor, bool) as Column>::column(statement, start_index).map(
|((id, anchor, visible), next_index)| {
(
WorkspaceRow {
workspace_id: id,
dock_anchor: anchor,
dock_visible: visible,
},
next_index,
)
},
)
}
}
#[derive(Default, Debug)]
pub struct SerializedWorkspace {
pub workspace_id: WorkspaceId,
// pub center_group: SerializedPaneGroup,
pub center_group: SerializedPaneGroup,
pub dock_anchor: DockAnchor,
pub dock_visible: bool,
pub dock_pane: Option<SerializedDockPane>,
}
@@ -70,15 +135,18 @@ impl Db {
{
// Find the workspace id which is uniquely identified by this set of paths
// return it if found
let mut workspace_id = self.workspace_id(worktree_roots);
if workspace_id.is_none() && worktree_roots.len() == 0 {
workspace_id = self.last_workspace_id();
let mut workspace_row = self.workspace(worktree_roots);
if workspace_row.is_none() && worktree_roots.len() == 0 {
workspace_row = self.last_workspace_id();
}
if let Some(workspace_id) = workspace_id {
if let Some(workspace_row) = workspace_row {
SerializedWorkspace {
workspace_id,
dock_pane: self.get_dock_pane(workspace_id),
dock_pane: self.get_dock_pane(workspace_row.workspace_id),
center_group: self.get_center_group(workspace_row.workspace_id),
workspace_id: workspace_row.workspace_id,
dock_anchor: workspace_row.dock_anchor,
dock_visible: workspace_row.dock_visible,
}
} else {
self.make_new_workspace(worktree_roots)
@@ -99,7 +167,7 @@ impl Db {
Ok(SerializedWorkspace {
workspace_id,
dock_pane: None,
..Default::default()
})
});
@@ -112,11 +180,11 @@
}
}
fn workspace_id<P>(&self, worktree_roots: &[P]) -> Option<WorkspaceId>
fn workspace<P>(&self, worktree_roots: &[P]) -> Option<WorkspaceRow>
where
P: AsRef<Path> + Debug,
{
match get_workspace_id(worktree_roots, &self) {
match get_workspace(worktree_roots, &self) {
Ok(workspace_id) => workspace_id,
Err(err) => {
log::error!("Failed to get workspace_id: {}", err);
@@ -149,11 +217,10 @@
}
}
fn last_workspace_id(&self) -> Option<WorkspaceId> {
fn last_workspace_id(&self) -> Option<WorkspaceRow> {
let res = self
.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT 1")
.and_then(|mut stmt| stmt.maybe_row())
.map(|row| row.map(|id| WorkspaceId(id)));
.prepare("SELECT workspace_id, dock FROM workspaces ORDER BY timestamp DESC LIMIT 1")
.and_then(|mut stmt| stmt.maybe_row::<WorkspaceRow>());
match res {
Ok(result) => result,
@@ -206,13 +273,13 @@ where
P: AsRef<Path> + Debug,
{
// Lookup any old WorkspaceIds which have the same set of roots, and delete them.
let preexisting_id = get_workspace_id(worktree_roots, &connection)?;
if let Some(preexisting_id) = preexisting_id {
if preexisting_id != *workspace_id {
let preexisting_workspace = get_workspace(worktree_roots, &connection)?;
if let Some(preexisting_workspace) = preexisting_workspace {
if preexisting_workspace.workspace_id != *workspace_id {
// Should also delete fields in other tables with cascading updates
connection
.prepare("DELETE FROM workspaces WHERE workspace_id = ?")?
.with_bindings(preexisting_id.0)?
.with_bindings(preexisting_workspace.workspace_id.0)?
.exec()?;
}
}
@@ -241,7 +308,7 @@ where
Ok(())
}
fn get_workspace_id<P>(worktree_roots: &[P], connection: &Connection) -> Result<Option<WorkspaceId>>
fn get_workspace<P>(worktree_roots: &[P], connection: &Connection) -> Result<Option<WorkspaceRow>>
where
P: AsRef<Path> + Debug,
{
@@ -315,7 +382,7 @@ where
// parameters by number.
let query = format!(
r#"
SELECT workspace_id
SELECT workspace_id, dock_anchor, dock_visible
FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots
WHERE worktree_root in {array_bind} AND workspace_id NOT IN
(SELECT wt1.workspace_id FROM worktree_roots as wt1
@@ -331,6 +398,7 @@ where
// This will only be called on start up and when root workspaces change, no need to waste memory
// caching it.
let mut stmt = connection.prepare(&query)?;
// Make sure we bound the parameters correctly
debug_assert!(worktree_roots.len() as i32 + 1 == stmt.parameter_count());
@@ -339,11 +407,10 @@
.map(|root| root.as_ref().as_os_str().as_bytes())
.collect();
let len = root_bytes.len();
let num_of_roots = root_bytes.len();
stmt.with_bindings((root_bytes, len))?
.maybe_row()
.map(|row| row.map(|id| WorkspaceId(id)))
stmt.with_bindings((root_bytes, num_of_roots))?
.maybe_row::<WorkspaceRow>()
}
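get_workspace builds its SQL around an array_bind placeholder list sized to the number of worktree roots, then debug_asserts that the prepared statement expects exactly one more parameter than there are roots (the extra one is the root count itself, bound alongside root_bytes). A small illustration of that pattern; the helper function below is made up for the example, since in the diff array_bind is just a variable interpolated into the format!-built query:

    // Hypothetical helper: produce the "(?, ?, ?)" fragment interpolated into
    // the query as `array_bind`, one placeholder per worktree root.
    fn array_bind(num_roots: usize) -> String {
        format!("({})", vec!["?"; num_roots].join(", "))
    }

    fn main() {
        assert_eq!(array_bind(3), "(?, ?, ?)");
        // Three root placeholders plus the bound root count gives the
        // `worktree_roots.len() + 1` parameters the debug_assert checks for.
        assert_eq!(array_bind(3).matches('?').count() + 1, 3 + 1);
    }
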
#[cfg(test)]
@@ -401,14 +468,17 @@ mod tests {
fn test_empty_worktrees() {
let db = Db::open_in_memory("test_empty_worktrees");
assert_eq!(None, db.workspace_id::<String>(&[]));
assert_eq!(None, db.workspace::<String>(&[]));
db.make_new_workspace::<String>(&[]); //ID 1
db.make_new_workspace::<String>(&[]); //ID 2
db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]);
// Sanity check
assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1)));
assert_eq!(
db.workspace(&["/tmp", "/tmp2"]).unwrap().workspace_id,
WorkspaceId(1)
);
db.update_worktrees::<String>(&WorkspaceId(1), &[]);
@@ -416,9 +486,9 @@ mod tests {
// call would be semantically correct (as those are the workspaces that
// don't have roots) but I'd prefer that this API either return exactly one
// workspace, and None otherwise
assert_eq!(db.workspace_id::<String>(&[]), None,);
assert_eq!(db.workspace::<String>(&[]), None,);
assert_eq!(db.last_workspace_id(), Some(WorkspaceId(1)));
assert_eq!(db.last_workspace_id().unwrap().workspace_id, WorkspaceId(1));
assert_eq!(
db.recent_workspaces(2),
@@ -445,23 +515,42 @@
db.update_worktrees(workspace_id, entries);
}
assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp1"]));
assert_eq!(db.workspace_id(&["/tmp1", "/tmp2"]), Some(WorkspaceId(2)));
assert_eq!(
db.workspace_id(&["/tmp1", "/tmp2", "/tmp3"]),
Some(WorkspaceId(3))
WorkspaceId(1),
db.workspace(&["/tmp1"]).unwrap().workspace_id
);
assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), Some(WorkspaceId(4)));
assert_eq!(
db.workspace_id(&["/tmp2", "/tmp3", "/tmp4"]),
Some(WorkspaceId(5))
db.workspace(&["/tmp1", "/tmp2"]).unwrap().workspace_id,
WorkspaceId(2)
);
assert_eq!(
db.workspace(&["/tmp1", "/tmp2", "/tmp3"])
.unwrap()
.workspace_id,
WorkspaceId(3)
);
assert_eq!(
db.workspace(&["/tmp2", "/tmp3"]).unwrap().workspace_id,
WorkspaceId(4)
);
assert_eq!(
db.workspace(&["/tmp2", "/tmp3", "/tmp4"])
.unwrap()
.workspace_id,
WorkspaceId(5)
);
assert_eq!(
db.workspace(&["/tmp2", "/tmp4"]).unwrap().workspace_id,
WorkspaceId(6)
);
assert_eq!(
db.workspace(&["/tmp2"]).unwrap().workspace_id,
WorkspaceId(7)
);
assert_eq!(db.workspace_id(&["/tmp2", "/tmp4"]), Some(WorkspaceId(6)));
assert_eq!(db.workspace_id(&["/tmp2"]), Some(WorkspaceId(7)));
assert_eq!(db.workspace_id(&["/tmp1", "/tmp5"]), None);
assert_eq!(db.workspace_id(&["/tmp5"]), None);
assert_eq!(db.workspace_id(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None);
assert_eq!(db.workspace(&["/tmp1", "/tmp5"]), None);
assert_eq!(db.workspace(&["/tmp5"]), None);
assert_eq!(db.workspace(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None);
}
#[test]
@@ -479,13 +568,21 @@
db.update_worktrees(workspace_id, entries);
}
assert_eq!(db.workspace_id(&["/tmp2"]), None);
assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), None);
assert_eq!(db.workspace_id(&["/tmp"]), Some(WorkspaceId(1)));
assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(2)));
assert_eq!(db.workspace(&["/tmp2"]), None);
assert_eq!(db.workspace(&["/tmp2", "/tmp3"]), None);
assert_eq!(
db.workspace_id(&["/tmp", "/tmp2", "/tmp3"]),
Some(WorkspaceId(3))
db.workspace(&["/tmp"]).unwrap().workspace_id,
WorkspaceId(1)
);
assert_eq!(
db.workspace(&["/tmp", "/tmp2"]).unwrap().workspace_id,
WorkspaceId(2)
);
assert_eq!(
db.workspace(&["/tmp", "/tmp2", "/tmp3"])
.unwrap()
.workspace_id,
WorkspaceId(3)
);
}
@@ -526,15 +623,21 @@
db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]);
// Make sure that workspace 3 doesn't exist
assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), Some(WorkspaceId(2)));
assert_eq!(
db.workspace(&["/tmp2", "/tmp3"]).unwrap().workspace_id,
WorkspaceId(2)
);
// And that workspace 1 was untouched
assert_eq!(db.workspace_id(&["/tmp"]), Some(WorkspaceId(1)));
assert_eq!(
db.workspace(&["/tmp"]).unwrap().workspace_id,
WorkspaceId(1)
);
// And that workspace 2 is no longer registered under these roots
assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), None);
assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None);
assert_eq!(Some(WorkspaceId(2)), db.last_workspace_id());
assert_eq!(db.last_workspace_id().unwrap().workspace_id, WorkspaceId(2));
let recent_workspaces = db.recent_workspaces(10);
assert_eq!(