replace worktree roots table with serialized worktree roots list

parent eb0598dac2
commit d419f27d75

5 changed files with 97 additions and 271 deletions
Cargo.lock (generated, 2 lines changed)
@@ -1550,6 +1550,7 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "async-trait",
+ "bincode",
  "collections",
  "env_logger",
  "gpui",
@@ -1557,6 +1558,7 @@ dependencies = [
  "lazy_static",
  "log",
  "parking_lot 0.11.2",
+ "serde",
  "sqlez",
  "tempdir",
  "util",
Cargo.toml
@@ -21,6 +21,8 @@ async-trait = "0.1"
 lazy_static = "1.4.0"
 log = { version = "0.4.16", features = ["kv_unstable_serde"] }
 parking_lot = "0.11.1"
+serde = { version = "1.0", features = ["derive"] }
+bincode = "1.2.1"
 
 [dev-dependencies]
workspace.rs
@@ -3,12 +3,12 @@ pub mod model;
 pub(crate) mod pane;
 
 use anyhow::{Context, Result};
-use util::ResultExt;
+use util::{iife, ResultExt};
 
 use std::path::{Path, PathBuf};
 
-use indoc::{formatdoc, indoc};
-use sqlez::{connection::Connection, migrations::Migration};
+use indoc::indoc;
+use sqlez::migrations::Migration;
 
+// If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging
+// you might want to update some of the parsing code as well, I've left the variations in but commented
@@ -17,18 +17,11 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new(
     "workspace",
     &[indoc! {"
         CREATE TABLE workspaces(
-            workspace_id INTEGER PRIMARY KEY,
+            workspace_id BLOB PRIMARY KEY,
             dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded'
             dock_visible INTEGER, -- Boolean
             timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL
         ) STRICT;
-
-        CREATE TABLE worktree_roots(
-            worktree_root BLOB NOT NULL,
-            workspace_id INTEGER NOT NULL,
-            FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
-            PRIMARY KEY(worktree_root, workspace_id)
-        ) STRICT;
     "}],
 );
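The schema change above drops the worktree_roots join table entirely: the sorted set of roots, serialized to a BLOB, now serves as the workspaces primary key itself. A minimal sketch of how such a key can be built, assuming the bincode and serde dependencies this commit adds (the helper name is ours, not the crate's):

use std::path::PathBuf;

// Illustrative only: sort the roots so the key is order-independent,
// then serialize the whole list with bincode to get the BLOB key.
fn workspace_key(mut roots: Vec<PathBuf>) -> Vec<u8> {
    roots.sort();
    bincode::serialize(&roots).expect("serializing a list of paths should not fail")
}

fn main() {
    let a = workspace_key(vec!["/tmp".into(), "/tmp2".into()]);
    let b = workspace_key(vec!["/tmp2".into(), "/tmp".into()]);
    assert_eq!(a, b); // same set of roots -> same workspace row
}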
@@ -37,34 +30,39 @@ use self::model::{SerializedWorkspace, WorkspaceId, WorkspaceRow};
 use super::Db;
 
 impl Db {
-    /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty,
-    /// returns the last workspace which was updated
+    /// Returns a serialized workspace for the given worktree_roots. If the passed array
+    /// is empty, the most recent workspace is returned instead. If no workspace for the
+    /// passed roots is stored, returns none.
     pub fn workspace_for_roots<P: AsRef<Path>>(
         &self,
         worktree_roots: &[P],
     ) -> Option<SerializedWorkspace> {
-        // Find the workspace id which is uniquely identified by this set of paths
-        // return it if found
-        let mut workspace_row = get_workspace(worktree_roots, &self)
-            .log_err()
-            .unwrap_or_default();
-
-        if workspace_row.is_none() && worktree_roots.len() == 0 {
-            // Return last workspace if no roots passed
-            workspace_row = self.prepare(
-                "SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1"
-            ).and_then(|mut stmt| stmt.maybe_row::<WorkspaceRow>())
-            .log_err()
-            .flatten();
-        }
+        let workspace_id: WorkspaceId = worktree_roots.into();
+
+        let (_, dock_anchor, dock_visible) = iife!({
+            if worktree_roots.len() == 0 {
+                self.prepare(indoc! {"
+                    SELECT workspace_id, dock_anchor, dock_visible
+                    FROM workspaces
+                    ORDER BY timestamp DESC LIMIT 1"})?
+                .maybe_row::<WorkspaceRow>()
+            } else {
+                self.prepare(indoc! {"
+                    SELECT workspace_id, dock_anchor, dock_visible
+                    FROM workspaces
+                    WHERE workspace_id = ?"})?
+                .with_bindings(workspace_id)?
+                .maybe_row::<WorkspaceRow>()
+            }
+        })
+        .log_err()
+        .flatten()?;
 
-        workspace_row.and_then(|(workspace_id, dock_anchor, dock_visible)| {
-            Some(SerializedWorkspace {
-                dock_pane: self.get_dock_pane(workspace_id)?,
-                center_group: self.get_center_group(workspace_id),
-                dock_anchor,
-                dock_visible,
-            })
-        })
+        Some(SerializedWorkspace {
+            dock_pane: self.get_dock_pane(workspace_id)?,
+            center_group: self.get_center_group(workspace_id),
+            dock_anchor,
+            dock_visible,
+        })
     }
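The rewrite relies on util's iife! macro to use `?` inside an otherwise infallible function. A rough sketch of the idea, assuming (not shown in this diff) that iife! simply wraps its block in an immediately invoked closure:

// Assumed shape of util::iife — an immediately invoked closure, so `?`
// short-circuits to an Option/Result value instead of the enclosing fn.
macro_rules! iife {
    ($block:block) => {
        (|| $block)()
    };
}

fn main() {
    let value: Option<i32> = iife!({
        let parsed = "42".parse::<i32>().ok()?; // `?` exits the closure, not main
        Some(parsed + 1)
    });
    assert_eq!(value, Some(43));
}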
@@ -75,146 +73,40 @@ impl Db {
         worktree_roots: &[P],
         workspace: SerializedWorkspace,
     ) {
+        let workspace_id: WorkspaceId = worktree_roots.into();
+
         self.with_savepoint("update_worktrees", |conn| {
-            // Lookup any old WorkspaceIds which have the same set of roots, and delete them.
-            if let Some((id_to_delete, _, _)) = get_workspace(worktree_roots, &conn)? {
-                // Should also delete fields in other tables with cascading updates and insert
-                // new entry
-                conn.prepare("DELETE FROM workspaces WHERE workspace_id = ?")?
-                    .with_bindings(id_to_delete)?
-                    .exec()?;
-            }
-
-            // Insert new workspace into workspaces table if none were found
-            let workspace_id = WorkspaceId(
-                conn.prepare("INSERT INTO workspaces(dock_anchor, dock_visible) VALUES (?, ?)")?
-                    .with_bindings((workspace.dock_anchor, workspace.dock_visible))?
-                    .insert()?,
-            );
-
-            // Write worktree_roots with new workspace_id
-            for root in worktree_roots {
-                conn.prepare(
-                    "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)",
-                )?
-                .with_bindings((workspace_id, root.as_ref()))?
-                .exec()?;
-            }
+            // Delete any previous workspaces with the same roots. This cascades to all
+            // other tables that are based on the same roots set.
+            self.prepare(indoc!{"
+                DELETE FROM workspaces WHERE workspace_id = ?1;
+                INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?1, ?, ?)"})?
+                .with_bindings((workspace_id, workspace.dock_anchor, workspace.dock_visible))?
+                .exec()?;
 
             // Save center pane group and dock pane
             Self::save_center_group(workspace_id, &workspace.center_group, conn)?;
             Self::save_dock_pane(workspace_id, &workspace.dock_pane, conn)?;
 
             Ok(())
         })
-        .context("Update workspace with roots {worktree_roots:?}")
+        .with_context(|| format!("Update workspace with roots {:?}", worktree_roots.iter().map(|p| p.as_ref()).collect::<Vec<_>>()))
        .log_err();
     }
 
     /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots
     pub fn recent_workspaces(&self, limit: usize) -> Vec<Vec<PathBuf>> {
-        self.with_savepoint("recent_workspaces", |conn| {
-            let mut roots_by_id =
-                conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?;
-
-            conn.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")?
-                .with_bindings(limit)?
-                .rows::<WorkspaceId>()?
-                .iter()
-                .map(|workspace_id| roots_by_id.with_bindings(workspace_id.0)?.rows::<PathBuf>())
-                .collect::<Result<_>>()
-        })
-        .log_err()
-        .unwrap_or_default()
+        iife!({
+            self.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")?
+                .with_bindings(limit)?
+                .rows::<WorkspaceId>()
+        })
+        .log_err()
+        .unwrap_or_default()
+        .into_iter().map(|id| id.0)
+        .collect()
     }
 }
 
-fn get_workspace<P: AsRef<Path>>(
-    worktree_roots: &[P],
-    connection: &Connection,
-) -> Result<Option<WorkspaceRow>> {
-    // Short circuit if we can
-    if worktree_roots.len() == 0 {
-        return Ok(None);
-    }
-
-    // Any workspace can have multiple independent paths, and these paths
-    // can overlap in the database. Take this test data for example:
-    //
-    // [/tmp, /tmp2] -> 1
-    // [/tmp] -> 2
-    // [/tmp2, /tmp3] -> 3
-    //
-    // This would be stored in the database like so:
-    //
-    // ID PATH
-    // 1  /tmp
-    // 1  /tmp2
-    // 2  /tmp
-    // 3  /tmp2
-    // 3  /tmp3
-    //
-    // Note how both /tmp and /tmp2 are associated with multiple workspace IDs.
-    // So, given an array of worktree roots, how can we find the exactly matching ID?
-    // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out:
-    // - We start with a join of this table on itself, generating every possible
-    //   pair of ((path, ID), (path, ID)), and filtering the join down to just the
-    //   *overlapping but non-matching* workspace IDs. For this small data set,
-    //   this would look like:
-    //
-    //   wt1.ID wt1.PATH | wt2.ID wt2.PATH
-    //   3      /tmp3    | 3      /tmp2
-    //
-    // - Moving one SELECT out, we use the first pair's ID column to invert the selection,
-    //   meaning we now have a list of all the entries for our array, minus overlapping sets,
-    //   but including *subsets* of our worktree roots:
-    //
-    //   ID PATH
-    //   1  /tmp
-    //   1  /tmp2
-    //   2  /tmp
-    //
-    // - To trim out the subsets, we can exploit the PRIMARY KEY constraint that there are no
-    //   duplicate entries in this table. Using a GROUP BY and a COUNT we can find the subsets of
-    //   our keys:
-    //
-    //   ID num_matching
-    //   1  2
-    //   2  1
-    //
-    // - And with one final WHERE num_matching = $num_of_worktree_roots, we're done! We've found the
-    //   matching ID correctly :D
-    //
-    // Note: due to limitations in SQLite's query binding, we have to generate the prepared
-    //       statement with string substitution (the {array_bind}) below, and then bind the
-    //       parameters by number.
-    connection
-        .prepare(formatdoc! {"
-            SELECT workspaces.workspace_id, workspaces.dock_anchor, workspaces.dock_visible
-            FROM (SELECT workspace_id
-                  FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots
-                        WHERE worktree_root in ({roots}) AND workspace_id NOT IN
-                            (SELECT wt1.workspace_id FROM worktree_roots as wt1
-                             JOIN worktree_roots as wt2
-                             ON wt1.workspace_id = wt2.workspace_id
-                             WHERE wt1.worktree_root NOT in ({roots}) AND wt2.worktree_root in ({roots}))
-                        GROUP BY workspace_id)
-                  WHERE num_matching = ?) as matching_workspace
-            JOIN workspaces ON workspaces.workspace_id = matching_workspace.workspace_id",
-            roots =
-                // Prepare the array binding string. SQL doesn't have syntax for this, so
-                // we have to do it ourselves.
-                (0..worktree_roots.len())
-                    .map(|index| format!("?{}", index + 1))
-                    .collect::<Vec<_>>()
-                    .join(", ")
-        })?
-        .with_bindings((
-            worktree_roots
-                .into_iter()
-                .map(|p| p.as_ref())
-                .collect::<Vec<&Path>>(),
-            worktree_roots.len(),
-        ))?
-        .maybe_row::<WorkspaceRow>()
-}
 
 #[cfg(test)]
 mod tests {
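The removed get_workspace query also documents a classic SQLite workaround worth keeping in mind: there is no array bind, so the placeholder list has to be spliced into the SQL textually and the values bound by position. A standalone sketch of just that placeholder generation, mirroring the roots = ... expression in the removed code:

// Builds "?1, ?2, ..." for n values, to be bound by position afterwards.
fn binding_list(n: usize) -> String {
    (0..n)
        .map(|index| format!("?{}", index + 1))
        .collect::<Vec<_>>()
        .join(", ")
}

fn main() {
    // For two roots this yields "?1, ?2"; the plain `?` for num_matching
    // then binds at the next free index (3) in SQLite.
    assert_eq!(binding_list(2), "?1, ?2");
}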
workspace/model.rs
@@ -1,3 +1,5 @@
+use std::path::{Path, PathBuf};
+
 use anyhow::{bail, Result};
 
 use gpui::Axis;
@@ -6,18 +8,32 @@ use sqlez::{
     statement::Statement,
 };
 
-#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)]
-pub(crate) struct WorkspaceId(pub(crate) i64);
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub(crate) struct WorkspaceId(Vec<PathBuf>);
+
+impl<P: AsRef<Path>, T: IntoIterator<Item = P>> From<T> for WorkspaceId {
+    fn from(iterator: T) -> Self {
+        let mut roots = iterator
+            .into_iter()
+            .map(|p| p.as_ref().to_path_buf())
+            .collect::<Vec<_>>();
+        roots.sort();
+        Self(roots)
+    }
+}
 
 impl Bind for WorkspaceId {
     fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
-        self.0.bind(statement, start_index)
+        bincode::serialize(&self.0)
+            .expect("Bincode serialization of paths should not fail")
+            .bind(statement, start_index)
     }
 }
 
 impl Column for WorkspaceId {
     fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
-        i64::column(statement, start_index).map(|(id, next_index)| (Self(id), next_index))
+        let blob = statement.column_blob(start_index)?;
+        Ok((WorkspaceId(bincode::deserialize(blob)?), start_index + 1))
     }
 }
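A quick round-trip check of the Bind/Column pair above, written as a standalone approximation (a plain Vec<PathBuf> stands in for the crate's private WorkspaceId):

use std::path::PathBuf;

fn main() {
    // Serialize the sorted path list on bind, deserialize the raw blob on read.
    let mut roots: Vec<PathBuf> = vec!["/tmp2".into(), "/tmp".into()];
    roots.sort();

    let blob = bincode::serialize(&roots).expect("paths should serialize");
    let decoded: Vec<PathBuf> = bincode::deserialize(&blob).expect("valid blob");
    assert_eq!(roots, decoded);
}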
@@ -58,116 +74,24 @@ impl Column for DockAnchor {
 
 pub(crate) type WorkspaceRow = (WorkspaceId, DockAnchor, bool);
 
-#[derive(Default, Debug)]
+#[derive(Debug)]
 pub struct SerializedWorkspace {
-    pub center_group: SerializedPaneGroup,
     pub dock_anchor: DockAnchor,
     pub dock_visible: bool,
-    pub dock_pane: SerializedDockPane,
+    pub center_group: SerializedPaneGroup,
+    pub dock_pane: SerializedPane,
 }
 
-#[derive(Debug, PartialEq, Eq, Copy, Clone)]
-pub struct PaneId {
-    workspace_id: WorkspaceId,
-    pane_id: usize,
-}
-
-#[derive(Debug, PartialEq, Eq, Copy, Clone)]
-pub struct PaneGroupId {
-    workspace_id: WorkspaceId,
-}
-
-impl PaneGroupId {
-    pub fn root(workspace_id: WorkspaceId) -> Self {
-        Self {
-            workspace_id,
-            // group_id: 0,
-        }
-    }
-}
-
-#[derive(Debug, PartialEq, Eq, Default)]
+#[derive(Debug, PartialEq, Eq)]
 pub struct SerializedPaneGroup {
     axis: Axis,
     children: Vec<PaneGroupChild>,
 }
 
-impl SerializedPaneGroup {
-    pub(crate) fn empty_root(_workspace_id: WorkspaceId) -> Self {
-        Self {
-            // group_id: PaneGroupId::root(workspace_id),
-            axis: Default::default(),
-            children: Default::default(),
-        }
-    }
-}
+pub struct SerializedPane {
+    children: Vec<SerializedItem>,
+}
 
-#[derive(Default, Debug, PartialEq, Eq)]
-pub struct SerializedDockPane {
-    pub anchor_position: DockAnchor,
-    pub visible: bool,
-}
+pub enum SerializedItemKind {}
 
-impl SerializedDockPane {
-    fn to_row(&self, workspace: &WorkspaceId) -> DockRow {
-        DockRow {
-            workspace_id: *workspace,
-            anchor_position: self.anchor_position,
-            visible: self.visible,
-        }
-    }
-}
-
-impl Column for SerializedDockPane {
-    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
-        <(DockAnchor, bool) as Column>::column(statement, start_index).map(
-            |((anchor_position, visible), next_index)| {
-                (
-                    SerializedDockPane {
-                        anchor_position,
-                        visible,
-                    },
-                    next_index,
-                )
-            },
-        )
-    }
-}
-
-#[derive(Default, Debug, PartialEq, Eq)]
-pub(crate) struct DockRow {
-    workspace_id: WorkspaceId,
-    anchor_position: DockAnchor,
-    visible: bool,
-}
-
-impl Bind for DockRow {
-    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
-        statement.bind(
-            (self.workspace_id, self.anchor_position, self.visible),
-            start_index,
-        )
-    }
-}
-
-impl Column for DockRow {
-    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
-        <(WorkspaceId, DockAnchor, bool) as Column>::column(statement, start_index).map(
-            |((workspace_id, anchor_position, visible), next_index)| {
-                (
-                    DockRow {
-                        workspace_id,
-                        anchor_position,
-                        visible,
-                    },
-                    next_index,
-                )
-            },
-        )
-    }
-}
-
-#[derive(Debug, PartialEq, Eq)]
-pub struct ItemId {
-    pub item_id: usize,
-}
+pub enum SerializedItem {}
workspace/pane.rs
@@ -1,6 +1,6 @@
 use gpui::Axis;
 use indoc::indoc;
-use sqlez::migrations::Migration;
+use sqlez::{connection::Connection, migrations::Migration};
 use util::{iife, ResultExt};
 
 use super::{
@@ -13,26 +13,28 @@ pub(crate) const PANE_MIGRATIONS: Migration = Migration::new(
     &[indoc! {"
         CREATE TABLE pane_groups(
             group_id INTEGER PRIMARY KEY,
-            workspace_id INTEGER NOT NULL,
+            workspace_id BLOB NOT NULL,
             parent_group INTEGER, -- NULL indicates that this is a root node
             axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal'
             FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
             FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE
+            PRIMARY KEY(group_id, workspace_id)
         ) STRICT;
 
         CREATE TABLE panes(
             pane_id INTEGER PRIMARY KEY,
-            workspace_id INTEGER NOT NULL,
+            workspace_id BLOB NOT NULL,
             group_id INTEGER, -- If null, this is a dock pane
             idx INTEGER NOT NULL,
             FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
             FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
+            PRIMARY KEY(pane_id, workspace_id)
         ) STRICT;
 
         CREATE TABLE items(
             item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique
             pane_id INTEGER NOT NULL,
-            workspace_id INTEGER NOT NULL,
+            workspace_id BLOB NOT NULL,
             kind TEXT NOT NULL,
             FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
             FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE
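Every table above now keys on the serialized workspace_id BLOB and declares ON DELETE CASCADE back to workspaces, which is what lets save_workspace clear all old state with a single DELETE. A quick demonstration of the cascade behavior, using rusqlite here purely as a stand-in for the crate's sqlez wrapper:

use rusqlite::Connection; // stand-in; the real code goes through sqlez

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "PRAGMA foreign_keys = ON;
         CREATE TABLE workspaces(workspace_id BLOB PRIMARY KEY);
         CREATE TABLE panes(
             pane_id INTEGER PRIMARY KEY,
             workspace_id BLOB NOT NULL,
             FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                 ON DELETE CASCADE
         );",
    )?;
    conn.execute("INSERT INTO workspaces VALUES (x'01')", [])?;
    conn.execute("INSERT INTO panes(workspace_id) VALUES (x'01')", [])?;

    // Deleting the workspace row is enough: the pane rows go with it.
    conn.execute("DELETE FROM workspaces WHERE workspace_id = x'01'", [])?;
    let panes: i64 = conn.query_row("SELECT count(*) FROM panes", [], |row| row.get(0))?;
    assert_eq!(panes, 0);
    Ok(())
}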
@@ -46,7 +48,7 @@ impl Db {
         unimplemented!()
     }
 
-    pub(crate) fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup {
+    pub fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup {
         unimplemented!()
         // let axis = self.get_pane_group_axis(pane_group_id);
         // let mut children: Vec<(usize, PaneGroupChild)> = Vec::new();
@@ -85,17 +87,17 @@ impl Db {
     //     Vec::new().into_iter()
     // }
 
-    pub(crate) fn save_pane_splits(
-        &self,
+    pub(crate) fn save_center_group(
         _workspace: &WorkspaceId,
         _center_pane_group: &SerializedPaneGroup,
+        _connection: &Connection,
     ) {
         // Delete the center pane group for this workspace and any of its children
         // Generate new pane group IDs as we go through
         // insert them
     }
 
-    pub(crate) fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane {
+    pub fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane {
         unimplemented!();
     }
@@ -109,7 +111,11 @@ impl Db {
         .flatten()
     }
 
-    pub(crate) fn save_dock_pane(&self, workspace: &WorkspaceId, dock_pane: &SerializedDockPane) {
+    pub(crate) fn save_dock_pane(
+        workspace: &WorkspaceId,
+        dock_pane: &SerializedDockPane,
+        connection: &Connection,
+    ) {
         // iife!({
         //     self.prepare(
         //         "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);",