Address some issues with the sqlez_macros

Kay Simmons 2022-11-30 16:19:46 -08:00 committed by Mikayla Maki
parent 1b225fa37c
commit f68e8d4664
10 changed files with 183 additions and 174 deletions

Cargo.lock (generated)
View file

@@ -5609,6 +5609,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "sqlez",
+ "sqlformat",
  "syn",
 ]

View file

@@ -4,7 +4,6 @@ pub mod kvp;
 pub use anyhow;
 pub use indoc::indoc;
 pub use lazy_static;
-use parking_lot::Mutex;
 pub use smol;
 pub use sqlez;
 pub use sqlez_macros;
@@ -34,7 +33,7 @@ lazy_static::lazy_static! {
 }
 
 /// Open or create a database at the given directory path.
-pub async fn open_file_db<M: Migrator>() -> ThreadSafeConnection<M> {
+pub async fn open_db<M: Migrator>() -> ThreadSafeConnection<M> {
     // Use 0 for now. Will implement incrementing and clearing of old db files soon TM
     let current_db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME)));
@@ -56,18 +55,15 @@ pub async fn open_file_db<M: Migrator>() -> ThreadSafeConnection<M> {
     .await
 }
 
-pub async fn open_memory_db<M: Migrator>(db_name: &str) -> ThreadSafeConnection<M> {
+#[cfg(any(test, feature = "test-support"))]
+pub async fn open_test_db<M: Migrator>(db_name: &str) -> ThreadSafeConnection<M> {
+    use sqlez::thread_safe_connection::locking_queue;
+
     ThreadSafeConnection::<M>::builder(db_name, false)
         .with_db_initialization_query(DB_INITIALIZE_QUERY)
         .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY)
         // Serialize queued writes via a mutex and run them synchronously
-        .with_write_queue_constructor(Box::new(|connection| {
-            let connection = Mutex::new(connection);
-            Box::new(move |queued_write| {
-                let connection = connection.lock();
-                queued_write(&connection)
-            })
-        }))
+        .with_write_queue_constructor(locking_queue())
        .build()
        .await
 }
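Because open_test_db is now gated behind cfg(any(test, feature = "test-support")), release builds cannot reference it at all. A minimal sketch of how a test might call it, assuming a hypothetical MyDomain type that implements Domain (not part of this commit):

    #[cfg(test)]
    mod example {
        use super::*;

        #[gpui::test]
        async fn opens_isolated_db() {
            // Each distinct name yields its own in-memory database, and the
            // mutex-backed write queue runs queued writes synchronously on
            // the caller's thread, which keeps tests deterministic.
            let db = open_test_db::<MyDomain>("opens_isolated_db").await;
            drop(db);
        }
    }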
@@ -76,22 +72,24 @@ pub async fn open_memory_db<M: Migrator>(db_name: &str) -> ThreadSafeConnection<M> {
 #[macro_export]
 macro_rules! connection {
     ($id:ident: $t:ident<$d:ty>) => {
-        pub struct $t(::db::sqlez::thread_safe_connection::ThreadSafeConnection<$d>);
+        pub struct $t($crate::sqlez::thread_safe_connection::ThreadSafeConnection<$d>);
 
         impl ::std::ops::Deref for $t {
-            type Target = ::db::sqlez::thread_safe_connection::ThreadSafeConnection<$d>;
+            type Target = $crate::sqlez::thread_safe_connection::ThreadSafeConnection<$d>;
 
             fn deref(&self) -> &Self::Target {
                 &self.0
             }
         }
 
-        ::db::lazy_static::lazy_static! {
-            pub static ref $id: $t = $t(if cfg!(any(test, feature = "test-support")) {
-                $crate::smol::block_on(::db::open_memory_db(stringify!($id)))
-            } else {
-                $crate::smol::block_on(::db::open_file_db())
-            });
+        #[cfg(any(test, feature = "test-support"))]
+        $crate::lazy_static::lazy_static! {
+            pub static ref $id: $t = $t($crate::smol::block_on($crate::open_test_db(stringify!($id))));
+        }
+
+        #[cfg(not(any(test, feature = "test-support")))]
+        $crate::lazy_static::lazy_static! {
+            pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db()));
         }
     };
 }
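The macro now emits two cfg-gated lazy_static blocks instead of a runtime cfg! branch, so a release binary never even links the test path, and the inconsistent ::db::/$crate:: paths are unified on $crate. Roughly, an invocation like the one kvp.rs makes below:

    connection!(KEY_VALUE_STORE: KeyValueStore<KeyValueStore>);

expands (simplified sketch, paths resolved to the db crate) to:

    pub struct KeyValueStore(
        db::sqlez::thread_safe_connection::ThreadSafeConnection<KeyValueStore>,
    );
    // ...Deref impl...
    #[cfg(not(any(test, feature = "test-support")))]
    lazy_static! {
        pub static ref KEY_VALUE_STORE: KeyValueStore =
            KeyValueStore(smol::block_on(db::open_db()));
    }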

View file

@@ -1,25 +1,9 @@
-use sqlez::{domain::Domain, thread_safe_connection::ThreadSafeConnection};
+use sqlez::domain::Domain;
 use sqlez_macros::sql;
 
-use crate::{open_file_db, open_memory_db, query};
+use crate::{connection, query};
 
-pub struct KeyValueStore(ThreadSafeConnection<KeyValueStore>);
-
-impl std::ops::Deref for KeyValueStore {
-    type Target = ThreadSafeConnection<KeyValueStore>;
-
-    fn deref(&self) -> &Self::Target {
-        &self.0
-    }
-}
-
-lazy_static::lazy_static! {
-    pub static ref KEY_VALUE_STORE: KeyValueStore = KeyValueStore(if cfg!(any(test, feature = "test-support")) {
-        smol::block_on(open_memory_db("KEY_VALUE_STORE"))
-    } else {
-        smol::block_on(open_file_db())
-    });
-}
+connection!(KEY_VALUE_STORE: KeyValueStore<KeyValueStore>);
 
 impl Domain for KeyValueStore {
     fn name() -> &'static str {
@@ -27,8 +11,10 @@ impl Domain for KeyValueStore {
     }
 
     fn migrations() -> &'static [&'static str] {
+        // Legacy migrations using rusqlite may have already created kv_store during alpha,
+        // migrations must be infallible so this must have 'IF NOT EXISTS'
         &[sql!(
-            CREATE TABLE kv_store(
+            CREATE TABLE IF NOT EXISTS kv_store(
                 key TEXT PRIMARY KEY,
                 value TEXT NOT NULL
             ) STRICT;
@@ -62,7 +48,7 @@ mod tests {
     #[gpui::test]
     async fn test_kvp() {
-        let db = KeyValueStore(crate::open_memory_db("test_kvp").await);
+        let db = KeyValueStore(crate::open_test_db("test_kvp").await);
 
         assert_eq!(db.read_kvp("key-1").unwrap(), None);
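A minimal usage sketch of the store after this change; write_kvp is assumed to mirror read_kvp from the test above and to take owned Strings:

    let db = KeyValueStore(crate::open_test_db("kvp_example").await);
    assert_eq!(db.read_kvp("answer").unwrap(), None);
    // write_kvp's exact signature is an assumption, not shown in this diff.
    db.write_kvp("answer".to_string(), "42".to_string()).unwrap();
    assert_eq!(db.read_kvp("answer").unwrap(), Some("42".to_string()));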

View file

@@ -602,31 +602,37 @@ impl Item for Editor {
         item_id: ItemId,
         cx: &mut ViewContext<Pane>,
     ) -> Task<Result<ViewHandle<Self>>> {
-        if let Some(project_item) = project.update(cx, |project, cx| {
+        let project_item: Result<_> = project.update(cx, |project, cx| {
             // Look up the path with this key associated, create a self with that path
-            let path = DB.get_path(item_id, workspace_id).ok()?;
-            let (worktree, path) = project.find_local_worktree(&path, cx)?;
+            let path = DB
+                .get_path(item_id, workspace_id)?
+                .context("No path stored for this editor")?;
+
+            let (worktree, path) = project
+                .find_local_worktree(&path, cx)
+                .with_context(|| format!("No worktree for path: {path:?}"))?;
             let project_path = ProjectPath {
                 worktree_id: worktree.read(cx).id(),
                 path: path.into(),
             };
 
-            Some(project.open_path(project_path, cx))
-        }) {
-            cx.spawn(|pane, mut cx| async move {
-                let (_, project_item) = project_item.await?;
-                let buffer = project_item
-                    .downcast::<Buffer>()
-                    .context("Project item at stored path was not a buffer")?;
-                Ok(cx.update(|cx| {
-                    cx.add_view(pane, |cx| Editor::for_buffer(buffer, Some(project), cx))
-                }))
+            Ok(project.open_path(project_path, cx))
+        });
+
+        project_item
+            .map(|project_item| {
+                cx.spawn(|pane, mut cx| async move {
+                    let (_, project_item) = project_item.await?;
+                    let buffer = project_item
+                        .downcast::<Buffer>()
+                        .context("Project item at stored path was not a buffer")?;
+                    Ok(cx.update(|cx| {
+                        cx.add_view(pane, |cx| Editor::for_buffer(buffer, Some(project), cx))
+                    }))
+                })
             })
-        } else {
-            Task::ready(Err(anyhow!("Could not load file from stored path")))
-        }
+            .unwrap_or_else(|error| Task::ready(Err(error)))
     }
 }
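The shape of this refactor, in miniature: build the Result synchronously, then either spawn the async continuation or return an already-completed task carrying the real error, instead of collapsing every failure into one generic message. A self-contained sketch of the pattern; Task and spawn_async_work here are simplified stand-ins for gpui's types, not part of this commit:

    use anyhow::{anyhow, Result};

    // Minimal stand-ins so the sketch compiles on its own.
    enum Task<T> { Ready(T), Spawned }
    impl<T> Task<T> { fn ready(value: T) -> Self { Task::Ready(value) } }
    fn spawn_async_work(_payload: String) -> Task<Result<String>> { Task::Spawned }

    fn load(path_known: bool) -> Task<Result<String>> {
        let prepared: Result<String> = if path_known {
            Ok("payload".into())
        } else {
            // The error now records *why* restoration failed...
            Err(anyhow!("No path stored for this editor"))
        };
        prepared
            // ...only a successful lookup spawns the async work;
            .map(spawn_async_work)
            // a failure becomes a ready task carrying the error.
            .unwrap_or_else(|error| Task::ready(Err(error)))
    }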

View file

@@ -23,7 +23,6 @@ impl Domain for Editor {
                 FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                 ON DELETE CASCADE
                 ON UPDATE CASCADE
-
             ) STRICT;
         )]
     }
@@ -31,7 +30,7 @@ impl Domain for Editor {
 impl EditorDb {
     query! {
-        pub fn get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result<PathBuf> {
+        pub fn get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result<Option<PathBuf>> {
             SELECT path FROM editors
             WHERE item_id = ? AND workspace_id = ?
         }
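Returning Result<Option<PathBuf>> separates "query failed" from "no row found", which the caller in items.rs above turns into a context error. A rough sketch of what this query! invocation generates (simplified; the real macro lives in the db crate):

    pub fn get_path(
        &self,
        item_id: ItemId,
        workspace_id: WorkspaceId,
    ) -> Result<Option<PathBuf>> {
        self.select_row_bound::<(ItemId, WorkspaceId), PathBuf>(sql!(
            SELECT path FROM editors
            WHERE item_id = ? AND workspace_id = ?
        ))?((item_id, workspace_id))
    }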

View file

@@ -1,6 +1,6 @@
 use futures::{channel::oneshot, Future, FutureExt};
 use lazy_static::lazy_static;
-use parking_lot::RwLock;
+use parking_lot::{Mutex, RwLock};
 use std::{collections::HashMap, marker::PhantomData, ops::Deref, sync::Arc, thread};
 use thread_local::ThreadLocal;
@@ -73,37 +73,8 @@ impl<M: Migrator> ThreadSafeConnectionBuilder<M> {
     }
 
     pub async fn build(self) -> ThreadSafeConnection<M> {
-        if !QUEUES.read().contains_key(&self.connection.uri) {
-            let mut queues = QUEUES.write();
-            if !queues.contains_key(&self.connection.uri) {
-                let mut write_connection = self.connection.create_connection();
-                // Enable writes for this connection
-                write_connection.write = true;
-                if let Some(mut write_queue_constructor) = self.write_queue_constructor {
-                    let write_channel = write_queue_constructor(write_connection);
-                    queues.insert(self.connection.uri.clone(), write_channel);
-                } else {
-                    use std::sync::mpsc::channel;
-
-                    let (sender, receiver) = channel::<QueuedWrite>();
-                    thread::spawn(move || {
-                        while let Ok(write) = receiver.recv() {
-                            write(&write_connection)
-                        }
-                    });
-
-                    let sender = UnboundedSyncSender::new(sender);
-                    queues.insert(
-                        self.connection.uri.clone(),
-                        Box::new(move |queued_write| {
-                            sender
-                                .send(queued_write)
-                                .expect("Could not send write action to background thread");
-                        }),
-                    );
-                }
-            }
-        }
+        self.connection
+            .initialize_queues(self.write_queue_constructor);
 
         let db_initialize_query = self.db_initialize_query;
@@ -134,6 +105,40 @@ impl<M: Migrator> ThreadSafeConnectionBuilder<M> {
 }
 
 impl<M: Migrator> ThreadSafeConnection<M> {
+    fn initialize_queues(&self, write_queue_constructor: Option<WriteQueueConstructor>) {
+        if !QUEUES.read().contains_key(&self.uri) {
+            let mut queues = QUEUES.write();
+            if !queues.contains_key(&self.uri) {
+                let mut write_connection = self.create_connection();
+                // Enable writes for this connection
+                write_connection.write = true;
+                if let Some(mut write_queue_constructor) = write_queue_constructor {
+                    let write_channel = write_queue_constructor(write_connection);
+                    queues.insert(self.uri.clone(), write_channel);
+                } else {
+                    use std::sync::mpsc::channel;
+
+                    let (sender, receiver) = channel::<QueuedWrite>();
+                    thread::spawn(move || {
+                        while let Ok(write) = receiver.recv() {
+                            write(&write_connection)
+                        }
+                    });
+
+                    let sender = UnboundedSyncSender::new(sender);
+                    queues.insert(
+                        self.uri.clone(),
+                        Box::new(move |queued_write| {
+                            sender
+                                .send(queued_write)
+                                .expect("Could not send write action to background thread");
+                        }),
+                    );
+                }
+            }
+        }
+    }
+
     pub fn builder(uri: &str, persistent: bool) -> ThreadSafeConnectionBuilder<M> {
         ThreadSafeConnectionBuilder::<M> {
             db_initialize_query: None,
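initialize_queues uses the classic double-checked locking idiom: a cheap check under the shared read lock on the hot path, then a second check under the write lock, because another thread may have created the queue between the two lock acquisitions. The same idiom in isolation, detached from sqlez's types:

    use parking_lot::RwLock;
    use std::collections::HashMap;

    fn get_or_init(map: &RwLock<HashMap<String, i32>>, key: &str) -> i32 {
        if let Some(value) = map.read().get(key).copied() {
            return value; // fast path: shared lock only
        }
        let mut map = map.write();
        // Re-check: another writer may have raced us between the locks.
        *map.entry(key.to_string()).or_insert(42) // placeholder init
    }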
@@ -208,14 +213,18 @@ impl ThreadSafeConnection<()> {
         uri: &str,
         persistent: bool,
         connection_initialize_query: Option<&'static str>,
+        write_queue_constructor: Option<WriteQueueConstructor>,
     ) -> Self {
-        Self {
+        let connection = Self {
             uri: Arc::from(uri),
             persistent,
             connection_initialize_query,
             connections: Default::default(),
             _migrator: PhantomData,
-        }
+        };
+
+        connection.initialize_queues(write_queue_constructor);
+        connection
     }
 }
@@ -243,6 +252,16 @@ impl<M: Migrator> Deref for ThreadSafeConnection<M> {
     }
 }
 
+pub fn locking_queue() -> WriteQueueConstructor {
+    Box::new(|connection| {
+        let connection = Mutex::new(connection);
+        Box::new(move |queued_write| {
+            let connection = connection.lock();
+            queued_write(&connection)
+        })
+    })
+}
+
 #[cfg(test)]
 mod test {
     use indoc::indoc;
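locking_queue gives callers that cannot afford a background writer thread, such as the proc macro in the next file, a synchronous, mutex-serialized write queue. Usage matches the new four-argument constructor (this exact call appears in sqlez_macros below):

    let connection: ThreadSafeConnection =
        ThreadSafeConnection::new(":memory:", false, None, Some(locking_queue()));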

View file

@@ -13,4 +13,5 @@ syn = "1.0"
 quote = "1.0"
 proc-macro2 = "1.0"
 lazy_static = "1.4"
 sqlez = { path = "../sqlez" }
+sqlformat = "0.2"
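sqlformat is used purely at compile time to pretty-print the reconstructed SQL. The call the macro makes (shown in the next file) looks like this; QueryParams::None means no placeholder substitution, and FormatOptions::default() picks the crate's standard indentation:

    use sqlformat::{format, FormatOptions, QueryParams};

    let pretty = format(
        "SELECT key, value FROM kv_store WHERE key = ?",
        &QueryParams::None,
        FormatOptions::default(),
    );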

View file

@@ -1,9 +1,11 @@
 use proc_macro::{Delimiter, Span, TokenStream, TokenTree};
-use sqlez::thread_safe_connection::ThreadSafeConnection;
+use sqlez::thread_safe_connection::{locking_queue, ThreadSafeConnection};
 use syn::Error;
 
 lazy_static::lazy_static! {
-    static ref SQLITE: ThreadSafeConnection = ThreadSafeConnection::new(":memory:", false, None);
+    static ref SQLITE: ThreadSafeConnection = {
+        ThreadSafeConnection::new(":memory:", false, None, Some(locking_queue()))
+    };
 }
 
 #[proc_macro]
@@ -20,6 +22,7 @@ pub fn sql(tokens: TokenStream) -> TokenStream {
     }
 
     let error = SQLITE.sql_has_syntax_error(sql.trim());
+    let formatted_sql = sqlformat::format(&sql, &sqlformat::QueryParams::None, Default::default());
 
     if let Some((error, error_offset)) = error {
         let error_span = spans
@@ -29,10 +32,10 @@ pub fn sql(tokens: TokenStream) -> TokenStream {
             .next()
             .unwrap_or(Span::call_site());
 
-        let error_text = format!("Sql Error: {}\nFor Query: {}", error, sql);
+        let error_text = format!("Sql Error: {}\nFor Query: {}", error, formatted_sql);
         TokenStream::from(Error::new(error_span.into(), error_text).into_compile_error())
     } else {
-        format!("r#\"{}\"#", &sql).parse().unwrap()
+        format!("r#\"{}\"#", &formatted_sql).parse().unwrap()
     }
 }
@@ -61,18 +64,18 @@ fn flatten_stream(tokens: TokenStream, result: &mut Vec<(String, Span)>) {
 fn open_delimiter(delimiter: Delimiter) -> String {
     match delimiter {
-        Delimiter::Parenthesis => "(".to_string(),
-        Delimiter::Brace => "[".to_string(),
-        Delimiter::Bracket => "{".to_string(),
+        Delimiter::Parenthesis => "( ".to_string(),
+        Delimiter::Brace => "[ ".to_string(),
+        Delimiter::Bracket => "{ ".to_string(),
         Delimiter::None => "".to_string(),
     }
 }
 
 fn close_delimiter(delimiter: Delimiter) -> String {
     match delimiter {
-        Delimiter::Parenthesis => ")".to_string(),
-        Delimiter::Brace => "]".to_string(),
-        Delimiter::Bracket => "}".to_string(),
+        Delimiter::Parenthesis => " ) ".to_string(),
+        Delimiter::Brace => " ] ".to_string(),
+        Delimiter::Bracket => " } ".to_string(),
         Delimiter::None => "".to_string(),
     }
 }
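The padded delimiters keep tokens from fusing together when the macro reassembles SQL text from Rust token trees, and sqlformat then normalizes the spacing again. The net effect for callers, sketched:

    // Valid SQL expands to a raw string literal, checked at compile time
    // against the macro's in-memory SQLite connection:
    let query: &'static str = sql!(SELECT key, value FROM kv_store WHERE key = ?);

    // Invalid SQL, e.g. sql!(SELEC key FROM kv_store), becomes a
    // compile_error! whose message quotes the sqlformat-formatted query.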

View file

@@ -7,7 +7,6 @@ use std::path::Path;
 use anyhow::{anyhow, bail, Context, Result};
 use db::{connection, query, sqlez::connection::Connection, sqlez_macros::sql};
 use gpui::Axis;
-use indoc::indoc;
 
 use db::sqlez::domain::Domain;
 use util::{iife, unzip_option, ResultExt};
@@ -106,15 +105,15 @@ impl WorkspaceDb {
             DockPosition,
         ) = iife!({
             if worktree_roots.len() == 0 {
-                self.select_row(indoc! {"
+                self.select_row(sql!(
                     SELECT workspace_id, workspace_location, dock_visible, dock_anchor
                     FROM workspaces
-                    ORDER BY timestamp DESC LIMIT 1"})?()?
+                    ORDER BY timestamp DESC LIMIT 1))?()?
             } else {
-                self.select_row_bound(indoc! {"
+                self.select_row_bound(sql!(
                     SELECT workspace_id, workspace_location, dock_visible, dock_anchor
                     FROM workspaces
-                    WHERE workspace_location = ?"})?(&workspace_location)?
+                    WHERE workspace_location = ?))?(&workspace_location)?
             }
             .context("No workspaces found")
         })
@@ -142,19 +141,15 @@ impl WorkspaceDb {
         self.write(move |conn| {
             conn.with_savepoint("update_worktrees", || {
                 // Clear out panes and pane_groups
-                conn.exec_bound(indoc! {"
+                conn.exec_bound(sql!(
                     UPDATE workspaces SET dock_pane = NULL WHERE workspace_id = ?1;
                     DELETE FROM pane_groups WHERE workspace_id = ?1;
-                    DELETE FROM panes WHERE workspace_id = ?1;"})?(workspace.id)
+                    DELETE FROM panes WHERE workspace_id = ?1;))?(workspace.id)
                 .context("Clearing old panes")?;
 
-                conn.exec_bound(indoc! {"
-                    DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ?"})?(
-                    (
-                        &workspace.location,
-                        workspace.id.clone(),
-                    )
-                )
+                conn.exec_bound(sql!(
+                    DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ?
+                ))?((&workspace.location, workspace.id.clone()))
                 .context("clearing out old locations")?;
 
                 // Upsert
@@ -184,10 +179,11 @@ impl WorkspaceDb {
                 .context("save pane in save workspace")?;
 
                 // Complete workspace initialization
-                conn.exec_bound(indoc! {"
+                conn.exec_bound(sql!(
                     UPDATE workspaces
                     SET dock_pane = ?
-                    WHERE workspace_id = ?"})?((dock_id, workspace.id))
+                    WHERE workspace_id = ?
+                ))?((dock_id, workspace.id))
                 .context("Finishing initialization with dock pane")?;
 
                 Ok(())
@@ -203,20 +199,13 @@ impl WorkspaceDb {
         }
     }
 
-    /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots
-    pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, WorkspaceLocation)> {
-        iife!({
-            // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html
-            Ok::<_, anyhow::Error>(
-                self.select_bound::<usize, (WorkspaceId, WorkspaceLocation)>(
-                    "SELECT workspace_id, workspace_location FROM workspaces ORDER BY timestamp DESC LIMIT ?",
-                )?(limit)?
-                .into_iter()
-                .collect::<Vec<(WorkspaceId, WorkspaceLocation)>>(),
-            )
-        })
-        .log_err()
-        .unwrap_or_default()
+    query! {
+        pub fn recent_workspaces(limit: usize) -> Result<Vec<(WorkspaceId, WorkspaceLocation)>> {
+            SELECT workspace_id, workspace_location
+            FROM workspaces
+            ORDER BY timestamp DESC
+            LIMIT ?
+        }
     }
 
     fn get_center_pane_group(&self, workspace_id: WorkspaceId) -> Result<SerializedPaneGroup> {
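Converting recent_workspaces to query! drops the iife/log_err plumbing and surfaces errors to the caller. A hedged usage sketch, assuming the DB connection static defined by connection! in this module:

    // Callers now decide how to handle failure instead of the method
    // silently logging and returning an empty Vec:
    let recent = DB.recent_workspaces(10).unwrap_or_default();
    for (_id, location) in recent {
        // ...
    }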
@@ -233,7 +222,7 @@ impl WorkspaceDb {
     ) -> Result<Vec<SerializedPaneGroup>> {
         type GroupKey = (Option<GroupId>, WorkspaceId);
         type GroupOrPane = (Option<GroupId>, Option<Axis>, Option<PaneId>, Option<bool>);
-        self.select_bound::<GroupKey, GroupOrPane>(indoc! {"
+        self.select_bound::<GroupKey, GroupOrPane>(sql!(
             SELECT group_id, axis, pane_id, active
             FROM (SELECT
                     group_id,
@@ -243,7 +232,7 @@ impl WorkspaceDb {
                     position,
                     parent_group_id,
                     workspace_id
                 FROM pane_groups
             UNION
                 SELECT
                     NULL,
@@ -257,7 +246,7 @@ impl WorkspaceDb {
                 JOIN panes ON center_panes.pane_id = panes.pane_id)
             WHERE parent_group_id IS ? AND workspace_id = ?
             ORDER BY position
-        "})?((group_id, workspace_id))?
+        ))?((group_id, workspace_id))?
         .into_iter()
         .map(|(group_id, axis, pane_id, active)| {
             if let Some((group_id, axis)) = group_id.zip(axis) {
@@ -293,10 +282,11 @@ impl WorkspaceDb {
             SerializedPaneGroup::Group { axis, children } => {
                 let (parent_id, position) = unzip_option(parent);
 
-                let group_id = conn.select_row_bound::<_, i64>(indoc! {"
+                let group_id = conn.select_row_bound::<_, i64>(sql!(
                     INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis)
                     VALUES (?, ?, ?, ?)
-                    RETURNING group_id"})?((
+                    RETURNING group_id
+                ))?((
                     workspace_id,
                     parent_id,
                     position,
@@ -318,10 +308,11 @@ impl WorkspaceDb {
     }
 
     fn get_dock_pane(&self, workspace_id: WorkspaceId) -> Result<SerializedPane> {
-        let (pane_id, active) = self.select_row_bound(indoc! {"
+        let (pane_id, active) = self.select_row_bound(sql!(
             SELECT pane_id, active
             FROM panes
-            WHERE pane_id = (SELECT dock_pane FROM workspaces WHERE workspace_id = ?)"})?(
+            WHERE pane_id = (SELECT dock_pane FROM workspaces WHERE workspace_id = ?)
+        ))?(
             workspace_id,
         )?
         .context("No dock pane for workspace")?;
@@ -339,17 +330,19 @@ impl WorkspaceDb {
         parent: Option<(GroupId, usize)>, // None indicates BOTH dock pane AND center_pane
         dock: bool,
     ) -> Result<PaneId> {
-        let pane_id = conn.select_row_bound::<_, i64>(indoc! {"
+        let pane_id = conn.select_row_bound::<_, i64>(sql!(
             INSERT INTO panes(workspace_id, active)
             VALUES (?, ?)
-            RETURNING pane_id"})?((workspace_id, pane.active))?
+            RETURNING pane_id
+        ))?((workspace_id, pane.active))?
         .ok_or_else(|| anyhow!("Could not retrieve inserted pane_id"))?;
 
         if !dock {
             let (parent_id, order) = unzip_option(parent);
-            conn.exec_bound(indoc! {"
+            conn.exec_bound(sql!(
                 INSERT INTO center_panes(pane_id, parent_group_id, position)
-                VALUES (?, ?, ?)"})?((pane_id, parent_id, order))?;
+                VALUES (?, ?, ?)
+            ))?((pane_id, parent_id, order))?;
         }
 
         Self::save_items(conn, workspace_id, pane_id, &pane.children).context("Saving items")?;
@@ -358,10 +351,11 @@ impl WorkspaceDb {
     }
 
     fn get_items(&self, pane_id: PaneId) -> Result<Vec<SerializedItem>> {
-        Ok(self.select_bound(indoc! {"
+        Ok(self.select_bound(sql!(
             SELECT kind, item_id FROM items
             WHERE pane_id = ?
-            ORDER BY position"})?(pane_id)?)
+            ORDER BY position
+        ))?(pane_id)?)
     }
 
     fn save_items(
@@ -370,10 +364,11 @@ impl WorkspaceDb {
         pane_id: PaneId,
         items: &[SerializedItem],
     ) -> Result<()> {
-        let mut insert = conn.exec_bound(
-            "INSERT INTO items(workspace_id, pane_id, position, kind, item_id) VALUES (?, ?, ?, ?, ?)",
-        ).context("Preparing insertion")?;
+        let mut insert = conn.exec_bound(sql!(
+            INSERT INTO items(workspace_id, pane_id, position, kind, item_id) VALUES (?, ?, ?, ?, ?)
+        )).context("Preparing insertion")?;
 
         for (position, item) in items.iter().enumerate() {
+            dbg!(item);
             insert((workspace_id, pane_id, position, item))?;
         }
@@ -386,7 +381,7 @@ mod tests {
     use std::sync::Arc;
 
-    use db::open_memory_db;
+    use db::open_test_db;
     use settings::DockAnchor;
 
     use super::*;
@@ -395,18 +390,19 @@ mod tests {
     async fn test_next_id_stability() {
         env_logger::try_init().ok();
 
-        let db = WorkspaceDb(open_memory_db("test_next_id_stability").await);
+        let db = WorkspaceDb(open_test_db("test_next_id_stability").await);
 
         db.write(|conn| {
             conn.migrate(
                 "test_table",
-                &[indoc! {"
+                &[sql!(
                     CREATE TABLE test_table(
                         text TEXT,
                         workspace_id INTEGER,
                         FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                         ON DELETE CASCADE
-                    ) STRICT;"}],
+                    ) STRICT;
+                )],
             )
             .unwrap();
         })
@@ -416,22 +412,22 @@ mod tests {
         // Assert the empty row got inserted
         assert_eq!(
             Some(id),
-            db.select_row_bound::<WorkspaceId, WorkspaceId>(
-                "SELECT workspace_id FROM workspaces WHERE workspace_id = ?"
-            )
+            db.select_row_bound::<WorkspaceId, WorkspaceId>(sql!(
+                SELECT workspace_id FROM workspaces WHERE workspace_id = ?
+            ))
             .unwrap()(id)
             .unwrap()
         );
 
         db.write(move |conn| {
-            conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)")
+            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                 .unwrap()(("test-text-1", id))
                 .unwrap()
         })
         .await;
 
         let test_text_1 = db
-            .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?")
+            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
             .unwrap()(1)
             .unwrap()
             .unwrap();
@@ -442,19 +438,19 @@ mod tests {
     async fn test_workspace_id_stability() {
         env_logger::try_init().ok();
 
-        let db = WorkspaceDb(open_memory_db("test_workspace_id_stability").await);
+        let db = WorkspaceDb(open_test_db("test_workspace_id_stability").await);
 
         db.write(|conn| {
             conn.migrate(
                 "test_table",
-                &[indoc! {"
+                &[sql!(
                     CREATE TABLE test_table(
                         text TEXT,
                         workspace_id INTEGER,
                         FOREIGN KEY(workspace_id)
                             REFERENCES workspaces(workspace_id)
                         ON DELETE CASCADE
-                    ) STRICT;"}],
+                    ) STRICT;)],
             )
         })
         .await
@@ -479,7 +475,7 @@ mod tests {
         db.save_workspace(workspace_1.clone()).await;
         db.write(|conn| {
-            conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)")
+            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                 .unwrap()(("test-text-1", 1))
                 .unwrap();
         })
@@ -488,7 +484,7 @@ mod tests {
         db.save_workspace(workspace_2.clone()).await;
         db.write(|conn| {
-            conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)")
+            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                 .unwrap()(("test-text-2", 2))
                 .unwrap();
         })
@@ -505,14 +501,14 @@ mod tests {
         db.save_workspace(workspace_2).await;
 
         let test_text_2 = db
-            .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?")
+            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
             .unwrap()(2)
             .unwrap()
             .unwrap();
         assert_eq!(test_text_2, "test-text-2");
 
         let test_text_1 = db
-            .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?")
+            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
             .unwrap()(1)
             .unwrap()
             .unwrap();
@@ -523,7 +519,7 @@ mod tests {
     async fn test_full_workspace_serialization() {
         env_logger::try_init().ok();
 
-        let db = WorkspaceDb(open_memory_db("test_full_workspace_serialization").await);
+        let db = WorkspaceDb(open_test_db("test_full_workspace_serialization").await);
 
         let dock_pane = crate::persistence::model::SerializedPane {
             children: vec![
@@ -597,7 +593,7 @@ mod tests {
     async fn test_workspace_assignment() {
         env_logger::try_init().ok();
 
-        let db = WorkspaceDb(open_memory_db("test_basic_functionality").await);
+        let db = WorkspaceDb(open_test_db("test_basic_functionality").await);
 
         let workspace_1 = SerializedWorkspace {
             id: 1,
@@ -689,7 +685,7 @@ mod tests {
     async fn test_basic_dock_pane() {
         env_logger::try_init().ok();
 
-        let db = WorkspaceDb(open_memory_db("basic_dock_pane").await);
+        let db = WorkspaceDb(open_test_db("basic_dock_pane").await);
 
         let dock_pane = crate::persistence::model::SerializedPane::new(
             vec![
@@ -714,7 +710,7 @@ mod tests {
     async fn test_simple_split() {
         env_logger::try_init().ok();
 
-        let db = WorkspaceDb(open_memory_db("simple_split").await);
+        let db = WorkspaceDb(open_test_db("simple_split").await);
 
         // -----------------
         // | 1,2   |  5,6  |
@@ -766,7 +762,7 @@ mod tests {
     async fn test_cleanup_panes() {
         env_logger::try_init().ok();
 
-        let db = WorkspaceDb(open_memory_db("test_cleanup_panes").await);
+        let db = WorkspaceDb(open_test_db("test_cleanup_panes").await);
 
         let center_pane = SerializedPaneGroup::Group {
             axis: gpui::Axis::Horizontal,

View file

@@ -2365,7 +2365,6 @@ impl Workspace {
             .await;
 
             // Traverse the splits tree and add to things
-
             let (root, active_pane) = serialized_workspace
                 .center_group
                 .deserialize(&project, serialized_workspace.id, &workspace, &mut cx)
@@ -2384,6 +2383,10 @@ impl Workspace {
                 cx.focus(active_pane);
             }
 
+            if workspace.items(cx).next().is_none() {
+                cx.dispatch_action(NewFile);
+            }
+
             cx.notify();
         });
     }
@@ -2636,13 +2639,10 @@ pub fn open_paths(
 pub fn open_new(app_state: &Arc<AppState>, cx: &mut MutableAppContext) -> Task<()> {
     let task = Workspace::new_local(Vec::new(), app_state.clone(), cx);
     cx.spawn(|mut cx| async move {
-        eprintln!("Open new task spawned");
         let (workspace, opened_paths) = task.await;
-        eprintln!("workspace and path items created");
 
         workspace.update(&mut cx, |_, cx| {
             if opened_paths.is_empty() {
-                eprintln!("new file redispatched");
                 cx.dispatch_action(NewFile);
             }
         })