WIP switching to sqlez

commit e3fdfe02e5 (parent 7744c9ba45)
4 changed files with 428 additions and 501 deletions
Cargo.lock (generated): 712 lines changed
File diff suppressed because it is too large.
crates/db/Cargo.toml:

@@ -11,6 +11,7 @@ doctest = false
 test-support = []
 
 [dependencies]
+indoc = "1.0.4"
 collections = { path = "../collections" }
 gpui = { path = "../gpui" }
 anyhow = "1.0.57"
@@ -18,10 +19,7 @@ async-trait = "0.1"
 lazy_static = "1.4.0"
 log = { version = "0.4.16", features = ["kv_unstable_serde"] }
 parking_lot = "0.11.1"
-rusqlite = { version = "0.28.0", features = ["bundled", "serde_json", "backup"] }
-rusqlite_migration = { git = "https://github.com/cljoly/rusqlite_migration", rev = "c433555d7c1b41b103426e35756eb3144d0ebbc6" }
-serde = { workspace = true }
-serde_rusqlite = "0.31.0"
+sqlez = { git = "https://github.com/Kethku/sqlez", ref = "c8c01fe6b82085bbfe81b2a9406718454a7839c4c" }
 
 [dev-dependencies]
 gpui = { path = "../gpui", features = ["test-support"] }
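Note: the new indoc dependency supplies the indoc! macro used for the inline
SQL in the changes below. A quick illustration of what it does:

    use indoc::indoc;

    fn main() {
        // indoc! strips the leading newline and the common leading
        // indentation at compile time, so multi-line SQL can be indented
        // to match the surrounding Rust.
        let sql = indoc! {"
            PRAGMA journal_mode=WAL;
            PRAGMA synchronous=NORMAL;
        "};
        assert_eq!(sql, "PRAGMA journal_mode=WAL;\nPRAGMA synchronous=NORMAL;\n");
    }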
crates/db/src/db.rs:

@@ -5,26 +5,25 @@ pub mod pane;
 pub mod workspace;
 
 use std::fs;
-use std::path::{Path, PathBuf};
-use std::sync::Arc;
+use std::ops::Deref;
+use std::path::Path;
 
 use anyhow::Result;
-use log::error;
-use parking_lot::Mutex;
-use rusqlite::{backup, Connection};
+use indoc::indoc;
+use sqlez::connection::Connection;
+use sqlez::thread_safe_connection::ThreadSafeConnection;
 
-use migrations::MIGRATIONS;
 pub use workspace::*;
 
 #[derive(Clone)]
-pub enum Db {
-    Real(Arc<RealDb>),
-    Null,
-}
+struct Db(ThreadSafeConnection);
 
-pub struct RealDb {
-    connection: Mutex<Connection>,
-    path: Option<PathBuf>,
+impl Deref for Db {
+    type Target = sqlez::connection::Connection;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0.deref()
+    }
 }
 
 impl Db {
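Note: Db is now a newtype over ThreadSafeConnection that forwards method
calls through Deref, so callers can invoke connection methods on Db
directly. A self-contained sketch of the pattern, using a stand-in
Connection type rather than sqlez's real one:

    use std::ops::Deref;

    // Stand-in for the sqlez connection type; hypothetical, for
    // illustration only.
    struct Connection;

    impl Connection {
        fn exec(&self, sql: &str) {
            println!("exec: {sql}");
        }
    }

    struct Db(Connection);

    impl Deref for Db {
        type Target = Connection;

        fn deref(&self) -> &Self::Target {
            &self.0
        }
    }

    fn main() {
        let db = Db(Connection);
        // Deref coercion exposes Connection's methods on Db.
        db.exec("PRAGMA optimize");
    }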
@@ -36,104 +35,44 @@ impl Db {
             .expect("Should be able to create the database directory");
         let db_path = current_db_dir.join(Path::new("db.sqlite"));
 
-        Connection::open(db_path)
-            .map_err(Into::into)
-            .and_then(|connection| Self::initialize(connection))
-            .map(|connection| {
-                Db::Real(Arc::new(RealDb {
-                    connection,
-                    path: Some(db_dir.to_path_buf()),
-                }))
-            })
-            .unwrap_or_else(|e| {
-                error!(
-                    "Connecting to file backed db failed. Reverting to null db. {}",
-                    e
-                );
-                Self::Null
-            })
-    }
-
-    fn initialize(mut conn: Connection) -> Result<Mutex<Connection>> {
-        MIGRATIONS.to_latest(&mut conn)?;
-
-        conn.pragma_update(None, "journal_mode", "WAL")?;
-        conn.pragma_update(None, "synchronous", "NORMAL")?;
-        conn.pragma_update(None, "foreign_keys", true)?;
-        conn.pragma_update(None, "case_sensitive_like", true)?;
-
-        Ok(Mutex::new(conn))
+        Db(
+            ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true)
+                .with_initialize_query(indoc! {"
+                    PRAGMA journal_mode=WAL;
+                    PRAGMA synchronous=NORMAL;
+                    PRAGMA foreign_keys=TRUE;
+                    PRAGMA case_sensitive_like=TRUE;
+                "}),
+        )
     }
 
     pub fn persisting(&self) -> bool {
-        self.real().and_then(|db| db.path.as_ref()).is_some()
-    }
-
-    pub fn real(&self) -> Option<&RealDb> {
-        match self {
-            Db::Real(db) => Some(&db),
-            _ => None,
-        }
+        self.persistent()
     }
 
     /// Open a in memory database for testing and as a fallback.
     pub fn open_in_memory() -> Self {
-        Connection::open_in_memory()
-            .map_err(Into::into)
-            .and_then(|connection| Self::initialize(connection))
-            .map(|connection| {
-                Db::Real(Arc::new(RealDb {
-                    connection,
-                    path: None,
-                }))
-            })
-            .unwrap_or_else(|e| {
-                error!(
-                    "Connecting to in memory db failed. Reverting to null db. {}",
-                    e
-                );
-                Self::Null
-            })
+        Db(
+            ThreadSafeConnection::new("Zed DB", false).with_initialize_query(indoc! {"
+                PRAGMA journal_mode=WAL;
+                PRAGMA synchronous=NORMAL;
+                PRAGMA foreign_keys=TRUE;
+                PRAGMA case_sensitive_like=TRUE;
+            "}),
+        )
     }
 
     pub fn write_to<P: AsRef<Path>>(&self, dest: P) -> Result<()> {
-        self.real()
-            .map(|db| {
-                if db.path.is_some() {
-                    panic!("DB already exists");
-                }
-
-                let lock = db.connection.lock();
-                let mut dst = Connection::open(dest)?;
-                let backup = backup::Backup::new(&lock, &mut dst)?;
-                backup.step(-1)?;
-
-                Ok(())
-            })
-            .unwrap_or(Ok(()))
+        let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref());
+        self.backup(&destination)
     }
 }
 
 impl Drop for Db {
     fn drop(&mut self) {
-        match self {
-            Db::Real(real_db) => {
-                let lock = real_db.connection.lock();
-
-                let _ = lock.pragma_update(None, "analysis_limit", "500");
-                let _ = lock.pragma_update(None, "optimize", "");
-            }
-            Db::Null => {}
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use crate::migrations::MIGRATIONS;
-
-    #[test]
-    fn test_migrations() {
-        assert!(MIGRATIONS.validate().is_ok());
+        self.exec(indoc! {"
+            PRAGMA analysis_limit=500;
+            PRAGMA optimize"})
+            .ok();
     }
 }
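Note: ThreadSafeConnection::new(uri, persistent) plus with_initialize_query
reads as a builder that records a setup script and replays it on every
underlying connection it opens. A rough sketch of that shape; the names are
taken from the diff, but the implementation below is assumed, not sqlez's
actual code:

    // Assumed shape of the builder; the real type presumably also hands
    // out one SQLite connection per thread, hence the name.
    struct ThreadSafeConnection {
        uri: String,
        persistent: bool,
        initialize_query: Option<String>,
    }

    impl ThreadSafeConnection {
        fn new(uri: &str, persistent: bool) -> Self {
            Self {
                uri: uri.to_string(),
                persistent,
                initialize_query: None,
            }
        }

        // Record SQL (e.g. the PRAGMAs above) to run whenever a new
        // underlying connection is opened.
        fn with_initialize_query(mut self, query: &str) -> Self {
            self.initialize_query = Some(query.to_string());
            self
        }
    }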
crates/db/src/workspace.rs:

@@ -1,8 +1,5 @@
 use anyhow::Result;
-use rusqlite::{params, Connection, OptionalExtension};
-use serde::{Deserialize, Serialize};
-
 use std::{
     ffi::OsStr,
     fmt::Debug,
@@ -12,28 +9,34 @@ use std::{
     time::{SystemTime, UNIX_EPOCH},
 };
 
+use anyhow::Result;
+use indoc::indoc;
+use sqlez::{connection::Connection, migrations::Migration};
+
 use crate::pane::SerializedDockPane;
 
 use super::Db;
 
 // If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging
 // you might want to update some of the parsing code as well, I've left the variations in but commented
-// out
-pub(crate) const WORKSPACE_M_1: &str = "
-CREATE TABLE workspaces(
-    workspace_id INTEGER PRIMARY KEY,
-    last_opened_timestamp INTEGER NOT NULL
-) STRICT;
-
-CREATE TABLE worktree_roots(
-    worktree_root BLOB NOT NULL,
-    workspace_id INTEGER NOT NULL,
-    FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
-    PRIMARY KEY(worktree_root, workspace_id)
-) STRICT;
-";
-
-#[derive(Debug, PartialEq, Eq, Copy, Clone, Default, Deserialize, Serialize)]
+// out. This will panic if run on an existing db that has already been migrated
+const WORKSPACES_MIGRATION: Migration = Migration::new(
+    "migrations",
+    &[indoc! {"
+        CREATE TABLE workspaces(
+            workspace_id INTEGER PRIMARY KEY,
+            timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL
+        ) STRICT;
+
+        CREATE TABLE worktree_roots(
+            worktree_root BLOB NOT NULL,
+            workspace_id INTEGER NOT NULL,
+            FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
+            PRIMARY KEY(worktree_root, workspace_id)
+        ) STRICT;"}],
+);
+
+#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)]
 pub struct WorkspaceId(i64);
 
 impl WorkspaceId {
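Note: Migration::new takes a domain name plus an ordered slice of SQL
steps; per the comment above, a database already migrated under the old
scheme will not match and will panic. A minimal sketch of the step-list
idea, with a hypothetical runner (not sqlez's implementation):

    struct Migration {
        domain: &'static str,
        steps: &'static [&'static str],
    }

    impl Migration {
        const fn new(domain: &'static str, steps: &'static [&'static str]) -> Self {
            Self { domain, steps }
        }

        // Apply every step past the ones already recorded as run.
        fn run_from(&self, already_run: usize) {
            for (i, step) in self.steps.iter().enumerate().skip(already_run) {
                println!("[{}] applying step {}: {}", self.domain, i, step);
            }
        }
    }

    fn main() {
        const M: Migration = Migration::new(
            "workspaces",
            &["CREATE TABLE workspaces(workspace_id INTEGER PRIMARY KEY) STRICT;"],
        );
        M.run_from(0);
    }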
@@ -77,19 +80,9 @@ impl Db {
     where
         P: AsRef<Path> + Debug,
     {
-        fn logic<P>(
-            connection: &mut Connection,
-            worktree_roots: &[P],
-        ) -> Result<SerializedWorkspace>
-        where
-            P: AsRef<Path> + Debug,
-        {
-            let tx = connection.transaction()?;
-
-            tx.execute(
-                "INSERT INTO workspaces(last_opened_timestamp) VALUES (?)",
-                [current_millis()?],
-            )?;
+        let result = (|| {
+            let tx = self.transaction()?;
+            tx.execute("INSERT INTO workspaces(last_opened_timestamp) VALUES (?)", [current_millis()?])?;
 
             let id = WorkspaceId(tx.last_insert_rowid());
@@ -101,22 +94,15 @@ impl Db {
                 workspace_id: id,
                 dock_pane: None,
             })
-        }
-
-        self.real()
-            .map(|db| {
-                let mut lock = db.connection.lock();
-
-                // No need to waste the memory caching this, should happen rarely.
-                match logic(&mut lock, worktree_roots) {
-                    Ok(serialized_workspace) => serialized_workspace,
-                    Err(err) => {
-                        log::error!("Failed to insert new workspace into DB: {}", err);
-                        Default::default()
-                    }
-                }
-            })
-            .unwrap_or_default()
+        })();
+
+        match result {
+            Ok(serialized_workspace) => serialized_workspace,
+            Err(err) => {
+                log::error!("Failed to insert new workspace into DB: {}", err);
+                Default::default()
+            }
+        }
     }
 
     fn workspace_id<P>(&self, worktree_roots: &[P]) -> Option<WorkspaceId>
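Note: the rewrite swaps the inner fn logic helper for an immediately
invoked closure, which lets ? short-circuit inside the closure while the
enclosing function stays infallible. A standalone example of the pattern:

    use std::num::ParseIntError;

    fn parse_or_default(input: &str) -> i64 {
        // `?` works inside the closure even though the enclosing
        // function does not return a Result.
        let result = (|| -> Result<i64, ParseIntError> {
            let n: i64 = input.trim().parse()?;
            Ok(n * 2)
        })();

        match result {
            Ok(n) => n,
            Err(err) => {
                eprintln!("failed to parse {input:?}: {err}");
                Default::default()
            }
        }
    }

    fn main() {
        assert_eq!(parse_or_default(" 21 "), 42);
        assert_eq!(parse_or_default("oops"), 0);
    }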