parent b06366ebb7
commit 84c7aa9cad
11 changed files with 136 additions and 60 deletions
@@ -19,6 +19,8 @@ log = { version = "0.4.16", features = ["kv_unstable_serde"] }
 parking_lot = "0.11.1"
 rusqlite = { version = "0.28.0", features = ["bundled", "serde_json"] }
 rusqlite_migration = "1.0.0"
+serde = { workspace = true }
+serde_rusqlite = "0.31.0"
 
 [dev-dependencies]
 gpui = { path = "../gpui", features = ["test-support"] }
@@ -1,7 +1,8 @@
 mod kvp;
 mod migrations;
 
-use std::path::Path;
+use std::fs;
+use std::path::{Path, PathBuf};
 use std::sync::Arc;
 
 use anyhow::Result;
@@ -11,58 +12,108 @@ use rusqlite::Connection;
 
 use migrations::MIGRATIONS;
 
 #[derive(Clone)]
 pub enum Db {
-    Real {
-        connection: Mutex<Connection>,
-        in_memory: bool,
-    },
+    Real(Arc<RealDb>),
     Null,
 }
 
-// To make a migration:
-// Add to the migrations directory, a file with the name:
-// <NUMBER>_<DESCRIPTION>.sql. Migrations are executed in order of number
+pub struct RealDb {
+    connection: Mutex<Connection>,
+    path: Option<PathBuf>,
+}
+
 impl Db {
-    /// Open or create a database at the given file path. Falls back to in memory database if the
-    /// database at the given path is corrupted
-    pub fn open(path: &Path) -> Arc<Self> {
-        Connection::open(path)
+    /// Open or create a database at the given directory path.
+    pub fn open(db_dir: &Path) -> Self {
+        // Use 0 for now. Will implement incrementing and clearing of old db files soon TM
+        let current_db_dir = db_dir.join(Path::new("0"));
+        fs::create_dir_all(&current_db_dir)
+            .expect("Should be able to create the database directory");
+        let db_path = current_db_dir.join(Path::new("db.sqlite"));
+
+        Connection::open(db_path)
             .map_err(Into::into)
-            .and_then(|connection| Self::initialize(connection, false))
+            .and_then(|connection| Self::initialize(connection))
+            .map(|connection| {
+                Db::Real(Arc::new(RealDb {
+                    connection,
+                    path: Some(db_dir.to_path_buf()),
+                }))
+            })
             .unwrap_or_else(|e| {
                 error!(
-                    "Connecting to db failed. Falling back to in memory db. {}",
+                    "Connecting to file backed db failed. Reverting to null db. {}",
                     e
                 );
-                Self::open_in_memory()
+                Self::Null
             })
     }
 
     /// Open a in memory database for testing and as a fallback.
-    pub fn open_in_memory() -> Arc<Self> {
+    #[cfg(any(test, feature = "test-support"))]
+    pub fn open_in_memory() -> Self {
         Connection::open_in_memory()
             .map_err(Into::into)
-            .and_then(|connection| Self::initialize(connection, true))
+            .and_then(|connection| Self::initialize(connection))
+            .map(|connection| {
+                Db::Real(Arc::new(RealDb {
+                    connection,
+                    path: None,
+                }))
+            })
             .unwrap_or_else(|e| {
-                error!("Connecting to in memory db failed. Reverting to null db. {}");
-                Arc::new(Self::Null)
+                error!(
+                    "Connecting to in memory db failed. Reverting to null db. {}",
+                    e
+                );
+                Self::Null
             })
     }
 
-    fn initialize(mut conn: Connection, in_memory: bool) -> Result<Arc<Self>> {
+    fn initialize(mut conn: Connection) -> Result<Mutex<Connection>> {
         MIGRATIONS.to_latest(&mut conn)?;
 
-        Ok(Arc::new(Self::Real {
-            connection: Mutex::new(conn),
-            in_memory,
-        }))
+        conn.pragma_update(None, "journal_mode", "WAL")?;
+        conn.pragma_update(None, "synchronous", "NORMAL")?;
+        conn.pragma_update(None, "foreign_keys", true)?;
+        conn.pragma_update(None, "case_sensitive_like", true)?;
+
+        Ok(Mutex::new(conn))
     }
 
-    fn persisting(&self) -> bool {
+    pub fn persisting(&self) -> bool {
+        self.real().and_then(|db| db.path.as_ref()).is_some()
+    }
+
+    pub fn real(&self) -> Option<&RealDb> {
         match self {
-            Db::Real { in_memory, .. } => *in_memory,
-            _ => false,
+            Db::Real(db) => Some(&db),
+            _ => None,
         }
     }
 }
+
+impl Drop for Db {
+    fn drop(&mut self) {
+        match self {
+            Db::Real(real_db) => {
+                let lock = real_db.connection.lock();
+
+                let _ = lock.pragma_update(None, "analysis_limit", "500");
+                let _ = lock.pragma_update(None, "optimize", "");
+            }
+            Db::Null => {}
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::migrations::MIGRATIONS;
+
+    #[test]
+    fn test_migrations() {
+        assert!(MIGRATIONS.validate().is_ok());
+    }
+}
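Note (not part of the commit): a minimal sketch of how a caller would use the reworked API. It assumes the crate is imported as `db`, and the directory path is made up; `Db::open` now returns `Self` directly, and a failed file-backed connection degrades to `Db::Null` instead of an in-memory database.

use std::path::Path;

use db::Db;

fn main() {
    // Hypothetical directory; open() joins "0/db.sqlite" onto it.
    let db = Db::open(Path::new("/tmp/example-db-dir"));

    // persisting() is true only for a file-backed Db::Real that kept its path.
    if db.persisting() {
        db.write_kvp("last-open", "2022-11-01").unwrap();
    }

    // Against Db::Null every kvp call returns a harmless default: Ok(None) here.
    assert_eq!(db.read_kvp("missing-key").unwrap(), None);
}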
@@ -3,7 +3,7 @@ use rusqlite::OptionalExtension;
 
 use super::Db;
 
-pub(crate) const KVP_M_1: &str = "
+pub(crate) const KVP_M_1_UP: &str = "
 CREATE TABLE kv_store(
     key TEXT PRIMARY KEY,
     value TEXT NOT NULL
@@ -12,31 +12,44 @@ CREATE TABLE kv_store(
 
 impl Db {
     pub fn read_kvp(&self, key: &str) -> Result<Option<String>> {
-        let lock = self.connection.lock();
-        let mut stmt = lock.prepare_cached("SELECT value FROM kv_store WHERE key = (?)")?;
+        self.real()
+            .map(|db| {
+                let lock = db.connection.lock();
+                let mut stmt = lock.prepare_cached("SELECT value FROM kv_store WHERE key = (?)")?;
 
-        Ok(stmt.query_row([key], |row| row.get(0)).optional()?)
-    }
-
-    pub fn delete_kvp(&self, key: &str) -> Result<()> {
-        let lock = self.connection.lock();
-
-        let mut stmt = lock.prepare_cached("DELETE FROM kv_store WHERE key = (?)")?;
-
-        stmt.execute([key])?;
-
-        Ok(())
+                Ok(stmt.query_row([key], |row| row.get(0)).optional()?)
+            })
+            .unwrap_or(Ok(None))
     }
 
     pub fn write_kvp(&self, key: &str, value: &str) -> Result<()> {
-        let lock = self.connection.lock();
+        self.real()
+            .map(|db| {
+                let lock = db.connection.lock();
 
-        let mut stmt =
-            lock.prepare_cached("INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))")?;
+                let mut stmt = lock.prepare_cached(
+                    "INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))",
+                )?;
 
-        stmt.execute([key, value])?;
+                stmt.execute([key, value])?;
 
-        Ok(())
+                Ok(())
+            })
+            .unwrap_or(Ok(()))
     }
+
+    pub fn delete_kvp(&self, key: &str) -> Result<()> {
+        self.real()
+            .map(|db| {
+                let lock = db.connection.lock();
+
+                let mut stmt = lock.prepare_cached("DELETE FROM kv_store WHERE key = (?)")?;
+
+                stmt.execute([key])?;
+
+                Ok(())
+            })
+            .unwrap_or(Ok(()))
+    }
 }
 
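Note (not part of the commit): every accessor now routes through `self.real()` and falls back to a harmless default when the db is `Db::Null`. A hypothetical extra accessor following the same shape, assuming `anyhow::Result` is in scope and the `kv_store` table above exists:

impl Db {
    /// Hypothetical example only: count rows, returning 0 for the null db.
    pub fn count_kvps(&self) -> Result<usize> {
        self.real()
            .map(|db| {
                let lock = db.connection.lock();
                let mut stmt = lock.prepare_cached("SELECT COUNT(*) FROM kv_store")?;

                Ok(stmt.query_row([], |row| row.get::<_, i64>(0))? as usize)
            })
            .unwrap_or(Ok(0))
    }
}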
@@ -48,7 +61,7 @@ mod tests {
 
     #[test]
     fn test_kvp() -> Result<()> {
-        let db = Db::open_in_memory()?;
+        let db = Db::open_in_memory();
 
         assert_eq!(db.read_kvp("key-1")?, None);
 
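Note (not part of the commit): `test_kvp` only changes how the db is opened here; a fuller round-trip over the same API could look like the sketch below.

#[test]
fn test_kvp_roundtrip() -> Result<()> {
    // Sketch only: write, read back, delete, read again.
    let db = Db::open_in_memory();

    db.write_kvp("key-1", "one")?;
    assert_eq!(db.read_kvp("key-1")?, Some("one".to_string()));

    db.delete_kvp("key-1")?;
    assert_eq!(db.read_kvp("key-1")?, None);

    Ok(())
}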
@@ -1,7 +1,7 @@
 use rusqlite_migration::{Migrations, M};
 
 // use crate::items::ITEMS_M_1;
-use crate::kvp::KVP_M_1;
+use crate::kvp::KVP_M_1_UP;
 
 // This must be ordered by development time! Only ever add new migrations to the end!!
 // Bad things will probably happen if you don't monotonically edit this vec!!!!
@@ -9,7 +9,7 @@ use crate::kvp::KVP_M_1;
 // file system and so everything we do here is locked in _f_o_r_e_v_e_r_.
 lazy_static::lazy_static! {
     pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![
-        M::up(KVP_M_1),
+        M::up(KVP_M_1_UP),
         // M::up(ITEMS_M_1),
     ]);
 }
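Note (not part of the commit): a later schema change would be appended to the end of this vec, never inserted or reordered, so existing databases replay the same steps. The constant name and SQL below are illustrative only.

// In kvp.rs (hypothetical follow-up migration):
pub(crate) const KVP_M_2_UP: &str = "
ALTER TABLE kv_store ADD COLUMN updated_at TEXT;
";

// In migrations.rs, appended after the existing step:
lazy_static::lazy_static! {
    pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![
        M::up(KVP_M_1_UP),
        M::up(KVP_M_2_UP),
        // M::up(ITEMS_M_1),
    ]);
}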