feat(nbstore): improve nbstore (#9512)

This commit is contained in:
EYHN
2025-01-06 09:38:03 +00:00
parent a2563d2180
commit 46c8c4a408
103 changed files with 3337 additions and 3423 deletions

View File

@@ -1,41 +1,34 @@
/* auto-generated by NAPI-RS */
/* eslint-disable */
export declare class DocStorage {
constructor(path: string)
export declare class DocStoragePool {
constructor()
/** Initialize the database and run migrations. */
connect(): Promise<void>
close(): Promise<void>
get isClosed(): Promise<boolean>
/**
* Flush the WAL file to the database file.
* See https://www.sqlite.org/pragma.html#pragma_wal_checkpoint:~:text=PRAGMA%20schema.wal_checkpoint%3B
*/
checkpoint(): Promise<void>
validate(): Promise<boolean>
setSpaceId(spaceId: string): Promise<void>
pushUpdate(docId: string, update: Uint8Array): Promise<Date>
getDocSnapshot(docId: string): Promise<DocRecord | null>
setDocSnapshot(snapshot: DocRecord): Promise<boolean>
getDocUpdates(docId: string): Promise<Array<DocUpdate>>
markUpdatesMerged(docId: string, updates: Array<Date>): Promise<number>
deleteDoc(docId: string): Promise<void>
getDocClocks(after?: Date | undefined | null): Promise<Array<DocClock>>
getDocClock(docId: string): Promise<DocClock | null>
getBlob(key: string): Promise<Blob | null>
setBlob(blob: SetBlob): Promise<void>
deleteBlob(key: string, permanently: boolean): Promise<void>
releaseBlobs(): Promise<void>
listBlobs(): Promise<Array<ListedBlob>>
getPeerRemoteClocks(peer: string): Promise<Array<DocClock>>
getPeerRemoteClock(peer: string, docId: string): Promise<DocClock>
setPeerRemoteClock(peer: string, docId: string, clock: Date): Promise<void>
getPeerPulledRemoteClocks(peer: string): Promise<Array<DocClock>>
getPeerPulledRemoteClock(peer: string, docId: string): Promise<DocClock>
setPeerPulledRemoteClock(peer: string, docId: string, clock: Date): Promise<void>
getPeerPushedClocks(peer: string): Promise<Array<DocClock>>
getPeerPushedClock(peer: string, docId: string): Promise<DocClock>
setPeerPushedClock(peer: string, docId: string, clock: Date): Promise<void>
clearClocks(): Promise<void>
connect(universalId: string, path: string): Promise<void>
disconnect(universalId: string): Promise<void>
setSpaceId(universalId: string, spaceId: string): Promise<void>
pushUpdate(universalId: string, docId: string, update: Uint8Array): Promise<Date>
getDocSnapshot(universalId: string, docId: string): Promise<DocRecord | null>
setDocSnapshot(universalId: string, snapshot: DocRecord): Promise<boolean>
getDocUpdates(universalId: string, docId: string): Promise<Array<DocUpdate>>
markUpdatesMerged(universalId: string, docId: string, updates: Array<Date>): Promise<number>
deleteDoc(universalId: string, docId: string): Promise<void>
getDocClocks(universalId: string, after?: Date | undefined | null): Promise<Array<DocClock>>
getDocClock(universalId: string, docId: string): Promise<DocClock | null>
getBlob(universalId: string, key: string): Promise<Blob | null>
setBlob(universalId: string, blob: SetBlob): Promise<void>
deleteBlob(universalId: string, key: string, permanently: boolean): Promise<void>
releaseBlobs(universalId: string): Promise<void>
listBlobs(universalId: string): Promise<Array<ListedBlob>>
getPeerRemoteClocks(universalId: string, peer: string): Promise<Array<DocClock>>
getPeerRemoteClock(universalId: string, peer: string, docId: string): Promise<DocClock>
setPeerRemoteClock(universalId: string, peer: string, docId: string, clock: Date): Promise<void>
getPeerPulledRemoteClocks(universalId: string, peer: string): Promise<Array<DocClock>>
getPeerPulledRemoteClock(universalId: string, peer: string, docId: string): Promise<DocClock>
setPeerPulledRemoteClock(universalId: string, peer: string, docId: string, clock: Date): Promise<void>
getPeerPushedClocks(universalId: string, peer: string): Promise<Array<DocClock>>
getPeerPushedClock(universalId: string, peer: string, docId: string): Promise<DocClock>
setPeerPushedClock(universalId: string, peer: string, docId: string, clock: Date): Promise<void>
clearClocks(universalId: string): Promise<void>
}
export declare class SqliteConnection {
@@ -96,14 +89,14 @@ export interface DocClock {
export interface DocRecord {
docId: string
data: Uint8Array
bin: Uint8Array
timestamp: Date
}
export interface DocUpdate {
docId: string
createdAt: Date
data: Uint8Array
timestamp: Date
bin: Uint8Array
}
export interface InsertRow {

View File

@@ -364,7 +364,7 @@ if (!nativeBinding) {
throw new Error(`Failed to load native binding`)
}
module.exports.DocStorage = nativeBinding.DocStorage
module.exports.DocStoragePool = nativeBinding.DocStoragePool
module.exports.SqliteConnection = nativeBinding.SqliteConnection
module.exports.mintChallengeResponse = nativeBinding.mintChallengeResponse
module.exports.ValidationResult = nativeBinding.ValidationResult

View File

@@ -13,8 +13,10 @@ use-as-lib = ["napi-derive/noop", "napi/noop"]
affine_schema = { path = "../schema" }
anyhow = { workspace = true }
chrono = { workspace = true }
dashmap = { workspace = true }
napi = { workspace = true }
napi-derive = { workspace = true }
thiserror = { workspace = true }
sqlx = { workspace = true, default-features = false, features = ["chrono", "macros", "migrate", "runtime-tokio", "sqlite", "tls-rustls"] }
tokio = { workspace = true, features = ["full"] }

View File

@@ -1,18 +1,18 @@
use std::ops::Deref;
use super::{storage::SqliteDocStorage, Blob, ListedBlob, SetBlob};
type Result<T> = std::result::Result<T, sqlx::Error>;
use super::{error::Result, storage::SqliteDocStorage, Blob, ListedBlob, SetBlob};
impl SqliteDocStorage {
pub async fn get_blob(&self, key: String) -> Result<Option<Blob>> {
sqlx::query_as!(
let result = sqlx::query_as!(
Blob,
"SELECT key, data, size, mime, created_at FROM blobs WHERE key = ? AND deleted_at IS NULL",
key
)
.fetch_optional(&self.pool)
.await
.await?;
Ok(result)
}
pub async fn set_blob(&self, blob: SetBlob) -> Result<()> {
@@ -58,12 +58,14 @@ impl SqliteDocStorage {
}
pub async fn list_blobs(&self) -> Result<Vec<ListedBlob>> {
sqlx::query_as!(
let result = sqlx::query_as!(
ListedBlob,
"SELECT key, size, mime, created_at FROM blobs WHERE deleted_at IS NULL ORDER BY created_at DESC;"
)
.fetch_all(&self.pool)
.await
.await?;
Ok(result)
}
}

View File

@@ -3,8 +3,7 @@ use std::ops::Deref;
use chrono::{DateTime, NaiveDateTime};
use sqlx::{QueryBuilder, Row};
use super::storage::{Result, SqliteDocStorage};
use super::{DocClock, DocRecord, DocUpdate};
use super::{error::Result, storage::SqliteDocStorage, DocClock, DocRecord, DocUpdate};
struct Meta {
space_id: String,
@@ -81,7 +80,7 @@ impl SqliteDocStorage {
Ok(()) => break,
Err(e) => {
if tried > 10 {
return Err(e);
return Err(e.into());
}
// Increment timestamp by 1ms and retry
@@ -126,13 +125,15 @@ impl SqliteDocStorage {
}
pub async fn get_doc_snapshot(&self, doc_id: String) -> Result<Option<DocRecord>> {
sqlx::query_as!(
let result = sqlx::query_as!(
DocRecord,
"SELECT doc_id, data, updated_at as timestamp FROM snapshots WHERE doc_id = ?",
"SELECT doc_id, data as bin, updated_at as timestamp FROM snapshots WHERE doc_id = ?",
doc_id
)
.fetch_optional(&self.pool)
.await
.await?;
Ok(result)
}
pub async fn set_doc_snapshot(&self, snapshot: DocRecord) -> Result<bool> {
@@ -145,7 +146,7 @@ impl SqliteDocStorage {
WHERE updated_at <= $3;"#,
)
.bind(snapshot.doc_id)
.bind(snapshot.data.deref())
.bind(snapshot.bin.deref())
.bind(snapshot.timestamp)
.execute(&self.pool)
.await?;
@@ -154,13 +155,15 @@ impl SqliteDocStorage {
}
pub async fn get_doc_updates(&self, doc_id: String) -> Result<Vec<DocUpdate>> {
sqlx::query_as!(
let result = sqlx::query_as!(
DocUpdate,
"SELECT doc_id, created_at, data FROM updates WHERE doc_id = ?",
"SELECT doc_id, created_at as timestamp, data as bin FROM updates WHERE doc_id = ?",
doc_id
)
.fetch_all(&self.pool)
.await
.await?;
Ok(result)
}
pub async fn mark_updates_merged(
@@ -204,7 +207,9 @@ impl SqliteDocStorage {
.execute(&mut *tx)
.await?;
tx.commit().await
tx.commit().await?;
Ok(())
}
pub async fn get_doc_clocks(&self, after: Option<NaiveDateTime>) -> Result<Vec<DocClock>> {
@@ -228,13 +233,15 @@ impl SqliteDocStorage {
}
pub async fn get_doc_clock(&self, doc_id: String) -> Result<Option<DocClock>> {
sqlx::query_as!(
let result = sqlx::query_as!(
DocClock,
"SELECT doc_id, timestamp FROM clocks WHERE doc_id = ?",
doc_id
)
.fetch_optional(&self.pool)
.await
.await?;
Ok(result)
}
}
@@ -286,7 +293,7 @@ mod tests {
storage
.set_doc_snapshot(DocRecord {
doc_id: "test".to_string(),
data: vec![0, 0].into(),
bin: vec![0, 0].into(),
timestamp: Utc::now().naive_utc(),
})
.await
@@ -350,7 +357,7 @@ mod tests {
assert_eq!(result.len(), 4);
assert_eq!(
result.iter().map(|u| u.data.as_ref()).collect::<Vec<_>>(),
result.iter().map(|u| u.bin.as_ref()).collect::<Vec<_>>(),
updates
);
}
@@ -365,7 +372,7 @@ mod tests {
let snapshot = DocRecord {
doc_id: "test".to_string(),
data: vec![0, 0].into(),
bin: vec![0, 0].into(),
timestamp: Utc::now().naive_utc(),
};
@@ -374,7 +381,7 @@ mod tests {
let result = storage.get_doc_snapshot("test".to_string()).await.unwrap();
assert!(result.is_some());
assert_eq!(result.unwrap().data.as_ref(), vec![0, 0]);
assert_eq!(result.unwrap().bin.as_ref(), vec![0, 0]);
}
#[tokio::test]
@@ -383,7 +390,7 @@ mod tests {
let snapshot = DocRecord {
doc_id: "test".to_string(),
data: vec![0, 0].into(),
bin: vec![0, 0].into(),
timestamp: Utc::now().naive_utc(),
};
@@ -392,11 +399,11 @@ mod tests {
let result = storage.get_doc_snapshot("test".to_string()).await.unwrap();
assert!(result.is_some());
assert_eq!(result.unwrap().data.as_ref(), vec![0, 0]);
assert_eq!(result.unwrap().bin.as_ref(), vec![0, 0]);
let snapshot = DocRecord {
doc_id: "test".to_string(),
data: vec![0, 1].into(),
bin: vec![0, 1].into(),
timestamp: DateTime::from_timestamp_millis(Utc::now().timestamp_millis() - 1000)
.unwrap()
.naive_utc(),
@@ -408,7 +415,7 @@ mod tests {
let result = storage.get_doc_snapshot("test".to_string()).await.unwrap();
assert!(result.is_some());
assert_eq!(result.unwrap().data.as_ref(), vec![0, 0]);
assert_eq!(result.unwrap().bin.as_ref(), vec![0, 0]);
}
#[tokio::test]
@@ -468,7 +475,7 @@ mod tests {
updates
.iter()
.skip(1)
.map(|u| u.created_at)
.map(|u| u.timestamp)
.collect::<Vec<_>>(),
)
.await

View File

@@ -0,0 +1,11 @@
/// Convenience alias: every fallible nbstore operation returns this `Result`.
pub type Result<T> = std::result::Result<T, Error>;
/// Error type for the sqlite doc-storage layer.
///
/// Wraps the underlying `sqlx` driver and migration errors, plus one
/// domain-level error for operations on a storage that was never connected.
#[derive(Debug, thiserror::Error)]
pub enum Error {
// Any error surfaced by sqlx query execution or connection handling.
#[error("Sqlite Error: {0}")]
SqlxError(#[from] sqlx::Error),
// Failure while running the embedded schema migrations on connect.
#[error("Migrate Error: {0}")]
MigrateError(#[from] sqlx::migrate::MigrateError),
// Returned e.g. when a pooled storage is used before `connect` was called.
#[error("Invalid operation")]
InvalidOperation,
}

View File

@@ -1,11 +1,14 @@
pub mod blob;
pub mod doc;
pub mod error;
pub mod pool;
pub mod storage;
pub mod sync;
use chrono::NaiveDateTime;
use napi::bindgen_prelude::*;
use napi_derive::napi;
use pool::SqliteDocStoragePool;
#[cfg(feature = "use-as-lib")]
type Result<T> = anyhow::Result<T>;
@@ -14,13 +17,10 @@ type Result<T> = anyhow::Result<T>;
type Result<T> = napi::Result<T>;
#[cfg(not(feature = "use-as-lib"))]
fn map_err(err: sqlx::Error) -> Error {
Error::from(anyhow::Error::from(err))
}
#[cfg(feature = "use-as-lib")]
fn map_err(err: sqlx::Error) -> anyhow::Error {
anyhow::Error::from(err)
impl From<error::Error> for napi::Error {
fn from(err: error::Error) -> Self {
napi::Error::new(napi::Status::GenericFailure, err.to_string())
}
}
#[cfg(feature = "use-as-lib")]
@@ -32,16 +32,16 @@ pub type Data = Uint8Array;
#[napi(object)]
pub struct DocUpdate {
pub doc_id: String,
pub created_at: NaiveDateTime,
pub timestamp: NaiveDateTime,
#[napi(ts_type = "Uint8Array")]
pub data: Data,
pub bin: Data,
}
#[napi(object)]
pub struct DocRecord {
pub doc_id: String,
#[napi(ts_type = "Uint8Array")]
pub data: Data,
pub bin: Data,
pub timestamp: NaiveDateTime,
}
@@ -79,243 +79,354 @@ pub struct ListedBlob {
}
#[napi]
pub struct DocStorage {
storage: storage::SqliteDocStorage,
pub struct DocStoragePool {
pool: SqliteDocStoragePool,
}
#[napi]
impl DocStorage {
impl DocStoragePool {
#[napi(constructor, async_runtime)]
pub fn new(path: String) -> Result<Self> {
pub fn new() -> Result<Self> {
Ok(Self {
storage: storage::SqliteDocStorage::new(path),
pool: SqliteDocStoragePool::default(),
})
}
#[napi]
/// Initialize the database and run migrations.
pub async fn connect(&self) -> Result<()> {
self.storage.connect().await.map_err(map_err)
}
#[napi]
pub async fn close(&self) -> Result<()> {
self.storage.close().await;
pub async fn connect(&self, universal_id: String, path: String) -> Result<()> {
self.pool.connect(universal_id, path).await?;
Ok(())
}
#[napi(getter)]
pub async fn is_closed(&self) -> Result<bool> {
Ok(self.storage.is_closed())
}
/**
* Flush the WAL file to the database file.
* See https://www.sqlite.org/pragma.html#pragma_wal_checkpoint:~:text=PRAGMA%20schema.wal_checkpoint%3B
*/
#[napi]
pub async fn checkpoint(&self) -> Result<()> {
self.storage.checkpoint().await.map_err(map_err)
pub async fn disconnect(&self, universal_id: String) -> Result<()> {
self.pool.disconnect(universal_id).await?;
Ok(())
}
#[napi]
pub async fn validate(&self) -> Result<bool> {
self.storage.validate().await.map_err(map_err)
}
#[napi]
pub async fn set_space_id(&self, space_id: String) -> Result<()> {
self.storage.set_space_id(space_id).await.map_err(map_err)
}
#[napi]
pub async fn push_update(&self, doc_id: String, update: Uint8Array) -> Result<NaiveDateTime> {
pub async fn set_space_id(&self, universal_id: String, space_id: String) -> Result<()> {
self
.storage
.push_update(doc_id, update)
.await
.map_err(map_err)
.pool
.ensure_storage(universal_id)?
.set_space_id(space_id)
.await?;
Ok(())
}
#[napi]
pub async fn get_doc_snapshot(&self, doc_id: String) -> Result<Option<DocRecord>> {
self.storage.get_doc_snapshot(doc_id).await.map_err(map_err)
pub async fn push_update(
&self,
universal_id: String,
doc_id: String,
update: Uint8Array,
) -> Result<NaiveDateTime> {
Ok(
self
.pool
.ensure_storage(universal_id)?
.push_update(doc_id, update)
.await?,
)
}
#[napi]
pub async fn set_doc_snapshot(&self, snapshot: DocRecord) -> Result<bool> {
self
.storage
.set_doc_snapshot(snapshot)
.await
.map_err(map_err)
pub async fn get_doc_snapshot(
&self,
universal_id: String,
doc_id: String,
) -> Result<Option<DocRecord>> {
Ok(
self
.pool
.ensure_storage(universal_id)?
.get_doc_snapshot(doc_id)
.await?,
)
}
#[napi]
pub async fn get_doc_updates(&self, doc_id: String) -> Result<Vec<DocUpdate>> {
self.storage.get_doc_updates(doc_id).await.map_err(map_err)
pub async fn set_doc_snapshot(&self, universal_id: String, snapshot: DocRecord) -> Result<bool> {
Ok(
self
.pool
.ensure_storage(universal_id)?
.set_doc_snapshot(snapshot)
.await?,
)
}
#[napi]
pub async fn get_doc_updates(
&self,
universal_id: String,
doc_id: String,
) -> Result<Vec<DocUpdate>> {
Ok(
self
.pool
.ensure_storage(universal_id)?
.get_doc_updates(doc_id)
.await?,
)
}
#[napi]
pub async fn mark_updates_merged(
&self,
universal_id: String,
doc_id: String,
updates: Vec<NaiveDateTime>,
) -> Result<u32> {
Ok(
self
.pool
.ensure_storage(universal_id)?
.mark_updates_merged(doc_id, updates)
.await?,
)
}
#[napi]
pub async fn delete_doc(&self, universal_id: String, doc_id: String) -> Result<()> {
self
.storage
.mark_updates_merged(doc_id, updates)
.await
.map_err(map_err)
.pool
.ensure_storage(universal_id)?
.delete_doc(doc_id)
.await?;
Ok(())
}
#[napi]
pub async fn delete_doc(&self, doc_id: String) -> Result<()> {
self.storage.delete_doc(doc_id).await.map_err(map_err)
pub async fn get_doc_clocks(
&self,
universal_id: String,
after: Option<NaiveDateTime>,
) -> Result<Vec<DocClock>> {
Ok(
self
.pool
.ensure_storage(universal_id)?
.get_doc_clocks(after)
.await?,
)
}
#[napi]
pub async fn get_doc_clocks(&self, after: Option<NaiveDateTime>) -> Result<Vec<DocClock>> {
self.storage.get_doc_clocks(after).await.map_err(map_err)
pub async fn get_doc_clock(
&self,
universal_id: String,
doc_id: String,
) -> Result<Option<DocClock>> {
Ok(
self
.pool
.ensure_storage(universal_id)?
.get_doc_clock(doc_id)
.await?,
)
}
#[napi]
pub async fn get_doc_clock(&self, doc_id: String) -> Result<Option<DocClock>> {
self.storage.get_doc_clock(doc_id).await.map_err(map_err)
pub async fn get_blob(&self, universal_id: String, key: String) -> Result<Option<Blob>> {
Ok(
self
.pool
.ensure_storage(universal_id)?
.get_blob(key)
.await?,
)
}
#[napi]
pub async fn get_blob(&self, key: String) -> Result<Option<Blob>> {
self.storage.get_blob(key).await.map_err(map_err)
}
#[napi]
pub async fn set_blob(&self, blob: SetBlob) -> Result<()> {
self.storage.set_blob(blob).await.map_err(map_err)
}
#[napi]
pub async fn delete_blob(&self, key: String, permanently: bool) -> Result<()> {
pub async fn set_blob(&self, universal_id: String, blob: SetBlob) -> Result<()> {
self
.storage
.pool
.ensure_storage(universal_id)?
.set_blob(blob)
.await?;
Ok(())
}
#[napi]
pub async fn delete_blob(
&self,
universal_id: String,
key: String,
permanently: bool,
) -> Result<()> {
self
.pool
.ensure_storage(universal_id)?
.delete_blob(key, permanently)
.await
.map_err(map_err)
.await?;
Ok(())
}
#[napi]
pub async fn release_blobs(&self) -> Result<()> {
self.storage.release_blobs().await.map_err(map_err)
}
#[napi]
pub async fn list_blobs(&self) -> Result<Vec<ListedBlob>> {
self.storage.list_blobs().await.map_err(map_err)
}
#[napi]
pub async fn get_peer_remote_clocks(&self, peer: String) -> Result<Vec<DocClock>> {
pub async fn release_blobs(&self, universal_id: String) -> Result<()> {
self
.storage
.get_peer_remote_clocks(peer)
.await
.map_err(map_err)
.pool
.ensure_storage(universal_id)?
.release_blobs()
.await?;
Ok(())
}
#[napi]
pub async fn get_peer_remote_clock(&self, peer: String, doc_id: String) -> Result<DocClock> {
self
.storage
.get_peer_remote_clock(peer, doc_id)
.await
.map_err(map_err)
pub async fn list_blobs(&self, universal_id: String) -> Result<Vec<ListedBlob>> {
Ok(self.pool.ensure_storage(universal_id)?.list_blobs().await?)
}
#[napi]
pub async fn get_peer_remote_clocks(
&self,
universal_id: String,
peer: String,
) -> Result<Vec<DocClock>> {
Ok(
self
.pool
.ensure_storage(universal_id)?
.get_peer_remote_clocks(peer)
.await?,
)
}
#[napi]
pub async fn get_peer_remote_clock(
&self,
universal_id: String,
peer: String,
doc_id: String,
) -> Result<DocClock> {
Ok(
self
.pool
.ensure_storage(universal_id)?
.get_peer_remote_clock(peer, doc_id)
.await?,
)
}
#[napi]
pub async fn set_peer_remote_clock(
&self,
universal_id: String,
peer: String,
doc_id: String,
clock: NaiveDateTime,
) -> Result<()> {
self
.storage
.pool
.ensure_storage(universal_id)?
.set_peer_remote_clock(peer, doc_id, clock)
.await
.map_err(map_err)
.await?;
Ok(())
}
#[napi]
pub async fn get_peer_pulled_remote_clocks(&self, peer: String) -> Result<Vec<DocClock>> {
self
.storage
.get_peer_pulled_remote_clocks(peer)
.await
.map_err(map_err)
pub async fn get_peer_pulled_remote_clocks(
&self,
universal_id: String,
peer: String,
) -> Result<Vec<DocClock>> {
Ok(
self
.pool
.ensure_storage(universal_id)?
.get_peer_pulled_remote_clocks(peer)
.await?,
)
}
#[napi]
pub async fn get_peer_pulled_remote_clock(
&self,
universal_id: String,
peer: String,
doc_id: String,
) -> Result<DocClock> {
self
.storage
.get_peer_pulled_remote_clock(peer, doc_id)
.await
.map_err(map_err)
Ok(
self
.pool
.ensure_storage(universal_id)?
.get_peer_pulled_remote_clock(peer, doc_id)
.await?,
)
}
#[napi]
pub async fn set_peer_pulled_remote_clock(
&self,
universal_id: String,
peer: String,
doc_id: String,
clock: NaiveDateTime,
) -> Result<()> {
self
.storage
.pool
.ensure_storage(universal_id)?
.set_peer_pulled_remote_clock(peer, doc_id, clock)
.await
.map_err(map_err)
.await?;
Ok(())
}
#[napi]
pub async fn get_peer_pushed_clocks(&self, peer: String) -> Result<Vec<DocClock>> {
self
.storage
.get_peer_pushed_clocks(peer)
.await
.map_err(map_err)
pub async fn get_peer_pushed_clocks(
&self,
universal_id: String,
peer: String,
) -> Result<Vec<DocClock>> {
Ok(
self
.pool
.ensure_storage(universal_id)?
.get_peer_pushed_clocks(peer)
.await?,
)
}
#[napi]
pub async fn get_peer_pushed_clock(&self, peer: String, doc_id: String) -> Result<DocClock> {
self
.storage
.get_peer_pushed_clock(peer, doc_id)
.await
.map_err(map_err)
pub async fn get_peer_pushed_clock(
&self,
universal_id: String,
peer: String,
doc_id: String,
) -> Result<DocClock> {
Ok(
self
.pool
.ensure_storage(universal_id)?
.get_peer_pushed_clock(peer, doc_id)
.await?,
)
}
#[napi]
pub async fn set_peer_pushed_clock(
&self,
universal_id: String,
peer: String,
doc_id: String,
clock: NaiveDateTime,
) -> Result<()> {
self
.storage
.pool
.ensure_storage(universal_id)?
.set_peer_pushed_clock(peer, doc_id, clock)
.await
.map_err(map_err)
.await?;
Ok(())
}
#[napi]
pub async fn clear_clocks(&self) -> Result<()> {
self.storage.clear_clocks().await.map_err(map_err)
pub async fn clear_clocks(&self, universal_id: String) -> Result<()> {
self
.pool
.ensure_storage(universal_id)?
.clear_clocks()
.await?;
Ok(())
}
}

View File

@@ -0,0 +1,54 @@
use dashmap::{mapref::one::RefMut, DashMap, Entry};
use super::{
error::{Error, Result},
storage::SqliteDocStorage,
};
#[derive(Default)]
pub struct SqliteDocStoragePool {
inner: DashMap<String, SqliteDocStorage>,
}
impl SqliteDocStoragePool {
fn get_or_create_storage<'a>(
&'a self,
universal_id: String,
path: &str,
) -> RefMut<'a, String, SqliteDocStorage> {
let entry = self.inner.entry(universal_id);
if let Entry::Occupied(storage) = entry {
return storage.into_ref();
}
let storage = SqliteDocStorage::new(path.to_string());
entry.or_insert(storage)
}
pub fn ensure_storage<'a>(
&'a self,
universal_id: String,
) -> Result<RefMut<'a, String, SqliteDocStorage>> {
let entry = self.inner.entry(universal_id);
if let Entry::Occupied(storage) = entry {
Ok(storage.into_ref())
} else {
Err(Error::InvalidOperation)
}
}
/// Initialize the database and run migrations.
pub async fn connect(&self, universal_id: String, path: String) -> Result<()> {
let storage = self.get_or_create_storage(universal_id.to_owned(), &path);
storage.connect().await?;
Ok(())
}
pub async fn disconnect(&self, universal_id: String) -> Result<()> {
let storage = self.ensure_storage(universal_id.to_owned())?;
storage.close().await;
self.inner.remove(&universal_id);
Ok(())
}
}

View File

@@ -5,7 +5,7 @@ use sqlx::{
Pool, Row,
};
pub type Result<T> = std::result::Result<T, sqlx::Error>;
use super::error::Result;
pub struct SqliteDocStorage {
pub pool: Pool<Sqlite>,
@@ -52,7 +52,7 @@ impl SqliteDocStorage {
}
pub async fn connect(&self) -> Result<()> {
if !Sqlite::database_exists(&self.path).await.unwrap_or(false) {
if !Sqlite::database_exists(&self.path).await? {
Sqlite::create_database(&self.path).await?;
};
@@ -79,7 +79,6 @@ impl SqliteDocStorage {
///
/// Flush the WAL file to the database file.
/// See https://www.sqlite.org/pragma.html#pragma_wal_checkpoint:~:text=PRAGMA%20schema.wal_checkpoint%3B
///
pub async fn checkpoint(&self) -> Result<()> {
sqlx::query("PRAGMA wal_checkpoint(FULL);")
.execute(&self.pool)

View File

@@ -1,28 +1,32 @@
use chrono::NaiveDateTime;
use super::storage::{Result, SqliteDocStorage};
use super::DocClock;
use super::{error::Result, storage::SqliteDocStorage};
impl SqliteDocStorage {
pub async fn get_peer_remote_clocks(&self, peer: String) -> Result<Vec<DocClock>> {
sqlx::query_as!(
let result = sqlx::query_as!(
DocClock,
"SELECT doc_id, remote_clock as timestamp FROM peer_clocks WHERE peer = ?",
peer
)
.fetch_all(&self.pool)
.await
.await?;
Ok(result)
}
pub async fn get_peer_remote_clock(&self, peer: String, doc_id: String) -> Result<DocClock> {
sqlx::query_as!(
let result = sqlx::query_as!(
DocClock,
"SELECT doc_id, remote_clock as timestamp FROM peer_clocks WHERE peer = ? AND doc_id = ?",
peer,
doc_id
)
.fetch_one(&self.pool)
.await
.await?;
Ok(result)
}
pub async fn set_peer_remote_clock(
@@ -48,13 +52,15 @@ impl SqliteDocStorage {
}
pub async fn get_peer_pulled_remote_clocks(&self, peer: String) -> Result<Vec<DocClock>> {
sqlx::query_as!(
let result = sqlx::query_as!(
DocClock,
"SELECT doc_id, pulled_remote_clock as timestamp FROM peer_clocks WHERE peer = ?",
peer
)
.fetch_all(&self.pool)
.await
.await?;
Ok(result)
}
pub async fn get_peer_pulled_remote_clock(
@@ -62,14 +68,16 @@ impl SqliteDocStorage {
peer: String,
doc_id: String,
) -> Result<DocClock> {
sqlx::query_as!(
let result = sqlx::query_as!(
DocClock,
"SELECT doc_id, pulled_remote_clock as timestamp FROM peer_clocks WHERE peer = ? AND doc_id = ?",
peer,
doc_id
)
.fetch_one(&self.pool)
.await
.await?;
Ok(result)
}
pub async fn set_peer_pulled_remote_clock(
@@ -95,24 +103,28 @@ impl SqliteDocStorage {
}
pub async fn get_peer_pushed_clocks(&self, peer: String) -> Result<Vec<DocClock>> {
sqlx::query_as!(
let result = sqlx::query_as!(
DocClock,
"SELECT doc_id, pushed_clock as timestamp FROM peer_clocks WHERE peer = ?",
peer
)
.fetch_all(&self.pool)
.await
.await?;
Ok(result)
}
pub async fn get_peer_pushed_clock(&self, peer: String, doc_id: String) -> Result<DocClock> {
sqlx::query_as!(
let result = sqlx::query_as!(
DocClock,
"SELECT doc_id, pushed_clock as timestamp FROM peer_clocks WHERE peer = ? AND doc_id = ?",
peer,
doc_id
)
.fetch_one(&self.pool)
.await
.await?;
Ok(result)
}
pub async fn set_peer_pushed_clock(