mirror of https://github.com/toeverything/AFFiNE.git

feat(nbstore): add sqlite implementation (#8811)
@@ -1 +0,0 @@
-DATABASE_URL="sqlite:affine.db"
@@ -6,38 +6,17 @@ version = "0.0.0"
[lib]
crate-type = ["rlib", "cdylib"]

[features]
noop = ["napi/noop", "napi-derive/noop"]

[[bench]]
name = "hashcash"
harness = false

[dependencies]
affine_common = { workspace = true }
affine_schema = { path = "./schema" }
anyhow = { workspace = true }
chrono = { workspace = true }
criterion2 = { workspace = true }
affine_sqlite_v1 = { path = "./sqlite_v1" }
affine_nbstore = { path = "./nbstore" }
napi = { workspace = true }
napi-derive = { workspace = true }
notify = { workspace = true, features = ["serde"] }
once_cell = { workspace = true }
parking_lot = { workspace = true }
rand = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
sha3 = { workspace = true }
sqlx = { workspace = true, default-features = false, features = ["chrono", "macros", "migrate", "runtime-tokio", "sqlite", "tls-rustls"] }
tokio = { workspace = true, features = ["full"] }
uuid = { workspace = true, features = ["fast-rng", "serde", "v4"] }

[dev-dependencies]
rayon = { workspace = true }

[build-dependencies]
affine_schema = { path = "./schema" }
dotenv = { workspace = true }
napi-build = { workspace = true }
sqlx = { workspace = true, default-features = false, features = ["chrono", "json", "macros", "migrate", "runtime-tokio", "sqlite", "tls-rustls"] }
tokio = { workspace = true, features = ["full"] }

@@ -1,6 +1,7 @@
-import test from 'ava';
import { fileURLToPath } from 'node:url';

+import test from 'ava';
+
import { SqliteConnection, ValidationResult } from '../index';

test('db validate', async t => {

@@ -1,28 +0,0 @@
use std::hint::black_box;

use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};

use affine_native::hashcash::Stamp;

fn bench_hashcash(c: &mut Criterion) {
  let mut group = c.benchmark_group("hashcash");

  group.bench_function(BenchmarkId::from_parameter("Generate"), |b| {
    b.iter(|| {
      black_box(Stamp::mint("test".to_string(), Some(20)).format());
    });
  });

  group.bench_function(BenchmarkId::from_parameter("Verify"), |b| {
    b.iter(|| {
      black_box(
        Stamp::try_from("1:20:20241114061212:test::RsRAAkoxjr4FattQ:292f0d")
          .unwrap()
          .check(20, "test"),
      );
    });
  });
}

criterion_group!(benches, bench_hashcash);
criterion_main!(benches);

@@ -1,34 +1,5 @@
use sqlx::sqlite::SqliteConnectOptions;
use std::fs;

#[tokio::main]
async fn main() -> Result<(), std::io::Error> {
  dotenv::dotenv().ok();

  // always start with a fresh database to have
  // latest db schema
  let db_path = "../../../affine.db";

  // check if db exists and then remove file
  if fs::metadata(db_path).is_ok() {
    fs::remove_file(db_path)?;
  }

  #[cfg(not(feature = "noop"))]
  napi_build::setup();
  let options = SqliteConnectOptions::new()
    .filename(db_path)
    .journal_mode(sqlx::sqlite::SqliteJournalMode::Off)
    .locking_mode(sqlx::sqlite::SqliteLockingMode::Exclusive)
    .create_if_missing(true);
  let pool = sqlx::sqlite::SqlitePoolOptions::new()
    .max_connections(1)
    .connect_with(options)
    .await
    .unwrap();
  sqlx::query(affine_schema::SCHEMA)
    .execute(&pool)
    .await
    .unwrap();
  Ok(())
}

packages/frontend/native/event.d.ts (vendored)
@@ -1,43 +0,0 @@
export interface NotifyEvent {
  type: EventKind;
  paths: string[];
}

export type EventKind =
  | 'any'
  | 'other'
  | {
      remove: {
        kind: 'any' | 'file' | 'folder' | 'other';
      };
    }
  | {
      create: {
        kind: 'any' | 'file' | 'folder' | 'other';
      };
    }
  | {
      modify:
        | {
            kind: 'any' | 'other';
          }
        | {
            kind: 'data';
            mode: 'any' | 'size' | 'content' | 'other';
          }
        | {
            kind: 'metadata';
            mode:
              | 'any'
              | 'access-time'
              | 'write-time'
              | 'permissions'
              | 'ownership'
              | 'extended'
              | 'other';
          }
        | {
            kind: 'rename';
            mode: 'any' | 'to' | 'from' | 'both' | 'other';
          };
    };

packages/frontend/native/index.d.ts (vendored)
@@ -1,5 +1,43 @@
/* auto-generated by NAPI-RS */
/* eslint-disable */
export declare class DocStorage {
  constructor(path: string)
  /** Initialize the database and run migrations. */
  connect(): Promise<void>
  close(): Promise<void>
  get isClosed(): Promise<boolean>
  /**
   * Flush the WAL file to the database file.
   * See https://www.sqlite.org/pragma.html#pragma_wal_checkpoint:~:text=PRAGMA%20schema.wal_checkpoint%3B
   */
  checkpoint(): Promise<void>
  validate(): Promise<boolean>
  setSpaceId(spaceId: string): Promise<void>
  pushUpdate(docId: string, update: Uint8Array): Promise<Date>
  getDocSnapshot(docId: string): Promise<DocRecord | null>
  setDocSnapshot(snapshot: DocRecord): Promise<boolean>
  getDocUpdates(docId: string): Promise<Array<DocUpdate>>
  markUpdatesMerged(docId: string, updates: Array<Date>): Promise<number>
  deleteDoc(docId: string): Promise<void>
  getDocClocks(after?: Date | undefined | null): Promise<Array<DocClock>>
  getDocClock(docId: string): Promise<DocClock | null>
  getBlob(key: string): Promise<Blob | null>
  setBlob(blob: SetBlob): Promise<void>
  deleteBlob(key: string, permanently: boolean): Promise<void>
  releaseBlobs(): Promise<void>
  listBlobs(): Promise<Array<ListedBlob>>
  getPeerRemoteClocks(peer: string): Promise<Array<DocClock>>
  getPeerRemoteClock(peer: string, docId: string): Promise<DocClock>
  setPeerRemoteClock(peer: string, docId: string, clock: Date): Promise<void>
  getPeerPulledRemoteClocks(peer: string): Promise<Array<DocClock>>
  getPeerPulledRemoteClock(peer: string, docId: string): Promise<DocClock>
  setPeerPulledRemoteClock(peer: string, docId: string, clock: Date): Promise<void>
  getPeerPushedClocks(peer: string): Promise<Array<DocClock>>
  getPeerPushedClock(peer: string, docId: string): Promise<DocClock>
  setPeerPushedClock(peer: string, docId: string, clock: Date): Promise<void>
  clearClocks(): Promise<void>
}

export declare class SqliteConnection {
  constructor(path: string)
  connect(): Promise<void>

@@ -37,19 +75,57 @@ export declare class SqliteConnection {
  checkpoint(): Promise<void>
}

export interface Blob {
  key: string
  data: Uint8Array
  mime: string
  size: number
  createdAt: Date
}

export interface BlobRow {
  key: string
  data: Buffer
  timestamp: Date
}

export interface DocClock {
  docId: string
  timestamp: Date
}

export interface DocRecord {
  docId: string
  data: Uint8Array
  timestamp: Date
}

export interface DocUpdate {
  docId: string
  createdAt: Date
  data: Uint8Array
}

export interface InsertRow {
  docId?: string
  data: Uint8Array
}

export interface ListedBlob {
  key: string
  size: number
  mime: string
  createdAt: Date
}

export declare function mintChallengeResponse(resource: string, bits?: number | undefined | null): Promise<string>

export interface SetBlob {
  key: string
  data: Uint8Array
  mime: string
}

export interface UpdateRow {
  id: number
  timestamp: Date

@@ -66,4 +142,3 @@ export declare enum ValidationResult {
}

export declare function verifyChallengeResponse(response: string, bits: number, resource: string): Promise<boolean>

@@ -1,9 +1,12 @@
// prettier-ignore
/* eslint-disable */
+// @ts-nocheck
/* auto-generated by NAPI-RS */

-const { readFileSync } = require('fs')
+const { createRequire } = require('node:module')
+require = createRequire(__filename)
+
+const { readFileSync } = require('node:fs')
let nativeBinding = null
const loadErrors = []

@@ -361,6 +364,7 @@ if (!nativeBinding) {
  throw new Error(`Failed to load native binding`)
}

+module.exports.DocStorage = nativeBinding.DocStorage
module.exports.SqliteConnection = nativeBinding.SqliteConnection
module.exports.mintChallengeResponse = nativeBinding.mintChallengeResponse
module.exports.ValidationResult = nativeBinding.ValidationResult

packages/frontend/native/nbstore/Cargo.toml (new file)
@@ -0,0 +1,23 @@
[package]
edition = "2021"
name = "affine_nbstore"
version = "0.0.0"

[lib]
crate-type = ["rlib", "cdylib"]

[dependencies]
affine_schema = { path = "../schema" }
anyhow = { workspace = true }
chrono = { workspace = true }
napi = { workspace = true }
napi-derive = { workspace = true }
sqlx = { workspace = true, default-features = false, features = ["chrono", "macros", "migrate", "runtime-tokio", "sqlite", "tls-rustls"] }
tokio = { workspace = true, features = ["full"] }

[build-dependencies]
affine_schema = { path = "../schema" }
dotenvy = { workspace = true }
napi-build = { workspace = true }
sqlx = { workspace = true, default-features = false, features = ["chrono", "json", "macros", "migrate", "runtime-tokio", "sqlite", "tls-rustls"] }
tokio = { workspace = true, features = ["full"] }

packages/frontend/native/nbstore/build.rs (new file)
@@ -0,0 +1,33 @@
use affine_schema::get_migrator;
use sqlx::sqlite::SqliteConnectOptions;
use std::fs;

#[tokio::main]
async fn main() -> Result<(), std::io::Error> {
  napi_build::setup();

  // always start with a fresh database to have latest db schema
  let cwd = std::env::var("CARGO_MANIFEST_DIR").unwrap();
  let db_path = format!("{cwd}/affine.db");

  if fs::metadata(&db_path).is_ok() {
    fs::remove_file(&db_path)?;
  }

  let options = SqliteConnectOptions::new()
    .filename(&db_path)
    .journal_mode(sqlx::sqlite::SqliteJournalMode::Off)
    .locking_mode(sqlx::sqlite::SqliteLockingMode::Exclusive)
    .create_if_missing(true);
  let pool = sqlx::sqlite::SqlitePoolOptions::new()
    .max_connections(1)
    .connect_with(options)
    .await
    .unwrap();

  get_migrator().run(&pool).await.unwrap();

  println!("cargo::rustc-env=DATABASE_URL=sqlite://{db_path}");

  Ok(())
}

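Reviewer note: this build script exists mainly so that sqlx's compile-time checked macros have a real database to verify queries against — it recreates affine.db from the migrations on every build and exports its location through DATABASE_URL. A minimal sketch of the kind of query this enables elsewhere in the crate (the helper name is illustrative; the blobs table comes from the init_v2 migration):

use sqlx::{Pool, Sqlite};

// Illustrative helper only. Because build.rs sets DATABASE_URL,
// sqlx::query! can verify this SQL against the real schema at compile time.
async fn count_live_blobs(pool: &Pool<Sqlite>) -> Result<i64, sqlx::Error> {
  let row = sqlx::query!("SELECT COUNT(*) as len FROM blobs WHERE deleted_at IS NULL")
    .fetch_one(pool)
    .await?;
  Ok(row.len as i64)
}
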
packages/frontend/native/nbstore/src/blob.rs (new file)
@@ -0,0 +1,198 @@
use super::{storage::SqliteDocStorage, Blob, ListedBlob, SetBlob};

type Result<T> = std::result::Result<T, sqlx::Error>;

impl SqliteDocStorage {
  pub async fn get_blob(&self, key: String) -> Result<Option<Blob>> {
    sqlx::query_as!(
      Blob,
      "SELECT key, data, size, mime, created_at FROM blobs WHERE key = ? AND deleted_at IS NULL",
      key
    )
    .fetch_optional(&self.pool)
    .await
  }

  pub async fn set_blob(&self, blob: SetBlob) -> Result<()> {
    sqlx::query(
      r#"
      INSERT INTO blobs (key, data, mime, size)
      VALUES ($1, $2, $3, $4)
      ON CONFLICT(key)
      DO UPDATE SET data=$2, mime=$3, size=$4, deleted_at=NULL;"#,
    )
    .bind(blob.key)
    .bind(blob.data.as_ref())
    .bind(blob.mime)
    .bind(blob.data.len() as i64)
    .execute(&self.pool)
    .await?;

    Ok(())
  }

  pub async fn delete_blob(&self, key: String, permanently: bool) -> Result<()> {
    if permanently {
      sqlx::query("DELETE FROM blobs WHERE key = ?")
        .bind(&key)
        .execute(&self.pool)
        .await?;
    } else {
      sqlx::query("UPDATE blobs SET deleted_at = CURRENT_TIMESTAMP WHERE key = ?")
        .bind(&key)
        .execute(&self.pool)
        .await?;
    }

    Ok(())
  }

  pub async fn release_blobs(&self) -> Result<()> {
    sqlx::query("DELETE FROM blobs WHERE deleted_at IS NOT NULL;")
      .execute(&self.pool)
      .await?;

    Ok(())
  }

  pub async fn list_blobs(&self) -> Result<Vec<ListedBlob>> {
    sqlx::query_as!(
      ListedBlob,
      "SELECT key, size, mime, created_at FROM blobs WHERE deleted_at IS NULL ORDER BY created_at DESC;"
    )
    .fetch_all(&self.pool)
    .await
  }
}

#[cfg(test)]
mod tests {
  use napi::bindgen_prelude::Uint8Array;
  use sqlx::Row;

  use super::*;

  async fn get_storage() -> SqliteDocStorage {
    let storage = SqliteDocStorage::new(":memory:".to_string());
    storage.connect().await.unwrap();

    storage
  }

  #[tokio::test]
  async fn delete_blob() {
    let storage = get_storage().await;

    for i in 1..5u32 {
      storage
        .set_blob(SetBlob {
          key: format!("test_{}", i),
          data: Uint8Array::from(vec![0, 0]),
          mime: "text/plain".to_string(),
        })
        .await
        .unwrap();
    }

    let result = storage.get_blob("test_1".to_string()).await.unwrap();

    assert!(result.is_some());

    storage
      .delete_blob("test_".to_string(), false)
      .await
      .unwrap();

    let result = storage.get_blob("test".to_string()).await.unwrap();
    assert!(result.is_none());

    storage
      .delete_blob("test_2".to_string(), true)
      .await
      .unwrap();

    let result = storage.get_blob("test".to_string()).await.unwrap();
    assert!(result.is_none());
  }

  #[tokio::test]
  async fn list_blobs() {
    let storage = get_storage().await;

    let blobs = storage.list_blobs().await.unwrap();

    assert_eq!(blobs.len(), 0);

    for i in 1..5u32 {
      storage
        .set_blob(SetBlob {
          key: format!("test_{}", i),
          data: Uint8Array::from(vec![0, 0]),
          mime: "text/plain".to_string(),
        })
        .await
        .unwrap();
    }

    let blobs = storage.list_blobs().await.unwrap();

    assert_eq!(blobs.len(), 4);
    assert_eq!(
      blobs.iter().map(|b| b.key.as_str()).collect::<Vec<_>>(),
      vec!["test_1", "test_2", "test_3", "test_4"]
    );

    storage
      .delete_blob("test_2".to_string(), false)
      .await
      .unwrap();

    storage
      .delete_blob("test_3".to_string(), true)
      .await
      .unwrap();

    let query = sqlx::query("SELECT COUNT(*) as len FROM blobs;")
      .fetch_one(&storage.pool)
      .await
      .unwrap();

    assert_eq!(query.get::<i64, &str>("len"), 3);

    let blobs = storage.list_blobs().await.unwrap();
    assert_eq!(blobs.len(), 2);
    assert_eq!(
      blobs.iter().map(|b| b.key.as_str()).collect::<Vec<_>>(),
      vec!["test_1", "test_4"]
    );
  }

  #[tokio::test]
  async fn release_blobs() {
    let storage = get_storage().await;

    for i in 1..5u32 {
      storage
        .set_blob(SetBlob {
          key: format!("test_{}", i),
          data: Uint8Array::from(vec![0, 0]),
          mime: "text/plain".to_string(),
        })
        .await
        .unwrap();
    }

    storage
      .delete_blob("test_2".to_string(), false)
      .await
      .unwrap();
    storage.release_blobs().await.unwrap();

    let query = sqlx::query("SELECT COUNT(*) as len FROM blobs;")
      .fetch_one(&storage.pool)
      .await
      .unwrap();

    assert_eq!(query.get::<i64, &str>("len"), 3);
  }
}

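Reviewer note: the blob store above uses a two-phase deletion model — delete_blob(key, false) only stamps deleted_at, delete_blob(key, true) removes the row, and release_blobs() garbage-collects everything still soft-deleted. A small sketch of that lifecycle, assuming a connected SqliteDocStorage (the key and payload are illustrative):

use napi::bindgen_prelude::Uint8Array;

async fn blob_lifecycle(storage: &SqliteDocStorage) -> Result<(), sqlx::Error> {
  storage
    .set_blob(SetBlob {
      key: "avatar".to_string(),
      data: Uint8Array::from(vec![1, 2, 3]),
      mime: "image/png".to_string(),
    })
    .await?;

  // Soft delete: the row keeps its data but disappears from reads.
  storage.delete_blob("avatar".to_string(), false).await?;
  assert!(storage.get_blob("avatar".to_string()).await?.is_none());

  // Writing the same key again resurrects it: the upsert resets deleted_at.
  storage
    .set_blob(SetBlob {
      key: "avatar".to_string(),
      data: Uint8Array::from(vec![4, 5, 6]),
      mime: "image/png".to_string(),
    })
    .await?;

  // Permanently drop whatever is still soft-deleted.
  storage.release_blobs().await?;
  Ok(())
}
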
packages/frontend/native/nbstore/src/doc.rs (new file)
@@ -0,0 +1,449 @@
use chrono::NaiveDateTime;
use sqlx::{QueryBuilder, Row};

use super::storage::{Result, SqliteDocStorage};
use super::{DocClock, DocRecord, DocUpdate};

struct Meta {
  space_id: String,
}

impl SqliteDocStorage {
  pub async fn set_space_id(&self, space_id: String) -> Result<()> {
    // ensure only one record exists in table
    let result = sqlx::query_as!(Meta, "SELECT * FROM meta;")
      .fetch_optional(&self.pool)
      .await?;

    match result {
      Some(meta) => {
        if meta.space_id != space_id {
          sqlx::query("UPDATE meta SET space_id = $1;")
            .bind(&space_id)
            .execute(&self.pool)
            .await?;

          sqlx::query("UPDATE updates SET doc_id = $1 WHERE doc_id = $2;")
            .bind(&space_id)
            .bind(&meta.space_id)
            .execute(&self.pool)
            .await?;

          sqlx::query("UPDATE snapshots SET doc_id = $1 WHERE doc_id = $2;")
            .bind(&space_id)
            .bind(&meta.space_id)
            .execute(&self.pool)
            .await?;

          sqlx::query("UPDATE clocks SET doc_id = $1 WHERE doc_id = $2;")
            .bind(&space_id)
            .bind(&meta.space_id)
            .execute(&self.pool)
            .await?;

          sqlx::query("UPDATE peer_clocks SET doc_id = $1 WHERE doc_id = $2;")
            .bind(&space_id)
            .bind(&meta.space_id)
            .execute(&self.pool)
            .await?;
        }
      }
      None => {
        sqlx::query("INSERT INTO meta (space_id) VALUES ($1);")
          .bind(&space_id)
          .execute(&self.pool)
          .await?;
      }
    }

    Ok(())
  }

  pub async fn push_update<Update: AsRef<[u8]>>(
    &self,
    doc_id: String,
    update: Update,
  ) -> Result<NaiveDateTime> {
    let timestamp = chrono::Utc::now().naive_utc();
    let mut tx = self.pool.begin().await?;

    sqlx::query(r#"INSERT INTO updates (doc_id, data, created_at) VALUES ($1, $2, $3);"#)
      .bind(&doc_id)
      .bind(update.as_ref())
      .bind(timestamp)
      .execute(&mut *tx)
      .await?;

    sqlx::query(
      r#"
    INSERT INTO clocks (doc_id, timestamp) VALUES ($1, $2)
    ON CONFLICT(doc_id)
    DO UPDATE SET timestamp=$2;"#,
    )
    .bind(&doc_id)
    .bind(timestamp)
    .execute(&mut *tx)
    .await?;

    tx.commit().await?;

    Ok(timestamp)
  }

  pub async fn get_doc_snapshot(&self, doc_id: String) -> Result<Option<DocRecord>> {
    sqlx::query_as!(
      DocRecord,
      "SELECT doc_id, data, updated_at as timestamp FROM snapshots WHERE doc_id = ?",
      doc_id
    )
    .fetch_optional(&self.pool)
    .await
  }

  pub async fn set_doc_snapshot(&self, snapshot: DocRecord) -> Result<bool> {
    let result = sqlx::query(
      r#"
    INSERT INTO snapshots (doc_id, data, updated_at)
    VALUES ($1, $2, $3)
    ON CONFLICT(doc_id)
    DO UPDATE SET data=$2, updated_at=$3
    WHERE updated_at <= $3;"#,
    )
    .bind(snapshot.doc_id)
    .bind(snapshot.data.as_ref())
    .bind(snapshot.timestamp)
    .execute(&self.pool)
    .await?;

    Ok(result.rows_affected() == 1)
  }

  pub async fn get_doc_updates(&self, doc_id: String) -> Result<Vec<DocUpdate>> {
    sqlx::query_as!(
      DocUpdate,
      "SELECT doc_id, created_at, data FROM updates WHERE doc_id = ?",
      doc_id
    )
    .fetch_all(&self.pool)
    .await
  }

  pub async fn mark_updates_merged(
    &self,
    doc_id: String,
    updates: Vec<NaiveDateTime>,
  ) -> Result<u32> {
    let mut qb = QueryBuilder::new("DELETE FROM updates");

    qb.push(" WHERE doc_id = ");
    qb.push_bind(doc_id);
    qb.push(" AND created_at IN (");
    let mut separated = qb.separated(", ");
    updates.iter().for_each(|update| {
      separated.push_bind(update);
    });
    qb.push(");");

    let query = qb.build();

    let result = query.execute(&self.pool).await?;

    Ok(result.rows_affected() as u32)
  }

  pub async fn delete_doc(&self, doc_id: String) -> Result<()> {
    let mut tx = self.pool.begin().await?;

    sqlx::query("DELETE FROM updates WHERE doc_id = ?;")
      .bind(&doc_id)
      .execute(&mut *tx)
      .await?;

    sqlx::query("DELETE FROM snapshots WHERE doc_id = ?;")
      .bind(&doc_id)
      .execute(&mut *tx)
      .await?;

    sqlx::query("DELETE FROM clocks WHERE doc_id = ?;")
      .bind(&doc_id)
      .execute(&mut *tx)
      .await?;

    tx.commit().await
  }

  pub async fn get_doc_clocks(&self, after: Option<NaiveDateTime>) -> Result<Vec<DocClock>> {
    let query = if let Some(after) = after {
      sqlx::query("SELECT doc_id, timestamp FROM clocks WHERE timestamp > $1").bind(after)
    } else {
      sqlx::query("SELECT doc_id, timestamp FROM clocks")
    };

    let clocks = query.fetch_all(&self.pool).await?;

    Ok(
      clocks
        .iter()
        .map(|row| DocClock {
          doc_id: row.get("doc_id"),
          timestamp: row.get("timestamp"),
        })
        .collect(),
    )
  }

  pub async fn get_doc_clock(&self, doc_id: String) -> Result<Option<DocClock>> {
    sqlx::query_as!(
      DocClock,
      "SELECT doc_id, timestamp FROM clocks WHERE doc_id = ?",
      doc_id
    )
    .fetch_optional(&self.pool)
    .await
  }
}

#[cfg(test)]
mod tests {
  use chrono::{DateTime, Utc};
  use napi::bindgen_prelude::Uint8Array;

  use super::*;

  async fn get_storage() -> SqliteDocStorage {
    let storage = SqliteDocStorage::new(":memory:".to_string());
    storage.connect().await.unwrap();

    storage
  }

  #[tokio::test]
  async fn set_space_id() {
    let storage = get_storage().await;

    storage.set_space_id("test".to_string()).await.unwrap();

    let result = sqlx::query!("SELECT space_id FROM meta;")
      .fetch_one(&storage.pool)
      .await
      .unwrap();

    assert_eq!(result.space_id, "test");

    storage.set_space_id("test2".to_string()).await.unwrap();

    let result = sqlx::query!("SELECT space_id FROM meta;")
      .fetch_one(&storage.pool)
      .await
      .unwrap();

    assert_eq!(result.space_id, "test2");
  }

  #[tokio::test]
  async fn set_space_id_with_existing_doc() {
    let storage = get_storage().await;

    storage.set_space_id("test".to_string()).await.unwrap();
    storage
      .push_update("test".to_string(), vec![0, 0])
      .await
      .unwrap();
    storage
      .set_doc_snapshot(DocRecord {
        doc_id: "test".to_string(),
        data: Uint8Array::from(vec![0, 0]),
        timestamp: Utc::now().naive_utc(),
      })
      .await
      .unwrap();

    storage
      .set_peer_pulled_remote_clock(
        "remote".to_string(),
        "test".to_string(),
        Utc::now().naive_utc(),
      )
      .await
      .unwrap();

    storage.set_space_id("new_id".to_string()).await.unwrap();

    let result = sqlx::query!("SELECT space_id FROM meta;")
      .fetch_one(&storage.pool)
      .await
      .unwrap();

    assert_eq!(result.space_id, "new_id");

    let clocks = storage.get_doc_clocks(None).await.unwrap();

    assert_eq!(clocks[0].doc_id, "new_id");

    let clocks = storage
      .get_peer_pulled_remote_clock("remote".to_string(), "new_id".to_string())
      .await
      .unwrap();

    assert_eq!(clocks.doc_id, "new_id");

    let updates = storage.get_doc_updates("new_id".to_string()).await.unwrap();

    assert_eq!(updates.len(), 1);

    let snapshot = storage
      .get_doc_snapshot("new_id".to_string())
      .await
      .unwrap();

    assert!(snapshot.is_some());
  }

  #[tokio::test]
  async fn push_updates() {
    let storage = get_storage().await;

    let updates = vec![vec![0, 0], vec![0, 1], vec![1, 0], vec![1, 1]];

    for update in updates.iter() {
      storage
        .push_update("test".to_string(), update)
        .await
        .unwrap();
    }

    let result = storage.get_doc_updates("test".to_string()).await.unwrap();

    assert_eq!(result.len(), 4);
    assert_eq!(
      result.iter().map(|u| u.data.as_ref()).collect::<Vec<_>>(),
      updates
    );
  }

  #[tokio::test]
  async fn get_doc_snapshot() {
    let storage = get_storage().await;

    let none = storage.get_doc_snapshot("test".to_string()).await.unwrap();

    assert!(none.is_none());

    let snapshot = DocRecord {
      doc_id: "test".to_string(),
      data: Uint8Array::from(vec![0, 0]),
      timestamp: Utc::now().naive_utc(),
    };

    storage.set_doc_snapshot(snapshot).await.unwrap();

    let result = storage.get_doc_snapshot("test".to_string()).await.unwrap();

    assert!(result.is_some());
    assert_eq!(result.unwrap().data.as_ref(), vec![0, 0]);
  }

  #[tokio::test]
  async fn set_doc_snapshot() {
    let storage = get_storage().await;

    let snapshot = DocRecord {
      doc_id: "test".to_string(),
      data: Uint8Array::from(vec![0, 0]),
      timestamp: Utc::now().naive_utc(),
    };

    storage.set_doc_snapshot(snapshot).await.unwrap();

    let result = storage.get_doc_snapshot("test".to_string()).await.unwrap();

    assert!(result.is_some());
    assert_eq!(result.unwrap().data.as_ref(), vec![0, 0]);

    let snapshot = DocRecord {
      doc_id: "test".to_string(),
      data: Uint8Array::from(vec![0, 1]),
      timestamp: DateTime::from_timestamp_millis(Utc::now().timestamp_millis() - 1000)
        .unwrap()
        .naive_utc(),
    };

    // can't update because its timestamp is older
    storage.set_doc_snapshot(snapshot).await.unwrap();

    let result = storage.get_doc_snapshot("test".to_string()).await.unwrap();

    assert!(result.is_some());
    assert_eq!(result.unwrap().data.as_ref(), vec![0, 0]);
  }

  #[tokio::test]
  async fn get_doc_clocks() {
    let storage = get_storage().await;

    let clocks = storage.get_doc_clocks(None).await.unwrap();

    assert_eq!(clocks.len(), 0);

    for i in 1..5u32 {
      storage
        .push_update(format!("test_{i}"), vec![0, 0])
        .await
        .unwrap();
    }

    let clocks = storage.get_doc_clocks(None).await.unwrap();

    assert_eq!(clocks.len(), 4);
    assert_eq!(
      clocks.iter().map(|c| c.doc_id.as_str()).collect::<Vec<_>>(),
      vec!["test_1", "test_2", "test_3", "test_4"]
    );

    let clocks = storage
      .get_doc_clocks(Some(Utc::now().naive_utc()))
      .await
      .unwrap();

    assert_eq!(clocks.len(), 0);

    let clock = storage.get_doc_clock("test_1".to_string()).await.unwrap();

    assert!(clock.is_some());
    assert_eq!(clock.unwrap().doc_id, "test_1");
  }

  #[tokio::test]
  async fn mark_updates_merged() {
    let storage = get_storage().await;

    let updates = [vec![0, 0], vec![0, 1], vec![1, 0], vec![1, 1]];

    for update in updates.iter() {
      storage
        .push_update("test".to_string(), update)
        .await
        .unwrap();
    }

    let updates = storage.get_doc_updates("test".to_string()).await.unwrap();

    let result = storage
      .mark_updates_merged(
        "test".to_string(),
        updates
          .iter()
          .skip(1)
          .map(|u| u.created_at)
          .collect::<Vec<_>>(),
      )
      .await
      .unwrap();

    assert_eq!(result, 3);

    let updates = storage.get_doc_updates("test".to_string()).await.unwrap();

    assert_eq!(updates.len(), 1);
  }
}

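Reviewer note: taken together, the document API above is an append-then-compact pipeline. push_update records each incremental update and bumps the doc's clock in one transaction; a consumer later folds the updates into a snapshot and passes the exact created_at timestamps it consumed to mark_updates_merged, so updates that race in between survive. A sketch of that loop, assuming a connected storage; merge_all is a stand-in for the CRDT merge, which in AFFiNE happens on the JavaScript side:

use napi::bindgen_prelude::Uint8Array;

// Stand-in for the real CRDT merge (done in JS in AFFiNE); here it just
// concatenates the raw update payloads so the sketch compiles on its own.
fn merge_all<'a>(updates: impl Iterator<Item = &'a [u8]>) -> Vec<u8> {
  updates.fold(Vec::new(), |mut acc, u| {
    acc.extend_from_slice(u);
    acc
  })
}

async fn compact_doc(storage: &SqliteDocStorage, doc_id: &str) -> Result<(), sqlx::Error> {
  let updates = storage.get_doc_updates(doc_id.to_string()).await?;

  let merged = merge_all(updates.iter().map(|u| u.data.as_ref()));

  storage
    .set_doc_snapshot(DocRecord {
      doc_id: doc_id.to_string(),
      data: Uint8Array::from(merged),
      timestamp: chrono::Utc::now().naive_utc(),
    })
    .await?;

  // created_at doubles as the update identifier: delete only what we merged.
  let consumed = updates.iter().map(|u| u.created_at).collect::<Vec<_>>();
  storage
    .mark_updates_merged(doc_id.to_string(), consumed)
    .await?;
  Ok(())
}
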
packages/frontend/native/nbstore/src/lib.rs (new file)
@@ -0,0 +1,311 @@
mod blob;
mod doc;
mod storage;
mod sync;

use chrono::NaiveDateTime;
use napi::bindgen_prelude::*;
use napi_derive::napi;

fn map_err(err: sqlx::Error) -> napi::Error {
  napi::Error::from(anyhow::Error::from(err))
}

#[napi(object)]
pub struct DocUpdate {
  pub doc_id: String,
  pub created_at: NaiveDateTime,
  pub data: Uint8Array,
}

#[napi(object)]
pub struct DocRecord {
  pub doc_id: String,
  pub data: Uint8Array,
  pub timestamp: NaiveDateTime,
}

#[derive(Debug)]
#[napi(object)]
pub struct DocClock {
  pub doc_id: String,
  pub timestamp: NaiveDateTime,
}

#[napi(object)]
pub struct SetBlob {
  pub key: String,
  pub data: Uint8Array,
  pub mime: String,
}

#[napi(object)]
pub struct Blob {
  pub key: String,
  pub data: Uint8Array,
  pub mime: String,
  pub size: i64,
  pub created_at: NaiveDateTime,
}

#[napi(object)]
pub struct ListedBlob {
  pub key: String,
  pub size: i64,
  pub mime: String,
  pub created_at: NaiveDateTime,
}

#[napi]
pub struct DocStorage {
  storage: storage::SqliteDocStorage,
}

#[napi]
impl DocStorage {
  #[napi(constructor, async_runtime)]
  pub fn new(path: String) -> napi::Result<Self> {
    Ok(Self {
      storage: storage::SqliteDocStorage::new(path),
    })
  }

  #[napi]
  /// Initialize the database and run migrations.
  pub async fn connect(&self) -> napi::Result<()> {
    self.storage.connect().await.map_err(map_err)
  }

  #[napi]
  pub async fn close(&self) -> napi::Result<()> {
    self.storage.close().await;

    Ok(())
  }

  #[napi(getter)]
  pub async fn is_closed(&self) -> napi::Result<bool> {
    Ok(self.storage.is_closed())
  }

  /**
   * Flush the WAL file to the database file.
   * See https://www.sqlite.org/pragma.html#pragma_wal_checkpoint:~:text=PRAGMA%20schema.wal_checkpoint%3B
   */
  #[napi]
  pub async fn checkpoint(&self) -> napi::Result<()> {
    self.storage.checkpoint().await.map_err(map_err)
  }

  #[napi]
  pub async fn validate(&self) -> napi::Result<bool> {
    self.storage.validate().await.map_err(map_err)
  }

  #[napi]
  pub async fn set_space_id(&self, space_id: String) -> napi::Result<()> {
    self.storage.set_space_id(space_id).await.map_err(map_err)
  }

  #[napi]
  pub async fn push_update(
    &self,
    doc_id: String,
    update: Uint8Array,
  ) -> napi::Result<NaiveDateTime> {
    self
      .storage
      .push_update(doc_id, update)
      .await
      .map_err(map_err)
  }

  #[napi]
  pub async fn get_doc_snapshot(&self, doc_id: String) -> napi::Result<Option<DocRecord>> {
    self.storage.get_doc_snapshot(doc_id).await.map_err(map_err)
  }

  #[napi]
  pub async fn set_doc_snapshot(&self, snapshot: DocRecord) -> napi::Result<bool> {
    self
      .storage
      .set_doc_snapshot(snapshot)
      .await
      .map_err(map_err)
  }

  #[napi]
  pub async fn get_doc_updates(&self, doc_id: String) -> napi::Result<Vec<DocUpdate>> {
    self.storage.get_doc_updates(doc_id).await.map_err(map_err)
  }

  #[napi]
  pub async fn mark_updates_merged(
    &self,
    doc_id: String,
    updates: Vec<NaiveDateTime>,
  ) -> napi::Result<u32> {
    self
      .storage
      .mark_updates_merged(doc_id, updates)
      .await
      .map_err(map_err)
  }

  #[napi]
  pub async fn delete_doc(&self, doc_id: String) -> napi::Result<()> {
    self.storage.delete_doc(doc_id).await.map_err(map_err)
  }

  #[napi]
  pub async fn get_doc_clocks(&self, after: Option<NaiveDateTime>) -> napi::Result<Vec<DocClock>> {
    self.storage.get_doc_clocks(after).await.map_err(map_err)
  }

  #[napi]
  pub async fn get_doc_clock(&self, doc_id: String) -> napi::Result<Option<DocClock>> {
    self.storage.get_doc_clock(doc_id).await.map_err(map_err)
  }

  #[napi]
  pub async fn get_blob(&self, key: String) -> napi::Result<Option<Blob>> {
    self.storage.get_blob(key).await.map_err(map_err)
  }

  #[napi]
  pub async fn set_blob(&self, blob: SetBlob) -> napi::Result<()> {
    self.storage.set_blob(blob).await.map_err(map_err)
  }

  #[napi]
  pub async fn delete_blob(&self, key: String, permanently: bool) -> napi::Result<()> {
    self
      .storage
      .delete_blob(key, permanently)
      .await
      .map_err(map_err)
  }

  #[napi]
  pub async fn release_blobs(&self) -> napi::Result<()> {
    self.storage.release_blobs().await.map_err(map_err)
  }

  #[napi]
  pub async fn list_blobs(&self) -> napi::Result<Vec<ListedBlob>> {
    self.storage.list_blobs().await.map_err(map_err)
  }

  #[napi]
  pub async fn get_peer_remote_clocks(&self, peer: String) -> napi::Result<Vec<DocClock>> {
    self
      .storage
      .get_peer_remote_clocks(peer)
      .await
      .map_err(map_err)
  }

  #[napi]
  pub async fn get_peer_remote_clock(
    &self,
    peer: String,
    doc_id: String,
  ) -> napi::Result<DocClock> {
    self
      .storage
      .get_peer_remote_clock(peer, doc_id)
      .await
      .map_err(map_err)
  }

  #[napi]
  pub async fn set_peer_remote_clock(
    &self,
    peer: String,
    doc_id: String,
    clock: NaiveDateTime,
  ) -> napi::Result<()> {
    self
      .storage
      .set_peer_remote_clock(peer, doc_id, clock)
      .await
      .map_err(map_err)
  }

  #[napi]
  pub async fn get_peer_pulled_remote_clocks(&self, peer: String) -> napi::Result<Vec<DocClock>> {
    self
      .storage
      .get_peer_pulled_remote_clocks(peer)
      .await
      .map_err(map_err)
  }

  #[napi]
  pub async fn get_peer_pulled_remote_clock(
    &self,
    peer: String,
    doc_id: String,
  ) -> napi::Result<DocClock> {
    self
      .storage
      .get_peer_pulled_remote_clock(peer, doc_id)
      .await
      .map_err(map_err)
  }

  #[napi]
  pub async fn set_peer_pulled_remote_clock(
    &self,
    peer: String,
    doc_id: String,
    clock: NaiveDateTime,
  ) -> napi::Result<()> {
    self
      .storage
      .set_peer_pulled_remote_clock(peer, doc_id, clock)
      .await
      .map_err(map_err)
  }

  #[napi]
  pub async fn get_peer_pushed_clocks(&self, peer: String) -> napi::Result<Vec<DocClock>> {
    self
      .storage
      .get_peer_pushed_clocks(peer)
      .await
      .map_err(map_err)
  }

  #[napi]
  pub async fn get_peer_pushed_clock(
    &self,
    peer: String,
    doc_id: String,
  ) -> napi::Result<DocClock> {
    self
      .storage
      .get_peer_pushed_clock(peer, doc_id)
      .await
      .map_err(map_err)
  }

  #[napi]
  pub async fn set_peer_pushed_clock(
    &self,
    peer: String,
    doc_id: String,
    clock: NaiveDateTime,
  ) -> napi::Result<()> {
    self
      .storage
      .set_peer_pushed_clock(peer, doc_id, clock)
      .await
      .map_err(map_err)
  }

  #[napi]
  pub async fn clear_clocks(&self) -> napi::Result<()> {
    self.storage.clear_clocks().await.map_err(map_err)
  }
}

packages/frontend/native/nbstore/src/storage.rs (new file)
@@ -0,0 +1,129 @@
use affine_schema::get_migrator;
use sqlx::{
  migrate::MigrateDatabase,
  sqlite::{Sqlite, SqliteConnectOptions, SqlitePoolOptions},
  Pool, Row,
};

pub type Result<T> = std::result::Result<T, sqlx::Error>;

pub struct SqliteDocStorage {
  pub pool: Pool<Sqlite>,
  path: String,
}

impl SqliteDocStorage {
  pub fn new(path: String) -> Self {
    let sqlite_options = SqliteConnectOptions::new()
      .filename(&path)
      .foreign_keys(false)
      .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal);

    let mut pool_options = SqlitePoolOptions::new();

    if cfg!(test) && path == ":memory:" {
      pool_options = pool_options
        .min_connections(1)
        .max_connections(1)
        .idle_timeout(None)
        .max_lifetime(None);
    } else {
      pool_options = pool_options.max_connections(4);
    }

    Self {
      pool: pool_options.connect_lazy_with(sqlite_options),
      path,
    }
  }

  pub async fn validate(&self) -> Result<bool> {
    let record = sqlx::query("SELECT * FROM _sqlx_migrations ORDER BY installed_on ASC LIMIT 1;")
      .fetch_optional(&self.pool)
      .await;

    match record {
      Ok(Some(row)) => {
        let name: &str = row.try_get("description")?;
        Ok(name == "init_v2")
      }
      _ => Ok(false),
    }
  }

  pub async fn connect(&self) -> Result<()> {
    if !Sqlite::database_exists(&self.path).await.unwrap_or(false) {
      Sqlite::create_database(&self.path).await?;
    };

    self.migrate().await?;

    Ok(())
  }

  async fn migrate(&self) -> Result<()> {
    let migrator = get_migrator();
    migrator.run(&self.pool).await?;

    Ok(())
  }

  pub async fn close(&self) {
    self.pool.close().await
  }

  pub fn is_closed(&self) -> bool {
    self.pool.is_closed()
  }

  ///
  /// Flush the WAL file to the database file.
  /// See https://www.sqlite.org/pragma.html#pragma_wal_checkpoint:~:text=PRAGMA%20schema.wal_checkpoint%3B
  ///
  pub async fn checkpoint(&self) -> Result<()> {
    sqlx::query("PRAGMA wal_checkpoint(FULL);")
      .execute(&self.pool)
      .await?;

    Ok(())
  }
}

#[cfg(test)]
mod tests {
  use super::*;

  async fn get_storage() -> SqliteDocStorage {
    let storage = SqliteDocStorage::new(":memory:".to_string());
    storage.connect().await.unwrap();

    storage
  }

  #[tokio::test]
  async fn init_tables() {
    let storage = get_storage().await;

    sqlx::query("INSERT INTO meta (space_id) VALUES ($1);")
      .bind("test")
      .execute(&storage.pool)
      .await
      .unwrap();

    let record = sqlx::query!("SELECT space_id FROM meta;")
      .fetch_one(&storage.pool)
      .await
      .unwrap();

    assert_eq!(record.space_id, "test");
  }

  #[tokio::test]
  async fn validate_db() {
    let storage = get_storage().await;
    assert!(storage.validate().await.unwrap());

    let storage = SqliteDocStorage::new(":memory:".to_string());
    assert!(!storage.validate().await.unwrap());
  }
}

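Reviewer note: validate() above is how callers tell a v2 nbstore database apart from a v1 (or foreign) file — it checks that the oldest applied sqlx migration is init_v2. A sketch of an open sequence built on it, assuming the file is either new or already a v2 database (the path and error choice are illustrative):

async fn open_nbstore(path: &str) -> Result<SqliteDocStorage, sqlx::Error> {
  let storage = SqliteDocStorage::new(path.to_string());
  // connect() creates the file if needed and applies pending migrations.
  storage.connect().await?;

  if !storage.validate().await? {
    return Err(sqlx::Error::Protocol("not an nbstore v2 database".into()));
  }

  // Fold the WAL back into the main file before sharing the database.
  storage.checkpoint().await?;
  Ok(storage)
}
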
packages/frontend/native/nbstore/src/sync.rs (new file)
@@ -0,0 +1,289 @@
use chrono::NaiveDateTime;

use super::storage::{Result, SqliteDocStorage};
use super::DocClock;

impl SqliteDocStorage {
  pub async fn get_peer_remote_clocks(&self, peer: String) -> Result<Vec<DocClock>> {
    sqlx::query_as!(
      DocClock,
      "SELECT doc_id, remote_clock as timestamp FROM peer_clocks WHERE peer = ?",
      peer
    )
    .fetch_all(&self.pool)
    .await
  }

  pub async fn get_peer_remote_clock(&self, peer: String, doc_id: String) -> Result<DocClock> {
    sqlx::query_as!(
      DocClock,
      "SELECT doc_id, remote_clock as timestamp FROM peer_clocks WHERE peer = ? AND doc_id = ?",
      peer,
      doc_id
    )
    .fetch_one(&self.pool)
    .await
  }

  pub async fn set_peer_remote_clock(
    &self,
    peer: String,
    doc_id: String,
    clock: NaiveDateTime,
  ) -> Result<()> {
    sqlx::query(
      r#"
    INSERT INTO peer_clocks (peer, doc_id, remote_clock)
    VALUES ($1, $2, $3)
    ON CONFLICT(peer, doc_id)
    DO UPDATE SET remote_clock=$3 WHERE remote_clock < $3;"#,
    )
    .bind(peer)
    .bind(doc_id)
    .bind(clock)
    .execute(&self.pool)
    .await?;

    Ok(())
  }

  pub async fn get_peer_pulled_remote_clocks(&self, peer: String) -> Result<Vec<DocClock>> {
    sqlx::query_as!(
      DocClock,
      "SELECT doc_id, pulled_remote_clock as timestamp FROM peer_clocks WHERE peer = ?",
      peer
    )
    .fetch_all(&self.pool)
    .await
  }

  pub async fn get_peer_pulled_remote_clock(
    &self,
    peer: String,
    doc_id: String,
  ) -> Result<DocClock> {
    sqlx::query_as!(
      DocClock,
      "SELECT doc_id, pulled_remote_clock as timestamp FROM peer_clocks WHERE peer = ? AND doc_id = ?",
      peer,
      doc_id
    )
    .fetch_one(&self.pool)
    .await
  }

  pub async fn set_peer_pulled_remote_clock(
    &self,
    peer: String,
    doc_id: String,
    clock: NaiveDateTime,
  ) -> Result<()> {
    sqlx::query(
      r#"
    INSERT INTO peer_clocks (peer, doc_id, pulled_remote_clock)
    VALUES ($1, $2, $3)
    ON CONFLICT(peer, doc_id)
    DO UPDATE SET pulled_remote_clock=$3 WHERE pulled_remote_clock < $3;"#,
    )
    .bind(peer)
    .bind(doc_id)
    .bind(clock)
    .execute(&self.pool)
    .await?;

    Ok(())
  }

  pub async fn get_peer_pushed_clocks(&self, peer: String) -> Result<Vec<DocClock>> {
    sqlx::query_as!(
      DocClock,
      "SELECT doc_id, pushed_clock as timestamp FROM peer_clocks WHERE peer = ?",
      peer
    )
    .fetch_all(&self.pool)
    .await
  }

  pub async fn get_peer_pushed_clock(&self, peer: String, doc_id: String) -> Result<DocClock> {
    sqlx::query_as!(
      DocClock,
      "SELECT doc_id, pushed_clock as timestamp FROM peer_clocks WHERE peer = ? AND doc_id = ?",
      peer,
      doc_id
    )
    .fetch_one(&self.pool)
    .await
  }

  pub async fn set_peer_pushed_clock(
    &self,
    peer: String,
    doc_id: String,
    clock: NaiveDateTime,
  ) -> Result<()> {
    sqlx::query(
      r#"
    INSERT INTO peer_clocks (peer, doc_id, pushed_clock)
    VALUES ($1, $2, $3)
    ON CONFLICT(peer, doc_id)
    DO UPDATE SET pushed_clock=$3 WHERE pushed_clock < $3;"#,
    )
    .bind(peer)
    .bind(doc_id)
    .bind(clock)
    .execute(&self.pool)
    .await?;

    Ok(())
  }

  pub async fn clear_clocks(&self) -> Result<()> {
    sqlx::query("DELETE FROM peer_clocks;")
      .execute(&self.pool)
      .await?;

    Ok(())
  }
}

#[cfg(test)]
mod tests {
  use chrono::{DateTime, Utc};
  use sqlx::Row;

  use super::*;

  async fn get_storage() -> SqliteDocStorage {
    let storage = SqliteDocStorage::new(":memory:".to_string());
    storage.connect().await.unwrap();

    storage
  }

  #[tokio::test]
  async fn set_peer_clock() {
    let storage = get_storage().await;
    let peer = String::from("peer1");

    let clocks = storage.get_peer_remote_clocks(peer.clone()).await.unwrap();

    assert!(clocks.is_empty());

    let clock = Utc::now().naive_utc();
    storage
      .set_peer_remote_clock(peer.clone(), "doc1".to_string(), clock)
      .await
      .unwrap();

    let clocks = storage.get_peer_remote_clocks(peer.clone()).await.unwrap();

    assert_eq!(clocks.len(), 1);
    assert_eq!(clocks.first().unwrap().doc_id, "doc1");
    assert_eq!(clocks.first().unwrap().timestamp, clock);
  }

  #[tokio::test]
  async fn set_peer_pushed_clock() {
    let storage = get_storage().await;
    let peer = String::from("peer1");

    let clocks = storage.get_peer_pushed_clocks(peer.clone()).await.unwrap();

    assert!(clocks.is_empty());

    let clock = Utc::now().naive_utc();
    storage
      .set_peer_pushed_clock(peer.clone(), "doc1".to_string(), clock)
      .await
      .unwrap();

    let clocks = storage.get_peer_pushed_clocks(peer.clone()).await.unwrap();

    assert_eq!(clocks.len(), 1);
    assert_eq!(clocks.first().unwrap().doc_id, "doc1");
    assert_eq!(clocks.first().unwrap().timestamp, clock);
  }

  #[tokio::test]
  async fn default_clocks() {
    let storage = get_storage().await;
    let peer = String::from("peer1");

    storage
      .set_peer_remote_clock(peer.clone(), "doc1".to_string(), Utc::now().naive_utc())
      .await
      .unwrap();
    storage
      .set_peer_pushed_clock(peer.clone(), "doc2".to_string(), Utc::now().naive_utc())
      .await
      .unwrap();
    storage
      .set_peer_pulled_remote_clock(peer.clone(), "doc3".to_string(), Utc::now().naive_utc())
      .await
      .unwrap();

    let record = sqlx::query("SELECT * FROM peer_clocks WHERE peer = ? AND doc_id = ?")
      .bind(peer.clone())
      .bind("doc1")
      .fetch_one(&storage.pool)
      .await
      .unwrap();

    assert_eq!(
      record.get::<NaiveDateTime, &str>("pushed_clock"),
      DateTime::from_timestamp(0, 0).unwrap().naive_utc()
    );

    let record = sqlx::query("SELECT * FROM peer_clocks WHERE peer = ? AND doc_id = ?")
      .bind(peer.clone())
      .bind("doc2")
      .fetch_one(&storage.pool)
      .await
      .unwrap();
    assert_eq!(
      record.get::<NaiveDateTime, &str>("remote_clock"),
      DateTime::from_timestamp(0, 0).unwrap().naive_utc()
    );

    let record = sqlx::query("SELECT * FROM peer_clocks WHERE peer = ? AND doc_id = ?")
      .bind(peer.clone())
      .bind("doc3")
      .fetch_one(&storage.pool)
      .await
      .unwrap();

    assert_eq!(
      record.get::<NaiveDateTime, &str>("remote_clock"),
      DateTime::from_timestamp(0, 0).unwrap().naive_utc()
    );
  }

  #[tokio::test]
  async fn clear_clocks() {
    let storage = get_storage().await;
    let peer = String::from("peer1");

    storage
      .set_peer_remote_clock(peer.clone(), "doc1".to_string(), Utc::now().naive_utc())
      .await
      .unwrap();
    storage
      .set_peer_pushed_clock(peer.clone(), "doc2".to_string(), Utc::now().naive_utc())
      .await
      .unwrap();

    let clocks = storage.get_peer_remote_clocks(peer.clone()).await.unwrap();
    assert_eq!(clocks.len(), 2);

    let clocks = storage.get_peer_pushed_clocks(peer.clone()).await.unwrap();
    assert_eq!(clocks.len(), 2);

    storage.clear_clocks().await.unwrap();

    let clocks = storage.get_peer_remote_clocks(peer.clone()).await.unwrap();
    assert!(clocks.is_empty());

    let clocks = storage.get_peer_pushed_clocks(peer.clone()).await.unwrap();
    assert!(clocks.is_empty());
  }
}

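Reviewer note: every setter above uses the same guarded upsert (DO UPDATE ... WHERE clock < $3), so clocks are monotonic per (peer, doc_id) — replayed or out-of-order sync messages can never move a clock backwards. A small sketch of the guarantee (peer and doc ids are illustrative):

use chrono::{Duration, Utc};

async fn clocks_never_regress(storage: &SqliteDocStorage) -> Result<(), sqlx::Error> {
  let newer = Utc::now().naive_utc();
  let older = newer - Duration::seconds(10);

  storage
    .set_peer_remote_clock("peer1".to_string(), "doc1".to_string(), newer)
    .await?;
  // Silently ignored: the WHERE remote_clock < $3 guard filters it out.
  storage
    .set_peer_remote_clock("peer1".to_string(), "doc1".to_string(), older)
    .await?;

  let clock = storage
    .get_peer_remote_clock("peer1".to_string(), "doc1".to_string())
    .await?;
  assert_eq!(clock.timestamp, newer);
  Ok(())
}
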
@@ -13,9 +13,7 @@
    "aarch64-unknown-linux-gnu",
    "aarch64-pc-windows-msvc"
  ],
-  "ts": {
-    "constEnum": false
-  }
+  "constEnum": false
},
"license": "MIT",
"ava": {

@@ -2,3 +2,6 @@
edition = "2021"
name = "affine_schema"
version = "0.0.0"
+
+[dependencies]
+sqlx = { workspace = true, default-features = false, features = ["migrate"] }

@@ -1,29 +1,96 @@
-// TODO
-// dynamic create it from JavaScript side
-// and remove this crate then.
-pub const SCHEMA: &str = r#"CREATE TABLE IF NOT EXISTS "updates" (
-id INTEGER PRIMARY KEY AUTOINCREMENT,
-data BLOB NOT NULL,
-timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
-doc_id TEXT
-);
-CREATE TABLE IF NOT EXISTS "blobs" (
-key TEXT PRIMARY KEY NOT NULL,
-data BLOB NOT NULL,
-timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
-);
-CREATE TABLE IF NOT EXISTS "version_info" (
-version NUMBER NOT NULL,
-timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
-);
-CREATE TABLE IF NOT EXISTS "server_clock" (
-key TEXT PRIMARY KEY NOT NULL,
-data BLOB NOT NULL,
-timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
-);
-CREATE TABLE IF NOT EXISTS "sync_metadata" (
-key TEXT PRIMARY KEY NOT NULL,
-data BLOB NOT NULL,
-timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
-)
-"#;
+use std::borrow::Cow;
+
+use sqlx::migrate::{Migration, MigrationType, Migrator};
+
+pub mod v1;
+
+type SimpleMigration = (
+  /* name */ &'static str,
+  /* up */ &'static str,
+  /* down */ Option<&'static str>,
+);
+
+// ORDER MATTERS
+const MIGRATIONS: &[SimpleMigration] = &[
+  // v2 db init
+  (
+    "init_v2",
+    r#"
+CREATE TABLE "meta" (
+  space_id VARCHAR PRIMARY KEY NOT NULL
+);
+
+CREATE TABLE "snapshots" (
+  doc_id VARCHAR PRIMARY KEY NOT NULL,
+  data BLOB NOT NULL,
+  created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
+  updated_at TIMESTAMP NOT NULL
+);
+
+CREATE TABLE "updates" (
+  doc_id VARCHAR NOT NULL,
+  created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
+  data BLOB NOT NULL,
+  PRIMARY KEY (doc_id, created_at)
+);
+
+CREATE TABLE "clocks" (
+  doc_id VARCHAR PRIMARY KEY NOT NULL,
+  timestamp TIMESTAMP NOT NULL
+);
+
+CREATE TABLE "blobs" (
+  key VARCHAR PRIMARY KEY NOT NULL,
+  data BLOB NOT NULL,
+  mime VARCHAR NOT NULL,
+  size INTEGER NOT NULL,
+  created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
+  deleted_at TIMESTAMP
+);
+
+CREATE TABLE "peer_clocks" (
+  peer VARCHAR NOT NULL,
+  doc_id VARCHAR NOT NULL,
+  remote_clock TIMESTAMP NOT NULL DEFAULT 0,
+  pulled_remote_clock TIMESTAMP NOT NULL DEFAULT 0,
+  pushed_clock TIMESTAMP NOT NULL DEFAULT 0,
+  PRIMARY KEY (peer, doc_id)
+);
+CREATE INDEX peer_clocks_doc_id ON peer_clocks (doc_id);
+"#,
+    None,
+  ),
+];
+
+pub fn get_migrator() -> Migrator {
+  let mut migrations = vec![];
+
+  MIGRATIONS.iter().for_each(|&(name, up, down)| {
+    migrations.push(Migration::new(
+      migrations.len() as i64 + 1,
+      Cow::from(name),
+      if down.is_some() {
+        MigrationType::ReversibleUp
+      } else {
+        MigrationType::Simple
+      },
+      Cow::from(up),
+      false,
+    ));
+
+    if let Some(down) = down {
+      migrations.push(Migration::new(
+        migrations.len() as i64 + 1,
+        Cow::from(name),
+        MigrationType::ReversibleDown,
+        Cow::from(down),
+        false,
+      ));
+    }
+  });
+
+  Migrator {
+    migrations: Cow::Owned(migrations),
+    ..Migrator::DEFAULT
+  }
+}

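Reviewer note: get_migrator derives each migration's version from its position in MIGRATIONS, so entries are append-only and must never be reordered; a reversible migration just supplies a down script, which get_migrator expands into a ReversibleUp/ReversibleDown pair. A hypothetical follow-up entry (illustrative only, not part of this commit) would look like:

// Hypothetical example: appended after init_v2, never inserted before it.
const FUTURE_MIGRATIONS: &[SimpleMigration] = &[(
  "add_blob_checksum",
  r#"ALTER TABLE blobs ADD COLUMN checksum VARCHAR;"#,
  Some(r#"ALTER TABLE blobs DROP COLUMN checksum;"#),
)];
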
packages/frontend/native/schema/src/v1.rs (new file)
@@ -0,0 +1,26 @@
pub const SCHEMA: &str = r#"CREATE TABLE IF NOT EXISTS "updates" (
id INTEGER PRIMARY KEY AUTOINCREMENT,
data BLOB NOT NULL,
timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
doc_id TEXT
);
CREATE TABLE IF NOT EXISTS "blobs" (
key TEXT PRIMARY KEY NOT NULL,
data BLOB NOT NULL,
timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE TABLE IF NOT EXISTS "version_info" (
version NUMBER NOT NULL,
timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE TABLE IF NOT EXISTS "server_clock" (
key TEXT PRIMARY KEY NOT NULL,
data BLOB NOT NULL,
timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE TABLE IF NOT EXISTS "sync_metadata" (
key TEXT PRIMARY KEY NOT NULL,
data BLOB NOT NULL,
timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
)
"#;

packages/frontend/native/sqlite_v1/Cargo.toml (new file)
@@ -0,0 +1,23 @@
[package]
edition = "2021"
name = "affine_sqlite_v1"
version = "0.0.0"

[lib]
crate-type = ["rlib", "cdylib"]

[dependencies]
affine_schema = { path = "../schema" }
anyhow = { workspace = true }
chrono = { workspace = true }
napi = { workspace = true }
napi-derive = { workspace = true }
sqlx = { workspace = true, default-features = false, features = ["chrono", "macros", "migrate", "runtime-tokio", "sqlite", "tls-rustls"] }
tokio = { workspace = true, features = ["full"] }

[build-dependencies]
affine_schema = { path = "../schema" }
dotenvy = { workspace = true }
napi-build = { workspace = true }
sqlx = { workspace = true, default-features = false, features = ["chrono", "json", "macros", "migrate", "runtime-tokio", "sqlite", "tls-rustls"] }
tokio = { workspace = true, features = ["full"] }

packages/frontend/native/sqlite_v1/build.rs (new file)
@@ -0,0 +1,35 @@
use sqlx::sqlite::SqliteConnectOptions;
use std::fs;

#[tokio::main]
async fn main() -> Result<(), std::io::Error> {
  napi_build::setup();

  // always start with a fresh database to have latest db schema
  let cwd = std::env::var("CARGO_MANIFEST_DIR").unwrap();
  let db_path = format!("{cwd}/affine.db");

  if fs::metadata(&db_path).is_ok() {
    fs::remove_file(&db_path)?;
  }

  let options = SqliteConnectOptions::new()
    .filename(&db_path)
    .journal_mode(sqlx::sqlite::SqliteJournalMode::Off)
    .locking_mode(sqlx::sqlite::SqliteLockingMode::Exclusive)
    .create_if_missing(true);
  let pool = sqlx::sqlite::SqlitePoolOptions::new()
    .max_connections(1)
    .connect_with(options)
    .await
    .unwrap();

  sqlx::query(affine_schema::v1::SCHEMA)
    .execute(&pool)
    .await
    .unwrap();

  println!("cargo::rustc-env=DATABASE_URL=sqlite://{db_path}");

  Ok(())
}

@@ -68,7 +68,7 @@ impl SqliteConnection {
      .map_err(anyhow::Error::from)?;
  };
  let mut connection = self.pool.acquire().await.map_err(anyhow::Error::from)?;
-  sqlx::query(affine_schema::SCHEMA)
+  sqlx::query(affine_schema::v1::SCHEMA)
    .execute(connection.as_mut())
    .await
    .map_err(anyhow::Error::from)?;

@@ -1,2 +1,4 @@
pub mod hashcash;
-pub mod sqlite;
+
+pub use affine_nbstore::*;
+pub use affine_sqlite_v1::*;