feat(nbstore): add blob sync storage (#10752)

This commit is contained in:
EYHN
2025-03-14 18:05:54 +08:00
committed by GitHub
parent a2eb3fe1b2
commit 05200ad7b7
56 changed files with 1441 additions and 404 deletions

View File

@@ -45,4 +45,6 @@ export const nbstoreHandlers: NativeDBApis = {
getPeerPushedClock: POOL.getPeerPushedClock.bind(POOL),
setPeerPushedClock: POOL.setPeerPushedClock.bind(POOL),
clearClocks: POOL.clearClocks.bind(POOL),
setBlobUploadedAt: POOL.setBlobUploadedAt.bind(POOL),
getBlobUploadedAt: POOL.getBlobUploadedAt.bind(POOL),
};

View File

@@ -34,6 +34,8 @@ public class NbStorePlugin: CAPPlugin, CAPBridgedPlugin {
CAPPluginMethod(name: "getPeerPushedClocks", returnType: CAPPluginReturnPromise),
CAPPluginMethod(name: "setPeerPushedClock", returnType: CAPPluginReturnPromise),
CAPPluginMethod(name: "clearClocks", returnType: CAPPluginReturnPromise),
CAPPluginMethod(name: "getBlobUploadedAt", returnType: CAPPluginReturnPromise),
CAPPluginMethod(name: "setBlobUploadedAt", returnType: CAPPluginReturnPromise),
]
@objc func connect(_ call: CAPPluginCall) {
@@ -490,6 +492,49 @@ public class NbStorePlugin: CAPPlugin, CAPBridgedPlugin {
}
}
@objc func getBlobUploadedAt(_ call: CAPPluginCall) {
    Task {
      do {
        // Required arguments supplied by the JS bridge.
        let universalId = try call.getStringEnsure("id")
        let peerName = try call.getStringEnsure("peer")
        let blob = try call.getStringEnsure("blobId")
        // Timestamp is epoch milliseconds, or nil if the blob was never
        // uploaded to this peer.
        let timestamp = try await docStoragePool.getBlobUploadedAt(
          universalId: universalId,
          peer: peerName,
          blobId: blob
        )
        call.resolve(["uploadedAt": timestamp as Any])
      } catch {
        call.reject("Failed to get blob uploaded, \(error)", nil, error)
      }
    }
  }
@objc func setBlobUploadedAt(_ call: CAPPluginCall) {
    Task {
      do {
        // Required arguments supplied by the JS bridge.
        let universalId = try call.getStringEnsure("id")
        let peerName = try call.getStringEnsure("peer")
        let blob = try call.getStringEnsure("blobId")
        // Optional millisecond timestamp; absent/nil clears the record.
        let millis = call.getInt("uploadedAt")
        try await docStoragePool.setBlobUploadedAt(
          universalId: universalId,
          peer: peerName,
          blobId: blob,
          uploadedAt: millis.map { Int64($0) }
        )
        call.resolve()
      } catch {
        call.reject("Failed to set blob uploaded, \(error)", nil, error)
      }
    }
  }
@objc func clearClocks(_ call: CAPPluginCall) {
Task {
do {

View File

@@ -514,6 +514,8 @@ public protocol DocStoragePoolProtocol: AnyObject {
func getBlob(universalId: String, key: String) async throws -> Blob?
func getBlobUploadedAt(universalId: String, peer: String, blobId: String) async throws -> Int64?
func getDocClock(universalId: String, docId: String) async throws -> DocClock?
func getDocClocks(universalId: String, after: Int64?) async throws -> [DocClock]
@@ -544,6 +546,8 @@ public protocol DocStoragePoolProtocol: AnyObject {
func setBlob(universalId: String, blob: SetBlob) async throws
func setBlobUploadedAt(universalId: String, peer: String, blobId: String, uploadedAt: Int64?) async throws
func setDocSnapshot(universalId: String, snapshot: DocRecord) async throws -> Bool
func setPeerPulledRemoteClock(universalId: String, peer: String, docId: String, clock: Int64) async throws
@@ -709,6 +713,23 @@ open func getBlob(universalId: String, key: String)async throws -> Blob? {
)
}
// UniFFI-generated binding — do not edit by hand; regenerate from the Rust
// interface instead. Bridges the async Rust call
// `docstoragepool_get_blob_uploaded_at` and lifts the result to Int64?
// (epoch millis, or nil when the blob has no recorded upload time).
open func getBlobUploadedAt(universalId: String, peer: String, blobId: String)async throws -> Int64? {
return
try await uniffiRustCallAsync(
rustFutureFunc: {
uniffi_affine_mobile_native_fn_method_docstoragepool_get_blob_uploaded_at(
self.uniffiClonePointer(),
FfiConverterString.lower(universalId),FfiConverterString.lower(peer),FfiConverterString.lower(blobId)
)
},
pollFunc: ffi_affine_mobile_native_rust_future_poll_rust_buffer,
completeFunc: ffi_affine_mobile_native_rust_future_complete_rust_buffer,
freeFunc: ffi_affine_mobile_native_rust_future_free_rust_buffer,
liftFunc: FfiConverterOptionInt64.lift,
errorHandler: FfiConverterTypeUniffiError.lift
)
}
open func getDocClock(universalId: String, docId: String)async throws -> DocClock? {
return
try await uniffiRustCallAsync(
@@ -964,6 +985,23 @@ open func setBlob(universalId: String, blob: SetBlob)async throws {
)
}
// UniFFI-generated binding — do not edit by hand; regenerate from the Rust
// interface instead. Bridges the async Rust call
// `docstoragepool_set_blob_uploaded_at`; a nil `uploadedAt` clears the
// stored upload time. The future completes with void.
open func setBlobUploadedAt(universalId: String, peer: String, blobId: String, uploadedAt: Int64?)async throws {
return
try await uniffiRustCallAsync(
rustFutureFunc: {
uniffi_affine_mobile_native_fn_method_docstoragepool_set_blob_uploaded_at(
self.uniffiClonePointer(),
FfiConverterString.lower(universalId),FfiConverterString.lower(peer),FfiConverterString.lower(blobId),FfiConverterOptionInt64.lower(uploadedAt)
)
},
pollFunc: ffi_affine_mobile_native_rust_future_poll_void,
completeFunc: ffi_affine_mobile_native_rust_future_complete_void,
freeFunc: ffi_affine_mobile_native_rust_future_free_void,
liftFunc: { $0 },
errorHandler: FfiConverterTypeUniffiError.lift
)
}
open func setDocSnapshot(universalId: String, snapshot: DocRecord)async throws -> Bool {
return
try await uniffiRustCallAsync(
@@ -1972,6 +2010,9 @@ private let initializationResult: InitializationResult = {
if (uniffi_affine_mobile_native_checksum_method_docstoragepool_get_blob() != 56927) {
return InitializationResult.apiChecksumMismatch
}
if (uniffi_affine_mobile_native_checksum_method_docstoragepool_get_blob_uploaded_at() != 41270) {
return InitializationResult.apiChecksumMismatch
}
if (uniffi_affine_mobile_native_checksum_method_docstoragepool_get_doc_clock() != 48394) {
return InitializationResult.apiChecksumMismatch
}
@@ -2017,6 +2058,9 @@ private let initializationResult: InitializationResult = {
if (uniffi_affine_mobile_native_checksum_method_docstoragepool_set_blob() != 31398) {
return InitializationResult.apiChecksumMismatch
}
if (uniffi_affine_mobile_native_checksum_method_docstoragepool_set_blob_uploaded_at() != 7188) {
return InitializationResult.apiChecksumMismatch
}
if (uniffi_affine_mobile_native_checksum_method_docstoragepool_set_doc_snapshot() != 5287) {
return InitializationResult.apiChecksumMismatch
}

View File

@@ -291,6 +291,11 @@ uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_disconnect(void*_N
uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_get_blob(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer key
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_GET_BLOB_UPLOADED_AT
#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_GET_BLOB_UPLOADED_AT
uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_get_blob_uploaded_at(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer peer, RustBuffer blob_id
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_GET_DOC_CLOCK
#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_GET_DOC_CLOCK
uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_get_doc_clock(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer doc_id
@@ -366,6 +371,11 @@ uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_release_blobs(void
uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_set_blob(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer blob
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_SET_BLOB_UPLOADED_AT
#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_SET_BLOB_UPLOADED_AT
uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_set_blob_uploaded_at(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer peer, RustBuffer blob_id, RustBuffer uploaded_at
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_SET_DOC_SNAPSHOT
#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_SET_DOC_SNAPSHOT
uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_set_doc_snapshot(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer snapshot
@@ -728,6 +738,12 @@ uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_disconnect(v
#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_GET_BLOB
uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_get_blob(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_GET_BLOB_UPLOADED_AT
#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_GET_BLOB_UPLOADED_AT
uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_get_blob_uploaded_at(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_GET_DOC_CLOCK
@@ -818,6 +834,12 @@ uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_release_blob
#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_SET_BLOB
uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_set_blob(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_SET_BLOB_UPLOADED_AT
#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_SET_BLOB_UPLOADED_AT
uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_set_blob_uploaded_at(void
);
#endif
#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_SET_DOC_SNAPSHOT

View File

@@ -137,5 +137,16 @@ export interface NbStorePlugin {
docId: string;
timestamp: number;
}) => Promise<void>;
getBlobUploadedAt: (options: {
id: string;
peer: string;
blobId: string;
}) => Promise<{ uploadedAt: number | null }>;
setBlobUploadedAt: (options: {
id: string;
peer: string;
blobId: string;
uploadedAt: number | null;
}) => Promise<void>;
clearClocks: (options: { id: string }) => Promise<void>;
}

View File

@@ -311,4 +311,29 @@ export const NbStoreNativeDBApis: NativeDBApis = {
id,
});
},
getBlobUploadedAt: async function (
    id: string,
    peer: string,
    blobId: string
  ): Promise<Date | null> {
    const result = await NbStore.getBlobUploadedAt({
      id,
      peer,
      blobId,
    });
    // Use an explicit null check: a timestamp of 0 (the Unix epoch) is
    // falsy but still a valid upload time — the truthiness check would
    // incorrectly report such a blob as never uploaded.
    return result.uploadedAt != null ? new Date(result.uploadedAt) : null;
  },
setBlobUploadedAt: async function (
    id: string,
    peer: string,
    blobId: string,
    uploadedAt: Date | null
  ): Promise<void> {
    // The native bridge takes epoch milliseconds, or null to clear the
    // recorded upload time.
    const timestamp = uploadedAt ? uploadedAt.getTime() : null;
    await NbStore.setBlobUploadedAt({
      id,
      peer,
      blobId,
      uploadedAt: timestamp,
    });
  },
};

View File

@@ -3,6 +3,7 @@ import './array-to-spliced';
import './dispose';
import './iterator-helpers';
import './promise-with-resolvers';
import './set-union';
import { polyfillEventLoop } from './request-idle-callback';
import { polyfillResizeObserver } from './resize-observer';

View File

@@ -0,0 +1 @@
import 'core-js/es/set/union.js';

View File

@@ -33,8 +33,8 @@ export const OverCapacityNotification = () => {
useEffect(() => {
const disposableOverCapacity =
currentWorkspace.engine.blob.state$.subscribe(
debounce(({ isStorageOverCapacity }: BlobSyncState) => {
const isOver = isStorageOverCapacity;
debounce(({ overCapacity }: BlobSyncState) => {
const isOver = overCapacity;
if (!isOver) {
return;
}

View File

@@ -8,8 +8,8 @@ import type { Workspace } from '@affine/core/modules/workspace';
import { useI18n } from '@affine/i18n';
import { universalId } from '@affine/nbstore';
import track from '@affine/track';
import { LiveData, useLiveData, useService } from '@toeverything/infra';
import { useMemo, useState } from 'react';
import { useService } from '@toeverything/infra';
import { useState } from 'react';
interface ExportPanelProps {
workspace: Workspace;
@@ -22,39 +22,18 @@ export const DesktopExportPanel = ({ workspace }: ExportPanelProps) => {
const desktopApi = useService(DesktopApiService);
const isLocalWorkspace = workspace.flavour === 'local';
const docSyncState = useLiveData(
useMemo(() => {
return workspace
? LiveData.from(workspace.engine.doc.state$, null).throttleTime(500)
: null;
}, [workspace])
);
const blobSyncState = useLiveData(
useMemo(() => {
return workspace
? LiveData.from(workspace.engine.blob.state$, null).throttleTime(500)
: null;
}, [workspace])
);
const docSynced = !docSyncState?.syncing;
const blobSynced =
!blobSyncState || blobSyncState.synced === blobSyncState.total;
const [fullSyncing, setFullSyncing] = useState(false);
const [fullSynced, setFullSynced] = useState(false);
const shouldWaitForFullSync =
isLocalWorkspace || !isOnline || (fullSynced && docSynced && blobSynced);
const fullSyncing = fullSynced && (!docSynced || !blobSynced);
const shouldWaitForFullSync = !isLocalWorkspace && isOnline && !fullSynced;
const fullSync = useAsyncCallback(async () => {
// NOTE: doc full sync is always started by default
// await workspace.engine.doc.waitForSynced();
workspace.engine.blob.fullDownload().catch(() => {
/* noop */
});
setFullSyncing(true);
await workspace.engine.blob.fullDownload();
await workspace.engine.doc.waitForSynced();
setFullSynced(true);
}, [workspace.engine.blob]);
setFullSyncing(false);
}, [workspace.engine.blob, workspace.engine.doc]);
const onExport = useAsyncCallback(async () => {
if (saving) {
@@ -86,7 +65,7 @@ export const DesktopExportPanel = ({ workspace }: ExportPanelProps) => {
}
}, [desktopApi, saving, t, workspace]);
if (!shouldWaitForFullSync) {
if (shouldWaitForFullSync) {
return (
<SettingRow name={t['Export']()} desc={t['Full Sync Description']()}>
<Button

View File

@@ -106,6 +106,7 @@ export class CMDKQuickSearchService extends Service {
primaryMode: 'page',
docProps,
});
this.workbenchService.workbench.openDoc(newDoc.id);
} else if (result.id === 'creation:create-edgeless') {
const newDoc = this.docsService.createDoc({

View File

@@ -13,6 +13,7 @@ import type {
import { CloudBlobStorage, StaticCloudDocStorage } from '@affine/nbstore/cloud';
import {
IndexedDBBlobStorage,
IndexedDBBlobSyncStorage,
IndexedDBDocStorage,
IndexedDBDocSyncStorage,
} from '@affine/nbstore/idb';
@@ -22,6 +23,7 @@ import {
} from '@affine/nbstore/idb/v1';
import {
SqliteBlobStorage,
SqliteBlobSyncStorage,
SqliteDocStorage,
SqliteDocSyncStorage,
} from '@affine/nbstore/sqlite';
@@ -115,6 +117,10 @@ class CloudWorkspaceFlavourProvider implements WorkspaceFlavourProvider {
BUILD_CONFIG.isElectron || BUILD_CONFIG.isIOS
? SqliteDocSyncStorage
: IndexedDBDocSyncStorage;
BlobSyncStorageType =
BUILD_CONFIG.isElectron || BUILD_CONFIG.isIOS
? SqliteBlobSyncStorage
: IndexedDBBlobSyncStorage;
async deleteWorkspace(id: string): Promise<void> {
await this.graphqlService.gql({
@@ -439,6 +445,14 @@ class CloudWorkspaceFlavourProvider implements WorkspaceFlavourProvider {
id: workspaceId,
},
},
blobSync: {
name: this.BlobSyncStorageType.identifier,
opts: {
flavour: this.flavour,
type: 'workspace',
id: workspaceId,
},
},
awareness: {
name: 'BroadcastChannelAwarenessStorage',
opts: {

View File

@@ -7,6 +7,7 @@ import {
} from '@affine/nbstore';
import {
IndexedDBBlobStorage,
IndexedDBBlobSyncStorage,
IndexedDBDocStorage,
IndexedDBDocSyncStorage,
} from '@affine/nbstore/idb';
@@ -16,6 +17,7 @@ import {
} from '@affine/nbstore/idb/v1';
import {
SqliteBlobStorage,
SqliteBlobSyncStorage,
SqliteDocStorage,
SqliteDocSyncStorage,
} from '@affine/nbstore/sqlite';
@@ -101,6 +103,10 @@ class LocalWorkspaceFlavourProvider implements WorkspaceFlavourProvider {
BUILD_CONFIG.isElectron || BUILD_CONFIG.isIOS
? SqliteDocSyncStorage
: IndexedDBDocSyncStorage;
BlobSyncStorageType =
BUILD_CONFIG.isElectron || BUILD_CONFIG.isIOS
? SqliteBlobSyncStorage
: IndexedDBBlobSyncStorage;
async deleteWorkspace(id: string): Promise<void> {
setLocalWorkspaceIds(ids => ids.filter(x => x !== id));
@@ -321,6 +327,14 @@ class LocalWorkspaceFlavourProvider implements WorkspaceFlavourProvider {
id: workspaceId,
},
},
blobSync: {
name: this.BlobSyncStorageType.identifier,
opts: {
flavour: this.flavour,
type: 'workspace',
id: workspaceId,
},
},
docSync: {
name: this.DocSyncStorageType.identifier,
opts: {

View File

@@ -63,5 +63,10 @@ export class WorkspaceEngine extends Entity<{
this.doc.addPriority(rootDoc.guid, 100);
this.doc.start();
this.disposables.push(() => this.doc.stop());
// fully migrate blobs from v1 to v2; this is a no-op if the v1 storage does not exist
store.blobFrontend.fullDownload('v1').catch(() => {
// should never reach here
});
}
}

View File

@@ -599,4 +599,48 @@ impl DocStoragePool {
pub async fn clear_clocks(&self, universal_id: String) -> Result<()> {
Ok(self.inner.get(universal_id).await?.clear_clocks().await?)
}
/// Record when a blob was uploaded to `peer`, or clear the record when
/// `uploaded_at` is `None`. `uploaded_at` is epoch milliseconds (UTC).
///
/// Fails with `UniffiError::TimestampDecodingError` when the millisecond
/// value cannot be represented as a chrono timestamp.
pub async fn set_blob_uploaded_at(
    &self,
    universal_id: String,
    peer: String,
    blob_id: String,
    uploaded_at: Option<i64>,
  ) -> Result<()> {
    // Decode the millisecond timestamp up front so an out-of-range value
    // surfaces as a decoding error before touching storage.
    let decoded = match uploaded_at {
      Some(millis) => Some(
        chrono::DateTime::<chrono::Utc>::from_timestamp_millis(millis)
          .ok_or(UniffiError::TimestampDecodingError)?
          .naive_utc(),
      ),
      None => None,
    };
    let storage = self.inner.get(universal_id).await?;
    Ok(storage.set_blob_uploaded_at(peer, blob_id, decoded).await?)
  }
/// Look up when a blob was last uploaded to `peer`.
///
/// Returns epoch milliseconds (UTC), or `None` when no upload time has
/// been recorded for this (peer, blob) pair.
pub async fn get_blob_uploaded_at(
    &self,
    universal_id: String,
    peer: String,
    blob_id: String,
  ) -> Result<Option<i64>> {
    let storage = self.inner.get(universal_id).await?;
    let uploaded_at = storage.get_blob_uploaded_at(peer, blob_id).await?;
    // Encode the naive-UTC datetime as epoch millis for the FFI boundary.
    Ok(uploaded_at.map(|t| t.and_utc().timestamp_millis()))
  }
}

View File

@@ -59,6 +59,8 @@ export declare class DocStoragePool {
getPeerPushedClock(universalId: string, peer: string, docId: string): Promise<DocClock | null>
setPeerPushedClock(universalId: string, peer: string, docId: string, clock: Date): Promise<void>
clearClocks(universalId: string): Promise<void>
setBlobUploadedAt(universalId: string, peer: string, blobId: string, uploadedAt?: Date | undefined | null): Promise<void>
getBlobUploadedAt(universalId: string, peer: string, blobId: string): Promise<Date | null>
}
export declare class Mp3Encoder {

View File

@@ -0,0 +1,92 @@
use chrono::NaiveDateTime;
use super::{error::Result, storage::SqliteDocStorage};
impl SqliteDocStorage {
/// Upsert the time a blob was uploaded to `peer`.
/// Passing `None` stores NULL, i.e. clears the uploaded marker.
pub async fn set_blob_uploaded_at(
&self,
peer: String,
blob_id: String,
uploaded_at: Option<NaiveDateTime>,
) -> Result<()> {
// Upsert keyed on (peer, blob_id); `$3` is reused so the UPDATE path
// writes the same timestamp as the INSERT path.
sqlx::query(
r#"
INSERT INTO peer_blob_sync (peer, blob_id, uploaded_at)
VALUES ($1, $2, $3)
ON CONFLICT(peer, blob_id)
DO UPDATE SET uploaded_at=$3;"#,
)
.bind(peer)
.bind(blob_id)
.bind(uploaded_at)
.execute(&self.pool)
.await?;
Ok(())
}
/// Fetch the uploaded-at timestamp for a blob on `peer`.
/// Returns `None` both when no row exists and when the stored value is NULL.
pub async fn get_blob_uploaded_at(
&self,
peer: String,
blob_id: String,
) -> Result<Option<NaiveDateTime>> {
let result = sqlx::query_scalar!(
"SELECT uploaded_at FROM peer_blob_sync WHERE peer = ? AND blob_id = ?",
peer,
blob_id
)
.fetch_optional(&self.pool)
.await?;
// fetch_optional yields Option<Option<_>>: outer = row present,
// inner = column non-NULL. flatten() collapses both "missing" cases.
Ok(result.flatten())
}
}
#[cfg(test)]
mod tests {
  use chrono::Utc;

  use super::*;

  /// Spin up a fresh in-memory sqlite storage for each test.
  async fn get_storage() -> SqliteDocStorage {
    let db = SqliteDocStorage::new(":memory:".to_string());
    db.connect().await.unwrap();
    db
  }

  #[tokio::test]
  async fn blob_uploaded_at() {
    let db = get_storage().await;
    let peer = String::from("peer1");
    let blob_id = String::from("blob1");

    // Unknown blob: no timestamp recorded yet.
    let initial = db
      .get_blob_uploaded_at(peer.clone(), blob_id.clone())
      .await
      .unwrap();
    assert!(initial.is_none());

    // Setting a timestamp round-trips unchanged.
    let now = Utc::now().naive_utc();
    db.set_blob_uploaded_at(peer.clone(), blob_id.clone(), Some(now))
      .await
      .unwrap();
    let fetched = db
      .get_blob_uploaded_at(peer.clone(), blob_id.clone())
      .await
      .unwrap();
    assert_eq!(fetched, Some(now));

    // Clearing with None removes the timestamp again.
    db.set_blob_uploaded_at(peer.clone(), blob_id.clone(), None)
      .await
      .unwrap();
    let cleared = db
      .get_blob_uploaded_at(peer.clone(), blob_id.clone())
      .await
      .unwrap();
    assert!(cleared.is_none());
  }
}

View File

@@ -1,9 +1,10 @@
pub mod blob;
pub mod blob_sync;
pub mod doc;
pub mod doc_sync;
pub mod error;
pub mod pool;
pub mod storage;
pub mod sync;
use chrono::NaiveDateTime;
use napi::bindgen_prelude::*;
@@ -402,6 +403,38 @@ impl DocStoragePool {
self.get(universal_id).await?.clear_clocks().await?;
Ok(())
}
#[napi]
  /// Record when a blob was uploaded to `peer`; `None` clears the record.
  /// Thin napi wrapper that resolves the pooled storage by `universal_id`
  /// and delegates to it.
  pub async fn set_blob_uploaded_at(
    &self,
    universal_id: String,
    peer: String,
    blob_id: String,
    uploaded_at: Option<NaiveDateTime>,
  ) -> Result<()> {
    let storage = self.get(universal_id).await?;
    storage
      .set_blob_uploaded_at(peer, blob_id, uploaded_at)
      .await?;
    Ok(())
  }
#[napi]
  /// Look up when a blob was last uploaded to `peer`, or `None` when no
  /// upload time is recorded. Thin napi wrapper over the pooled storage.
  pub async fn get_blob_uploaded_at(
    &self,
    universal_id: String,
    peer: String,
    blob_id: String,
  ) -> Result<Option<NaiveDateTime>> {
    Ok(
      self
        .get(universal_id)
        .await?
        .get_blob_uploaded_at(peer, blob_id)
        .await?,
    )
  }
}
#[napi]

View File

@@ -57,6 +57,20 @@ CREATE TABLE "peer_clocks" (
PRIMARY KEY (peer, doc_id)
);
CREATE INDEX peer_clocks_doc_id ON peer_clocks (doc_id);
"#,
None,
),
// add blob_sync table
(
"add_blob_sync",
r#"
CREATE TABLE "peer_blob_sync" (
peer VARCHAR NOT NULL,
blob_id VARCHAR NOT NULL,
uploaded_at TIMESTAMP,
PRIMARY KEY (peer, blob_id)
);
CREATE INDEX peer_blob_sync_peer ON peer_blob_sync (peer);
"#,
None,
),