feat(infra): framework

This commit is contained in:
EYHN
2024-04-17 14:12:29 +08:00
parent ab17a05df3
commit 06fda3b62c
467 changed files with 9996 additions and 8697 deletions

View File

@@ -0,0 +1,16 @@
export interface AwarenessConnection {
connect(): void;
disconnect(): void;
}
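/**
 * Fans out connect/disconnect calls to every registered awareness connection.
 */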
export class AwarenessEngine {
constructor(public readonly connections: AwarenessConnection[]) {}
connect() {
this.connections.forEach(connection => connection.connect());
}
disconnect() {
this.connections.forEach(connection => connection.disconnect());
}
}

View File

@@ -0,0 +1,254 @@
import { DebugLogger } from '@affine/debug';
import { Slot } from '@blocksuite/global/utils';
import { difference } from 'lodash-es';
import { LiveData } from '../../livedata';
import type { Memento } from '../../storage';
import { BlobStorageOverCapacity } from './error';
const logger = new DebugLogger('affine:blob-engine');
export interface BlobStorage {
name: string;
readonly: boolean;
get: (key: string) => Promise<Blob | null>;
set: (key: string, value: Blob) => Promise<string>;
delete: (key: string) => Promise<void>;
list: () => Promise<string[]>;
}
export interface BlobStatus {
isStorageOverCapacity: boolean;
}
/**
 * # BlobEngine
 *
 * Syncs blobs between storages in the background.
 *
 * All operations try the local storage first, then fall back to the remotes.
 */
export class BlobEngine {
private abort: AbortController | null = null;
readonly isStorageOverCapacity$ = new LiveData(false);
singleBlobSizeLimit: number = 100 * 1024 * 1024;
onAbortLargeBlob = new Slot<Blob>();
constructor(
private readonly local: BlobStorage,
private readonly remotes: BlobStorage[]
) {}
start() {
if (this.abort || this.isStorageOverCapacity$.value) {
return;
}
this.abort = new AbortController();
const abortSignal = this.abort.signal;
const sync = () => {
if (abortSignal.aborted) {
return;
}
this.sync()
.catch(error => {
logger.error('sync blob error', error);
})
.finally(() => {
// sync every 1 minute
setTimeout(sync, 60000);
});
};
sync();
}
stop() {
this.abort?.abort();
this.abort = null;
}
get storages() {
return [this.local, ...this.remotes];
}
async sync() {
if (this.local.readonly) {
return;
}
logger.debug('start syncing blob...');
for (const remote of this.remotes) {
let localList: string[] = [];
let remoteList: string[] = [];
if (!remote.readonly) {
try {
localList = await this.local.list();
remoteList = await remote.list();
} catch (err) {
          logger.error(`error listing blobs for sync`, err);
continue;
}
const needUpload = difference(localList, remoteList);
for (const key of needUpload) {
try {
const data = await this.local.get(key);
if (data) {
await remote.set(key, data);
}
} catch (err) {
logger.error(
`error when sync ${key} from [${this.local.name}] to [${remote.name}]`,
err
);
}
}
}
const needDownload = difference(remoteList, localList);
for (const key of needDownload) {
try {
const data = await remote.get(key);
if (data) {
await this.local.set(key, data);
}
} catch (err) {
if (err instanceof BlobStorageOverCapacity) {
this.isStorageOverCapacity$.value = true;
}
logger.error(
`error when sync ${key} from [${remote.name}] to [${this.local.name}]`,
err
);
}
}
}
logger.debug('finish syncing blob');
}
async get(key: string) {
logger.debug('get blob', key);
for (const storage of this.storages) {
const data = await storage.get(key);
if (data) {
return data;
}
}
return null;
}
async set(key: string, value: Blob) {
if (this.local.readonly) {
throw new Error('local peer is readonly');
}
if (value.size > this.singleBlobSizeLimit) {
this.onAbortLargeBlob.emit(value);
logger.error('blob over limit, abort set');
return key;
}
    // wait for the write to the local peer to complete
    await this.local.set(key, value);
    // then upload to the other peers in the background
Promise.allSettled(
this.remotes
.filter(r => !r.readonly)
.map(peer =>
peer.set(key, value).catch(err => {
logger.error('Error when uploading to peer', err);
})
)
)
.then(result => {
if (result.some(({ status }) => status === 'rejected')) {
          logger.error(
            `blob ${key} upload finished, but some peers failed to update`
          );
        } else {
          logger.debug(`blob ${key} upload finished`);
        }
})
.catch(() => {
        // Promise.allSettled never rejects
});
return key;
}
async delete(_key: string) {
// not supported
}
async list() {
const blobList = new Set<string>();
for (const peer of this.storages) {
const list = await peer.list();
if (list) {
for (const blob of list) {
blobList.add(blob);
}
}
}
return Array.from(blobList);
}
}
export const EmptyBlobStorage: BlobStorage = {
name: 'empty',
readonly: true,
async get(_key: string) {
return null;
},
async set(_key: string, _value: Blob) {
throw new Error('not supported');
},
async delete(_key: string) {
throw new Error('not supported');
},
async list() {
return [];
},
};
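/**
 * Memento-backed in-memory blob storage, mainly for tests.
 */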
export class MemoryBlobStorage implements BlobStorage {
name = 'testing';
readonly = false;
constructor(private readonly state: Memento) {}
get(key: string) {
return Promise.resolve(this.state.get<Blob>(key) ?? null);
}
set(key: string, value: Blob) {
this.state.set(key, value);
const list = this.state.get<Set<string>>('list') ?? new Set<string>();
list.add(key);
this.state.set('list', list);
return Promise.resolve(key);
}
delete(key: string) {
this.state.set(key, null);
const list = this.state.get<Set<string>>('list') ?? new Set<string>();
list.delete(key);
this.state.set('list', list);
return Promise.resolve();
}
list() {
const list = this.state.get<Set<string>>('list');
return Promise.resolve(list ? Array.from(list) : []);
}
}

View File

@@ -0,0 +1,5 @@
export class BlobStorageOverCapacity extends Error {
constructor(public originError?: any) {
super('Blob storage over capacity.');
}
}

View File

@@ -0,0 +1,127 @@
# DocEngine
The synchronization algorithm for yjs docs.
```
┌─────────┐ ┌───────────┐ ┌────────┐
│ Storage ◄──┤ DocEngine ├──► Server │
└─────────┘ └───────────┘ └────────┘
```
# Core Components
## DocStorage
```ts
export interface DocStorage {
eventBus: DocEventBus;
doc: ByteKV;
syncMetadata: ByteKV;
serverClock: ByteKV;
}
```
Represents the local storage used. Specific implementations are replaceable, such as `IndexedDBDocStorage` in the browser and `SqliteDocStorage` in the desktop app.
### DocEventBus
Each `DocStorage` contains a `DocEventBus`, which is used to communicate with other engines that share the same storage.
With `DocEventBus` we can sync updates between engines without connecting to the server.
For example, in the browser we may have multiple tabs that all share the same `IndexedDBDocStorage`, so we use a `BroadcastChannel` to implement `DocEventBus`, which lets us broadcast events to all tabs.
On the desktop app, if we have multiple windows sharing the same `SqliteDocStorage`, we must build a mechanism to broadcast events between all windows (currently not implemented).
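A browser implementation could look roughly like this (a minimal sketch only; the channel name and the wiring into `IndexedDBDocStorage` are assumptions, not the shipped implementation):
```ts
import type { DocEvent, DocEventBus } from './event';

export class BroadcastChannelDocEventBus implements DocEventBus {
  // hypothetical channel name; in practice it should be scoped per workspace
  private readonly channel = new BroadcastChannel('doc-event-bus');

  emit(event: DocEvent): void {
    // structured clone handles the Uint8Array payloads in DocEvent;
    // BroadcastChannel delivers to every other tab, but not back to the sender
    this.channel.postMessage(event);
  }

  on(cb: (event: DocEvent) => void): () => void {
    const listener = (message: MessageEvent<DocEvent>) => cb(message.data);
    this.channel.addEventListener('message', listener);
    return () => this.channel.removeEventListener('message', listener);
  }
}
```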
## DocServer
```ts
export interface DocServer {
pullDoc(
docId: string,
stateVector: Uint8Array
): Promise<{
data: Uint8Array;
serverClock: number;
stateVector?: Uint8Array;
} | null>;
pushDoc(docId: string, data: Uint8Array): Promise<{ serverClock: number }>;
  subscribeAllDocs(
    cb: (updates: {
      docId: string;
      data: Uint8Array;
      serverClock: number;
    }) => void
  ): Promise<() => void>;
loadServerClock(after: number): Promise<Map<string, number>>;
waitForConnectingServer(signal: AbortSignal): Promise<void>;
disconnectServer(): void;
onInterrupted(cb: (reason: string) => void): void;
}
```
Represents the server we want to synchronize with. A simulated implementation lives in `tests/sync.spec.ts`, and the real implementation is in `packages/backend/server`.
### ServerClock
`ServerClock` is a clock value generated after each update is stored on the server. It determines the order in which updates were stored on the server.
The `DocEngine` decides whether to pull updates from the server based on the `ServerClock`.
A `ServerClock` written later must be **greater** than all previously written ones. So on the client side, we can call `loadServerClock(the largest ServerClock previously received)` to obtain the `ServerClock` of every doc that has changed since, as sketched below.
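A sketch of that catch-up step, assuming `server` is a `DocServer` as defined above and `knownClocks` holds the per-doc clocks we persisted locally:
```ts
async function catchUp(
  server: DocServer,
  knownClocks: Map<string, number>
): Promise<string[]> {
  // the largest ServerClock we have ever received
  const maxClock = Math.max(0, ...knownClocks.values());
  // every entry returned here has a ServerClock greater than maxClock
  const changed = await server.loadServerClock(maxClock);
  const needPull: string[] = [];
  for (const [docId, serverClock] of changed) {
    // pull only if the server is ahead of what we last pulled
    if (serverClock > (knownClocks.get(docId) ?? 0)) {
      needPull.push(docId);
    }
  }
  return needPull;
}
```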
## DocEngine
The `DocEngine` is where all the synchronization logic actually happens.
Due to the complexity of the implementation, we divide it into two parts.
## DocEngine - LocalPart
Synchronizes **the `YDoc` instance** with **storage**.
The typical workflow is:
1. Load data from storage and apply it to the `YDoc` instance.
2. Track `YDoc` changes.
3. Write the changes back to storage (sketched below).
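In terms of yjs APIs, the workflow boils down to something like this (a simplified sketch; `DocStorageInner` is the engine's wrapper around `DocStorage`, and the real `LocalPart` batches this work through a job queue):
```ts
import { applyUpdate, type Doc as YDoc } from 'yjs';
import type { DocStorageInner } from './storage';

async function connectDoc(doc: YDoc, storage: DocStorageInner) {
  // 1. load data from storage and apply it to the YDoc instance
  const stored = await storage.loadDocFromLocal(doc.guid);
  if (stored) {
    applyUpdate(doc, stored, 'doc-engine');
  }
  // 2 + 3. track YDoc changes and write them back to storage
  doc.on('update', (update: Uint8Array, origin: unknown) => {
    if (origin === 'doc-engine') {
      return; // skip updates the engine applied itself
    }
    void storage.commitDocAsClientUpdate(doc.guid, update);
  });
}
```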
### SeqNum
A `SeqNum` is stored alongside each doc's data in `Storage`. Every time `LocalPart` writes data, the `SeqNum` is incremented by 1.
There is also a `PushedSeqNum`, which is used by the `RemotePart` described below.
## DocEngine - RemotePart
Synchronizes `Storage` with the `Server`.
The typical workflow is:
1. Connect to the server, load the `ServerClock` of every doc, and start subscribing to server-side updates.
2. Check whether each doc requires a `push` or a `pull`.
3. Execute all pushes and pulls.
4. Listen for updates from `LocalPart` and push them to the server.
5. Listen for server-side updates and write them to storage.
### PushedSeqNum
Each doc records a `PushedSeqNum`, used to determine whether the doc has unpushed updates.
After each `push` completes, `PushedSeqNum` is incremented by 1.
If `PushedSeqNum` and `SeqNum` still differ after a push completes (which usually means a previous `push` failed),
we do a full pull-and-push and set `PushedSeqNum` = `SeqNum`.
### PulledServerClock
Each doc also records a `PulledServerClock`, which is compared with the `ServerClock` to decide whether to `pull` the doc.
When a `pull` completes, `PulledServerClock` is set to the `ServerClock` returned by the server. The combined check is sketched below.
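Putting the two counters together, the per-doc check in step 2 of the workflow is roughly (a sketch; `latestServerClock` stands in for the engine's in-memory `ServerClock` map):
```ts
import type { DocStorageInner } from './storage';

type Action = 'pullAndPush' | 'pull' | 'none';

async function check(
  docId: string,
  storage: DocStorageInner,
  latestServerClock: (docId: string) => number
): Promise<Action> {
  const seqNum = await storage.loadDocSeqNum(docId);
  const pushedSeqNum = await storage.loadDocSeqNumPushed(docId);
  if (pushedSeqNum === null || pushedSeqNum !== seqNum) {
    // local updates the server has never confirmed: full pull-and-push
    return 'pullAndPush';
  }
  const pulled = await storage.loadDocServerClockPulled(docId);
  if (pulled === null || pulled !== latestServerClock(docId)) {
    // the server is ahead of what we last pulled: pull only
    return 'pull';
  }
  return 'none';
}
```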
### Retry
The `RemotePart` may fail at any time; its built-in retry mechanism restarts the whole process 5 seconds after a failure.

View File

@@ -0,0 +1,41 @@
import { describe, expect, test } from 'vitest';
import { PriorityQueue } from '../priority-queue';
describe('Priority Queue', () => {
test('priority', () => {
const queue = new PriorityQueue();
queue.push('foo', 1);
queue.push('bar', 2);
queue.push('baz', 0);
expect(queue.pop()).toBe('bar');
expect(queue.pop()).toBe('foo');
expect(queue.pop()).toBe('baz');
expect(queue.pop()).toBe(null);
queue.push('B', 1);
queue.push('A', 1);
    // if priorities are equal, pop follows descending id order
expect(queue.pop()).toBe('B');
expect(queue.pop()).toBe('A');
expect(queue.pop()).toBe(null);
queue.push('A', 1);
queue.push('B', 2);
queue.push('A', 3); // same id but different priority, update the priority
expect(queue.pop()).toBe('A');
expect(queue.pop()).toBe('B');
expect(queue.pop()).toBe(null);
queue.push('A', 1);
queue.push('B', 2);
queue.remove('B');
expect(queue.pop()).toBe('A');
expect(queue.pop()).toBe(null);
});
});

View File

@@ -0,0 +1,239 @@
import { nanoid } from 'nanoid';
import { describe, expect, test, vitest } from 'vitest';
import {
diffUpdate,
Doc as YDoc,
encodeStateAsUpdate,
encodeStateVectorFromUpdate,
mergeUpdates,
} from 'yjs';
import { AsyncLock } from '../../../utils';
import { DocEngine } from '..';
import type { DocServer } from '../server';
import { MemoryStorage } from '../storage';
import { isEmptyUpdate } from '../utils';
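// In-memory stand-in for a real DocServer, used to drive the sync tests below.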
class MiniServer {
lock = new AsyncLock();
db = new Map<string, { data: Uint8Array; clock: number }>();
listeners = new Set<{
cb: (updates: {
docId: string;
data: Uint8Array;
serverClock: number;
}) => void;
clientId: string;
}>();
client() {
return new MiniServerClient(nanoid(), this);
}
}
class MiniServerClient implements DocServer {
constructor(
private readonly id: string,
private readonly server: MiniServer
) {}
async pullDoc(docId: string, stateVector: Uint8Array) {
using _lock = await this.server.lock.acquire();
const doc = this.server.db.get(docId);
if (!doc) {
return null;
}
const data = doc.data;
return {
data:
!isEmptyUpdate(data) && stateVector.length > 0
? diffUpdate(data, stateVector)
: data,
serverClock: 0,
stateVector: !isEmptyUpdate(data)
? encodeStateVectorFromUpdate(data)
: new Uint8Array(),
};
}
async pushDoc(
docId: string,
data: Uint8Array
): Promise<{ serverClock: number }> {
using _lock = await this.server.lock.acquire();
const doc = this.server.db.get(docId);
const oldData = doc?.data ?? new Uint8Array();
const newClock = (doc?.clock ?? 0) + 1;
this.server.db.set(docId, {
data: !isEmptyUpdate(data)
? !isEmptyUpdate(oldData)
? mergeUpdates([oldData, data])
: data
: oldData,
clock: newClock,
});
for (const { clientId, cb } of this.server.listeners) {
if (clientId !== this.id) {
cb({
docId,
data,
serverClock: newClock,
});
}
}
return { serverClock: newClock };
}
async loadServerClock(after: number): Promise<Map<string, number>> {
using _lock = await this.server.lock.acquire();
const map = new Map<string, number>();
for (const [docId, { clock }] of this.server.db) {
if (clock > after) {
map.set(docId, clock);
}
}
return map;
}
async subscribeAllDocs(
cb: (updates: {
docId: string;
data: Uint8Array;
serverClock: number;
}) => void
): Promise<() => void> {
const listener = { cb, clientId: this.id };
this.server.listeners.add(listener);
return () => {
this.server.listeners.delete(listener);
};
}
async waitForConnectingServer(): Promise<void> {}
disconnectServer(): void {}
onInterrupted(_cb: (reason: string) => void): void {}
}
describe('sync', () => {
test('basic sync', async () => {
const storage = new MemoryStorage();
const server = new MiniServer();
const engine = new DocEngine(storage, server.client()).start();
const doc = new YDoc({ guid: 'a' });
engine.addDoc(doc);
const map = doc.getMap('aaa');
map.set('a', 1);
await engine.waitForSynced();
expect(server.db.size).toBe(1);
expect(storage.docDb.keys().length).toBe(1);
});
test('can pull from server', async () => {
const server = new MiniServer();
{
const engine = new DocEngine(
new MemoryStorage(),
server.client()
).start();
const doc = new YDoc({ guid: 'a' });
engine.addDoc(doc);
const map = doc.getMap('aaa');
map.set('a', 1);
await engine.waitForSynced();
expect(server.db.size).toBe(1);
}
{
const engine = new DocEngine(
new MemoryStorage(),
server.client()
).start();
const doc = new YDoc({ guid: 'a' });
engine.addDoc(doc);
await engine.waitForSynced();
expect(doc.getMap('aaa').get('a')).toBe(1);
}
});
test('2 client', async () => {
const server = new MiniServer();
await Promise.all([
(async () => {
const engine = new DocEngine(
new MemoryStorage(),
server.client()
).start();
const doc = new YDoc({ guid: 'a' });
engine.addDoc(doc);
const map = doc.getMap('aaa');
map.set('a', 1);
await vitest.waitUntil(() => {
return map.get('b') === 2;
});
})(),
(async () => {
const engine = new DocEngine(
new MemoryStorage(),
server.client()
).start();
const doc = new YDoc({ guid: 'a' });
engine.addDoc(doc);
const map = doc.getMap('aaa');
map.set('b', 2);
await vitest.waitUntil(() => {
return map.get('a') === 1;
});
})(),
]);
});
test('2 client share storage and eventBus (simulate different tabs in same browser)', async () => {
const server = new MiniServer();
const storage = new MemoryStorage();
await Promise.all([
(async () => {
const engine = new DocEngine(storage, server.client()).start();
const doc = new YDoc({ guid: 'a' });
engine.addDoc(doc);
const map = doc.getMap('aaa');
map.set('a', 1);
await vitest.waitUntil(() => map.get('b') === 2);
})(),
(async () => {
const engine = new DocEngine(storage, server.client()).start();
const doc = new YDoc({ guid: 'a' });
engine.addDoc(doc);
const map = doc.getMap('aaa');
map.set('b', 2);
await vitest.waitUntil(() => map.get('a') === 1);
})(),
]);
});
test('legacy data', async () => {
const server = new MiniServer();
const storage = new MemoryStorage();
{
// write legacy data to storage
const doc = new YDoc({ guid: 'a' });
const map = doc.getMap('aaa');
map.set('a', 1);
await storage.doc.set('a', encodeStateAsUpdate(doc));
}
const engine = new DocEngine(storage, server.client()).start();
const doc = new YDoc({ guid: 'a' });
engine.addDoc(doc);
// should load to ydoc and save to server
await vitest.waitUntil(
() => doc.getMap('aaa').get('a') === 1 && server.db.size === 1
);
});
});

View File

@@ -0,0 +1,43 @@
import { PriorityQueue } from './priority-queue';
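// A PriorityQueue whose pop() can suspend until an item becomes available.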
export class AsyncPriorityQueue extends PriorityQueue {
private _resolveUpdate: (() => void) | null = null;
private _waitForUpdate: Promise<void> | null = null;
async asyncPop(abort?: AbortSignal): Promise<string> {
const update = this.pop();
if (update) {
return update;
} else {
if (!this._waitForUpdate) {
this._waitForUpdate = new Promise(resolve => {
this._resolveUpdate = resolve;
});
}
await Promise.race([
this._waitForUpdate,
new Promise((_, reject) => {
if (abort?.aborted) {
reject(abort?.reason);
}
abort?.addEventListener('abort', () => {
reject(abort.reason);
});
}),
]);
return this.asyncPop(abort);
}
}
override push(id: string, priority: number = 0) {
super.push(id, priority);
if (this._resolveUpdate) {
const resolve = this._resolveUpdate;
this._resolveUpdate = null;
this._waitForUpdate = null;
resolve();
}
}
}

View File

@@ -0,0 +1,32 @@
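// Tracks the newest known clock per doc id, plus the global maximum.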
export class ClockMap {
max: number = 0;
constructor(private readonly map: Map<string, number>) {
for (const value of map.values()) {
if (value > this.max) {
this.max = value;
}
}
}
get(id: string): number {
return this.map.get(id) ?? 0;
}
set(id: string, value: number) {
this.map.set(id, value);
if (value > this.max) {
this.max = value;
}
}
setIfBigger(id: string, value: number) {
if (value > this.get(id)) {
this.set(id, value);
}
}
clear() {
this.map.clear();
this.max = 0;
}
}

View File

@@ -0,0 +1,55 @@
export type DocEvent =
| {
type: 'ClientUpdateCommitted';
clientId: string;
docId: string;
update: Uint8Array;
seqNum: number;
}
| {
type: 'ServerUpdateCommitted';
docId: string;
update: Uint8Array;
clientId: string;
}
| {
type: 'LegacyClientUpdateCommitted';
docId: string;
update: Uint8Array;
};
export interface DocEventBus {
emit(event: DocEvent): void;
on(cb: (event: DocEvent) => void): () => void;
}
export class MemoryDocEventBus implements DocEventBus {
listeners = new Set<(event: DocEvent) => void>();
emit(event: DocEvent): void {
for (const listener of this.listeners) {
try {
listener(event);
} catch (e) {
console.error(e);
}
}
}
on(cb: (event: DocEvent) => void): () => void {
this.listeners.add(cb);
return () => {
this.listeners.delete(cb);
};
}
}
export class DocEventBusInner implements DocEventBus {
constructor(private readonly eventBusBehavior: DocEventBus) {}
emit(event: DocEvent) {
this.eventBusBehavior.emit(event);
}
on(cb: (event: DocEvent) => void) {
return this.eventBusBehavior.on(cb);
}
}

View File

@@ -0,0 +1,183 @@
import { DebugLogger } from '@affine/debug';
import { nanoid } from 'nanoid';
import { map } from 'rxjs';
import type { Doc as YDoc } from 'yjs';
import { LiveData } from '../../livedata';
import { MANUALLY_STOP } from '../../utils';
import { DocEngineLocalPart } from './local';
import { DocEngineRemotePart } from './remote';
import type { DocServer } from './server';
import type { DocStorage } from './storage';
import { DocStorageInner } from './storage';
const logger = new DebugLogger('doc-engine');
export type { DocEvent, DocEventBus } from './event';
export { MemoryDocEventBus } from './event';
export type { DocServer } from './server';
export type { DocStorage } from './storage';
export {
MemoryStorage as MemoryDocStorage,
ReadonlyStorage as ReadonlyDocStorage,
} from './storage';
export class DocEngine {
localPart: DocEngineLocalPart;
remotePart: DocEngineRemotePart | null;
storage: DocStorageInner;
engineState$ = LiveData.computed(get => {
const localState = get(this.localPart.engineState$);
if (this.remotePart) {
const remoteState = get(this.remotePart?.engineState$);
return {
total: remoteState.total,
syncing: remoteState.syncing,
saving: localState.syncing,
retrying: remoteState.retrying,
errorMessage: remoteState.errorMessage,
};
}
return {
total: localState.total,
syncing: localState.syncing,
saving: localState.syncing,
retrying: false,
errorMessage: null,
};
});
docState$(docId: string) {
const localState$ = this.localPart.docState$(docId);
const remoteState$ = this.remotePart?.docState$(docId);
return LiveData.computed(get => {
const local = get(localState$);
const remote = remoteState$ ? get(remoteState$) : null;
return {
ready: local.ready,
saving: local.syncing,
syncing: local.syncing || remote?.syncing,
};
});
}
constructor(
storage: DocStorage,
private readonly server?: DocServer | null
) {
const clientId = nanoid();
this.storage = new DocStorageInner(storage);
this.localPart = new DocEngineLocalPart(clientId, this.storage);
this.remotePart = this.server
? new DocEngineRemotePart(clientId, this.storage, this.server)
: null;
}
abort = new AbortController();
start() {
this.abort.abort(MANUALLY_STOP);
this.abort = new AbortController();
Promise.all([
this.localPart.mainLoop(this.abort.signal),
this.remotePart?.mainLoop(this.abort.signal),
]).catch(err => {
if (err === MANUALLY_STOP) {
return;
}
logger.error('Doc engine error', err);
});
return this;
}
stop() {
this.abort.abort(MANUALLY_STOP);
}
async resetSyncStatus() {
this.stop();
await this.storage.clearSyncMetadata();
await this.storage.clearServerClock();
}
addDoc(doc: YDoc, withSubDocs = true) {
this.localPart.actions.addDoc(doc);
this.remotePart?.actions.addDoc(doc.guid);
if (withSubDocs) {
const subdocs = doc.getSubdocs();
for (const subdoc of subdocs) {
this.addDoc(subdoc, false);
}
doc.on('subdocs', ({ added }: { added: Set<YDoc> }) => {
for (const subdoc of added) {
this.addDoc(subdoc, false);
}
});
}
}
setPriority(docId: string, priority: number) {
this.localPart.setPriority(docId, priority);
this.remotePart?.setPriority(docId, priority);
}
/**
* ## Saved:
* YDoc changes have been saved to storage, and the browser can be safely closed without losing data.
*/
waitForSaved() {
return new Promise<void>(resolve => {
this.engineState$
.pipe(map(state => state.saving === 0))
.subscribe(saved => {
if (saved) {
resolve();
}
});
});
}
  /**
   * ## Synced:
   * The engine is fully synchronized with the server.
   */
waitForSynced() {
return new Promise<void>(resolve => {
this.engineState$
.pipe(map(state => state.syncing === 0 && state.saving === 0))
.subscribe(synced => {
if (synced) {
resolve();
}
});
});
}
  /**
   * ## Ready:
   *
   * Means the doc has been loaded and its data can be modified.
   * (This is not enforced; you can still modify the doc if you know you are creating new data.)
   *
   * This is a temporary solution to deal with the yjs overwrite issue.
   *
   * Becomes true once content is loaded from storage or pulled from the server; otherwise it stays false.
   *
   * For example, when opening a doc that is not in storage, ready = false until the content is pulled from the server.
   */
waitForReady(docId: string) {
return new Promise<void>(resolve => {
this.docState$(docId)
.pipe(map(state => state.ready))
.subscribe(ready => {
if (ready) {
resolve();
}
});
});
}
}

View File

@@ -0,0 +1,303 @@
import { DebugLogger } from '@affine/debug';
import { Unreachable } from '@affine/env/constant';
import { groupBy } from 'lodash-es';
import { Observable, Subject } from 'rxjs';
import type { Doc as YDoc } from 'yjs';
import { applyUpdate, encodeStateAsUpdate, mergeUpdates } from 'yjs';
import { LiveData } from '../../livedata';
import { throwIfAborted } from '../../utils';
import { AsyncPriorityQueue } from './async-priority-queue';
import type { DocEvent } from './event';
import type { DocStorageInner } from './storage';
import { isEmptyUpdate } from './utils';
type Job =
| {
type: 'load';
docId: string;
}
| {
type: 'save';
docId: string;
update: Uint8Array;
}
| {
type: 'apply';
docId: string;
update: Uint8Array;
isInitialize: boolean;
};
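// origin tag for applyUpdate, so the engine can ignore its own updates in handleDocUpdate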
const DOC_ENGINE_ORIGIN = 'doc-engine';
const logger = new DebugLogger('doc-engine:local');
export interface LocalEngineState {
total: number;
syncing: number;
}
export interface LocalDocState {
ready: boolean;
syncing: boolean;
}
/**
 * Never fails.
 */
export class DocEngineLocalPart {
private readonly prioritySettings = new Map<string, number>();
private readonly statusUpdatedSubject$ = new Subject<string>();
private readonly status = {
docs: new Map<string, YDoc>(),
connectedDocs: new Set<string>(),
readyDocs: new Set<string>(),
jobDocQueue: new AsyncPriorityQueue(),
jobMap: new Map<string, Job[]>(),
currentJob: null as { docId: string; jobs: Job[] } | null,
};
engineState$ = LiveData.from<LocalEngineState>(
new Observable(subscribe => {
const next = () => {
subscribe.next({
total: this.status.docs.size,
syncing: this.status.jobMap.size + (this.status.currentJob ? 1 : 0),
});
};
next();
return this.statusUpdatedSubject$.subscribe(() => {
next();
});
}),
{ syncing: 0, total: 0 }
);
docState$(docId: string) {
return LiveData.from<LocalDocState>(
new Observable(subscribe => {
const next = () => {
subscribe.next({
            ready: this.status.readyDocs.has(docId),
syncing:
(this.status.jobMap.get(docId)?.length ?? 0) > 0 ||
this.status.currentJob?.docId === docId,
});
};
next();
return this.statusUpdatedSubject$.subscribe(updatedId => {
if (updatedId === docId) next();
});
}),
{ ready: false, syncing: false }
);
}
constructor(
private readonly clientId: string,
private readonly storage: DocStorageInner
) {}
async mainLoop(signal?: AbortSignal) {
const dispose = this.storage.eventBus.on(event => {
const handler = this.events[event.type];
if (handler) {
handler(event as any);
}
});
try {
// eslint-disable-next-line no-constant-condition
while (true) {
throwIfAborted(signal);
const docId = await this.status.jobDocQueue.asyncPop(signal);
const jobs = this.status.jobMap.get(docId);
this.status.jobMap.delete(docId);
if (!jobs) {
continue;
}
this.status.currentJob = { docId, jobs };
this.statusUpdatedSubject$.next(docId);
const { apply, load, save } = groupBy(jobs, job => job.type) as {
[key in Job['type']]?: Job[];
};
if (load?.length) {
await this.jobs.load(load[0] as any, signal);
}
for (const applyJob of apply ?? []) {
await this.jobs.apply(applyJob as any, signal);
}
if (save?.length) {
await this.jobs.save(docId, save as any, signal);
}
this.status.currentJob = null;
this.statusUpdatedSubject$.next(docId);
}
} finally {
dispose();
      for (const docId of this.status.connectedDocs) {
        const doc = this.status.docs.get(docId);
if (doc) {
doc.off('update', this.handleDocUpdate);
}
}
}
}
readonly actions = {
addDoc: (doc: YDoc) => {
this.schedule({
type: 'load',
docId: doc.guid,
});
this.status.docs.set(doc.guid, doc);
this.statusUpdatedSubject$.next(doc.guid);
},
};
readonly jobs = {
load: async (job: Job & { type: 'load' }, signal?: AbortSignal) => {
const doc = this.status.docs.get(job.docId);
if (!doc) {
throw new Unreachable('doc not found');
}
const existingData = encodeStateAsUpdate(doc);
if (!isEmptyUpdate(existingData)) {
this.schedule({
type: 'save',
docId: doc.guid,
update: existingData,
});
}
// mark doc as loaded
doc.emit('sync', [true, doc]);
doc.on('update', this.handleDocUpdate);
this.status.connectedDocs.add(job.docId);
this.statusUpdatedSubject$.next(job.docId);
const docData = await this.storage.loadDocFromLocal(job.docId, signal);
if (!docData || isEmptyUpdate(docData)) {
return;
}
this.applyUpdate(job.docId, docData);
this.status.readyDocs.add(job.docId);
this.statusUpdatedSubject$.next(job.docId);
},
save: async (
docId: string,
jobs: (Job & { type: 'save' })[],
signal?: AbortSignal
) => {
if (this.status.connectedDocs.has(docId)) {
const merged = mergeUpdates(
jobs.map(j => j.update).filter(update => !isEmptyUpdate(update))
);
const newSeqNum = await this.storage.commitDocAsClientUpdate(
docId,
merged,
signal
);
this.storage.eventBus.emit({
type: 'ClientUpdateCommitted',
seqNum: newSeqNum,
docId: docId,
clientId: this.clientId,
update: merged,
});
}
},
apply: async (job: Job & { type: 'apply' }, signal?: AbortSignal) => {
throwIfAborted(signal);
if (this.status.connectedDocs.has(job.docId)) {
this.applyUpdate(job.docId, job.update);
}
if (job.isInitialize && !isEmptyUpdate(job.update)) {
this.status.readyDocs.add(job.docId);
this.statusUpdatedSubject$.next(job.docId);
}
},
};
readonly events: {
[key in DocEvent['type']]?: (event: DocEvent & { type: key }) => void;
} = {
ServerUpdateCommitted: ({ docId, update, clientId }) => {
this.schedule({
type: 'apply',
docId,
update,
isInitialize: clientId === this.clientId,
});
},
ClientUpdateCommitted: ({ docId, update, clientId }) => {
if (clientId !== this.clientId) {
this.schedule({
type: 'apply',
docId,
update,
isInitialize: false,
});
}
},
LegacyClientUpdateCommitted: ({ docId, update }) => {
this.schedule({
type: 'save',
docId,
update,
});
},
};
handleDocUpdate = (update: Uint8Array, origin: any, doc: YDoc) => {
if (origin === DOC_ENGINE_ORIGIN) {
return;
}
this.schedule({
type: 'save',
docId: doc.guid,
update,
});
};
applyUpdate(docId: string, update: Uint8Array) {
const doc = this.status.docs.get(docId);
if (doc && !isEmptyUpdate(update)) {
try {
applyUpdate(doc, update, DOC_ENGINE_ORIGIN);
} catch (err) {
        logger.error('failed to apply update to doc', err);
}
}
}
schedule(job: Job) {
const priority = this.prioritySettings.get(job.docId) ?? 0;
this.status.jobDocQueue.push(job.docId, priority);
const existingJobs = this.status.jobMap.get(job.docId) ?? [];
existingJobs.push(job);
this.status.jobMap.set(job.docId, existingJobs);
this.statusUpdatedSubject$.next(job.docId);
}
setPriority(docId: string, priority: number) {
this.prioritySettings.set(docId, priority);
this.status.jobDocQueue.updatePriority(docId, priority);
}
}

View File

@@ -0,0 +1,69 @@
import { BinarySearchTree } from '@datastructures-js/binary-search-tree';
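// Max-first priority queue backed by a binary search tree; ties break on id order.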
export class PriorityQueue {
tree = new BinarySearchTree<{ id: string; priority: number }>((a, b) => {
return a.priority === b.priority
? a.id === b.id
? 0
: a.id > b.id
? 1
: -1
: a.priority - b.priority;
});
priorityMap = new Map<string, number>();
push(id: string, priority: number = 0) {
const oldPriority = this.priorityMap.get(id);
if (oldPriority === priority) {
return;
}
if (oldPriority !== undefined) {
this.remove(id);
}
this.tree.insert({ id, priority });
this.priorityMap.set(id, priority);
}
pop() {
const node = this.tree.max();
if (!node) {
return null;
}
this.tree.removeNode(node);
const { id } = node.getValue();
this.priorityMap.delete(id);
return id;
}
remove(id: string, priority?: number) {
priority ??= this.priorityMap.get(id);
if (priority === undefined) {
return false;
}
const removed = this.tree.remove({ id, priority });
if (removed) {
this.priorityMap.delete(id);
}
return removed;
}
clear() {
this.tree.clear();
this.priorityMap.clear();
}
updatePriority(id: string, priority: number) {
if (this.remove(id)) {
this.push(id, priority);
}
}
get length() {
return this.tree.count;
}
}

View File

@@ -0,0 +1,599 @@
import { DebugLogger } from '@affine/debug';
import { remove } from 'lodash-es';
import { Observable, Subject } from 'rxjs';
import { diffUpdate, encodeStateVectorFromUpdate, mergeUpdates } from 'yjs';
import { LiveData } from '../../livedata';
import { throwIfAborted } from '../../utils';
import { AsyncPriorityQueue } from './async-priority-queue';
import { ClockMap } from './clock';
import type { DocEvent } from './event';
import type { DocServer } from './server';
import type { DocStorageInner } from './storage';
import { isEmptyUpdate } from './utils';
const logger = new DebugLogger('doc-engine:remote');
type Job =
| {
type: 'connect';
docId: string;
}
| {
type: 'push';
docId: string;
update: Uint8Array;
seqNum: number;
}
| {
type: 'pull';
docId: string;
}
| {
type: 'pullAndPush';
docId: string;
}
| {
type: 'save';
docId: string;
update?: Uint8Array;
serverClock: number;
};
export interface Status {
docs: Set<string>;
connectedDocs: Set<string>;
jobDocQueue: AsyncPriorityQueue;
jobMap: Map<string, Job[]>;
serverClocks: ClockMap;
syncing: boolean;
retrying: boolean;
errorMessage: string | null;
}
export interface RemoteEngineState {
total: number;
syncing: number;
retrying: boolean;
errorMessage: string | null;
}
export interface RemoteDocState {
syncing: boolean;
}
export class DocEngineRemotePart {
private readonly prioritySettings = new Map<string, number>();
constructor(
private readonly clientId: string,
private readonly storage: DocStorageInner,
private readonly server: DocServer
) {}
private status: Status = {
docs: new Set<string>(),
connectedDocs: new Set<string>(),
jobDocQueue: new AsyncPriorityQueue(),
jobMap: new Map(),
serverClocks: new ClockMap(new Map()),
syncing: false,
retrying: false,
errorMessage: null,
};
private readonly statusUpdatedSubject$ = new Subject<string | true>();
engineState$ = LiveData.from<RemoteEngineState>(
new Observable(subscribe => {
const next = () => {
        if (!this.status.syncing) {
          // before the sync loop is running, report every doc as syncing
          subscribe.next({
            total: this.status.docs.size,
            syncing: this.status.docs.size,
            retrying: this.status.retrying,
            errorMessage: this.status.errorMessage,
          });
          return;
        }
const syncing = this.status.jobMap.size;
subscribe.next({
total: this.status.docs.size,
syncing: syncing,
retrying: this.status.retrying,
errorMessage: this.status.errorMessage,
});
};
next();
return this.statusUpdatedSubject$.subscribe(() => {
next();
});
}),
{
syncing: 0,
total: 0,
retrying: false,
errorMessage: null,
}
);
docState$(docId: string) {
return LiveData.from<RemoteDocState>(
new Observable(subscribe => {
const next = () => {
subscribe.next({
syncing:
!this.status.connectedDocs.has(docId) ||
this.status.jobMap.has(docId),
});
};
next();
return this.statusUpdatedSubject$.subscribe(updatedId => {
if (updatedId === true || updatedId === docId) next();
});
}),
{ syncing: false }
);
}
readonly jobs = {
connect: async (docId: string, signal?: AbortSignal) => {
const pushedSeqNum = await this.storage.loadDocSeqNumPushed(
docId,
signal
);
const seqNum = await this.storage.loadDocSeqNum(docId, signal);
if (pushedSeqNum === null || pushedSeqNum !== seqNum) {
await this.jobs.pullAndPush(docId, signal);
} else {
const pulled = await this.storage.loadDocServerClockPulled(docId);
if (
pulled === null ||
pulled !== this.status.serverClocks.get(normalizeServerDocId(docId))
) {
await this.jobs.pull(docId, signal);
}
}
this.status.connectedDocs.add(docId);
this.statusUpdatedSubject$.next(docId);
},
push: async (
docId: string,
jobs: (Job & { type: 'push' })[],
signal?: AbortSignal
) => {
if (this.status.connectedDocs.has(docId)) {
const maxSeqNum = Math.max(...jobs.map(j => j.seqNum));
const pushedSeqNum =
(await this.storage.loadDocSeqNumPushed(docId, signal)) ?? 0;
if (maxSeqNum - pushedSeqNum === jobs.length) {
const merged = mergeUpdates(
jobs.map(j => j.update).filter(update => !isEmptyUpdate(update))
);
if (!isEmptyUpdate(merged)) {
const { serverClock } = await this.server.pushDoc(docId, merged);
this.schedule({
type: 'save',
docId,
serverClock,
});
}
await this.storage.saveDocPushedSeqNum(
docId,
{ add: jobs.length },
signal
);
} else {
          // another tab may be modifying the doc; do a full pull-and-push for safety
await this.jobs.pullAndPush(docId, signal);
}
}
},
pullAndPush: async (docId: string, signal?: AbortSignal) => {
const seqNum = await this.storage.loadDocSeqNum(docId, signal);
const data = await this.storage.loadDocFromLocal(docId, signal);
const stateVector =
data && !isEmptyUpdate(data)
? encodeStateVectorFromUpdate(data)
: new Uint8Array();
const serverData = await this.server.pullDoc(docId, stateVector);
if (serverData) {
const {
data: newData,
stateVector: serverStateVector,
serverClock,
} = serverData;
await this.storage.saveServerClock(
new Map([[normalizeServerDocId(docId), serverClock]]),
signal
);
this.actions.updateServerClock(
normalizeServerDocId(docId),
serverClock
);
await this.storage.commitDocAsServerUpdate(
docId,
newData,
serverClock,
signal
);
this.storage.eventBus.emit({
type: 'ServerUpdateCommitted',
docId,
clientId: this.clientId,
update: newData,
});
const diff =
data && serverStateVector && serverStateVector.length > 0
? diffUpdate(data, serverStateVector)
: data;
if (diff && !isEmptyUpdate(diff)) {
const { serverClock } = await this.server.pushDoc(docId, diff);
this.schedule({
type: 'save',
docId,
serverClock,
});
}
await this.storage.saveDocPushedSeqNum(docId, seqNum, signal);
} else {
if (data && !isEmptyUpdate(data)) {
const { serverClock } = await this.server.pushDoc(docId, data);
await this.storage.saveDocServerClockPulled(
docId,
serverClock,
signal
);
await this.storage.saveServerClock(
new Map([[normalizeServerDocId(docId), serverClock]]),
signal
);
this.actions.updateServerClock(
normalizeServerDocId(docId),
serverClock
);
}
await this.storage.saveDocPushedSeqNum(docId, seqNum, signal);
}
},
pull: async (docId: string, signal?: AbortSignal) => {
const data = await this.storage.loadDocFromLocal(docId, signal);
const stateVector =
data && !isEmptyUpdate(data)
? encodeStateVectorFromUpdate(data)
: new Uint8Array();
const serverDoc = await this.server.pullDoc(docId, stateVector);
if (!serverDoc) {
return;
}
const { data: newData, serverClock } = serverDoc;
await this.storage.commitDocAsServerUpdate(
docId,
newData,
serverClock,
signal
);
this.storage.eventBus.emit({
type: 'ServerUpdateCommitted',
docId,
clientId: this.clientId,
update: newData,
});
await this.storage.saveServerClock(
new Map([[normalizeServerDocId(docId), serverClock]]),
signal
);
this.actions.updateServerClock(normalizeServerDocId(docId), serverClock);
},
save: async (
docId: string,
jobs: (Job & { type: 'save' })[],
signal?: AbortSignal
) => {
const serverClock = jobs.reduce((a, b) => Math.max(a, b.serverClock), 0);
await this.storage.saveServerClock(
new Map([[normalizeServerDocId(docId), serverClock]]),
signal
);
this.actions.updateServerClock(normalizeServerDocId(docId), serverClock);
if (this.status.connectedDocs.has(docId)) {
const data = jobs
.map(j => j.update)
.filter((update): update is Uint8Array =>
update ? !isEmptyUpdate(update) : false
);
const update = data.length > 0 ? mergeUpdates(data) : new Uint8Array();
await this.storage.commitDocAsServerUpdate(
docId,
update,
serverClock,
signal
);
this.storage.eventBus.emit({
type: 'ServerUpdateCommitted',
docId,
clientId: this.clientId,
update,
});
}
},
};
readonly actions = {
updateServerClock: (docId: string, serverClock: number) => {
this.status.serverClocks.setIfBigger(docId, serverClock);
},
addDoc: (docId: string) => {
if (!this.status.docs.has(docId)) {
this.status.docs.add(docId);
this.statusUpdatedSubject$.next(docId);
this.schedule({
type: 'connect',
docId,
});
}
},
};
readonly events: {
[key in DocEvent['type']]?: (event: DocEvent & { type: key }) => void;
} = {
ClientUpdateCommitted: ({ clientId, docId, seqNum, update }) => {
if (clientId !== this.clientId) {
return;
}
this.schedule({
type: 'push',
docId,
update,
seqNum,
});
},
};
async mainLoop(signal?: AbortSignal) {
// eslint-disable-next-line no-constant-condition
while (true) {
try {
this.status.retrying = false;
await this.retryLoop(signal);
} catch (err) {
if (signal?.aborted) {
return;
}
logger.error('Remote sync error, retry in 5s', err);
this.status.errorMessage =
err instanceof Error ? err.message : `${err}`;
this.statusUpdatedSubject$.next(true);
} finally {
this.status = {
docs: this.status.docs,
connectedDocs: new Set<string>(),
jobDocQueue: new AsyncPriorityQueue(),
jobMap: new Map(),
serverClocks: new ClockMap(new Map()),
syncing: false,
retrying: true,
errorMessage: this.status.errorMessage,
};
this.statusUpdatedSubject$.next(true);
}
await Promise.race([
new Promise<void>(resolve => {
setTimeout(resolve, 5 * 1000);
}),
new Promise((_, reject) => {
// exit if manually stopped
if (signal?.aborted) {
reject(signal.reason);
}
signal?.addEventListener('abort', () => {
reject(signal.reason);
});
}),
]);
}
}
async retryLoop(signal?: AbortSignal) {
throwIfAborted(signal);
const abort = new AbortController();
    signal?.addEventListener('abort', () => {
      abort.abort(signal.reason);
    });
signal = abort.signal;
const disposes: (() => void)[] = [];
try {
disposes.push(
this.storage.eventBus.on(event => {
const handler = this.events[event.type];
handler?.(event as any);
})
);
throwIfAborted(signal);
for (const doc of this.status.docs) {
this.schedule({
type: 'connect',
docId: doc,
});
}
logger.info('Remote sync started');
this.status.syncing = true;
this.statusUpdatedSubject$.next(true);
this.server.onInterrupted(reason => {
abort.abort(reason);
});
await Promise.race([
this.server.waitForConnectingServer(signal),
new Promise<void>((_, reject) => {
setTimeout(() => {
reject(new Error('Connect to server timeout'));
}, 1000 * 30);
}),
new Promise((_, reject) => {
        signal?.addEventListener('abort', () => {
          reject(signal.reason);
        });
}),
]);
throwIfAborted(signal);
disposes.push(
await this.server.subscribeAllDocs(({ docId, data, serverClock }) => {
this.schedule({
type: 'save',
docId: docId,
serverClock,
update: data,
});
})
);
const cachedClocks = await this.storage.loadServerClock(signal);
for (const [id, v] of cachedClocks) {
this.actions.updateServerClock(id, v);
}
const maxClockValue = this.status.serverClocks.max;
const newClocks = await this.server.loadServerClock(maxClockValue);
for (const [id, v] of newClocks) {
this.actions.updateServerClock(id, v);
}
await this.storage.saveServerClock(newClocks, signal);
// eslint-disable-next-line no-constant-condition
while (true) {
throwIfAborted(signal);
const docId = await this.status.jobDocQueue.asyncPop(signal);
// eslint-disable-next-line no-constant-condition
while (true) {
const jobs = this.status.jobMap.get(docId);
if (!jobs || jobs.length === 0) {
this.status.jobMap.delete(docId);
this.statusUpdatedSubject$.next(docId);
break;
}
const connect = remove(jobs, j => j.type === 'connect');
if (connect && connect.length > 0) {
await this.jobs.connect(docId, signal);
continue;
}
const pullAndPush = remove(jobs, j => j.type === 'pullAndPush');
if (pullAndPush && pullAndPush.length > 0) {
await this.jobs.pullAndPush(docId, signal);
continue;
}
const pull = remove(jobs, j => j.type === 'pull');
if (pull && pull.length > 0) {
await this.jobs.pull(docId, signal);
continue;
}
const push = remove(jobs, j => j.type === 'push');
if (push && push.length > 0) {
await this.jobs.push(
docId,
push as (Job & { type: 'push' })[],
signal
);
continue;
}
const save = remove(jobs, j => j.type === 'save');
if (save && save.length > 0) {
await this.jobs.save(
docId,
save as (Job & { type: 'save' })[],
signal
);
continue;
}
}
}
} finally {
for (const dispose of disposes) {
dispose();
}
try {
this.server.disconnectServer();
} catch (err) {
logger.error('Error on disconnect server', err);
}
this.status.syncing = false;
logger.info('Remote sync ended');
}
}
schedule(job: Job) {
const priority = this.prioritySettings.get(job.docId) ?? 0;
this.status.jobDocQueue.push(job.docId, priority);
const existingJobs = this.status.jobMap.get(job.docId) ?? [];
existingJobs.push(job);
this.status.jobMap.set(job.docId, existingJobs);
this.statusUpdatedSubject$.next(job.docId);
}
setPriority(docId: string, priority: number) {
this.prioritySettings.set(docId, priority);
this.status.jobDocQueue.updatePriority(docId, priority);
}
}
// use normalized id in server clock
function normalizeServerDocId(raw: string) {
enum DocVariant {
Workspace = 'workspace',
Page = 'page',
Space = 'space',
Settings = 'settings',
Unknown = 'unknown',
}
try {
if (!raw.length) {
throw new Error('Invalid Empty Doc ID');
}
let parts = raw.split(':');
if (parts.length > 3) {
      // special case: adapt the format `wsId:space:page:pageId`
if (parts[1] === DocVariant.Space && parts[2] === DocVariant.Page) {
parts = [parts[0], DocVariant.Space, parts[3]];
} else {
throw new Error(`Invalid format of Doc ID: ${raw}`);
}
} else if (parts.length === 2) {
// `${variant}:${guid}`
throw new Error('not supported');
} else if (parts.length === 1) {
// ${ws} or ${pageId}
parts = ['', DocVariant.Unknown, parts[0]];
}
const docId = parts.at(2);
if (!docId) {
throw new Error('ID is required');
}
return docId;
} catch (err) {
logger.error('Error on normalize docId ' + raw, err);
return raw;
}
}

View File

@@ -0,0 +1,26 @@
export interface DocServer {
pullDoc(
docId: string,
stateVector: Uint8Array
): Promise<{
data: Uint8Array;
serverClock: number;
stateVector?: Uint8Array;
} | null>;
pushDoc(docId: string, data: Uint8Array): Promise<{ serverClock: number }>;
loadServerClock(after: number): Promise<Map<string, number>>;
subscribeAllDocs(
cb: (updates: {
docId: string;
data: Uint8Array;
serverClock: number;
}) => void
): Promise<() => void>;
waitForConnectingServer(signal: AbortSignal): Promise<void>;
disconnectServer(): void;
onInterrupted(cb: (reason: string) => void): void;
}

View File

@@ -0,0 +1,359 @@
import type { ByteKV, Memento } from '../../storage';
import { MemoryMemento, ReadonlyByteKV, wrapMemento } from '../../storage';
import { AsyncLock, mergeUpdates, throwIfAborted } from '../../utils';
import type { DocEventBus } from '.';
import { DocEventBusInner, MemoryDocEventBus } from './event';
import { isEmptyUpdate } from './utils';
export interface DocStorage {
eventBus: DocEventBus;
doc: ByteKV;
syncMetadata: ByteKV;
serverClock: ByteKV;
}
const Keys = {
SeqNum: (docId: string) => `${docId}:seqNum`,
SeqNumPushed: (docId: string) => `${docId}:seqNumPushed`,
ServerClockPulled: (docId: string) => `${docId}:serverClockPulled`,
UpdatedTime: (docId: string) => `${docId}:updateTime`,
};
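// counters are persisted as 8-byte big-endian unsigned integers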
const Values = {
UInt64: {
parse: (buffer: Uint8Array) => {
const view = new DataView(buffer.buffer);
return Number(view.getBigUint64(0, false));
},
serialize: (value: number) => {
const buffer = new ArrayBuffer(8);
const view = new DataView(buffer);
view.setBigUint64(0, BigInt(value), false);
return new Uint8Array(buffer);
},
},
};
export class DocStorageInner {
public readonly eventBus = new DocEventBusInner(this.behavior.eventBus);
constructor(public readonly behavior: DocStorage) {}
async loadServerClock(signal?: AbortSignal): Promise<Map<string, number>> {
throwIfAborted(signal);
const list = await this.behavior.serverClock.keys();
const map = new Map<string, number>();
for (const key of list) {
const docId = key;
const value = await this.behavior.serverClock.get(key);
if (value) {
map.set(docId, Values.UInt64.parse(value));
}
}
return map;
}
async saveServerClock(map: Map<string, number>, signal?: AbortSignal) {
throwIfAborted(signal);
await this.behavior.serverClock.transaction(async transaction => {
for (const [docId, value] of map) {
const key = docId;
const oldBuffer = await transaction.get(key);
const old = oldBuffer ? Values.UInt64.parse(oldBuffer) : 0;
if (old < value) {
await transaction.set(key, Values.UInt64.serialize(value));
}
}
});
}
async loadDocSeqNum(docId: string, signal?: AbortSignal) {
throwIfAborted(signal);
const bytes = await this.behavior.syncMetadata.get(Keys.SeqNum(docId));
if (bytes === null) {
return 0;
}
return Values.UInt64.parse(bytes);
}
async saveDocSeqNum(
docId: string,
seqNum: number | true,
signal?: AbortSignal
) {
throwIfAborted(signal);
return await this.behavior.syncMetadata.transaction(async transaction => {
const key = Keys.SeqNum(docId);
const oldBytes = await transaction.get(key);
const old = oldBytes ? Values.UInt64.parse(oldBytes) : 0;
if (seqNum === true) {
await transaction.set(key, Values.UInt64.serialize(old + 1));
return old + 1;
}
if (old < seqNum) {
await transaction.set(key, Values.UInt64.serialize(seqNum));
return seqNum;
}
return old;
});
}
async loadDocSeqNumPushed(docId: string, signal?: AbortSignal) {
throwIfAborted(signal);
const bytes = await this.behavior.syncMetadata.get(
Keys.SeqNumPushed(docId)
);
if (bytes === null) {
return null;
}
return Values.UInt64.parse(bytes);
}
async saveDocPushedSeqNum(
docId: string,
seqNum: number | { add: number },
signal?: AbortSignal
) {
throwIfAborted(signal);
await this.behavior.syncMetadata.transaction(async transaction => {
const key = Keys.SeqNumPushed(docId);
const oldBytes = await transaction.get(key);
const old = oldBytes ? Values.UInt64.parse(oldBytes) : null;
if (typeof seqNum === 'object') {
return transaction.set(
key,
Values.UInt64.serialize((old ?? 0) + seqNum.add)
);
}
if (old === null || old < seqNum) {
return transaction.set(key, Values.UInt64.serialize(seqNum));
}
});
}
async loadDocServerClockPulled(docId: string, signal?: AbortSignal) {
throwIfAborted(signal);
const bytes = await this.behavior.syncMetadata.get(
Keys.ServerClockPulled(docId)
);
    if (bytes === null) {
      return null;
    }
    return Values.UInt64.parse(bytes);
}
async saveDocServerClockPulled(
docId: string,
serverClock: number,
signal?: AbortSignal
) {
throwIfAborted(signal);
await this.behavior.syncMetadata.transaction(async transaction => {
const oldBytes = await transaction.get(Keys.ServerClockPulled(docId));
const old = oldBytes ? Values.UInt64.parse(oldBytes) : null;
if (old === null || old < serverClock) {
await transaction.set(
Keys.ServerClockPulled(docId),
Values.UInt64.serialize(serverClock)
);
}
});
}
async loadDocFromLocal(docId: string, signal?: AbortSignal) {
throwIfAborted(signal);
return await this.behavior.doc.get(docId);
}
/**
* Confirm that server updates are applied in the order they occur!!!
*/
async commitDocAsServerUpdate(
docId: string,
update: Uint8Array,
serverClock: number,
signal?: AbortSignal
) {
throwIfAborted(signal);
await this.behavior.doc.transaction(async tx => {
const data = await tx.get(docId);
await tx.set(
docId,
data && !isEmptyUpdate(data)
? !isEmptyUpdate(update)
? mergeUpdates([data, update])
: data
: update
);
});
await this.saveDocServerClockPulled(docId, serverClock);
}
async commitDocAsClientUpdate(
docId: string,
update: Uint8Array,
signal?: AbortSignal
) {
throwIfAborted(signal);
await this.behavior.doc.transaction(async tx => {
const data = await tx.get(docId);
await tx.set(
docId,
data && !isEmptyUpdate(data)
? !isEmptyUpdate(update)
? mergeUpdates([data, update])
: data
: update
);
});
return await this.saveDocSeqNum(docId, true);
}
clearSyncMetadata() {
return this.behavior.syncMetadata.clear();
}
async clearServerClock() {
return this.behavior.serverClock.clear();
}
}
export class ReadonlyStorage implements DocStorage {
constructor(
private readonly map: {
[key: string]: Uint8Array;
}
) {}
eventBus = new MemoryDocEventBus();
doc = new ReadonlyByteKV(new Map(Object.entries(this.map)));
serverClock = new ReadonlyByteKV();
syncMetadata = new ReadonlyByteKV();
}
export class MemoryStorage implements DocStorage {
constructor(private readonly memo: Memento = new MemoryMemento()) {}
eventBus = new MemoryDocEventBus();
lock = new AsyncLock();
readonly docDb = wrapMemento(this.memo, 'doc:');
readonly syncMetadataDb = wrapMemento(this.memo, 'syncMetadata:');
readonly serverClockDb = wrapMemento(this.memo, 'serverClock:');
readonly doc = {
transaction: async cb => {
using _lock = await this.lock.acquire();
return await cb({
get: async key => {
return this.docDb.get(key) ?? null;
},
set: async (key, value) => {
this.docDb.set(key, value);
},
keys: async () => {
return Array.from(this.docDb.keys());
},
clear: () => {
this.docDb.clear();
},
del: key => {
this.docDb.del(key);
},
});
},
get(key) {
return this.transaction(async tx => tx.get(key));
},
set(key, value) {
return this.transaction(async tx => tx.set(key, value));
},
keys() {
return this.transaction(async tx => tx.keys());
},
clear() {
return this.transaction(async tx => tx.clear());
},
del(key) {
return this.transaction(async tx => tx.del(key));
},
} satisfies ByteKV;
readonly syncMetadata = {
transaction: async cb => {
using _lock = await this.lock.acquire();
return await cb({
get: async key => {
return this.syncMetadataDb.get(key) ?? null;
},
set: async (key, value) => {
this.syncMetadataDb.set(key, value);
},
keys: async () => {
return Array.from(this.syncMetadataDb.keys());
},
clear: () => {
this.syncMetadataDb.clear();
},
del: key => {
this.syncMetadataDb.del(key);
},
});
},
get(key) {
return this.transaction(async tx => tx.get(key));
},
set(key, value) {
return this.transaction(async tx => tx.set(key, value));
},
keys() {
return this.transaction(async tx => tx.keys());
},
clear() {
return this.transaction(async tx => tx.clear());
},
del(key) {
return this.transaction(async tx => tx.del(key));
},
} satisfies ByteKV;
readonly serverClock = {
transaction: async cb => {
using _lock = await this.lock.acquire();
return await cb({
get: async key => {
return this.serverClockDb.get(key) ?? null;
},
set: async (key, value) => {
this.serverClockDb.set(key, value);
},
keys: async () => {
return Array.from(this.serverClockDb.keys());
},
clear: () => {
this.serverClockDb.clear();
},
del: key => {
this.serverClockDb.del(key);
},
});
},
get(key) {
return this.transaction(async tx => tx.get(key));
},
set(key, value) {
return this.transaction(async tx => tx.set(key, value));
},
keys() {
return this.transaction(async tx => tx.keys());
},
clear() {
return this.transaction(async tx => tx.clear());
},
del(key) {
return this.transaction(async tx => tx.del(key));
},
} satisfies ByteKV;
}

View File

@@ -0,0 +1,6 @@
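// yjs encodes an empty update as the two bytes [0x00, 0x00]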
export function isEmptyUpdate(binary: Uint8Array) {
return (
binary.byteLength === 0 ||
(binary.byteLength === 2 && binary[0] === 0 && binary[1] === 0)
);
}

View File

@@ -0,0 +1,6 @@
export type { AwarenessConnection } from './awareness';
export { AwarenessEngine } from './awareness';
export type { BlobStatus, BlobStorage } from './blob/blob';
export { BlobEngine, EmptyBlobStorage } from './blob/blob';
export { BlobStorageOverCapacity } from './blob/error';
export * from './doc';