feat(core): new worker workspace engine (#9257)
@@ -50,7 +50,6 @@ describe('message', () => {
       removeEventListener: vi.fn(),
     };
     ctx.handler = new CustomMessageHandler(ctx.receivePort);
-    ctx.handler.listen();
   });

   it('should ignore unknown message type', ctx => {

@@ -162,16 +162,19 @@ export class OpClient<Ops extends OpSchema> extends AutoMessageHandler {
     op: Op,
     ...args: OpInput<Ops, Op>
   ): Observable<Out> {
-    const payload = args[0];
-
-    const msg = {
-      type: 'subscribe',
-      id: this.nextCallId(op),
-      name: op as string,
-      payload,
-    } satisfies SubscribeMessage;
-
     const sub$ = new Observable<Out>(ob => {
+      const payload = args[0];
+
+      const msg = {
+        type: 'subscribe',
+        id: this.nextCallId(op),
+        name: op as string,
+        payload,
+      } satisfies SubscribeMessage;
+
+      const transferables = fetchTransferables(payload);
+      this.port.postMessage(msg, { transfer: transferables });
+
       this.obs.set(msg.id, ob);

       return () => {
@@ -184,9 +187,6 @@ export class OpClient<Ops extends OpSchema> extends AutoMessageHandler {
       };
     });

-    const transferables = fetchTransferables(payload);
-    this.port.postMessage(msg, { transfer: transferables });
-
     return sub$;
   }

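// Editorial aside — a minimal RxJS sketch (not part of this commit) of why the
// message construction above moved inside the Observable factory: Observables
// are cold, so the factory body runs once per subscriber, giving each
// subscription its own call id instead of one id shared by all subscribers.
import { Observable } from 'rxjs';

let nextCallId = 0;
const calls$ = new Observable<number>(ob => {
  const id = nextCallId++; // evaluated per subscription, not at definition time
  ob.next(id);
});

calls$.subscribe(id => console.log(id)); // logs 0
calls$.subscribe(id => console.log(id)); // logs 1 — a fresh id per subscriber
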
@@ -1,6 +1,7 @@
 import EventEmitter2 from 'eventemitter2';
 import { defer, from, fromEvent, Observable, of, take, takeUntil } from 'rxjs';

+import { MANUALLY_STOP } from '../utils';
 import {
   AutoMessageHandler,
   type CallMessage,
@@ -45,7 +46,7 @@ export class OpConsumer<Ops extends OpSchema> extends AutoMessageHandler {
     };
   }

-  private readonly handleCallMessage: MessageHandlers['call'] = async msg => {
+  private readonly handleCallMessage: MessageHandlers['call'] = msg => {
     const abortController = new AbortController();
     this.processing.set(msg.id, abortController);

@@ -119,7 +120,7 @@ export class OpConsumer<Ops extends OpSchema> extends AutoMessageHandler {
       return;
     }

-    abortController.abort();
+    abortController.abort(MANUALLY_STOP);
   };

   register<Op extends OpNames<Ops>>(op: Op, handler: OpHandler<Ops, Op>) {
@@ -181,7 +182,7 @@ export class OpConsumer<Ops extends OpSchema> extends AutoMessageHandler {
     super.close();
     this.registeredOpHandlers.clear();
     this.processing.forEach(controller => {
-      controller.abort();
+      controller.abort(MANUALLY_STOP);
     });
     this.processing.clear();
     this.eventBus.removeAllListeners();

@@ -134,7 +134,9 @@ export abstract class AutoMessageHandler {
   private listening = false;
   protected abstract handlers: Partial<MessageHandlers>;

-  constructor(protected readonly port: MessageCommunicapable) {}
+  constructor(protected readonly port: MessageCommunicapable) {
+    this.listen();
+  }

   protected handleMessage = ignoreUnknownEvent((msg: Messages) => {
     const handler = this.handlers[msg.type];
@@ -145,7 +147,7 @@ export abstract class AutoMessageHandler {
     handler(msg as any);
   });

-  listen() {
+  protected listen() {
     if (this.listening) {
       return;
     }

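// Editorial aside — a small standalone illustration (not part of this commit)
// of what passing MANUALLY_STOP to abort() buys: AbortController.abort(reason)
// exposes the reason on signal.reason, so handlers can distinguish a
// deliberate shutdown from a genuine failure.
const MANUAL_STOP = 'manually-stop'; // stand-in for the MANUALLY_STOP constant
const controller = new AbortController();
controller.signal.addEventListener('abort', () => {
  if (controller.signal.reason === MANUAL_STOP) {
    // requested shutdown — exit quietly instead of reporting an error
  }
});
controller.abort(MANUAL_STOP);
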
@@ -1,136 +0,0 @@
import { nanoid } from 'nanoid';
import {
  afterEach,
  beforeEach,
  describe,
  expect,
  test as t,
  type TestAPI,
  vitest,
} from 'vitest';
import { Doc } from 'yjs';

import { DocEngine } from '../../../sync';
import { MiniSyncServer } from '../../../sync/doc/__tests__/utils';
import { MemoryStorage } from '../../../sync/doc/storage';
import { createORMClient, type DBSchemaBuilder, f, YjsDBAdapter } from '../';

const TEST_SCHEMA = {
  tags: {
    id: f.string().primaryKey().default(nanoid),
    name: f.string(),
    color: f.string().optional(),
    colors: f.json<string[]>().optional(),
  },
} satisfies DBSchemaBuilder;

const ORMClient = createORMClient(TEST_SCHEMA);

type Context = {
  server: MiniSyncServer;
  user1: {
    client: InstanceType<typeof ORMClient>;
    engine: DocEngine;
  };
  user2: {
    client: InstanceType<typeof ORMClient>;
    engine: DocEngine;
  };
};

function createEngine(server: MiniSyncServer) {
  return new DocEngine(new MemoryStorage(), server.client());
}

async function createClient(server: MiniSyncServer, clientId: number) {
  const engine = createEngine(server);
  const Client = createORMClient(TEST_SCHEMA);

  // define the hooks
  Client.defineHook('tags', 'migrate field `color` to field `colors`', {
    deserialize(data) {
      if (!data.colors && data.color) {
        data.colors = [data.color];
      }

      return data;
    },
  });

  const client = new Client(
    new YjsDBAdapter(TEST_SCHEMA, {
      getDoc(guid: string) {
        const doc = new Doc({ guid });
        doc.clientID = clientId;
        engine.addDoc(doc);
        return doc;
      },
    })
  );

  return {
    engine,
    client,
  };
}

beforeEach<Context>(async t => {
  t.server = new MiniSyncServer();
  // we set user2's clientId greater than user1's clientId,
  // so all conflicts will be resolved to user2's changes
  t.user1 = await createClient(t.server, 1);
  t.user2 = await createClient(t.server, 2);

  t.user1.engine.start();
  t.user2.engine.start();
});

afterEach<Context>(async t => {
  t.user1.engine.stop();
  t.user2.engine.stop();
});

const test = t as TestAPI<Context>;

describe('ORM compatibility in synchronization scenario', () => {
  test('2 clients create at the same time', async t => {
    const { user1, user2 } = t;
    const tag1 = user1.client.tags.create({
      name: 'tag1',
      color: 'blue',
    });

    const tag2 = user2.client.tags.create({
      name: 'tag2',
      color: 'red',
    });

    await vitest.waitFor(() => {
      expect(user1.client.tags.keys()).toHaveLength(2);
      expect(user2.client.tags.keys()).toHaveLength(2);
    });

    expect(user2.client.tags.get(tag1.id)).toStrictEqual(tag1);
    expect(user1.client.tags.get(tag2.id)).toStrictEqual(tag2);
  });

  test('2 clients updating the same entity', async t => {
    const { user1, user2 } = t;
    const tag = user1.client.tags.create({
      name: 'tag1',
      color: 'blue',
    });

    await vitest.waitFor(() => {
      expect(user2.client.tags.keys()).toHaveLength(1);
    });

    user1.client.tags.update(tag.id, { color: 'red' });
    user2.client.tags.update(tag.id, { color: 'gray' });

    await vitest.waitFor(() => {
      expect(user1.client.tags.get(tag.id)).toHaveProperty('color', 'gray');
      expect(user2.client.tags.get(tag.id)).toHaveProperty('color', 'gray');
    });
  });
});

@@ -1,23 +0,0 @@
import type { Awareness } from 'y-protocols/awareness.js';

export interface AwarenessConnection {
  connect(awareness: Awareness): void;
  disconnect(): void;
  dispose?(): void;
}

export class AwarenessEngine {
  constructor(public readonly connections: AwarenessConnection[]) {}

  connect(awareness: Awareness) {
    this.connections.forEach(connection => connection.connect(awareness));
  }

  disconnect() {
    this.connections.forEach(connection => connection.disconnect());
  }

  dispose() {
    this.connections.forEach(connection => connection.dispose?.());
  }
}

@@ -1,264 +0,0 @@
import { DebugLogger } from '@affine/debug';
import EventEmitter2 from 'eventemitter2';
import { difference } from 'lodash-es';

import { LiveData } from '../../livedata';
import type { Memento } from '../../storage';
import { MANUALLY_STOP } from '../../utils';
import { BlobStorageOverCapacity } from './error';

const logger = new DebugLogger('affine:blob-engine');

export interface BlobStorage {
  name: string;
  readonly: boolean;
  get: (key: string) => Promise<Blob | null>;
  set: (key: string, value: Blob) => Promise<string>;
  delete: (key: string) => Promise<void>;
  list: () => Promise<string[]>;
}

export interface BlobStatus {
  isStorageOverCapacity: boolean;
}

/**
 * # BlobEngine
 *
 * Syncs blobs between storages in the background.
 *
 * All operations prefer the local storage first, then fall back to the remotes.
 */
export class BlobEngine {
  readonly name = 'blob-engine';
  readonly readonly = this.local.readonly;
  readonly event = new EventEmitter2();

  private abort: AbortController | null = null;

  readonly isStorageOverCapacity$ = new LiveData(false);

  singleBlobSizeLimit: number = 100 * 1024 * 1024;
  onAbortLargeBlob = (callback: (blob: Blob) => void) => {
    this.event.on('abort-large-blob', callback);
    return () => {
      this.event.off('abort-large-blob', callback);
    };
  };

  constructor(
    private readonly local: BlobStorage,
    private readonly remotes: BlobStorage[]
  ) {}

  start() {
    if (this.abort || this.isStorageOverCapacity$.value) {
      return;
    }
    this.abort = new AbortController();
    const abortSignal = this.abort.signal;

    const sync = () => {
      if (abortSignal.aborted) {
        return;
      }

      this.sync()
        .catch(error => {
          logger.error('sync blob error', error);
        })
        .finally(() => {
          // sync every 1 minute
          setTimeout(sync, 60000);
        });
    };

    sync();
  }

  stop() {
    this.abort?.abort(MANUALLY_STOP);
    this.abort = null;
  }

  get storages() {
    return [this.local, ...this.remotes];
  }

  async sync() {
    if (this.local.readonly) {
      return;
    }
    logger.debug('start syncing blob...');
    for (const remote of this.remotes) {
      let localList: string[] = [];
      let remoteList: string[] = [];

      if (!remote.readonly) {
        try {
          localList = await this.local.list();
          remoteList = await remote.list();
        } catch (err) {
          logger.error(`error when sync`, err);
          continue;
        }

        const needUpload = difference(localList, remoteList);
        for (const key of needUpload) {
          try {
            const data = await this.local.get(key);
            if (data) {
              await remote.set(key, data);
            }
          } catch (err) {
            logger.error(
              `error when sync ${key} from [${this.local.name}] to [${remote.name}]`,
              err
            );
          }
        }
      }

      const needDownload = difference(remoteList, localList);

      for (const key of needDownload) {
        try {
          const data = await remote.get(key);
          if (data) {
            await this.local.set(key, data);
          }
        } catch (err) {
          if (err instanceof BlobStorageOverCapacity) {
            this.isStorageOverCapacity$.value = true;
          }
          logger.error(
            `error when sync ${key} from [${remote.name}] to [${this.local.name}]`,
            err
          );
        }
      }
    }

    logger.debug('finish syncing blob');
  }

  async get(key: string) {
    logger.debug('get blob', key);
    for (const storage of this.storages) {
      const data = await storage.get(key);
      if (data) {
        return data;
      }
    }
    return null;
  }

  async set(key: string, value: Blob) {
    if (this.local.readonly) {
      throw new Error('local peer is readonly');
    }

    if (value.size > this.singleBlobSizeLimit) {
      this.event.emit('abort-large-blob', value);
      logger.error('blob over limit, abort set');
      return key;
    }

    // await the upload to the local peer
    await this.local.set(key, value);

    // upload to other peers in the background
    Promise.allSettled(
      this.remotes
        .filter(r => !r.readonly)
        .map(peer =>
          peer.set(key, value).catch(err => {
            logger.error('Error when uploading to peer', err);
          })
        )
    )
      .then(result => {
        if (result.some(({ status }) => status === 'rejected')) {
          logger.error(
            `blob ${key} update finished, but some peers failed to update`
          );
        } else {
          logger.debug(`blob ${key} update finished`);
        }
      })
      .catch(() => {
        // Promise.allSettled never rejects
      });

    return key;
  }

  async delete(_key: string) {
    // not supported
  }

  async list() {
    const blobList = new Set<string>();

    for (const peer of this.storages) {
      const list = await peer.list();
      if (list) {
        for (const blob of list) {
          blobList.add(blob);
        }
      }
    }

    return Array.from(blobList);
  }
}

export const EmptyBlobStorage: BlobStorage = {
  name: 'empty',
  readonly: true,
  async get(_key: string) {
    return null;
  },
  async set(_key: string, _value: Blob) {
    throw new Error('not supported');
  },
  async delete(_key: string) {
    throw new Error('not supported');
  },
  async list() {
    return [];
  },
};

export class MemoryBlobStorage implements BlobStorage {
  name = 'testing';
  readonly = false;

  constructor(private readonly state: Memento) {}

  get(key: string) {
    return Promise.resolve(this.state.get<Blob>(key) ?? null);
  }
  set(key: string, value: Blob) {
    this.state.set(key, value);

    const list = this.state.get<Set<string>>('list') ?? new Set<string>();
    list.add(key);
    this.state.set('list', list);

    return Promise.resolve(key);
  }
  delete(key: string) {
    this.state.set(key, null);

    const list = this.state.get<Set<string>>('list') ?? new Set<string>();
    list.delete(key);
    this.state.set('list', list);

    return Promise.resolve();
  }
  list() {
    const list = this.state.get<Set<string>>('list');
    return Promise.resolve(list ? Array.from(list) : []);
  }
}

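// Editorial aside — a hypothetical usage sketch (not part of the original
// file), with a minimal Memento-like stub purely for illustration.
async function blobEngineDemo() {
  const memo = new Map<string, any>();
  const state = {
    get: <T>(key: string) => memo.get(key) as T | undefined,
    set: (key: string, value: any) => void memo.set(key, value),
  } as unknown as Memento;

  const engine = new BlobEngine(new MemoryBlobStorage(state), [
    EmptyBlobStorage, // read-only remote: downloads only, uploads are skipped
  ]);
  engine.start(); // kicks off the one-minute background sync loop
  await engine.set('avatar', new Blob(['...'])); // local write, then background mirroring
  const blob = await engine.get('avatar'); // checks local first, then remotes
  engine.stop();
  return blob;
}
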
@@ -1,5 +0,0 @@
export class BlobStorageOverCapacity extends Error {
  constructor(public originError?: any) {
    super('Blob storage over capacity.');
  }
}

@@ -1,127 +0,0 @@
# DocEngine

The synchronization algorithm for yjs docs.

```
┌─────────┐   ┌───────────┐   ┌────────┐
│ Storage │◄──┤ DocEngine ├──►│ Server │
└─────────┘   └───────────┘   └────────┘
```

# Core Components

## DocStorage

```ts
export interface DocStorage {
  eventBus: DocEventBus;
  doc: ByteKV;
  syncMetadata: ByteKV;
  serverClock: ByteKV;
}
```

Represents the local storage used. Specific implementations are replaceable, such as `IndexedDBDocStorage` in the `browser` and `SqliteDocStorage` on the `desktop`.

### DocEventBus

Each `DocStorage` contains a `DocEventBus`, which is used to communicate with other engines that share the same storage.

With `DocEventBus`, we can sync updates between engines without connecting to the server.

For example, in the `browser` we may have multiple tabs that all share the same `IndexedDBDocStorage`, so we use `BroadcastChannel` to implement `DocEventBus`, which lets us broadcast events to all tabs.

In the `desktop` app, if multiple windows share the same `SqliteDocStorage`, we must build a mechanism to broadcast events across all windows (currently not implemented).
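
A minimal sketch of what a `BroadcastChannel`-backed bus could look like (an illustration, not the exact implementation in this repository):

```ts
// Sketch only: DocEvent payloads must be structured-cloneable to cross tabs.
export class BroadcastChannelDocEventBus implements DocEventBus {
  private readonly channel = new BroadcastChannel('doc-engine-events');

  emit(event: DocEvent): void {
    this.channel.postMessage(event); // delivered to every other tab
  }

  on(cb: (event: DocEvent) => void): () => void {
    const listener = (e: MessageEvent) => cb(e.data as DocEvent);
    this.channel.addEventListener('message', listener);
    return () => this.channel.removeEventListener('message', listener);
  }
}
```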

## DocServer

```ts
export interface DocServer {
  pullDoc(
    docId: string,
    stateVector: Uint8Array
  ): Promise<{
    data: Uint8Array;
    serverClock: number;
    stateVector?: Uint8Array;
  } | null>;

  pushDoc(docId: string, data: Uint8Array): Promise<{ serverClock: number }>;

  subscribeAllDocs(
    cb: (updates: {
      docId: string;
      data: Uint8Array;
      serverClock: number;
    }) => void
  ): Promise<() => void>;

  loadServerClock(after: number): Promise<Map<string, number>>;

  waitForConnectingServer(signal: AbortSignal): Promise<void>;
  disconnectServer(): void;
  onInterrupted(cb: (reason: string) => void): void;
}
```

Represents the server we want to synchronize with. There is a simulated implementation in `tests/sync.spec.ts`, and the real implementation lives in `packages/backend/server`.

### ServerClock

`ServerClock` is a clock generated after each update is stored on the server. It determines the order in which updates were stored on the server.

The `DocEngine` decides whether to pull updates from the server based on the `ServerClock`.

A `ServerClock` written later must be **greater** than all previously written ones, so on the client side we can call `loadServerClock(the largest ServerClock previously received)` to obtain the `ServerClock` of every doc that has changed since.
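
As an illustration, a minimal polling sketch of that idea (hypothetical names; the real engine is event-driven and more careful about persistence):

```ts
let maxSeenServerClock = 0;

async function findChangedDocs(server: DocServer) {
  // ask only for docs whose ServerClock is newer than anything seen so far
  const changed = await server.loadServerClock(maxSeenServerClock);
  for (const clock of changed.values()) {
    maxSeenServerClock = Math.max(maxSeenServerClock, clock);
  }
  return changed; // Map of docId -> ServerClock, candidates for a pull
}
```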

## DocEngine

The `DocEngine` is where all the synchronization logic actually happens.

Due to the complexity of the implementation, we divide it into two parts.

## DocEngine - LocalPart

Synchronizes **the `YDoc` instance** and **storage**.

The typical workflow (sketched right after this list) is:

1. load data from storage and apply it to the `YDoc` instance;
2. track `YDoc` changes;
3. write the changes back to storage.
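
A minimal sketch of this loop, assuming a simple byte-oriented storage (illustrative names, not the real `DocStorage` API):

```ts
import { applyUpdate, Doc as YDoc } from 'yjs';

interface SimpleStorage {
  load(id: string): Promise<Uint8Array | null>;
  save(id: string, update: Uint8Array): Promise<void>;
}

async function attach(doc: YDoc, storage: SimpleStorage) {
  // 1. load data from storage and apply it to the YDoc instance
  const data = await storage.load(doc.guid);
  if (data) applyUpdate(doc, data, 'doc-engine');

  // 2-3. track YDoc changes and write them back, skipping the
  // engine's own writes by checking the update origin
  doc.on('update', (update: Uint8Array, origin: unknown) => {
    if (origin !== 'doc-engine') {
      storage.save(doc.guid, update).catch(console.error);
    }
  });
}
```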

### SeqNum

There is a `SeqNum` on each doc's data in `Storage`. Every time `LocalPart` writes data, the `SeqNum` is incremented by 1.

There is also a `PushedSeqNum`, which is used by the `RemotePart` described below.

## DocEngine - RemotePart

Synchronizes `Storage` and `Server`.

The typical workflow is:

1. Connect to the server, load the `ServerClock` for all docs, and start subscribing to server-side updates.

2. Check whether each doc requires a `push` or a `pull`.

3. Execute all pushes and pulls.

4. Listen for updates from `LocalPart` and push them to the server.

5. Listen for server-side updates and write them to storage.

### PushedSeqNum

Each doc records a `PushedSeqNum`, used to determine whether the doc has unpushed updates.

After each `push` completes, `PushedSeqNum` is incremented by 1.

If `PushedSeqNum` and `SeqNum` still differ after a push completes (which usually means a previous `push` failed), we do a full pull-and-push and set `PushedSeqNum` = `SeqNum`.
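
A hedged sketch of that check (hypothetical storage accessors, not the real API):

```ts
async function afterPush(
  docId: string,
  store: {
    seqNum(id: string): Promise<number>;
    pushedSeqNum(id: string): Promise<number>;
    setPushedSeqNum(id: string, n: number): Promise<void>;
  },
  fullPullAndPush: (id: string) => Promise<void>
) {
  const seqNum = await store.seqNum(docId);
  const pushed = await store.pushedSeqNum(docId);
  if (pushed !== seqNum) {
    // a previous push likely failed; resynchronize from scratch
    await fullPullAndPush(docId);
    await store.setPushedSeqNum(docId, seqNum);
  }
}
```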

### PulledServerClock

Each doc also records a `PulledServerClock`, which is compared with the `ServerClock` to determine whether to `pull` the doc.

When a `pull` completes, we set `PulledServerClock` = the `ServerClock` returned by the server.

### Retry

The `RemotePart` may fail at any time; its built-in retry mechanism restarts the process 5 seconds after a failure.
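
A minimal sketch of such a retry wrapper (illustrative only):

```ts
async function runWithRetry(
  mainLoop: (signal: AbortSignal) => Promise<void>,
  signal: AbortSignal
) {
  while (!signal.aborted) {
    try {
      await mainLoop(signal);
    } catch (err) {
      console.error('remote part failed, retrying in 5s', err);
      await new Promise(resolve => setTimeout(resolve, 5000));
    }
  }
}
```
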
@@ -1,41 +0,0 @@
import { describe, expect, test } from 'vitest';

import { PriorityQueue } from '../priority-queue';

describe('Priority Queue', () => {
  test('priority', () => {
    const queue = new PriorityQueue();

    queue.push('foo', 1);
    queue.push('bar', 2);
    queue.push('baz', 0);

    expect(queue.pop()).toBe('bar');
    expect(queue.pop()).toBe('foo');
    expect(queue.pop()).toBe('baz');
    expect(queue.pop()).toBe(null);

    queue.push('B', 1);
    queue.push('A', 1);

    // if priorities are equal, pop follows descending binary order of the id
    expect(queue.pop()).toBe('B');
    expect(queue.pop()).toBe('A');
    expect(queue.pop()).toBe(null);

    queue.push('A', 1);
    queue.push('B', 2);
    queue.push('A', 3); // same id but different priority: update the priority

    expect(queue.pop()).toBe('A');
    expect(queue.pop()).toBe('B');
    expect(queue.pop()).toBe(null);

    queue.push('A', 1);
    queue.push('B', 2);
    queue.remove('B');

    expect(queue.pop()).toBe('A');
    expect(queue.pop()).toBe(null);
  });
});

@@ -1,128 +0,0 @@
import { describe, expect, test, vitest } from 'vitest';
import { Doc as YDoc, encodeStateAsUpdate } from 'yjs';

import { DocEngine } from '..';
import { MemoryStorage } from '../storage';
import { MiniSyncServer } from './utils';

describe('sync', () => {
  test('basic sync', async () => {
    const storage = new MemoryStorage();
    const server = new MiniSyncServer();
    const engine = new DocEngine(storage, server.client()).start();
    const doc = new YDoc({ guid: 'a' });
    engine.addDoc(doc);
    const map = doc.getMap('aaa');
    map.set('a', 1);

    await engine.waitForSynced();
    expect(server.db.size).toBe(1);
    expect(storage.docDb.keys().length).toBe(1);
  });

  test('can pull from server', async () => {
    const server = new MiniSyncServer();
    {
      const engine = new DocEngine(
        new MemoryStorage(),
        server.client()
      ).start();
      const doc = new YDoc({ guid: 'a' });
      engine.addDoc(doc);
      const map = doc.getMap('aaa');
      map.set('a', 1);
      await engine.waitForSynced();
      expect(server.db.size).toBe(1);
    }
    {
      const engine = new DocEngine(
        new MemoryStorage(),
        server.client()
      ).start();
      const doc = new YDoc({ guid: 'a' });
      engine.addDoc(doc);
      await engine.waitForSynced();
      expect(doc.getMap('aaa').get('a')).toBe(1);
    }
  });

  test('2 client', async () => {
    const server = new MiniSyncServer();
    await Promise.all([
      (async () => {
        const engine = new DocEngine(
          new MemoryStorage(),
          server.client()
        ).start();
        const doc = new YDoc({ guid: 'a' });
        engine.addDoc(doc);
        const map = doc.getMap('aaa');
        map.set('a', 1);
        await vitest.waitUntil(() => {
          return map.get('b') === 2;
        });
      })(),
      (async () => {
        const engine = new DocEngine(
          new MemoryStorage(),
          server.client()
        ).start();
        const doc = new YDoc({ guid: 'a' });
        engine.addDoc(doc);
        const map = doc.getMap('aaa');
        map.set('b', 2);
        await vitest.waitUntil(() => {
          return map.get('a') === 1;
        });
      })(),
    ]);
  });

  test('2 client share storage and eventBus (simulate different tabs in same browser)', async () => {
    const server = new MiniSyncServer();
    const storage = new MemoryStorage();

    await Promise.all([
      (async () => {
        const engine = new DocEngine(storage, server.client()).start();
        const doc = new YDoc({ guid: 'a' });
        engine.addDoc(doc);

        const map = doc.getMap('aaa');
        map.set('a', 1);
        await vitest.waitUntil(() => map.get('b') === 2);
      })(),
      (async () => {
        const engine = new DocEngine(storage, server.client()).start();
        const doc = new YDoc({ guid: 'a' });
        engine.addDoc(doc);
        const map = doc.getMap('aaa');
        map.set('b', 2);
        await vitest.waitUntil(() => map.get('a') === 1);
      })(),
    ]);
  });

  test('legacy data', async () => {
    const server = new MiniSyncServer();
    const storage = new MemoryStorage();

    {
      // write legacy data to storage
      const doc = new YDoc({ guid: 'a' });
      const map = doc.getMap('aaa');
      map.set('a', 1);

      await storage.doc.set('a', encodeStateAsUpdate(doc));
    }

    const engine = new DocEngine(storage, server.client()).start();
    const doc = new YDoc({ guid: 'a' });
    engine.addDoc(doc);

    // should load to ydoc and save to server
    await vitest.waitUntil(
      () => doc.getMap('aaa').get('a') === 1 && server.db.size === 1
    );
  });
});

@@ -1,108 +0,0 @@
import { nanoid } from 'nanoid';
import { diffUpdate, encodeStateVectorFromUpdate, mergeUpdates } from 'yjs';

import { AsyncLock } from '../../../utils';
import type { DocServer } from '../server';
import { isEmptyUpdate } from '../utils';

export class MiniSyncServer {
  lock = new AsyncLock();
  db = new Map<string, { data: Uint8Array; clock: number }>();
  listeners = new Set<{
    cb: (updates: {
      docId: string;
      data: Uint8Array;
      serverClock: number;
    }) => void;
    clientId: string;
  }>();

  client() {
    return new MiniServerClient(nanoid(), this);
  }
}

export class MiniServerClient implements DocServer {
  constructor(
    private readonly id: string,
    private readonly server: MiniSyncServer
  ) {}

  async pullDoc(docId: string, stateVector: Uint8Array) {
    using _lock = await this.server.lock.acquire();
    const doc = this.server.db.get(docId);
    if (!doc) {
      return null;
    }
    const data = doc.data;
    return {
      data:
        !isEmptyUpdate(data) && stateVector.length > 0
          ? diffUpdate(data, stateVector)
          : data,
      serverClock: 0,
      stateVector: !isEmptyUpdate(data)
        ? encodeStateVectorFromUpdate(data)
        : new Uint8Array(),
    };
  }

  async pushDoc(
    docId: string,
    data: Uint8Array
  ): Promise<{ serverClock: number }> {
    using _lock = await this.server.lock.acquire();
    const doc = this.server.db.get(docId);
    const oldData = doc?.data ?? new Uint8Array();
    const newClock = (doc?.clock ?? 0) + 1;
    this.server.db.set(docId, {
      data: !isEmptyUpdate(data)
        ? !isEmptyUpdate(oldData)
          ? mergeUpdates([oldData, data])
          : data
        : oldData,
      clock: newClock,
    });
    for (const { clientId, cb } of this.server.listeners) {
      if (clientId !== this.id) {
        cb({
          docId,
          data,
          serverClock: newClock,
        });
      }
    }
    return { serverClock: newClock };
  }

  async loadServerClock(after: number): Promise<Map<string, number>> {
    using _lock = await this.server.lock.acquire();
    const map = new Map<string, number>();

    for (const [docId, { clock }] of this.server.db) {
      if (clock > after) {
        map.set(docId, clock);
      }
    }

    return map;
  }

  async subscribeAllDocs(
    cb: (updates: {
      docId: string;
      data: Uint8Array;
      serverClock: number;
    }) => void
  ): Promise<() => void> {
    const listener = { cb, clientId: this.id };
    this.server.listeners.add(listener);
    return () => {
      this.server.listeners.delete(listener);
    };
  }

  async waitForConnectingServer(): Promise<void> {}
  disconnectServer(): void {}
  onInterrupted(_cb: (reason: string) => void): void {}
}

@@ -1,43 +0,0 @@
import { PriorityQueue } from './priority-queue';

export class AsyncPriorityQueue extends PriorityQueue {
  private _resolveUpdate: (() => void) | null = null;
  private _waitForUpdate: Promise<void> | null = null;

  async asyncPop(abort?: AbortSignal): Promise<string> {
    const update = this.pop();
    if (update) {
      return update;
    } else {
      if (!this._waitForUpdate) {
        this._waitForUpdate = new Promise(resolve => {
          this._resolveUpdate = resolve;
        });
      }

      await Promise.race([
        this._waitForUpdate,
        new Promise((_, reject) => {
          if (abort?.aborted) {
            reject(abort?.reason);
          }
          abort?.addEventListener('abort', () => {
            reject(abort.reason);
          });
        }),
      ]);

      return this.asyncPop(abort);
    }
  }

  override push(id: string, priority: number = 0) {
    super.push(id, priority);
    if (this._resolveUpdate) {
      const resolve = this._resolveUpdate;
      this._resolveUpdate = null;
      this._waitForUpdate = null;
      resolve();
    }
  }
}

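// Editorial aside — a hypothetical consumer sketch (not part of the original
// file): asyncPop suspends until something is pushed and rejects with the
// abort reason when the signal fires, which is what the engines' main loops
// rely on.
async function drain(queue: AsyncPriorityQueue, signal: AbortSignal) {
  while (!signal.aborted) {
    const docId = await queue.asyncPop(signal); // waits while the queue is empty
    console.log('next doc to process:', docId);
  }
}
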
@@ -1,32 +0,0 @@
export class ClockMap {
  max: number = 0;
  constructor(private readonly map: Map<string, number>) {
    for (const value of map.values()) {
      if (value > this.max) {
        this.max = value;
      }
    }
  }

  get(id: string): number {
    return this.map.get(id) ?? 0;
  }

  set(id: string, value: number) {
    this.map.set(id, value);
    if (value > this.max) {
      this.max = value;
    }
  }

  setIfBigger(id: string, value: number) {
    if (value > this.get(id)) {
      this.set(id, value);
    }
  }

  clear() {
    this.map.clear();
    this.max = 0;
  }
}

@@ -1,50 +0,0 @@
export type DocEvent =
  | {
      type: 'ClientUpdateCommitted';
      clientId: string;
      docId: string;
      update: Uint8Array;
      seqNum: number;
    }
  | {
      type: 'ServerUpdateCommitted';
      docId: string;
      update: Uint8Array;
      clientId: string;
    };

export interface DocEventBus {
  emit(event: DocEvent): void;
  on(cb: (event: DocEvent) => void): () => void;
}

export class MemoryDocEventBus implements DocEventBus {
  listeners = new Set<(event: DocEvent) => void>();
  emit(event: DocEvent): void {
    for (const listener of this.listeners) {
      try {
        listener(event);
      } catch (e) {
        console.error(e);
      }
    }
  }
  on(cb: (event: DocEvent) => void): () => void {
    this.listeners.add(cb);
    return () => {
      this.listeners.delete(cb);
    };
  }
}

export class DocEventBusInner implements DocEventBus {
  constructor(private readonly eventBusBehavior: DocEventBus) {}

  emit(event: DocEvent) {
    this.eventBusBehavior.emit(event);
  }

  on(cb: (event: DocEvent) => void) {
    return this.eventBusBehavior.on(cb);
  }
}

@@ -1,232 +0,0 @@
import { DebugLogger } from '@affine/debug';
import { nanoid } from 'nanoid';
import { map } from 'rxjs';
import type { Doc as YDoc } from 'yjs';

import { LiveData } from '../../livedata';
import { MANUALLY_STOP } from '../../utils';
import { DocEngineLocalPart } from './local';
import { DocEngineRemotePart } from './remote';
import type { DocServer } from './server';
import type { DocStorage } from './storage';
import { DocStorageInner } from './storage';

const logger = new DebugLogger('doc-engine');

export type { DocEvent, DocEventBus } from './event';
export { MemoryDocEventBus } from './event';
export type { DocServer } from './server';
export type { DocStorage } from './storage';
export {
  MemoryStorage as MemoryDocStorage,
  ReadonlyStorage as ReadonlyDocStorage,
} from './storage';

export interface DocEngineDocState {
  /**
   * is syncing with the server
   */
  syncing: boolean;
  /**
   * is saving to local storage
   */
  saving: boolean;
  /**
   * is loading from local storage
   */
  loading: boolean;
  retrying: boolean;
  ready: boolean;
  errorMessage: string | null;
  serverClock: number | null;
}

export class DocEngine {
  readonly clientId: string;
  localPart: DocEngineLocalPart;
  remotePart: DocEngineRemotePart | null;

  storage: DocStorageInner;

  engineState$ = LiveData.computed(get => {
    const localState = get(this.localPart.engineState$);
    if (this.remotePart) {
      const remoteState = get(this.remotePart?.engineState$);
      return {
        total: remoteState.total,
        syncing: remoteState.syncing,
        saving: localState.syncing,
        retrying: remoteState.retrying,
        errorMessage: remoteState.errorMessage,
      };
    }
    return {
      total: localState.total,
      syncing: localState.syncing,
      saving: localState.syncing,
      retrying: false,
      errorMessage: null,
    };
  });

  docState$(docId: string) {
    const localState$ = this.localPart.docState$(docId);
    const remoteState$ = this.remotePart?.docState$(docId);
    return LiveData.computed<DocEngineDocState>(get => {
      const localState = get(localState$);
      const remoteState = remoteState$ ? get(remoteState$) : null;
      if (remoteState) {
        return {
          syncing: remoteState.syncing,
          saving: localState.syncing,
          loading: localState.syncing,
          retrying: remoteState.retrying,
          ready: localState.ready,
          errorMessage: remoteState.errorMessage,
          serverClock: remoteState.serverClock,
        };
      }
      return {
        syncing: localState.syncing,
        saving: localState.syncing,
        loading: localState.syncing,
        ready: localState.ready,
        retrying: false,
        errorMessage: null,
        serverClock: null,
      };
    });
  }

  markAsReady(docId: string) {
    this.localPart.actions.markAsReady(docId);
  }

  constructor(
    storage: DocStorage,
    private readonly server?: DocServer | null
  ) {
    this.clientId = nanoid();
    this.storage = new DocStorageInner(storage);
    this.localPart = new DocEngineLocalPart(this.clientId, this.storage);
    this.remotePart = this.server
      ? new DocEngineRemotePart(this.clientId, this.storage, this.server)
      : null;
  }

  abort = new AbortController();

  start() {
    this.abort.abort(MANUALLY_STOP);
    this.abort = new AbortController();
    Promise.all([
      this.localPart.mainLoop(this.abort.signal),
      this.remotePart?.mainLoop(this.abort.signal),
    ]).catch(err => {
      if (err === MANUALLY_STOP) {
        return;
      }
      logger.error('Doc engine error', err);
    });
    return this;
  }

  stop() {
    this.abort.abort(MANUALLY_STOP);
  }

  async resetSyncStatus() {
    this.stop();
    await this.storage.clearSyncMetadata();
    await this.storage.clearServerClock();
  }

  addDoc(doc: YDoc, withSubDocs = true) {
    this.remotePart?.actions.addDoc(doc.guid);
    this.localPart.actions.addDoc(doc);

    if (withSubDocs) {
      doc.on('subdocs', ({ added, loaded }) => {
        // added: the subdocs that exist on the ydoc
        // loaded: the subdocs that have had `ydoc.load()` called
        //
        // we add all existing subdocs to the remote part, letting them sync
        // between storage and server, but only add loaded subdocs to the local
        // part, letting them sync between storage and ydoc; syncing data into
        // a ydoc consumes more memory, so we only sync the ydocs that are
        // actually needed.
        for (const subdoc of added) {
          this.remotePart?.actions.addDoc(subdoc.guid);
        }
        for (const subdoc of loaded) {
          this.localPart.actions.addDoc(subdoc);
        }
      });
    }
  }

  setPriority(docId: string, priority: number) {
    this.localPart.setPriority(docId, priority);
    this.remotePart?.setPriority(docId, priority);
  }

  /**
   * ## Saved:
   * YDoc changes have been saved to storage, and the browser can be safely closed without losing data.
   */
  waitForSaved() {
    return new Promise<void>(resolve => {
      this.engineState$
        .pipe(map(state => state.saving === 0))
        .subscribe(saved => {
          if (saved) {
            resolve();
          }
        });
    });
  }

  /**
   * ## Synced:
   * is fully synchronized with the server
   */
  waitForSynced() {
    return new Promise<void>(resolve => {
      this.engineState$
        .pipe(map(state => state.syncing === 0 && state.saving === 0))
        .subscribe(synced => {
          if (synced) {
            resolve();
          }
        });
    });
  }

  /**
   * ## Ready:
   *
   * means that the doc has been loaded and its data can be modified.
   * (this is not enforced; you can still modify it if you know you are creating new data)
   *
   * this is a temporary solution to deal with the yjs overwrite issue.
   *
   * it becomes true once the content has been loaded from storage
   * or pulled from the server, and is false otherwise.
   *
   * For example, when opening a doc that is not in storage, ready = false until the content is pulled from the server.
   */
  waitForReady(docId: string) {
    return new Promise<void>(resolve => {
      this.docState$(docId)
        .pipe(map(state => state.ready))
        .subscribe(ready => {
          if (ready) {
            resolve();
          }
        });
    });
  }

  dispose() {
    this.stop();
    this.server?.dispose?.();
  }
}

@@ -1,302 +0,0 @@
import { DebugLogger } from '@affine/debug';
import { Unreachable } from '@affine/env/constant';
import { groupBy } from 'lodash-es';
import { Observable, Subject } from 'rxjs';
import type { Doc as YDoc } from 'yjs';
import { applyUpdate, encodeStateAsUpdate, mergeUpdates } from 'yjs';

import { LiveData } from '../../livedata';
import { throwIfAborted } from '../../utils';
import { AsyncPriorityQueue } from './async-priority-queue';
import type { DocEvent } from './event';
import type { DocStorageInner } from './storage';
import { isEmptyUpdate } from './utils';

type Job =
  | {
      type: 'load';
      docId: string;
    }
  | {
      type: 'save';
      docId: string;
      update: Uint8Array;
    }
  | {
      type: 'apply';
      docId: string;
      update: Uint8Array;
      isInitialize: boolean;
    };

const DOC_ENGINE_ORIGIN = 'doc-engine';

const logger = new DebugLogger('doc-engine:local');

export interface LocalEngineState {
  total: number;
  syncing: number;
}

export interface LocalDocState {
  ready: boolean;
  loading: boolean;
  syncing: boolean;
}

/**
 * never fails
 */
export class DocEngineLocalPart {
  private readonly prioritySettings = new Map<string, number>();
  private readonly statusUpdatedSubject$ = new Subject<string>();

  private readonly status = {
    docs: new Map<string, YDoc>(),
    connectedDocs: new Set<string>(),
    readyDocs: new Set<string>(),
    jobDocQueue: new AsyncPriorityQueue(),
    jobMap: new Map<string, Job[]>(),
    currentJob: null as { docId: string; jobs: Job[] } | null,
  };

  engineState$ = LiveData.from<LocalEngineState>(
    new Observable(subscribe => {
      const next = () => {
        subscribe.next({
          total: this.status.docs.size,
          syncing: this.status.jobMap.size + (this.status.currentJob ? 1 : 0),
        });
      };
      next();
      return this.statusUpdatedSubject$.subscribe(() => {
        next();
      });
    }),
    { syncing: 0, total: 0 }
  );

  docState$(docId: string) {
    return LiveData.from<LocalDocState>(
      new Observable(subscribe => {
        const next = () => {
          subscribe.next({
            ready: this.status.readyDocs.has(docId) ?? false,
            loading: this.status.connectedDocs.has(docId),
            syncing:
              (this.status.jobMap.get(docId)?.length ?? 0) > 0 ||
              this.status.currentJob?.docId === docId,
          });
        };
        next();
        return this.statusUpdatedSubject$.subscribe(updatedId => {
          if (updatedId === docId) next();
        });
      }),
      { ready: false, loading: false, syncing: false }
    );
  }

  constructor(
    private readonly clientId: string,
    private readonly storage: DocStorageInner
  ) {}

  async mainLoop(signal?: AbortSignal) {
    const dispose = this.storage.eventBus.on(event => {
      const handler = this.events[event.type];
      if (handler) {
        handler(event as any);
      }
    });
    try {
      // eslint-disable-next-line no-constant-condition
      while (true) {
        throwIfAborted(signal);
        const docId = await this.status.jobDocQueue.asyncPop(signal);
        const jobs = this.status.jobMap.get(docId);
        this.status.jobMap.delete(docId);

        if (!jobs) {
          continue;
        }

        this.status.currentJob = { docId, jobs };
        this.statusUpdatedSubject$.next(docId);

        const { apply, load, save } = groupBy(jobs, job => job.type) as {
          [key in Job['type']]?: Job[];
        };

        if (load?.length) {
          await this.jobs.load(load[0] as any, signal);
        }

        for (const applyJob of apply ?? []) {
          await this.jobs.apply(applyJob as any, signal);
        }

        if (save?.length) {
          await this.jobs.save(docId, save as any, signal);
        }

        this.status.currentJob = null;
        this.statusUpdatedSubject$.next(docId);
      }
    } finally {
      dispose();

      for (const docs of this.status.connectedDocs) {
        const doc = this.status.docs.get(docs);
        if (doc) {
          doc.off('update', this.handleDocUpdate);
        }
      }
    }
  }

  readonly actions = {
    addDoc: (doc: YDoc) => {
      this.schedule({
        type: 'load',
        docId: doc.guid,
      });

      this.status.docs.set(doc.guid, doc);
      this.statusUpdatedSubject$.next(doc.guid);
    },
    markAsReady: (docId: string) => {
      this.status.readyDocs.add(docId);
      this.statusUpdatedSubject$.next(docId);
    },
  };

  readonly jobs = {
    load: async (job: Job & { type: 'load' }, signal?: AbortSignal) => {
      const doc = this.status.docs.get(job.docId);
      if (!doc) {
        throw new Unreachable('doc not found');
      }
      const existingData = encodeStateAsUpdate(doc);

      if (!isEmptyUpdate(existingData)) {
        this.schedule({
          type: 'save',
          docId: doc.guid,
          update: existingData,
        });
      }

      // mark doc as loaded
      doc.emit('sync', [true, doc]);
      doc.on('update', this.handleDocUpdate);

      this.status.connectedDocs.add(job.docId);
      this.statusUpdatedSubject$.next(job.docId);

      const docData = await this.storage.loadDocFromLocal(job.docId, signal);

      if (!docData || isEmptyUpdate(docData)) {
        return;
      }

      this.applyUpdate(job.docId, docData);
      this.status.readyDocs.add(job.docId);
      this.statusUpdatedSubject$.next(job.docId);
    },
    save: async (
      docId: string,
      jobs: (Job & { type: 'save' })[],
      signal?: AbortSignal
    ) => {
      if (this.status.connectedDocs.has(docId)) {
        const merged = mergeUpdates(
          jobs.map(j => j.update).filter(update => !isEmptyUpdate(update))
        );
        const newSeqNum = await this.storage.commitDocAsClientUpdate(
          docId,
          merged,
          signal
        );
        this.storage.eventBus.emit({
          type: 'ClientUpdateCommitted',
          seqNum: newSeqNum,
          docId: docId,
          clientId: this.clientId,
          update: merged,
        });
      }
    },
    apply: async (job: Job & { type: 'apply' }, signal?: AbortSignal) => {
      throwIfAborted(signal);
      if (this.status.connectedDocs.has(job.docId)) {
        this.applyUpdate(job.docId, job.update);
      }
      if (job.isInitialize && !isEmptyUpdate(job.update)) {
        this.status.readyDocs.add(job.docId);
        this.statusUpdatedSubject$.next(job.docId);
      }
    },
  };

  readonly events: {
    [key in DocEvent['type']]?: (event: DocEvent & { type: key }) => void;
  } = {
    ServerUpdateCommitted: ({ docId, update, clientId }) => {
      this.schedule({
        type: 'apply',
        docId,
        update,
        isInitialize: clientId === this.clientId,
      });
    },
    ClientUpdateCommitted: ({ docId, update, clientId }) => {
      if (clientId !== this.clientId) {
        this.schedule({
          type: 'apply',
          docId,
          update,
          isInitialize: false,
        });
      }
    },
  };

  handleDocUpdate = (update: Uint8Array, origin: any, doc: YDoc) => {
    if (origin === DOC_ENGINE_ORIGIN) {
      return;
    }

    this.schedule({
      type: 'save',
      docId: doc.guid,
      update,
    });
  };

  applyUpdate(docId: string, update: Uint8Array) {
    const doc = this.status.docs.get(docId);
    if (doc && !isEmptyUpdate(update)) {
      try {
        applyUpdate(doc, update, DOC_ENGINE_ORIGIN);
      } catch (err) {
        logger.error('failed to apply update to yjs doc', err);
      }
    }
  }

  schedule(job: Job) {
    const priority = this.prioritySettings.get(job.docId) ?? 0;
    this.status.jobDocQueue.push(job.docId, priority);

    const existingJobs = this.status.jobMap.get(job.docId) ?? [];
    existingJobs.push(job);
    this.status.jobMap.set(job.docId, existingJobs);
    this.statusUpdatedSubject$.next(job.docId);
  }

  setPriority(docId: string, priority: number) {
    this.prioritySettings.set(docId, priority);
    this.status.jobDocQueue.updatePriority(docId, priority);
  }
}

@@ -1,24 +0,0 @@
AFFiNE currently has a lot of data stored using the old ID format. Here we record how the IDs are used, so this is not forgotten.

## Old ID Format

The format is:

- `{workspace-id}:space:{nanoid}` Common
- `{workspace-id}:space:page:{nanoid}`

> Note: sometimes the `workspace-id` is not the same as the current workspace id.
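
For illustration, a hedged sketch of normalizing an old-format ID down to the bare doc id (hypothetical helper; not necessarily how the real `normalizeServerDocId` behaves):

```ts
function normalizeDocId(id: string): string {
  // strip a `{workspace-id}:space:` or `{workspace-id}:space:page:` prefix;
  // new-format IDs pass through unchanged
  const match = id.match(/^.+?:space:(?:page:)?(.+)$/);
  return match ? match[1] : id;
}

normalizeDocId('ws1:space:abc123'); // 'abc123'
normalizeDocId('ws1:space:page:abc123'); // 'abc123'
normalizeDocId('abc123'); // 'abc123'
```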

## Usage

- Local Storage
  - indexeddb: Both new and old IDs coexist
  - sqlite: Both new and old IDs coexist
  - server-clock: Only new IDs are stored
  - sync-metadata: Both new and old IDs coexist
- Server Storage
  - Only stores new IDs, but accepts writes that use old IDs
- Protocols
  - When the client submits an update, both new and old IDs are used.
  - When the server broadcasts updates sent by other clients, both new and old IDs are used.
  - When the server responds to `client-pre-sync` (listing all updated doc ids), only new IDs are used.

@@ -1,69 +0,0 @@
import { BinarySearchTree } from '@datastructures-js/binary-search-tree';

export class PriorityQueue {
  tree = new BinarySearchTree<{ id: string; priority: number }>((a, b) => {
    return a.priority === b.priority
      ? a.id === b.id
        ? 0
        : a.id > b.id
          ? 1
          : -1
      : a.priority - b.priority;
  });
  priorityMap = new Map<string, number>();

  push(id: string, priority: number = 0) {
    const oldPriority = this.priorityMap.get(id);
    if (oldPriority === priority) {
      return;
    }
    if (oldPriority !== undefined) {
      this.remove(id);
    }
    this.tree.insert({ id, priority });
    this.priorityMap.set(id, priority);
  }

  pop() {
    const node = this.tree.max();

    if (!node) {
      return null;
    }

    this.tree.removeNode(node);

    const { id } = node.getValue();
    this.priorityMap.delete(id);

    return id;
  }

  remove(id: string, priority?: number) {
    priority ??= this.priorityMap.get(id);
    if (priority === undefined) {
      return false;
    }
    const removed = this.tree.remove({ id, priority });
    if (removed) {
      this.priorityMap.delete(id);
    }

    return removed;
  }

  clear() {
    this.tree.clear();
    this.priorityMap.clear();
  }

  updatePriority(id: string, priority: number) {
    if (this.remove(id)) {
      this.push(id, priority);
    }
  }

  get length() {
    return this.tree.count;
  }
}

@@ -1,611 +0,0 @@
|
||||
import { DebugLogger } from '@affine/debug';
|
||||
import { remove } from 'lodash-es';
|
||||
import { Observable, Subject } from 'rxjs';
|
||||
import { diffUpdate, encodeStateVectorFromUpdate, mergeUpdates } from 'yjs';
|
||||
|
||||
import { LiveData } from '../../livedata';
|
||||
import { throwIfAborted } from '../../utils';
|
||||
import { AsyncPriorityQueue } from './async-priority-queue';
|
||||
import { ClockMap } from './clock';
|
||||
import type { DocEvent } from './event';
|
||||
import type { DocServer } from './server';
|
||||
import type { DocStorageInner } from './storage';
|
||||
import { isEmptyUpdate } from './utils';
|
||||
|
||||
const logger = new DebugLogger('doc-engine:remote');
|
||||
|
||||
type Job =
|
||||
| {
|
||||
type: 'connect';
|
||||
docId: string;
|
||||
}
|
||||
| {
|
||||
type: 'push';
|
||||
docId: string;
|
||||
update: Uint8Array;
|
||||
seqNum: number;
|
||||
}
|
||||
| {
|
||||
type: 'pull';
|
||||
docId: string;
|
||||
}
|
||||
| {
|
||||
type: 'pullAndPush';
|
||||
docId: string;
|
||||
}
|
||||
| {
|
||||
type: 'save';
|
||||
docId: string;
|
||||
update?: Uint8Array;
|
||||
serverClock: number;
|
||||
};
|
||||
|
||||
export interface Status {
|
||||
docs: Set<string>;
|
||||
connectedDocs: Set<string>;
|
||||
jobDocQueue: AsyncPriorityQueue;
|
||||
jobMap: Map<string, Job[]>;
|
||||
serverClocks: ClockMap;
|
||||
syncing: boolean;
|
||||
retrying: boolean;
|
||||
errorMessage: string | null;
|
||||
}
|
||||
|
||||
export interface RemoteEngineState {
|
||||
total: number;
|
||||
syncing: number;
|
||||
retrying: boolean;
|
||||
errorMessage: string | null;
|
||||
}
|
||||
|
||||
export interface RemoteDocState {
|
||||
syncing: boolean;
|
||||
retrying: boolean;
|
||||
serverClock: number | null;
|
||||
errorMessage: string | null;
|
||||
}
|
||||
|
||||
export class DocEngineRemotePart {
|
||||
private readonly prioritySettings = new Map<string, number>();
|
||||
|
||||
constructor(
|
||||
private readonly clientId: string,
|
||||
private readonly storage: DocStorageInner,
|
||||
private readonly server: DocServer
|
||||
) {}
|
||||
|
||||
private status: Status = {
|
||||
docs: new Set<string>(),
|
||||
connectedDocs: new Set<string>(),
|
||||
jobDocQueue: new AsyncPriorityQueue(),
|
||||
jobMap: new Map(),
|
||||
serverClocks: new ClockMap(new Map()),
|
||||
syncing: false,
|
||||
retrying: false,
|
||||
errorMessage: null,
|
||||
};
|
||||
private readonly statusUpdatedSubject$ = new Subject<string | true>();
|
||||
|
||||
engineState$ = LiveData.from<RemoteEngineState>(
|
||||
new Observable(subscribe => {
|
||||
const next = () => {
|
||||
if (!this.status.syncing) {
|
||||
// if syncing = false, jobMap is empty
|
||||
subscribe.next({
|
||||
total: this.status.docs.size,
|
||||
syncing: this.status.docs.size,
|
||||
retrying: this.status.retrying,
|
||||
errorMessage: this.status.errorMessage,
|
||||
});
|
||||
} else {
|
||||
const syncing = this.status.jobMap.size;
|
||||
subscribe.next({
|
||||
total: this.status.docs.size,
|
||||
syncing: syncing,
|
||||
retrying: this.status.retrying,
|
||||
errorMessage: this.status.errorMessage,
|
||||
});
|
||||
}
|
||||
};
|
||||
next();
|
||||
return this.statusUpdatedSubject$.subscribe(() => {
|
||||
next();
|
||||
});
|
||||
}),
|
||||
{
|
||||
syncing: 0,
|
||||
total: 0,
|
||||
retrying: false,
|
||||
errorMessage: null,
|
||||
}
|
||||
);
|
||||
|
||||
docState$(docId: string) {
|
||||
return LiveData.from<RemoteDocState>(
|
||||
new Observable(subscribe => {
|
||||
const next = () => {
|
||||
subscribe.next({
|
||||
syncing:
|
||||
!this.status.connectedDocs.has(docId) ||
|
||||
this.status.jobMap.has(docId),
|
||||
serverClock: this.status.serverClocks.get(docId),
|
||||
retrying: this.status.retrying,
|
||||
errorMessage: this.status.errorMessage,
|
||||
});
|
||||
};
|
||||
next();
|
||||
return this.statusUpdatedSubject$.subscribe(updatedId => {
|
||||
if (updatedId === true || updatedId === docId) next();
|
||||
});
|
||||
}),
|
||||
{ syncing: false, retrying: false, errorMessage: null, serverClock: null }
|
||||
);
|
||||
}

  readonly jobs = {
    connect: async (docId: string, signal?: AbortSignal) => {
      const pushedSeqNum = await this.storage.loadDocSeqNumPushed(
        docId,
        signal
      );
      const seqNum = await this.storage.loadDocSeqNum(docId, signal);

      if (pushedSeqNum === null || pushedSeqNum !== seqNum) {
        await this.jobs.pullAndPush(docId, signal);
      } else {
        const pulled = await this.storage.loadDocServerClockPulled(docId);
        if (
          pulled === null ||
          pulled !== this.status.serverClocks.get(normalizeServerDocId(docId))
        ) {
          await this.jobs.pull(docId, signal);
        }
      }

      this.status.connectedDocs.add(docId);
      this.statusUpdatedSubject$.next(docId);
    },
    push: async (
      docId: string,
      jobs: (Job & { type: 'push' })[],
      signal?: AbortSignal
    ) => {
      if (this.status.connectedDocs.has(docId)) {
        const maxSeqNum = Math.max(...jobs.map(j => j.seqNum));
        const pushedSeqNum =
          (await this.storage.loadDocSeqNumPushed(docId, signal)) ?? 0;

        if (maxSeqNum - pushedSeqNum === jobs.length) {
          const merged = mergeUpdates(
            jobs.map(j => j.update).filter(update => !isEmptyUpdate(update))
          );
          if (!isEmptyUpdate(merged)) {
            const { serverClock } = await this.server.pushDoc(docId, merged);
            this.schedule({
              type: 'save',
              docId,
              serverClock,
            });
          }
          await this.storage.saveDocPushedSeqNum(
            docId,
            { add: jobs.length },
            signal
          );
        } else {
          // another tab may be modifying the doc; do a full pull-and-push to be safe
          await this.jobs.pullAndPush(docId, signal);
        }
      }
    },
    pullAndPush: async (docId: string, signal?: AbortSignal) => {
      const seqNum = await this.storage.loadDocSeqNum(docId, signal);
      const data = await this.storage.loadDocFromLocal(docId, signal);

      const stateVector =
        data && !isEmptyUpdate(data)
          ? encodeStateVectorFromUpdate(data)
          : new Uint8Array();
      const serverData = await this.server.pullDoc(docId, stateVector);

      if (serverData) {
        const {
          data: newData,
          stateVector: serverStateVector,
          serverClock,
        } = serverData;
        await this.storage.saveServerClock(
          new Map([[normalizeServerDocId(docId), serverClock]]),
          signal
        );
        this.actions.updateServerClock(
          normalizeServerDocId(docId),
          serverClock
        );
        await this.storage.commitDocAsServerUpdate(
          docId,
          newData,
          serverClock,
          signal
        );
        this.storage.eventBus.emit({
          type: 'ServerUpdateCommitted',
          docId,
          clientId: this.clientId,
          update: newData,
        });
        const diff =
          data && serverStateVector && serverStateVector.length > 0
            ? diffUpdate(data, serverStateVector)
            : data;
        if (diff && !isEmptyUpdate(diff)) {
          const { serverClock } = await this.server.pushDoc(docId, diff);
          this.schedule({
            type: 'save',
            docId,
            serverClock,
          });
        }
        await this.storage.saveDocPushedSeqNum(docId, seqNum, signal);
      } else {
        if (data && !isEmptyUpdate(data)) {
          const { serverClock } = await this.server.pushDoc(docId, data);
          await this.storage.saveDocServerClockPulled(
            docId,
            serverClock,
            signal
          );
          await this.storage.saveServerClock(
            new Map([[normalizeServerDocId(docId), serverClock]]),
            signal
          );
          this.actions.updateServerClock(
            normalizeServerDocId(docId),
            serverClock
          );
        }
        await this.storage.saveDocPushedSeqNum(docId, seqNum, signal);
      }
    },
    pull: async (docId: string, signal?: AbortSignal) => {
      const data = await this.storage.loadDocFromLocal(docId, signal);

      const stateVector =
        data && !isEmptyUpdate(data)
          ? encodeStateVectorFromUpdate(data)
          : new Uint8Array();
      const serverDoc = await this.server.pullDoc(docId, stateVector);
      if (!serverDoc) {
        return;
      }
      const { data: newData, serverClock } = serverDoc;
      await this.storage.commitDocAsServerUpdate(
        docId,
        newData,
        serverClock,
        signal
      );
      this.storage.eventBus.emit({
        type: 'ServerUpdateCommitted',
        docId,
        clientId: this.clientId,
        update: newData,
      });
      await this.storage.saveServerClock(
        new Map([[normalizeServerDocId(docId), serverClock]]),
        signal
      );
      this.actions.updateServerClock(normalizeServerDocId(docId), serverClock);
    },
    save: async (
      docId: string,
      jobs: (Job & { type: 'save' })[],
      signal?: AbortSignal
    ) => {
      const serverClock = jobs.reduce((a, b) => Math.max(a, b.serverClock), 0);
      await this.storage.saveServerClock(
        new Map([[normalizeServerDocId(docId), serverClock]]),
        signal
      );
      this.actions.updateServerClock(normalizeServerDocId(docId), serverClock);
      if (this.status.connectedDocs.has(docId)) {
        const data = jobs
          .map(j => j.update)
          .filter((update): update is Uint8Array =>
            update ? !isEmptyUpdate(update) : false
          );
        const update = data.length > 0 ? mergeUpdates(data) : new Uint8Array();
        await this.storage.commitDocAsServerUpdate(
          docId,
          update,
          serverClock,
          signal
        );
        this.storage.eventBus.emit({
          type: 'ServerUpdateCommitted',
          docId,
          clientId: this.clientId,
          update,
        });
      }
    },
  };
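
  // The `Job` union these handlers consume, reconstructed from the calls to
  // `schedule` above (a sketch, not the repo's actual type definition):
  //
  //   type Job =
  //     | { type: 'connect'; docId: string }
  //     | { type: 'push'; docId: string; update: Uint8Array; seqNum: number }
  //     | { type: 'pull'; docId: string }
  //     | { type: 'pullAndPush'; docId: string }
  //     | { type: 'save'; docId: string; serverClock: number; update?: Uint8Array };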

  readonly actions = {
    updateServerClock: (docId: string, serverClock: number) => {
      this.status.serverClocks.setIfBigger(docId, serverClock);
      this.statusUpdatedSubject$.next(docId);
    },
    addDoc: (docId: string) => {
      if (!this.status.docs.has(docId)) {
        this.status.docs.add(docId);
        this.statusUpdatedSubject$.next(docId);
        this.schedule({
          type: 'connect',
          docId,
        });
      }
    },
  };

  readonly events: {
    [key in DocEvent['type']]?: (event: DocEvent & { type: key }) => void;
  } = {
    ClientUpdateCommitted: ({ clientId, docId, seqNum, update }) => {
      if (clientId !== this.clientId) {
        return;
      }
      this.schedule({
        type: 'push',
        docId,
        update,
        seqNum,
      });
    },
  };

  async mainLoop(signal?: AbortSignal) {
    // eslint-disable-next-line no-constant-condition
    while (true) {
      try {
        await this.retryLoop(signal);
      } catch (err) {
        if (signal?.aborted) {
          return;
        }
        logger.error('Remote sync error, retry in 5s', err);
        this.status.errorMessage =
          err instanceof Error ? err.message : `${err}`;
        this.statusUpdatedSubject$.next(true);
      } finally {
        this.status = {
          docs: this.status.docs,
          connectedDocs: new Set<string>(),
          jobDocQueue: new AsyncPriorityQueue(),
          jobMap: new Map(),
          serverClocks: new ClockMap(new Map()),
          syncing: false,
          retrying: true,
          errorMessage: this.status.errorMessage,
        };
        this.statusUpdatedSubject$.next(true);
      }
      await Promise.race([
        new Promise<void>(resolve => {
          setTimeout(resolve, 5 * 1000);
        }),
        new Promise((_, reject) => {
          // exit if manually stopped
          if (signal?.aborted) {
            reject(signal.reason);
          }
          signal?.addEventListener('abort', () => {
            reject(signal.reason);
          });
        }),
      ]);
    }
  }
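
  // The Promise.race above is an abortable sleep. The same pattern as a
  // reusable helper (a sketch; `sleepOrAbort` is not a name used in this repo):
  //
  //   function sleepOrAbort(ms: number, signal?: AbortSignal) {
  //     return new Promise<void>((resolve, reject) => {
  //       if (signal?.aborted) return reject(signal.reason);
  //       const timer = setTimeout(resolve, ms);
  //       signal?.addEventListener('abort', () => {
  //         clearTimeout(timer);
  //         reject(signal.reason);
  //       });
  //     });
  //   }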

  async retryLoop(signal?: AbortSignal) {
    throwIfAborted(signal);
    const abort = new AbortController();

    // the listener receives an Event, not the reason; forward signal.reason
    signal?.addEventListener('abort', () => {
      abort.abort(signal.reason);
    });

    signal = abort.signal;

    const disposes: (() => void)[] = [];

    try {
      disposes.push(
        this.storage.eventBus.on(event => {
          const handler = this.events[event.type];
          handler?.(event as any);
        })
      );
      throwIfAborted(signal);

      for (const doc of this.status.docs) {
        this.schedule({
          type: 'connect',
          docId: doc,
        });
      }

      logger.info('Remote sync started');
      this.status.syncing = true;
      this.statusUpdatedSubject$.next(true);

      this.server.onInterrupted(reason => {
        abort.abort(reason);
      });
      await Promise.race([
        this.server.waitForConnectingServer(signal),
        new Promise<void>((_, reject) => {
          setTimeout(() => {
            reject(new Error('Connect to server timeout'));
          }, 1000 * 30);
        }),
        new Promise((_, reject) => {
          // exit if already aborted
          if (signal?.aborted) {
            reject(signal.reason);
          }
          signal?.addEventListener('abort', () => {
            reject(signal.reason);
          });
        }),
      ]);

      // reset the retrying flag once connected to the server
      this.status.retrying = false;
      this.statusUpdatedSubject$.next(true);

      throwIfAborted(signal);
      disposes.push(
        await this.server.subscribeAllDocs(({ docId, data, serverClock }) => {
          this.schedule({
            type: 'save',
            docId: docId,
            serverClock,
            update: data,
          });
        })
      );
      const cachedClocks = await this.storage.loadServerClock(signal);
      for (const [id, v] of cachedClocks) {
        this.actions.updateServerClock(id, v);
      }
      const maxClockValue = this.status.serverClocks.max;
      const newClocks = await this.server.loadServerClock(maxClockValue);
      for (const [id, v] of newClocks) {
        this.actions.updateServerClock(id, v);
      }
      await this.storage.saveServerClock(newClocks, signal);

      // eslint-disable-next-line no-constant-condition
      while (true) {
        throwIfAborted(signal);

        const docId = await this.status.jobDocQueue.asyncPop(signal);
        // eslint-disable-next-line no-constant-condition
        while (true) {
          const jobs = this.status.jobMap.get(docId);
          if (!jobs || jobs.length === 0) {
            this.status.jobMap.delete(docId);
            this.statusUpdatedSubject$.next(docId);
            break;
          }

          const connect = remove(jobs, j => j.type === 'connect');
          if (connect && connect.length > 0) {
            await this.jobs.connect(docId, signal);
            continue;
          }

          const pullAndPush = remove(jobs, j => j.type === 'pullAndPush');
          if (pullAndPush && pullAndPush.length > 0) {
            await this.jobs.pullAndPush(docId, signal);
            continue;
          }

          const pull = remove(jobs, j => j.type === 'pull');
          if (pull && pull.length > 0) {
            await this.jobs.pull(docId, signal);
            continue;
          }

          const push = remove(jobs, j => j.type === 'push');
          if (push && push.length > 0) {
            await this.jobs.push(
              docId,
              push as (Job & { type: 'push' })[],
              signal
            );
            continue;
          }

          const save = remove(jobs, j => j.type === 'save');
          if (save && save.length > 0) {
            await this.jobs.save(
              docId,
              save as (Job & { type: 'save' })[],
              signal
            );
            continue;
          }
        }
      }
    } finally {
      for (const dispose of disposes) {
        dispose();
      }
      try {
        this.server.disconnectServer();
      } catch (err) {
        logger.error('Error on disconnect server', err);
      }
      this.status.syncing = false;
      logger.info('Remote sync ended');
    }
  }
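
  // `remove(jobs, predicate)` above is assumed to splice matching jobs out of
  // the array and return them (lodash-style), so each pass drains one job type
  // per doc. A sketch of that contract:
  //
  //   function remove<T>(arr: T[], pred: (t: T) => boolean): T[] {
  //     const removed = arr.filter(pred);
  //     for (const item of removed) arr.splice(arr.indexOf(item), 1);
  //     return removed;
  //   }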

  schedule(job: Job) {
    const priority = this.prioritySettings.get(job.docId) ?? 0;
    this.status.jobDocQueue.push(job.docId, priority);

    const existingJobs = this.status.jobMap.get(job.docId) ?? [];
    existingJobs.push(job);
    this.status.jobMap.set(job.docId, existingJobs);
    this.statusUpdatedSubject$.next(job.docId);
  }

  setPriority(docId: string, priority: number) {
    this.prioritySettings.set(docId, priority);
    this.status.jobDocQueue.updatePriority(docId, priority);
  }
}
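
// A usage sketch, assuming `engine` is an instance of the class above: raise
// the priority of the doc the user currently has open so its jobs drain first.
//
//   engine.setPriority(currentDocId, 100);
//   engine.actions.addDoc(currentDocId); // schedules a 'connect' job for it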

// use normalized id in server clock
function normalizeServerDocId(raw: string) {
  enum DocVariant {
    Workspace = 'workspace',
    Page = 'page',
    Space = 'space',
    Settings = 'settings',
    Unknown = 'unknown',
  }

  try {
    if (!raw.length) {
      throw new Error('Invalid Empty Doc ID');
    }

    let parts = raw.split(':');

    if (parts.length > 3) {
      // special adapt case `wsId:space:page:pageId`
      if (parts[1] === DocVariant.Space && parts[2] === DocVariant.Page) {
        parts = [parts[0], DocVariant.Space, parts[3]];
      } else {
        throw new Error(`Invalid format of Doc ID: ${raw}`);
      }
    } else if (parts.length === 2) {
      // `${variant}:${guid}`
      throw new Error('not supported');
    } else if (parts.length === 1) {
      // `${ws}` or `${pageId}`
      parts = ['', DocVariant.Unknown, parts[0]];
    }

    const docId = parts.at(2);

    if (!docId) {
      throw new Error('ID is required');
    }

    return docId;
  } catch (err) {
    logger.error('Error on normalize docId ' + raw, err);
    return raw;
  }
}
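
// Expected normalizations, worked from the rules above:
//   'ws1:space:page:page1' -> 'page1'  (4-part adapt case)
//   'ws1:space:page1'      -> 'page1'  (3-part form, id is the third part)
//   'page1'                -> 'page1'  (single part, treated as unknown variant)
//   'space:page1'          -> 'space:page1'  (2-part form is unsupported; the
//                             error is logged and the raw id is returned)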

@@ -1,28 +0,0 @@

export interface DocServer {
  pullDoc(
    docId: string,
    stateVector: Uint8Array
  ): Promise<{
    data: Uint8Array;
    serverClock: number;
    stateVector?: Uint8Array;
  } | null>;

  pushDoc(docId: string, data: Uint8Array): Promise<{ serverClock: number }>;

  loadServerClock(after: number): Promise<Map<string, number>>;

  subscribeAllDocs(
    cb: (updates: {
      docId: string;
      data: Uint8Array;
      serverClock: number;
    }) => void
  ): Promise<() => void>;

  waitForConnectingServer(signal: AbortSignal): Promise<void>;
  disconnectServer(): void;
  onInterrupted(cb: (reason: string) => void): void;

  dispose?(): void;
}
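
// A minimal in-memory DocServer sketch, useful for tests. Assumptions: a
// single global clock, no auth, no transport, and pushes are echoed to all
// subscribers (a real server would typically exclude the sender). This is
// not the engine's actual server implementation.
import { diffUpdate, encodeStateVectorFromUpdate, mergeUpdates } from 'yjs';

export class InMemoryDocServer implements DocServer {
  private readonly docs = new Map<
    string,
    { data: Uint8Array; clock: number }
  >();
  private clock = 0;
  private readonly subscribers = new Set<
    (u: { docId: string; data: Uint8Array; serverClock: number }) => void
  >();

  async pullDoc(docId: string, stateVector: Uint8Array) {
    const doc = this.docs.get(docId);
    if (!doc) return null;
    // send only what the client is missing when it supplied a state vector
    const data =
      stateVector.length > 0 ? diffUpdate(doc.data, stateVector) : doc.data;
    return {
      data,
      serverClock: doc.clock,
      stateVector: encodeStateVectorFromUpdate(doc.data),
    };
  }

  async pushDoc(docId: string, data: Uint8Array) {
    const existing = this.docs.get(docId);
    const merged = existing ? mergeUpdates([existing.data, data]) : data;
    const serverClock = ++this.clock;
    this.docs.set(docId, { data: merged, clock: serverClock });
    for (const cb of this.subscribers) cb({ docId, data, serverClock });
    return { serverClock };
  }

  async loadServerClock(after: number) {
    const map = new Map<string, number>();
    for (const [docId, { clock }] of this.docs) {
      if (clock > after) map.set(docId, clock);
    }
    return map;
  }

  async subscribeAllDocs(
    cb: (u: { docId: string; data: Uint8Array; serverClock: number }) => void
  ) {
    this.subscribers.add(cb);
    return () => void this.subscribers.delete(cb);
  }

  async waitForConnectingServer() {} // always "connected" in memory
  disconnectServer() {}
  onInterrupted() {} // never interrupted in memory
}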

@@ -1,361 +0,0 @@

import { mergeUpdates } from 'yjs';

import type { ByteKV, Memento } from '../../storage';
import { MemoryMemento, ReadonlyByteKV, wrapMemento } from '../../storage';
import { AsyncLock, throwIfAborted } from '../../utils';
import type { DocEventBus } from '.';
import { DocEventBusInner, MemoryDocEventBus } from './event';
import { isEmptyUpdate } from './utils';

export interface DocStorage {
  eventBus: DocEventBus;
  doc: ByteKV;
  syncMetadata: ByteKV;
  serverClock: ByteKV;
}

const Keys = {
  SeqNum: (docId: string) => `${docId}:seqNum`,
  SeqNumPushed: (docId: string) => `${docId}:seqNumPushed`,
  ServerClockPulled: (docId: string) => `${docId}:serverClockPulled`,
  UpdatedTime: (docId: string) => `${docId}:updateTime`,
};
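
// Example key layout for docId 'doc-1' in the syncMetadata KV:
//   'doc-1:seqNum'             local update counter
//   'doc-1:seqNumPushed'       highest seqNum confirmed pushed to the server
//   'doc-1:serverClockPulled'  last server clock applied locally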

const Values = {
  UInt64: {
    parse: (buffer: Uint8Array) => {
      // respect byteOffset/byteLength in case the bytes are a view into a
      // larger buffer
      const view = new DataView(
        buffer.buffer,
        buffer.byteOffset,
        buffer.byteLength
      );
      return Number(view.getBigUint64(0, false)); // big-endian
    },
    serialize: (value: number) => {
      const buffer = new ArrayBuffer(8);
      const view = new DataView(buffer);
      view.setBigUint64(0, BigInt(value), false); // big-endian
      return new Uint8Array(buffer);
    },
  },
};
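
// Round trip: fixed-width big-endian uint64 keeps byte-wise lexicographic
// order in line with numeric order, which is one reason this encoding is
// common in KV stores.
console.assert(Values.UInt64.parse(Values.UInt64.serialize(42)) === 42);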

export class DocStorageInner {
  public readonly eventBus = new DocEventBusInner(this.behavior.eventBus);
  constructor(public readonly behavior: DocStorage) {}

  async loadServerClock(signal?: AbortSignal): Promise<Map<string, number>> {
    throwIfAborted(signal);
    const list = await this.behavior.serverClock.keys();

    const map = new Map<string, number>();
    for (const key of list) {
      const docId = key;
      const value = await this.behavior.serverClock.get(key);
      if (value) {
        map.set(docId, Values.UInt64.parse(value));
      }
    }

    return map;
  }

  async saveServerClock(map: Map<string, number>, signal?: AbortSignal) {
    throwIfAborted(signal);
    await this.behavior.serverClock.transaction(async transaction => {
      for (const [docId, value] of map) {
        const key = docId;
        const oldBuffer = await transaction.get(key);
        const old = oldBuffer ? Values.UInt64.parse(oldBuffer) : 0;
        if (old < value) {
          await transaction.set(key, Values.UInt64.serialize(value));
        }
      }
    });
  }

  async loadDocSeqNum(docId: string, signal?: AbortSignal) {
    throwIfAborted(signal);
    const bytes = await this.behavior.syncMetadata.get(Keys.SeqNum(docId));
    if (bytes === null) {
      return 0;
    }
    return Values.UInt64.parse(bytes);
  }

  async saveDocSeqNum(
    docId: string,
    seqNum: number | true,
    signal?: AbortSignal
  ) {
    throwIfAborted(signal);
    return await this.behavior.syncMetadata.transaction(async transaction => {
      const key = Keys.SeqNum(docId);
      const oldBytes = await transaction.get(key);
      const old = oldBytes ? Values.UInt64.parse(oldBytes) : 0;
      if (seqNum === true) {
        await transaction.set(key, Values.UInt64.serialize(old + 1));
        return old + 1;
      }
      if (old < seqNum) {
        await transaction.set(key, Values.UInt64.serialize(seqNum));
        return seqNum;
      }
      return old;
    });
  }

  async loadDocSeqNumPushed(docId: string, signal?: AbortSignal) {
    throwIfAborted(signal);
    const bytes = await this.behavior.syncMetadata.get(
      Keys.SeqNumPushed(docId)
    );
    if (bytes === null) {
      return null;
    }
    return Values.UInt64.parse(bytes);
  }

  async saveDocPushedSeqNum(
    docId: string,
    seqNum: number | { add: number },
    signal?: AbortSignal
  ) {
    throwIfAborted(signal);
    await this.behavior.syncMetadata.transaction(async transaction => {
      const key = Keys.SeqNumPushed(docId);
      const oldBytes = await transaction.get(key);
      const old = oldBytes ? Values.UInt64.parse(oldBytes) : null;
      if (typeof seqNum === 'object') {
        return transaction.set(
          key,
          Values.UInt64.serialize((old ?? 0) + seqNum.add)
        );
      }
      if (old === null || old < seqNum) {
        return transaction.set(key, Values.UInt64.serialize(seqNum));
      }
    });
  }

  async loadDocServerClockPulled(docId: string, signal?: AbortSignal) {
    throwIfAborted(signal);
    const bytes = await this.behavior.syncMetadata.get(
      Keys.ServerClockPulled(docId)
    );
    if (bytes === null) {
      return null;
    }
    return Values.UInt64.parse(bytes);
  }

  async saveDocServerClockPulled(
    docId: string,
    serverClock: number,
    signal?: AbortSignal
  ) {
    throwIfAborted(signal);
    await this.behavior.syncMetadata.transaction(async transaction => {
      const oldBytes = await transaction.get(Keys.ServerClockPulled(docId));
      const old = oldBytes ? Values.UInt64.parse(oldBytes) : null;
      if (old === null || old < serverClock) {
        await transaction.set(
          Keys.ServerClockPulled(docId),
          Values.UInt64.serialize(serverClock)
        );
      }
    });
  }

  async loadDocFromLocal(docId: string, signal?: AbortSignal) {
    throwIfAborted(signal);
    return await this.behavior.doc.get(docId);
  }

  /**
   * Callers must apply server updates in the order they occurred on the
   * server!
   */
  async commitDocAsServerUpdate(
    docId: string,
    update: Uint8Array,
    serverClock: number,
    signal?: AbortSignal
  ) {
    throwIfAborted(signal);
    await this.behavior.doc.transaction(async tx => {
      const data = await tx.get(docId);
      await tx.set(
        docId,
        data && !isEmptyUpdate(data)
          ? !isEmptyUpdate(update)
            ? mergeUpdates([data, update])
            : data
          : update
      );
    });
    await this.saveDocServerClockPulled(docId, serverClock);
  }

  async commitDocAsClientUpdate(
    docId: string,
    update: Uint8Array,
    signal?: AbortSignal
  ) {
    throwIfAborted(signal);

    await this.behavior.doc.transaction(async tx => {
      const data = await tx.get(docId);
      await tx.set(
        docId,
        data && !isEmptyUpdate(data)
          ? !isEmptyUpdate(update)
            ? mergeUpdates([data, update])
            : data
          : update
      );
    });

    return await this.saveDocSeqNum(docId, true);
  }

  clearSyncMetadata() {
    return this.behavior.syncMetadata.clear();
  }

  async clearServerClock() {
    return this.behavior.serverClock.clear();
  }
}
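
// A small end-to-end sketch over the in-memory storage defined below,
// assuming `Doc` and `encodeStateAsUpdate` are imported from 'yjs':
//
//   const storage = new DocStorageInner(new MemoryStorage());
//   const ydoc = new Doc();
//   ydoc.getText('t').insert(0, 'hello');
//   const seqNum = await storage.commitDocAsClientUpdate(
//     'doc-1',
//     encodeStateAsUpdate(ydoc)
//   ); // seqNum === 1 on the first commit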

export class ReadonlyStorage implements DocStorage {
  constructor(
    private readonly map: {
      [key: string]: Uint8Array;
    }
  ) {}

  eventBus = new MemoryDocEventBus();
  doc = new ReadonlyByteKV(new Map(Object.entries(this.map)));
  serverClock = new ReadonlyByteKV();
  syncMetadata = new ReadonlyByteKV();
}

export class MemoryStorage implements DocStorage {
  constructor(private readonly memo: Memento = new MemoryMemento()) {}

  eventBus = new MemoryDocEventBus();
  lock = new AsyncLock();
  readonly docDb = wrapMemento(this.memo, 'doc:');
  readonly syncMetadataDb = wrapMemento(this.memo, 'syncMetadata:');
  readonly serverClockDb = wrapMemento(this.memo, 'serverClock:');

  readonly doc = {
    transaction: async cb => {
      using _lock = await this.lock.acquire();
      return await cb({
        get: async key => {
          return this.docDb.get(key) ?? null;
        },
        set: async (key, value) => {
          this.docDb.set(key, value);
        },
        keys: async () => {
          return Array.from(this.docDb.keys());
        },
        clear: () => {
          this.docDb.clear();
        },
        del: key => {
          this.docDb.del(key);
        },
      });
    },
    get(key) {
      return this.transaction(async tx => tx.get(key));
    },
    set(key, value) {
      return this.transaction(async tx => tx.set(key, value));
    },
    keys() {
      return this.transaction(async tx => tx.keys());
    },
    clear() {
      return this.transaction(async tx => tx.clear());
    },
    del(key) {
      return this.transaction(async tx => tx.del(key));
    },
  } satisfies ByteKV;

  readonly syncMetadata = {
    transaction: async cb => {
      using _lock = await this.lock.acquire();
      return await cb({
        get: async key => {
          return this.syncMetadataDb.get(key) ?? null;
        },
        set: async (key, value) => {
          this.syncMetadataDb.set(key, value);
        },
        keys: async () => {
          return Array.from(this.syncMetadataDb.keys());
        },
        clear: () => {
          this.syncMetadataDb.clear();
        },
        del: key => {
          this.syncMetadataDb.del(key);
        },
      });
    },
    get(key) {
      return this.transaction(async tx => tx.get(key));
    },
    set(key, value) {
      return this.transaction(async tx => tx.set(key, value));
    },
    keys() {
      return this.transaction(async tx => tx.keys());
    },
    clear() {
      return this.transaction(async tx => tx.clear());
    },
    del(key) {
      return this.transaction(async tx => tx.del(key));
    },
  } satisfies ByteKV;

  readonly serverClock = {
    transaction: async cb => {
      using _lock = await this.lock.acquire();
      return await cb({
        get: async key => {
          return this.serverClockDb.get(key) ?? null;
        },
        set: async (key, value) => {
          this.serverClockDb.set(key, value);
        },
        keys: async () => {
          return Array.from(this.serverClockDb.keys());
        },
        clear: () => {
          this.serverClockDb.clear();
        },
        del: key => {
          this.serverClockDb.del(key);
        },
      });
    },
    get(key) {
      return this.transaction(async tx => tx.get(key));
    },
    set(key, value) {
      return this.transaction(async tx => tx.set(key, value));
    },
    keys() {
      return this.transaction(async tx => tx.keys());
    },
    clear() {
      return this.transaction(async tx => tx.clear());
    },
    del(key) {
      return this.transaction(async tx => tx.del(key));
    },
  } satisfies ByteKV;
}

@@ -1,6 +0,0 @@

export function isEmptyUpdate(binary: Uint8Array) {
  return (
    binary.byteLength === 0 ||
    (binary.byteLength === 2 && binary[0] === 0 && binary[1] === 0)
  );
}
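
// Why two zero bytes count as empty: encoding a Yjs doc with no content
// produces an update with zero structs and an empty delete set, i.e. [0, 0].
import { Doc, encodeStateAsUpdate } from 'yjs';

const empty = encodeStateAsUpdate(new Doc());
console.assert(isEmptyUpdate(empty)); // Uint8Array [0, 0]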

@@ -1,9 +1,3 @@

export type { AwarenessConnection } from './awareness';
export { AwarenessEngine } from './awareness';
export type { BlobStatus, BlobStorage } from './blob/blob';
export { BlobEngine, EmptyBlobStorage } from './blob/blob';
export { BlobStorageOverCapacity } from './blob/error';
export * from './doc';
export * from './indexer';
export {
  IndexedDBIndex,

@@ -48,7 +48,10 @@ export class JobRunner<J> {
          // TODO: retry logic
          await this.queue.return(jobs);
        }
        logger.error('Error processing jobs', err);
        logger.error(
          'Error processing jobs',
          err instanceof Error ? (err.stack ?? err.message) : err
        );
      }
    } else {
      await new Promise(resolve => setTimeout(resolve, 1000));