feat(editor): add Bear backup import and markdown zip folder hierarchy (#14599)
## Summary
- Add Bear `.bear2bk` backup importer (TextBundle-based zip format)
- Enhance markdown zip import to preserve folder structure from zip paths
- Add colored highlight (`<mark data-color="...">`) support to the HTML adapter
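
For illustration, a colored Bear highlight flows through the new pipeline roughly like this (a sketch assembled from the `bear.ts` and HTML-adapter code in the diff below; the delta object is abbreviated):

```ts
// Bear source markdown; the leading 🟢 emoji selects the green highlight color:
//   ==🟢ship it==
// After convertHighlights() in bear.ts:
//   <mark data-color="green">ship it</mark>
// After htmlMarkElementToDeltaMatcher validates the color name (unknown
// colors fall back to 'yellow'), the resulting text delta carries:
//   { insert: 'ship it',
//     attributes: { background: 'var(--affine-text-highlight-green)' } }
```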
### Bear Import Details
Bear backups are zip archives of TextBundle directories. The importer:
- Parses Bear-specific markdown (highlights `==text==`, callouts `> [!NOTE]`, inline tags `#tag`)
- Extracts creation/modification dates from `info.json` metadata
- Filters out trashed notes
- Converts Bear tags to AFFiNE tags (consolidated by root segment)
- Builds folder hierarchy from nested tag paths (e.g., `#work/projects/alpha`)
- Uses JSZip for lazy decompression to handle large backups without OOM; a usage sketch follows
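
A minimal calling sketch, assuming the host app already holds a workspace, schema, and store extensions (the relative import path is illustrative; the transformer is defined in `blocksuite/affine/widgets/linked-doc/src/transformers/bear.ts` and re-exported from the transformers index):

```ts
import type { ExtensionType, Schema, Workspace } from '@blocksuite/store';

import { BearTransformer } from './transformers'; // illustrative path

async function importBear(
  collection: Workspace,
  schema: Schema,
  extensions: ExtensionType[],
  file: File // the selected .bear2bk backup
) {
  const { docIds, tags, folderHierarchy } =
    await BearTransformer.importBearBackup({
      collection,
      schema,
      imported: file,
      extensions,
    });
  // `tags` maps each Bear tag (e.g. 'work/projects/alpha') to the IDs of the
  // docs that carried it; `folderHierarchy` mirrors the nested tag paths.
  console.log(`Imported ${docIds.length} notes across ${tags.size} tags.`);
  return folderHierarchy;
}
```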
### Markdown Zip Folder Hierarchy
`importMarkdownZip` now returns `{ docIds, folderHierarchy }` instead of just `docIds[]`, enabling the UI to recreate the zip's directory structure as AFFiNE folders.
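
A sketch of consuming the new return shape; the `FolderHierarchy` type is copied from the diff, and doc leaves are recognizable by their `pageId` (their keys are prefixed with `__doc__`):

```ts
type FolderHierarchy = {
  name: string;
  path: string;
  children: Map<string, FolderHierarchy>;
  pageId?: string;
  parentPath?: string;
};

// Print the recreated directory tree, indenting by depth.
function printHierarchy(node: FolderHierarchy, depth = 0): void {
  for (const child of node.children.values()) {
    const label = child.pageId ? `doc:${child.pageId}` : `${child.name}/`;
    console.log('  '.repeat(depth) + label);
    printHierarchy(child, depth + 1);
  }
}

// const { docIds, folderHierarchy } = await MarkdownTransformer.importMarkdownZip(options);
// if (folderHierarchy) printHierarchy(folderHierarchy);
```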
## Related Issues
- Implements the TextBundle-based import approach suggested in #14115 / Discussion #14142
- Addresses folder structure preservation requested in #10003
- Partially addresses frontmatter metadata import from #11286
## Test Plan
- [ ] Import a Bear `.bear2bk` backup file via the import dialog
- [ ] Verify tags are created and assigned to documents
- [ ] Verify folder hierarchy matches Bear's nested tag structure
- [ ] Verify creation/modification dates are preserved
- [ ] Verify highlighted text and callouts render correctly
- [ ] Verify images and attachments are imported
- [ ] Import a markdown zip with nested folders and verify the folder structure is recreated
- [ ] Verify trashed Bear notes are excluded
## Summary by CodeRabbit
* **New Features**
  * Bear (.bear2bk) backup import: bulk import notes, convert/dedupe tags, create nested folders, and return imported doc IDs plus folder hierarchy; UI import option and progress integrated.
  * Markdown ZIP import now returns an optional folder hierarchy alongside created doc IDs.
* **Bug Fixes / Improvements**
  * Highlighting: mark elements validate color names, default safely, and apply consistent background styling.
* **Chores**
  * Added runtime dependency for ZIP handling.
* **Documentation**
  * Added localization strings and i18n accessors for Bear import UI.
---------
Co-authored-by: DarkSky <25152247+darkskygit@users.noreply.github.com>
@@ -320,9 +320,21 @@ export const htmlMarkElementToDeltaMatcher = HtmlASTToDeltaExtension({
     if (!isElement(ast)) {
       return [];
     }
+    const dataColor =
+      typeof ast.properties?.dataColor === 'string'
+        ? ast.properties.dataColor
+        : '';
+    const colorName =
+      dataColor &&
+      /^(red|orange|yellow|green|teal|blue|purple|grey)$/.test(dataColor)
+        ? dataColor
+        : 'yellow';
     return ast.children.flatMap(child =>
       context.toDelta(child, { trim: false }).map(delta => {
-        delta.attributes = { ...delta.attributes };
+        delta.attributes = {
+          ...delta.attributes,
+          background: `var(--affine-text-highlight-${colorName})`,
+        };
         return delta;
       })
     );

@@ -25,6 +25,7 @@
     "@types/lodash-es": "^4.17.12",
     "fflate": "^0.8.2",
     "js-yaml": "^4.1.1",
+    "jszip": "^3.10.1",
     "lit": "^3.2.0",
     "lodash-es": "^4.17.23",
     "mammoth": "^1.11.0",

blocksuite/affine/widgets/linked-doc/src/transformers/bear.ts (new file, 531 lines)
@@ -0,0 +1,531 @@
import {
  defaultImageProxyMiddleware,
  docLinkBaseURLMiddleware,
  fileNameMiddleware,
  filePathMiddleware,
  MarkdownAdapter,
} from '@blocksuite/affine-shared/adapters';
import { Container } from '@blocksuite/global/di';
import { sha } from '@blocksuite/global/utils';
import type { ExtensionType, Schema, Workspace } from '@blocksuite/store';
import { extMimeMap, Transformer } from '@blocksuite/store';
import JSZip from 'jszip';

import { createCollectionDocCRUD } from './markdown.js';

/** Recursive tree node representing a tag-based folder hierarchy. */
type FolderHierarchy = {
  name: string;
  path: string;
  children: Map<string, FolderHierarchy>;
  pageId?: string;
  parentPath?: string;
};

type BearImportOptions = {
  collection: Workspace;
  schema: Schema;
  imported: Blob;
  extensions: ExtensionType[];
};

type BearImportResult = {
  docIds: string[];
  tags: Map<string, string[]>;
  folderHierarchy: FolderHierarchy;
};

type BundleEntry = {
  bundlePath: string;
  markdownPath: string | null;
  infoJsonPath: string | null;
  assetPaths: string[];
};

/** Create a DI provider from the given extensions. */
function getProvider(extensions: ExtensionType[]) {
  const container = new Container();
  extensions.forEach(ext => {
    ext.setup(container);
  });
  return container.provider();
}

/**
 * Extract Bear tags from the trailing footer of a markdown document.
 * Bear places tags (e.g. `#tag`, `#multi word tag#`, `#nested/tag`) at the end
 * of notes. This scans from the bottom up, collecting tag-only lines (up to 5)
 * and returns the deduplicated tags plus the content with those lines removed.
 */
function parseBearTags(markdown: string): {
  tags: string[];
  content: string;
} {
  const lines = markdown.split('\n');

  const codeFenceState: boolean[] = [];
  let inCodeBlock = false;
  for (const line of lines) {
    if (line.trimStart().startsWith('```')) {
      inCodeBlock = !inCodeBlock;
    }
    codeFenceState.push(inCodeBlock);
  }

  const tags: string[] = [];
  const tagLineIndices = new Set<number>();

  for (let i = lines.length - 1; i >= 0; i--) {
    const line = lines[i].trim();
    if (!line) continue;
    if (codeFenceState[i]) break;

    const lineTags = extractTagsFromLine(line);
    if (lineTags.length > 0) {
      for (const tag of lineTags) {
        tags.push(tag);
      }
      tagLineIndices.add(i);
    } else {
      break;
    }

    if (tagLineIndices.size >= 5) break;
  }

  const filteredLines = lines.filter((_, i) => !tagLineIndices.has(i));
  while (
    filteredLines.length > 0 &&
    filteredLines[filteredLines.length - 1].trim() === ''
  ) {
    filteredLines.pop();
  }

  return {
    tags: deduplicateTags(tags),
    content: filteredLines.join('\n'),
  };
}

/**
 * Parse Bear tags from a single line. Supports open tags (`#tag`),
 * closed tags (`#multi word tag#`), and nested tags (`#parent/child`).
 * Returns an empty array if the line contains non-tag content.
 */
function extractTagsFromLine(line: string): string[] {
  const tags: string[] = [];
  let remaining = line;

  while (remaining.length > 0) {
    remaining = remaining.trimStart();
    if (!remaining) break;

    if (remaining.startsWith('[')) return [];

    if (remaining.startsWith('#')) {
      if (remaining.length > 1 && remaining[1] === ' ') return [];
      if (remaining.length > 2 && remaining[1] === '#') return [];

      const closedMatch = remaining.match(/^#([^#\n]+)#/);
      if (closedMatch) {
        const tagValue = closedMatch[1].trim();
        if (tagValue) {
          tags.push(tagValue);
          remaining = remaining.slice(closedMatch[0].length);
          continue;
        }
      }

      const openMatch = remaining.match(
        /^#([\p{L}\p{N}_][\p{L}\p{N}_/-]*)(.*)$/u
      );
      if (openMatch) {
        const tagValue = openMatch[1];
        const after = openMatch[2].trim();
        if (tagValue) {
          tags.push(tagValue);
          remaining = after;
          continue;
        }
      }

      return [];
    } else {
      return [];
    }
  }

  return tags;
}

/**
 * Deduplicate tags case-insensitively while preserving the original
 * capitalization of the first occurrence of each tag.
 */
function deduplicateTags(tags: string[]): string[] {
  const seen = new Set<string>();
  const result: string[] = [];
  for (const tag of tags) {
    const normalized = tag.toLowerCase();
    if (!seen.has(normalized)) {
      seen.add(normalized);
      result.push(tag);
    }
  }
  return result;
}

/**
 * Build a nested folder hierarchy from Bear tags.
 * Tags like `parent/child` create nested folders. Documents are attached
 * as leaf nodes under their tag's folder using `__doc__` prefixed keys.
 */
function buildFolderHierarchyFromTags(
  tagDocMap: Map<string, string[]>
): FolderHierarchy {
  const root: FolderHierarchy = {
    name: '',
    path: '',
    children: new Map(),
  };

  for (const [tag, docIds] of tagDocMap) {
    const parts = tag.split('/');
    let current = root;
    let currentPath = '';

    for (const part of parts) {
      const parentPath = currentPath;
      currentPath = currentPath ? `${currentPath}/${part}` : part;

      if (!current.children.has(part)) {
        current.children.set(part, {
          name: part,
          path: currentPath,
          parentPath: parentPath || undefined,
          children: new Map(),
        });
      }
      current = current.children.get(part)!;
    }

    for (const docId of docIds) {
      const docNodeKey = `__doc__${docId}`;
      if (!current.children.has(docNodeKey)) {
        current.children.set(docNodeKey, {
          name: docNodeKey,
          path: `${current.path}/${docNodeKey}`,
          parentPath: current.path,
          children: new Map(),
          pageId: docId,
        });
      }
    }
  }

  return root;
}

const GFM_CALLOUT_MAP: Record<string, string> = {
  IMPORTANT: '\u26A0',
  NOTE: '\uD83D\uDCDD',
  WARNING: '\u26A0',
  TIP: '\uD83D\uDCA1',
  CAUTION: '\uD83D\uDD34',
};

/**
 * Convert GFM-style callouts (`> [!NOTE]`, `> [!WARNING]`, etc.) to
 * emoji-based callouts that AFFiNE's remark-callout plugin understands.
 * Skips content inside fenced code blocks.
 */
function convertGfmCallouts(markdown: string): string {
  const lines = markdown.split('\n');
  let inCodeBlock = false;
  for (let i = 0; i < lines.length; i++) {
    if (lines[i].trimStart().startsWith('```')) {
      inCodeBlock = !inCodeBlock;
      continue;
    }
    if (!inCodeBlock) {
      lines[i] = lines[i].replace(
        /^(>\s*)\[!(\w+)\]/,
        (_match, prefix: string, type: string) => {
          const emoji = GFM_CALLOUT_MAP[type.toUpperCase()];
          return emoji ? `${prefix}[!${emoji}]` : _match;
        }
      );
    }
  }
  return lines.join('\n');
}

const HIGHLIGHT_COLOR_MAP: Record<string, string> = {
  '\uD83D\uDFE2': 'green',
  '\uD83D\uDD35': 'blue',
  '\uD83D\uDFE3': 'purple',
  '\uD83D\uDD34': 'red',
  '\uD83D\uDFE1': 'yellow',
  '\uD83D\uDFE0': 'orange',
};

/** Escape HTML special characters to prevent markup injection. */
function escapeHtml(value: string): string {
  return value
    .replace(/&/g, '&amp;')
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;')
    .replace(/"/g, '&quot;')
    .replace(/'/g, '&#39;');
}

/**
 * Convert Bear `==highlight==` syntax to `<mark>` HTML elements.
 * Supports colored highlights via leading color emoji (e.g. `==🟢green text==`).
 * Skips content inside fenced code blocks.
 */
function convertHighlights(markdown: string): string {
  const lines = markdown.split('\n');
  let inCodeBlock = false;
  for (let i = 0; i < lines.length; i++) {
    if (lines[i].trimStart().startsWith('```')) {
      inCodeBlock = !inCodeBlock;
      continue;
    }
    if (!inCodeBlock) {
      lines[i] = lines[i].replace(
        /==(\S(?:[^=]|=[^=])*?)==/g,
        (_match, content: string) => {
          const firstChar = String.fromCodePoint(content.codePointAt(0)!);
          const color = HIGHLIGHT_COLOR_MAP[firstChar];
          if (color) {
            const text = content.slice(firstChar.length);
            return `<mark data-color="${color}">${escapeHtml(text)}</mark>`;
          }
          return `<mark>${escapeHtml(content)}</mark>`;
        }
      );
    }
  }
  return lines.join('\n');
}

/** Extract the document title from the first `# heading` or fall back to the bundle name. */
function extractTitle(markdown: string, bundleName: string): string {
  const lines = markdown.split('\n');
  let inCodeBlock = false;
  for (const line of lines) {
    if (line.trimStart().startsWith('```')) {
      inCodeBlock = !inCodeBlock;
      continue;
    }
    if (inCodeBlock) continue;
    const match = line.match(/^#\s+(.+)/);
    if (match) {
      const title = match[1].trim();
      if (title) return title;
    }
  }
  return bundleName.replace(/\.textbundle$/i, '') || 'Untitled';
}

/**
 * Import a Bear .bear2bk backup file.
 * Uses JSZip for lazy/streaming decompression to handle large backups.
 */
async function importBearBackup({
  collection,
  schema,
  imported,
  extensions,
}: BearImportOptions): Promise<BearImportResult> {
  const provider = getProvider(extensions);

  // JSZip reads the zip directory without decompressing all entries
  const zip = await JSZip.loadAsync(imported);

  // Scan entries and group by textbundle
  const bundleMap = new Map<string, BundleEntry>();

  zip.forEach((path, _entry) => {
    if (path.includes('__MACOSX') || path.includes('.DS_Store')) return;

    const tbMatch = path.match(/^(.+?\.textbundle)\/(.*)/i);
    if (!tbMatch) return;

    const bundlePath = tbMatch[1];
    const innerPath = tbMatch[2];

    if (!bundleMap.has(bundlePath)) {
      bundleMap.set(bundlePath, {
        bundlePath,
        markdownPath: null,
        infoJsonPath: null,
        assetPaths: [],
      });
    }
    const bundle = bundleMap.get(bundlePath)!;

    if (innerPath === 'text.md' || innerPath === 'text.txt') {
      bundle.markdownPath = path;
    } else if (innerPath === 'info.json') {
      bundle.infoJsonPath = path;
    } else if (innerPath.startsWith('assets/') && innerPath !== 'assets/') {
      bundle.assetPaths.push(path);
    }
  });

  // Read info.json for all bundles to filter out trashed notes
  // (info.json is tiny, safe to read all at once)
  const validBundles: Array<{
    entry: BundleEntry;
    bearMeta: Record<string, unknown> | undefined;
  }> = [];

  for (const entry of bundleMap.values()) {
    if (!entry.markdownPath) continue;

    let info: Record<string, unknown> = {};
    if (entry.infoJsonPath) {
      try {
        const text = await zip.file(entry.infoJsonPath)!.async('string');
        info = JSON.parse(text);
      } catch {
        // Invalid JSON
      }
    }

    const bearMeta = info['net.shinyfrog.bear'] as
      | Record<string, unknown>
      | undefined;
    if (bearMeta?.trashed === 1) continue;

    validBundles.push({ entry, bearMeta });
  }

  if (validBundles.length === 0) {
    throw new Error(
      'No valid Bear textbundles found in the archive. Please select a .bear2bk backup file.'
    );
  }

  const docIds: string[] = [];
  const tagDocMap = new Map<string, string[]>();

  // Process bundles sequentially to limit memory.
  // Each bundle is wrapped in try/catch so one bad note does not abort the
  // entire import after earlier notes have already been written.
  for (const { entry, bearMeta } of validBundles) {
    try {
      // Read markdown (decompress on demand)
      const rawMarkdown = await zip.file(entry.markdownPath!)!.async('string');
      if (!rawMarkdown.trim()) continue;

      const { tags, content: cleanedMarkdown } = parseBearTags(rawMarkdown);
      const bundleDirName =
        entry.bundlePath.split('/').findLast(Boolean) ?? 'Untitled';
      const title = extractTitle(cleanedMarkdown, bundleDirName);
      const markdown = convertHighlights(
        convertGfmCallouts(
          cleanedMarkdown.replace(/<!--\s*\{[^}]*\}\s*-->/g, '')
        )
      );

      // Read assets on demand (decompress only this bundle's assets)
      const pendingAssets = new Map<string, File>();
      const pendingPathBlobIdMap = new Map<string, string>();

      for (const assetFullPath of entry.assetPaths) {
        try {
          const data = await zip.file(assetFullPath)!.async('arraybuffer');
          const tbMatch = assetFullPath.match(/^.+?\.textbundle\/(.*)/i);
          const assetRelPath = tbMatch ? tbMatch[1] : assetFullPath;
          const ext = assetRelPath.split('.').at(-1) ?? '';
          const mime = extMimeMap.get(ext.toLowerCase()) ?? '';
          const key = await sha(data);
          // Map both the full zip path and the relative path (assets/...)
          pendingPathBlobIdMap.set(assetFullPath, key);
          pendingPathBlobIdMap.set(assetRelPath, key);
          try {
            const decodedRel = decodeURIComponent(assetRelPath);
            if (decodedRel !== assetRelPath) {
              pendingPathBlobIdMap.set(decodedRel, key);
            }
            const decodedFull = decodeURIComponent(assetFullPath);
            if (decodedFull !== assetFullPath) {
              pendingPathBlobIdMap.set(decodedFull, key);
            }
          } catch {
            // Invalid URI encoding
          }
          const fileName = assetRelPath.split('/').pop() ?? '';
          pendingAssets.set(key, new File([data], fileName, { type: mime }));
        } catch {
          // Failed to read asset, skip
        }
      }

      const fullPath = `${entry.bundlePath}/text.md`;
      const job = new Transformer({
        schema,
        blobCRUD: collection.blobSync,
        docCRUD: createCollectionDocCRUD(collection),
        middlewares: [
          defaultImageProxyMiddleware,
          fileNameMiddleware(title),
          filePathMiddleware(fullPath),
          docLinkBaseURLMiddleware(collection.id),
        ],
      });

      const assets = job.assets;
      const pathBlobIdMap = job.assetsManager.getPathBlobIdMap();
      for (const [p, key] of pendingPathBlobIdMap.entries()) {
        pathBlobIdMap.set(p, key);
      }
      for (const [key, file] of pendingAssets.entries()) {
        assets.set(key, file);
      }

      const mdAdapter = new MarkdownAdapter(job, provider);
      const doc = await mdAdapter.toDoc({
        file: markdown,
        assets: job.assetsManager,
      });

      if (doc) {
        docIds.push(doc.id);

        const metaPatch: Record<string, unknown> = {};
        if (bearMeta?.creationDate) {
          const ts = Date.parse(String(bearMeta.creationDate));
          if (!isNaN(ts)) metaPatch.createDate = ts;
        }
        if (bearMeta?.modificationDate) {
          const ts = Date.parse(String(bearMeta.modificationDate));
          if (!isNaN(ts)) metaPatch.updatedDate = ts;
        }
        if (Object.keys(metaPatch).length) {
          collection.meta.setDocMeta(doc.id, metaPatch);
        }

        for (const tag of tags) {
          if (!tagDocMap.has(tag)) {
            tagDocMap.set(tag, []);
          }
          tagDocMap.get(tag)!.push(doc.id);
        }
      }
    } catch (err) {
      console.warn(`Failed to import bundle: ${entry.bundlePath}`, err);
    }
  }

  const folderHierarchy = buildFolderHierarchyFromTags(tagDocMap);
  return { docIds, tags: tagDocMap, folderHierarchy };
}

/** Public API for importing Bear .bear2bk backup archives. */
export const BearTransformer = {
  importBearBackup,
};

@@ -1,3 +1,4 @@
+export { BearTransformer } from './bear.js';
 export { DocxTransformer } from './docx.js';
 export { HtmlTransformer } from './html.js';
 export { MarkdownTransformer } from './markdown.js';

@@ -462,12 +462,23 @@ async function importMarkdownToDoc({
  * @param options.imported The zip file as a Blob
  * @returns A Promise that resolves to an array of IDs of the newly created docs
  */
+type FolderHierarchy = {
+  name: string;
+  path: string;
+  children: Map<string, FolderHierarchy>;
+  pageId?: string;
+  parentPath?: string;
+};
+
 async function importMarkdownZip({
   collection,
   schema,
   imported,
   extensions,
-}: ImportMarkdownZipOptions) {
+}: ImportMarkdownZipOptions): Promise<{
+  docIds: string[];
+  folderHierarchy?: FolderHierarchy;
+}> {
   const provider = getProvider(extensions);
   const unzip = new Unzip();
   await unzip.load(imported);
@@ -476,6 +487,7 @@ async function importMarkdownZip({
   const pendingAssets: AssetMap = new Map();
   const pendingPathBlobIdMap: PathBlobIdMap = new Map();
   const markdownBlobs: ImportedFileEntry[] = [];
+  const docPathMap: Array<{ fullPath: string; docId: string }> = [];

   // Iterate over all files in the zip
   for (const { path, content: blob } of unzip) {
@@ -527,10 +539,94 @@ async function importMarkdownZip({
       if (doc) {
         applyMetaPatch(collection, doc.id, meta);
         docIds.push(doc.id);
+        docPathMap.push({ fullPath, docId: doc.id });
       }
     })
   );
-  return docIds;
+
+  // Build folder hierarchy from zip paths
+  const folderHierarchy = buildMarkdownZipFolderHierarchy(docPathMap);
+
+  return { docIds, folderHierarchy };
 }

+/**
+ * Builds a tree of {@link FolderHierarchy} nodes from the zip paths of
+ * imported markdown files. Returns `undefined` when every entry sits at
+ * the same level (no real subfolder structure). A common root directory
+ * shared by all entries is stripped automatically so that the resulting
+ * hierarchy starts one level deeper.
+ */
+function buildMarkdownZipFolderHierarchy(
+  entries: Array<{ fullPath: string; docId: string }>
+): FolderHierarchy | undefined {
+  if (entries.length === 0) return undefined;
+
+  // Check if any entries have folder structure
+  const hasSubfolders = entries.some(e => {
+    const parts = e.fullPath.split('/').filter(Boolean);
+    // More than just "root/file.md" -- need at least one real subfolder
+    return parts.length > 2;
+  });
+  if (!hasSubfolders) {
+    // All files are at the same level, no folder hierarchy needed
+    return undefined;
+  }
+
+  const root: FolderHierarchy = {
+    name: '',
+    path: '',
+    children: new Map(),
+  };
+
+  // Check once whether all entries share a common root directory
+  const candidateRoot = entries[0]?.fullPath.split('/').find(Boolean);
+  const skipRoot =
+    !!candidateRoot &&
+    entries.every(e => e.fullPath.startsWith(candidateRoot + '/'));
+
+  for (const { fullPath, docId } of entries) {
+    const parts = fullPath.split('/').filter(Boolean);
+    const fileName = parts.pop(); // Remove filename
+    if (!fileName) continue;
+
+    let folderParts = skipRoot ? parts.slice(1) : parts;
+
+    if (folderParts.length === 0) {
+      // Root-level file, no folder needed
+      continue;
+    }
+
+    let current = root;
+    let currentPath = '';
+
+    for (const folderName of folderParts) {
+      const parentPath = currentPath;
+      currentPath = currentPath ? `${currentPath}/${folderName}` : folderName;
+
+      if (!current.children.has(folderName)) {
+        current.children.set(folderName, {
+          name: folderName,
+          path: currentPath,
+          parentPath: parentPath || undefined,
+          children: new Map(),
+        });
+      }
+      current = current.children.get(folderName)!;
+    }
+
+    // Add the doc as a leaf
+    const docNodeKey = `__doc__${docId}`;
+    current.children.set(docNodeKey, {
+      name: docNodeKey,
+      path: `${current.path}/${docNodeKey}`,
+      parentPath: current.path,
+      children: new Map(),
+      pageId: docId,
+    });
+  }
+
+  return root.children.size > 0 ? root : undefined;
+}
+
 export const MarkdownTransformer = {

@@ -436,7 +436,7 @@ export class StarterDebugMenu extends ShadowlessElement {
     try {
       const file = await openSingleFileWith('Zip');
       if (!file) return;
-      const result = await MarkdownTransformer.importMarkdownZip({
+      const { docIds } = await MarkdownTransformer.importMarkdownZip({
         collection: this.collection,
         schema: this.editor.doc.schema,
         imported: file,
@@ -445,7 +445,7 @@ export class StarterDebugMenu extends ShadowlessElement {
       if (!this.editor.host) return;
       toast(
         this.editor.host,
-        `Successfully imported ${result.length} markdown files.`
+        `Successfully imported ${docIds.length} markdown files.`
       );
     } catch (error) {
       console.error('Import markdown zip files failed:', error);

@@ -15,6 +15,7 @@ import {
 } from '@affine/core/modules/dialogs';
 import { ExplorerIconService } from '@affine/core/modules/explorer-icon/services/explorer-icon';
 import { OrganizeService } from '@affine/core/modules/organize';
+import { TagService } from '@affine/core/modules/tag';
 import { UrlService } from '@affine/core/modules/url';
 import {
   getAFFiNEWorkspaceSchema,
@@ -27,6 +28,7 @@ import track from '@affine/track';
 import { openDirectory, openFilesWith } from '@blocksuite/affine/shared/utils';
 import type { Workspace } from '@blocksuite/affine/store';
 import {
+  BearTransformer,
   DocxTransformer,
   HtmlTransformer,
   MarkdownTransformer,
@@ -188,11 +190,49 @@ function createFolderStructure(
   return { folderId: rootFolderId, docLinks };
 }

+/**
+ * Creates the folder tree described by {@link folderHierarchy} via
+ * {@link OrganizeService} and links every document into its folder.
+ * Returns the root folder ID on success, or `undefined` if the
+ * hierarchy is empty or an error occurs.
+ *
+ * When {@link explorerIconService} is provided, document icons from the
+ * hierarchy (e.g. Notion page emojis) are applied. Callers that do not
+ * need icon support can omit it safely.
+ */
+function applyFolderHierarchy(
+  organizeService: OrganizeService,
+  folderHierarchy: FolderHierarchy,
+  explorerIconService?: ExplorerIconService
+): string | undefined {
+  if (folderHierarchy.children.size === 0) return undefined;
+  try {
+    const { folderId, docLinks } = createFolderStructure(
+      organizeService,
+      folderHierarchy,
+      null,
+      explorerIconService
+    );
+    for (const { folderId, docId } of docLinks) {
+      const folder = organizeService.folderTree.folderNode$(folderId).value;
+      if (folder) {
+        const index = folder.indexAt('after');
+        folder.createLink('doc', docId, index);
+      }
+    }
+    return folderId || undefined;
+  } catch (error) {
+    logger.warn('Failed to create folder structure:', error);
+    return undefined;
+  }
+}
+
 type ImportType =
   | 'markdown'
   | 'markdownZip'
   | 'notion'
   | 'obsidian'
+  | 'bear'
   | 'snapshot'
   | 'html'
   | 'docx'
@@ -218,7 +258,8 @@ type ImportConfig = {
     files: File[],
     handleImportAffineFile: () => Promise<WorkspaceMetadata | undefined>,
     organizeService?: OrganizeService,
-    explorerIconService?: ExplorerIconService
+    explorerIconService?: ExplorerIconService,
+    tagService?: TagService
   ) => Promise<ImportResult>;
 };

@@ -290,6 +331,19 @@ const importOptions = [
     testId: 'editor-option-menu-import-obsidian',
     type: 'obsidian' as ImportType,
   },
+  {
+    key: 'bear',
+    label: 'com.affine.import.bear',
+    prefixIcon: (
+      <FileIcon color={cssVarV2('icon/primary')} width={20} height={20} />
+    ),
+    suffixIcon: (
+      <HelpIcon color={cssVarV2('icon/primary')} width={20} height={20} />
+    ),
+    suffixTooltip: 'com.affine.import.bear.tooltip',
+    testId: 'editor-option-menu-import-bear',
+    type: 'bear' as ImportType,
+  },
   {
     key: 'docx',
     label: 'com.affine.import.docx',
@@ -365,21 +419,29 @@ const importConfigs: Record<ImportType, ImportConfig> = {
       docCollection,
       files,
      _handleImportAffineFile,
-      _organizeService,
+      organizeService,
      _explorerIconService
     ) => {
       const file = files.length === 1 ? files[0] : null;
       if (!file) {
         throw new Error('Expected a single zip file for markdownZip import');
       }
-      const docIds = await MarkdownTransformer.importMarkdownZip({
-        collection: docCollection,
-        schema: getAFFiNEWorkspaceSchema(),
-        imported: file,
-        extensions: getStoreManager().config.init().value.get('store'),
-      });
+      const { docIds, folderHierarchy } =
+        await MarkdownTransformer.importMarkdownZip({
+          collection: docCollection,
+          schema: getAFFiNEWorkspaceSchema(),
+          imported: file,
+          extensions: getStoreManager().config.init().value.get('store'),
+        });
+
+      const rootFolderId =
+        folderHierarchy && organizeService
+          ? applyFolderHierarchy(organizeService, folderHierarchy)
+          : undefined;
+
       return {
         docIds,
+        rootFolderId,
       };
     },
   },
@@ -431,37 +493,14 @@ const importConfigs: Record<ImportType, ImportConfig> = {
         extensions: getStoreManager().config.init().value.get('store'),
       });

-      let rootFolderId: string | undefined;
-
-      // Create folder structure if hierarchy exists and OrganizeService is available
-      if (
-        folderHierarchy &&
-        organizeService &&
-        folderHierarchy.children.size > 0
-      ) {
-        try {
-          const { folderId, docLinks } = createFolderStructure(
-            organizeService,
-            folderHierarchy,
-            null,
-            explorerIconService
-          );
-          rootFolderId = folderId || undefined;
-
-          // Create links for all documents to their respective folders
-          for (const { folderId, docId } of docLinks) {
-            const folder =
-              organizeService.folderTree.folderNode$(folderId).value;
-            if (folder) {
-              const index = folder.indexAt('after');
-              folder.createLink('doc', docId, index);
-            }
-          }
-        } catch (error) {
-          logger.warn('Failed to create folder structure:', error);
-          // Continue with import even if folder creation fails
-        }
-      }
+      const rootFolderId =
+        folderHierarchy && organizeService
+          ? applyFolderHierarchy(
+              organizeService,
+              folderHierarchy,
+              explorerIconService
+            )
+          : undefined;

       return {
         docIds: pageIds,
@@ -501,6 +540,114 @@ const importConfigs: Record<ImportType, ImportConfig> = {
       return { docIds };
     },
   },
+  bear: {
+    fileOptions: { acceptType: 'Zip', multiple: false },
+    importFunction: async (
+      docCollection,
+      files,
+      _handleImportAffineFile,
+      organizeService,
+      _explorerIconService,
+      tagService
+    ) => {
+      const file = files.length === 1 ? files[0] : null;
+      if (!file) {
+        throw new Error('Expected a single .bear2bk file for Bear import');
+      }
+      let docIds: string[];
+      let tags: Map<string, string[]>;
+      let folderHierarchy: FolderHierarchy;
+      try {
+        const result = await BearTransformer.importBearBackup({
+          collection: docCollection,
+          schema: getAFFiNEWorkspaceSchema(),
+          imported: file,
+          extensions: getStoreManager().config.init().value.get('store'),
+        });
+        docIds = result.docIds;
+        tags = result.tags;
+        folderHierarchy = result.folderHierarchy;
+      } catch (err) {
+        logger.error('Bear import failed:', err);
+        throw err instanceof Error
+          ? err
+          : new Error(String(err) || 'Bear import failed');
+      }
+
+      // Create AFFiNE tags from Bear tags
+      if (tagService && tags.size > 0) {
+        try {
+          // Get existing tags for deduplication
+          const existingTags = tagService.tagList.tags$.value;
+          const existingTagMap = new Map<string, string>(); // lowercase name → tag id
+          for (const tag of existingTags) {
+            const name = tag.value$.value.toLowerCase();
+            existingTagMap.set(name, tag.id);
+          }
+
+          // Consolidate tags by root segment (e.g., "privat/bike" → "privat").
+          // Keyed by lowercase root for case-insensitive dedup, but the
+          // original capitalization of the first occurrence is preserved
+          // so new AFFiNE tags are created with the user's casing.
+          const rootTagDocMap = new Map<
+            string,
+            { displayName: string; docs: Set<string> }
+          >();
+          for (const [tagName, tagDocIds] of tags) {
+            const originalRoot = tagName.split('/')[0];
+            const key = originalRoot.toLowerCase();
+            let entry = rootTagDocMap.get(key);
+            if (!entry) {
+              entry = { displayName: originalRoot, docs: new Set<string>() };
+              rootTagDocMap.set(key, entry);
+            }
+            for (const docId of tagDocIds) {
+              entry.docs.add(docId);
+            }
+          }
+
+          for (const [
+            rootTagKey,
+            { displayName, docs: docIdSet },
+          ] of rootTagDocMap) {
+            // Check if tag already exists (case-insensitive)
+            let tagId = existingTagMap.get(rootTagKey);
+            if (!tagId) {
+              const newTag = tagService.tagList.createTag(
+                displayName,
+                tagService.randomTagColor()
+              );
+              tagId = newTag.id;
+              existingTagMap.set(rootTagKey, tagId);
+            }
+
+            // Assign tag to each doc
+            for (const docId of docIdSet) {
+              const doc = docCollection.getDoc(docId);
+              const currentTags = doc?.meta?.tags ?? [];
+              if (!currentTags.includes(tagId)) {
+                docCollection.meta.setDocMeta(docId, {
+                  tags: [...currentTags, tagId],
+                });
+              }
+            }
+          }
+        } catch (error) {
+          logger.warn('Failed to create Bear tags:', error);
+        }
+      }
+
+      const rootFolderId =
+        folderHierarchy && organizeService
+          ? applyFolderHierarchy(organizeService, folderHierarchy)
+          : undefined;
+
+      return {
+        docIds,
+        rootFolderId,
+      };
+    },
+  },
   docx: {
     fileOptions: { acceptType: 'Docx', multiple: false },
     importFunction: async (docCollection, file) => {
@@ -735,6 +882,7 @@ export const ImportDialog = ({
   const docCollection = workspace.docCollection;
   const organizeService = useService(OrganizeService);
   const explorerIconService = useService(ExplorerIconService);
+  const tagService = useService(TagService);

   const globalDialogService = useService(GlobalDialogService);

@@ -824,7 +972,8 @@ export const ImportDialog = ({
           files,
           handleImportAffineFile,
           organizeService,
-          explorerIconService
+          explorerIconService,
+          tagService
         );

         setImportResult({
@@ -863,6 +1012,7 @@ export const ImportDialog = ({
       explorerIconService,
       handleImportAffineFile,
       organizeService,
+      tagService,
       t,
     ]
   );

@@ -2462,6 +2462,14 @@ export function useAFFiNEI18N(): {
   * `AFFiNE workspace data`
   */
  ["com.affine.import.affine-workspace-data"](): string;
+  /**
+   * `Bear (.bear2bk)`
+   */
+  ["com.affine.import.bear"](): string;
+  /**
+   * `Import your Bear note backup. Tags will be converted to AFFiNE tags and folders.`
+   */
+  ["com.affine.import.bear.tooltip"](): string;
  /**
   * `Docx`
   */

@@ -614,6 +614,8 @@
   "com.affine.import-clipper.dialog.errorLoad": "Failed to load content, please try again.",
   "com.affine.import_file": "Support Markdown/Notion",
   "com.affine.import.affine-workspace-data": "AFFiNE workspace data",
+  "com.affine.import.bear": "Bear (.bear2bk)",
+  "com.affine.import.bear.tooltip": "Import your Bear note backup. Tags will be converted to AFFiNE tags and folders.",
   "com.affine.import.docx": "Docx",
   "com.affine.import.docx.tooltip": "Import your .docx file.",
   "com.affine.import.html-files": "HTML",