Compare commits

...

15 Commits

Author SHA1 Message Date
zzj3720
9bee2cb0fa fix(editor): improve string conversion logic for checkbox property
- Add a FALSE_VALUES set containing various falsy string representations

- Support Chinese negation terms like "否", "不", "错", etc.

- Optimize the implementation of the cellFromString method
2025-02-26 00:11:36 +08:00
zzj3720
1addd17d64 fix(editor): table block supports parsing rich text 2025-02-25 18:52:13 +08:00
darkskygit
842c39c3be feat(native): doc loader for common native (#9941) 2025-02-25 07:50:56 +00:00
EYHN
26674b0cb8 fix(core): fallback when loading share page (#10428) 2025-02-25 07:35:58 +00:00
EYHN
cafff4e0eb fix(nbstore): reduce unnecessary sync (#10426) 2025-02-25 07:21:46 +00:00
fundon
abc3f9f23f chore(editor): bump @floating-ui/dom to 1.6.13 (#10425) 2025-02-25 07:06:27 +00:00
Brooooooklyn
5dbffba08d feat(native): media capture (#9992) 2025-02-25 06:51:56 +00:00
EYHN
2ec7de7e32 fix(core): add linked doc button (#10417) 2025-02-25 13:03:56 +08:00
liuyi
e5e5c0a8ba perf(core): only full sync before exporting (#10408) 2025-02-25 04:41:56 +00:00
EYHN
c644a46b8d fix(nbstore): local doc update lost (#10422) 2025-02-25 04:26:49 +00:00
Peng Xiao
7e892b3a7e fix(core): unused blobs query (#10399) 2025-02-25 10:58:43 +08:00
JimmFly
848145150d fix(core): close popover after successful invite in member editor (#10388) 2025-02-25 09:51:22 +08:00
JimmFly
dee6be11fb fix(core): reorder plan card action button conditions (#10387) 2025-02-25 09:51:10 +08:00
JimmFly
abda70d2c8 fix(core): fix permission checks for export workspace (#10401) 2025-02-25 09:50:43 +08:00
Saul-Mirone
40104f2f87 refactor(editor): remove unused any conversion (#10410) 2025-02-24 15:57:49 +00:00
135 changed files with 9187 additions and 403 deletions

Cargo.lock generated

File diff suppressed because it is too large


@@ -15,8 +15,12 @@ affine_common = { path = "./packages/common/native" }
affine_nbstore = { path = "./packages/frontend/native/nbstore" }
anyhow = "1"
base64-simd = "0.8"
block2 = "0.6"
chrono = "0.4"
core-foundation = "0.10"
coreaudio-rs = "0.12"
criterion2 = { version = "2", default-features = false }
dispatch2 = "0.2"
dotenvy = "0.15"
file-format = { version = "0.26", features = ["reader"] }
homedir = "0.3"
@@ -31,6 +35,8 @@ once_cell = "1"
parking_lot = "0.12"
rand = "0.9"
rayon = "1.10"
rubato = "0.16"
screencapturekit = "0.3"
serde = "1"
serde_json = "1"
sha3 = "0.10"


@@ -23,7 +23,7 @@
"@blocksuite/icons": "^2.2.1",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -22,7 +22,7 @@
"@blocksuite/icons": "^2.2.1",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -21,7 +21,7 @@
"@blocksuite/icons": "^2.2.3",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -23,7 +23,7 @@
"@blocksuite/icons": "^2.2.3",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -23,7 +23,7 @@
"@blocksuite/icons": "^2.2.1",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -20,7 +20,7 @@
"@blocksuite/global": "workspace:*",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -22,7 +22,7 @@
"@blocksuite/icons": "^2.2.1",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -22,7 +22,7 @@
"@blocksuite/icons": "^2.2.1",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -21,7 +21,7 @@
"@blocksuite/global": "workspace:*",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -23,7 +23,7 @@
"@blocksuite/icons": "^2.2.1",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -21,7 +21,7 @@
"@blocksuite/global": "workspace:*",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -20,7 +20,7 @@
"@blocksuite/global": "workspace:*",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -23,7 +23,7 @@
"@blocksuite/icons": "^2.2.1",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -20,7 +20,7 @@
"@blocksuite/global": "workspace:*",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -42,7 +42,7 @@
"@blocksuite/icons": "^2.2.1",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -22,7 +22,7 @@
"@blocksuite/global": "workspace:*",
"@blocksuite/icons": "^2.2.1",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@preact/signals-core": "^1.8.0",
"@vanilla-extract/css": "^1.17.0",
"lit": "^3.2.0",


@@ -30,7 +30,10 @@ export const tableBlockHtmlAdapterMatcher: BlockHtmlAdapterMatcher = {
}
const { walkerContext } = context;
if (o.node.tagName === 'table') {
const tableProps = parseTableFromHtml(o.node);
const astToDelta = context.deltaConverter.astToDelta.bind(
context.deltaConverter
);
const tableProps = parseTableFromHtml(o.node, astToDelta);
walkerContext.openNode(
{
type: 'block',
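
Note on this hunk: the HTML adapter now feeds each table cell's AST through the delta converter instead of flattening it to a plain string, so inline formatting inside cells survives import. astToDelta is bound to deltaConverter before being handed to parseTableFromHtml because a method extracted from its object loses its this binding; the Markdown adapter in the next hunk applies the same pattern. A minimal sketch (hypothetical names, TypeScript) of why the bind is needed:

// Sketch only: a class method extracted as a bare function loses `this`,
// so a bound wrapper is what gets passed into the table parser.
interface Delta {
  insert: string;
}

class DeltaConverter {
  private converted = 0; // stand-in for internal converter state

  astToDelta(text: string): Delta[] {
    this.converted += 1; // would throw if `this` were undefined
    return [{ insert: text }];
  }
}

const converter = new DeltaConverter();
// const unbound = converter.astToDelta; unbound('x'); // TypeError: this is undefined
const astToDelta = converter.astToDelta.bind(converter); // safe to pass as a callback
console.log(astToDelta('cell text')); // [ { insert: 'cell text' } ]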


@@ -25,12 +25,15 @@ export const tableBlockMarkdownAdapterMatcher: BlockMarkdownAdapterMatcher = {
enter: (o, context) => {
const { walkerContext } = context;
if (o.node.type === 'table') {
const astToDelta = context.deltaConverter.astToDelta.bind(
context.deltaConverter
);
walkerContext.openNode(
{
type: 'block',
id: nanoid(),
flavour: TableModelFlavour,
props: parseTableFromMarkdown(o.node),
props: parseTableFromMarkdown(o.node, astToDelta),
children: [],
},
'children'


@@ -7,6 +7,7 @@ import {
BlockPlainTextAdapterExtension,
type BlockPlainTextAdapterMatcher,
} from '@blocksuite/affine-shared/adapters';
import type { DeltaInsert } from '@blocksuite/inline';
import { nanoid } from '@blocksuite/store';
import { createTableProps, formatTable, processTable } from './utils.js';
@@ -21,10 +22,14 @@ export const tableBlockPlainTextAdapterMatcher: BlockPlainTextAdapterMatcher = {
const text = o.node.content;
const rowTexts = text.split('\n');
if (rowTexts.length <= 1) return;
const rowTextLists: string[][] = [];
const rowTextLists: DeltaInsert[][][] = [];
let columnCount: number | null = null;
for (const row of rowTexts) {
const cells = row.split('\t');
const cells = row.split('\t').map<DeltaInsert[]>(text => [
{
insert: text,
},
]);
if (cells.length <= 1) return;
if (columnCount == null) {
columnCount = cells.length;
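
In the plain-text path each tab-separated cell becomes a one-element delta list, so pasted TSV flows through the same rich-text pipeline as parsed HTML and Markdown tables. A standalone sketch of the resulting rows-by-cells-by-deltas shape:

// Sketch: TSV text to the adapter's DeltaInsert[][][] shape.
type DeltaInsert = { insert: string };

function tsvToDeltas(text: string): DeltaInsert[][][] {
  return text
    .split('\n')
    .map(row => row.split('\t').map<DeltaInsert[]>(cell => [{ insert: cell }]));
}

console.log(tsvToDeltas('a\tb\nc\td'));
// [ [ [ { insert: 'a' } ], [ { insert: 'b' } ] ],
//   [ [ { insert: 'c' } ], [ { insert: 'd' } ] ] ]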


@@ -5,14 +5,23 @@ import type {
TableRow,
} from '@blocksuite/affine-model';
import {
AdapterTextUtils,
HastUtils,
type HtmlAST,
type MarkdownAST,
} from '@blocksuite/affine-shared/adapters';
import { HastUtils } from '@blocksuite/affine-shared/adapters';
import { generateFractionalIndexingKeyBetween } from '@blocksuite/affine-shared/utils';
import type { DeltaInsert } from '@blocksuite/inline';
import { nanoid } from '@blocksuite/store';
import type { Element, ElementContent } from 'hast';
import type { PhrasingContent, Table as MarkdownTable, TableCell } from 'mdast';
import type { Element } from 'hast';
import type { Table as MarkdownTable } from 'mdast';
type RichTextType = DeltaInsert[];
const createRichText = (text: RichTextType) => {
return {
'$blocksuite:internal:text$': true,
delta: text,
};
};
function calculateColumnWidths(rows: string[][]): number[] {
return (
rows[0]?.map((_, colIndex) =>
@@ -92,15 +101,6 @@ export const processTable = (
});
return table;
};
const getTextFromElement = (element: ElementContent): string => {
if (element.type === 'text') {
return element.value.trim();
}
if (element.type === 'element') {
return element.children.map(child => getTextFromElement(child)).join('');
}
return '';
};
const getAllTag = (node: Element | undefined, tagName: string): Element[] => {
if (!node) {
@@ -120,7 +120,7 @@ const getAllTag = (node: Element | undefined, tagName: string): Element[] => {
return [];
};
export const createTableProps = (rowTextLists: string[][]) => {
export const createTableProps = (deltasLists: RichTextType[][]) => {
const createIdAndOrder = (count: number) => {
const result: { id: string; order: string }[] = Array.from({
length: count,
@@ -135,8 +135,8 @@ export const createTableProps = (rowTextLists: string[][]) => {
}
return result;
};
const columnCount = Math.max(...rowTextLists.map(row => row.length));
const rowCount = rowTextLists.length;
const columnCount = Math.max(...deltasLists.map(row => row.length));
const rowCount = deltasLists.length;
const columns: TableColumn[] = createIdAndOrder(columnCount).map(v => ({
columnId: v.id,
@@ -156,9 +156,9 @@ export const createTableProps = (rowTextLists: string[][]) => {
continue;
}
const cellId = `${row.rowId}:${column.columnId}`;
const text = rowTextLists[i]?.[j];
const text = deltasLists[i]?.[j];
cells[cellId] = {
text: AdapterTextUtils.createText(text ?? ''),
text: createRichText(text ?? []),
};
}
}
@@ -172,7 +172,8 @@ export const createTableProps = (rowTextLists: string[][]) => {
};
export const parseTableFromHtml = (
element: Element
element: Element,
astToDelta: (ast: HtmlAST) => RichTextType
): TableBlockPropsSerialized => {
const headerRows = getAllTag(element, 'thead').flatMap(node =>
getAllTag(node, 'tr').map(tr => getAllTag(tr, 'th'))
@@ -184,33 +185,26 @@ export const parseTableFromHtml = (
getAllTag(node, 'tr').map(tr => getAllTag(tr, 'td'))
);
const allRows = [...headerRows, ...bodyRows, ...footerRows];
const rowTextLists: string[][] = [];
const rowTextLists: RichTextType[][] = [];
allRows.forEach(cells => {
const row: string[] = [];
const row: RichTextType[] = [];
cells.forEach(cell => {
row.push(getTextFromElement(cell));
row.push(astToDelta(cell));
});
rowTextLists.push(row);
});
return createTableProps(rowTextLists);
};
const getTextFromTableCell = (node: TableCell) => {
const getTextFromPhrasingContent = (node: PhrasingContent) => {
if (node.type === 'text') {
return node.value;
}
return '';
};
return node.children.map(child => getTextFromPhrasingContent(child)).join('');
};
export const parseTableFromMarkdown = (node: MarkdownTable) => {
const rowTextLists: string[][] = [];
export const parseTableFromMarkdown = (
node: MarkdownTable,
astToDelta: (ast: MarkdownAST) => RichTextType
) => {
const rowTextLists: RichTextType[][] = [];
node.children.forEach(row => {
const rowText: string[] = [];
const rowText: RichTextType[] = [];
row.children.forEach(cell => {
rowText.push(getTextFromTableCell(cell));
rowText.push(astToDelta(cell));
});
rowTextLists.push(rowText);
});
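
Net effect of the utils changes: getTextFromElement and getTextFromTableCell are gone, and every cell now stores rich text, a delta list wrapped with the '$blocksuite:internal:text$' marker by the createRichText helper above. A small illustration of the cell shape this produces (the attributes field here is an assumption, shown to make the point of the change concrete):

type DeltaInsert = { insert: string; attributes?: Record<string, unknown> };

// Same wrapper as createRichText in the hunk above.
const createRichText = (delta: DeltaInsert[]) => ({
  '$blocksuite:internal:text$': true,
  delta,
});

// A bold cell keeps its formatting instead of collapsing to the string 'Pens'.
const cell = {
  text: createRichText([{ insert: 'Pens', attributes: { bold: true } }]),
};
console.log(cell.text.delta); // [ { insert: 'Pens', attributes: { bold: true } } ]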


@@ -20,7 +20,7 @@
"@blocksuite/icons": "^2.2.1",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@lottiefiles/dotlottie-wc": "^0.4.0",
"@preact/signals-core": "^1.8.0",


@@ -20,7 +20,7 @@
"@blocksuite/icons": "^2.2.1",
"@blocksuite/store": "workspace:*",
"@emotion/hash": "^0.9.2",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -3,17 +3,30 @@ import { propertyType } from '../../core/property/property-config.js';
export const checkboxPropertyType = propertyType('checkbox');
const FALSE_VALUES = new Set([
'false',
'no',
'0',
'',
'undefined',
'null',
'否',
'不',
'错',
'错误',
'取消',
'关闭',
]);
export const checkboxPropertyModelConfig =
checkboxPropertyType.modelConfig<boolean>({
name: 'Checkbox',
type: () => t.boolean.instance(),
defaultData: () => ({}),
cellToString: ({ value }) => (value ? 'True' : 'False'),
cellFromString: ({ value }) => {
return {
value: value !== 'False',
};
},
cellFromString: ({ value }) => ({
value: !FALSE_VALUES.has((value?.trim() ?? '').toLowerCase()),
}),
cellToJson: ({ value }) => value ?? null,
cellFromJson: ({ value }) =>
typeof value !== 'boolean' ? undefined : value,
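
The old parser only unchecked on the exact string 'False'; the new one trims, lowercases, and consults the FALSE_VALUES set, so both English and Chinese negations (and empty input) parse as unchecked while any other string checks the box. A standalone restatement for illustration:

const FALSE_VALUES = new Set([
  'false', 'no', '0', '', 'undefined', 'null',
  '否', '不', '错', '错误', '取消', '关闭',
]);

const parseCheckbox = (value?: string) =>
  !FALSE_VALUES.has((value?.trim() ?? '').toLowerCase());

console.log(parseCheckbox('False'));   // false (now case-insensitive)
console.log(parseCheckbox(' 否 '));    // false (trimmed, Chinese negation)
console.log(parseCheckbox('yes'));     // true
console.log(parseCheckbox(undefined)); // false (missing value maps to the empty string)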


@@ -23,7 +23,7 @@
"@blocksuite/icons": "^2.2.1",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -22,7 +22,7 @@
"@blocksuite/icons": "^2.2.1",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -19,7 +19,7 @@
"@blocksuite/icons": "^2.2.1",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -25,7 +25,7 @@
"@blocksuite/icons": "^2.2.1",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.2",
"@preact/signals-core": "^1.8.0",
"@toeverything/theme": "^1.1.12",


@@ -24,7 +24,7 @@
"@blocksuite/icons": "^2.2.2",
"@blocksuite/inline": "workspace:*",
"@blocksuite/store": "workspace:*",
"@floating-ui/dom": "^1.6.10",
"@floating-ui/dom": "^1.6.13",
"@lit/context": "^1.1.3",
"@lottiefiles/dotlottie-wc": "^0.4.0",
"@preact/signals-core": "^1.8.0",


@@ -3,15 +3,63 @@ edition = "2021"
name = "affine_common"
version = "0.1.0"
[features]
default = []
doc-loader = ["docx-parser", "infer", "path-ext", "pdf-extract", "readability", "serde_json", "strum_macros", "text-splitter", "thiserror", "tree-sitter", "url"]
tree-sitter = [
"cc",
"dep:tree-sitter",
"dep:tree-sitter-c",
"dep:tree-sitter-c-sharp",
"dep:tree-sitter-cpp",
"dep:tree-sitter-go",
"dep:tree-sitter-java",
"dep:tree-sitter-javascript",
"dep:tree-sitter-kotlin-ng",
"dep:tree-sitter-python",
"dep:tree-sitter-rust",
"dep:tree-sitter-scala",
"dep:tree-sitter-typescript",
]
[dependencies]
chrono = { workspace = true }
rand = { workspace = true }
sha3 = { workspace = true }
docx-parser = { git = "https://github.com/toeverything/docx-parser", optional = true }
infer = { version = "0.19.0", optional = true }
path-ext = { version = "0.1.1", optional = true }
pdf-extract = { version = "0.8.2", optional = true }
readability = { version = "0.3.0", optional = true, default-features = false }
serde_json = { version = "1.0", optional = true }
strum_macros = { version = "0.26.2", optional = true }
text-splitter = { version = "0.22", features = ["markdown", "tiktoken-rs"], optional = true }
thiserror = { version = "1", optional = true }
tree-sitter = { version = "0.25", optional = true }
tree-sitter-c = { version = "0.23", optional = true }
tree-sitter-c-sharp = { version = "0.23", optional = true }
tree-sitter-cpp = { version = "0.23", optional = true }
tree-sitter-go = { version = "0.23", optional = true }
tree-sitter-java = { version = "0.23", optional = true }
tree-sitter-javascript = { version = "0.23", optional = true }
tree-sitter-kotlin-ng = { version = "1.1", optional = true }
tree-sitter-python = { version = "0.23", optional = true }
tree-sitter-rust = { version = "0.23", optional = true }
tree-sitter-scala = { version = "0.23", optional = true }
tree-sitter-typescript = { version = "0.23", optional = true }
url = { version = "2.5", optional = true }
tiktoken-rs = { workspace = true }
[dev-dependencies]
criterion2 = { workspace = true }
rayon = { workspace = true }
[build-dependencies]
cc = { version = "1", optional = true }
[[bench]]
harness = false
name = "hashcash"

Binary file not shown.


@@ -0,0 +1,28 @@
# DOCX Demo
# <a name="OLE_LINK1"></a><a name="OLE_LINK2"></a><a name="_Toc359077851"></a>Demonstration of DOCX support in calibre
This document demonstrates the ability of the calibre DOCX Input plugin to convert the various typographic features in a Microsoft Word (2007 and newer) document. Convert this document to a modern ebook format, such as AZW3 for Kindles or EPUB for other ebook readers, to see it in action.
There is support for images, tables, lists, footnotes, endnotes, links, dropcaps and various types of text and paragraph level formatting.
To see the DOCX conversion in action, simply add this file to calibre using the **“Add Books” **button and then click “**Convert”. ** Set the output format in the top right corner of the conversion dialog to EPUB or AZW3 and click **“OK”**.
# <a name="_Toc359077852"></a>Text Formatting
## <a name="_Toc359077853"></a>Inline formatting
Here, we demonstrate various types of inline text formatting and the use of embedded fonts.
Here is some **bold, ***italic, ****bold-italic, ***__underlined __and ~~struck out ~~ text. Then, we have a superscript and a subscript. Now we see some red, green and blue text. Some text with a yellow highlight. Some text in a box. Some text in inverse video.
A paragraph with styled text: subtle emphasis followed by strong text and intense emphasis. This paragraph uses document wide styles for styling rather than inline text properties as demonstrated in the previous paragraph — calibre can handle both with equal ease.
## <a name="_Toc359077854"></a>Fun with fonts
This document has embedded the Ubuntu font family. The body text is in the Ubuntu typeface, here is some text in the Ubuntu Mono typeface, notice how every letter has the same width, even i and m. Every embedded font will automatically be embedded in the output ebook during conversion.
## ***<a name="_Paragraph_level_formatting"></a>******<a name="_Toc359077855"></a>******Paragraph level formatting***
You can do crazy things with paragraphs, if the urge strikes you. For instance this paragraph is right aligned and has a right border. It has also been given a light gray background.


@@ -0,0 +1,28 @@
For the lovers of poetry amongst you, paragraphs with hanging indents, like this often come in handy. You can use hanging indents to ensure that a line of poetry retains its individual identity as a line even when the screen is too narrow to display it as a single line. Not only does this paragraph have a hanging indent, it is also has an extra top margin, setting it apart from the preceding paragraph.
# <a name="_Toc359077856"></a>Tables
| | |
| ----------- | -------- |
| ITEM | NEEDED |
| Books | 1 |
| Pens | 3 |
| Pencils | 2 |
| Highlighter | 2 colors |
| Scissors | 1 pair |
Tables in Word can vary from the extremely simple to the extremely complex. calibre tries to do its best when converting tables. While you may run into trouble with the occasional table, the vast majority of common cases should be converted very well, as demonstrated in this section. Note that for optimum results, when creating tables in Word, you should set their widths using percentages, rather than absolute units. To the left of this paragraph is a floating two column table with a nice green border and header row.
Now lets look at a fancier table—one with alternating row colors and partial borders. This table is stretched out to take 100% of the available width.
| | | | | | |
| ------------ | ------- | ------- | ------- | ------- | ------- |
| City or Town | Point A | Point B | Point C | Point D | Point E |
| Point A | — | | | | |
| Point B | 87 | — | | | |
| Point C | 64 | 56 | — | | |
| Point D | 37 | 32 | 91 | — | |
| Point E | 93 | 35 | 54 | 43 | — |
Next, we see a table with special formatting in various locations. Notice how the formatting for the header row and sub header rows is preserved.


@@ -0,0 +1,21 @@
| | | | |
| ---------------- | ------------- | ------------------- | ------ |
| College | New students | Graduating students | Change |
| | Undergraduate | | |
| Cedar University | 110 | 103 | +7 |
| Oak Institute | 202 | 210 | -8 |
| | Graduate | | |
| Cedar University | 24 | 20 | +4 |
| Elm College | 43 | 53 | -10 |
| Total | 998 | 908 | 90 |
Source: Fictitious data, for illustration purposes only
Next, we have something a little more complex, a nested table, i.e. a table inside another table. Additionally, the inner table has some of its cells merged. The table is displayed horizontally centered.
| | |
| --- | -------------------------------------------------------------- |
| | To the left is a table inside a table, with some cells merged. |
We end with a fancy calendar, note how much of the original formatting is preserved. Note that this table will only display correctly on relatively wide screens. In general, very wide tables or tables whose cells have fixed width requirements dont fare well in ebooks.


@@ -0,0 +1,18 @@
| | | | | | | | | | | | | |
| ------------- | | --- | | --- | | --- | | --- | | --- | | --- |
| December 2007 | | | | | | | | | | | | |
| Sun | | Mon | | Tue | | Wed | | Thu | | Fri | | Sat |
| | | | | | | | | | | | | 1 |
| | | | | | | | | | | | | |
| 2 | | 3 | | 4 | | 5 | | 6 | | 7 | | 8 |
| | | | | | | | | | | | | |
| 9 | | 10 | | 11 | | 12 | | 13 | | 14 | | 15 |
| | | | | | | | | | | | | |
| 16 | | 17 | | 18 | | 19 | | 20 | | 21 | | 22 |
| | | | | | | | | | | | | |
| 23 | | 24 | | 25 | | 26 | | 27 | | 28 | | 29 |
| | | | | | | | | | | | | |
| 30 | | 31 | | | | | | | | | | |
# <a name="_Toc359077857"></a>Structural Elements


@@ -0,0 +1,20 @@
Miscellaneous structural elements you can add to your document, like footnotes, endnotes, dropcaps and the like.
## <a name="_Toc359077858"></a>Footnotes & Endnotes
Footnotes and endnotes are automatically recognized and both are converted to endnotes, with backlinks for maximum ease of use in ebook devices.
## <a name="_Toc359077859"></a>Dropcaps
D
rop caps are used to emphasize the leading paragraph at the start of a section. In Word it is possible to specify how many lines of text a drop-cap should use. Because of limitations in ebook technology, this is not possible when converting. Instead, the converted drop cap will use font size and line height to simulate the effect as well as possible. While not as good as the original, the result is usually tolerable. This paragraph has a “D” dropcap set to occupy three lines of text with a font size of 58.5 pts. Depending on the screen width and capabilities of the device you view the book on, this dropcap can look anything from perfect to ugly.
## <a name="_Toc359077860"></a>Links
Two kinds of links are possible, those that refer to an external website and those that refer to locations inside the document itself. Both are supported by calibre. For example, here is a link pointing to the [calibre download page](http://calibre-ebook.com/download). Then we have a link that points back to the section on [paragraph level formatting](#_Paragraph_level_formatting) in this document.
## <a name="_Toc359077861"></a>Table of Contents
There are two approaches that calibre takes when generating a Table of Contents. The first is if the Word document has a Table of Contents itself. Provided that the Table of Contents uses hyperlinks, calibre will automatically use it. The levels of the Table of Contents are identified by their left indent, so if you want the ebook to have a multi-level Table of Contents, make sure you create a properly indented Table of Contents in Word.


@@ -0,0 +1,30 @@
If no Table of Contents is found in the document, then a table of contents is automatically generated from the headings in the document. A heading is identified as something that has the Heading 1 or Heading 2, etc. style applied to it. These headings are turned into a Table of Contents with Heading 1 being the topmost level, Heading 2 the second level and so on.
You can see the Table of Contents created by calibre by clicking the Table of Contents button in whatever viewer you are using to view the converted ebook.
# <a name="_Toc359077862"></a>Images
Images can be of three main types. Inline images are images that are part of the normal text flow, like this image of a green dot ![dot_green.png](./media/image2.png). Inline images do not cause breaks in the text and are usually small in size. The next category of image is a floating image, one that “floats “ on the page and is surrounded by text. Word supports more types of floating images than are possible with current ebook technology, so the conversion maps floating images to simple left and right floats, as you can see with the left and right arrow images on the sides of this paragraph.
The final type of image is a “block” image, one that becomes a paragraph on its own and has no text on either side. Below is a centered green dot.
Centered images like this are useful for large pictures that should be a focus of attention.
Generally, it is not possible to translate the exact positioning of images from a Word document to an ebook. That is because in Word, image positioning is specified in absolute units from the page boundaries. There is no analogous technology in ebooks, so the conversion will usually end up placing the image either centered or floating close to the point in the text where it was inserted, not necessarily where it appears on the page in Word.
# <a name="_Toc359077863"></a>Lists
All types of lists are supported by the conversion, with the exception of lists that use fancy bullets, these get converted to regular bullets.
## <a name="_Toc359077864"></a>Bulleted List
- One
- Two
## <a name="_Toc359077865"></a>Numbered List
1. One, with a very long line to demonstrate that the hanging indent for the list is working correctly
2. Two


@@ -0,0 +1,37 @@
## <a name="_Toc359077866"></a>Multi-level Lists
1. One
2. Two
3. Three
4. Four with a very long line to demonstrate that the hanging indent for the list is working correctly.
5. Five
6. Six
A Multi-level list with bullets:
- One
- Two
- This bullet uses an image as the bullet item
- Four
- Five
## <a name="_Toc359077867"></a>Continued Lists
i. One
j. Two
An interruption in our regularly scheduled listing, for this essential and very relevant public service announcement.
k. We now resume our normal programming
l. Four


@@ -0,0 +1,182 @@
# DOCX Demo
# <a name="OLE_LINK1"></a><a name="OLE_LINK2"></a><a name="_Toc359077851"></a>Demonstration of DOCX support in calibre
This document demonstrates the ability of the calibre DOCX Input plugin to convert the various typographic features in a Microsoft Word (2007 and newer) document. Convert this document to a modern ebook format, such as AZW3 for Kindles or EPUB for other ebook readers, to see it in action.
There is support for images, tables, lists, footnotes, endnotes, links, dropcaps and various types of text and paragraph level formatting.
To see the DOCX conversion in action, simply add this file to calibre using the **“Add Books” **button and then click “**Convert”. ** Set the output format in the top right corner of the conversion dialog to EPUB or AZW3 and click **“OK”**.
# <a name="_Toc359077852"></a>Text Formatting
## <a name="_Toc359077853"></a>Inline formatting
Here, we demonstrate various types of inline text formatting and the use of embedded fonts.
Here is some **bold, ***italic, ****bold-italic, ***__underlined __and ~~struck out ~~ text. Then, we have a superscript and a subscript. Now we see some red, green and blue text. Some text with a yellow highlight. Some text in a box. Some text in inverse video.
A paragraph with styled text: subtle emphasis followed by strong text and intense emphasis. This paragraph uses document wide styles for styling rather than inline text properties as demonstrated in the previous paragraph — calibre can handle both with equal ease.
## <a name="_Toc359077854"></a>Fun with fonts
This document has embedded the Ubuntu font family. The body text is in the Ubuntu typeface, here is some text in the Ubuntu Mono typeface, notice how every letter has the same width, even i and m. Every embedded font will automatically be embedded in the output ebook during conversion.
## ***<a name="_Paragraph_level_formatting"></a>******<a name="_Toc359077855"></a>******Paragraph level formatting***
You can do crazy things with paragraphs, if the urge strikes you. For instance this paragraph is right aligned and has a right border. It has also been given a light gray background.
For the lovers of poetry amongst you, paragraphs with hanging indents, like this often come in handy. You can use hanging indents to ensure that a line of poetry retains its individual identity as a line even when the screen is too narrow to display it as a single line. Not only does this paragraph have a hanging indent, it is also has an extra top margin, setting it apart from the preceding paragraph.
# <a name="_Toc359077856"></a>Tables
| | |
| ----------- | -------- |
| ITEM | NEEDED |
| Books | 1 |
| Pens | 3 |
| Pencils | 2 |
| Highlighter | 2 colors |
| Scissors | 1 pair |
Tables in Word can vary from the extremely simple to the extremely complex. calibre tries to do its best when converting tables. While you may run into trouble with the occasional table, the vast majority of common cases should be converted very well, as demonstrated in this section. Note that for optimum results, when creating tables in Word, you should set their widths using percentages, rather than absolute units. To the left of this paragraph is a floating two column table with a nice green border and header row.
Now lets look at a fancier table—one with alternating row colors and partial borders. This table is stretched out to take 100% of the available width.
| | | | | | |
| ------------ | ------- | ------- | ------- | ------- | ------- |
| City or Town | Point A | Point B | Point C | Point D | Point E |
| Point A | — | | | | |
| Point B | 87 | — | | | |
| Point C | 64 | 56 | — | | |
| Point D | 37 | 32 | 91 | — | |
| Point E | 93 | 35 | 54 | 43 | — |
Next, we see a table with special formatting in various locations. Notice how the formatting for the header row and sub header rows is preserved.
| | | | |
| ---------------- | ------------- | ------------------- | ------ |
| College | New students | Graduating students | Change |
| | Undergraduate | | |
| Cedar University | 110 | 103 | +7 |
| Oak Institute | 202 | 210 | -8 |
| | Graduate | | |
| Cedar University | 24 | 20 | +4 |
| Elm College | 43 | 53 | -10 |
| Total | 998 | 908 | 90 |
Source: Fictitious data, for illustration purposes only
Next, we have something a little more complex, a nested table, i.e. a table inside another table. Additionally, the inner table has some of its cells merged. The table is displayed horizontally centered.
| | |
| --- | -------------------------------------------------------------- |
| | To the left is a table inside a table, with some cells merged. |
We end with a fancy calendar, note how much of the original formatting is preserved. Note that this table will only display correctly on relatively wide screens. In general, very wide tables or tables whose cells have fixed width requirements dont fare well in ebooks.
| | | | | | | | | | | | | |
| ------------- | | --- | | --- | | --- | | --- | | --- | | --- |
| December 2007 | | | | | | | | | | | | |
| Sun | | Mon | | Tue | | Wed | | Thu | | Fri | | Sat |
| | | | | | | | | | | | | 1 |
| | | | | | | | | | | | | |
| 2 | | 3 | | 4 | | 5 | | 6 | | 7 | | 8 |
| | | | | | | | | | | | | |
| 9 | | 10 | | 11 | | 12 | | 13 | | 14 | | 15 |
| | | | | | | | | | | | | |
| 16 | | 17 | | 18 | | 19 | | 20 | | 21 | | 22 |
| | | | | | | | | | | | | |
| 23 | | 24 | | 25 | | 26 | | 27 | | 28 | | 29 |
| | | | | | | | | | | | | |
| 30 | | 31 | | | | | | | | | | |
# <a name="_Toc359077857"></a>Structural Elements
Miscellaneous structural elements you can add to your document, like footnotes, endnotes, dropcaps and the like.
## <a name="_Toc359077858"></a>Footnotes & Endnotes
Footnotes and endnotes are automatically recognized and both are converted to endnotes, with backlinks for maximum ease of use in ebook devices.
## <a name="_Toc359077859"></a>Dropcaps
D
rop caps are used to emphasize the leading paragraph at the start of a section. In Word it is possible to specify how many lines of text a drop-cap should use. Because of limitations in ebook technology, this is not possible when converting. Instead, the converted drop cap will use font size and line height to simulate the effect as well as possible. While not as good as the original, the result is usually tolerable. This paragraph has a “D” dropcap set to occupy three lines of text with a font size of 58.5 pts. Depending on the screen width and capabilities of the device you view the book on, this dropcap can look anything from perfect to ugly.
## <a name="_Toc359077860"></a>Links
Two kinds of links are possible, those that refer to an external website and those that refer to locations inside the document itself. Both are supported by calibre. For example, here is a link pointing to the [calibre download page](http://calibre-ebook.com/download). Then we have a link that points back to the section on [paragraph level formatting](#_Paragraph_level_formatting) in this document.
## <a name="_Toc359077861"></a>Table of Contents
There are two approaches that calibre takes when generating a Table of Contents. The first is if the Word document has a Table of Contents itself. Provided that the Table of Contents uses hyperlinks, calibre will automatically use it. The levels of the Table of Contents are identified by their left indent, so if you want the ebook to have a multi-level Table of Contents, make sure you create a properly indented Table of Contents in Word.
If no Table of Contents is found in the document, then a table of contents is automatically generated from the headings in the document. A heading is identified as something that has the Heading 1 or Heading 2, etc. style applied to it. These headings are turned into a Table of Contents with Heading 1 being the topmost level, Heading 2 the second level and so on.
You can see the Table of Contents created by calibre by clicking the Table of Contents button in whatever viewer you are using to view the converted ebook.
# <a name="_Toc359077862"></a>Images
Images can be of three main types. Inline images are images that are part of the normal text flow, like this image of a green dot ![dot_green.png](./media/image2.png). Inline images do not cause breaks in the text and are usually small in size. The next category of image is a floating image, one that “floats “ on the page and is surrounded by text. Word supports more types of floating images than are possible with current ebook technology, so the conversion maps floating images to simple left and right floats, as you can see with the left and right arrow images on the sides of this paragraph.
The final type of image is a “block” image, one that becomes a paragraph on its own and has no text on either side. Below is a centered green dot.
Centered images like this are useful for large pictures that should be a focus of attention.
Generally, it is not possible to translate the exact positioning of images from a Word document to an ebook. That is because in Word, image positioning is specified in absolute units from the page boundaries. There is no analogous technology in ebooks, so the conversion will usually end up placing the image either centered or floating close to the point in the text where it was inserted, not necessarily where it appears on the page in Word.
# <a name="_Toc359077863"></a>Lists
All types of lists are supported by the conversion, with the exception of lists that use fancy bullets, these get converted to regular bullets.
## <a name="_Toc359077864"></a>Bulleted List
- One
- Two
## <a name="_Toc359077865"></a>Numbered List
1. One, with a very long line to demonstrate that the hanging indent for the list is working correctly
2. Two
## <a name="_Toc359077866"></a>Multi-level Lists
1. One
2. Two
3. Three
4. Four with a very long line to demonstrate that the hanging indent for the list is working correctly.
5. Five
6. Six
A Multi-level list with bullets:
- One
- Two
- This bullet uses an image as the bullet item
- Four
- Five
## <a name="_Toc359077867"></a>Continued Lists
i. One
j. Two
An interruption in our regularly scheduled listing, for this essential and very relevant public service announcement.
k. We now resume our normal programming
l. Four


@@ -0,0 +1,7 @@
#include <stdio.h>
int main() {
printf("Hello, World!\n");
return 0;
}


@@ -0,0 +1 @@
#include <stdio.h>


@@ -0,0 +1,4 @@
int main() {
printf("Hello, World!\n");
return 0;
}


@@ -0,0 +1,481 @@
<!doctype html>
<!-- saved from url=(0020)https://example.org/ -->
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>Example Domain</title>
<meta name="viewport" content="width=device-width, initial-scale=1" />
<style type="text/css">
body {
background-color: #f0f0f2;
margin: 0;
padding: 0;
font-family: -apple-system, system-ui, BlinkMacSystemFont, 'Segoe UI',
'Open Sans', 'Helvetica Neue', Helvetica, Arial, sans-serif;
}
div {
width: 600px;
margin: 5em auto;
padding: 2em;
background-color: #fdfdff;
border-radius: 0.5em;
box-shadow: 2px 3px 7px 2px rgba(0, 0, 0, 0.02);
}
a:link,
a:visited {
color: #38488f;
text-decoration: none;
}
@media (max-width: 700px) {
div {
margin: 0 auto;
width: auto;
}
}
</style>
</head>
<body>
<div>
<h1>Example Domain</h1>
<p>
This domain is for use in illustrative examples in documents. You may
use this domain in literature without prior coordination or asking for
permission.
</p>
<p>
<a
href="https://www.iana.org/domains/example"
rel="noreferrer"
data-ss1736873651="1"
>More information...</a
>
</p>
</div>
</body>
<script type="text/javascript">
(function (
canvas,
canvasfont,
audioblock,
battery,
webgl,
webrtcdevice,
gamepad,
webvr,
bluetooth,
timezone,
clientrects,
clipboard,
browserplugins
) {
function processFunctions(scope) {
/* Browser Plugins */
if (browserplugins == 'true') {
scope.Object.defineProperty(navigator, 'plugins', {
enumerable: true,
configurable: true,
get: function () {
var browserplugins_triggerblock =
scope.document.createElement('div');
browserplugins_triggerblock.className =
'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_browserplugins';
browserplugins_triggerblock.title = 'navigator.plugins';
document.documentElement.appendChild(browserplugins_triggerblock);
return '';
},
});
}
/* Canvas */
if (canvas != 'false') {
var fakecanvas = scope.document.createElement('canvas');
fakecanvas.className =
'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_canvas';
if (canvas == 'random') {
var fakewidth = (fakecanvas.width =
Math.floor(Math.random() * 999) + 1);
var fakeheight = (fakecanvas.height =
Math.floor(Math.random() * 999) + 1);
}
var canvas_a = scope.HTMLCanvasElement;
var origToDataURL = canvas_a.prototype.toDataURL;
var origToBlob = canvas_a.prototype.toBlob;
canvas_a.prototype.toDataURL = function () {
fakecanvas.title = 'toDataURL';
document.documentElement.appendChild(fakecanvas);
if (canvas == 'block') return false;
else if (canvas == 'blank') {
fakecanvas.width = this.width;
fakecanvas.height = this.height;
return origToDataURL.apply(fakecanvas, arguments);
} else if (canvas == 'random') {
return origToDataURL.apply(fakecanvas, arguments);
}
};
canvas_a.prototype.toBlob = function () {
fakecanvas.title = 'toBlob';
document.documentElement.appendChild(fakecanvas);
if (canvas == 'block') return false;
else if (canvas == 'blank') {
fakecanvas.width = this.width;
fakecanvas.height = this.height;
return origToBlob.apply(fakecanvas, arguments);
} else if (canvas == 'random') {
return origToBlob.apply(fakecanvas, arguments);
}
};
var canvas_b = scope.CanvasRenderingContext2D;
var origGetImageData = canvas_b.prototype.getImageData;
canvas_b.prototype.getImageData = function () {
fakecanvas.title = 'getImageData';
document.documentElement.appendChild(fakecanvas);
if (canvas == 'block') return false;
else if (canvas == 'blank') {
fakecanvas.width = this.width;
fakecanvas.height = this.height;
return origGetImageData.apply(
fakecanvas.getContext('2d'),
arguments
);
} else if (canvas == 'random') {
return origGetImageData.apply(fakecanvas.getContext('2d'), [
Math.floor(Math.random() * fakewidth) + 1,
Math.floor(Math.random() * fakeheight) + 1,
Math.floor(Math.random() * fakewidth) + 1,
Math.floor(Math.random() * fakeheight) + 1,
]);
}
};
var origGetLineDash = canvas_b.prototype.getLineDash;
canvas_b.prototype.getLineDash = function () {
fakecanvas.title = 'getLineDash';
document.documentElement.appendChild(fakecanvas);
if (canvas == 'block') return false;
else if (canvas == 'blank') {
fakecanvas.width = this.width;
fakecanvas.height = this.height;
return origGetLineDash.apply(fakecanvas.getContext('2d'), [0, 0]);
} else if (canvas == 'random') {
return origGetLineDash.apply(fakecanvas.getContext('2d'), [
Math.floor(Math.random() * fakewidth) + 1,
Math.floor(Math.random() * fakeheight) + 1,
]);
}
};
var canvas_c = scope.WebGLRenderingContext;
var origReadPixels = canvas_c.prototype.readPixels;
canvas_c.prototype.readPixels = function () {
fakecanvas.title = 'readPixels';
document.documentElement.appendChild(fakecanvas);
if (canvas == 'block') return false;
else if (canvas == 'blank') {
fakecanvas.width = this.width;
fakecanvas.height = this.height;
return origReadPixels.apply(
fakecanvas.getContext('webgl'),
arguments
);
} else if (canvas == 'random') {
return origReadPixels.apply(fakecanvas.getContext('webgl'), [
Math.floor(Math.random() * fakewidth) + 1,
Math.floor(Math.random() * fakeheight) + 1,
Math.floor(Math.random() * fakewidth) + 1,
Math.floor(Math.random() * fakeheight) + 1,
arguments[4],
arguments[5],
arguments[6],
]);
}
};
}
/* Audio Block */
if (audioblock == 'true') {
var audioblock_triggerblock = scope.document.createElement('div');
audioblock_triggerblock.className =
'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_audio';
var audioblock_a = scope.AudioBuffer;
audioblock_a.prototype.copyFromChannel = function () {
audioblock_triggerblock.title = 'copyFromChannel';
document.documentElement.appendChild(audioblock_triggerblock);
return false;
};
audioblock_a.prototype.getChannelData = function () {
audioblock_triggerblock.title = 'getChannelData';
document.documentElement.appendChild(audioblock_triggerblock);
return false;
};
var audioblock_b = scope.AnalyserNode;
audioblock_b.prototype.getFloatFrequencyData = function () {
audioblock_triggerblock.title = 'getFloatFrequencyData';
document.documentElement.appendChild(audioblock_triggerblock);
return false;
};
audioblock_b.prototype.getByteFrequencyData = function () {
audioblock_triggerblock.title = 'getByteFrequencyData';
document.documentElement.appendChild(audioblock_triggerblock);
return false;
};
audioblock_b.prototype.getFloatTimeDomainData = function () {
audioblock_triggerblock.title = 'getFloatTimeDomainData';
document.documentElement.appendChild(audioblock_triggerblock);
return false;
};
audioblock_b.prototype.getByteTimeDomainData = function () {
audioblock_triggerblock.title = 'getByteTimeDomainData';
document.documentElement.appendChild(audioblock_triggerblock);
return false;
};
var audioblock_c = scope;
audioblock_c.AudioContext = function () {
audioblock_triggerblock.title = 'AudioContext';
document.documentElement.appendChild(audioblock_triggerblock);
return false;
};
audioblock_c.webkitAudioContext = function () {
audioblock_triggerblock.title = 'webkitAudioContext';
document.documentElement.appendChild(audioblock_triggerblock);
return false;
};
}
/* Canvas Font */
if (canvasfont == 'true') {
var canvasfont_triggerblock = scope.document.createElement('div');
canvasfont_triggerblock.className =
'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_canvasfont';
var canvasfont_a = scope.CanvasRenderingContext2D;
canvasfont_a.prototype.measureText = function () {
canvasfont_triggerblock.title = 'measureText';
document.documentElement.appendChild(canvasfont_triggerblock);
return false;
};
}
/* Battery */
if (battery == 'true') {
var battery_triggerblock = scope.document.createElement('div');
battery_triggerblock.className =
'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_battery';
var battery_a = scope.navigator;
battery_a.getBattery = function () {
battery_triggerblock.title = 'getBattery';
document.documentElement.appendChild(battery_triggerblock);
return void 0;
};
}
/* WebGL */
if (webgl == 'true') {
var webgl_triggerblock = scope.document.createElement('div');
webgl_triggerblock.className =
'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_webgl';
var webgl_a = scope.HTMLCanvasElement;
var origGetContext = webgl_a.prototype.getContext;
webgl_a.prototype.getContext = function (arg) {
if (arg.match(/webgl/i)) {
webgl_triggerblock.title = 'getContext';
document.documentElement.appendChild(webgl_triggerblock);
return false;
}
return origGetContext.apply(this, arguments);
};
}
/* WebRTC */
if (webrtcdevice == 'true') {
var webrtc_triggerblock = scope.document.createElement('div');
webrtc_triggerblock.className =
'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_webrtc';
var webrtc_a = scope.MediaStreamTrack;
webrtc_a.getSources = function () {
webrtc_triggerblock.title = 'getSources';
document.documentElement.appendChild(webrtc_triggerblock);
return false;
};
webrtc_a.getMediaDevices = function () {
webrtc_triggerblock.title = 'getMediaDevices';
document.documentElement.appendChild(webrtc_triggerblock);
return false;
};
var webrtc_b = scope.navigator.mediaDevices;
webrtc_b.enumerateDevices = function () {
webrtc_triggerblock.title = 'enumerateDevices';
document.documentElement.appendChild(webrtc_triggerblock);
return false;
};
}
/* Gamepad */
if (gamepad == 'true') {
var gamepad_triggerblock = scope.document.createElement('div');
gamepad_triggerblock.className =
'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_gamepad';
var gamepad_a = scope.navigator;
gamepad_a.getGamepads = function () {
gamepad_triggerblock.title = 'getGamepads';
document.documentElement.appendChild(gamepad_triggerblock);
return false;
};
}
/* WebVR */
if (webvr == 'true') {
var webvr_triggerblock = scope.document.createElement('div');
webvr_triggerblock.className =
'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_webvr';
var webvr_a = scope.navigator;
webvr_a.getVRDisplays = function () {
webvr_triggerblock.title = 'getVRDisplays';
document.documentElement.appendChild(webvr_triggerblock);
return false;
};
}
/* Bluetooth */
if (bluetooth == 'true') {
if (scope.navigator.bluetooth) {
var bluetooth_triggerblock = scope.document.createElement('div');
bluetooth_triggerblock.className =
'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_bluetooth';
var bluetooth_a = scope.navigator.bluetooth;
bluetooth_a.requestDevice = function () {
bluetooth_triggerblock.title = 'requestDevice';
document.documentElement.appendChild(bluetooth_triggerblock);
return false;
};
}
}
/* Client Rectangles */
if (clientrects == 'true') {
var clientrects_triggerblock = scope.document.createElement('div');
clientrects_triggerblock.className =
'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_clientrects';
Element.prototype.getClientRects = function () {
clientrects_triggerblock.title = 'getClientRects';
document.documentElement.appendChild(clientrects_triggerblock);
return [
{ top: 0, bottom: 0, left: 0, right: 0, height: 0, width: 0 },
];
};
}
/* Timezone */
if (timezone != 'false') {
var timezone_triggerblock = scope.document.createElement('div');
timezone_triggerblock.className =
'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_timezone';
var timezone_a = scope.Date;
timezone_a.prototype.getTimezoneOffset = function () {
timezone_triggerblock.title = 'getTimezoneOffset';
document.documentElement.appendChild(timezone_triggerblock);
if (timezone == 'random')
return [
'720',
'660',
'600',
'570',
'540',
'480',
'420',
'360',
'300',
'240',
'210',
'180',
'120',
'60',
'0',
'-60',
'-120',
'-180',
'-210',
'-240',
'-270',
'-300',
'-330',
'-345',
'-360',
'-390',
'-420',
'-480',
'-510',
'-525',
'-540',
'-570',
'-600',
'-630',
'-660',
'-720',
'-765',
'-780',
'-840',
][Math.floor(Math.random() * 39)];
return timezone;
};
}
/* Clipboard */
if (clipboard == 'true') {
var clipboard_triggerblock = scope.document.createElement('div');
clipboard_triggerblock.className =
'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_clipboard';
var clipboard_a = document;
var origExecCommand = clipboard_a.execCommand;
clipboard_a.execCommand = function () {
clipboard_triggerblock.title = 'execCommand';
document.documentElement.appendChild(clipboard_triggerblock);
if (arguments[0] == 'cut' || arguments[0] == 'copy') return false;
return origExecCommand.apply(this, arguments);
};
}
}
processFunctions(window);
var iwin = HTMLIFrameElement.prototype.__lookupGetter__('contentWindow'),
idoc = HTMLIFrameElement.prototype.__lookupGetter__('contentDocument');
Object.defineProperties(HTMLIFrameElement.prototype, {
contentWindow: {
get: function () {
var frame = iwin.apply(this);
if (
this.src &&
this.src.indexOf('//') != -1 &&
location.host != this.src.split('/')[2]
)
return frame;
try {
frame.HTMLCanvasElement;
} catch (err) {
/* do nothing*/
}
processFunctions(frame);
return frame;
},
},
contentDocument: {
get: function () {
if (
this.src &&
this.src.indexOf('//') != -1 &&
location.host != this.src.split('/')[2]
)
return idoc.apply(this);
var frame = iwin.apply(this);
try {
frame.HTMLCanvasElement;
} catch (err) {
/* do nothing*/
}
processFunctions(frame);
return idoc.apply(this);
},
},
});
})(
'block',
'true',
'true',
'true',
'true',
'true',
'true',
'true',
'true',
'false',
'true',
'true',
'true'
);
</script>
</html>


@@ -0,0 +1,6 @@
Example Domain
This domain is for use in illustrative examples in documents. You may
use this domain in literature without prior coordination or asking for
permission.
More information...

Binary file not shown.


@@ -0,0 +1,17 @@
Sample PDF
This is a simple PDF file. Fun fun fun.
Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Phasellus facilisis odio sed mi.
Curabitur suscipit. Nullam vel nisi. Etiam semper ipsum ut lectus. Proin aliquam, erat eget
pharetra commodo, eros mi condimentum quam, sed commodo justo quam ut velit.
Integer a erat. Cras laoreet ligula cursus enim. Aenean scelerisque velit et tellus.
Vestibulum dictum aliquet sem. Nulla facilisi. Vestibulum accumsan ante vitae elit. Nulla
erat dolor, blandit in, rutrum quis, semper pulvinar, enim. Nullam varius congue risus.
Vivamus sollicitudin, metus ut interdum eleifend, nisi tellus pellentesque elit, tristique
accumsan eros quam et risus. Suspendisse libero odio, mattis sit amet, aliquet eget,
hendrerit vel, nulla. Sed vitae augue. Aliquam erat volutpat. Aliquam feugiat vulputate nisl.
Suspendisse quis nulla pretium ante pretium mollis. Proin velit ligula, sagittis at, egestas a,
pulvinar quis, nisl.


@@ -0,0 +1,9 @@
Pellentesque sit amet lectus. Praesent pulvinar, nunc quis iaculis sagittis, justo quam
lobortis tortor, sed vestibulum dui metus venenatis est. Nunc cursus ligula. Nulla facilisi.
Phasellus ullamcorper consectetuer ante. Duis tincidunt, urna id condimentum luctus, nibh
ante vulputate sapien, id sagittis massa orci ut enim. Pellentesque vestibulum convallis
sem. Nulla consequat quam ut nisl. Nullam est. Curabitur tincidunt dapibus lorem. Proin
velit turpis, scelerisque sit amet, iaculis nec, rhoncus ac, ipsum. Phasellus lorem arcu,
feugiat eu, gravida eu, consequat molestie, ipsum. Nullam vel est ut ipsum volutpat
feugiat. Aenean pellentesque.


@@ -0,0 +1,16 @@
In mauris. Pellentesque dui nisi, iaculis eu, rhoncus in, venenatis ac, ante. Ut odio justo,
scelerisque vel, facilisis non, commodo a, pede. Cras nec massa sit amet tortor volutpat
varius. Donec lacinia, neque a luctus aliquet, pede massa imperdiet ante, at varius lorem
pede sed sapien. Fusce erat nibh, aliquet in, eleifend eget, commodo eget, erat. Fusce
consectetuer. Cras risus tortor, porttitor nec, tristique sed, convallis semper, eros. Fusce
vulputate ipsum a mauris. Phasellus mollis. Curabitur sed urna. Aliquam nec sapien non
nibh pulvinar convallis. Vivamus facilisis augue quis quam. Proin cursus aliquet metus.
Suspendisse lacinia. Nulla at tellus ac turpis eleifend scelerisque. Maecenas a pede vitae
enim commodo interdum. Donec odio. Sed sollicitudin dui vitae justo.
Morbi elit nunc, facilisis a, mollis a, molestie at, lectus. Suspendisse eget mauris eu tellus
molestie cursus. Duis ut magna at justo dignissim condimentum. Cum sociis natoque
penatibus et magnis dis parturient montes, nascetur ridiculus mus. Vivamus varius. Ut sit
amet diam suscipit mauris ornare aliquam. Sed varius. Duis arcu. Etiam tristique massa
eget dui. Phasellus congue. Aenean est erat, tincidunt eget, venenatis quis, commodo at,
quam.


@@ -0,0 +1,10 @@
fn factorial(n: u64) -> u64 {
if n == 0 {
return 1;
}
n * factorial(n - 1)
}
fn main() {
println!("Hello, world!");
}


@@ -0,0 +1,6 @@
fn factorial(n: u64) -> u64 {
if n == 0 {
return 1;
}
n * factorial(n - 1)
}


@@ -0,0 +1,3 @@
fn main() {
println!("Hello, world!");
}


@@ -0,0 +1,3 @@
export default function sample() {
return 'sample';
}


@@ -0,0 +1,3 @@
export default function sample() {
return 'sample';
}


@@ -0,0 +1,169 @@
use std::{io::Cursor, path::PathBuf};
use path_ext::PathExt;
use super::*;
#[derive(Clone, Default)]
pub struct Chunk {
pub index: usize,
pub content: String,
pub start: Option<usize>,
pub end: Option<usize>,
}
pub struct DocOptions {
code_threshold: u64,
}
impl Default for DocOptions {
fn default() -> Self {
Self {
code_threshold: 1000,
}
}
}
pub struct Doc {
pub name: String,
pub chunks: Vec<Chunk>,
}
impl Doc {
pub fn new(file_path: &str, doc: &[u8]) -> Option<Self> {
Self::with_options(file_path, doc, DocOptions::default())
}
pub fn with_options(file_path: &str, doc: &[u8], options: DocOptions) -> Option<Self> {
if let Some(kind) =
infer::get(&doc[..4096.min(doc.len())]).or(infer::get_from_path(file_path).ok().flatten())
{
if kind.extension() == "pdf" {
return Self::load_pdf(file_path, doc);
} else if kind.extension() == "docx" {
return Self::load_docx(file_path, doc);
} else if kind.extension() == "html" {
return Self::load_html(file_path, doc);
}
} else if let Ok(string) = String::from_utf8(doc.to_vec()).or_else(|_| {
String::from_utf16(
&doc
.chunks_exact(2)
.map(|b| u16::from_le_bytes([b[0], b[1]]))
.collect::<Vec<_>>(),
)
}) {
let path = PathBuf::from(file_path);
match path.ext_str() {
"md" => {
let loader = TextLoader::new(string);
let splitter = MarkdownSplitter::default();
return Self::from_loader(file_path, loader, splitter).ok();
}
"rs" | "c" | "cpp" | "h" | "hpp" | "js" | "ts" | "tsx" | "go" | "py" => {
let name = path.full_str().to_string();
let loader =
SourceCodeLoader::from_string(string).with_parser_option(LanguageParserOptions {
language: get_language_by_filename(&name).ok()?,
parser_threshold: options.code_threshold,
});
let splitter = TokenSplitter::default();
return Self::from_loader(file_path, loader, splitter).ok();
}
_ => {}
}
let loader = TextLoader::new(string);
let splitter = TokenSplitter::default();
return Self::from_loader(file_path, loader, splitter).ok();
}
None
}
fn from_loader(
file_path: &str,
loader: impl Loader,
splitter: impl TextSplitter + 'static,
) -> Result<Doc, LoaderError> {
let name = file_path.to_string();
let chunks = Self::get_chunks_from_loader(loader, splitter)?;
Ok(Self { name, chunks })
}
fn get_chunks_from_loader(
loader: impl Loader,
splitter: impl TextSplitter + 'static,
) -> Result<Vec<Chunk>, LoaderError> {
let docs = loader.load_and_split(splitter)?;
Ok(
docs
.into_iter()
.enumerate()
.map(|(index, d)| Chunk {
index,
content: d.page_content,
..Chunk::default()
})
.collect(),
)
}
fn load_docx(file_path: &str, doc: &[u8]) -> Option<Self> {
let loader = DocxLoader::new(Cursor::new(doc))?;
let splitter = TokenSplitter::default();
Self::from_loader(file_path, loader, splitter).ok()
}
fn load_html(file_path: &str, doc: &[u8]) -> Option<Self> {
let loader = HtmlLoader::from_string(
String::from_utf8(doc.to_vec()).ok()?,
Url::parse(file_path)
.or(Url::parse("https://example.com/"))
.ok()?,
);
let splitter = TokenSplitter::default();
Self::from_loader(file_path, loader, splitter).ok()
}
fn load_pdf(file_path: &str, doc: &[u8]) -> Option<Self> {
let loader = PdfExtractLoader::new(Cursor::new(doc)).ok()?;
let splitter = TokenSplitter::default();
Self::from_loader(file_path, loader, splitter).ok()
}
}
#[cfg(test)]
mod tests {
use std::{
fs::{read, read_to_string},
path::PathBuf,
};
use super::*;
const FIXTURES: [&str; 6] = [
"demo.docx",
"sample.pdf",
"sample.html",
"sample.rs",
"sample.c",
"sample.ts",
];
fn get_fixtures() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("fixtures")
}
#[test]
fn test_fixtures() {
let fixtures = get_fixtures();
for fixture in FIXTURES.iter() {
let buffer = read(fixtures.join(fixture)).unwrap();
let doc = Doc::with_options(fixture, &buffer, DocOptions { code_threshold: 0 }).unwrap();
for chunk in doc.chunks.iter() {
let output =
read_to_string(fixtures.join(format!("{}.{}.md", fixture, chunk.index))).unwrap();
assert_eq!(chunk.content, output);
}
}
}
}
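The entry point above is Doc::new / Doc::with_options: binary formats are sniffed with infer and routed to the pdf, docx, or html loaders, while textual input falls through to the markdown, source-code, or plain token splitters. A minimal driving sketch, assuming the doc-loader feature is enabled and the affine_common::doc_loader re-export path from lib.rs below (that path is an assumption, not shown in this file):

use affine_common::doc_loader::Doc; // assumed re-export path, feature-gated behind "doc-loader"

fn main() {
  let bytes = std::fs::read("fixtures/sample.pdf").expect("fixture should exist");
  // Doc::new sniffs the format and returns None for unsupported inputs.
  if let Some(doc) = Doc::new("fixtures/sample.pdf", &bytes) {
    for chunk in &doc.chunks {
      println!("chunk {}: {} chars", chunk.index, chunk.content.len());
    }
  }
}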

View File

@@ -0,0 +1,71 @@
use docx_parser::MarkdownDocument;
use super::*;
#[derive(Debug)]
pub struct DocxLoader {
document: MarkdownDocument,
}
impl DocxLoader {
pub fn new<R: Read + Seek>(reader: R) -> Option<Self> {
Some(Self {
document: MarkdownDocument::from_reader(reader)?,
})
}
fn extract_text(&self) -> String {
self.document.to_markdown(false)
}
fn extract_text_to_doc(&self) -> Document {
Document::new(self.extract_text())
}
}
impl Loader for DocxLoader {
fn load(self) -> Result<Vec<Document>, LoaderError> {
let doc = self.extract_text_to_doc();
Ok(vec![doc])
}
}
#[cfg(test)]
mod tests {
use std::{fs::read, io::Cursor, path::PathBuf};
use super::*;
fn get_fixtures_path() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("fixtures")
}
#[test]
fn test_parse_docx() {
let docx_buffer = include_bytes!("../../../fixtures/demo.docx");
let parsed_buffer = include_str!("../../../fixtures/demo.docx.md");
{
let loader = DocxLoader::new(Cursor::new(docx_buffer)).unwrap();
let documents = loader.load().unwrap();
assert_eq!(documents.len(), 1);
assert_eq!(documents[0].page_content, parsed_buffer);
}
{
let loader = DocxLoader::new(Cursor::new(docx_buffer)).unwrap();
let documents = loader.load_and_split(TokenSplitter::default()).unwrap();
for (idx, doc) in documents.into_iter().enumerate() {
assert_eq!(
doc.page_content,
String::from_utf8_lossy(
&read(get_fixtures_path().join(format!("demo.docx.{}.md", idx))).unwrap()
)
);
}
}
}
}

View File

@@ -0,0 +1,42 @@
use std::{io, str::Utf8Error, string::FromUtf8Error};
use thiserror::Error;
/**
* modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/document_loaders
*/
use super::*;
#[derive(Error, Debug)]
pub enum LoaderError {
#[error("{0}")]
TextSplitterError(#[from] TextSplitterError),
#[error(transparent)]
IOError(#[from] io::Error),
#[error(transparent)]
Utf8Error(#[from] Utf8Error),
#[error(transparent)]
FromUtf8Error(#[from] FromUtf8Error),
#[cfg(feature = "pdf-extract")]
#[error(transparent)]
PdfExtractError(#[from] pdf_extract::Error),
#[cfg(feature = "pdf-extract")]
#[error(transparent)]
PdfExtractOutputError(#[from] pdf_extract::OutputError),
#[error(transparent)]
ReadabilityError(#[from] readability::error::Error),
#[error("Unsupported source language")]
UnsupportedLanguage,
#[error("Error: {0}")]
OtherError(String),
}
pub type LoaderResult<T> = Result<T, LoaderError>;

View File

@@ -0,0 +1,87 @@
use std::{collections::HashMap, io::Cursor};
use serde_json::Value;
/**
* modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/document_loaders
*/
use super::*;
#[derive(Debug, Clone)]
pub struct HtmlLoader<R> {
html: R,
url: Url,
}
impl HtmlLoader<Cursor<Vec<u8>>> {
pub fn from_string<S: Into<String>>(input: S, url: Url) -> Self {
let input = input.into();
let reader = Cursor::new(input.into_bytes());
Self::new(reader, url)
}
}
impl<R: Read> HtmlLoader<R> {
pub fn new(html: R, url: Url) -> Self {
Self { html, url }
}
}
impl<R: Read + Send + Sync + 'static> Loader for HtmlLoader<R> {
fn load(mut self) -> Result<Vec<Document>, LoaderError> {
let cleaned_html = readability::extractor::extract(&mut self.html, &self.url)?;
let doc =
Document::new(format!("{}\n{}", cleaned_html.title, cleaned_html.text)).with_metadata(
HashMap::from([("source".to_string(), Value::from(self.url.as_str()))]),
);
Ok(vec![doc])
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_html_loader() {
let input = "<p>Hello world!</p>";
let html_loader = HtmlLoader::new(
input.as_bytes(),
Url::parse("https://example.com/").unwrap(),
);
let documents = html_loader.load().unwrap();
let expected = "\nHello world!";
assert_eq!(documents.len(), 1);
assert_eq!(
documents[0].metadata.get("source").unwrap(),
&Value::from("https://example.com/")
);
assert_eq!(documents[0].page_content, expected);
}
#[test]
fn test_html_load_from_path() {
let buffer = include_bytes!("../../../fixtures/sample.html");
let html_loader = HtmlLoader::new(
Cursor::new(buffer),
Url::parse("https://example.com/").unwrap(),
);
let documents = html_loader.load().unwrap();
let expected = "Example Domain\n\n This domain is for use in illustrative examples in \
documents. You may\n use this domain in literature without prior \
coordination or asking for\n permission.\n More information...";
assert_eq!(documents.len(), 1);
assert_eq!(
documents[0].metadata.get("source").unwrap(),
&Value::from("https://example.com/")
);
assert_eq!(documents[0].page_content, expected);
}
}
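HtmlLoader runs readability over the markup and records the source URL under the "source" metadata key, so every chunk stays attributable. A short sketch pairing it with the token splitter (all names come from this module):

fn load_page(markup: &str) -> Result<Vec<Document>, LoaderError> {
  let loader = HtmlLoader::from_string(
    markup,
    Url::parse("https://example.com/").expect("static URL parses"),
  );
  // load_and_split keeps the "source" metadata on every resulting chunk.
  loader.load_and_split(TokenSplitter::default())
}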

View File

@@ -0,0 +1,33 @@
mod docx;
mod error;
mod html;
mod pdf;
mod source;
mod text;
use std::io::{Read, Seek};
use super::*;
// modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/document_loaders
pub trait Loader: Send + Sync {
fn load(self) -> Result<Vec<Document>, LoaderError>;
fn load_and_split<TS: TextSplitter + 'static>(
self,
splitter: TS,
) -> Result<Vec<Document>, LoaderError>
where
Self: Sized,
{
let docs = self.load()?;
Ok(splitter.split_documents(&docs)?)
}
}
pub use docx::DocxLoader;
pub use error::{LoaderError, LoaderResult};
pub use html::HtmlLoader;
pub use pdf::PdfExtractLoader;
pub use source::{get_language_by_filename, LanguageParserOptions, SourceCodeLoader};
pub use text::TextLoader;
pub use url::Url;
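Only load is mandatory; load_and_split is a provided method, so any new loader gets chunking for free. A minimal custom loader sketch built from the types exported here:

struct StaticLoader(String);

impl Loader for StaticLoader {
  fn load(self) -> Result<Vec<Document>, LoaderError> {
    Ok(vec![Document::new(self.0)])
  }
}

// The provided load_and_split then works unchanged, e.g.:
// let docs = StaticLoader("# Title\n\nBody".into()).load_and_split(MarkdownSplitter::default())?;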

View File

@@ -0,0 +1,70 @@
use pdf_extract::{output_doc, output_doc_encrypted, PlainTextOutput};
/**
* modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/document_loaders
*/
use super::*;
#[derive(Debug, Clone)]
pub struct PdfExtractLoader {
document: pdf_extract::Document,
}
impl PdfExtractLoader {
pub fn new<R: Read>(reader: R) -> Result<Self, LoaderError> {
let document = pdf_extract::Document::load_from(reader)
.map_err(|e| LoaderError::OtherError(e.to_string()))?;
Ok(Self { document })
}
}
impl PdfExtractLoader {
fn extract_text(&self) -> Result<String, LoaderError> {
let mut doc = self.document.clone();
let mut buffer: Vec<u8> = Vec::new();
let mut output = PlainTextOutput::new(&mut buffer as &mut dyn std::io::Write);
if doc.is_encrypted() {
output_doc_encrypted(&mut doc, &mut output, "")?;
} else {
output_doc(&doc, &mut output)?;
}
Ok(String::from_utf8(buffer)?)
}
fn extract_text_to_doc(&self) -> Result<Document, LoaderError> {
let text = self.extract_text()?;
Ok(Document::new(text))
}
}
impl Loader for PdfExtractLoader {
fn load(self) -> Result<Vec<Document>, LoaderError> {
let doc = self.extract_text_to_doc()?;
Ok(vec![doc])
}
}
#[cfg(test)]
mod tests {
use std::{fs::read, io::Cursor, path::PathBuf};
use super::*;
#[test]
fn test_parse_pdf() {
let fixtures = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("fixtures");
let buffer = read(fixtures.join("sample.pdf")).unwrap();
let reader = Cursor::new(buffer);
let loader = PdfExtractLoader::new(reader).expect("Failed to create PdfExtractLoader");
let docs = loader.load().unwrap();
assert_eq!(docs.len(), 1);
assert_eq!(
&docs[0].page_content[..100],
"\n\nSample PDF\nThis is a simple PDF file. Fun fun fun.\n\nLorem ipsum dolor sit amet, \
consectetuer a"
);
}
}

View File

@@ -0,0 +1,61 @@
/**
* modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/document_loaders
*/
mod parser;
pub use parser::{get_language_by_filename, LanguageParser, LanguageParserOptions};
use super::*;
#[derive(Debug, Clone)]
pub struct SourceCodeLoader {
content: String,
parser_option: LanguageParserOptions,
}
impl SourceCodeLoader {
pub fn from_string<S: Into<String>>(input: S) -> Self {
Self {
content: input.into(),
parser_option: LanguageParserOptions::default(),
}
}
}
impl SourceCodeLoader {
pub fn with_parser_option(mut self, parser_option: LanguageParserOptions) -> Self {
self.parser_option = parser_option;
self
}
}
impl Loader for SourceCodeLoader {
fn load(self) -> Result<Vec<Document>, LoaderError> {
let options = self.parser_option.clone();
let docs = LanguageParser::from_language(options.language)
.with_parser_threshold(options.parser_threshold)
.parse_code(&self.content)?;
Ok(docs)
}
}
#[cfg(test)]
mod tests {
use parser::Language;
use super::*;
#[test]
fn test_source_code_loader() {
let content = include_str!("../../../../fixtures/sample.rs");
let loader = SourceCodeLoader::from_string(content).with_parser_option(LanguageParserOptions {
language: Language::Rust,
..Default::default()
});
let documents_with_content = loader.load().unwrap();
assert_eq!(documents_with_content.len(), 1);
}
}

View File

@@ -0,0 +1,246 @@
use std::{collections::HashMap, fmt::Debug, string::ToString};
use strum_macros::Display;
use tree_sitter::{Parser, Tree};
/**
* modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/document_loaders
*/
use super::*;
#[derive(Display, Debug, Clone)]
pub enum Language {
Rust,
C,
Cpp,
Javascript,
Typescript,
Go,
Python,
}
pub enum LanguageContentTypes {
SimplifiedCode,
FunctionsImpls,
}
impl std::fmt::Display for LanguageContentTypes {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
match self {
LanguageContentTypes::SimplifiedCode => "simplified_code",
LanguageContentTypes::FunctionsImpls => "functions_impls",
}
)
}
}
#[derive(Debug, Clone)]
pub struct LanguageParserOptions {
pub parser_threshold: u64,
pub language: Language,
}
impl Default for LanguageParserOptions {
fn default() -> Self {
Self {
parser_threshold: 1000,
language: Language::Rust,
}
}
}
pub struct LanguageParser {
parser: Parser,
parser_options: LanguageParserOptions,
}
impl Debug for LanguageParser {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"LanguageParser {{ language: {:?} }}",
self.parser_options.language
)
}
}
impl Clone for LanguageParser {
fn clone(&self) -> Self {
LanguageParser {
parser: get_language_parser(&self.parser_options.language),
parser_options: self.parser_options.clone(),
}
}
}
pub fn get_language_by_filename(name: &str) -> LoaderResult<Language> {
let extension = name
.split('.')
.last()
.ok_or(LoaderError::UnsupportedLanguage)?;
let language = match extension.to_lowercase().as_str() {
"rs" => Language::Rust,
"c" => Language::C,
"cpp" => Language::Cpp,
"h" => Language::C,
"hpp" => Language::Cpp,
"js" => Language::Javascript,
"ts" => Language::Typescript,
"tsx" => Language::Typescript,
"go" => Language::Go,
"py" => Language::Python,
_ => return Err(LoaderError::UnsupportedLanguage),
};
Ok(language)
}
fn get_language_parser(language: &Language) -> Parser {
let mut parser = Parser::new();
let lang = match language {
Language::Rust => tree_sitter_rust::LANGUAGE,
Language::C => tree_sitter_c::LANGUAGE,
Language::Cpp => tree_sitter_cpp::LANGUAGE,
Language::Javascript => tree_sitter_javascript::LANGUAGE,
Language::Typescript => tree_sitter_typescript::LANGUAGE_TSX,
Language::Go => tree_sitter_go::LANGUAGE,
Language::Python => tree_sitter_python::LANGUAGE,
};
parser
.set_language(&lang.into())
.unwrap_or_else(|_| panic!("Error loading grammar for language: {:?}", language));
parser
}
impl LanguageParser {
pub fn from_language(language: Language) -> Self {
Self {
parser: get_language_parser(&language),
parser_options: LanguageParserOptions {
language,
..LanguageParserOptions::default()
},
}
}
pub fn with_parser_threshold(mut self, threshold: u64) -> Self {
self.parser_options.parser_threshold = threshold;
self
}
}
impl LanguageParser {
pub fn parse_code(&mut self, code: &String) -> LoaderResult<Vec<Document>> {
let tree = self
.parser
.parse(code, None)
.ok_or(LoaderError::UnsupportedLanguage)?;
if self.parser_options.parser_threshold > tree.root_node().end_position().row as u64 {
return Ok(vec![Document::new(code).with_metadata(HashMap::from([
(
"content_type".to_string(),
serde_json::Value::from(LanguageContentTypes::SimplifiedCode.to_string()),
),
(
"language".to_string(),
serde_json::Value::from(self.parser_options.language.to_string()),
),
]))]);
}
self.extract_functions_classes(tree, code)
}
pub fn extract_functions_classes(
&self,
tree: Tree,
code: &String,
) -> LoaderResult<Vec<Document>> {
let mut chunks = Vec::new();
let count = tree.root_node().child_count();
for i in 0..count {
let Some(node) = tree.root_node().child(i) else {
continue;
};
let source_code = node.utf8_text(code.as_bytes())?.to_string();
let lang_meta = (
"language".to_string(),
serde_json::Value::from(self.parser_options.language.to_string()),
);
if node.kind() == "function_item" || node.kind() == "impl_item" {
let doc = Document::new(source_code).with_metadata(HashMap::from([
lang_meta.clone(),
(
"content_type".to_string(),
serde_json::Value::from(LanguageContentTypes::FunctionsImpls.to_string()),
),
]));
chunks.push(doc);
} else {
let doc = Document::new(source_code).with_metadata(HashMap::from([
lang_meta.clone(),
(
"content_type".to_string(),
serde_json::Value::from(LanguageContentTypes::SimplifiedCode.to_string()),
),
]));
chunks.push(doc);
}
}
Ok(chunks)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_code_parser() {
let code = r#"
fn main() {
println!("Hello, world!");
}
pub struct Person {
name: String,
age: i32,
}
impl Person {
pub fn new(name: String, age: i32) -> Self {
Self { name, age }
}
pub fn get_name(&self) -> &str {
&self.name
}
pub fn get_age(&self) -> i32 {
self.age
}
}
"#;
let mut parser = LanguageParser::from_language(Language::Rust);
let documents = parser.parse_code(&code.to_string()).unwrap();
assert_eq!(documents.len(), 1);
// Set the parser threshold to 10 for testing
let mut parser = parser.with_parser_threshold(10);
let documents = parser.parse_code(&code.to_string()).unwrap();
assert_eq!(documents.len(), 3);
assert_eq!(
documents[0].page_content,
"fn main() {\n println!(\"Hello, world!\");\n }"
);
assert_eq!(
documents[1].metadata.get("content_type").unwrap(),
LanguageContentTypes::SimplifiedCode.to_string().as_str()
);
}
}
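Since get_language_by_filename keys off the lowercased extension, parser selection can be derived from the path alone, and unknown extensions surface as LoaderError::UnsupportedLanguage. A small helper sketch:

fn parser_for(path: &str) -> LoaderResult<LanguageParser> {
  // e.g. "component.TSX" maps to Language::Typescript (the TSX grammar).
  let language = get_language_by_filename(path)?;
  // A threshold of 0 forces per-item extraction even for short files.
  Ok(LanguageParser::from_language(language).with_parser_threshold(0))
}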

View File

@@ -0,0 +1,24 @@
/**
* modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/document_loaders
*/
use super::*;
#[derive(Debug, Clone)]
pub struct TextLoader {
content: String,
}
impl TextLoader {
pub fn new<T: Into<String>>(input: T) -> Self {
Self {
content: input.into(),
}
}
}
impl Loader for TextLoader {
fn load(self) -> Result<Vec<Document>, LoaderError> {
let doc = Document::new(self.content);
Ok(vec![doc])
}
}

View File

@@ -0,0 +1,12 @@
mod document;
mod loader;
mod splitter;
mod types;
pub use document::{Chunk, Doc};
use loader::{
get_language_by_filename, DocxLoader, HtmlLoader, LanguageParserOptions, Loader, LoaderError,
PdfExtractLoader, SourceCodeLoader, TextLoader, Url,
};
use splitter::{MarkdownSplitter, TextSplitter, TextSplitterError, TokenSplitter};
use types::Document;

View File

@@ -0,0 +1,35 @@
/**
* modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/text_splitter
*/
use text_splitter::ChunkConfigError;
use thiserror::Error;
#[derive(Error, Debug)]
pub enum TextSplitterError {
#[error("Empty input text")]
EmptyInputText,
#[error("Mismatch metadata and text")]
MetadataTextMismatch,
#[error("Tokenizer not found")]
TokenizerNotFound,
#[error("Tokenizer creation failed due to invalid tokenizer")]
InvalidTokenizer,
#[error("Tokenizer creation failed due to invalid model")]
InvalidModel,
#[error("Invalid chunk overlap and size")]
InvalidSplitterOptions,
#[error("Error: {0}")]
OtherError(String),
}
impl From<ChunkConfigError> for TextSplitterError {
fn from(_: ChunkConfigError) -> Self {
Self::InvalidSplitterOptions
}
}

View File

@@ -0,0 +1,36 @@
use text_splitter::ChunkConfig;
/**
* modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/text_splitter
*/
use super::*;
pub struct MarkdownSplitter {
splitter_options: SplitterOptions,
}
impl Default for MarkdownSplitter {
fn default() -> Self {
MarkdownSplitter::new(SplitterOptions::default())
}
}
impl MarkdownSplitter {
pub fn new(options: SplitterOptions) -> MarkdownSplitter {
MarkdownSplitter {
splitter_options: options,
}
}
}
impl TextSplitter for MarkdownSplitter {
fn split_text(&self, text: &str) -> Result<Vec<String>, TextSplitterError> {
let chunk_config = ChunkConfig::try_from(&self.splitter_options)?;
Ok(
text_splitter::MarkdownSplitter::new(chunk_config)
.chunks(text)
.map(|x| x.to_string())
.collect(),
)
}
}

View File

@@ -0,0 +1,58 @@
/**
* modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/text_splitter
*/
mod error;
mod markdown;
mod options;
mod token;
use std::collections::HashMap;
pub use error::TextSplitterError;
pub use markdown::MarkdownSplitter;
use options::SplitterOptions;
use serde_json::Value;
pub use token::TokenSplitter;
use super::*;
pub trait TextSplitter: Send + Sync {
fn split_text(&self, text: &str) -> Result<Vec<String>, TextSplitterError>;
fn split_documents(&self, documents: &[Document]) -> Result<Vec<Document>, TextSplitterError> {
let mut texts: Vec<String> = Vec::new();
let mut metadatas: Vec<HashMap<String, Value>> = Vec::new();
documents.iter().for_each(|d| {
texts.push(d.page_content.clone());
metadatas.push(d.metadata.clone());
});
self.create_documents(&texts, &metadatas)
}
fn create_documents(
&self,
text: &[String],
metadatas: &[HashMap<String, Value>],
) -> Result<Vec<Document>, TextSplitterError> {
let mut metadatas = metadatas.to_vec();
if metadatas.is_empty() {
metadatas = vec![HashMap::new(); text.len()];
}
if text.len() != metadatas.len() {
return Err(TextSplitterError::MetadataTextMismatch);
}
let mut documents: Vec<Document> = Vec::new();
for i in 0..text.len() {
let chunks = self.split_text(&text[i])?;
for chunk in chunks {
let document = Document::new(chunk).with_metadata(metadatas[i].clone());
documents.push(document);
}
}
Ok(documents)
}
}
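split_documents fans every input through split_text and re-attaches that input's metadata to each resulting chunk, while create_documents rejects mismatched text/metadata lengths. A sketch re-chunking existing documents with the markdown-aware splitter:

fn rechunk(docs: &[Document]) -> Result<Vec<Document>, TextSplitterError> {
  // Each chunk inherits the metadata of the document it came from.
  MarkdownSplitter::default().split_documents(docs)
}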

View File

@@ -0,0 +1,96 @@
/**
* modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/text_splitter
*/
use text_splitter::ChunkConfig;
use tiktoken_rs::{get_bpe_from_model, get_bpe_from_tokenizer, tokenizer::Tokenizer, CoreBPE};
use super::TextSplitterError;
// Options is a struct that contains options for a text splitter.
#[derive(Debug, Clone)]
pub struct SplitterOptions {
pub chunk_size: usize,
pub chunk_overlap: usize,
pub model_name: String,
pub encoding_name: String,
pub trim_chunks: bool,
}
impl Default for SplitterOptions {
fn default() -> Self {
Self::new()
}
}
impl SplitterOptions {
pub fn new() -> Self {
SplitterOptions {
chunk_size: 512,
chunk_overlap: 0,
model_name: String::from("gpt-3.5-turbo"),
encoding_name: String::from("cl100k_base"),
trim_chunks: false,
}
}
}
// Builder pattern for Options struct
impl SplitterOptions {
pub fn with_chunk_size(mut self, chunk_size: usize) -> Self {
self.chunk_size = chunk_size;
self
}
pub fn with_chunk_overlap(mut self, chunk_overlap: usize) -> Self {
self.chunk_overlap = chunk_overlap;
self
}
pub fn with_model_name(mut self, model_name: &str) -> Self {
self.model_name = String::from(model_name);
self
}
pub fn with_encoding_name(mut self, encoding_name: &str) -> Self {
self.encoding_name = String::from(encoding_name);
self
}
pub fn with_trim_chunks(mut self, trim_chunks: bool) -> Self {
self.trim_chunks = trim_chunks;
self
}
pub fn get_tokenizer_from_str(s: &str) -> Option<Tokenizer> {
match s.to_lowercase().as_str() {
"cl100k_base" => Some(Tokenizer::Cl100kBase),
"p50k_base" => Some(Tokenizer::P50kBase),
"r50k_base" => Some(Tokenizer::R50kBase),
"p50k_edit" => Some(Tokenizer::P50kEdit),
"gpt2" => Some(Tokenizer::Gpt2),
_ => None,
}
}
}
impl TryFrom<&SplitterOptions> for ChunkConfig<CoreBPE> {
type Error = TextSplitterError;
fn try_from(options: &SplitterOptions) -> Result<Self, Self::Error> {
let tk = if !options.encoding_name.is_empty() {
let tokenizer = SplitterOptions::get_tokenizer_from_str(&options.encoding_name)
.ok_or(TextSplitterError::TokenizerNotFound)?;
get_bpe_from_tokenizer(tokenizer).map_err(|_| TextSplitterError::InvalidTokenizer)?
} else {
get_bpe_from_model(&options.model_name).map_err(|_| TextSplitterError::InvalidModel)?
};
Ok(
ChunkConfig::new(options.chunk_size)
.with_sizer(tk)
.with_trim(options.trim_chunks)
.with_overlap(options.chunk_overlap)?,
)
}
}
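The TryFrom above resolves the tokenizer before building a ChunkConfig: a non-empty encoding_name wins over model_name, and an overlap at or above the chunk size comes back as InvalidSplitterOptions. A builder sketch:

fn overlapping_config() -> Result<ChunkConfig<CoreBPE>, TextSplitterError> {
  let options = SplitterOptions::new()
    .with_chunk_size(256)
    .with_chunk_overlap(32) // must stay below chunk_size, or with_overlap errors
    .with_trim_chunks(true);
  ChunkConfig::try_from(&options)
}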

View File

@@ -0,0 +1,37 @@
use text_splitter::ChunkConfig;
/**
* modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/text_splitter
*/
use super::*;
#[derive(Debug, Clone)]
pub struct TokenSplitter {
splitter_options: SplitterOptions,
}
impl Default for TokenSplitter {
fn default() -> Self {
TokenSplitter::new(SplitterOptions::default())
}
}
impl TokenSplitter {
pub fn new(options: SplitterOptions) -> TokenSplitter {
TokenSplitter {
splitter_options: options,
}
}
}
impl TextSplitter for TokenSplitter {
fn split_text(&self, text: &str) -> Result<Vec<String>, TextSplitterError> {
let chunk_config = ChunkConfig::try_from(&self.splitter_options)?;
Ok(
text_splitter::TextSplitter::new(chunk_config)
.chunks(text)
.map(|x| x.to_string())
.collect(),
)
}
}

View File

@@ -0,0 +1,37 @@
use std::collections::HashMap;
use serde_json::Value;
#[derive(Debug, Clone)]
pub struct Document {
pub page_content: String,
pub metadata: HashMap<String, Value>,
}
impl Document {
/// Constructs a new `Document` with provided `page_content`, an empty
/// `metadata` map and a `score` of 0.
pub fn new<S: Into<String>>(page_content: S) -> Self {
Document {
page_content: page_content.into(),
metadata: HashMap::new(),
}
}
/// Sets the `metadata` Map of the `Document` to the provided HashMap.
pub fn with_metadata(mut self, metadata: HashMap<String, Value>) -> Self {
self.metadata = metadata;
self
}
}
impl Default for Document {
/// Provides a default `Document` with an empty `page_content`, an empty
/// `metadata` map and a `score` of 0.
fn default() -> Self {
Document {
page_content: "".to_string(),
metadata: HashMap::new(),
}
}
}
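Metadata is an arbitrary string-to-JSON map, which is what the loaders above use to tag provenance and content type. A tiny constructor sketch using the imports at the top of this file:

fn tagged(content: &str, source: &str) -> Document {
  Document::new(content).with_metadata(HashMap::from([(
    "source".to_string(),
    Value::from(source),
  )]))
}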

View File

@@ -1 +1,3 @@
#[cfg(feature = "doc-loader")]
pub mod doc_loader;
pub mod hashcash;

View File

@@ -15,8 +15,12 @@ export class BlobFrontend {
return this.sync.uploadBlob(blob);
}
fullSync() {
return this.sync.fullSync();
fullDownload() {
return this.sync.fullDownload();
}
fullUpload() {
return this.sync.fullUpload();
}
addPriority(_id: string, _priority: number) {

View File

@@ -399,31 +399,23 @@ export class DocFrontend {
this.statusUpdatedSubject$.next(job.docId);
}
/**
* skip listen doc update when apply update
*/
private skipDocUpdate = false;
applyUpdate(docId: string, update: Uint8Array) {
const doc = this.status.docs.get(docId);
if (doc && !isEmptyUpdate(update)) {
try {
this.skipDocUpdate = true;
applyUpdate(doc, update, NBSTORE_ORIGIN);
} catch (err) {
console.error('failed to apply update yjs doc', err);
} finally {
this.skipDocUpdate = false;
}
}
}
private readonly handleDocUpdate = (
update: Uint8Array,
_origin: any,
origin: any,
doc: YDoc
) => {
if (this.skipDocUpdate) {
if (origin === NBSTORE_ORIGIN) {
return;
}
if (!this.status.docs.has(doc.guid)) {

View File

@@ -9,6 +9,8 @@ import type { PeerStorageOptions } from '../types';
export interface BlobSyncState {
isStorageOverCapacity: boolean;
total: number;
synced: number;
}
export interface BlobSync {
@@ -18,7 +20,8 @@ export interface BlobSync {
signal?: AbortSignal
): Promise<BlobRecord | null>;
uploadBlob(blob: BlobRecord, signal?: AbortSignal): Promise<void>;
fullSync(signal?: AbortSignal): Promise<void>;
fullDownload(signal?: AbortSignal): Promise<void>;
fullUpload(signal?: AbortSignal): Promise<void>;
setMaxBlobSize(size: number): void;
onReachedMaxBlobSize(cb: (byteSize: number) => void): () => void;
}
@@ -26,6 +29,8 @@ export interface BlobSync {
export class BlobSyncImpl implements BlobSync {
readonly state$ = new BehaviorSubject<BlobSyncState>({
isStorageOverCapacity: false,
total: Object.values(this.storages.remotes).length ? 1 : 0,
synced: 0,
});
private abort: AbortController | null = null;
private maxBlobSize: number = 1024 * 1024 * 100; // 100MB
@@ -34,19 +39,24 @@ export class BlobSyncImpl implements BlobSync {
constructor(readonly storages: PeerStorageOptions<BlobStorage>) {}
async downloadBlob(blobId: string, signal?: AbortSignal) {
const localBlob = await this.storages.local.get(blobId, signal);
if (localBlob) {
return localBlob;
}
for (const storage of Object.values(this.storages.remotes)) {
const data = await storage.get(blobId, signal);
if (data) {
await this.storages.local.set(data, signal);
return data;
try {
const localBlob = await this.storages.local.get(blobId, signal);
if (localBlob) {
return localBlob;
}
for (const storage of Object.values(this.storages.remotes)) {
const data = await storage.get(blobId, signal);
if (data) {
await this.storages.local.set(data, signal);
return data;
}
}
return null;
} catch (e) {
console.error('error when download blob', e);
return null;
}
return null;
}
async uploadBlob(blob: BlobRecord, signal?: AbortSignal) {
@@ -62,7 +72,11 @@ export class BlobSyncImpl implements BlobSync {
return await remote.set(blob, signal);
} catch (err) {
if (err instanceof OverCapacityError) {
this.state$.next({ isStorageOverCapacity: true });
this.state$.next({
isStorageOverCapacity: true,
total: this.state$.value.total,
synced: this.state$.value.synced,
});
}
throw err;
}
@@ -70,71 +84,95 @@ export class BlobSyncImpl implements BlobSync {
);
}
async fullSync(signal?: AbortSignal) {
async fullDownload(signal?: AbortSignal) {
throwIfAborted(signal);
await this.storages.local.connection.waitForConnected(signal);
const localList = (await this.storages.local.list(signal)).map(b => b.key);
this.state$.next({
...this.state$.value,
synced: localList.length,
});
for (const [remotePeer, remote] of Object.entries(this.storages.remotes)) {
let localList: string[] = [];
let remoteList: string[] = [];
await Promise.allSettled(
Object.entries(this.storages.remotes).map(
async ([remotePeer, remote]) => {
await remote.connection.waitForConnected(signal);
await remote.connection.waitForConnected(signal);
const remoteList = (await remote.list(signal)).map(b => b.key);
try {
localList = (await this.storages.local.list(signal)).map(b => b.key);
throwIfAborted(signal);
remoteList = (await remote.list(signal)).map(b => b.key);
throwIfAborted(signal);
} catch (err) {
if (err === MANUALLY_STOP) {
throw err;
}
console.error(`error when sync`, err);
continue;
}
this.state$.next({
...this.state$.value,
total: Math.max(this.state$.value.total, remoteList.length),
});
const needUpload = difference(localList, remoteList);
for (const key of needUpload) {
try {
const data = await this.storages.local.get(key, signal);
throwIfAborted(signal);
if (data) {
await remote.set(data, signal);
throwIfAborted(signal);
const needDownload = difference(remoteList, localList);
for (const key of needDownload) {
try {
const data = await remote.get(key, signal);
throwIfAborted(signal);
if (data) {
await this.storages.local.set(data, signal);
this.state$.next({
...this.state$.value,
synced: this.state$.value.synced + 1,
});
throwIfAborted(signal);
}
} catch (err) {
if (err === MANUALLY_STOP) {
throw err;
}
console.error(
`error when sync ${key} from [${remotePeer}] to [local]`,
err
);
}
}
} catch (err) {
if (err === MANUALLY_STOP) {
throw err;
}
console.error(
`error when sync ${key} from [local] to [${remotePeer}]`,
err
);
}
}
)
);
}
const needDownload = difference(remoteList, localList);
async fullUpload(signal?: AbortSignal) {
throwIfAborted(signal);
await this.storages.local.connection.waitForConnected(signal);
const localList = (await this.storages.local.list(signal)).map(b => b.key);
await Promise.allSettled(
Object.entries(this.storages.remotes).map(
async ([remotePeer, remote]) => {
await remote.connection.waitForConnected(signal);
const remoteList = (await remote.list(signal)).map(b => b.key);
for (const key of needDownload) {
try {
const data = await remote.get(key, signal);
throwIfAborted(signal);
if (data) {
await this.storages.local.set(data, signal);
throwIfAborted(signal);
const needUpload = difference(localList, remoteList);
for (const key of needUpload) {
try {
const data = await this.storages.local.get(key, signal);
throwIfAborted(signal);
if (data) {
await remote.set(data, signal);
throwIfAborted(signal);
}
} catch (err) {
if (err === MANUALLY_STOP) {
throw err;
}
console.error(
`error when sync ${key} from [local] to [${remotePeer}]`,
err
);
}
}
} catch (err) {
if (err === MANUALLY_STOP) {
throw err;
}
console.error(
`error when sync ${key} from [${remotePeer}] to [local]`,
err
);
}
}
}
)
);
}
start() {
@@ -144,16 +182,12 @@ export class BlobSyncImpl implements BlobSync {
const abort = new AbortController();
this.abort = abort;
// TODO(@eyhn): fix this, large blob may cause iOS to crash?
if (!BUILD_CONFIG.isIOS) {
this.fullSync(abort.signal).catch(error => {
if (error === MANUALLY_STOP) {
return;
}
console.error('sync blob error', error);
});
}
this.fullUpload(abort.signal).catch(error => {
if (error === MANUALLY_STOP) {
return;
}
console.error('sync blob error', error);
});
}
stop() {

View File

@@ -272,7 +272,7 @@ export class DocSyncPeer {
jobs: (Job & { type: 'push' })[],
signal?: AbortSignal
) => {
if (this.status.connectedDocs.has(docId)) {
if (this.status.connectedDocs.has(docId) && !this.remote.isReadonly) {
const maxClock = jobs.reduce(
(a, b) => (a.getTime() > b.clock.getTime() ? a : b.clock),
new Date(0)

View File

@@ -257,26 +257,23 @@ class WorkerBlobSync implements BlobSync {
uploadBlob(blob: BlobRecord, _signal?: AbortSignal): Promise<void> {
return this.client.call('blobSync.uploadBlob', blob);
}
fullSync(signal?: AbortSignal): Promise<void> {
return new Promise((resolve, reject) => {
const abortListener = () => {
reject(signal?.reason);
subscription.unsubscribe();
};
fullDownload(signal?: AbortSignal): Promise<void> {
const download = this.client.call('blobSync.fullDownload');
signal?.addEventListener('abort', abortListener);
const subscription = this.client.ob$('blobSync.fullSync').subscribe({
next() {
signal?.removeEventListener('abort', abortListener);
resolve();
},
error(err) {
signal?.removeEventListener('abort', abortListener);
reject(err);
},
});
signal?.addEventListener('abort', () => {
download.cancel();
});
return download;
}
fullUpload(signal?: AbortSignal): Promise<void> {
const upload = this.client.call('blobSync.fullUpload');
signal?.addEventListener('abort', () => {
upload.cancel();
});
return upload;
}
}

View File

@@ -234,20 +234,10 @@ class StoreConsumer {
'docSync.resetSync': () => this.docSync.resetSync(),
'blobSync.downloadBlob': key => this.blobSync.downloadBlob(key),
'blobSync.uploadBlob': blob => this.blobSync.uploadBlob(blob),
'blobSync.fullSync': () =>
new Observable(subscriber => {
const abortController = new AbortController();
this.blobSync
.fullSync(abortController.signal)
.then(() => {
subscriber.next(true);
subscriber.complete();
})
.catch(error => {
subscriber.error(error);
});
return () => abortController.abort(MANUALLY_STOP);
}),
'blobSync.fullDownload': (_, { signal }) =>
this.blobSync.fullDownload(signal),
'blobSync.fullUpload': (_, { signal }) =>
this.blobSync.fullUpload(signal),
'blobSync.state': () => this.blobSync.state$,
'blobSync.setMaxBlobSize': size => this.blobSync.setMaxBlobSize(size),
'blobSync.onReachedMaxBlobSize': () =>

View File

@@ -87,7 +87,8 @@ interface GroupedWorkerOps {
blobSync: {
downloadBlob: [string, BlobRecord | null];
uploadBlob: [BlobRecord, void];
fullSync: [void, boolean];
fullDownload: [void, void];
fullUpload: [void, void];
setMaxBlobSize: [number, void];
onReachedMaxBlobSize: [void, number];
state: [void, BlobSyncState];

View File

@@ -24,7 +24,7 @@
"@dotlottie/player-component": "^2.7.12",
"@emotion/cache": "^11.14.0",
"@emotion/react": "^11.14.0",
"@floating-ui/dom": "^1.6.12",
"@floating-ui/dom": "^1.6.13",
"@juggle/resize-observer": "^3.4.0",
"@marsidev/react-turnstile": "^1.1.0",
"@preact/signals-core": "^1.8.0",

View File

@@ -1,5 +1,5 @@
import { registerAIEffects } from '@affine/core/blocksuite/ai/effects';
import { effects as editorEffects } from '@affine/core/blocksuite/editors';
import { editorEffects } from '@affine/core/blocksuite/editors';
import { effects as bsEffects } from '@blocksuite/affine/effects';
import { registerTemplates } from './register-templates';

View File

@@ -231,7 +231,7 @@ export const BlocksuiteDocEditor = forwardRef<
if (typeof externalTitleRef === 'function') {
externalTitleRef(el);
} else {
(externalTitleRef as any).current = el;
externalTitleRef.current = el;
}
}
},

View File

@@ -23,7 +23,7 @@ export const LitEdgelessEditor = createReactComponentFromLit({
elementClass: EdgelessEditor,
});
export function effects() {
export function editorEffects() {
customElements.define('page-editor', PageEditor);
customElements.define('edgeless-editor', EdgelessEditor);
}

View File

@@ -145,16 +145,16 @@ const ActionButton = ({ detail, recurring }: PlanCardProps) => {
// if currentRecurring !== recurring => 'Change to {recurring} Billing'
// else => 'Upgrade'
// not signed in
if (!loggedIn) {
return <SignUpAction>{signUpText}</SignUpAction>;
}
// team
if (detail.plan === SubscriptionPlan.Team) {
return <UpgradeToTeam recurring={recurring} />;
}
// not signed in
if (!loggedIn) {
return <SignUpAction>{signUpText}</SignUpAction>;
}
// lifetime
if (isBeliever) {
return (

View File

@@ -4,33 +4,60 @@ import { Button } from '@affine/component/ui/button';
import { useAsyncCallback } from '@affine/core/components/hooks/affine-async-hooks';
import { useSystemOnline } from '@affine/core/components/hooks/use-system-online';
import { DesktopApiService } from '@affine/core/modules/desktop-api';
import { WorkspacePermissionService } from '@affine/core/modules/permissions';
import type { Workspace } from '@affine/core/modules/workspace';
import { useI18n } from '@affine/i18n';
import { universalId } from '@affine/nbstore';
import track from '@affine/track';
import { useLiveData, useService } from '@toeverything/infra';
import { useState } from 'react';
import { LiveData, useLiveData, useService } from '@toeverything/infra';
import { useMemo, useState } from 'react';
interface ExportPanelProps {
workspace: Workspace;
}
export const DesktopExportPanel = ({ workspace }: ExportPanelProps) => {
const workspacePermissionService = useService(
WorkspacePermissionService
).permission;
const isTeam = useLiveData(workspacePermissionService.isTeam$);
const isOwner = useLiveData(workspacePermissionService.isOwner$);
const isAdmin = useLiveData(workspacePermissionService.isAdmin$);
const t = useI18n();
const [saving, setSaving] = useState(false);
const isOnline = useSystemOnline();
const desktopApi = useService(DesktopApiService);
const isLocalWorkspace = workspace.flavour === 'local';
const docSyncState = useLiveData(
useMemo(() => {
return workspace
? LiveData.from(workspace.engine.doc.state$, null).throttleTime(500)
: null;
}, [workspace])
);
const blobSyncState = useLiveData(
useMemo(() => {
return workspace
? LiveData.from(workspace.engine.blob.state$, null).throttleTime(500)
: null;
}, [workspace])
);
const docSynced = !docSyncState?.syncing;
const blobSynced =
!blobSyncState || blobSyncState.synced === blobSyncState.total;
const [fullSynced, setFullSynced] = useState(false);
const shouldWaitForFullSync =
isLocalWorkspace || !isOnline || (fullSynced && docSynced && blobSynced);
const fullSyncing = fullSynced && (!docSynced || !blobSynced);
const fullSync = useAsyncCallback(async () => {
// NOTE: doc full sync is always started by default
// await workspace.engine.doc.waitForSynced();
workspace.engine.blob.fullDownload().catch(() => {
/* noop */
});
setFullSynced(true);
}, [workspace.engine.blob]);
const onExport = useAsyncCallback(async () => {
if (saving || !workspace) {
if (saving) {
return;
}
setSaving(true);
@@ -38,10 +65,6 @@ export const DesktopExportPanel = ({ workspace }: ExportPanelProps) => {
track.$.settingsPanel.workspace.export({
type: 'workspace',
});
if (isOnline) {
await workspace.engine.doc.waitForSynced();
await workspace.engine.blob.fullSync();
}
const result = await desktopApi.handler?.dialog.saveDBFileAs(
universalId({
@@ -61,20 +84,37 @@ export const DesktopExportPanel = ({ workspace }: ExportPanelProps) => {
} finally {
setSaving(false);
}
}, [desktopApi, isOnline, saving, t, workspace]);
}, [desktopApi, saving, t, workspace]);
if (isTeam && !isOwner && !isAdmin) {
return null;
if (!shouldWaitForFullSync) {
return (
<SettingRow name={t['Export']()} desc={t['Full Sync Description']()}>
<Button
data-testid="export-affine-full-sync"
onClick={fullSync}
loading={fullSyncing}
>
{t['Full Sync']()}
</Button>
</SettingRow>
);
}
const button =
isLocalWorkspace || isOnline ? t['Export']() : t['Export(Offline)']();
const desc =
isLocalWorkspace || isOnline
? t['Export Description']()
: t['Export Description(Offline)']();
return (
<SettingRow name={t['Export']()} desc={t['Export Description']()}>
<SettingRow name={t['Export']()} desc={desc}>
<Button
data-testid="export-affine-backup"
onClick={onExport}
disabled={saving}
>
{t['Export']()}
{button}
</Button>
</SettingRow>
);

View File

@@ -23,6 +23,9 @@ export const WorkspaceSettingStorage = ({
WorkspacePermissionService
).permission;
const isTeam = useLiveData(workspacePermissionService.isTeam$);
const isOwner = useLiveData(workspacePermissionService.isOwner$);
const canExport = !isTeam || isOwner;
return (
<>
<SettingHeader
@@ -46,7 +49,7 @@ export const WorkspaceSettingStorage = ({
</SettingWrapper>
) : null}
{BUILD_CONFIG.isElectron && (
{BUILD_CONFIG.isElectron && canExport && (
<SettingWrapper>
<DesktopExportPanel workspace={workspace} />
</SettingWrapper>

View File

@@ -189,7 +189,7 @@ const SharePageInner = ({
]);
const t = useI18n();
const pageTitle = useLiveData(page?.title$) ?? t['unnamed']();
const pageTitle = useLiveData(page?.title$);
const { jumpToPageBlock, openPage } = useNavigateHelper();
usePageDocumentTitle(pageTitle);
@@ -235,7 +235,7 @@ const SharePageInner = ({
}
if (!workspace || !page || !editor) {
return;
return <AppContainer fallback />;
}
return (
@@ -243,7 +243,7 @@ const SharePageInner = ({
<FrameworkScope scope={page.scope}>
<FrameworkScope scope={editor.scope}>
<ViewIcon icon={publishMode === 'page' ? 'doc' : 'edgeless'} />
<ViewTitle title={pageTitle} />
<ViewTitle title={pageTitle ?? t['unnamed']()} />
<div className={styles.root}>
<div className={styles.mainContainer}>
<ShareHeader

View File

@@ -121,45 +121,26 @@ export class UnusedBlobs extends Entity {
}
private async getUsedBlobs(): Promise<string[]> {
const batchSize = 100;
let offset = 0;
const unusedBlobKeys: string[] = [];
while (true) {
const result = await this.docsSearchService.indexer.blockIndex.aggregate(
{
type: 'boolean',
occur: 'must',
queries: [
{
type: 'exists',
field: 'blob',
},
],
},
'blob',
{
pagination: {
limit: batchSize,
skip: offset,
const result = await this.docsSearchService.indexer.blockIndex.aggregate(
{
type: 'boolean',
occur: 'must',
queries: [
{
type: 'exists',
field: 'blob',
},
}
);
if (!result.buckets.length) {
break;
],
},
'blob',
{
pagination: {
limit: Number.MAX_SAFE_INTEGER,
},
}
);
unusedBlobKeys.push(...result.buckets.map(bucket => bucket.key));
offset += batchSize;
// If we got less results than the batch size, we've reached the end
if (result.buckets.length < batchSize) {
break;
}
}
return unusedBlobKeys;
return result.buckets.map(bucket => bucket.key);
}
async hydrateBlob(

View File

@@ -1,4 +1,5 @@
import {
IconButton,
MenuItem,
MenuSeparator,
toast,
@@ -22,10 +23,11 @@ import {
InformationIcon,
LinkedPageIcon,
OpenInNewIcon,
PlusIcon,
SplitViewIcon,
} from '@blocksuite/icons/rc';
import { useLiveData, useServices } from '@toeverything/infra';
import { useCallback, useMemo } from 'react';
import { useCallback, useMemo, useState } from 'react';
import type { NodeOperation } from '../../tree/types';
@@ -52,6 +54,7 @@ export const useExplorerDocNodeOperations = (
});
const { openConfirmModal } = useConfirmModal();
const [addLinkedPageLoading, setAddLinkedPageLoading] = useState(false);
const docRecord = useLiveData(docsService.list.doc$(docId));
const { createPage } = usePageHelper(
@@ -117,17 +120,22 @@ export const useExplorerDocNodeOperations = (
}, [docId, workbenchService.workbench]);
const handleAddLinkedPage = useAsyncCallback(async () => {
const canEdit = await guardService.can('Doc_Update', docId);
if (!canEdit) {
toast(t['com.affine.no-permission']());
return;
setAddLinkedPageLoading(true);
try {
const canEdit = await guardService.can('Doc_Update', docId);
if (!canEdit) {
toast(t['com.affine.no-permission']());
return;
}
const newDoc = createPage();
// TODO: handle timeout & error
await docsService.addLinkedDoc(docId, newDoc.id);
track.$.navigationPanel.docs.createDoc({ control: 'linkDoc' });
track.$.navigationPanel.docs.linkDoc({ control: 'createDoc' });
options.openNodeCollapsed();
} finally {
setAddLinkedPageLoading(false);
}
const newDoc = createPage();
// TODO: handle timeout & error
await docsService.addLinkedDoc(docId, newDoc.id);
track.$.navigationPanel.docs.createDoc({ control: 'linkDoc' });
track.$.navigationPanel.docs.linkDoc({ control: 'createDoc' });
options.openNodeCollapsed();
}, [createPage, guardService, docId, docsService, options, t]);
const handleToggleFavoriteDoc = useCallback(() => {
@@ -139,6 +147,20 @@ export const useExplorerDocNodeOperations = (
return useMemo(
() => [
{
index: 0,
inline: true,
view: (
<IconButton
size="16"
icon={<PlusIcon />}
tooltip={t['com.affine.rootAppSidebar.explorer.doc-add-tooltip']()}
onClick={handleAddLinkedPage}
loading={addLinkedPageLoading}
disabled={addLinkedPageLoading}
/>
),
},
{
index: 50,
view: (
@@ -233,6 +255,7 @@ export const useExplorerDocNodeOperations = (
},
],
[
addLinkedPageLoading,
docId,
favorite,
handleAddLinkedPage,

View File

@@ -107,9 +107,10 @@ export const InviteMemberEditor = ({
selectedMemberIds,
inviteDocRoleType
);
onClickCancel();
notify.success({
title: 'Invite successful',
title: t['Invitation sent'](),
});
} catch (error) {
const err = UserFriendlyError.fromAnyError(error);
@@ -117,7 +118,13 @@ export const InviteMemberEditor = ({
title: t[`error.${err.name}`](err.data),
});
}
}, [docGrantedUsersService, inviteDocRoleType, selectedMembers, t]);
}, [
docGrantedUsersService,
inviteDocRoleType,
onClickCancel,
selectedMembers,
t,
]);
const handleCompositionStart: CompositionEventHandler<HTMLInputElement> =
useCallback(() => {

View File

@@ -195,10 +195,26 @@ export function useAFFiNEI18N(): {
* `Export`
*/
Export(): string;
/**
* `Export (Offline)`
*/
["Export(Offline)"](): string;
/**
* `Full Sync`
*/
["Full Sync"](): string;
/**
* `You can export the entire Workspace data for backup, and the exported data can be re-imported.`
*/
["Export Description"](): string;
/**
* `You can export the entire Workspace data for backup, and the exported data can be re-imported. But you are offline now, so the exported data may not be up to date.`
*/
["Export Description(Offline)"](): string;
/**
* `You can export the entire Workspace data for backup, and the exported data can be re-imported. But you must sync all cloud data first to keep your exported data up to date.`
*/
["Full Sync Description"](): string;
/**
* `Export failed`
*/
@@ -2675,6 +2691,10 @@ export function useAFFiNEI18N(): {
* `Workspace name`
*/
["com.affine.nameWorkspace.subtitle.workspace-name"](): string;
/**
* `Workspace type`
*/
["com.affine.nameWorkspace.subtitle.workspace-type"](): string;
/**
* `Name your workspace`
*/
@@ -3513,11 +3533,11 @@ export function useAFFiNEI18N(): {
*/
["com.affine.payment.cloud.free.benefit.g2-5"](): string;
/**
* `Open-source under MIT license.`
* `Local Editor under MIT license.`
*/
["com.affine.payment.cloud.free.description"](): string;
/**
* `FOSS + Basic`
* `Local FOSS + Cloud Basic`
*/
["com.affine.payment.cloud.free.name"](): string;
/**

View File

@@ -39,7 +39,11 @@
"Enable AFFiNE Cloud Description": "If enabled, the data in this workspace will be backed up and synchronised via AFFiNE Cloud.",
"Enable cloud hint": "The following functions rely on AFFiNE Cloud. All data is stored on the current device. You can enable AFFiNE Cloud for this workspace to keep data in sync with the cloud.",
"Export": "Export",
"Export(Offline)": "Export (Offline)",
"Full Sync": "Full Sync",
"Export Description": "You can export the entire Workspace data for backup, and the exported data can be re-imported.",
"Export Description(Offline)": "You can export the entire Workspace data for backup, and the exported data can be re-imported. But you are offline now which will cause the exported data not up to date.",
"Full Sync Description": "You can export the entire Workspace data for backup, and the exported data can be re-imported. But you must sync all cloud data first to keep your exported data up to date.",
"Export failed": "Export failed",
"Export success": "Export success",
"Export to HTML": "Export to HTML",

View File

@@ -0,0 +1,2 @@
recordings
.env

View File

@@ -0,0 +1,43 @@
{
"name": "@affine/media-capture-playground",
"private": true,
"type": "module",
"version": "0.0.0",
"scripts": {
"dev:web": "vite",
"dev:server": "tsx --env-file=.env --watch server/main.ts"
},
"dependencies": {
"@affine/native": "workspace:*",
"@google/generative-ai": "^0.21.0",
"@tailwindcss/vite": "^4.0.6",
"@types/express": "^4",
"@types/multer": "^1",
"@types/react": "^19.0.8",
"@types/react-dom": "^19.0.3",
"@types/socket.io": "^3.0.2",
"@types/socket.io-client": "^3.0.0",
"@vitejs/plugin-react": "^4.3.4",
"chokidar": "^4.0.3",
"express": "^4.21.2",
"express-rate-limit": "^7.1.5",
"fs-extra": "^11.3.0",
"multer": "^1.4.5-lts.1",
"openai": "^4.85.1",
"react": "^19.0.0",
"react-dom": "^19.0.0",
"react-markdown": "^9.0.3",
"rxjs": "^7.8.1",
"socket.io": "^4.7.4",
"socket.io-client": "^4.7.4",
"swr": "^2.3.2",
"tailwindcss": "^4.0.6",
"tsx": "^4.19.2",
"vite": "^6.1.0"
},
"devDependencies": {
"@types/fs-extra": "^11",
"@types/react": "^19.0.1",
"@types/react-dom": "^19.0.2"
}
}

View File

@@ -0,0 +1,200 @@
import { GoogleGenerativeAI } from '@google/generative-ai';
import {
GoogleAIFileManager,
type UploadFileResponse,
} from '@google/generative-ai/server';
const DEFAULT_MODEL = 'gemini-2.0-flash';
export interface TranscriptionResult {
title: string;
summary: string;
segments: {
speaker: string;
start_time: string;
end_time: string;
transcription: string;
}[];
}
const PROMPT_TRANSCRIPTION = `
Generate audio transcription and diarization for the recording.
The recording source is most likely from a video call with multiple speakers.
Output in JSON format with the following structure:
{
"segments": [
{
"speaker": "Speaker A",
"start_time": "MM:SS",
"end_time": "MM:SS",
"transcription": "..."
},
...
],
}
- Use consistent speaker labels throughout
- Accurate timestamps in MM:SS format
- Clean transcription with proper punctuation
- Identify speakers by name if possible, otherwise use "Speaker A/B/C"
`;
const PROMPT_SUMMARY = `
Generate a short title and summary of the conversation. The input is in the following JSON format:
{
"segments": [
{
"speaker": "Speaker A",
"start_time": "MM:SS",
"end_time": "MM:SS",
"transcription": "..."
},
...
],
}
Output in JSON format with the following structure:
{
"title": "Title of the recording",
"summary": "Summary of the conversation in markdown format"
}
1. Summary Structure:
- The summary should be inferred from the speakers' language and context
- All insights should be derived directly from speakers' language and context
- Use hierarchical organization for clear information structure
- Use markdown format for the summary. Use bullet points, lists and other markdown styles when appropriate
2. Title:
- Come up with a title for the recording.
- The title should be a short description of the recording.
- The title should be a single sentence or a few words.
`;
export async function gemini(
audioFilePath: string,
options?: {
model?: 'gemini-2.0-flash' | 'gemini-1.5-flash';
mode?: 'transcript' | 'summary';
}
) {
if (!process.env.GOOGLE_GEMINI_API_KEY) {
console.error('Missing GOOGLE_GEMINI_API_KEY environment variable');
throw new Error('GOOGLE_GEMINI_API_KEY is not set');
}
// Initialize GoogleGenerativeAI and FileManager with your API_KEY
const genAI = new GoogleGenerativeAI(process.env.GOOGLE_GEMINI_API_KEY);
const fileManager = new GoogleAIFileManager(
process.env.GOOGLE_GEMINI_API_KEY
);
async function transcribe(
audioFilePath: string
): Promise<TranscriptionResult | null> {
let uploadResult: UploadFileResponse | null = null;
try {
// Upload the audio file
uploadResult = await fileManager.uploadFile(audioFilePath, {
mimeType: 'audio/wav',
displayName: 'audio_transcription.wav',
});
console.log('File uploaded:', uploadResult.file.uri);
// Initialize a Gemini model appropriate for your use case.
const model = genAI.getGenerativeModel({
model: options?.model || DEFAULT_MODEL,
generationConfig: {
responseMimeType: 'application/json',
},
});
// Generate content using a prompt and the uploaded file
const result = await model.generateContent([
{
fileData: {
fileUri: uploadResult.file.uri,
mimeType: uploadResult.file.mimeType,
},
},
{
text: PROMPT_TRANSCRIPTION,
},
]);
const text = result.response.text();
try {
const parsed = JSON.parse(text);
return parsed;
} catch (e) {
console.error('Failed to parse transcription JSON:', e);
console.error('Raw text that failed to parse:', text);
return null;
}
} catch (e) {
console.error('Error during transcription:', e);
return null;
} finally {
if (uploadResult) {
await fileManager.deleteFile(uploadResult.file.name);
}
}
}
async function summarize(transcription: TranscriptionResult) {
try {
const model = genAI.getGenerativeModel({
model: options?.model || DEFAULT_MODEL,
generationConfig: {
responseMimeType: 'application/json',
},
});
const result = await model.generateContent([
{
text: PROMPT_SUMMARY + '\n\n' + JSON.stringify(transcription),
},
]);
const text = result.response.text();
try {
const parsed = JSON.parse(text);
return parsed;
} catch (e) {
console.error('Failed to parse summary JSON:', e);
console.error('Raw text that failed to parse:', text);
return null;
}
} catch (e) {
console.error('Error during summarization:', e);
return null;
}
}
const transcription = await transcribe(audioFilePath);
if (!transcription) {
console.error('Transcription failed');
return null;
}
const summary = await summarize(transcription);
if (!summary) {
console.error('Summary generation failed');
return transcription;
}
const result = {
...transcription,
...summary,
};
console.log('Processing completed:', {
title: result.title,
segmentsCount: result.segments?.length,
});
return result;
}

View File

@@ -0,0 +1,759 @@
/* eslint-disable @typescript-eslint/no-misused-promises */
import { exec } from 'node:child_process';
import { createServer } from 'node:http';
import { promisify } from 'node:util';
import {
type Application,
type AudioTapStream,
ShareableContent,
} from '@affine/native';
import type { FSWatcher } from 'chokidar';
import chokidar from 'chokidar';
import express from 'express';
import rateLimit from 'express-rate-limit';
import fs from 'fs-extra';
import { Server } from 'socket.io';
import { gemini, type TranscriptionResult } from './gemini';
import { WavWriter } from './wav-writer';
// Constants
const RECORDING_DIR = './recordings';
const PORT = process.env.PORT || 6544;
// Ensure recordings directory exists
fs.ensureDirSync(RECORDING_DIR);
console.log(`📁 Ensuring recordings directory exists at ${RECORDING_DIR}`);
// Types
interface Recording {
app: Application;
appGroup: Application | null;
buffers: Float32Array[];
stream: AudioTapStream;
startTime: number;
isWriting: boolean;
}
interface RecordingStatus {
processId: number;
bundleIdentifier: string;
name: string;
startTime: number;
duration: number;
}
interface RecordingMetadata {
appName: string;
bundleIdentifier: string;
processId: number;
recordingStartTime: number;
recordingEndTime: number;
recordingDuration: number;
sampleRate: number;
totalSamples: number;
}
interface AppInfo {
app: Application;
processId: number;
processGroupId: number | null;
bundleIdentifier: string;
name: string;
running: boolean;
}
interface TranscriptionMetadata {
transcriptionStartTime: number;
transcriptionEndTime: number;
transcriptionStatus: 'not_started' | 'pending' | 'completed' | 'error';
transcription?: TranscriptionResult;
error?: string;
}
// State
const recordingMap = new Map<number, Recording>();
let appsSubscriber = () => {};
let fsWatcher: FSWatcher | null = null;
// Server setup
const app = express();
const httpServer = createServer(app);
const io = new Server(httpServer, {
cors: { origin: '*' },
});
app.use(express.json());
// Update the static file serving to handle the new folder structure
app.use(
'/recordings',
(req, res, next) => {
// Extract the folder name from the path
const parts = req.path.split('/');
if (parts.length < 2) {
return res.status(400).json({ error: 'Invalid request path' });
}
const folderName = parts[1];
if (!validateAndSanitizeFolderName(folderName)) {
return res.status(400).json({ error: 'Invalid folder name format' });
}
if (req.path.endsWith('.wav')) {
res.setHeader('Content-Type', 'audio/wav');
} else if (req.path.endsWith('.png')) {
res.setHeader('Content-Type', 'image/png');
}
next();
},
express.static(RECORDING_DIR)
);
// Recording management
async function saveRecording(recording: Recording): Promise<string | null> {
try {
recording.isWriting = true;
const app = recording.appGroup || recording.app;
const totalSamples = recording.buffers.reduce(
(acc, buf) => acc + buf.length,
0
);
const recordingEndTime = Date.now();
const recordingDuration = (recordingEndTime - recording.startTime) / 1000;
const expectedSamples = recordingDuration * 44100;
console.log(`💾 Saving recording for ${app.name}:`);
console.log(`- Process ID: ${app.processId}`);
console.log(`- Bundle ID: ${app.bundleIdentifier}`);
console.log(`- Actual duration: ${recordingDuration.toFixed(2)}s`);
console.log(`- Expected samples: ${Math.floor(expectedSamples)}`);
console.log(`- Actual samples: ${totalSamples}`);
console.log(
`- Sample ratio: ${(totalSamples / expectedSamples).toFixed(2)}`
);
// Create a buffer for the mono audio
const buffer = new Float32Array(totalSamples);
let offset = 0;
recording.buffers.forEach(buf => {
buffer.set(buf, offset);
offset += buf.length;
});
await fs.ensureDir(RECORDING_DIR);
const timestamp = Date.now();
const baseFilename = `${recording.app.bundleIdentifier}-${recording.app.processId}-${timestamp}`;
const recordingDir = `${RECORDING_DIR}/${baseFilename}`;
await fs.ensureDir(recordingDir);
const wavFilename = `${recordingDir}/recording.wav`;
const transcriptionWavFilename = `${recordingDir}/transcription.wav`;
const metadataFilename = `${recordingDir}/metadata.json`;
const iconFilename = `${recordingDir}/icon.png`;
// Save high-quality WAV file for playback (44.1kHz)
console.log(`📝 Writing high-quality WAV file to ${wavFilename}`);
const writer = new WavWriter(wavFilename, { targetSampleRate: 44100 });
writer.write(buffer);
await writer.end();
console.log('✅ High-quality WAV file written successfully');
// Save low-quality WAV file for transcription (8kHz)
console.log(
`📝 Writing transcription WAV file to ${transcriptionWavFilename}`
);
const transcriptionWriter = new WavWriter(transcriptionWavFilename, {
targetSampleRate: 8000,
});
transcriptionWriter.write(buffer);
await transcriptionWriter.end();
console.log('✅ Transcription WAV file written successfully');
// Save app icon if available
if (app.icon) {
console.log(`📝 Writing app icon to ${iconFilename}`);
await fs.writeFile(iconFilename, app.icon);
console.log('✅ App icon written successfully');
}
console.log(`📝 Writing metadata to ${metadataFilename}`);
// Save metadata (without icon)
const metadata: RecordingMetadata = {
appName: app.name,
bundleIdentifier: app.bundleIdentifier,
processId: app.processId,
recordingStartTime: recording.startTime,
recordingEndTime,
recordingDuration,
sampleRate: 44100,
totalSamples,
};
await fs.writeJson(metadataFilename, metadata, { spaces: 2 });
console.log('✅ Metadata file written successfully');
return baseFilename;
} catch (error) {
console.error('❌ Error saving recording:', error);
return null;
}
}
function getRecordingStatus(): RecordingStatus[] {
return Array.from(recordingMap.entries()).map(([processId, recording]) => ({
processId,
bundleIdentifier: recording.app.bundleIdentifier,
name: recording.app.name,
startTime: recording.startTime,
duration: Date.now() - recording.startTime,
}));
}
function emitRecordingStatus() {
io.emit('apps:recording', { recordings: getRecordingStatus() });
}
async function startRecording(app: Application) {
if (recordingMap.has(app.processId)) {
console.log(
`⚠️ Recording already in progress for ${app.name} (PID: ${app.processId})`
);
return;
}
// Find the root app of the process group
const processGroupId = await getProcessGroupId(app.processId);
const rootApp = processGroupId
? (shareableContent
.applications()
.find(a => a.processId === processGroupId) ?? app)
: app;
console.log(
`🎙️ Starting recording for ${rootApp.name} (PID: ${rootApp.processId})`
);
const buffers: Float32Array[] = [];
const stream = app.tapAudio((err, samples) => {
if (err) {
console.error(`❌ Audio stream error for ${rootApp.name}:`, err);
return;
}
const recording = recordingMap.get(app.processId);
if (recording && !recording.isWriting) {
buffers.push(new Float32Array(samples));
}
});
recordingMap.set(app.processId, {
app,
appGroup: rootApp,
buffers,
stream,
startTime: Date.now(),
isWriting: false,
});
console.log(`✅ Recording started successfully for ${rootApp.name}`);
emitRecordingStatus();
}
async function stopRecording(processId: number) {
const recording = recordingMap.get(processId);
if (!recording) {
console.log(`⚠️ No active recording found for process ID ${processId}`);
return;
}
const app = recording.appGroup || recording.app;
console.log(`⏹️ Stopping recording for ${app.name} (PID: ${app.processId})`);
console.log(
`⏱️ Recording duration: ${((Date.now() - recording.startTime) / 1000).toFixed(2)}s`
);
recording.stream.stop();
const filename = await saveRecording(recording);
recordingMap.delete(processId);
if (filename) {
console.log(`✅ Recording saved successfully to ${filename}`);
} else {
console.error(`❌ Failed to save recording for ${app.name}`);
}
emitRecordingStatus();
return filename;
}
// File management
async function getRecordings(): Promise<
{
wav: string;
metadata?: RecordingMetadata;
transcription?: TranscriptionMetadata;
}[]
> {
try {
const allItems = await fs.readdir(RECORDING_DIR);
// First filter out non-directories
const dirs = (
await Promise.all(
allItems.map(async item => {
const fullPath = `${RECORDING_DIR}/${item}`;
try {
const stat = await fs.stat(fullPath);
return stat.isDirectory() ? item : null;
} catch {
return null;
}
})
)
).filter((d): d is string => d !== null);
const recordings = await Promise.all(
dirs.map(async dir => {
try {
const recordingPath = `${RECORDING_DIR}/${dir}`;
const metadataPath = `${recordingPath}/metadata.json`;
const transcriptionPath = `${recordingPath}/transcription.json`;
let metadata: RecordingMetadata | undefined;
try {
metadata = await fs.readJson(metadataPath);
} catch {
// Metadata might not exist
}
let transcription: TranscriptionMetadata | undefined;
try {
// Check if transcription file exists
const transcriptionExists = await fs.pathExists(transcriptionPath);
if (transcriptionExists) {
transcription = await fs.readJson(transcriptionPath);
} else {
// If transcription.wav exists but no transcription.json, it means transcription is available but not started
transcription = {
transcriptionStartTime: 0,
transcriptionEndTime: 0,
transcriptionStatus: 'not_started',
};
}
} catch (error) {
console.error(`Error reading transcription for ${dir}:`, error);
}
return {
wav: dir,
metadata,
transcription,
};
} catch (error) {
console.error(`Error processing directory ${dir}:`, error);
return null;
}
})
);
// Filter out nulls and sort by recording start time
return recordings
.filter((r): r is NonNullable<typeof r> => r !== null)
.sort(
(a, b) =>
(b.metadata?.recordingStartTime ?? 0) -
(a.metadata?.recordingStartTime ?? 0)
);
} catch (error) {
console.error('Error reading recordings directory:', error);
return [];
}
}
async function setupRecordingsWatcher() {
if (fsWatcher) {
console.log('🔄 Closing existing recordings watcher');
await fsWatcher.close();
}
try {
console.log('👀 Setting up recordings watcher...');
const files = await getRecordings();
console.log(`📊 Found ${files.length} existing recordings`);
io.emit('apps:saved', { recordings: files });
fsWatcher = chokidar.watch(RECORDING_DIR, {
ignored: /(^|[/\\])\../, // ignore dotfiles
persistent: true,
ignoreInitial: true,
awaitWriteFinish: {
stabilityThreshold: 500,
pollInterval: 100,
},
});
// Handle file events
fsWatcher
.on('add', async path => {
if (path.endsWith('.wav') || path.endsWith('.json')) {
console.log(`📝 File added: ${path}`);
const files = await getRecordings();
io.emit('apps:saved', { recordings: files });
}
})
.on('change', async path => {
if (path.endsWith('.wav') || path.endsWith('.json')) {
console.log(`📝 File changed: ${path}`);
const files = await getRecordings();
io.emit('apps:saved', { recordings: files });
}
})
.on('unlink', async path => {
if (path.endsWith('.wav') || path.endsWith('.json')) {
console.log(`🗑️ File removed: ${path}`);
const files = await getRecordings();
io.emit('apps:saved', { recordings: files });
}
})
.on('error', error => {
console.error('❌ Error watching recordings directory:', error);
})
.on('ready', () => {
console.log('✅ Recordings watcher setup complete');
});
} catch (error) {
console.error('❌ Error setting up recordings watcher:', error);
}
}
// Process management
async function getProcessGroupId(pid: number): Promise<number | null> {
try {
const execAsync = promisify(exec);
const { stdout } = await execAsync(`ps -o pgid -p ${pid}`);
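// `ps -o pgid -p <pid>` prints a header row followed by the group id, e.g.:
//    PGID
//    1234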
const lines = stdout.trim().split('\n');
if (lines.length < 2) return null;
const pgid = parseInt(lines[1].trim(), 10);
return isNaN(pgid) ? null : pgid;
} catch {
return null;
}
}
// Application management
const shareableContent = new ShareableContent();
async function getAllApps(): Promise<AppInfo[]> {
const apps = await Promise.all(
shareableContent.applications().map(async app => {
try {
return {
app,
processId: app.processId,
processGroupId: await getProcessGroupId(app.processId),
bundleIdentifier: app.bundleIdentifier,
name: app.name,
running: app.isRunning,
};
} catch (error) {
console.error(error);
return null;
}
})
);
const filteredApps = apps.filter(
(v): v is AppInfo =>
v !== null && !v.bundleIdentifier.startsWith('com.apple')
);
// Stop any active recordings whose app is no longer listed
await Promise.all(
Array.from(recordingMap.keys()).map(async processId => {
if (!filteredApps.some(a => a.processId === processId)) {
await stopRecording(processId);
}
})
);
return filteredApps;
}
function listenToAppStateChanges(apps: AppInfo[]) {
const subscribers = apps.map(({ app }) => {
return ShareableContent.onAppStateChanged(app, () => {
setTimeout(() => {
console.log(
`🔄 Application state changed: ${app.name} (PID: ${app.processId}) is now ${
app.isRunning ? '▶️ running' : '⏹️ stopped'
}`
);
io.emit('apps:state-changed', {
processId: app.processId,
running: app.isRunning,
});
if (!app.isRunning) {
stopRecording(app.processId).catch(error => {
console.error('❌ Error stopping recording:', error);
});
}
}, 50);
});
});
appsSubscriber();
appsSubscriber = () => {
subscribers.forEach(subscriber => subscriber.unsubscribe());
};
}
// Socket.IO setup
io.on('connection', async socket => {
console.log('🔌 New client connected');
const initialApps = await getAllApps();
console.log(`📤 Sending ${initialApps.length} applications to new client`);
socket.emit('apps:all', { apps: initialApps });
socket.emit('apps:recording', { recordings: getRecordingStatus() });
const files = await getRecordings();
console.log(`📤 Sending ${files.length} saved recordings to new client`);
socket.emit('apps:saved', { recordings: files });
listenToAppStateChanges(initialApps);
socket.on('disconnect', () => {
console.log('🔌 Client disconnected');
});
});
// Application list change listener
ShareableContent.onApplicationListChanged(() => {
(async () => {
try {
console.log('🔄 Application list changed, updating clients...');
const apps = await getAllApps();
console.log(`📢 Broadcasting ${apps.length} applications to all clients`);
io.emit('apps:all', { apps });
} catch (error) {
console.error('❌ Error handling application list change:', error);
}
})().catch(error => {
console.error('❌ Error in application list change handler:', error);
});
});
// API Routes
const rateLimiter = rateLimit({
windowMs: 1000,
max: 200,
message: { error: 'Too many requests, please try again later.' },
});
app.get('/permissions', (_req, res) => {
const permission = shareableContent.checkRecordingPermissions();
res.json({ permission });
});
app.get('/apps', async (_req, res) => {
const apps = await getAllApps();
listenToAppStateChanges(apps);
res.json({ apps });
});
app.get('/apps/saved', rateLimiter, async (_req, res) => {
const files = await getRecordings();
res.json({ recordings: files });
});
// Utility function to validate and sanitize folder name
function validateAndSanitizeFolderName(folderName: string): string | null {
// Allow word characters (letters, digits, underscore), hyphens, and dots (for bundle IDs)
// Format: bundleId-processId-timestamp
if (!/^[\w.-]+-\d+-\d+$/.test(folderName)) {
return null;
}
// Remove any path traversal attempts
const sanitized = folderName.replace(/^\.+|\.+$/g, '').replace(/[/\\]/g, '');
return sanitized;
}
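// Examples (hypothetical values):
//   validateAndSanitizeFolderName('com.example.app-123-1740000000000')
//     -> 'com.example.app-123-1740000000000'
//   validateAndSanitizeFolderName('../etc-1-2') -> null (slash fails the regex)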
app.delete('/recordings/:foldername', rateLimiter, async (req, res) => {
const foldername = validateAndSanitizeFolderName(req.params.foldername);
if (!foldername) {
console.error('❌ Invalid folder name format:', req.params.foldername);
return res.status(400).json({ error: 'Invalid folder name format' });
}
const recordingDir = `${RECORDING_DIR}/${foldername}`;
try {
// Ensure the resolved path is within RECORDING_DIR
const resolvedPath = await fs.realpath(recordingDir);
const recordingDirPath = await fs.realpath(RECORDING_DIR);
if (!resolvedPath.startsWith(recordingDirPath + '/')) {
console.error('❌ Path traversal attempt detected:', {
resolvedPath,
recordingDirPath,
requestedFile: foldername,
});
return res.status(403).json({ error: 'Access denied' });
}
console.log(`🗑️ Deleting recording folder: ${foldername}`);
await fs.remove(recordingDir);
console.log('✅ Recording folder deleted successfully');
res.status(200).json({ success: true });
} catch (error) {
const typedError = error as NodeJS.ErrnoException;
if (typedError.code === 'ENOENT') {
console.error('❌ Folder not found:', recordingDir);
res.status(404).json({ error: 'Folder not found' });
} else {
console.error('❌ Error deleting folder:', {
error: typedError,
code: typedError.code,
message: typedError.message,
path: recordingDir,
});
res.status(500).json({
error: `Failed to delete folder: ${typedError.message || 'Unknown error'}`,
});
}
}
});
app.get('/apps/:process_id/icon', (req, res) => {
const processId = parseInt(req.params.process_id, 10);
try {
const app = shareableContent.applicationWithProcessId(processId);
const icon = app.icon;
res.set('Content-Type', 'image/png');
res.send(icon);
} catch {
res.status(404).json({ error: 'App icon not found' });
}
});
app.post('/apps/:process_id/record', async (req, res) => {
const processId = parseInt(req.params.process_id, 10);
try {
const app = shareableContent.applicationWithProcessId(processId);
await startRecording(app);
res.json({ success: true });
} catch {
res.status(404).json({ error: 'App not found' });
}
});
app.post('/apps/:process_id/stop', async (req, res) => {
const processId = parseInt(req.params.process_id, 10);
await stopRecording(processId);
res.json({ success: true });
});
// Transcription endpoint (folder names are validated and sanitized)
app.post(
'/recordings/:foldername/transcribe',
rateLimiter,
async (req, res) => {
const foldername = validateAndSanitizeFolderName(req.params.foldername);
if (!foldername) {
console.error('❌ Invalid folder name format:', req.params.foldername);
return res.status(400).json({ error: 'Invalid folder name format' });
}
const recordingDir = `${RECORDING_DIR}/${foldername}`;
try {
// Check if directory exists
await fs.access(recordingDir);
const transcriptionWavPath = `${recordingDir}/transcription.wav`;
const transcriptionMetadataPath = `${recordingDir}/transcription.json`;
// Check if transcription file exists
await fs.access(transcriptionWavPath);
// Create initial transcription metadata
const initialMetadata: TranscriptionMetadata = {
transcriptionStartTime: Date.now(),
transcriptionEndTime: 0,
transcriptionStatus: 'pending',
};
await fs.writeJson(transcriptionMetadataPath, initialMetadata);
// Notify clients that transcription has started
io.emit('apps:recording-transcription-start', { filename: foldername });
const transcription = await gemini(transcriptionWavPath, {
mode: 'transcript',
});
// Update transcription metadata with results
const metadata: TranscriptionMetadata = {
transcriptionStartTime: initialMetadata.transcriptionStartTime,
transcriptionEndTime: Date.now(),
transcriptionStatus: 'completed',
transcription: transcription ?? undefined,
};
await fs.writeJson(transcriptionMetadataPath, metadata);
// Notify clients that transcription is complete
io.emit('apps:recording-transcription-end', {
filename: foldername,
success: true,
transcription,
});
res.json({ success: true });
} catch (error) {
console.error('❌ Error during transcription:', error);
// Update transcription metadata with error
const metadata: TranscriptionMetadata = {
transcriptionStartTime: Date.now(),
transcriptionEndTime: Date.now(),
transcriptionStatus: 'error',
error: error instanceof Error ? error.message : 'Unknown error',
};
await fs
.writeJson(`${recordingDir}/transcription.json`, metadata)
.catch(err => {
console.error('❌ Error saving transcription metadata:', err);
});
// Notify clients of transcription error
io.emit('apps:recording-transcription-end', {
filename: foldername,
success: false,
error: error instanceof Error ? error.message : 'Unknown error',
});
res.status(500).json({
error: error instanceof Error ? error.message : 'Unknown error',
});
}
}
);
// Start server
httpServer.listen(PORT, () => {
console.log(`
🎙️ Media Capture Server started successfully:
- Port: ${PORT}
- Recordings directory: ${RECORDING_DIR}
- Sample rate: 44.1kHz
- Channels: Mono
`);
});
// Initialize file watcher
setupRecordingsWatcher().catch(error => {
console.error('Failed to setup recordings watcher:', error);
});
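A minimal client sketch for the API above (illustrative only: it assumes socket.io-client on the client side, Node 18+ global fetch, and a hypothetical process id):

import { io } from 'socket.io-client';

const BASE = 'http://localhost:6544';
const socket = io(BASE);

socket.on('apps:all', ({ apps }) => console.log(`apps: ${apps.length}`));
socket.on('apps:recording', ({ recordings }) => console.log('recording:', recordings));
socket.on('apps:saved', ({ recordings }) => console.log(`saved: ${recordings.length}`));

// Start a recording for a (hypothetical) pid, wait, then stop it.
async function recordFor(pid: number, ms: number) {
  await fetch(`${BASE}/apps/${pid}/record`, { method: 'POST' });
  await new Promise(resolve => setTimeout(resolve, ms));
  await fetch(`${BASE}/apps/${pid}/stop`, { method: 'POST' });
}

recordFor(1234, 5000).catch(console.error);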


@@ -0,0 +1,4 @@
declare module '*.txt' {
const content: string;
export default content;
}


@@ -0,0 +1,125 @@
import fs from 'fs-extra';
interface WavWriterConfig {
targetSampleRate?: number;
}
export class WavWriter {
private readonly file: fs.WriteStream;
private readonly originalSampleRate: number = 44100;
private readonly targetSampleRate: number;
private readonly numChannels = 1; // The audio is mono
private samplesWritten = 0;
private readonly tempFilePath: string;
private readonly finalFilePath: string;
constructor(finalPath: string, config: WavWriterConfig = {}) {
this.finalFilePath = finalPath;
this.tempFilePath = finalPath + '.tmp';
this.targetSampleRate = config.targetSampleRate ?? this.originalSampleRate;
this.file = fs.createWriteStream(this.tempFilePath);
this.writeHeader(); // Always write header immediately
}
private writeHeader() {
const buffer = Buffer.alloc(44); // WAV header is 44 bytes
// RIFF chunk descriptor
buffer.write('RIFF', 0);
buffer.writeUInt32LE(36, 4); // Initial file size - 8 (will be updated later)
buffer.write('WAVE', 8);
// fmt sub-chunk
buffer.write('fmt ', 12);
buffer.writeUInt32LE(16, 16); // Subchunk1Size (16 for PCM)
buffer.writeUInt16LE(3, 20); // AudioFormat (3 for IEEE float)
buffer.writeUInt16LE(this.numChannels, 22); // NumChannels
buffer.writeUInt32LE(this.targetSampleRate, 24); // SampleRate
buffer.writeUInt32LE(this.targetSampleRate * this.numChannels * 4, 28); // ByteRate
buffer.writeUInt16LE(this.numChannels * 4, 32); // BlockAlign
buffer.writeUInt16LE(32, 34); // BitsPerSample (32 for float)
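// Note: the strict WAV spec pairs non-PCM (IEEE float) data with an 18-byte
// fmt chunk and a 'fact' chunk; most decoders also accept this compact
// 16-byte fmt variant.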
// data sub-chunk
buffer.write('data', 36);
buffer.writeUInt32LE(0, 40); // Initial data size (will be updated later)
this.file.write(buffer);
}
private resample(samples: Float32Array): Float32Array {
const ratio = this.originalSampleRate / this.targetSampleRate;
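// e.g. 44100 -> 8000 gives ratio 5.5125: output sample i is read from
// input position i * 5.5125 via the linear interpolation below.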
const newLength = Math.floor(samples.length / ratio);
const result = new Float32Array(newLength);
for (let i = 0; i < newLength; i++) {
const position = i * ratio;
const index = Math.floor(position);
const fraction = position - index;
// Linear interpolation between adjacent samples
if (index + 1 < samples.length) {
result[i] =
samples[index] * (1 - fraction) + samples[index + 1] * fraction;
} else {
result[i] = samples[index];
}
}
return result;
}
write(samples: Float32Array) {
// Resample the input samples
const resampledData = this.resample(samples);
// Create a buffer with the correct size (4 bytes per float)
const buffer = Buffer.alloc(resampledData.length * 4);
// Write each float value properly
for (let i = 0; i < resampledData.length; i++) {
buffer.writeFloatLE(resampledData[i], i * 4);
}
this.file.write(buffer);
this.samplesWritten += resampledData.length;
}
async end(): Promise<void> {
return new Promise<void>((resolve, reject) => {
this.file.end(() => {
void this.updateHeaderAndCleanup().then(resolve).catch(reject);
});
});
}
private async updateHeaderAndCleanup(): Promise<void> {
// Read the entire temporary file
const data = await fs.promises.readFile(this.tempFilePath);
// Update the header with correct sizes
const dataSize = this.samplesWritten * 4;
const fileSize = dataSize + 36;
data.writeUInt32LE(fileSize, 4); // Update RIFF chunk size
data.writeUInt32LE(dataSize, 40); // Update data chunk size
// Write the updated file
await fs.promises.writeFile(this.finalFilePath, data);
// Clean up temp file
await fs.promises.unlink(this.tempFilePath);
}
}
/**
* Creates a Buffer from Float32Array audio data
* @param float32Array - The Float32Array containing audio samples
* @returns The audio samples encoded as a little-endian float32 Buffer
*/
export function FileData(float32Array: Float32Array): Buffer {
const buffer = Buffer.alloc(float32Array.length * 4); // 4 bytes per float
for (let i = 0; i < float32Array.length; i++) {
buffer.writeFloatLE(float32Array[i], i * 4);
}
return buffer;
}
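A usage sketch for WavWriter (illustrative: a generated 440 Hz tone stands in for captured samples at the writer's assumed 44.1 kHz input rate):

import { WavWriter } from './wav-writer';

async function main() {
  // One second of a 440 Hz sine tone at 44.1 kHz.
  const samples = new Float32Array(44100);
  for (let i = 0; i < samples.length; i++) {
    samples[i] = 0.5 * Math.sin((2 * Math.PI * 440 * i) / 44100);
  }
  // Downsample to 8 kHz on write, as the server does for transcription audio.
  const writer = new WavWriter('./tone.wav', { targetSampleRate: 8000 });
  writer.write(samples);
  await writer.end();
}

main().catch(console.error);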


@@ -0,0 +1,7 @@
{
"extends": "../../../tsconfig.node.json",
"compilerOptions": {
"rootDir": "./server"
},
"include": ["./server"]
}


@@ -0,0 +1,10 @@
{
"extends": "../../../tsconfig.web.json",
"compilerOptions": {
"rootDir": "./web",
"outDir": "./dist",
"tsBuildInfoFile": "./dist/tsconfig.tsbuildinfo"
},
"include": ["./web", "server/types.d.ts"],
"references": [{ "path": "../native" }]
}


@@ -0,0 +1,18 @@
import tailwindcss from '@tailwindcss/vite';
import react from '@vitejs/plugin-react';
import { defineConfig } from 'vite';
// https://vite.dev/config/
export default defineConfig({
plugins: [react(), tailwindcss()],
root: './web',
server: {
proxy: {
'/api': {
target: 'http://localhost:6544',
changeOrigin: true,
rewrite: path => path.replace(/^\/api/, ''),
},
},
},
});
