feat(native): doc loader for common native (#9941)
@@ -3,15 +3,63 @@ edition = "2021"
name = "affine_common"
version = "0.1.0"

[features]
default = []
doc-loader = ["docx-parser", "infer", "path-ext", "pdf-extract", "readability", "serde_json", "strum_macros", "text-splitter", "thiserror", "tree-sitter", "url"]
tree-sitter = [
  "cc",
  "dep:tree-sitter",
  "dep:tree-sitter-c",
  "dep:tree-sitter-c-sharp",
  "dep:tree-sitter-cpp",
  "dep:tree-sitter-go",
  "dep:tree-sitter-java",
  "dep:tree-sitter-javascript",
  "dep:tree-sitter-kotlin-ng",
  "dep:tree-sitter-python",
  "dep:tree-sitter-rust",
  "dep:tree-sitter-scala",
  "dep:tree-sitter-typescript",
]

[dependencies]
chrono = { workspace = true }
rand = { workspace = true }
sha3 = { workspace = true }

docx-parser = { git = "https://github.com/toeverything/docx-parser", optional = true }
infer = { version = "0.19.0", optional = true }
path-ext = { version = "0.1.1", optional = true }
pdf-extract = { version = "0.8.2", optional = true }
readability = { version = "0.3.0", optional = true, default-features = false }
serde_json = { version = "1.0", optional = true }
strum_macros = { version = "0.26.2", optional = true }
text-splitter = { version = "0.22", features = ["markdown", "tiktoken-rs"], optional = true }
thiserror = { version = "1", optional = true }
tree-sitter = { version = "0.25", optional = true }
tree-sitter-c = { version = "0.23", optional = true }
tree-sitter-c-sharp = { version = "0.23", optional = true }
tree-sitter-cpp = { version = "0.23", optional = true }
tree-sitter-go = { version = "0.23", optional = true }
tree-sitter-java = { version = "0.23", optional = true }
tree-sitter-javascript = { version = "0.23", optional = true }
tree-sitter-kotlin-ng = { version = "1.1", optional = true }
tree-sitter-python = { version = "0.23", optional = true }
tree-sitter-rust = { version = "0.23", optional = true }
tree-sitter-scala = { version = "0.23", optional = true }
tree-sitter-typescript = { version = "0.23", optional = true }
url = { version = "2.5", optional = true }


tiktoken-rs = { workspace = true }

[dev-dependencies]
criterion2 = { workspace = true }
rayon = { workspace = true }

[build-dependencies]
cc = { version = "1", optional = true }

[[bench]]
harness = false
name = "hashcash"
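All of the loader's parser dependencies are optional and grouped under the new `doc-loader` feature, so consumers that never touch document parsing keep the tree-sitter grammars and PDF machinery out of their build. A minimal sketch of feature-gated use (hypothetical consumer code; the `affine_common::doc_loader` path and re-export are assumptions based on the `src/doc_loader/` layout added later in this diff):

```rust
// Hypothetical downstream helper; compiled only when affine_common is
// built with the `doc-loader` feature enabled.
#[cfg(feature = "doc-loader")]
pub fn chunk_bytes(path: &str, bytes: &[u8]) -> Option<Vec<String>> {
  use affine_common::doc_loader::Doc; // assumed module path
  let doc = Doc::new(path, bytes)?;
  Some(doc.chunks.into_iter().map(|chunk| chunk.content).collect())
}
```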
BIN
packages/common/native/fixtures/demo.docx
Normal file
Binary file not shown.
28
packages/common/native/fixtures/demo.docx.0.md
Normal file
@@ -0,0 +1,28 @@
# DOCX Demo

# <a name="OLE_LINK1"></a><a name="OLE_LINK2"></a><a name="_Toc359077851"></a>Demonstration of DOCX support in calibre

This document demonstrates the ability of the calibre DOCX Input plugin to convert the various typographic features in a Microsoft Word (2007 and newer) document. Convert this document to a modern ebook format, such as AZW3 for Kindles or EPUB for other ebook readers, to see it in action.

There is support for images, tables, lists, footnotes, endnotes, links, dropcaps and various types of text and paragraph level formatting.

To see the DOCX conversion in action, simply add this file to calibre using the **“Add Books” **button and then click “**Convert”. ** Set the output format in the top right corner of the conversion dialog to EPUB or AZW3 and click **“OK”**.

# <a name="_Toc359077852"></a>Text Formatting

## <a name="_Toc359077853"></a>Inline formatting

Here, we demonstrate various types of inline text formatting and the use of embedded fonts.

Here is some **bold, ***italic, ****bold-italic, ***__underlined __and ~~struck out ~~ text. Then, we have a superscript and a subscript. Now we see some red, green and blue text. Some text with a yellow highlight. Some text in a box. Some text in inverse video.

A paragraph with styled text: subtle emphasis followed by strong text and intense emphasis. This paragraph uses document wide styles for styling rather than inline text properties as demonstrated in the previous paragraph — calibre can handle both with equal ease.

## <a name="_Toc359077854"></a>Fun with fonts

This document has embedded the Ubuntu font family. The body text is in the Ubuntu typeface, here is some text in the Ubuntu Mono typeface, notice how every letter has the same width, even i and m. Every embedded font will automatically be embedded in the output ebook during conversion.

## ***<a name="_Paragraph_level_formatting"></a>******<a name="_Toc359077855"></a>******Paragraph level formatting***

You can do crazy things with paragraphs, if the urge strikes you. For instance this paragraph is right aligned and has a right border. It has also been given a light gray background.
28
packages/common/native/fixtures/demo.docx.1.md
Normal file
@@ -0,0 +1,28 @@
For the lovers of poetry amongst you, paragraphs with hanging indents, like this often come in handy. You can use hanging indents to ensure that a line of poetry retains its individual identity as a line even when the screen is too narrow to display it as a single line. Not only does this paragraph have a hanging indent, it is also has an extra top margin, setting it apart from the preceding paragraph.

# <a name="_Toc359077856"></a>Tables

| | |
| ----------- | -------- |
| ITEM | NEEDED |
| Books | 1 |
| Pens | 3 |
| Pencils | 2 |
| Highlighter | 2 colors |
| Scissors | 1 pair |

Tables in Word can vary from the extremely simple to the extremely complex. calibre tries to do its best when converting tables. While you may run into trouble with the occasional table, the vast majority of common cases should be converted very well, as demonstrated in this section. Note that for optimum results, when creating tables in Word, you should set their widths using percentages, rather than absolute units. To the left of this paragraph is a floating two column table with a nice green border and header row.

Now let’s look at a fancier table—one with alternating row colors and partial borders. This table is stretched out to take 100% of the available width.

| | | | | | |
| ------------ | ------- | ------- | ------- | ------- | ------- |
| City or Town | Point A | Point B | Point C | Point D | Point E |
| Point A | — | | | | |
| Point B | 87 | — | | | |
| Point C | 64 | 56 | — | | |
| Point D | 37 | 32 | 91 | — | |
| Point E | 93 | 35 | 54 | 43 | — |

Next, we see a table with special formatting in various locations. Notice how the formatting for the header row and sub header rows is preserved.
21
packages/common/native/fixtures/demo.docx.2.md
Normal file
@@ -0,0 +1,21 @@
| | | | |
| ---------------- | ------------- | ------------------- | ------ |
| College | New students | Graduating students | Change |
| | Undergraduate | | |
| Cedar University | 110 | 103 | +7 |
| Oak Institute | 202 | 210 | -8 |
| | Graduate | | |
| Cedar University | 24 | 20 | +4 |
| Elm College | 43 | 53 | -10 |
| Total | 998 | 908 | 90 |

Source: Fictitious data, for illustration purposes only

Next, we have something a little more complex, a nested table, i.e. a table inside another table. Additionally, the inner table has some of its cells merged. The table is displayed horizontally centered.

| | |
| --- | -------------------------------------------------------------- |
| | To the left is a table inside a table, with some cells merged. |

We end with a fancy calendar, note how much of the original formatting is preserved. Note that this table will only display correctly on relatively wide screens. In general, very wide tables or tables whose cells have fixed width requirements don’t fare well in ebooks.
18
packages/common/native/fixtures/demo.docx.3.md
Normal file
@@ -0,0 +1,18 @@
| | | | | | | | | | | | | |
| ------------- | | --- | | --- | | --- | | --- | | --- | | --- |
| December 2007 | | | | | | | | | | | | |
| Sun | | Mon | | Tue | | Wed | | Thu | | Fri | | Sat |
| | | | | | | | | | | | | 1 |
| | | | | | | | | | | | | |
| 2 | | 3 | | 4 | | 5 | | 6 | | 7 | | 8 |
| | | | | | | | | | | | | |
| 9 | | 10 | | 11 | | 12 | | 13 | | 14 | | 15 |
| | | | | | | | | | | | | |
| 16 | | 17 | | 18 | | 19 | | 20 | | 21 | | 22 |
| | | | | | | | | | | | | |
| 23 | | 24 | | 25 | | 26 | | 27 | | 28 | | 29 |
| | | | | | | | | | | | | |
| 30 | | 31 | | | | | | | | | | |

# <a name="_Toc359077857"></a>Structural Elements
20
packages/common/native/fixtures/demo.docx.4.md
Normal file
@@ -0,0 +1,20 @@
Miscellaneous structural elements you can add to your document, like footnotes, endnotes, dropcaps and the like.

## <a name="_Toc359077858"></a>Footnotes & Endnotes

Footnotes and endnotes are automatically recognized and both are converted to endnotes, with backlinks for maximum ease of use in ebook devices.

## <a name="_Toc359077859"></a>Dropcaps

D

rop caps are used to emphasize the leading paragraph at the start of a section. In Word it is possible to specify how many lines of text a drop-cap should use. Because of limitations in ebook technology, this is not possible when converting. Instead, the converted drop cap will use font size and line height to simulate the effect as well as possible. While not as good as the original, the result is usually tolerable. This paragraph has a “D” dropcap set to occupy three lines of text with a font size of 58.5 pts. Depending on the screen width and capabilities of the device you view the book on, this dropcap can look anything from perfect to ugly.

## <a name="_Toc359077860"></a>Links

Two kinds of links are possible, those that refer to an external website and those that refer to locations inside the document itself. Both are supported by calibre. For example, here is a link pointing to the [calibre download page](http://calibre-ebook.com/download). Then we have a link that points back to the section on [paragraph level formatting](#_Paragraph_level_formatting) in this document.

## <a name="_Toc359077861"></a>Table of Contents

There are two approaches that calibre takes when generating a Table of Contents. The first is if the Word document has a Table of Contents itself. Provided that the Table of Contents uses hyperlinks, calibre will automatically use it. The levels of the Table of Contents are identified by their left indent, so if you want the ebook to have a multi-level Table of Contents, make sure you create a properly indented Table of Contents in Word.
30
packages/common/native/fixtures/demo.docx.5.md
Normal file
@@ -0,0 +1,30 @@
If no Table of Contents is found in the document, then a table of contents is automatically generated from the headings in the document. A heading is identified as something that has the Heading 1 or Heading 2, etc. style applied to it. These headings are turned into a Table of Contents with Heading 1 being the topmost level, Heading 2 the second level and so on.

You can see the Table of Contents created by calibre by clicking the Table of Contents button in whatever viewer you are using to view the converted ebook.

# <a name="_Toc359077862"></a>Images

Images can be of three main types. Inline images are images that are part of the normal text flow, like this image of a green dot . Inline images do not cause breaks in the text and are usually small in size. The next category of image is a floating image, one that “floats “ on the page and is surrounded by text. Word supports more types of floating images than are possible with current ebook technology, so the conversion maps floating images to simple left and right floats, as you can see with the left and right arrow images on the sides of this paragraph.

The final type of image is a “block” image, one that becomes a paragraph on its own and has no text on either side. Below is a centered green dot.

Centered images like this are useful for large pictures that should be a focus of attention.

Generally, it is not possible to translate the exact positioning of images from a Word document to an ebook. That is because in Word, image positioning is specified in absolute units from the page boundaries. There is no analogous technology in ebooks, so the conversion will usually end up placing the image either centered or floating close to the point in the text where it was inserted, not necessarily where it appears on the page in Word.

# <a name="_Toc359077863"></a>Lists

All types of lists are supported by the conversion, with the exception of lists that use fancy bullets, these get converted to regular bullets.

## <a name="_Toc359077864"></a>Bulleted List

- One

- Two

## <a name="_Toc359077865"></a>Numbered List

1. One, with a very long line to demonstrate that the hanging indent for the list is working correctly

2. Two
37
packages/common/native/fixtures/demo.docx.6.md
Normal file
@@ -0,0 +1,37 @@
## <a name="_Toc359077866"></a>Multi-level Lists

1. One

2. Two

3. Three

4. Four with a very long line to demonstrate that the hanging indent for the list is working correctly.

5. Five

6. Six

A Multi-level list with bullets:

- One

- Two

- This bullet uses an image as the bullet item

- Four

- Five

## <a name="_Toc359077867"></a>Continued Lists

i. One

j. Two

An interruption in our regularly scheduled listing, for this essential and very relevant public service announcement.

k. We now resume our normal programming

l. Four
182
packages/common/native/fixtures/demo.docx.md
Normal file
@@ -0,0 +1,182 @@
# DOCX Demo

# <a name="OLE_LINK1"></a><a name="OLE_LINK2"></a><a name="_Toc359077851"></a>Demonstration of DOCX support in calibre

This document demonstrates the ability of the calibre DOCX Input plugin to convert the various typographic features in a Microsoft Word (2007 and newer) document. Convert this document to a modern ebook format, such as AZW3 for Kindles or EPUB for other ebook readers, to see it in action.

There is support for images, tables, lists, footnotes, endnotes, links, dropcaps and various types of text and paragraph level formatting.

To see the DOCX conversion in action, simply add this file to calibre using the **“Add Books” **button and then click “**Convert”. ** Set the output format in the top right corner of the conversion dialog to EPUB or AZW3 and click **“OK”**.

# <a name="_Toc359077852"></a>Text Formatting

## <a name="_Toc359077853"></a>Inline formatting

Here, we demonstrate various types of inline text formatting and the use of embedded fonts.

Here is some **bold, ***italic, ****bold-italic, ***__underlined __and ~~struck out ~~ text. Then, we have a superscript and a subscript. Now we see some red, green and blue text. Some text with a yellow highlight. Some text in a box. Some text in inverse video.

A paragraph with styled text: subtle emphasis followed by strong text and intense emphasis. This paragraph uses document wide styles for styling rather than inline text properties as demonstrated in the previous paragraph — calibre can handle both with equal ease.

## <a name="_Toc359077854"></a>Fun with fonts

This document has embedded the Ubuntu font family. The body text is in the Ubuntu typeface, here is some text in the Ubuntu Mono typeface, notice how every letter has the same width, even i and m. Every embedded font will automatically be embedded in the output ebook during conversion.

## ***<a name="_Paragraph_level_formatting"></a>******<a name="_Toc359077855"></a>******Paragraph level formatting***

You can do crazy things with paragraphs, if the urge strikes you. For instance this paragraph is right aligned and has a right border. It has also been given a light gray background.

For the lovers of poetry amongst you, paragraphs with hanging indents, like this often come in handy. You can use hanging indents to ensure that a line of poetry retains its individual identity as a line even when the screen is too narrow to display it as a single line. Not only does this paragraph have a hanging indent, it is also has an extra top margin, setting it apart from the preceding paragraph.

# <a name="_Toc359077856"></a>Tables

| | |
| ----------- | -------- |
| ITEM | NEEDED |
| Books | 1 |
| Pens | 3 |
| Pencils | 2 |
| Highlighter | 2 colors |
| Scissors | 1 pair |

Tables in Word can vary from the extremely simple to the extremely complex. calibre tries to do its best when converting tables. While you may run into trouble with the occasional table, the vast majority of common cases should be converted very well, as demonstrated in this section. Note that for optimum results, when creating tables in Word, you should set their widths using percentages, rather than absolute units. To the left of this paragraph is a floating two column table with a nice green border and header row.

Now let’s look at a fancier table—one with alternating row colors and partial borders. This table is stretched out to take 100% of the available width.

| | | | | | |
| ------------ | ------- | ------- | ------- | ------- | ------- |
| City or Town | Point A | Point B | Point C | Point D | Point E |
| Point A | — | | | | |
| Point B | 87 | — | | | |
| Point C | 64 | 56 | — | | |
| Point D | 37 | 32 | 91 | — | |
| Point E | 93 | 35 | 54 | 43 | — |

Next, we see a table with special formatting in various locations. Notice how the formatting for the header row and sub header rows is preserved.

| | | | |
| ---------------- | ------------- | ------------------- | ------ |
| College | New students | Graduating students | Change |
| | Undergraduate | | |
| Cedar University | 110 | 103 | +7 |
| Oak Institute | 202 | 210 | -8 |
| | Graduate | | |
| Cedar University | 24 | 20 | +4 |
| Elm College | 43 | 53 | -10 |
| Total | 998 | 908 | 90 |

Source: Fictitious data, for illustration purposes only

Next, we have something a little more complex, a nested table, i.e. a table inside another table. Additionally, the inner table has some of its cells merged. The table is displayed horizontally centered.

| | |
| --- | -------------------------------------------------------------- |
| | To the left is a table inside a table, with some cells merged. |

We end with a fancy calendar, note how much of the original formatting is preserved. Note that this table will only display correctly on relatively wide screens. In general, very wide tables or tables whose cells have fixed width requirements don’t fare well in ebooks.

| | | | | | | | | | | | | |
| ------------- | | --- | | --- | | --- | | --- | | --- | | --- |
| December 2007 | | | | | | | | | | | | |
| Sun | | Mon | | Tue | | Wed | | Thu | | Fri | | Sat |
| | | | | | | | | | | | | 1 |
| | | | | | | | | | | | | |
| 2 | | 3 | | 4 | | 5 | | 6 | | 7 | | 8 |
| | | | | | | | | | | | | |
| 9 | | 10 | | 11 | | 12 | | 13 | | 14 | | 15 |
| | | | | | | | | | | | | |
| 16 | | 17 | | 18 | | 19 | | 20 | | 21 | | 22 |
| | | | | | | | | | | | | |
| 23 | | 24 | | 25 | | 26 | | 27 | | 28 | | 29 |
| | | | | | | | | | | | | |
| 30 | | 31 | | | | | | | | | | |

# <a name="_Toc359077857"></a>Structural Elements

Miscellaneous structural elements you can add to your document, like footnotes, endnotes, dropcaps and the like.

## <a name="_Toc359077858"></a>Footnotes & Endnotes

Footnotes and endnotes are automatically recognized and both are converted to endnotes, with backlinks for maximum ease of use in ebook devices.

## <a name="_Toc359077859"></a>Dropcaps

D

rop caps are used to emphasize the leading paragraph at the start of a section. In Word it is possible to specify how many lines of text a drop-cap should use. Because of limitations in ebook technology, this is not possible when converting. Instead, the converted drop cap will use font size and line height to simulate the effect as well as possible. While not as good as the original, the result is usually tolerable. This paragraph has a “D” dropcap set to occupy three lines of text with a font size of 58.5 pts. Depending on the screen width and capabilities of the device you view the book on, this dropcap can look anything from perfect to ugly.

## <a name="_Toc359077860"></a>Links

Two kinds of links are possible, those that refer to an external website and those that refer to locations inside the document itself. Both are supported by calibre. For example, here is a link pointing to the [calibre download page](http://calibre-ebook.com/download). Then we have a link that points back to the section on [paragraph level formatting](#_Paragraph_level_formatting) in this document.

## <a name="_Toc359077861"></a>Table of Contents

There are two approaches that calibre takes when generating a Table of Contents. The first is if the Word document has a Table of Contents itself. Provided that the Table of Contents uses hyperlinks, calibre will automatically use it. The levels of the Table of Contents are identified by their left indent, so if you want the ebook to have a multi-level Table of Contents, make sure you create a properly indented Table of Contents in Word.

If no Table of Contents is found in the document, then a table of contents is automatically generated from the headings in the document. A heading is identified as something that has the Heading 1 or Heading 2, etc. style applied to it. These headings are turned into a Table of Contents with Heading 1 being the topmost level, Heading 2 the second level and so on.

You can see the Table of Contents created by calibre by clicking the Table of Contents button in whatever viewer you are using to view the converted ebook.

# <a name="_Toc359077862"></a>Images

Images can be of three main types. Inline images are images that are part of the normal text flow, like this image of a green dot . Inline images do not cause breaks in the text and are usually small in size. The next category of image is a floating image, one that “floats “ on the page and is surrounded by text. Word supports more types of floating images than are possible with current ebook technology, so the conversion maps floating images to simple left and right floats, as you can see with the left and right arrow images on the sides of this paragraph.

The final type of image is a “block” image, one that becomes a paragraph on its own and has no text on either side. Below is a centered green dot.

Centered images like this are useful for large pictures that should be a focus of attention.

Generally, it is not possible to translate the exact positioning of images from a Word document to an ebook. That is because in Word, image positioning is specified in absolute units from the page boundaries. There is no analogous technology in ebooks, so the conversion will usually end up placing the image either centered or floating close to the point in the text where it was inserted, not necessarily where it appears on the page in Word.

# <a name="_Toc359077863"></a>Lists

All types of lists are supported by the conversion, with the exception of lists that use fancy bullets, these get converted to regular bullets.

## <a name="_Toc359077864"></a>Bulleted List

- One

- Two

## <a name="_Toc359077865"></a>Numbered List

1. One, with a very long line to demonstrate that the hanging indent for the list is working correctly

2. Two

## <a name="_Toc359077866"></a>Multi-level Lists

1. One

2. Two

3. Three

4. Four with a very long line to demonstrate that the hanging indent for the list is working correctly.

5. Five

6. Six

A Multi-level list with bullets:

- One

- Two

- This bullet uses an image as the bullet item

- Four

- Five

## <a name="_Toc359077867"></a>Continued Lists

i. One

j. Two

An interruption in our regularly scheduled listing, for this essential and very relevant public service announcement.

k. We now resume our normal programming

l. Four
7
packages/common/native/fixtures/sample.c
Normal file
@@ -0,0 +1,7 @@

#include <stdio.h>

int main() {
  printf("Hello, World!\n");
  return 0;
}
1
packages/common/native/fixtures/sample.c.0.md
Normal file
@@ -0,0 +1 @@
#include <stdio.h>
4
packages/common/native/fixtures/sample.c.1.md
Normal file
@@ -0,0 +1,4 @@
int main() {
  printf("Hello, World!\n");
  return 0;
}
481
packages/common/native/fixtures/sample.html
Normal file
@@ -0,0 +1,481 @@
<!doctype html>
<!-- saved from url=(0020)https://example.org/ -->
<html>
  <head>
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
    <title>Example Domain</title>

    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <style type="text/css">
      body {
        background-color: #f0f0f2;
        margin: 0;
        padding: 0;
        font-family: -apple-system, system-ui, BlinkMacSystemFont, 'Segoe UI',
          'Open Sans', 'Helvetica Neue', Helvetica, Arial, sans-serif;
      }
      div {
        width: 600px;
        margin: 5em auto;
        padding: 2em;
        background-color: #fdfdff;
        border-radius: 0.5em;
        box-shadow: 2px 3px 7px 2px rgba(0, 0, 0, 0.02);
      }
      a:link,
      a:visited {
        color: #38488f;
        text-decoration: none;
      }
      @media (max-width: 700px) {
        div {
          margin: 0 auto;
          width: auto;
        }
      }
    </style>
  </head>

  <body>
    <div>
      <h1>Example Domain</h1>
      <p>
        This domain is for use in illustrative examples in documents. You may
        use this domain in literature without prior coordination or asking for
        permission.
      </p>
      <p>
        <a
          href="https://www.iana.org/domains/example"
          rel="noreferrer"
          data-ss1736873651="1"
          >More information...</a
        >
      </p>
    </div>
  </body>
  <script type="text/javascript">
    (function (
      canvas,
      canvasfont,
      audioblock,
      battery,
      webgl,
      webrtcdevice,
      gamepad,
      webvr,
      bluetooth,
      timezone,
      clientrects,
      clipboard,
      browserplugins
    ) {
      function processFunctions(scope) {
        /* Browser Plugins */
        if (browserplugins == 'true') {
          scope.Object.defineProperty(navigator, 'plugins', {
            enumerable: true,
            configurable: true,
            get: function () {
              var browserplugins_triggerblock =
                scope.document.createElement('div');
              browserplugins_triggerblock.className =
                'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_browserplugins';
              browserplugins_triggerblock.title = 'navigator.plugins';
              document.documentElement.appendChild(browserplugins_triggerblock);
              return '';
            },
          });
        }
        /* Canvas */
        if (canvas != 'false') {
          var fakecanvas = scope.document.createElement('canvas');
          fakecanvas.className =
            'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_canvas';
          if (canvas == 'random') {
            var fakewidth = (fakecanvas.width =
              Math.floor(Math.random() * 999) + 1);
            var fakeheight = (fakecanvas.height =
              Math.floor(Math.random() * 999) + 1);
          }
          var canvas_a = scope.HTMLCanvasElement;
          var origToDataURL = canvas_a.prototype.toDataURL;
          var origToBlob = canvas_a.prototype.toBlob;
          canvas_a.prototype.toDataURL = function () {
            fakecanvas.title = 'toDataURL';
            document.documentElement.appendChild(fakecanvas);
            if (canvas == 'block') return false;
            else if (canvas == 'blank') {
              fakecanvas.width = this.width;
              fakecanvas.height = this.height;
              return origToDataURL.apply(fakecanvas, arguments);
            } else if (canvas == 'random') {
              return origToDataURL.apply(fakecanvas, arguments);
            }
          };
          canvas_a.prototype.toBlob = function () {
            fakecanvas.title = 'toBlob';
            document.documentElement.appendChild(fakecanvas);
            if (canvas == 'block') return false;
            else if (canvas == 'blank') {
              fakecanvas.width = this.width;
              fakecanvas.height = this.height;
              return origToBlob.apply(fakecanvas, arguments);
            } else if (canvas == 'random') {
              return origToBlob.apply(fakecanvas, arguments);
            }
          };
          var canvas_b = scope.CanvasRenderingContext2D;
          var origGetImageData = canvas_b.prototype.getImageData;
          canvas_b.prototype.getImageData = function () {
            fakecanvas.title = 'getImageData';
            document.documentElement.appendChild(fakecanvas);
            if (canvas == 'block') return false;
            else if (canvas == 'blank') {
              fakecanvas.width = this.width;
              fakecanvas.height = this.height;
              return origGetImageData.apply(
                fakecanvas.getContext('2d'),
                arguments
              );
            } else if (canvas == 'random') {
              return origGetImageData.apply(fakecanvas.getContext('2d'), [
                Math.floor(Math.random() * fakewidth) + 1,
                Math.floor(Math.random() * fakeheight) + 1,
                Math.floor(Math.random() * fakewidth) + 1,
                Math.floor(Math.random() * fakeheight) + 1,
              ]);
            }
          };
          var origGetLineDash = canvas_b.prototype.getLineDash;
          canvas_b.prototype.getLineDash = function () {
            fakecanvas.title = 'getLineDash';
            document.documentElement.appendChild(fakecanvas);
            if (canvas == 'block') return false;
            else if (canvas == 'blank') {
              fakecanvas.width = this.width;
              fakecanvas.height = this.height;
              return origGetLineDash.apply(fakecanvas.getContext('2d'), [0, 0]);
            } else if (canvas == 'random') {
              return origGetLineDash.apply(fakecanvas.getContext('2d'), [
                Math.floor(Math.random() * fakewidth) + 1,
                Math.floor(Math.random() * fakeheight) + 1,
              ]);
            }
          };
          var canvas_c = scope.WebGLRenderingContext;
          var origReadPixels = canvas_c.prototype.readPixels;
          canvas_c.prototype.readPixels = function () {
            fakecanvas.title = 'readPixels';
            document.documentElement.appendChild(fakecanvas);
            if (canvas == 'block') return false;
            else if (canvas == 'blank') {
              fakecanvas.width = this.width;
              fakecanvas.height = this.height;
              return origReadPixels.apply(
                fakecanvas.getContext('webgl'),
                arguments
              );
            } else if (canvas == 'random') {
              return origReadPixels.apply(fakecanvas.getContext('webgl'), [
                Math.floor(Math.random() * fakewidth) + 1,
                Math.floor(Math.random() * fakeheight) + 1,
                Math.floor(Math.random() * fakewidth) + 1,
                Math.floor(Math.random() * fakeheight) + 1,
                arguments[4],
                arguments[5],
                arguments[6],
              ]);
            }
          };
        }
        /* Audio Block */
        if (audioblock == 'true') {
          var audioblock_triggerblock = scope.document.createElement('div');
          audioblock_triggerblock.className =
            'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_audio';
          var audioblock_a = scope.AudioBuffer;
          audioblock_a.prototype.copyFromChannel = function () {
            audioblock_triggerblock.title = 'copyFromChannel';
            document.documentElement.appendChild(audioblock_triggerblock);
            return false;
          };
          audioblock_a.prototype.getChannelData = function () {
            audioblock_triggerblock.title = 'getChannelData';
            document.documentElement.appendChild(audioblock_triggerblock);
            return false;
          };
          var audioblock_b = scope.AnalyserNode;
          audioblock_b.prototype.getFloatFrequencyData = function () {
            audioblock_triggerblock.title = 'getFloatFrequencyData';
            document.documentElement.appendChild(audioblock_triggerblock);
            return false;
          };
          audioblock_b.prototype.getByteFrequencyData = function () {
            audioblock_triggerblock.title = 'getByteFrequencyData';
            document.documentElement.appendChild(audioblock_triggerblock);
            return false;
          };
          audioblock_b.prototype.getFloatTimeDomainData = function () {
            audioblock_triggerblock.title = 'getFloatTimeDomainData';
            document.documentElement.appendChild(audioblock_triggerblock);
            return false;
          };
          audioblock_b.prototype.getByteTimeDomainData = function () {
            audioblock_triggerblock.title = 'getByteTimeDomainData';
            document.documentElement.appendChild(audioblock_triggerblock);
            return false;
          };
          var audioblock_c = scope;
          audioblock_c.AudioContext = function () {
            audioblock_triggerblock.title = 'AudioContext';
            document.documentElement.appendChild(audioblock_triggerblock);
            return false;
          };
          audioblock_c.webkitAudioContext = function () {
            audioblock_triggerblock.title = 'webkitAudioContext';
            document.documentElement.appendChild(audioblock_triggerblock);
            return false;
          };
        }
        /* Canvas Font */
        if (canvasfont == 'true') {
          var canvasfont_triggerblock = scope.document.createElement('div');
          canvasfont_triggerblock.className =
            'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_canvasfont';
          var canvasfont_a = scope.CanvasRenderingContext2D;
          canvasfont_a.prototype.measureText = function () {
            canvasfont_triggerblock.title = 'measureText';
            document.documentElement.appendChild(canvasfont_triggerblock);
            return false;
          };
        }
        /* Battery */
        if (battery == 'true') {
          var battery_triggerblock = scope.document.createElement('div');
          battery_triggerblock.className =
            'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_battery';
          var battery_a = scope.navigator;
          battery_a.getBattery = function () {
            battery_triggerblock.title = 'getBattery';
            document.documentElement.appendChild(battery_triggerblock);
            return void 0;
          };
        }
        /* WebGL */
        if (webgl == 'true') {
          var webgl_triggerblock = scope.document.createElement('div');
          webgl_triggerblock.className =
            'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_webgl';
          var webgl_a = scope.HTMLCanvasElement;
          var origGetContext = webgl_a.prototype.getContext;
          webgl_a.prototype.getContext = function (arg) {
            if (arg.match(/webgl/i)) {
              webgl_triggerblock.title = 'getContext';
              document.documentElement.appendChild(webgl_triggerblock);
              return false;
            }
            return origGetContext.apply(this, arguments);
          };
        }
        /* WebRTC */
        if (webrtcdevice == 'true') {
          var webrtc_triggerblock = scope.document.createElement('div');
          webrtc_triggerblock.className =
            'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_webrtc';
          var webrtc_a = scope.MediaStreamTrack;
          webrtc_a.getSources = function () {
            webrtc_triggerblock.title = 'getSources';
            document.documentElement.appendChild(webrtc_triggerblock);
            return false;
          };
          webrtc_a.getMediaDevices = function () {
            webrtc_triggerblock.title = 'getMediaDevices';
            document.documentElement.appendChild(webrtc_triggerblock);
            return false;
          };
          var webrtc_b = scope.navigator.mediaDevices;
          webrtc_b.enumerateDevices = function () {
            webrtc_triggerblock.title = 'enumerateDevices';
            document.documentElement.appendChild(webrtc_triggerblock);
            return false;
          };
        }
        /* Gamepad */
        if (gamepad == 'true') {
          var gamepad_triggerblock = scope.document.createElement('div');
          gamepad_triggerblock.className =
            'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_gamepad';
          var gamepad_a = scope.navigator;
          gamepad_a.getGamepads = function () {
            gamepad_triggerblock.title = 'getGamepads';
            document.documentElement.appendChild(gamepad_triggerblock);
            return false;
          };
        }
        /* WebVR */
        if (webvr == 'true') {
          var webvr_triggerblock = scope.document.createElement('div');
          webvr_triggerblock.className =
            'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_webvr';
          var webvr_a = scope.navigator;
          webvr_a.getVRDisplays = function () {
            webvr_triggerblock.title = 'getVRDisplays';
            document.documentElement.appendChild(webvr_triggerblock);
            return false;
          };
        }
        /* Bluetooth */
        if (bluetooth == 'true') {
          if (scope.navigator.bluetooth) {
            var bluetooth_triggerblock = scope.document.createElement('div');
            bluetooth_triggerblock.className =
              'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_bluetooth';
            var bluetooth_a = scope.navigator.bluetooth;
            bluetooth_a.requestDevice = function () {
              bluetooth_triggerblock.title = 'requestDevice';
              document.documentElement.appendChild(bluetooth_triggerblock);
              return false;
            };
          }
        }
        /* Client Rectangles */
        if (clientrects == 'true') {
          var clientrects_triggerblock = scope.document.createElement('div');
          clientrects_triggerblock.className =
            'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_clientrects';
          Element.prototype.getClientRects = function () {
            clientrects_triggerblock.title = 'getClientRects';
            document.documentElement.appendChild(clientrects_triggerblock);
            return [
              { top: 0, bottom: 0, left: 0, right: 0, height: 0, width: 0 },
            ];
          };
        }
        /* Timezone */
        if (timezone != 'false') {
          var timezone_triggerblock = scope.document.createElement('div');
          timezone_triggerblock.className =
            'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_timezone';
          var timezone_a = scope.Date;
          timezone_a.prototype.getTimezoneOffset = function () {
            timezone_triggerblock.title = 'getTimezoneOffset';
            document.documentElement.appendChild(timezone_triggerblock);
            if (timezone == 'random')
              return [
                '720',
                '660',
                '600',
                '570',
                '540',
                '480',
                '420',
                '360',
                '300',
                '240',
                '210',
                '180',
                '120',
                '60',
                '0',
                '-60',
                '-120',
                '-180',
                '-210',
                '-240',
                '-270',
                '-300',
                '-330',
                '-345',
                '-360',
                '-390',
                '-420',
                '-480',
                '-510',
                '-525',
                '-540',
                '-570',
                '-600',
                '-630',
                '-660',
                '-720',
                '-765',
                '-780',
                '-840',
              ][Math.floor(Math.random() * 39)];
            return timezone;
          };
        }
        /* Clipboard */
        if (clipboard == 'true') {
          var clipboard_triggerblock = scope.document.createElement('div');
          clipboard_triggerblock.className =
            'scriptsafe_oiigbmnaadbkfbmpbfijlflahbdbdgdf_clipboard';
          var clipboard_a = document;
          var origExecCommand = clipboard_a.execCommand;
          clipboard_a.execCommand = function () {
            clipboard_triggerblock.title = 'execCommand';
            document.documentElement.appendChild(clipboard_triggerblock);
            if (arguments[0] == 'cut' || arguments[0] == 'copy') return false;
            return origExecCommand.apply(this, arguments);
          };
        }
      }
      processFunctions(window);
      var iwin = HTMLIFrameElement.prototype.__lookupGetter__('contentWindow'),
        idoc = HTMLIFrameElement.prototype.__lookupGetter__('contentDocument');
      Object.defineProperties(HTMLIFrameElement.prototype, {
        contentWindow: {
          get: function () {
            var frame = iwin.apply(this);
            if (
              this.src &&
              this.src.indexOf('//') != -1 &&
              location.host != this.src.split('/')[2]
            )
              return frame;
            try {
              frame.HTMLCanvasElement;
            } catch (err) {
              /* do nothing*/
            }
            processFunctions(frame);
            return frame;
          },
        },
        contentDocument: {
          get: function () {
            if (
              this.src &&
              this.src.indexOf('//') != -1 &&
              location.host != this.src.split('/')[2]
            )
              return idoc.apply(this);
            var frame = iwin.apply(this);
            try {
              frame.HTMLCanvasElement;
            } catch (err) {
              /* do nothing*/
            }
            processFunctions(frame);
            return idoc.apply(this);
          },
        },
      });
    })(
      'block',
      'true',
      'true',
      'true',
      'true',
      'true',
      'true',
      'true',
      'true',
      'false',
      'true',
      'true',
      'true'
    );
  </script>
</html>
6
packages/common/native/fixtures/sample.html.0.md
Normal file
@@ -0,0 +1,6 @@
Example Domain

This domain is for use in illustrative examples in documents. You may
use this domain in literature without prior coordination or asking for
permission.
More information...
BIN
packages/common/native/fixtures/sample.pdf
Normal file
Binary file not shown.
17
packages/common/native/fixtures/sample.pdf.0.md
Normal file
@@ -0,0 +1,17 @@


Sample PDF
This is a simple PDF file. Fun fun fun.

Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Phasellus facilisis odio sed mi.
Curabitur suscipit. Nullam vel nisi. Etiam semper ipsum ut lectus. Proin aliquam, erat eget
pharetra commodo, eros mi condimentum quam, sed commodo justo quam ut velit.
Integer a erat. Cras laoreet ligula cursus enim. Aenean scelerisque velit et tellus.
Vestibulum dictum aliquet sem. Nulla facilisi. Vestibulum accumsan ante vitae elit. Nulla
erat dolor, blandit in, rutrum quis, semper pulvinar, enim. Nullam varius congue risus.
Vivamus sollicitudin, metus ut interdum eleifend, nisi tellus pellentesque elit, tristique
accumsan eros quam et risus. Suspendisse libero odio, mattis sit amet, aliquet eget,
hendrerit vel, nulla. Sed vitae augue. Aliquam erat volutpat. Aliquam feugiat vulputate nisl.
Suspendisse quis nulla pretium ante pretium mollis. Proin velit ligula, sagittis at, egestas a,
pulvinar quis, nisl.
9
packages/common/native/fixtures/sample.pdf.1.md
Normal file
@@ -0,0 +1,9 @@
Pellentesque sit amet lectus. Praesent pulvinar, nunc quis iaculis sagittis, justo quam
lobortis tortor, sed vestibulum dui metus venenatis est. Nunc cursus ligula. Nulla facilisi.
Phasellus ullamcorper consectetuer ante. Duis tincidunt, urna id condimentum luctus, nibh
ante vulputate sapien, id sagittis massa orci ut enim. Pellentesque vestibulum convallis
sem. Nulla consequat quam ut nisl. Nullam est. Curabitur tincidunt dapibus lorem. Proin
velit turpis, scelerisque sit amet, iaculis nec, rhoncus ac, ipsum. Phasellus lorem arcu,
feugiat eu, gravida eu, consequat molestie, ipsum. Nullam vel est ut ipsum volutpat
feugiat. Aenean pellentesque.
16
packages/common/native/fixtures/sample.pdf.2.md
Normal file
@@ -0,0 +1,16 @@
In mauris. Pellentesque dui nisi, iaculis eu, rhoncus in, venenatis ac, ante. Ut odio justo,
scelerisque vel, facilisis non, commodo a, pede. Cras nec massa sit amet tortor volutpat
varius. Donec lacinia, neque a luctus aliquet, pede massa imperdiet ante, at varius lorem
pede sed sapien. Fusce erat nibh, aliquet in, eleifend eget, commodo eget, erat. Fusce
consectetuer. Cras risus tortor, porttitor nec, tristique sed, convallis semper, eros. Fusce
vulputate ipsum a mauris. Phasellus mollis. Curabitur sed urna. Aliquam nec sapien non
nibh pulvinar convallis. Vivamus facilisis augue quis quam. Proin cursus aliquet metus.
Suspendisse lacinia. Nulla at tellus ac turpis eleifend scelerisque. Maecenas a pede vitae
enim commodo interdum. Donec odio. Sed sollicitudin dui vitae justo.

Morbi elit nunc, facilisis a, mollis a, molestie at, lectus. Suspendisse eget mauris eu tellus
molestie cursus. Duis ut magna at justo dignissim condimentum. Cum sociis natoque
penatibus et magnis dis parturient montes, nascetur ridiculus mus. Vivamus varius. Ut sit
amet diam suscipit mauris ornare aliquam. Sed varius. Duis arcu. Etiam tristique massa
eget dui. Phasellus congue. Aenean est erat, tincidunt eget, venenatis quis, commodo at,
quam.
10
packages/common/native/fixtures/sample.rs
Normal file
@@ -0,0 +1,10 @@
fn factorial(n: u64) -> u64 {
  if n == 0 {
    return 1;
  }
  n * factorial(n - 1)
}

fn main() {
  println!("Hello, world!");
}
6
packages/common/native/fixtures/sample.rs.0.md
Normal file
@@ -0,0 +1,6 @@
fn factorial(n: u64) -> u64 {
  if n == 0 {
    return 1;
  }
  n * factorial(n - 1)
}
3
packages/common/native/fixtures/sample.rs.1.md
Normal file
@@ -0,0 +1,3 @@
fn main() {
  println!("Hello, world!");
}
3
packages/common/native/fixtures/sample.ts
Normal file
@@ -0,0 +1,3 @@
export default function sample() {
  return 'sample';
}
3
packages/common/native/fixtures/sample.ts.0.md
Normal file
@@ -0,0 +1,3 @@
export default function sample() {
  return 'sample';
}
169
packages/common/native/src/doc_loader/document.rs
Normal file
@@ -0,0 +1,169 @@
use std::{io::Cursor, path::PathBuf};

use path_ext::PathExt;

use super::*;

#[derive(Clone, Default)]
pub struct Chunk {
  pub index: usize,
  pub content: String,
  pub start: Option<usize>,
  pub end: Option<usize>,
}

pub struct DocOptions {
  code_threshold: u64,
}

impl Default for DocOptions {
  fn default() -> Self {
    Self {
      code_threshold: 1000,
    }
  }
}

pub struct Doc {
  pub name: String,
  pub chunks: Vec<Chunk>,
}

impl Doc {
  pub fn new(file_path: &str, doc: &[u8]) -> Option<Self> {
    Self::with_options(file_path, doc, DocOptions::default())
  }

  pub fn with_options(file_path: &str, doc: &[u8], options: DocOptions) -> Option<Self> {
    if let Some(kind) =
      infer::get(&doc[..4096.min(doc.len())]).or(infer::get_from_path(file_path).ok().flatten())
    {
      if kind.extension() == "pdf" {
        return Self::load_pdf(file_path, doc);
      } else if kind.extension() == "docx" {
        return Self::load_docx(file_path, doc);
      } else if kind.extension() == "html" {
        return Self::load_html(file_path, doc);
      }
    } else if let Ok(string) = String::from_utf8(doc.to_vec()).or_else(|_| {
      String::from_utf16(
        &doc
          .chunks_exact(2)
          .map(|b| u16::from_le_bytes([b[0], b[1]]))
          .collect::<Vec<_>>(),
      )
    }) {
      let path = PathBuf::from(file_path);
      match path.ext_str() {
        "md" => {
          let loader = TextLoader::new(string);
          let splitter = MarkdownSplitter::default();
          return Self::from_loader(file_path, loader, splitter).ok();
        }
        "rs" | "c" | "cpp" | "h" | "hpp" | "js" | "ts" | "tsx" | "go" | "py" => {
          let name = path.full_str().to_string();
          let loader =
            SourceCodeLoader::from_string(string).with_parser_option(LanguageParserOptions {
              language: get_language_by_filename(&name).ok()?,
              parser_threshold: options.code_threshold,
            });
          let splitter = TokenSplitter::default();
          return Self::from_loader(file_path, loader, splitter).ok();
        }
        _ => {}
      }
      let loader = TextLoader::new(string);
      let splitter = TokenSplitter::default();
      return Self::from_loader(file_path, loader, splitter).ok();
    }
    None
  }

  fn from_loader(
    file_path: &str,
    loader: impl Loader,
    splitter: impl TextSplitter + 'static,
  ) -> Result<Doc, LoaderError> {
    let name = file_path.to_string();
    let chunks = Self::get_chunks_from_loader(loader, splitter)?;
    Ok(Self { name, chunks })
  }

  fn get_chunks_from_loader(
    loader: impl Loader,
    splitter: impl TextSplitter + 'static,
  ) -> Result<Vec<Chunk>, LoaderError> {
    let docs = loader.load_and_split(splitter)?;
    Ok(
      docs
        .into_iter()
        .enumerate()
        .map(|(index, d)| Chunk {
          index,
          content: d.page_content,
          ..Chunk::default()
        })
        .collect(),
    )
  }

  fn load_docx(file_path: &str, doc: &[u8]) -> Option<Self> {
    let loader = DocxLoader::new(Cursor::new(doc))?;
    let splitter = TokenSplitter::default();
    Self::from_loader(file_path, loader, splitter).ok()
  }

  fn load_html(file_path: &str, doc: &[u8]) -> Option<Self> {
    let loader = HtmlLoader::from_string(
      String::from_utf8(doc.to_vec()).ok()?,
      Url::parse(file_path)
        .or(Url::parse("https://example.com/"))
        .ok()?,
    );
    let splitter = TokenSplitter::default();
    Self::from_loader(file_path, loader, splitter).ok()
  }

  fn load_pdf(file_path: &str, doc: &[u8]) -> Option<Self> {
    let loader = PdfExtractLoader::new(Cursor::new(doc)).ok()?;
    let splitter = TokenSplitter::default();
    Self::from_loader(file_path, loader, splitter).ok()
  }
}

#[cfg(test)]
mod tests {
  use std::{
    fs::{read, read_to_string},
    path::PathBuf,
  };

  use super::*;

  const FIXTURES: [&str; 6] = [
    "demo.docx",
    "sample.pdf",
    "sample.html",
    "sample.rs",
    "sample.c",
    "sample.ts",
  ];

  fn get_fixtures() -> PathBuf {
    PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("fixtures")
  }

  #[test]
  fn test_fixtures() {
    let fixtures = get_fixtures();
    for fixture in FIXTURES.iter() {
      let buffer = read(fixtures.join(fixture)).unwrap();
      let doc = Doc::with_options(fixture, &buffer, DocOptions { code_threshold: 0 }).unwrap();
      for chunk in doc.chunks.iter() {
        let output =
          read_to_string(fixtures.join(format!("{}.{}.md", fixture, chunk.index))).unwrap();
        assert_eq!(chunk.content, output);
      }
    }
  }
}
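A short usage sketch of the API above (not part of the diff): `Doc::with_options` first sniffs the content type with `infer`, then falls back to UTF-8/UTF-16 text handling, and returns `None` for anything it cannot decode, so callers branch on the `Option` rather than an error type.

```rust
// Sketch only: load a file from disk and print each chunk.
// Doc and DocOptions are the types defined in document.rs above.
use std::fs::read;

fn print_chunks(path: &str) -> Option<()> {
  let bytes = read(path).ok()?;
  // The default code_threshold is 1000; the test above passes
  // `code_threshold: 0` so the small code fixtures are parsed too.
  let doc = Doc::with_options(path, &bytes, DocOptions::default())?;
  for chunk in doc.chunks {
    println!("chunk {}: {} bytes", chunk.index, chunk.content.len());
  }
  Some(())
}
```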
71
packages/common/native/src/doc_loader/loader/docx.rs
Normal file
@@ -0,0 +1,71 @@
use docx_parser::MarkdownDocument;

use super::*;

#[derive(Debug)]
pub struct DocxLoader {
  document: MarkdownDocument,
}

impl DocxLoader {
  pub fn new<R: Read + Seek>(reader: R) -> Option<Self> {
    Some(Self {
      document: MarkdownDocument::from_reader(reader)?,
    })
  }

  fn extract_text(&self) -> String {
    self.document.to_markdown(false)
  }

  fn extract_text_to_doc(&self) -> Document {
    Document::new(self.extract_text())
  }
}

impl Loader for DocxLoader {
  fn load(self) -> Result<Vec<Document>, LoaderError> {
    let doc = self.extract_text_to_doc();
    Ok(vec![doc])
  }
}

#[cfg(test)]
mod tests {
  use std::{fs::read, io::Cursor, path::PathBuf};

  use super::*;

  fn get_fixtures_path() -> PathBuf {
    PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("fixtures")
  }

  #[test]
  fn test_parse_docx() {
    let docx_buffer = include_bytes!("../../../fixtures/demo.docx");
    let parsed_buffer = include_str!("../../../fixtures/demo.docx.md");

    {
      let loader = DocxLoader::new(Cursor::new(docx_buffer)).unwrap();

      let documents = loader.load().unwrap();

      assert_eq!(documents.len(), 1);
      assert_eq!(documents[0].page_content, parsed_buffer);
    }

    {
      let loader = DocxLoader::new(Cursor::new(docx_buffer)).unwrap();
      let documents = loader.load_and_split(TokenSplitter::default()).unwrap();

      for (idx, doc) in documents.into_iter().enumerate() {
        assert_eq!(
          doc.page_content,
          String::from_utf8_lossy(
            &read(get_fixtures_path().join(format!("demo.docx.{}.md", idx))).unwrap()
          )
        );
      }
    }
  }
}
42
packages/common/native/src/doc_loader/loader/error.rs
Normal file
@@ -0,0 +1,42 @@
use std::{io, str::Utf8Error, string::FromUtf8Error};

use thiserror::Error;

/**
 * modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/document_loaders
 */
use super::*;

#[derive(Error, Debug)]
pub enum LoaderError {
  #[error("{0}")]
  TextSplitterError(#[from] TextSplitterError),

  #[error(transparent)]
  IOError(#[from] io::Error),

  #[error(transparent)]
  Utf8Error(#[from] Utf8Error),

  #[error(transparent)]
  FromUtf8Error(#[from] FromUtf8Error),

  #[cfg(feature = "pdf-extract")]
  #[error(transparent)]
  PdfExtractError(#[from] pdf_extract::Error),

  #[cfg(feature = "pdf-extract")]
  #[error(transparent)]
  PdfExtractOutputError(#[from] pdf_extract::OutputError),

  #[error(transparent)]
  ReadabilityError(#[from] readability::error::Error),

  #[error("Unsupported source language")]
  UnsupportedLanguage,

  #[error("Error: {0}")]
  OtherError(String),
}

pub type LoaderResult<T> = Result<T, LoaderError>;
87
packages/common/native/src/doc_loader/loader/html.rs
Normal file
@@ -0,0 +1,87 @@
use std::{collections::HashMap, io::Cursor};

use serde_json::Value;

/**
 * modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/document_loaders
 */
use super::*;
#[derive(Debug, Clone)]
pub struct HtmlLoader<R> {
  html: R,
  url: Url,
}

impl HtmlLoader<Cursor<Vec<u8>>> {
  pub fn from_string<S: Into<String>>(input: S, url: Url) -> Self {
    let input = input.into();
    let reader = Cursor::new(input.into_bytes());
    Self::new(reader, url)
  }
}

impl<R: Read> HtmlLoader<R> {
  pub fn new(html: R, url: Url) -> Self {
    Self { html, url }
  }
}

impl<R: Read + Send + Sync + 'static> Loader for HtmlLoader<R> {
  fn load(mut self) -> Result<Vec<Document>, LoaderError> {
    let cleaned_html = readability::extractor::extract(&mut self.html, &self.url)?;
    let doc =
      Document::new(format!("{}\n{}", cleaned_html.title, cleaned_html.text)).with_metadata(
        HashMap::from([("source".to_string(), Value::from(self.url.as_str()))]),
      );

    Ok(vec![doc])
  }
}

#[cfg(test)]
mod tests {
  use super::*;

  #[test]
  fn test_html_loader() {
    let input = "<p>Hello world!</p>";

    let html_loader = HtmlLoader::new(
      input.as_bytes(),
      Url::parse("https://example.com/").unwrap(),
    );

    let documents = html_loader.load().unwrap();

    let expected = "\nHello world!";

    assert_eq!(documents.len(), 1);
    assert_eq!(
      documents[0].metadata.get("source").unwrap(),
      &Value::from("https://example.com/")
    );
    assert_eq!(documents[0].page_content, expected);
  }

  #[test]
  fn test_html_load_from_path() {
    let buffer = include_bytes!("../../../fixtures/sample.html");
    let html_loader = HtmlLoader::new(
      Cursor::new(buffer),
      Url::parse("https://example.com/").unwrap(),
    );

    let documents = html_loader.load().unwrap();

    let expected = "Example Domain\n\n This domain is for use in illustrative examples in \
                    documents. You may\n use this domain in literature without prior \
                    coordination or asking for\n permission.\n More information...";

    assert_eq!(documents.len(), 1);
    assert_eq!(
      documents[0].metadata.get("source").unwrap(),
      &Value::from("https://example.com/")
    );
    assert_eq!(documents[0].page_content, expected);
  }
}
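A sketch of the string-based entry point, for illustration only: readability strips the markup, and the URL ends up in the "source" metadata entry that `load` writes. The helper name is an assumption.

// Hypothetical helper: clean an HTML snippet and keep its origin URL
// in the "source" metadata entry written by HtmlLoader::load.
fn html_to_document(raw_html: &str, origin: &str) -> Option<Document> {
  let url = Url::parse(origin).ok()?;
  let documents = HtmlLoader::from_string(raw_html, url).load().ok()?;
  documents.into_iter().next()
}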
33
packages/common/native/src/doc_loader/loader/mod.rs
Normal file
@@ -0,0 +1,33 @@
mod docx;
mod error;
mod html;
mod pdf;
mod source;
mod text;

use std::io::{Read, Seek};

use super::*;

// modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/document_loaders
pub trait Loader: Send + Sync {
  fn load(self) -> Result<Vec<Document>, LoaderError>;
  fn load_and_split<TS: TextSplitter + 'static>(
    self,
    splitter: TS,
  ) -> Result<Vec<Document>, LoaderError>
  where
    Self: Sized,
  {
    let docs = self.load()?;
    Ok(splitter.split_documents(&docs)?)
  }
}

pub use docx::DocxLoader;
pub use error::{LoaderError, LoaderResult};
pub use html::HtmlLoader;
pub use pdf::PdfExtractLoader;
pub use source::{get_language_by_filename, LanguageParserOptions, SourceCodeLoader};
pub use text::TextLoader;
pub use url::Url;
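Because `load_and_split` is a provided method, a new loader only has to implement `load`. A minimal hypothetical implementor, for illustration only (not part of this commit):

// Hypothetical loader: every non-empty line of the input becomes one
// Document; load_and_split then comes for free from the trait.
struct LinesLoader {
  content: String,
}

impl Loader for LinesLoader {
  fn load(self) -> Result<Vec<Document>, LoaderError> {
    Ok(
      self
        .content
        .lines()
        .filter(|line| !line.trim().is_empty())
        .map(Document::new)
        .collect(),
    )
  }
}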
70
packages/common/native/src/doc_loader/loader/pdf.rs
Normal file
@@ -0,0 +1,70 @@
use pdf_extract::{output_doc, output_doc_encrypted, PlainTextOutput};

/**
 * modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/document_loaders
 */
use super::*;

#[derive(Debug, Clone)]
pub struct PdfExtractLoader {
  document: pdf_extract::Document,
}

impl PdfExtractLoader {
  pub fn new<R: Read>(reader: R) -> Result<Self, LoaderError> {
    let document = pdf_extract::Document::load_from(reader)
      .map_err(|e| LoaderError::OtherError(e.to_string()))?;
    Ok(Self { document })
  }
}

impl PdfExtractLoader {
  fn extract_text(&self) -> Result<String, LoaderError> {
    let mut doc = self.document.clone();
    let mut buffer: Vec<u8> = Vec::new();
    let mut output = PlainTextOutput::new(&mut buffer as &mut dyn std::io::Write);
    if doc.is_encrypted() {
      output_doc_encrypted(&mut doc, &mut output, "")?;
    } else {
      output_doc(&doc, &mut output)?;
    }
    Ok(String::from_utf8(buffer)?)
  }

  fn extract_text_to_doc(&self) -> Result<Document, LoaderError> {
    let text = self.extract_text()?;
    Ok(Document::new(text))
  }
}

impl Loader for PdfExtractLoader {
  fn load(self) -> Result<Vec<Document>, LoaderError> {
    let doc = self.extract_text_to_doc()?;
    Ok(vec![doc])
  }
}

#[cfg(test)]
mod tests {
  use std::{fs::read, io::Cursor, path::PathBuf};

  use super::*;

  #[test]
  fn test_parse_pdf() {
    let fixtures = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("fixtures");
    let buffer = read(fixtures.join("sample.pdf")).unwrap();

    let reader = Cursor::new(buffer);
    let loader = PdfExtractLoader::new(reader).expect("Failed to create PdfExtractLoader");

    let docs = loader.load().unwrap();

    assert_eq!(docs.len(), 1);
    assert_eq!(
      &docs[0].page_content[..100],
      "\n\nSample PDF\nThis is a simple PDF file. Fun fun fun.\n\nLorem ipsum dolor sit amet, \
       consectetuer a"
    );
  }
}
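Note that `extract_text` retries encrypted documents with an empty password, so PDFs that are merely flagged as encrypted still load. An end-to-end sketch under assumed names (`pdf_chunks` and the path argument are illustrative):

use std::{fs::read, io::Cursor};

// Hypothetical helper: extract plain text from a PDF on disk and split it;
// the io::Error from `read` converts into LoaderError via #[from].
fn pdf_chunks(pdf_path: &str) -> Result<Vec<Document>, LoaderError> {
  let bytes = read(pdf_path)?;
  let loader = PdfExtractLoader::new(Cursor::new(bytes))?;
  loader.load_and_split(TokenSplitter::default())
}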
61
packages/common/native/src/doc_loader/loader/source/mod.rs
Normal file
@@ -0,0 +1,61 @@
/**
 * modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/document_loaders
 */
mod parser;

pub use parser::{get_language_by_filename, LanguageParser, LanguageParserOptions};

use super::*;

#[derive(Debug, Clone)]
pub struct SourceCodeLoader {
  content: String,
  parser_option: LanguageParserOptions,
}

impl SourceCodeLoader {
  pub fn from_string<S: Into<String>>(input: S) -> Self {
    Self {
      content: input.into(),
      parser_option: LanguageParserOptions::default(),
    }
  }
}

impl SourceCodeLoader {
  pub fn with_parser_option(mut self, parser_option: LanguageParserOptions) -> Self {
    self.parser_option = parser_option;
    self
  }
}

impl Loader for SourceCodeLoader {
  fn load(self) -> Result<Vec<Document>, LoaderError> {
    let options = self.parser_option.clone();

    let docs = LanguageParser::from_language(options.language)
      .with_parser_threshold(options.parser_threshold)
      .parse_code(&self.content)?;

    Ok(docs)
  }
}

#[cfg(test)]
mod tests {
  use parser::Language;

  use super::*;

  #[test]
  fn test_source_code_loader() {
    let content = include_str!("../../../../fixtures/sample.rs");
    let loader = SourceCodeLoader::from_string(content).with_parser_option(LanguageParserOptions {
      language: Language::Rust,
      ..Default::default()
    });

    let documents_with_content = loader.load().unwrap();
    assert_eq!(documents_with_content.len(), 1);
  }
}
246
packages/common/native/src/doc_loader/loader/source/parser.rs
Normal file
@@ -0,0 +1,246 @@
use std::{collections::HashMap, fmt::Debug, string::ToString};

use strum_macros::Display;
use tree_sitter::{Parser, Tree};

/**
 * modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/document_loaders
 */
use super::*;

#[derive(Display, Debug, Clone)]
pub enum Language {
  Rust,
  C,
  Cpp,
  Javascript,
  Typescript,
  Go,
  Python,
}

pub enum LanguageContentTypes {
  SimplifiedCode,
  FunctionsImpls,
}

impl std::fmt::Display for LanguageContentTypes {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    write!(
      f,
      "{}",
      match self {
        LanguageContentTypes::SimplifiedCode => "simplified_code",
        LanguageContentTypes::FunctionsImpls => "functions_impls",
      }
    )
  }
}

#[derive(Debug, Clone)]
pub struct LanguageParserOptions {
  pub parser_threshold: u64,
  pub language: Language,
}

impl Default for LanguageParserOptions {
  fn default() -> Self {
    Self {
      parser_threshold: 1000,
      language: Language::Rust,
    }
  }
}

pub struct LanguageParser {
  parser: Parser,
  parser_options: LanguageParserOptions,
}

impl Debug for LanguageParser {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    write!(
      f,
      "LanguageParser {{ language: {:?} }}",
      self.parser_options.language
    )
  }
}

impl Clone for LanguageParser {
  fn clone(&self) -> Self {
    LanguageParser {
      parser: get_language_parser(&self.parser_options.language),
      parser_options: self.parser_options.clone(),
    }
  }
}

pub fn get_language_by_filename(name: &str) -> LoaderResult<Language> {
  let extension = name
    .split('.')
    .last()
    .ok_or(LoaderError::UnsupportedLanguage)?;
  let language = match extension.to_lowercase().as_str() {
    "rs" => Language::Rust,
    "c" => Language::C,
    "cpp" => Language::Cpp,
    "h" => Language::C,
    "hpp" => Language::Cpp,
    "js" => Language::Javascript,
    "ts" => Language::Typescript,
    "tsx" => Language::Typescript,
    "go" => Language::Go,
    "py" => Language::Python,
    _ => return Err(LoaderError::UnsupportedLanguage),
  };
  Ok(language)
}

fn get_language_parser(language: &Language) -> Parser {
  let mut parser = Parser::new();
  let lang = match language {
    Language::Rust => tree_sitter_rust::LANGUAGE,
    Language::C => tree_sitter_c::LANGUAGE,
    Language::Cpp => tree_sitter_cpp::LANGUAGE,
    Language::Javascript => tree_sitter_javascript::LANGUAGE,
    Language::Typescript => tree_sitter_typescript::LANGUAGE_TSX,
    Language::Go => tree_sitter_go::LANGUAGE,
    Language::Python => tree_sitter_python::LANGUAGE,
  };
  parser
    .set_language(&lang.into())
    .unwrap_or_else(|_| panic!("Error loading grammar for language: {:?}", language));
  parser
}

impl LanguageParser {
  pub fn from_language(language: Language) -> Self {
    Self {
      parser: get_language_parser(&language),
      parser_options: LanguageParserOptions {
        language,
        ..LanguageParserOptions::default()
      },
    }
  }

  pub fn with_parser_threshold(mut self, threshold: u64) -> Self {
    self.parser_options.parser_threshold = threshold;
    self
  }
}

impl LanguageParser {
  pub fn parse_code(&mut self, code: &String) -> LoaderResult<Vec<Document>> {
    let tree = self
      .parser
      .parse(code, None)
      .ok_or(LoaderError::UnsupportedLanguage)?;
    if self.parser_options.parser_threshold > tree.root_node().end_position().row as u64 {
      return Ok(vec![Document::new(code).with_metadata(HashMap::from([
        (
          "content_type".to_string(),
          serde_json::Value::from(LanguageContentTypes::SimplifiedCode.to_string()),
        ),
        (
          "language".to_string(),
          serde_json::Value::from(self.parser_options.language.to_string()),
        ),
      ]))]);
    }
    self.extract_functions_classes(tree, code)
  }

  pub fn extract_functions_classes(
    &self,
    tree: Tree,
    code: &String,
  ) -> LoaderResult<Vec<Document>> {
    let mut chunks = Vec::new();

    let count = tree.root_node().child_count();
    for i in 0..count {
      let Some(node) = tree.root_node().child(i) else {
        continue;
      };
      let source_code = node.utf8_text(code.as_bytes())?.to_string();
      let lang_meta = (
        "language".to_string(),
        serde_json::Value::from(self.parser_options.language.to_string()),
      );
      if node.kind() == "function_item" || node.kind() == "impl_item" {
        let doc = Document::new(source_code).with_metadata(HashMap::from([
          lang_meta.clone(),
          (
            "content_type".to_string(),
            serde_json::Value::from(LanguageContentTypes::FunctionsImpls.to_string()),
          ),
        ]));
        chunks.push(doc);
      } else {
        let doc = Document::new(source_code).with_metadata(HashMap::from([
          lang_meta.clone(),
          (
            "content_type".to_string(),
            serde_json::Value::from(LanguageContentTypes::SimplifiedCode.to_string()),
          ),
        ]));
        chunks.push(doc);
      }
    }
    Ok(chunks)
  }
}

#[cfg(test)]
mod tests {
  use super::*;

  #[test]
  fn test_code_parser() {
    let code = r#"
    fn main() {
      println!("Hello, world!");
    }

    pub struct Person {
      name: String,
      age: i32,
    }

    impl Person {
      pub fn new(name: String, age: i32) -> Self {
        Self { name, age }
      }

      pub fn get_name(&self) -> &str {
        &self.name
      }

      pub fn get_age(&self) -> i32 {
        self.age
      }
    }
    "#;

    let mut parser = LanguageParser::from_language(Language::Rust);

    let documents = parser.parse_code(&code.to_string()).unwrap();
    assert_eq!(documents.len(), 1);

    // Set the parser threshold to 10 for testing
    let mut parser = parser.with_parser_threshold(10);

    let documents = parser.parse_code(&code.to_string()).unwrap();
    assert_eq!(documents.len(), 3);
    assert_eq!(
      documents[0].page_content,
      "fn main() {\n println!(\"Hello, world!\");\n }"
    );
    assert_eq!(
      documents[1].metadata.get("content_type").unwrap(),
      LanguageContentTypes::SimplifiedCode.to_string().as_str()
    );
  }
}
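Tying the pieces together, a hypothetical sketch of going from a filename to parsed chunks (the helper name is an assumption). Because `parse_code` only falls back to a single "simplified_code" document when `parser_threshold` exceeds the root node's end row, a threshold of 0 always splits out top-level items:

// Hypothetical helper: pick a grammar from the file extension, then parse.
// Unknown extensions surface as LoaderError::UnsupportedLanguage.
fn code_chunks(filename: &str, content: &str) -> LoaderResult<Vec<Document>> {
  let language = get_language_by_filename(filename)?;
  SourceCodeLoader::from_string(content)
    .with_parser_option(LanguageParserOptions {
      language,
      parser_threshold: 0, // always split into per-item documents
    })
    .load()
}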
24
packages/common/native/src/doc_loader/loader/text.rs
Normal file
@@ -0,0 +1,24 @@
/**
 * modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/document_loaders
 */
use super::*;

#[derive(Debug, Clone)]
pub struct TextLoader {
  content: String,
}

impl TextLoader {
  pub fn new<T: Into<String>>(input: T) -> Self {
    Self {
      content: input.into(),
    }
  }
}

impl Loader for TextLoader {
  fn load(self) -> Result<Vec<Document>, LoaderError> {
    let doc = Document::new(self.content);
    Ok(vec![doc])
  }
}
12
packages/common/native/src/doc_loader/mod.rs
Normal file
@@ -0,0 +1,12 @@
mod document;
mod loader;
mod splitter;
mod types;

pub use document::{Chunk, Doc};
use loader::{
  get_language_by_filename, DocxLoader, HtmlLoader, LanguageParserOptions, Loader, LoaderError,
  PdfExtractLoader, SourceCodeLoader, TextLoader, Url,
};
use splitter::{MarkdownSplitter, TextSplitter, TextSplitterError, TokenSplitter};
use types::Document;
35
packages/common/native/src/doc_loader/splitter/error.rs
Normal file
@@ -0,0 +1,35 @@
/**
 * modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/text_splitter
 */
use text_splitter::ChunkConfigError;
use thiserror::Error;

#[derive(Error, Debug)]
pub enum TextSplitterError {
  #[error("Empty input text")]
  EmptyInputText,

  #[error("Mismatch metadata and text")]
  MetadataTextMismatch,

  #[error("Tokenizer not found")]
  TokenizerNotFound,

  #[error("Tokenizer creation failed due to invalid tokenizer")]
  InvalidTokenizer,

  #[error("Tokenizer creation failed due to invalid model")]
  InvalidModel,

  #[error("Invalid chunk overlap and size")]
  InvalidSplitterOptions,

  #[error("Error: {0}")]
  OtherError(String),
}

impl From<ChunkConfigError> for TextSplitterError {
  fn from(_: ChunkConfigError) -> Self {
    Self::InvalidSplitterOptions
  }
}
36
packages/common/native/src/doc_loader/splitter/markdown.rs
Normal file
@@ -0,0 +1,36 @@
use text_splitter::ChunkConfig;

/**
 * modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/text_splitter
 */
use super::*;

pub struct MarkdownSplitter {
  splitter_options: SplitterOptions,
}

impl Default for MarkdownSplitter {
  fn default() -> Self {
    MarkdownSplitter::new(SplitterOptions::default())
  }
}

impl MarkdownSplitter {
  pub fn new(options: SplitterOptions) -> MarkdownSplitter {
    MarkdownSplitter {
      splitter_options: options,
    }
  }
}

impl TextSplitter for MarkdownSplitter {
  fn split_text(&self, text: &str) -> Result<Vec<String>, TextSplitterError> {
    let chunk_config = ChunkConfig::try_from(&self.splitter_options)?;
    Ok(
      text_splitter::MarkdownSplitter::new(chunk_config)
        .chunks(text)
        .map(|x| x.to_string())
        .collect(),
    )
  }
}
58
packages/common/native/src/doc_loader/splitter/mod.rs
Normal file
@@ -0,0 +1,58 @@
/**
 * modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/text_splitter
 */
mod error;
mod markdown;
mod options;
mod token;

use std::collections::HashMap;

pub use error::TextSplitterError;
pub use markdown::MarkdownSplitter;
use options::SplitterOptions;
use serde_json::Value;
pub use token::TokenSplitter;

use super::*;

pub trait TextSplitter: Send + Sync {
  fn split_text(&self, text: &str) -> Result<Vec<String>, TextSplitterError>;

  fn split_documents(&self, documents: &[Document]) -> Result<Vec<Document>, TextSplitterError> {
    let mut texts: Vec<String> = Vec::new();
    let mut metadatas: Vec<HashMap<String, Value>> = Vec::new();
    documents.iter().for_each(|d| {
      texts.push(d.page_content.clone());
      metadatas.push(d.metadata.clone());
    });

    self.create_documents(&texts, &metadatas)
  }

  fn create_documents(
    &self,
    text: &[String],
    metadatas: &[HashMap<String, Value>],
  ) -> Result<Vec<Document>, TextSplitterError> {
    let mut metadatas = metadatas.to_vec();
    if metadatas.is_empty() {
      metadatas = vec![HashMap::new(); text.len()];
    }

    if text.len() != metadatas.len() {
      return Err(TextSplitterError::MetadataTextMismatch);
    }

    let mut documents: Vec<Document> = Vec::new();
    for i in 0..text.len() {
      let chunks = self.split_text(&text[i])?;
      for chunk in chunks {
        let document = Document::new(chunk).with_metadata(metadatas[i].clone());
        documents.push(document);
      }
    }

    Ok(documents)
  }
}
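Only `split_text` is required; `split_documents` and `create_documents` then carry each source document's metadata onto every chunk produced from it. A hypothetical splitter, for illustration only:

// Hypothetical splitter: naive split on blank lines, no token counting.
// The metadata fan-out still comes from the trait's default methods.
struct ParagraphSplitter;

impl TextSplitter for ParagraphSplitter {
  fn split_text(&self, text: &str) -> Result<Vec<String>, TextSplitterError> {
    Ok(
      text
        .split("\n\n")
        .filter(|part| !part.trim().is_empty())
        .map(str::to_string)
        .collect(),
    )
  }
}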
96
packages/common/native/src/doc_loader/splitter/options.rs
Normal file
@@ -0,0 +1,96 @@
/**
 * modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/text_splitter
 */
use text_splitter::ChunkConfig;
use tiktoken_rs::{get_bpe_from_model, get_bpe_from_tokenizer, tokenizer::Tokenizer, CoreBPE};

use super::TextSplitterError;

// Options is a struct that contains options for a text splitter.
#[derive(Debug, Clone)]
pub struct SplitterOptions {
  pub chunk_size: usize,
  pub chunk_overlap: usize,
  pub model_name: String,
  pub encoding_name: String,
  pub trim_chunks: bool,
}

impl Default for SplitterOptions {
  fn default() -> Self {
    Self::new()
  }
}

impl SplitterOptions {
  pub fn new() -> Self {
    SplitterOptions {
      chunk_size: 512,
      chunk_overlap: 0,
      model_name: String::from("gpt-3.5-turbo"),
      encoding_name: String::from("cl100k_base"),
      trim_chunks: false,
    }
  }
}

// Builder pattern for Options struct
impl SplitterOptions {
  pub fn with_chunk_size(mut self, chunk_size: usize) -> Self {
    self.chunk_size = chunk_size;
    self
  }

  pub fn with_chunk_overlap(mut self, chunk_overlap: usize) -> Self {
    self.chunk_overlap = chunk_overlap;
    self
  }

  pub fn with_model_name(mut self, model_name: &str) -> Self {
    self.model_name = String::from(model_name);
    self
  }

  pub fn with_encoding_name(mut self, encoding_name: &str) -> Self {
    self.encoding_name = String::from(encoding_name);
    self
  }

  pub fn with_trim_chunks(mut self, trim_chunks: bool) -> Self {
    self.trim_chunks = trim_chunks;
    self
  }

  pub fn get_tokenizer_from_str(s: &str) -> Option<Tokenizer> {
    match s.to_lowercase().as_str() {
      "cl100k_base" => Some(Tokenizer::Cl100kBase),
      "p50k_base" => Some(Tokenizer::P50kBase),
      "r50k_base" => Some(Tokenizer::R50kBase),
      "p50k_edit" => Some(Tokenizer::P50kEdit),
      "gpt2" => Some(Tokenizer::Gpt2),
      _ => None,
    }
  }
}

impl TryFrom<&SplitterOptions> for ChunkConfig<CoreBPE> {
  type Error = TextSplitterError;

  fn try_from(options: &SplitterOptions) -> Result<Self, Self::Error> {
    let tk = if !options.encoding_name.is_empty() {
      let tokenizer = SplitterOptions::get_tokenizer_from_str(&options.encoding_name)
        .ok_or(TextSplitterError::TokenizerNotFound)?;

      get_bpe_from_tokenizer(tokenizer).map_err(|_| TextSplitterError::InvalidTokenizer)?
    } else {
      get_bpe_from_model(&options.model_name).map_err(|_| TextSplitterError::InvalidModel)?
    };

    Ok(
      ChunkConfig::new(options.chunk_size)
        .with_sizer(tk)
        .with_trim(options.trim_chunks)
        .with_overlap(options.chunk_overlap)?,
    )
  }
}
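The builder methods chain, and a non-empty `encoding_name` takes precedence over `model_name` in the `TryFrom` above. A hedged configuration sketch using the `TokenSplitter` defined below (the numbers are arbitrary examples, not defaults from this commit):

// Hypothetical configuration: larger chunks with a small overlap,
// sized by the cl100k_base tokenizer and trimmed of edge whitespace.
let options = SplitterOptions::new()
  .with_chunk_size(1024)
  .with_chunk_overlap(64)
  .with_encoding_name("cl100k_base")
  .with_trim_chunks(true);
let splitter = TokenSplitter::new(options);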
37
packages/common/native/src/doc_loader/splitter/token.rs
Normal file
@@ -0,0 +1,37 @@
use text_splitter::ChunkConfig;

/**
 * modified from https://github.com/Abraxas-365/langchain-rust/tree/v4.6.0/src/text_splitter
 */
use super::*;

#[derive(Debug, Clone)]
pub struct TokenSplitter {
  splitter_options: SplitterOptions,
}

impl Default for TokenSplitter {
  fn default() -> Self {
    TokenSplitter::new(SplitterOptions::default())
  }
}

impl TokenSplitter {
  pub fn new(options: SplitterOptions) -> TokenSplitter {
    TokenSplitter {
      splitter_options: options,
    }
  }
}

impl TextSplitter for TokenSplitter {
  fn split_text(&self, text: &str) -> Result<Vec<String>, TextSplitterError> {
    let chunk_config = ChunkConfig::try_from(&self.splitter_options)?;
    Ok(
      text_splitter::TextSplitter::new(chunk_config)
        .chunks(text)
        .map(|x| x.to_string())
        .collect(),
    )
  }
}
37
packages/common/native/src/doc_loader/types.rs
Normal file
@@ -0,0 +1,37 @@
use std::collections::HashMap;

use serde_json::Value;

#[derive(Debug, Clone)]
pub struct Document {
  pub page_content: String,
  pub metadata: HashMap<String, Value>,
}

impl Document {
  /// Constructs a new `Document` with the provided `page_content` and an
  /// empty `metadata` map.
  pub fn new<S: Into<String>>(page_content: S) -> Self {
    Document {
      page_content: page_content.into(),
      metadata: HashMap::new(),
    }
  }

  /// Sets the `metadata` Map of the `Document` to the provided HashMap.
  pub fn with_metadata(mut self, metadata: HashMap<String, Value>) -> Self {
    self.metadata = metadata;
    self
  }
}

impl Default for Document {
  /// Provides a default `Document` with an empty `page_content` and an
  /// empty `metadata` map.
  fn default() -> Self {
    Document {
      page_content: "".to_string(),
      metadata: HashMap::new(),
    }
  }
}
@@ -1 +1,3 @@
#[cfg(feature = "doc-loader")]
pub mod doc_loader;
pub mod hashcash;