Mirror of https://github.com/toeverything/AFFiNE.git (synced 2026-02-04 00:28:33 +00:00)
feat(server): adapt gpt5 (#13478)
## Summary by CodeRabbit

- New Features
  - Added the GPT-5 family and made GPT-5 / GPT-5-mini the new defaults for Copilot scenarios and prompts.
- Bug Fixes
  - Improved streaming chunk formats and reasoning/text semantics, consistent attachment mediaType handling, and more reliable reranking via log-prob handling.
- Refactor
  - Unified maxOutputTokens usage; removed per-call step caps and migrated several tools to a unified inputSchema shape.
- Chores
  - Upgraded AI SDK dependencies and bumped an internal dependency version.
- Tests
  - Updated mocks and tests to reference GPT-5 variants and new stream formats.
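The diff below is largely a mechanical migration from AI SDK v4 to v5. For orientation, here is a minimal sketch (not code from this commit) of the v5 call shape the providers move to: `maxOutputTokens` instead of `maxTokens`, `stopWhen: stepCountIs(n)` instead of `maxSteps`, `inputSchema` instead of `parameters` in `tool()`, and renamed stream parts (`reasoning-delta` and `text-delta` now carry `text`, tool chunks carry `input`/`output`). The model id, tool name, and step cap below are illustrative.

```ts
import { openai } from '@ai-sdk/openai';
import { stepCountIs, streamText, tool } from 'ai';
import { z } from 'zod';

// Hypothetical tool, mirroring the web_search_exa shape used in the diff.
const webSearch = tool({
  description: 'Search the web for information',
  inputSchema: z.object({ query: z.string() }), // v4: `parameters`
  execute: async ({ query }) => [
    { title: 'Example', url: `https://example.com/?q=${encodeURIComponent(query)}` },
  ],
});

const { fullStream } = streamText({
  model: openai('gpt-5-mini'),
  prompt: 'Summarize recent quantum computing news.',
  maxOutputTokens: 4096,     // v4: maxTokens
  stopWhen: stepCountIs(20), // v4: maxSteps + experimental_continueSteps
  tools: { web_search_exa: webSearch },
});

for await (const chunk of fullStream) {
  switch (chunk.type) {
    case 'text-delta':      // v4 exposed chunk.textDelta
    case 'reasoning-delta': // v4 type was 'reasoning'
      process.stdout.write(chunk.text);
      break;
    case 'tool-call':       // v4: chunk.args
      console.log(chunk.toolName, chunk.input);
      break;
    case 'tool-result':     // v4: chunk.result
      console.log(chunk.toolName, chunk.output);
      break;
  }
}
```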
@@ -669,7 +669,7 @@
},
"scenarios": {
"type": "object",
"description": "Use custom models in scenarios and override default settings.\n@default {\"override_enabled\":false,\"scenarios\":{\"audio_transcribing\":\"gemini-2.5-flash\",\"chat\":\"claude-sonnet-4@20250514\",\"embedding\":\"gemini-embedding-001\",\"image\":\"gpt-image-1\",\"rerank\":\"gpt-4.1\",\"coding\":\"claude-sonnet-4@20250514\",\"complex_text_generation\":\"gpt-4o-2024-08-06\",\"quick_decision_making\":\"gpt-4.1-mini\",\"quick_text_generation\":\"gemini-2.5-flash\",\"polish_and_summarize\":\"gemini-2.5-flash\"}}",
"description": "Use custom models in scenarios and override default settings.\n@default {\"override_enabled\":false,\"scenarios\":{\"audio_transcribing\":\"gemini-2.5-flash\",\"chat\":\"claude-sonnet-4@20250514\",\"embedding\":\"gemini-embedding-001\",\"image\":\"gpt-image-1\",\"rerank\":\"gpt-4.1\",\"coding\":\"claude-sonnet-4@20250514\",\"complex_text_generation\":\"gpt-4o-2024-08-06\",\"quick_decision_making\":\"gpt-5-mini\",\"quick_text_generation\":\"gemini-2.5-flash\",\"polish_and_summarize\":\"gemini-2.5-flash\"}}",
"default": {
"override_enabled": false,
"scenarios": {
@@ -680,7 +680,7 @@
"rerank": "gpt-4.1",
"coding": "claude-sonnet-4@20250514",
"complex_text_generation": "gpt-4o-2024-08-06",
"quick_decision_making": "gpt-4.1-mini",
"quick_decision_making": "gpt-5-mini",
"quick_text_generation": "gemini-2.5-flash",
"polish_and_summarize": "gemini-2.5-flash"
}
Cargo.lock (generated), 24 changed lines
@@ -93,7 +93,7 @@ dependencies = [
|
||||
"symphonia",
|
||||
"thiserror 2.0.12",
|
||||
"uuid",
|
||||
"windows 0.61.1",
|
||||
"windows 0.61.3",
|
||||
"windows-core 0.61.2",
|
||||
]
|
||||
|
||||
@@ -1691,7 +1691,7 @@ dependencies = [
|
||||
"libc",
|
||||
"log",
|
||||
"rustversion",
|
||||
"windows 0.61.1",
|
||||
"windows 0.61.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2284,7 +2284,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"windows-targets 0.48.5",
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4732,9 +4732,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tree-sitter"
|
||||
version = "0.25.5"
|
||||
version = "0.25.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac5fff5c47490dfdf473b5228039bfacad9d765d9b6939d26bf7cc064c1c7822"
|
||||
checksum = "6d7b8994f367f16e6fa14b5aebbcb350de5d7cbea82dc5b00ae997dd71680dd2"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"regex",
|
||||
@@ -4842,9 +4842,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tree-sitter-scala"
|
||||
version = "0.23.4"
|
||||
version = "0.24.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "efde5e68b4736e9eac17bfa296c6f104a26bffab363b365eb898c40a63c15d2f"
|
||||
checksum = "7516aeb3d1f40ede8e3045b163e86993b3434514dd06c34c0b75e782d9a0b251"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"tree-sitter-language",
|
||||
@@ -5334,7 +5334,7 @@ version = "0.1.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
|
||||
dependencies = [
|
||||
"windows-sys 0.48.0",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -5365,9 +5365,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "windows"
|
||||
version = "0.61.1"
|
||||
version = "0.61.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c5ee8f3d025738cb02bad7868bbb5f8a6327501e870bf51f1b455b0a2454a419"
|
||||
checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893"
|
||||
dependencies = [
|
||||
"windows-collections",
|
||||
"windows-core 0.61.2",
|
||||
@@ -5477,9 +5477,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "windows-link"
|
||||
version = "0.1.1"
|
||||
version = "0.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38"
|
||||
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
|
||||
|
||||
[[package]]
|
||||
name = "windows-numerics"
|
||||
|
||||
@@ -93,7 +93,7 @@ tree-sitter-javascript = { version = "0.23" }
|
||||
tree-sitter-kotlin-ng = { version = "1.1" }
|
||||
tree-sitter-python = { version = "0.23" }
|
||||
tree-sitter-rust = { version = "0.24" }
|
||||
tree-sitter-scala = { version = "0.23" }
|
||||
tree-sitter-scala = { version = "0.24" }
|
||||
tree-sitter-typescript = { version = "0.23" }
|
||||
uniffi = "0.29"
|
||||
url = { version = "2.5" }
|
||||
|
||||
@@ -1,5 +1,7 @@
use std::collections::HashSet;

use tiktoken_rs::{get_bpe_from_tokenizer, tokenizer::Tokenizer as TiktokenTokenizer};

#[napi]
pub struct Tokenizer {
  inner: tiktoken_rs::CoreBPE,
@@ -7,6 +9,10 @@ pub struct Tokenizer {

#[napi]
pub fn from_model_name(model_name: String) -> Option<Tokenizer> {
  if model_name.starts_with("gpt-5") {
    let bpe = get_bpe_from_tokenizer(TiktokenTokenizer::O200kBase).ok()?;
    return Some(Tokenizer { inner: bpe });
  }
  let bpe = tiktoken_rs::get_bpe_from_model(&model_name).ok()?;
  Some(Tokenizer { inner: bpe })
}
@@ -31,7 +37,7 @@ mod tests {

#[test]
fn test_tokenizer() {
  let tokenizer = from_model_name("gpt-4.1".to_string()).unwrap();
  let tokenizer = from_model_name("gpt-5".to_string()).unwrap();
  let content = "Hello, world!";
  let count = tokenizer.count(content.to_string(), None);
  assert!(count > 0);
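A hypothetical sketch of how server code might call this binding to count tokens for GPT-5 models; the import path and camel-cased names (`fromModelName`, `Tokenizer.count`) are assumptions about how the `#[napi]` items above are exposed, not code from this commit. The Rust change itself just routes any `gpt-5*` model id to the `o200k_base` encoding, presumably because tiktoken's built-in model table does not recognize the new name yet.

```ts
// Assumed napi export names; they may differ in @affine/server-native.
import { fromModelName } from '@affine/server-native';

const tokenizer = fromModelName('gpt-5-mini'); // any "gpt-5*" id maps to o200k_base
const tokens = tokenizer?.count('Hello, world!') ?? 0;
console.log(`prompt uses ${tokens} tokens`);
```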
@@ -28,12 +28,12 @@
"dependencies": {
"@affine/reader": "workspace:*",
"@affine/server-native": "workspace:*",
"@ai-sdk/anthropic": "^1.2.12",
"@ai-sdk/google": "^1.2.18",
"@ai-sdk/google-vertex": "^2.2.23",
"@ai-sdk/openai": "^1.3.22",
"@ai-sdk/openai-compatible": "^0.2.14",
"@ai-sdk/perplexity": "^1.1.9",
"@ai-sdk/anthropic": "^2.0.1",
"@ai-sdk/google": "^2.0.4",
"@ai-sdk/google-vertex": "^3.0.5",
"@ai-sdk/openai": "^2.0.10",
"@ai-sdk/openai-compatible": "^1.0.5",
"@ai-sdk/perplexity": "^2.0.1",
"@apollo/server": "^4.11.3",
"@aws-sdk/client-s3": "^3.779.0",
"@aws-sdk/s3-request-presigner": "^3.779.0",
@@ -75,7 +75,7 @@
"@prisma/instrumentation": "^6.7.0",
"@react-email/components": "0.0.38",
"@socket.io/redis-adapter": "^8.3.0",
"ai": "^4.3.4",
"ai": "^5.0.10",
"bullmq": "^5.40.2",
"cookie-parser": "^1.4.7",
"cross-env": "^7.0.3",
@@ -118,11 +118,11 @@ test.serial.before(async t => {
|
||||
enabled: true,
|
||||
scenarios: {
|
||||
image: 'flux-1/schnell',
|
||||
rerank: 'gpt-4.1-mini',
|
||||
complex_text_generation: 'gpt-4.1-mini',
|
||||
coding: 'gpt-4.1-mini',
|
||||
quick_decision_making: 'gpt-4.1-mini',
|
||||
quick_text_generation: 'gpt-4.1-mini',
|
||||
rerank: 'gpt-5-mini',
|
||||
complex_text_generation: 'gpt-5-mini',
|
||||
coding: 'gpt-5-mini',
|
||||
quick_decision_making: 'gpt-5-mini',
|
||||
quick_text_generation: 'gpt-5-mini',
|
||||
polish_and_summarize: 'gemini-2.5-flash',
|
||||
},
|
||||
},
|
||||
|
||||
@@ -5,6 +5,7 @@ import { ProjectRoot } from '@affine-tools/utils/path';
|
||||
import { PrismaClient } from '@prisma/client';
|
||||
import type { TestFn } from 'ava';
|
||||
import ava from 'ava';
|
||||
import { nanoid } from 'nanoid';
|
||||
import Sinon from 'sinon';
|
||||
|
||||
import { EventBus, JobQueue } from '../base';
|
||||
@@ -1340,16 +1341,16 @@ test('TextStreamParser should format different types of chunks correctly', t =>
|
||||
textDelta: {
|
||||
chunk: {
|
||||
type: 'text-delta' as const,
|
||||
textDelta: 'Hello world',
|
||||
} as any,
|
||||
text: 'Hello world',
|
||||
},
|
||||
expected: 'Hello world',
|
||||
description: 'should format text-delta correctly',
|
||||
},
|
||||
reasoning: {
|
||||
chunk: {
|
||||
type: 'reasoning' as const,
|
||||
textDelta: 'I need to think about this',
|
||||
} as any,
|
||||
type: 'reasoning-delta' as const,
|
||||
text: 'I need to think about this',
|
||||
},
|
||||
expected: '\n> [!]\n> I need to think about this',
|
||||
description: 'should format reasoning as callout',
|
||||
},
|
||||
@@ -1358,8 +1359,8 @@ test('TextStreamParser should format different types of chunks correctly', t =>
|
||||
type: 'tool-call' as const,
|
||||
toolName: 'web_search_exa' as const,
|
||||
toolCallId: 'test-id-1',
|
||||
args: { query: 'test query', mode: 'AUTO' as const },
|
||||
} as any,
|
||||
input: { query: 'test query', mode: 'AUTO' as const },
|
||||
},
|
||||
expected: '\n> [!]\n> \n> Searching the web "test query"\n> ',
|
||||
description: 'should format web search tool call correctly',
|
||||
},
|
||||
@@ -1368,8 +1369,8 @@ test('TextStreamParser should format different types of chunks correctly', t =>
|
||||
type: 'tool-call' as const,
|
||||
toolName: 'web_crawl_exa' as const,
|
||||
toolCallId: 'test-id-2',
|
||||
args: { url: 'https://example.com' },
|
||||
} as any,
|
||||
input: { url: 'https://example.com' },
|
||||
},
|
||||
expected: '\n> [!]\n> \n> Crawling the web "https://example.com"\n> ',
|
||||
description: 'should format web crawl tool call correctly',
|
||||
},
|
||||
@@ -1378,8 +1379,8 @@ test('TextStreamParser should format different types of chunks correctly', t =>
|
||||
type: 'tool-result' as const,
|
||||
toolName: 'web_search_exa' as const,
|
||||
toolCallId: 'test-id-1',
|
||||
args: { query: 'test query', mode: 'AUTO' as const },
|
||||
result: [
|
||||
input: { query: 'test query', mode: 'AUTO' as const },
|
||||
output: [
|
||||
{
|
||||
title: 'Test Title',
|
||||
url: 'https://test.com',
|
||||
@@ -1406,7 +1407,7 @@ test('TextStreamParser should format different types of chunks correctly', t =>
|
||||
chunk: {
|
||||
type: 'error' as const,
|
||||
error: { type: 'testError', message: 'Test error message' },
|
||||
} as any,
|
||||
},
|
||||
errorMessage: 'Test error message',
|
||||
description: 'should throw error for error chunks',
|
||||
},
|
||||
@@ -1436,78 +1437,85 @@ test('TextStreamParser should process a sequence of message chunks', t => {
|
||||
chunks: [
|
||||
// Reasoning chunks
|
||||
{
|
||||
type: 'reasoning' as const,
|
||||
textDelta: 'The user is asking about',
|
||||
} as any,
|
||||
id: nanoid(),
|
||||
type: 'reasoning-delta' as const,
|
||||
text: 'The user is asking about',
|
||||
},
|
||||
{
|
||||
type: 'reasoning' as const,
|
||||
textDelta: ' recent advances in quantum computing',
|
||||
} as any,
|
||||
id: nanoid(),
|
||||
type: 'reasoning-delta' as const,
|
||||
text: ' recent advances in quantum computing',
|
||||
},
|
||||
{
|
||||
type: 'reasoning' as const,
|
||||
textDelta: ' and how it might impact',
|
||||
} as any,
|
||||
id: nanoid(),
|
||||
type: 'reasoning-delta' as const,
|
||||
text: ' and how it might impact',
|
||||
},
|
||||
{
|
||||
type: 'reasoning' as const,
|
||||
textDelta: ' cryptography and data security.',
|
||||
} as any,
|
||||
id: nanoid(),
|
||||
type: 'reasoning-delta' as const,
|
||||
text: ' cryptography and data security.',
|
||||
},
|
||||
{
|
||||
type: 'reasoning' as const,
|
||||
textDelta:
|
||||
' I should provide information on quantum supremacy achievements',
|
||||
} as any,
|
||||
id: nanoid(),
|
||||
type: 'reasoning-delta' as const,
|
||||
text: ' I should provide information on quantum supremacy achievements',
|
||||
},
|
||||
|
||||
// Text delta
|
||||
{
|
||||
id: nanoid(),
|
||||
type: 'text-delta' as const,
|
||||
textDelta:
|
||||
'Let me search for the latest breakthroughs in quantum computing and their ',
|
||||
} as any,
|
||||
text: 'Let me search for the latest breakthroughs in quantum computing and their ',
|
||||
},
|
||||
|
||||
// Tool call
|
||||
{
|
||||
type: 'tool-call' as const,
|
||||
toolCallId: 'toolu_01ABCxyz123456789',
|
||||
toolName: 'web_search_exa' as const,
|
||||
args: {
|
||||
input: {
|
||||
query: 'latest quantum computing breakthroughs cryptography impact',
|
||||
},
|
||||
} as any,
|
||||
},
|
||||
|
||||
// Tool result
|
||||
{
|
||||
type: 'tool-result' as const,
|
||||
toolCallId: 'toolu_01ABCxyz123456789',
|
||||
toolName: 'web_search_exa' as const,
|
||||
args: {
|
||||
input: {
|
||||
query: 'latest quantum computing breakthroughs cryptography impact',
|
||||
},
|
||||
result: [
|
||||
output: [
|
||||
{
|
||||
title: 'IBM Unveils 1000-Qubit Quantum Processor',
|
||||
url: 'https://example.com/tech/quantum-computing-milestone',
|
||||
},
|
||||
],
|
||||
} as any,
|
||||
},
|
||||
|
||||
// More text deltas
|
||||
{
|
||||
id: nanoid(),
|
||||
type: 'text-delta' as const,
|
||||
textDelta: 'implications for security.',
|
||||
} as any,
|
||||
text: 'implications for security.',
|
||||
},
|
||||
{
|
||||
id: nanoid(),
|
||||
type: 'text-delta' as const,
|
||||
textDelta: '\n\nQuantum computing has made ',
|
||||
} as any,
|
||||
text: '\n\nQuantum computing has made ',
|
||||
},
|
||||
{
|
||||
id: nanoid(),
|
||||
type: 'text-delta' as const,
|
||||
textDelta: 'remarkable progress in the past year. ',
|
||||
} as any,
|
||||
text: 'remarkable progress in the past year. ',
|
||||
},
|
||||
{
|
||||
id: nanoid(),
|
||||
type: 'text-delta' as const,
|
||||
textDelta:
|
||||
'The development of more stable qubits has accelerated research significantly.',
|
||||
} as any,
|
||||
text: 'The development of more stable qubits has accelerated research significantly.',
|
||||
},
|
||||
],
|
||||
expected:
|
||||
'\n> [!]\n> The user is asking about recent advances in quantum computing and how it might impact cryptography and data security. I should provide information on quantum supremacy achievements\n\nLet me search for the latest breakthroughs in quantum computing and their \n> [!]\n> \n> Searching the web "latest quantum computing breakthroughs cryptography impact"\n> \n> \n> \n> [IBM Unveils 1000-Qubit Quantum Processor](https://example.com/tech/quantum-computing-milestone)\n> \n> \n> \n\nimplications for security.\n\nQuantum computing has made remarkable progress in the past year. The development of more stable qubits has accelerated research significantly.',
|
||||
|
||||
@@ -57,15 +57,6 @@ export class MockCopilotProvider extends OpenAIProvider {
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'gpt-4.1',
|
||||
capabilities: [
|
||||
{
|
||||
input: [ModelInputType.Text, ModelInputType.Image],
|
||||
output: [ModelOutputType.Text, ModelOutputType.Object],
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'gpt-4.1-2025-04-14',
|
||||
capabilities: [
|
||||
@@ -76,7 +67,25 @@ export class MockCopilotProvider extends OpenAIProvider {
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'gpt-4.1-mini',
|
||||
id: 'gpt-5',
|
||||
capabilities: [
|
||||
{
|
||||
input: [ModelInputType.Text, ModelInputType.Image],
|
||||
output: [ModelOutputType.Text, ModelOutputType.Object],
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'gpt-5-2025-08-07',
|
||||
capabilities: [
|
||||
{
|
||||
input: [ModelInputType.Text, ModelInputType.Image],
|
||||
output: [ModelOutputType.Text, ModelOutputType.Object],
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'gpt-5-mini',
|
||||
capabilities: [
|
||||
{
|
||||
input: [ModelInputType.Text, ModelInputType.Image],
|
||||
|
||||
@@ -48,7 +48,7 @@ let docId = 'doc1';
|
||||
|
||||
test.beforeEach(async t => {
|
||||
await t.context.module.initTestingDB();
|
||||
await t.context.copilotSession.createPrompt('prompt-name', 'gpt-4.1');
|
||||
await t.context.copilotSession.createPrompt('prompt-name', 'gpt-5-mini');
|
||||
user = await t.context.user.create({
|
||||
email: 'test@affine.pro',
|
||||
});
|
||||
|
||||
@@ -58,9 +58,9 @@ const createTestPrompts = async (
|
||||
copilotSession: CopilotSessionModel,
|
||||
db: PrismaClient
|
||||
) => {
|
||||
await copilotSession.createPrompt(TEST_PROMPTS.NORMAL, 'gpt-4.1');
|
||||
await copilotSession.createPrompt(TEST_PROMPTS.NORMAL, 'gpt-5-mini');
|
||||
await db.aiPrompt.create({
|
||||
data: { name: TEST_PROMPTS.ACTION, model: 'gpt-4.1', action: 'edit' },
|
||||
data: { name: TEST_PROMPTS.ACTION, model: 'gpt-5-mini', action: 'edit' },
|
||||
});
|
||||
};
|
||||
|
||||
@@ -116,7 +116,7 @@ const addMessagesToSession = async (
|
||||
await copilotSession.updateMessages({
|
||||
sessionId,
|
||||
userId: user.id,
|
||||
prompt: { model: 'gpt-4.1' },
|
||||
prompt: { model: 'gpt-5-mini' },
|
||||
messages: [
|
||||
{
|
||||
role: 'user',
|
||||
@@ -807,7 +807,7 @@ test('should handle fork and session attachment operations', async t => {
|
||||
pinned: forkConfig.pinned,
|
||||
title: null,
|
||||
parentSessionId,
|
||||
prompt: { name: TEST_PROMPTS.NORMAL, action: null, model: 'gpt-4.1' },
|
||||
prompt: { name: TEST_PROMPTS.NORMAL, action: null, model: 'gpt-5-mini' },
|
||||
messages: [
|
||||
{
|
||||
role: 'user',
|
||||
|
||||
@@ -57,7 +57,7 @@ defineModuleConfig('copilot', {
|
||||
rerank: 'gpt-4.1',
|
||||
coding: 'claude-sonnet-4@20250514',
|
||||
complex_text_generation: 'gpt-4o-2024-08-06',
|
||||
quick_decision_making: 'gpt-4.1-mini',
|
||||
quick_decision_making: 'gpt-5-mini',
|
||||
quick_text_generation: 'gemini-2.5-flash',
|
||||
polish_and_summarize: 'gemini-2.5-flash',
|
||||
},
|
||||
|
||||
@@ -107,7 +107,7 @@ const workflows: Prompt[] = [
|
||||
{
|
||||
name: 'workflow:presentation:step1',
|
||||
action: 'workflow:presentation:step1',
|
||||
model: 'gpt-4.1-mini',
|
||||
model: 'gpt-5-mini',
|
||||
config: { temperature: 0.7 },
|
||||
messages: [
|
||||
{
|
||||
@@ -170,7 +170,7 @@ const workflows: Prompt[] = [
|
||||
{
|
||||
name: 'workflow:brainstorm:step1',
|
||||
action: 'workflow:brainstorm:step1',
|
||||
model: 'gpt-4.1-mini',
|
||||
model: 'gpt-5-mini',
|
||||
config: { temperature: 0.7 },
|
||||
messages: [
|
||||
{
|
||||
@@ -221,7 +221,7 @@ const workflows: Prompt[] = [
|
||||
{
|
||||
name: 'workflow:image-sketch:step2',
|
||||
action: 'workflow:image-sketch:step2',
|
||||
model: 'gpt-4.1-mini',
|
||||
model: 'gpt-5-mini',
|
||||
messages: [
|
||||
{
|
||||
role: 'system',
|
||||
@@ -262,7 +262,7 @@ const workflows: Prompt[] = [
|
||||
{
|
||||
name: 'workflow:image-clay:step2',
|
||||
action: 'workflow:image-clay:step2',
|
||||
model: 'gpt-4.1-mini',
|
||||
model: 'gpt-5-mini',
|
||||
messages: [
|
||||
{
|
||||
role: 'system',
|
||||
@@ -303,7 +303,7 @@ const workflows: Prompt[] = [
|
||||
{
|
||||
name: 'workflow:image-anime:step2',
|
||||
action: 'workflow:image-anime:step2',
|
||||
model: 'gpt-4.1-mini',
|
||||
model: 'gpt-5-mini',
|
||||
messages: [
|
||||
{
|
||||
role: 'system',
|
||||
@@ -344,7 +344,7 @@ const workflows: Prompt[] = [
|
||||
{
|
||||
name: 'workflow:image-pixel:step2',
|
||||
action: 'workflow:image-pixel:step2',
|
||||
model: 'gpt-4.1-mini',
|
||||
model: 'gpt-5-mini',
|
||||
messages: [
|
||||
{
|
||||
role: 'system',
|
||||
@@ -432,7 +432,7 @@ Convert a multi-speaker audio recording into a structured JSON format by transcr
|
||||
{
|
||||
name: 'Generate a caption',
|
||||
action: 'Generate a caption',
|
||||
model: 'gpt-4.1-mini',
|
||||
model: 'gpt-5-mini',
|
||||
messages: [
|
||||
{
|
||||
role: 'user',
|
||||
@@ -1931,6 +1931,7 @@ const CHAT_PROMPT: Omit<Prompt, 'name'> = {
|
||||
model: 'claude-sonnet-4@20250514',
|
||||
optionalModels: [
|
||||
'gpt-4.1',
|
||||
'gpt-5',
|
||||
'o3',
|
||||
'o4-mini',
|
||||
'gemini-2.5-flash',
|
||||
|
||||
@@ -3,7 +3,7 @@ import {
|
||||
type AnthropicProviderOptions,
|
||||
} from '@ai-sdk/anthropic';
|
||||
import { type GoogleVertexAnthropicProvider } from '@ai-sdk/google-vertex/anthropic';
|
||||
import { AISDKError, generateText, streamText } from 'ai';
|
||||
import { AISDKError, generateText, stepCountIs, streamText } from 'ai';
|
||||
|
||||
import {
|
||||
CopilotProviderSideError,
|
||||
@@ -75,8 +75,7 @@ export abstract class AnthropicProvider<T> extends CopilotProvider<T> {
|
||||
anthropic: this.getAnthropicOptions(options, model.id),
|
||||
},
|
||||
tools: await this.getTools(options, model.id),
|
||||
maxSteps: this.MAX_STEPS,
|
||||
experimental_continueSteps: true,
|
||||
stopWhen: stepCountIs(this.MAX_STEPS),
|
||||
});
|
||||
|
||||
if (!text) throw new Error('Failed to generate text');
|
||||
@@ -169,8 +168,7 @@ export abstract class AnthropicProvider<T> extends CopilotProvider<T> {
|
||||
anthropic: this.getAnthropicOptions(options, model.id),
|
||||
},
|
||||
tools: await this.getTools(options, model.id),
|
||||
maxSteps: this.MAX_STEPS,
|
||||
experimental_continueSteps: true,
|
||||
stopWhen: stepCountIs(this.MAX_STEPS),
|
||||
});
|
||||
return fullStream;
|
||||
}
|
||||
|
||||
@@ -38,8 +38,6 @@ import {
|
||||
export const DEFAULT_DIMENSIONS = 256;
|
||||
|
||||
export abstract class GeminiProvider<T> extends CopilotProvider<T> {
|
||||
private readonly MAX_STEPS = 20;
|
||||
|
||||
protected abstract instance:
|
||||
| GoogleGenerativeAIProvider
|
||||
| GoogleVertexProvider;
|
||||
@@ -87,8 +85,6 @@ export abstract class GeminiProvider<T> extends CopilotProvider<T> {
|
||||
google: this.getGeminiOptions(options, model.id),
|
||||
},
|
||||
tools: await this.getTools(options, model.id),
|
||||
maxSteps: this.MAX_STEPS,
|
||||
experimental_continueSteps: true,
|
||||
});
|
||||
|
||||
if (!text) throw new Error('Failed to generate text');
|
||||
@@ -116,9 +112,7 @@ export abstract class GeminiProvider<T> extends CopilotProvider<T> {
|
||||
throw new CopilotPromptInvalid('Schema is required');
|
||||
}
|
||||
|
||||
const modelInstance = this.instance(model.id, {
|
||||
structuredOutputs: true,
|
||||
});
|
||||
const modelInstance = this.instance(model.id);
|
||||
const { object } = await generateObject({
|
||||
model: modelInstance,
|
||||
system,
|
||||
@@ -238,14 +232,21 @@
  .counter('generate_embedding_calls')
  .add(1, { model: model.id });

const modelInstance = this.instance.textEmbeddingModel(model.id, {
  outputDimensionality: options.dimensions || DEFAULT_DIMENSIONS,
  taskType: 'RETRIEVAL_DOCUMENT',
});
const modelInstance = this.instance.textEmbeddingModel(model.id);

const embeddings = await Promise.allSettled(
  messages.map(m =>
    embedMany({ model: modelInstance, values: [m], maxRetries: 3 })
    embedMany({
      model: modelInstance,
      values: [m],
      maxRetries: 3,
      providerOptions: {
        google: {
          outputDimensionality: options.dimensions || DEFAULT_DIMENSIONS,
          taskType: 'RETRIEVAL_DOCUMENT',
        },
      },
    })
  )
);
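Condensed, the embedding change above moves per-request options out of the `textEmbeddingModel()` factory and into `providerOptions.google` on the `embedMany` call. A minimal standalone sketch, assuming the plain Google provider and the 256-dimension default used by `DEFAULT_DIMENSIONS`:

```ts
import { google } from '@ai-sdk/google';
import { embedMany } from 'ai';

const { embeddings } = await embedMany({
  model: google.textEmbeddingModel('gemini-embedding-001'),
  values: ['AFFiNE is a workspace'],
  maxRetries: 3,
  providerOptions: {
    // In AI SDK v5 these are request options, not model-construction options.
    google: { outputDimensionality: 256, taskType: 'RETRIEVAL_DOCUMENT' },
  },
});
```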
@@ -275,8 +276,6 @@ export abstract class GeminiProvider<T> extends CopilotProvider<T> {
|
||||
google: this.getGeminiOptions(options, model.id),
|
||||
},
|
||||
tools: await this.getTools(options, model.id),
|
||||
maxSteps: this.MAX_STEPS,
|
||||
experimental_continueSteps: true,
|
||||
});
|
||||
return fullStream;
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import {
|
||||
experimental_generateImage as generateImage,
|
||||
generateObject,
|
||||
generateText,
|
||||
stepCountIs,
|
||||
streamText,
|
||||
Tool,
|
||||
} from 'ai';
|
||||
@@ -65,6 +66,18 @@ const ImageResponseSchema = z.union([
|
||||
}),
|
||||
}),
|
||||
]);
|
||||
const LogProbsSchema = z.array(
|
||||
z.object({
|
||||
token: z.string(),
|
||||
logprob: z.number(),
|
||||
top_logprobs: z.array(
|
||||
z.object({
|
||||
token: z.string(),
|
||||
logprob: z.number(),
|
||||
})
|
||||
),
|
||||
})
|
||||
);
|
||||
|
||||
export class OpenAIProvider extends CopilotProvider<OpenAIConfig> {
|
||||
readonly type = CopilotProviderType.OpenAI;
|
||||
@@ -162,6 +175,58 @@ export class OpenAIProvider extends CopilotProvider<OpenAIConfig> {
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'gpt-5',
|
||||
capabilities: [
|
||||
{
|
||||
input: [ModelInputType.Text, ModelInputType.Image],
|
||||
output: [
|
||||
ModelOutputType.Text,
|
||||
ModelOutputType.Object,
|
||||
ModelOutputType.Structured,
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'gpt-5-2025-08-07',
|
||||
capabilities: [
|
||||
{
|
||||
input: [ModelInputType.Text, ModelInputType.Image],
|
||||
output: [
|
||||
ModelOutputType.Text,
|
||||
ModelOutputType.Object,
|
||||
ModelOutputType.Structured,
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'gpt-5-mini',
|
||||
capabilities: [
|
||||
{
|
||||
input: [ModelInputType.Text, ModelInputType.Image],
|
||||
output: [
|
||||
ModelOutputType.Text,
|
||||
ModelOutputType.Object,
|
||||
ModelOutputType.Structured,
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'gpt-5-nano',
|
||||
capabilities: [
|
||||
{
|
||||
input: [ModelInputType.Text, ModelInputType.Image],
|
||||
output: [
|
||||
ModelOutputType.Text,
|
||||
ModelOutputType.Object,
|
||||
ModelOutputType.Structured,
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'o1',
|
||||
capabilities: [
|
||||
@@ -299,7 +364,7 @@ export class OpenAIProvider extends CopilotProvider<OpenAIConfig> {
|
||||
model: string
|
||||
): [string, Tool?] | undefined {
|
||||
if (toolName === 'webSearch' && !this.isReasoningModel(model)) {
|
||||
return ['web_search_preview', openai.tools.webSearchPreview()];
|
||||
return ['web_search_preview', openai.tools.webSearchPreview({})];
|
||||
} else if (toolName === 'docEdit') {
|
||||
return ['doc_edit', undefined];
|
||||
}
|
||||
@@ -330,12 +395,12 @@ export class OpenAIProvider extends CopilotProvider<OpenAIConfig> {
|
||||
system,
|
||||
messages: msgs,
|
||||
temperature: options.temperature ?? 0,
|
||||
maxTokens: options.maxTokens ?? 4096,
|
||||
maxOutputTokens: options.maxTokens ?? 4096,
|
||||
providerOptions: {
|
||||
openai: this.getOpenAIOptions(options, model.id),
|
||||
},
|
||||
tools: await this.getTools(options, model.id),
|
||||
maxSteps: this.MAX_STEPS,
|
||||
stopWhen: stepCountIs(this.MAX_STEPS),
|
||||
abortSignal: options.signal,
|
||||
});
|
||||
|
||||
@@ -451,7 +516,7 @@ export class OpenAIProvider extends CopilotProvider<OpenAIConfig> {
|
||||
system,
|
||||
messages: msgs,
|
||||
temperature: options.temperature ?? 0,
|
||||
maxTokens: options.maxTokens ?? 4096,
|
||||
maxOutputTokens: options.maxTokens ?? 4096,
|
||||
maxRetries: options.maxRetries ?? 3,
|
||||
schema,
|
||||
providerOptions: {
|
||||
@@ -476,36 +541,37 @@
    await this.checkParams({ messages: [], cond: fullCond, options });
    const model = this.selectModel(fullCond);
    // get the log probability of "yes"/"no"
    const instance = this.#instance(model.id, { logprobs: 16 });
    const instance = this.#instance.chat(model.id);

    const scores = await Promise.all(
      chunkMessages.map(async messages => {
        const [system, msgs] = await chatToGPTMessage(messages);

        const { logprobs } = await generateText({
        const result = await generateText({
          model: instance,
          system,
          messages: msgs,
          temperature: 0,
          maxTokens: 16,
          maxOutputTokens: 16,
          providerOptions: {
            openai: {
              ...this.getOpenAIOptions(options, model.id),
              logprobs: 16,
            },
          },
          abortSignal: options.signal,
        });

        const topMap: Record<string, number> = (
          logprobs?.[0]?.topLogprobs ?? []
        ).reduce<Record<string, number>>(
        const topMap: Record<string, number> = LogProbsSchema.parse(
          result.providerMetadata?.openai?.logprobs
        )[0].top_logprobs.reduce<Record<string, number>>(
          (acc, { token, logprob }) => ({ ...acc, [token]: logprob }),
          {}
        );

        const findLogProb = (token: string): number => {
          // OpenAI often includes a leading space, so try matching '.yes', '_yes', ' yes' and 'yes'
          return [`.${token}`, `_${token}`, ` ${token}`, token]
          return [...'_:. "-\t,(=_“'.split('').map(c => c + token), token]
            .flatMap(v => [v, v.toLowerCase(), v.toUpperCase()])
            .reduce<number>(
              (best, key) =>
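For the rerank change above: AI SDK v5's `generateText` result no longer exposes a top-level `logprobs` field, so the provider requests log probabilities through `providerOptions.openai` and reads them back from `providerMetadata`, validating the shape with the new `LogProbsSchema`. A minimal standalone sketch of the same idea (model id and prompt are illustrative):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai.chat('gpt-4.1'),
  prompt: 'Is the passage relevant to the query? Answer yes or no.',
  temperature: 0,
  maxOutputTokens: 16,
  providerOptions: { openai: { logprobs: 16 } },
});

// Raw OpenAI logprobs come back through provider metadata in v5.
const logprobs = result.providerMetadata?.openai?.logprobs as
  | { token: string; logprob: number; top_logprobs: { token: string; logprob: number }[] }[]
  | undefined;

const yes = logprobs?.[0]?.top_logprobs.find(
  t => t.token.trim().toLowerCase() === 'yes'
);
console.log('P(yes) ~', yes ? Math.exp(yes.logprob) : 0);
```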
@@ -544,12 +610,12 @@ export class OpenAIProvider extends CopilotProvider<OpenAIConfig> {
|
||||
frequencyPenalty: options.frequencyPenalty ?? 0,
|
||||
presencePenalty: options.presencePenalty ?? 0,
|
||||
temperature: options.temperature ?? 0,
|
||||
maxTokens: options.maxTokens ?? 4096,
|
||||
maxOutputTokens: options.maxTokens ?? 4096,
|
||||
providerOptions: {
|
||||
openai: this.getOpenAIOptions(options, model.id),
|
||||
},
|
||||
tools: await this.getTools(options, model.id),
|
||||
maxSteps: this.MAX_STEPS,
|
||||
stopWhen: stepCountIs(this.MAX_STEPS),
|
||||
abortSignal: options.signal,
|
||||
});
|
||||
return fullStream;
|
||||
@@ -676,14 +742,16 @@ export class OpenAIProvider extends CopilotProvider<OpenAIConfig> {
|
||||
.counter('generate_embedding_calls')
|
||||
.add(1, { model: model.id });
|
||||
|
||||
const modelInstance = this.#instance.embedding(model.id, {
|
||||
dimensions: options.dimensions || DEFAULT_DIMENSIONS,
|
||||
user: options.user,
|
||||
});
|
||||
const modelInstance = this.#instance.embedding(model.id);
|
||||
|
||||
const { embeddings } = await embedMany({
|
||||
model: modelInstance,
|
||||
values: messages,
|
||||
providerOptions: {
|
||||
openai: {
|
||||
dimensions: options.dimensions || DEFAULT_DIMENSIONS,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
return embeddings.filter(v => v && Array.isArray(v));
|
||||
|
||||
@@ -125,12 +125,12 @@ export class PerplexityProvider extends CopilotProvider<PerplexityConfig> {
|
||||
system,
|
||||
messages: msgs,
|
||||
temperature: options.temperature ?? 0,
|
||||
maxTokens: options.maxTokens ?? 4096,
|
||||
maxOutputTokens: options.maxTokens ?? 4096,
|
||||
abortSignal: options.signal,
|
||||
});
|
||||
|
||||
const parser = new CitationParser();
|
||||
for (const source of sources) {
|
||||
for (const source of sources.filter(s => s.sourceType === 'url')) {
|
||||
parser.push(source.url);
|
||||
}
|
||||
|
||||
@@ -165,7 +165,7 @@ export class PerplexityProvider extends CopilotProvider<PerplexityConfig> {
|
||||
system,
|
||||
messages: msgs,
|
||||
temperature: options.temperature ?? 0,
|
||||
maxTokens: options.maxTokens ?? 4096,
|
||||
maxOutputTokens: options.maxTokens ?? 4096,
|
||||
abortSignal: options.signal,
|
||||
});
|
||||
|
||||
@@ -173,19 +173,18 @@ export class PerplexityProvider extends CopilotProvider<PerplexityConfig> {
|
||||
for await (const chunk of stream.fullStream) {
|
||||
switch (chunk.type) {
|
||||
case 'source': {
|
||||
parser.push(chunk.source.url);
|
||||
if (chunk.sourceType === 'url') {
|
||||
parser.push(chunk.url);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 'text-delta': {
|
||||
const text = chunk.textDelta.replaceAll(
|
||||
/<\/?think>\n?/g,
|
||||
'\n---\n'
|
||||
);
|
||||
const text = chunk.text.replaceAll(/<\/?think>\n?/g, '\n---\n');
|
||||
const result = parser.parse(text);
|
||||
yield result;
|
||||
break;
|
||||
}
|
||||
case 'step-finish': {
|
||||
case 'finish-step': {
|
||||
const result = parser.end();
|
||||
yield result;
|
||||
break;
|
||||
|
||||
@@ -94,24 +94,24 @@ export async function chatToGPTMessage(

  if (withAttachment) {
    for (let attachment of attachments) {
      let mimeType: string;
      let mediaType: string;
      if (typeof attachment === 'string') {
        mimeType =
        mediaType =
          typeof mimetype === 'string'
            ? mimetype
            : await inferMimeType(attachment);
      } else {
        ({ attachment, mimeType } = attachment);
        ({ attachment, mimeType: mediaType } = attachment);
      }
      if (SIMPLE_IMAGE_URL_REGEX.test(attachment)) {
        const data =
          attachment.startsWith('data:') || useBase64Attachment
            ? await fetch(attachment).then(r => r.arrayBuffer())
            : new URL(attachment);
        if (mimeType.startsWith('image/')) {
          contents.push({ type: 'image', image: data, mimeType });
        if (mediaType.startsWith('image/')) {
          contents.push({ type: 'image', image: data, mediaType });
        } else {
          contents.push({ type: 'file' as const, data, mimeType });
          contents.push({ type: 'file' as const, data, mediaType });
        }
      }
    }
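The rename above is the v4-to-v5 attachment field change: message content parts carry `mediaType` where v4 used `mimeType`. A minimal sketch of the resulting message shape (the URL, bytes, and `ModelMessage` type annotation are illustrative):

```ts
import type { ModelMessage } from 'ai';

const message: ModelMessage = {
  role: 'user',
  content: [
    { type: 'text', text: 'Summarize the attachment.' },
    // v5 content parts use `mediaType`; v4 called this `mimeType`.
    { type: 'image', image: new URL('https://example.com/pic.png'), mediaType: 'image/png' },
    { type: 'file', data: new Uint8Array([0x25, 0x50, 0x44, 0x46]), mediaType: 'application/pdf' },
  ],
};
```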
@@ -417,12 +417,12 @@ export class TextStreamParser {
|
||||
if (!this.prefix) {
|
||||
this.resetPrefix();
|
||||
}
|
||||
result = chunk.textDelta;
|
||||
result = chunk.text;
|
||||
result = this.addNewline(chunk.type, result);
|
||||
break;
|
||||
}
|
||||
case 'reasoning': {
|
||||
result = chunk.textDelta;
|
||||
case 'reasoning-delta': {
|
||||
result = chunk.text;
|
||||
result = this.addPrefix(result);
|
||||
result = this.markAsCallout(result);
|
||||
break;
|
||||
@@ -438,28 +438,28 @@ export class TextStreamParser {
|
||||
break;
|
||||
}
|
||||
case 'web_search_exa': {
|
||||
result += `\nSearching the web "${chunk.args.query}"\n`;
|
||||
result += `\nSearching the web "${chunk.input.query}"\n`;
|
||||
break;
|
||||
}
|
||||
case 'web_crawl_exa': {
|
||||
result += `\nCrawling the web "${chunk.args.url}"\n`;
|
||||
result += `\nCrawling the web "${chunk.input.url}"\n`;
|
||||
break;
|
||||
}
|
||||
case 'doc_keyword_search': {
|
||||
result += `\nSearching the keyword "${chunk.args.query}"\n`;
|
||||
result += `\nSearching the keyword "${chunk.input.query}"\n`;
|
||||
break;
|
||||
}
|
||||
case 'doc_read': {
|
||||
result += `\nReading the doc "${chunk.args.doc_id}"\n`;
|
||||
result += `\nReading the doc "${chunk.input.doc_id}"\n`;
|
||||
break;
|
||||
}
|
||||
case 'doc_compose': {
|
||||
result += `\nWriting document "${chunk.args.title}"\n`;
|
||||
result += `\nWriting document "${chunk.input.title}"\n`;
|
||||
break;
|
||||
}
|
||||
case 'doc_edit': {
|
||||
this.docEditFootnotes.push({
|
||||
intent: chunk.args.instructions,
|
||||
intent: chunk.input.instructions,
|
||||
result: '',
|
||||
});
|
||||
break;
|
||||
@@ -475,12 +475,12 @@ export class TextStreamParser {
|
||||
result = this.addPrefix(result);
|
||||
switch (chunk.toolName) {
|
||||
case 'doc_edit': {
|
||||
if (
|
||||
chunk.result &&
|
||||
typeof chunk.result === 'object' &&
|
||||
Array.isArray(chunk.result.result)
|
||||
) {
|
||||
result += chunk.result.result
|
||||
const array =
|
||||
chunk.output && typeof chunk.output === 'object'
|
||||
? chunk.output.result
|
||||
: undefined;
|
||||
if (Array.isArray(array)) {
|
||||
result += array
|
||||
.map(item => {
|
||||
return `\n${item.changedContent}\n`;
|
||||
})
|
||||
@@ -493,37 +493,37 @@ export class TextStreamParser {
|
||||
break;
|
||||
}
|
||||
case 'doc_semantic_search': {
|
||||
if (Array.isArray(chunk.result)) {
|
||||
result += `\nFound ${chunk.result.length} document${chunk.result.length !== 1 ? 's' : ''} related to “${chunk.args.query}”.\n`;
|
||||
} else if (typeof chunk.result === 'string') {
|
||||
result += `\n${chunk.result}\n`;
|
||||
const output = chunk.output;
|
||||
if (Array.isArray(output)) {
|
||||
result += `\nFound ${output.length} document${output.length !== 1 ? 's' : ''} related to “${chunk.input.query}”.\n`;
|
||||
} else if (typeof output === 'string') {
|
||||
result += `\n${output}\n`;
|
||||
} else {
|
||||
this.logger.warn(
|
||||
`Unexpected result type for doc_semantic_search: ${chunk.result?.message || 'Unknown error'}`
|
||||
`Unexpected result type for doc_semantic_search: ${output?.message || 'Unknown error'}`
|
||||
);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 'doc_keyword_search': {
|
||||
if (Array.isArray(chunk.result)) {
|
||||
result += `\nFound ${chunk.result.length} document${chunk.result.length !== 1 ? 's' : ''} related to “${chunk.args.query}”.\n`;
|
||||
result += `\n${this.getKeywordSearchLinks(chunk.result)}\n`;
|
||||
const output = chunk.output;
|
||||
if (Array.isArray(output)) {
|
||||
result += `\nFound ${output.length} document${output.length !== 1 ? 's' : ''} related to “${chunk.input.query}”.\n`;
|
||||
result += `\n${this.getKeywordSearchLinks(output)}\n`;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 'doc_compose': {
|
||||
if (
|
||||
chunk.result &&
|
||||
typeof chunk.result === 'object' &&
|
||||
'title' in chunk.result
|
||||
) {
|
||||
result += `\nDocument "${chunk.result.title}" created successfully with ${chunk.result.wordCount} words.\n`;
|
||||
const output = chunk.output;
|
||||
if (output && typeof output === 'object' && 'title' in output) {
|
||||
result += `\nDocument "${output.title}" created successfully with ${output.wordCount} words.\n`;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 'web_search_exa': {
|
||||
if (Array.isArray(chunk.result)) {
|
||||
result += `\n${this.getWebSearchLinks(chunk.result)}\n`;
|
||||
const output = chunk.output;
|
||||
if (Array.isArray(output)) {
|
||||
result += `\n${this.getWebSearchLinks(output)}\n`;
|
||||
}
|
||||
break;
|
||||
}
|
||||
@@ -598,11 +598,18 @@ export class TextStreamParser {
|
||||
export class StreamObjectParser {
|
||||
public parse(chunk: TextStreamPart<CustomAITools>) {
|
||||
switch (chunk.type) {
|
||||
case 'reasoning':
|
||||
case 'text-delta':
|
||||
case 'reasoning-delta': {
|
||||
return { type: 'reasoning' as const, textDelta: chunk.text };
|
||||
}
|
||||
case 'text-delta': {
|
||||
const { type, text: textDelta } = chunk;
|
||||
return { type, textDelta };
|
||||
}
|
||||
case 'tool-call':
|
||||
case 'tool-result': {
|
||||
return chunk;
|
||||
const { type, toolCallId, toolName, input: args } = chunk;
|
||||
const result = 'output' in chunk ? chunk.output : undefined;
|
||||
return { type, toolCallId, toolName, args, result } as StreamObject;
|
||||
}
|
||||
case 'error': {
|
||||
throw toError(chunk.error);
|
||||
|
||||
@@ -52,7 +52,7 @@ export const createBlobReadTool = (
|
||||
return tool({
|
||||
description:
|
||||
'Return the content and basic metadata of a single attachment identified by blobId; more inclined to use search tools rather than this tool.',
|
||||
parameters: z.object({
|
||||
inputSchema: z.object({
|
||||
blob_id: z.string().describe('The target blob in context to read'),
|
||||
chunk: z
|
||||
.number()
|
||||
|
||||
@@ -19,7 +19,7 @@ export const createCodeArtifactTool = (
|
||||
return tool({
|
||||
description:
|
||||
'Generate a single-file HTML snippet (with inline <style> and <script>) that accomplishes the requested functionality. The final HTML should be runnable when saved as an .html file and opened in a browser. Do NOT reference external resources (CSS, JS, images) except through data URIs.',
|
||||
parameters: z.object({
|
||||
inputSchema: z.object({
|
||||
/**
|
||||
* The <title> text that will appear in the browser tab.
|
||||
*/
|
||||
|
||||
@@ -16,7 +16,7 @@ export const createConversationSummaryTool = (
|
||||
return tool({
|
||||
description:
|
||||
'Create a concise, AI-generated summary of the conversation so far—capturing key topics, decisions, and critical details. Use this tool whenever the context becomes lengthy to preserve essential information that might otherwise be lost to truncation in future turns.',
|
||||
parameters: z.object({
|
||||
inputSchema: z.object({
|
||||
focus: z
|
||||
.string()
|
||||
.optional()
|
||||
|
||||
@@ -15,7 +15,7 @@ export const createDocComposeTool = (
|
||||
return tool({
|
||||
description:
|
||||
'Write a new document with markdown content. This tool creates structured markdown content for documents including titles, sections, and formatting.',
|
||||
parameters: z.object({
|
||||
inputSchema: z.object({
|
||||
title: z.string().describe('The title of the document'),
|
||||
userPrompt: z
|
||||
.string()
|
||||
|
||||
@@ -6,6 +6,25 @@ import { AccessController } from '../../../core/permission';
|
||||
import { type PromptService } from '../prompt';
|
||||
import type { CopilotChatOptions, CopilotProviderFactory } from '../providers';
|
||||
|
||||
const CodeEditSchema = z
|
||||
.array(
|
||||
z.object({
|
||||
op: z
|
||||
.string()
|
||||
.describe(
|
||||
'A short description of the change, such as "Bold intro name"'
|
||||
),
|
||||
updates: z
|
||||
.string()
|
||||
.describe(
|
||||
'Markdown block fragments that represent the change, including the block_id and type'
|
||||
),
|
||||
})
|
||||
)
|
||||
.describe(
|
||||
'An array of independent semantic changes to apply to the document.'
|
||||
);
|
||||
|
||||
export const buildContentGetter = (ac: AccessController, doc: DocReader) => {
|
||||
const getDocContent = async (options: CopilotChatOptions, docId?: string) => {
|
||||
if (!options || !docId || !options.user || !options.workspace) {
|
||||
@@ -129,7 +148,7 @@ Example response:
|
||||
You should specify the following arguments before the others: [doc_id], [origin_content]
|
||||
|
||||
`,
|
||||
parameters: z.object({
|
||||
inputSchema: z.object({
|
||||
doc_id: z
|
||||
.string()
|
||||
.describe(
|
||||
@@ -150,33 +169,13 @@ You should specify the following arguments before the others: [doc_id], [origin_
|
||||
'A short, first-person description of the intended edit, clearly summarizing what I will change. For example: "I will translate the steps into English and delete the paragraph explaining the delay." This helps the downstream system understand the purpose of the changes.'
|
||||
),
|
||||
|
||||
code_edit: z.preprocess(
|
||||
val => {
|
||||
// BACKGROUND: LLM sometimes returns a JSON string instead of an array.
|
||||
if (typeof val === 'string') {
|
||||
return JSON.parse(val);
|
||||
}
|
||||
return val;
|
||||
},
|
||||
z
|
||||
.array(
|
||||
z.object({
|
||||
op: z
|
||||
.string()
|
||||
.describe(
|
||||
'A short description of the change, such as "Bold intro name"'
|
||||
),
|
||||
updates: z
|
||||
.string()
|
||||
.describe(
|
||||
'Markdown block fragments that represent the change, including the block_id and type'
|
||||
),
|
||||
})
|
||||
)
|
||||
.describe(
|
||||
'An array of independent semantic changes to apply to the document.'
|
||||
)
|
||||
),
|
||||
code_edit: z.preprocess(val => {
|
||||
// BACKGROUND: LLM sometimes returns a JSON string instead of an array.
|
||||
if (typeof val === 'string') {
|
||||
return JSON.parse(val);
|
||||
}
|
||||
return val;
|
||||
}, CodeEditSchema) as unknown as typeof CodeEditSchema,
|
||||
}),
|
||||
execute: async ({ doc_id, origin_content, code_edit }) => {
|
||||
try {
|
||||
|
||||
@@ -40,7 +40,7 @@ export const createDocKeywordSearchTool = (
|
||||
return tool({
|
||||
description:
|
||||
'Fuzzy search all workspace documents for the exact keyword or phrase supplied and return passages ranked by textual match. Use this tool by default whenever a straightforward term-based or keyword-base lookup is sufficient.',
|
||||
parameters: z.object({
|
||||
inputSchema: z.object({
|
||||
query: z
|
||||
.string()
|
||||
.describe(
|
||||
|
||||
@@ -75,7 +75,7 @@ export const createDocReadTool = (
|
||||
return tool({
|
||||
description:
|
||||
'Return the complete text and basic metadata of a single document identified by docId; use this when the user needs the full content of a specific file rather than a search result.',
|
||||
parameters: z.object({
|
||||
inputSchema: z.object({
|
||||
doc_id: z.string().describe('The target doc to read'),
|
||||
}),
|
||||
execute: async ({ doc_id }) => {
|
||||
|
||||
@@ -104,7 +104,7 @@ export const createDocSemanticSearchTool = (
|
||||
return tool({
|
||||
description:
|
||||
'Retrieve conceptually related passages by performing vector-based semantic similarity search across embedded documents; use this tool only when exact keyword search fails or the user explicitly needs meaning-level matches (e.g., paraphrases, synonyms, broader concepts, recent documents).',
|
||||
parameters: z.object({
|
||||
inputSchema: z.object({
|
||||
query: z
|
||||
.string()
|
||||
.describe(
|
||||
|
||||
@@ -8,7 +8,7 @@ import { toolError } from './error';
|
||||
export const createExaCrawlTool = (config: Config) => {
|
||||
return tool({
|
||||
description: 'Crawl the web url for information',
|
||||
parameters: z.object({
|
||||
inputSchema: z.object({
|
||||
url: z
|
||||
.string()
|
||||
.describe('The URL to crawl (including http:// or https://)'),
|
||||
|
||||
@@ -8,7 +8,7 @@ import { toolError } from './error';
|
||||
export const createExaSearchTool = (config: Config) => {
|
||||
return tool({
|
||||
description: 'Search the web for information',
|
||||
parameters: z.object({
|
||||
inputSchema: z.object({
|
||||
query: z.string().describe('The query to search the web for.'),
|
||||
mode: z
|
||||
.enum(['MUST', 'AUTO'])
|
||||
|
||||
@@ -15,7 +15,7 @@ export const createSectionEditTool = (
|
||||
return tool({
|
||||
description:
|
||||
'Intelligently edit and modify a specific section of a document based on user instructions, with full document context awareness. This tool can refine, rewrite, translate, restructure, or enhance any part of markdown content while preserving formatting, maintaining contextual coherence, and ensuring consistency with the entire document. Perfect for targeted improvements that consider the broader document context.',
|
||||
parameters: z.object({
|
||||
inputSchema: z.object({
|
||||
section: z
|
||||
.string()
|
||||
.describe(
|
||||
|
||||
yarn.lock, 252 changed lines
@@ -921,12 +921,12 @@ __metadata:
|
||||
"@affine/graphql": "workspace:*"
|
||||
"@affine/reader": "workspace:*"
|
||||
"@affine/server-native": "workspace:*"
|
||||
"@ai-sdk/anthropic": "npm:^1.2.12"
|
||||
"@ai-sdk/google": "npm:^1.2.18"
|
||||
"@ai-sdk/google-vertex": "npm:^2.2.23"
|
||||
"@ai-sdk/openai": "npm:^1.3.22"
|
||||
"@ai-sdk/openai-compatible": "npm:^0.2.14"
|
||||
"@ai-sdk/perplexity": "npm:^1.1.9"
|
||||
"@ai-sdk/anthropic": "npm:^2.0.1"
|
||||
"@ai-sdk/google": "npm:^2.0.4"
|
||||
"@ai-sdk/google-vertex": "npm:^3.0.5"
|
||||
"@ai-sdk/openai": "npm:^2.0.10"
|
||||
"@ai-sdk/openai-compatible": "npm:^1.0.5"
|
||||
"@ai-sdk/perplexity": "npm:^2.0.1"
|
||||
"@apollo/server": "npm:^4.11.3"
|
||||
"@aws-sdk/client-s3": "npm:^3.779.0"
|
||||
"@aws-sdk/s3-request-presigner": "npm:^3.779.0"
|
||||
@@ -987,7 +987,7 @@ __metadata:
|
||||
"@types/semver": "npm:^7.5.8"
|
||||
"@types/sinon": "npm:^17.0.3"
|
||||
"@types/supertest": "npm:^6.0.2"
|
||||
ai: "npm:^4.3.4"
|
||||
ai: "npm:^5.0.10"
|
||||
ava: "npm:^6.2.0"
|
||||
bullmq: "npm:^5.40.2"
|
||||
c8: "npm:^10.1.3"
|
||||
@@ -1091,131 +1091,113 @@ __metadata:
|
||||
languageName: unknown
|
||||
linkType: soft
|
||||
|
||||
"@ai-sdk/anthropic@npm:1.2.12, @ai-sdk/anthropic@npm:^1.2.12":
|
||||
version: 1.2.12
|
||||
resolution: "@ai-sdk/anthropic@npm:1.2.12"
|
||||
"@ai-sdk/anthropic@npm:2.0.1, @ai-sdk/anthropic@npm:^2.0.1":
|
||||
version: 2.0.1
|
||||
resolution: "@ai-sdk/anthropic@npm:2.0.1"
|
||||
dependencies:
|
||||
"@ai-sdk/provider": "npm:1.1.3"
|
||||
"@ai-sdk/provider-utils": "npm:2.2.8"
|
||||
"@ai-sdk/provider": "npm:2.0.0"
|
||||
"@ai-sdk/provider-utils": "npm:3.0.1"
|
||||
peerDependencies:
|
||||
zod: ^3.0.0
|
||||
checksum: 10/ee09f00328c88954fba8d00795f6c379091f95b7717bb90292acf32f41b072b2c89b178c1c18dfc23fcec7532e04fcd13233b9a813ce4881e0a16cae20a878c8
|
||||
zod: ^3.25.76 || ^4
|
||||
checksum: 10/aa6fc0be49775e061412c4644b11b33ccf07a983a1353a1960ed3a8dc4236586003f46080b2890ed345c9bdc00f55628719ad8b0246528d8a40e066638840b53
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/google-vertex@npm:^2.2.23":
|
||||
version: 2.2.23
|
||||
resolution: "@ai-sdk/google-vertex@npm:2.2.23"
|
||||
"@ai-sdk/gateway@npm:1.0.4":
|
||||
version: 1.0.4
|
||||
resolution: "@ai-sdk/gateway@npm:1.0.4"
|
||||
dependencies:
|
||||
"@ai-sdk/anthropic": "npm:1.2.12"
|
||||
"@ai-sdk/google": "npm:1.2.18"
|
||||
"@ai-sdk/provider": "npm:1.1.3"
|
||||
"@ai-sdk/provider-utils": "npm:2.2.8"
|
||||
"@ai-sdk/provider": "npm:2.0.0"
|
||||
"@ai-sdk/provider-utils": "npm:3.0.1"
|
||||
peerDependencies:
|
||||
zod: ^3.25.76 || ^4
|
||||
checksum: 10/11b0322a619a922e74ae782ddff5577825f99b34f02889ffefc31652109a644110f6a4413caac047b8760635a880798e99d04a3bd042057013c772435dc4d172
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/google-vertex@npm:^3.0.5":
|
||||
version: 3.0.5
|
||||
resolution: "@ai-sdk/google-vertex@npm:3.0.5"
|
||||
dependencies:
|
||||
"@ai-sdk/anthropic": "npm:2.0.1"
|
||||
"@ai-sdk/google": "npm:2.0.4"
|
||||
"@ai-sdk/provider": "npm:2.0.0"
|
||||
"@ai-sdk/provider-utils": "npm:3.0.1"
|
||||
google-auth-library: "npm:^9.15.0"
|
||||
peerDependencies:
|
||||
zod: ^3.0.0
|
||||
checksum: 10/19b7120879662e3597e95b84d30a587ada24d5fbd4306e4551f88605ae4e7c9d3f52bc60b6ea3bfbfc487a58c0a0ad35d1e058f7408140ea84e2ff86e6a1d052
|
||||
zod: ^3.25.76 || ^4
|
||||
checksum: 10/8f6cdba400e9443548e07940b6f753964d29981a43ab95d2d13e96883bf638c94e3a9a3aba2ce9a59ae4a771ab7ec6f2dac17c3610817f6b2819fbe7d2c296c5
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/google@npm:1.2.18, @ai-sdk/google@npm:^1.2.18":
|
||||
version: 1.2.18
|
||||
resolution: "@ai-sdk/google@npm:1.2.18"
|
||||
"@ai-sdk/google@npm:2.0.4, @ai-sdk/google@npm:^2.0.4":
|
||||
version: 2.0.4
|
||||
resolution: "@ai-sdk/google@npm:2.0.4"
|
||||
dependencies:
|
||||
"@ai-sdk/provider": "npm:1.1.3"
|
||||
"@ai-sdk/provider-utils": "npm:2.2.8"
|
||||
"@ai-sdk/provider": "npm:2.0.0"
|
||||
"@ai-sdk/provider-utils": "npm:3.0.1"
|
||||
peerDependencies:
|
||||
zod: ^3.0.0
|
||||
checksum: 10/e8ff1ea1cae8f6c1c17e5526e3e51a8e584bb60d8e407646594c9b07600e06ef43c85518d08aafd3856aa2d46a1ae88111d6c61532bdf8c917859e0baad23432
|
||||
zod: ^3.25.76 || ^4
|
||||
checksum: 10/f8d778804cc7e6674aa4ff3931e2cecbc95fdd6484669dcb25398f6461cb033372de1c8b0667fa96604fbe59a8cf37b9d1461b66e6a995f82a6ecf3917d589f1
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/openai-compatible@npm:^0.2.14":
|
||||
version: 0.2.14
|
||||
resolution: "@ai-sdk/openai-compatible@npm:0.2.14"
|
||||
"@ai-sdk/openai-compatible@npm:^1.0.5":
|
||||
version: 1.0.5
|
||||
resolution: "@ai-sdk/openai-compatible@npm:1.0.5"
|
||||
dependencies:
|
||||
"@ai-sdk/provider": "npm:1.1.3"
|
||||
"@ai-sdk/provider-utils": "npm:2.2.8"
|
||||
"@ai-sdk/provider": "npm:2.0.0"
|
||||
"@ai-sdk/provider-utils": "npm:3.0.1"
|
||||
peerDependencies:
|
||||
zod: ^3.0.0
|
||||
checksum: 10/a2b9fbe6c9a0a9edbe6c5d91fbb06708088c881060cff7018ce0bb7ca52d8f63a20dd334389099d9ea256482f2c22f9f1ff6be0de836d3af98a27274578f0be6
|
||||
zod: ^3.25.76 || ^4
|
||||
checksum: 10/52437a335a64c3c9993aedad4e85cbfa7876fe073b3dfc543a7478d6d4f63ec5eba0b1c67de317732a70c682a1cbb903c36b2e623e25c15baf7450d677592fff
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/openai@npm:^1.3.22":
|
||||
version: 1.3.22
|
||||
resolution: "@ai-sdk/openai@npm:1.3.22"
|
||||
"@ai-sdk/openai@npm:^2.0.10":
|
||||
version: 2.0.10
|
||||
resolution: "@ai-sdk/openai@npm:2.0.10"
|
||||
dependencies:
|
||||
"@ai-sdk/provider": "npm:1.1.3"
|
||||
"@ai-sdk/provider-utils": "npm:2.2.8"
|
||||
"@ai-sdk/provider": "npm:2.0.0"
|
||||
"@ai-sdk/provider-utils": "npm:3.0.1"
|
||||
peerDependencies:
|
||||
zod: ^3.0.0
|
||||
checksum: 10/65d6bb89eb732f88b9995c0ff3dba5f80b2c996808b3ef9246352a7272f9d2f25576a9af29229f3d6c285c7085888258737224155b0e83b150ee2df819d2d06d
|
||||
zod: ^3.25.76 || ^4
|
||||
checksum: 10/5e07f9ed0f9a5459c6c6c7cc89e4efd6656b0db065b03f2e6ccacac567d84aa11ddd301ee076f4e438ee8819a0eeead45acc2a6ade82877e8445c862af464aa2
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/perplexity@npm:^1.1.9":
|
||||
version: 1.1.9
|
||||
resolution: "@ai-sdk/perplexity@npm:1.1.9"
|
||||
"@ai-sdk/perplexity@npm:^2.0.1":
|
||||
version: 2.0.1
|
||||
resolution: "@ai-sdk/perplexity@npm:2.0.1"
|
||||
dependencies:
|
||||
"@ai-sdk/provider": "npm:1.1.3"
|
||||
"@ai-sdk/provider-utils": "npm:2.2.8"
|
||||
"@ai-sdk/provider": "npm:2.0.0"
|
||||
"@ai-sdk/provider-utils": "npm:3.0.1"
|
||||
peerDependencies:
|
||||
zod: ^3.0.0
|
||||
checksum: 10/186589b4290daaae4ddc1126c856ee84e0d1307d6a8e970fc218975db859175aff4faf93b7f7c0af0e933adfe754f1052bcf0ca3226698ef4865ee08d0f28182
|
||||
zod: ^3.25.76 || ^4
|
||||
checksum: 10/7fe19ce52c8c7031d8f567d7c40c8b2c563838cd283baf4f2e278430d9dbddba0fa41184024d1818cb230143bdc1e5ec065d27dad240974bec16417d896482e2
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/provider-utils@npm:2.2.8":
|
||||
version: 2.2.8
|
||||
resolution: "@ai-sdk/provider-utils@npm:2.2.8"
|
||||
"@ai-sdk/provider-utils@npm:3.0.1":
|
||||
version: 3.0.1
|
||||
resolution: "@ai-sdk/provider-utils@npm:3.0.1"
|
||||
dependencies:
|
||||
"@ai-sdk/provider": "npm:1.1.3"
|
||||
nanoid: "npm:^3.3.8"
|
||||
secure-json-parse: "npm:^2.7.0"
|
||||
peerDependencies:
|
||||
zod: ^3.23.8
|
||||
checksum: 10/3aa8fce97c78d1c78e0c35e82425d4634957dfd16d5d8228994eef5ada71b9d5a1b0b9c2edb81db8cca106d90607c1798970e2e5ec74730cb8058bd7d900144a
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/provider@npm:1.1.3":
|
||||
version: 1.1.3
|
||||
resolution: "@ai-sdk/provider@npm:1.1.3"
|
||||
dependencies:
|
||||
json-schema: "npm:^0.4.0"
|
||||
checksum: 10/b094ca2f08001d4d34a0dcdf9fbe1b6eeedeaf0cf1568ce148d67b04333fd959cca60f888a24522b6d74114eabbc469f762d290d08096adec992a3c6de23756a
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/react@npm:1.2.12":
|
||||
version: 1.2.12
|
||||
resolution: "@ai-sdk/react@npm:1.2.12"
|
||||
dependencies:
|
||||
"@ai-sdk/provider-utils": "npm:2.2.8"
|
||||
"@ai-sdk/ui-utils": "npm:1.2.11"
|
||||
swr: "npm:^2.2.5"
|
||||
throttleit: "npm:2.1.0"
|
||||
peerDependencies:
|
||||
react: ^18 || ^19 || ^19.0.0-rc
|
||||
zod: ^3.23.8
|
||||
peerDependenciesMeta:
|
||||
zod:
|
||||
optional: true
|
||||
checksum: 10/1809d785f8c9df65620576aa04d08d1ca1d1e3905518d833837d5cc5c2d489e31ffecb78c301ac209e354dc4f8b13d48ccea0966d3bbc5b654aab1633c19b6ca
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/ui-utils@npm:1.2.11":
|
||||
version: 1.2.11
|
||||
resolution: "@ai-sdk/ui-utils@npm:1.2.11"
|
||||
dependencies:
|
||||
"@ai-sdk/provider": "npm:1.1.3"
|
||||
"@ai-sdk/provider-utils": "npm:2.2.8"
|
||||
"@ai-sdk/provider": "npm:2.0.0"
|
||||
"@standard-schema/spec": "npm:^1.0.0"
|
||||
eventsource-parser: "npm:^3.0.3"
|
||||
zod-to-json-schema: "npm:^3.24.1"
|
||||
peerDependencies:
|
||||
zod: ^3.23.8
|
||||
checksum: 10/2de445d6babd82a3588dbb396e88d85f4a1e2411fdade017a8f908cd21f99bb97e207a8c8f24ce4174c2cedb53a455b75814d1ee8e20a5074df783cfac57be6e
|
||||
zod: ^3.25.76 || ^4
|
||||
checksum: 10/23f841ff876dcdd3a507acad82e50501784eaa5635364ecc63790c518748309ab5a6b9a18f605ae471778335258917e372de4ad8f45ee94ffa690b7d2ae7ea99
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/provider@npm:2.0.0":
|
||||
version: 2.0.0
|
||||
resolution: "@ai-sdk/provider@npm:2.0.0"
|
||||
dependencies:
|
||||
json-schema: "npm:^0.4.0"
|
||||
checksum: 10/e6d5460f0c52e64033ccc5d20787ab9ff5251646e6263daa76a006367fda8ad527dadc959110113c42796d293d4e669c3ae911062086574cd46f0707357dedb5
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
@@ -13937,6 +13919,13 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@standard-schema/spec@npm:^1.0.0":
|
||||
version: 1.0.0
|
||||
resolution: "@standard-schema/spec@npm:1.0.0"
|
||||
checksum: 10/aee780cc1431888ca4b9aba9b24ffc8f3073fc083acc105e3951481478a2f4dc957796931b2da9e2d8329584cf211e4542275f188296c1cdff3ed44fd93a8bc8
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@storybook/builder-vite@npm:9.0.0":
|
||||
version: 9.0.0
|
||||
resolution: "@storybook/builder-vite@npm:9.0.0"
|
||||
@@ -14867,13 +14856,6 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@types/diff-match-patch@npm:^1.0.36":
|
||||
version: 1.0.36
|
||||
resolution: "@types/diff-match-patch@npm:1.0.36"
|
||||
checksum: 10/7d7ce03422fcc3e79d0cda26e4748aeb176b75ca4b4e5f38459b112bf24660d628424bdb08d330faefa69039d19a5316e7a102a8ab68b8e294c8346790e55113
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@types/doctrine@npm:^0.0.9":
|
||||
version: 0.0.9
|
||||
resolution: "@types/doctrine@npm:0.0.9"
|
||||
@@ -16713,23 +16695,17 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"ai@npm:^4.3.4":
|
||||
version: 4.3.16
|
||||
resolution: "ai@npm:4.3.16"
|
||||
"ai@npm:^5.0.10":
|
||||
version: 5.0.10
|
||||
resolution: "ai@npm:5.0.10"
|
||||
dependencies:
|
||||
"@ai-sdk/provider": "npm:1.1.3"
|
||||
"@ai-sdk/provider-utils": "npm:2.2.8"
|
||||
"@ai-sdk/react": "npm:1.2.12"
|
||||
"@ai-sdk/ui-utils": "npm:1.2.11"
|
||||
"@ai-sdk/gateway": "npm:1.0.4"
|
||||
"@ai-sdk/provider": "npm:2.0.0"
|
||||
"@ai-sdk/provider-utils": "npm:3.0.1"
|
||||
"@opentelemetry/api": "npm:1.9.0"
|
||||
jsondiffpatch: "npm:0.6.0"
|
||||
peerDependencies:
|
||||
react: ^18 || ^19 || ^19.0.0-rc
|
||||
zod: ^3.23.8
|
||||
peerDependenciesMeta:
|
||||
react:
|
||||
optional: true
|
||||
checksum: 10/c48e0b61239708f4fdce2a3f177e21d85dad1e6c28f100bcc06faf00a6bb6884b3683bf8f6354e45d6c207853ca9c22c33141b1aad5cc0d948ed5e50605aa115
|
||||
zod: ^3.25.76 || ^4
|
||||
checksum: 10/c424464f39cd9a875b7cbf1dac8046f9a8a164ac42f1cc25c0bb44597996656e9c2ab18bc518f8802ee3917624c666e26aa120ea8821282b7bd1cb8dc1eca518
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
@@ -20413,13 +20389,6 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"diff-match-patch@npm:^1.0.5":
|
||||
version: 1.0.5
|
||||
resolution: "diff-match-patch@npm:1.0.5"
|
||||
checksum: 10/fd1ab417eba9559bda752a4dfc9a8ac73fa2ca8b146d29d153964b437168e301c09d8a688fae0cd81d32dc6508a4918a94614213c85df760793f44e245173bb6
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"diff@npm:^4.0.1":
|
||||
version: 4.0.2
|
||||
resolution: "diff@npm:4.0.2"
|
||||
@@ -21699,7 +21668,7 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"eventsource-parser@npm:^3.0.0, eventsource-parser@npm:^3.0.1":
|
||||
"eventsource-parser@npm:^3.0.0, eventsource-parser@npm:^3.0.1, eventsource-parser@npm:^3.0.3":
|
||||
version: 3.0.3
|
||||
resolution: "eventsource-parser@npm:3.0.3"
|
||||
checksum: 10/b8f8e79333441ad0eb9299e3fa693ab506892ffc53f0cc1d23134090351cf2d71c8e405a2e879f6acfbd2e17f41d5a00dafba05ff25c82141fc07078ad992187
|
||||
@@ -25109,19 +25078,6 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"jsondiffpatch@npm:0.6.0":
|
||||
version: 0.6.0
|
||||
resolution: "jsondiffpatch@npm:0.6.0"
|
||||
dependencies:
|
||||
"@types/diff-match-patch": "npm:^1.0.36"
|
||||
chalk: "npm:^5.3.0"
|
||||
diff-match-patch: "npm:^1.0.5"
|
||||
bin:
|
||||
jsondiffpatch: bin/jsondiffpatch.js
|
||||
checksum: 10/124b9797c266c693e69f8d23216e64d5ca4b21a4ec10e3a769a7b8cb19602ba62522f9a3d0c55299c1bfbe5ad955ca9ad2852439ca2c6b6316b8f91a5c218e94
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"jsonfile@npm:^4.0.0":
|
||||
version: 4.0.0
|
||||
resolution: "jsonfile@npm:4.0.0"
|
||||
@@ -27939,7 +27895,7 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"nanoid@npm:^3.3.11, nanoid@npm:^3.3.6, nanoid@npm:^3.3.8":
|
||||
"nanoid@npm:^3.3.11, nanoid@npm:^3.3.6":
|
||||
version: 3.3.11
|
||||
resolution: "nanoid@npm:3.3.11"
|
||||
bin:
|
||||
@@ -31695,13 +31651,6 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"secure-json-parse@npm:^2.7.0":
|
||||
version: 2.7.0
|
||||
resolution: "secure-json-parse@npm:2.7.0"
|
||||
checksum: 10/974386587060b6fc5b1ac06481b2f9dbbb0d63c860cc73dc7533f27835fdb67b0ef08762dbfef25625c15bc0a0c366899e00076cb0d556af06b71e22f1dede4c
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"selderee@npm:^0.11.0":
|
||||
version: 0.11.0
|
||||
resolution: "selderee@npm:0.11.0"
|
||||
@@ -33479,13 +33428,6 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"throttleit@npm:2.1.0":
|
||||
version: 2.1.0
|
||||
resolution: "throttleit@npm:2.1.0"
|
||||
checksum: 10/a2003947aafc721c4a17e6f07db72dc88a64fa9bba0f9c659f7997d30f9590b3af22dadd6a41851e0e8497d539c33b2935c2c7919cf4255922509af6913c619b
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"through2@npm:^4.0.2":
|
||||
version: 4.0.2
|
||||
resolution: "through2@npm:4.0.2"
|
||||