diff --git a/apps/core/src/bootstrap/register-plugins.ts b/apps/core/src/bootstrap/register-plugins.ts
index d40970dc88..0bacbe9f7a 100644
--- a/apps/core/src/bootstrap/register-plugins.ts
+++ b/apps/core/src/bootstrap/register-plugins.ts
@@ -76,7 +76,7 @@ await Promise.all(
globalThis.__pluginPackageJson__.push(packageJson);
logger.debug(`registering plugin ${pluginName}`);
logger.debug(`package.json: ${packageJson}`);
- if (!release) {
+ if (!release && process.env.NODE_ENV === 'production') {
return Promise.resolve();
}
const pluginCompartment = new Compartment(createGlobalThis(), {});
diff --git a/apps/core/src/components/page-detail-editor.tsx b/apps/core/src/components/page-detail-editor.tsx
index 3ceb3e52f9..0f1a3c2753 100644
--- a/apps/core/src/components/page-detail-editor.tsx
+++ b/apps/core/src/components/page-detail-editor.tsx
@@ -191,7 +191,12 @@ const LayoutPanel = memo(function LayoutPanel(
-
+
diff --git a/plugins/copilot/src/UI/header-item.tsx b/plugins/copilot/src/UI/header-item.tsx
index e312e6f390..4a50670ecc 100644
--- a/plugins/copilot/src/UI/header-item.tsx
+++ b/plugins/copilot/src/UI/header-item.tsx
@@ -17,7 +17,7 @@ export const HeaderItem = (): ReactElement => {
return {
direction: 'horizontal',
first: 'editor',
- second: '@affine/copilot',
+ second: '@affine/copilot-plugin',
splitPercentage: 70,
};
} else {
diff --git a/plugins/copilot/src/core/chat.ts b/plugins/copilot/src/core/chat.ts
index 9654ba5139..33b2bcc201 100644
--- a/plugins/copilot/src/core/chat.ts
+++ b/plugins/copilot/src/core/chat.ts
@@ -11,22 +11,29 @@ import {
import { IndexedDBChatMessageHistory } from './langchain/message-history';
import { chatPrompt, followupQuestionPrompt } from './prompts';
+import { followupQuestionParser } from './prompts/output-parser';
-declare global {
- interface WindowEventMap {
- 'llm-start': CustomEvent;
- 'llm-new-token': CustomEvent<{ token: string }>;
- }
-}
+type ChatAI = {
+ // Core chat AI
+ conversationChain: ConversationChain;
+ // Followup AI, used to generate followup questions
+ followupChain: LLMChain;
+ // Chat history, used to store messages
+ chatHistory: IndexedDBChatMessageHistory;
+};
+
+export type ChatAIConfig = {
+ events: {
+ llmStart: () => void;
+ llmNewToken: (token: string) => void;
+ };
+};
export async function createChatAI(
room: string,
- openAIApiKey: string
-): Promise<{
- conversationChain: ConversationChain;
- followupChain: LLMChain;
- chatHistory: IndexedDBChatMessageHistory;
-}> {
+ openAIApiKey: string,
+ config: ChatAIConfig
+): Promise<ChatAI> {
if (!openAIApiKey) {
console.warn('OpenAI API key not set, chat will not work');
}
@@ -44,25 +51,11 @@ export async function createChatAI(
openAIApiKey: openAIApiKey,
callbacks: [
{
- async handleLLMStart(llm, prompts, runId, parentRunId, extraParams) {
- console.log(
- 'handleLLMStart',
- llm,
- prompts,
- runId,
- parentRunId,
- extraParams
- );
- window.dispatchEvent(new CustomEvent('llm-start'));
+ async handleLLMStart() {
+ config.events.llmStart();
},
- async handleLLMNewToken(token, runId, parentRunId) {
- console.log('handleLLMNewToken', token, runId, parentRunId);
- window.dispatchEvent(
- new CustomEvent('llm-new-token', { detail: { token } })
- );
- },
- async handleLLMEnd(output, runId, parentRunId) {
- console.log('handleLLMEnd', output, runId, parentRunId);
+ async handleLLMNewToken(token) {
+ config.events.llmNewToken(token);
},
},
],
@@ -77,6 +70,9 @@ export async function createChatAI(
const followupPromptTemplate = new PromptTemplate({
template: followupQuestionPrompt,
inputVariables: ['human_conversation', 'ai_conversation'],
+ partialVariables: {
+ format_instructions: followupQuestionParser.getFormatInstructions(),
+ },
});
const followupChain = new LLMChain({
@@ -101,5 +97,5 @@ export async function createChatAI(
conversationChain,
followupChain,
chatHistory,
- } as const;
+ };
}
diff --git a/plugins/copilot/src/core/hooks/index.ts b/plugins/copilot/src/core/hooks/index.ts
index a9cc42b8d4..ec4563e571 100644
--- a/plugins/copilot/src/core/hooks/index.ts
+++ b/plugins/copilot/src/core/hooks/index.ts
@@ -1,51 +1,56 @@
-import type { IndexedDBChatMessageHistory } from '@affine/copilot/core/langchain/message-history';
import { atom, useAtomValue } from 'jotai';
import { atomWithDefault, atomWithStorage } from 'jotai/utils';
import type { WritableAtom } from 'jotai/vanilla';
+import type { PrimitiveAtom } from 'jotai/vanilla';
import type { LLMChain } from 'langchain/chains';
import { type ConversationChain } from 'langchain/chains';
import { type BufferMemory } from 'langchain/memory';
import type { BaseMessage } from 'langchain/schema';
import { AIMessage } from 'langchain/schema';
import { HumanMessage } from 'langchain/schema';
-import { z } from 'zod';
+import type { ChatAIConfig } from '../chat';
import { createChatAI } from '../chat';
-
-const followupResponseSchema = z.array(z.string());
+import type { IndexedDBChatMessageHistory } from '../langchain/message-history';
+import { followupQuestionParser } from '../prompts/output-parser';
export const openAIApiKeyAtom = atomWithStorage<string | null>(
'com.affine.copilot.openai.token',
null
);
-export const chatAtom = atom(async get => {
- const openAIApiKey = get(openAIApiKeyAtom);
- if (!openAIApiKey) {
- throw new Error('OpenAI API key not set, chat will not work');
- }
- return createChatAI('default-copilot', openAIApiKey);
-});
-
+const conversationBaseWeakMap = new WeakMap<
+ ConversationChain,
+  PrimitiveAtom<BaseMessage[]>
+>();
const conversationWeakMap = new WeakMap<
ConversationChain,
  WritableAtom<BaseMessage[], [string], Promise<void>>
>();
-const getConversationAtom = (chat: ConversationChain) => {
- if (conversationWeakMap.has(chat)) {
- return conversationWeakMap.get(chat) as WritableAtom<
- BaseMessage[],
- [string],
- Promise<void>
- >;
+export const chatAtom = atom(async get => {
+ const openAIApiKey = get(openAIApiKeyAtom);
+ if (!openAIApiKey) {
+ throw new Error('OpenAI API key not set, chat will not work');
}
- const conversationBaseAtom = atom<BaseMessage[]>([]);
- conversationBaseAtom.onMount = setAtom => {
- if (!chat) {
- throw new Error();
- }
- const memory = chat.memory as BufferMemory;
+ const events: ChatAIConfig['events'] = {
+ llmStart: () => {
+ throw new Error('llmStart not set');
+ },
+ llmNewToken: () => {
+ throw new Error('llmNewToken not set');
+ },
+ };
+ const chatAI = await createChatAI('default-copilot', openAIApiKey, {
+ events,
+ });
+ getOrCreateConversationAtom(chatAI.conversationChain);
+ const baseAtom = conversationBaseWeakMap.get(chatAI.conversationChain);
+ if (!baseAtom) {
+ throw new TypeError();
+ }
+ baseAtom.onMount = setAtom => {
+ const memory = chatAI.conversationChain.memory as BufferMemory;
memory.chatHistory
.getMessages()
.then(messages => {
@@ -54,23 +59,27 @@ const getConversationAtom = (chat: ConversationChain) => {
.catch(err => {
console.error(err);
});
- const llmStart = (): void => {
+ events.llmStart = () => {
setAtom(conversations => [...conversations, new AIMessage('')]);
};
- const llmNewToken = (event: CustomEvent<{ token: string }>): void => {
+ events.llmNewToken = token => {
setAtom(conversations => {
const last = conversations[conversations.length - 1] as AIMessage;
- last.content += event.detail.token;
+ last.content += token;
return [...conversations];
});
};
- window.addEventListener('llm-start', llmStart);
- window.addEventListener('llm-new-token', llmNewToken);
- return () => {
- window.removeEventListener('llm-start', llmStart);
- window.removeEventListener('llm-new-token', llmNewToken);
- };
};
+ return chatAI;
+});
+
+const getOrCreateConversationAtom = (chat: ConversationChain) => {
+ if (conversationWeakMap.has(chat)) {
+ // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+ return conversationWeakMap.get(chat)!;
+ }
+ const conversationBaseAtom = atom<BaseMessage[]>([]);
+ conversationBaseWeakMap.set(chat, conversationBaseAtom);
const conversationAtom = atom<BaseMessage[], [string], Promise<void>>(
get => get(conversationBaseAtom),
@@ -105,7 +114,9 @@ const getConversationAtom = (chat: ConversationChain) => {
const followingUpWeakMap = new WeakMap<
LLMChain,
{
- questionsAtom: ReturnType<typeof atomWithDefault<Promise<string[]>>>;
+ questionsAtom: ReturnType<
+ typeof atomWithDefault<Promise<string[]> | string[]>
+ >;
generateChatAtom: WritableAtom<null, [], Promise<void>>;
}
>();
@@ -115,12 +126,10 @@ const getFollowingUpAtoms = (
chatHistory: IndexedDBChatMessageHistory
) => {
if (followingUpWeakMap.has(followupLLMChain)) {
- return followingUpWeakMap.get(followupLLMChain) as {
- questionsAtom: ReturnType<typeof atomWithDefault<Promise<string[]>>>;
- generateChatAtom: WritableAtom<null, [], Promise<void>>;
- };
+ // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+ return followingUpWeakMap.get(followupLLMChain)!;
}
- const baseAtom = atomWithDefault<Promise<string[]>>(async () => {
+ const baseAtom = atomWithDefault<Promise<string[]> | string[]>(async () => {
return chatHistory?.getFollowingUp() ?? [];
});
const setAtom = atom(null, async (get, set) => {
@@ -137,10 +146,9 @@ const getFollowingUpAtoms = (
ai_conversation: aiMessage,
human_conversation: humanMessage,
});
- const followingUp = JSON.parse(response.text);
- followupResponseSchema.parse(followingUp);
- set(baseAtom, followingUp);
- chatHistory.saveFollowingUp(followingUp).catch(() => {
+ const followingUp = await followupQuestionParser.parse(response.text);
+ set(baseAtom, followingUp.followupQuestions);
+ chatHistory.saveFollowingUp(followingUp.followupQuestions).catch(() => {
console.error('failed to save followup');
});
});
@@ -155,11 +163,11 @@ const getFollowingUpAtoms = (
};
export function useChatAtoms(): {
- conversationAtom: ReturnType<typeof getConversationAtom>;
+ conversationAtom: ReturnType<typeof getOrCreateConversationAtom>;
followingUpAtoms: ReturnType;
} {
const chat = useAtomValue(chatAtom);
- const conversationAtom = getConversationAtom(chat.conversationChain);
+ const conversationAtom = getOrCreateConversationAtom(chat.conversationChain);
const followingUpAtoms = getFollowingUpAtoms(
chat.followupChain,
chat.chatHistory
diff --git a/plugins/copilot/src/core/prompts/index.ts b/plugins/copilot/src/core/prompts/index.ts
index 0e745735c7..a17583d26c 100644
--- a/plugins/copilot/src/core/prompts/index.ts
+++ b/plugins/copilot/src/core/prompts/index.ts
@@ -18,12 +18,10 @@ You can only give one reply for each conversation turn.
`;
export const followupQuestionPrompt = `Rules you must follow:
-- You only respond in JSON format
-- Read the following conversation between AI and Human and generate at most 3 follow-up messages or questions the Human can ask
-- Your response MUST be a valid JSON array of strings like this: ["some question", "another question"]
-- Each message in your response should be concise, no more than 15 words
-- You MUST reply in the same written language as the conversation
-- Don't output anything other text
+Read the following conversation between AI and Human and generate at most 3 follow-up messages or questions the Human can ask
+Each message in your response should be concise, no more than 15 words
+You MUST reply in the same written language as the conversation
+{format_instructions}
The conversation is inside triple quotes:
\`\`\`
Human: {human_conversation}
diff --git a/plugins/copilot/src/core/prompts/output-parser.ts b/plugins/copilot/src/core/prompts/output-parser.ts
new file mode 100644
index 0000000000..0df42f35fa
--- /dev/null
+++ b/plugins/copilot/src/core/prompts/output-parser.ts
@@ -0,0 +1,8 @@
+import { StructuredOutputParser } from 'langchain/output_parsers';
+import { z } from 'zod';
+
+export const followupQuestionParser = StructuredOutputParser.fromZodSchema(
+ z.object({
+ followupQuestions: z.array(z.string()),
+ })
+);