chore: cleanup outdated model & upgrade model (#7739)

Author: darkskygit
Date: 2024-08-05 10:13:33 +00:00
Parent: e6e9f7d4c7
Commit: 0acc1bd9e8
8 changed files with 16 additions and 44 deletions

Changed file: copilot prompt definitions (the workflows/actions Prompt[] lists)

@@ -278,18 +278,6 @@ const workflows: Prompt[] = [
 ];
 const actions: Prompt[] = [
-  {
-    name: 'debug:action:gpt4',
-    action: 'text',
-    model: 'gpt-4o',
-    messages: [],
-  },
-  {
-    name: 'debug:action:vision4',
-    action: 'text',
-    model: 'gpt-4o',
-    messages: [],
-  },
   {
     name: 'debug:action:dalle3',
     action: 'image',
@@ -302,12 +290,6 @@ const actions: Prompt[] = [
     model: 'lcm-sd15-i2i',
     messages: [],
   },
-  {
-    name: 'debug:action:fal-sdturbo',
-    action: 'image',
-    model: 'fast-turbo-diffusion',
-    messages: [],
-  },
   {
     name: 'debug:action:fal-upscaler',
     action: 'Clearer',
@@ -332,14 +314,14 @@ const actions: Prompt[] = [
     messages: [],
   },
   {
-    name: 'debug:action:fal-summary-caption',
+    name: 'Generate a caption',
     action: 'Generate a caption',
-    model: 'llava-next',
+    model: 'gpt-4o-mini',
     messages: [
       {
         role: 'user',
         content:
-          'Please understand this image and generate a short caption. Limit it to 20 words. {{content}}',
+          'Please understand this image and generate a short caption that can summarize the content of the image. Limit it to up 20 words. {{content}}',
       },
     ],
   },
@@ -393,7 +375,7 @@ content: {{content}}`,
   {
     name: 'Explain this image',
     action: 'Explain this image',
-    model: 'gpt-4-vision-preview',
+    model: 'gpt-4o',
     messages: [
       {
         role: 'user',
@@ -692,7 +674,7 @@ content: {{content}}`,
   {
     name: 'Make it real',
     action: 'Make it real',
-    model: 'gpt-4-vision-preview',
+    model: 'gpt-4o',
     messages: [
       {
         role: 'user',
@@ -731,7 +713,7 @@ content: {{content}}`,
   {
     name: 'Make it real with text',
     action: 'Make it real with text',
-    model: 'gpt-4-vision-preview',
+    model: 'gpt-4o',
     messages: [
       {
         role: 'user',
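For context when reading the prompt hunks above: each entry pairs a name/action with a model id and a list of chat messages, where `{{content}}` is a placeholder substituted at request time. The sketch below is an assumption about that shape, not the project's actual `Prompt` type definition.

```ts
// Sketch only: field names are taken from the entries above; the type names
// and any extra fields (params, attachments, ...) are assumptions.
interface PromptMessage {
  role: 'system' | 'assistant' | 'user';
  content: string; // may contain a {{content}} placeholder filled at request time
}

interface Prompt {
  name: string; // registry key, e.g. 'Generate a caption'
  action: string; // action label, often the same as `name`
  model: string; // provider model id, e.g. 'gpt-4o-mini' or 'gpt-4o'
  messages: PromptMessage[];
}
```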

Changed file: OpenAIProvider (copilot OpenAI provider)

@@ -43,9 +43,6 @@ export class OpenAIProvider
     // text to text
     'gpt-4o',
     'gpt-4o-mini',
-    'gpt-4-vision-preview',
-    'gpt-4-turbo-preview',
-    'gpt-3.5-turbo',
     // embeddings
     'text-embedding-3-large',
     'text-embedding-3-small',
@@ -203,7 +200,7 @@ export class OpenAIProvider
   // ====== text to text ======
   async generateText(
     messages: PromptMessage[],
-    model: string = 'gpt-3.5-turbo',
+    model: string = 'gpt-4o-mini',
     options: CopilotChatOptions = {}
   ): Promise<string> {
     this.checkParams({ messages, model, options });
@@ -232,7 +229,7 @@ export class OpenAIProvider
   async *generateTextStream(
     messages: PromptMessage[],
-    model: string = 'gpt-3.5-turbo',
+    model: string = 'gpt-4o-mini',
     options: CopilotChatOptions = {}
   ): AsyncIterable<string> {
     this.checkParams({ messages, model, options });
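Both `generateText` and `generateTextStream` now default to `gpt-4o-mini` when no model is passed, replacing `gpt-3.5-turbo`. A rough standalone sketch of what that default amounts to against the OpenAI SDK; the client setup and helper name here are assumptions, not the provider's actual code:

```ts
import OpenAI from 'openai';

const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

// Callers that omit `model` now get 'gpt-4o-mini' (previously 'gpt-3.5-turbo').
async function generateText(
  messages: OpenAI.ChatCompletionMessageParam[],
  model = 'gpt-4o-mini'
): Promise<string> {
  const completion = await client.chat.completions.create({ model, messages });
  return completion.choices[0]?.message?.content ?? '';
}
```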

Changed file: copilot model types (AvailableModels enum and getTokenEncoder)

@@ -8,9 +8,7 @@ import type { ChatPrompt } from './prompt';
 export enum AvailableModels {
   // text to text
   Gpt4Omni = 'gpt-4o',
-  Gpt4VisionPreview = 'gpt-4-vision-preview',
-  Gpt4TurboPreview = 'gpt-4-turbo-preview',
-  Gpt35Turbo = 'gpt-3.5-turbo',
+  Gpt4OmniMini = 'gpt-4o-mini',
   // embeddings
   TextEmbedding3Large = 'text-embedding-3-large',
   TextEmbedding3Small = 'text-embedding-3-small',
@@ -34,7 +32,8 @@ export function getTokenEncoder(model?: string | null): Tokenizer | null {
     // dalle don't need to calc the token
     return null;
   } else {
-    return fromModelName('gpt-4-turbo-preview');
+    // c100k based model
+    return fromModelName('gpt-4');
   }
 }
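With the `gpt-4-turbo-preview` entry gone, `getTokenEncoder` now falls back to the `gpt-4` encoder for every non-DALL·E model; both names resolve to the same `cl100k_base` encoding, so token counts are unchanged. A rough sketch of that fallback, using `js-tiktoken` as an assumed stand-in for the project's `fromModelName` binding:

```ts
import { encodingForModel } from 'js-tiktoken'; // assumed stand-in for fromModelName

// Image models have no meaningful token count; every other model is counted
// with the cl100k_base encoding that 'gpt-4' resolves to.
function countTokens(model: string, text: string): number | null {
  if (model.startsWith('dall-e')) return null;
  return encodingForModel('gpt-4').encode(text).length;
}
```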

Changed file: GraphQL schema (CopilotModels enum)

@@ -52,9 +52,7 @@ type CopilotMessageNotFoundDataType {
 enum CopilotModels {
   DallE3
   Gpt4Omni
-  Gpt4TurboPreview
-  Gpt4VisionPreview
-  Gpt35Turbo
+  Gpt4OmniMini
   TextEmbedding3Large
   TextEmbedding3Small
   TextEmbeddingAda002

Changed file: MockCopilotTestProvider (copilot test mock)

@@ -91,7 +91,7 @@ export class MockCopilotTestProvider
   override async *generateTextStream(
     messages: PromptMessage[],
-    model: string = 'gpt-3.5-turbo',
+    model: string = 'gpt-4o-mini',
     options: CopilotChatOptions = {}
   ): AsyncIterable<string> {
     this.checkParams({ messages, model, options });
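The mock mirrors the provider's streaming signature, so tests that omit the model now exercise the `gpt-4o-mini` default. A usage sketch of how a caller consumes the `AsyncIterable<string>` stream; the provider value and message shape here are assumptions:

```ts
// Sketch: consuming generateTextStream. With `model` omitted, the stream now
// comes from 'gpt-4o-mini'.
async function printStream(provider: {
  generateTextStream(
    messages: { role: 'user'; content: string }[]
  ): AsyncIterable<string>;
}): Promise<void> {
  for await (const chunk of provider.generateTextStream([
    { role: 'user', content: 'Write a haiku about autumn.' },
  ])) {
    process.stdout.write(chunk);
  }
}
```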