Mirror of https://github.com/getcompanion-ai/co-mono.git
Synced 2026-04-15 13:03:42 +00:00

chore(ai): regenerate models

This commit is contained in:
parent 0a7537bf86
commit cc8c51d9ae

1 changed file with 55 additions and 89 deletions
@@ -3645,6 +3645,23 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 16384,
   } satisfies Model<"openai-responses">,
+  "gpt-5.2-codex": {
+    id: "gpt-5.2-codex",
+    name: "GPT-5.2 Codex",
+    api: "openai-responses",
+    provider: "openai",
+    baseUrl: "https://api.openai.com/v1",
+    reasoning: true,
+    input: ["text", "image"],
+    cost: {
+      input: 1.75,
+      output: 14,
+      cacheRead: 0.175,
+      cacheWrite: 0,
+    },
+    contextWindow: 400000,
+    maxTokens: 128000,
+  } satisfies Model<"openai-responses">,
   "gpt-5.2-pro": {
     id: "gpt-5.2-pro",
     name: "GPT-5.2 Pro",
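For context on the entries this commit touches, here is a minimal sketch of the per-entry shape the hunks imply. The field names and literal values come straight from the diff; the type names, the cost units (assumed here to be USD per million tokens), and co-mono's actual Model declaration are assumptions, as is how the entries are grouped inside MODELS.

// Illustrative only: the entry shape implied by this diff, not co-mono's real type.
type Api = "openai-responses" | "openai-completions" | "anthropic-messages";

interface ModelEntry<A extends Api> {
  id: string;
  name: string;
  api: A;
  provider: string;            // e.g. "openai", "openrouter", "vercel-ai-gateway"
  baseUrl: string;
  reasoning: boolean;
  input: ("text" | "image")[]; // supported input modalities
  cost: {
    input: number;             // assumed USD per 1M input tokens
    output: number;            // assumed USD per 1M output tokens
    cacheRead: number;
    cacheWrite: number;
  };
  contextWindow: number;       // prompt window, in tokens
  maxTokens: number;           // maximum output tokens
}

// The entry added above, checked against that sketch:
const gpt52Codex = {
  id: "gpt-5.2-codex",
  name: "GPT-5.2 Codex",
  api: "openai-responses",
  provider: "openai",
  baseUrl: "https://api.openai.com/v1",
  reasoning: true,
  input: ["text", "image"],
  cost: { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 },
  contextWindow: 400000,
  maxTokens: 128000,
} satisfies ModelEntry<"openai-responses">;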
@@ -4535,23 +4552,6 @@ export const MODELS = {
     contextWindow: 200000,
     maxTokens: 8192,
   } satisfies Model<"openai-completions">,
-  "anthropic/claude-3.5-haiku-20241022": {
-    id: "anthropic/claude-3.5-haiku-20241022",
-    name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text", "image"],
-    cost: {
-      input: 0.7999999999999999,
-      output: 4,
-      cacheRead: 0.08,
-      cacheWrite: 1,
-    },
-    contextWindow: 200000,
-    maxTokens: 8192,
-  } satisfies Model<"openai-completions">,
   "anthropic/claude-3.5-sonnet": {
     id: "anthropic/claude-3.5-sonnet",
     name: "Anthropic: Claude 3.5 Sonnet",
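This hunk and the three like it below drop entries from the registry outright. Assuming MODELS is an object literal keyed by model id, as the hunks suggest, lookups constrained to its keys surface the removals at compile time rather than returning undefined at runtime; a hedged sketch with a cut-down stand-in registry, not the real table:

// Stand-in registry, not co-mono's actual MODELS table.
const MODELS = {
  "anthropic/claude-3.5-sonnet": { contextWindow: 200000, maxTokens: 8192 },
  // "anthropic/claude-3.5-haiku-20241022" was removed by this commit.
} as const;

type ModelId = keyof typeof MODELS;

// Keyed lookups typed against ModelId turn references to removed
// entries into type errors instead of silent undefined results.
function getModel(id: ModelId) {
  return MODELS[id];
}

getModel("anthropic/claude-3.5-sonnet");            // ok
// getModel("anthropic/claude-3.5-haiku-20241022"); // type error after this commit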
@@ -5453,23 +5453,6 @@ export const MODELS = {
     contextWindow: 16384,
     maxTokens: 16384,
   } satisfies Model<"openai-completions">,
-  "meta-llama/llama-3.2-3b-instruct": {
-    id: "meta-llama/llama-3.2-3b-instruct",
-    name: "Meta: Llama 3.2 3B Instruct",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 0.02,
-      output: 0.02,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 131072,
-    maxTokens: 16384,
-  } satisfies Model<"openai-completions">,
   "meta-llama/llama-3.3-70b-instruct": {
     id: "meta-llama/llama-3.3-70b-instruct",
     name: "Meta: Llama 3.3 70B Instruct",
@@ -5963,23 +5946,6 @@ export const MODELS = {
     contextWindow: 131072,
     maxTokens: 131072,
   } satisfies Model<"openai-completions">,
-  "mistralai/mistral-small-3.1-24b-instruct:free": {
-    id: "mistralai/mistral-small-3.1-24b-instruct:free",
-    name: "Mistral: Mistral Small 3.1 24B (free)",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text", "image"],
-    cost: {
-      input: 0,
-      output: 0,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 128000,
-    maxTokens: 4096,
-  } satisfies Model<"openai-completions">,
   "mistralai/mistral-small-3.2-24b-instruct": {
     id: "mistralai/mistral-small-3.2-24b-instruct",
     name: "Mistral: Mistral Small 3.2 24B",
@@ -6932,6 +6898,23 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 16384,
   } satisfies Model<"openai-completions">,
+  "openai/gpt-5.2-codex": {
+    id: "openai/gpt-5.2-codex",
+    name: "OpenAI: GPT-5.2-Codex",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: true,
+    input: ["text", "image"],
+    cost: {
+      input: 1.75,
+      output: 14,
+      cacheRead: 0.175,
+      cacheWrite: 0,
+    },
+    contextWindow: 400000,
+    maxTokens: 128000,
+  } satisfies Model<"openai-completions">,
   "openai/gpt-5.2-pro": {
     id: "openai/gpt-5.2-pro",
     name: "OpenAI: GPT-5.2 Pro",
@@ -7510,23 +7493,6 @@ export const MODELS = {
     contextWindow: 40960,
     maxTokens: 40960,
   } satisfies Model<"openai-completions">,
-  "qwen/qwen3-4b:free": {
-    id: "qwen/qwen3-4b:free",
-    name: "Qwen: Qwen3 4B (free)",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: true,
-    input: ["text"],
-    cost: {
-      input: 0,
-      output: 0,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 40960,
-    maxTokens: 4096,
-  } satisfies Model<"openai-completions">,
   "qwen/qwen3-8b": {
     id: "qwen/qwen3-8b",
     name: "Qwen: Qwen3 8B",
@@ -7536,13 +7502,13 @@ export const MODELS = {
     reasoning: true,
     input: ["text"],
     cost: {
-      input: 0.035,
-      output: 0.13799999999999998,
+      input: 0.049999999999999996,
+      output: 0.25,
       cacheRead: 0,
       cacheWrite: 0,
     },
-    contextWindow: 128000,
-    maxTokens: 20000,
+    contextWindow: 32000,
+    maxTokens: 8192,
   } satisfies Model<"openai-completions">,
   "qwen/qwen3-coder": {
     id: "qwen/qwen3-coder",
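Unlike the other hunks, the qwen/qwen3-8b change edits an existing entry in place: input pricing moves from 0.035 to roughly 0.05 and output pricing from roughly 0.138 to 0.25, while contextWindow drops from 128000 to 32000 and maxTokens from 20000 to 8192. Assuming the cost fields are USD per million tokens (the units are not stated in the diff), the repricing compares as follows for a sample request:

// Hypothetical cost helper; assumes cost.input and cost.output are USD per 1M tokens.
interface Cost {
  input: number;
  output: number;
}

function requestCostUsd(cost: Cost, inputTokens: number, outputTokens: number): number {
  return (inputTokens / 1_000_000) * cost.input + (outputTokens / 1_000_000) * cost.output;
}

const oldPricing: Cost = { input: 0.035, output: 0.13799999999999998 };
const newPricing: Cost = { input: 0.049999999999999996, output: 0.25 };

// Example: 10k input tokens, 2k output tokens.
console.log(requestCostUsd(oldPricing, 10_000, 2_000)); // ~0.000626 USD
console.log(requestCostUsd(newPricing, 10_000, 2_000)); // ~0.001 USD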
@@ -7629,23 +7595,6 @@ export const MODELS = {
     contextWindow: 262144,
     maxTokens: 65536,
   } satisfies Model<"openai-completions">,
-  "qwen/qwen3-coder:free": {
-    id: "qwen/qwen3-coder:free",
-    name: "Qwen: Qwen3 Coder 480B A35B (free)",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 0,
-      output: 0,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 262000,
-    maxTokens: 262000,
-  } satisfies Model<"openai-completions">,
   "qwen/qwen3-max": {
     id: "qwen/qwen3-max",
     name: "Qwen: Qwen3 Max",
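Three of the removed entries (mistral-small-3.1-24b-instruct:free, qwen3-4b:free, qwen3-coder:free) are zero-cost ":free" variants. If downstream code special-cases free routes, a predicate along these lines keeps working whether it keys off the id suffix or the zero cost fields; the helper is illustrative and not part of co-mono:

// Illustrative predicate; the entry shape is assumed from the diff.
interface PricedEntry {
  id: string;
  cost: { input: number; output: number };
}

function isFreeRoute(entry: PricedEntry): boolean {
  return entry.id.endsWith(":free") || (entry.cost.input === 0 && entry.cost.output === 0);
}

console.log(isFreeRoute({ id: "qwen/qwen3-coder:free", cost: { input: 0, output: 0 } })); // true
console.log(isFreeRoute({ id: "qwen/qwen3-8b", cost: { input: 0.05, output: 0.25 } }));   // false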
@@ -9773,6 +9722,23 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 16384,
   } satisfies Model<"anthropic-messages">,
+  "openai/gpt-5.2-codex": {
+    id: "openai/gpt-5.2-codex",
+    name: "GPT-5.2-Codex",
+    api: "anthropic-messages",
+    provider: "vercel-ai-gateway",
+    baseUrl: "https://ai-gateway.vercel.sh",
+    reasoning: true,
+    input: ["text", "image"],
+    cost: {
+      input: 1.75,
+      output: 14,
+      cacheRead: 0.175,
+      cacheWrite: 0,
+    },
+    contextWindow: 400000,
+    maxTokens: 128000,
+  } satisfies Model<"anthropic-messages">,
   "openai/gpt-5.2-pro": {
     id: "openai/gpt-5.2-pro",
     name: "GPT 5.2 
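With this final hunk, the same underlying model is reachable through three registry entries with identical pricing: "gpt-5.2-codex" (provider "openai", openai-responses), "openai/gpt-5.2-codex" (provider "openrouter", openai-completions), and "openai/gpt-5.2-codex" (provider "vercel-ai-gateway", anthropic-messages). How co-mono groups those entries inside MODELS is not visible in the diff, so the sketch below works over a flat list of entry values; the helper is illustrative only:

// Illustrative routing helper over entries shaped like the ones in this diff.
interface RouteEntry {
  id: string;
  provider: string;
  api: string;
  cost: { input: number; output: number };
}

const entries: RouteEntry[] = [
  { id: "gpt-5.2-codex", provider: "openai", api: "openai-responses", cost: { input: 1.75, output: 14 } },
  { id: "openai/gpt-5.2-codex", provider: "openrouter", api: "openai-completions", cost: { input: 1.75, output: 14 } },
  { id: "openai/gpt-5.2-codex", provider: "vercel-ai-gateway", api: "anthropic-messages", cost: { input: 1.75, output: 14 } },
];

// Collect every route that resolves to the same underlying model,
// whether the id is bare or prefixed with the upstream vendor.
function routesFor(model: string): RouteEntry[] {
  return entries.filter((e) => e.id === model || e.id.endsWith(`/${model}`));
}

console.log(routesFor("gpt-5.2-codex").map((e) => e.provider));
// -> ["openai", "openrouter", "vercel-ai-gateway"]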