diff --git a/packages/ai/scripts/generate-models.ts b/packages/ai/scripts/generate-models.ts index 05396fdb..ce5c2cf1 100644 --- a/packages/ai/scripts/generate-models.ts +++ b/packages/ai/scripts/generate-models.ts @@ -53,7 +53,8 @@ const COPILOT_STATIC_HEADERS = { "Copilot-Integration-Id": "vscode-chat", } as const; -const AI_GATEWAY_BASE_URL = "https://ai-gateway.vercel.sh/v1"; +const AI_GATEWAY_MODELS_URL = "https://ai-gateway.vercel.sh/v1"; +const AI_GATEWAY_BASE_URL = "https://ai-gateway.vercel.sh"; async function fetchOpenRouterModels(): Promise<Model[]> { try { @@ -116,7 +117,7 @@ async function fetchOpenRouterModels(): Promise<Model[]> { async function fetchAiGatewayModels(): Promise<Model[]> { try { console.log("Fetching models from Vercel AI Gateway API..."); - const response = await fetch(`${AI_GATEWAY_BASE_URL}/models`); + const response = await fetch(`${AI_GATEWAY_MODELS_URL}/models`); const data = await response.json(); const models: Model[] = []; @@ -147,7 +148,7 @@ async function fetchAiGatewayModels(): Promise<Model[]> { models.push({ id: model.id, name: model.name || model.id, - api: "openai-completions", + api: "anthropic-messages", baseUrl: AI_GATEWAY_BASE_URL, provider: "ai-gateway", reasoning: tags.includes("reasoning"), diff --git a/packages/ai/src/models.generated.ts b/packages/ai/src/models.generated.ts index 490c1d38..d8730f6e 100644 --- a/packages/ai/src/models.generated.ts +++ b/packages/ai/src/models.generated.ts @@ -8,9 +8,9 @@ export const MODELS = { "alibaba/qwen-3-14b": { id: "alibaba/qwen-3-14b", name: "Qwen3-14B", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -21,30 +21,30 @@ export const MODELS = { }, contextWindow: 40960, maxTokens: 16384, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "alibaba/qwen-3-235b": { id: "alibaba/qwen-3-235b", name: 
"Qwen3 235B A22b Instruct 2507", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { - input: 0.13, - output: 0.6, + input: 0.071, + output: 0.463, cacheRead: 0, cacheWrite: 0, }, contextWindow: 40960, maxTokens: 16384, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "alibaba/qwen-3-30b": { id: "alibaba/qwen-3-30b", name: "Qwen3-30B-A3B", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -55,13 +55,13 @@ export const MODELS = { }, contextWindow: 40960, maxTokens: 16384, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "alibaba/qwen-3-32b": { id: "alibaba/qwen-3-32b", name: "Qwen 3.32B", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -72,13 +72,13 @@ export const MODELS = { }, contextWindow: 40960, maxTokens: 16384, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "alibaba/qwen3-235b-a22b-thinking": { id: "alibaba/qwen3-235b-a22b-thinking", name: "Qwen3 235B A22B Thinking 2507", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -89,13 +89,13 @@ export const MODELS = { }, contextWindow: 262114, maxTokens: 262114, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "alibaba/qwen3-coder": { id: "alibaba/qwen3-coder", name: "Qwen3 Coder 480B A35B Instruct", - api: 
"openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -106,13 +106,13 @@ export const MODELS = { }, contextWindow: 262144, maxTokens: 66536, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "alibaba/qwen3-coder-30b-a3b": { id: "alibaba/qwen3-coder-30b-a3b", name: "Qwen 3 Coder 30B A3B Instruct", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -123,13 +123,13 @@ export const MODELS = { }, contextWindow: 160000, maxTokens: 32768, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "alibaba/qwen3-coder-plus": { id: "alibaba/qwen3-coder-plus", name: "Qwen3 Coder Plus", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -140,13 +140,13 @@ export const MODELS = { }, contextWindow: 1000000, maxTokens: 65536, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "alibaba/qwen3-max": { id: "alibaba/qwen3-max", name: "Qwen3 Max", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -157,13 +157,13 @@ export const MODELS = { }, contextWindow: 262144, maxTokens: 32768, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "alibaba/qwen3-max-preview": { id: "alibaba/qwen3-max-preview", name: "Qwen3 Max Preview", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: 
"https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -174,13 +174,13 @@ export const MODELS = { }, contextWindow: 262144, maxTokens: 32768, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "anthropic/claude-3-haiku": { id: "anthropic/claude-3-haiku", name: "Claude 3 Haiku", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -191,13 +191,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 4096, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "anthropic/claude-3-opus": { id: "anthropic/claude-3-opus", name: "Claude 3 Opus", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -208,13 +208,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 8192, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "anthropic/claude-3.5-haiku": { id: "anthropic/claude-3.5-haiku", name: "Claude 3.5 Haiku", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -225,13 +225,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 8192, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "anthropic/claude-3.5-sonnet": { id: "anthropic/claude-3.5-sonnet", name: "Claude 3.5 Sonnet", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: 
"https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -242,13 +242,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 8192, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "anthropic/claude-3.5-sonnet-20240620": { id: "anthropic/claude-3.5-sonnet-20240620", name: "Claude 3.5 Sonnet (2024-06-20)", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -259,13 +259,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 8192, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "anthropic/claude-3.7-sonnet": { id: "anthropic/claude-3.7-sonnet", name: "Claude 3.7 Sonnet", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -276,13 +276,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 64000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "anthropic/claude-haiku-4.5": { id: "anthropic/claude-haiku-4.5", name: "Claude Haiku 4.5", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -293,13 +293,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 64000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "anthropic/claude-opus-4": { id: "anthropic/claude-opus-4", name: "Claude Opus 4", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: 
"https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -310,13 +310,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 32000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "anthropic/claude-opus-4.1": { id: "anthropic/claude-opus-4.1", name: "Claude Opus 4.1", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -327,13 +327,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 32000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "anthropic/claude-opus-4.5": { id: "anthropic/claude-opus-4.5", name: "Claude Opus 4.5", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -344,13 +344,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 64000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "anthropic/claude-sonnet-4": { id: "anthropic/claude-sonnet-4", name: "Claude Sonnet 4", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -361,13 +361,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 64000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "anthropic/claude-sonnet-4.5": { id: "anthropic/claude-sonnet-4.5", name: "Claude Sonnet 4.5", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: 
true, input: ["text", "image"], cost: { @@ -378,13 +378,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 64000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "bytedance/seed-1.6": { id: "bytedance/seed-1.6", name: "Seed 1.6", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -395,13 +395,13 @@ export const MODELS = { }, contextWindow: 256000, maxTokens: 32000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "cohere/command-a": { id: "cohere/command-a", name: "Command A", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -412,13 +412,13 @@ export const MODELS = { }, contextWindow: 256000, maxTokens: 8000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "deepseek/deepseek-v3": { id: "deepseek/deepseek-v3", name: "DeepSeek V3 0324", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -429,13 +429,13 @@ export const MODELS = { }, contextWindow: 163840, maxTokens: 16384, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "deepseek/deepseek-v3.1": { id: "deepseek/deepseek-v3.1", name: "DeepSeek-V3.1", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -446,13 +446,13 @@ export const MODELS = { }, contextWindow: 163840, maxTokens: 128000, - } satisfies 
Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "deepseek/deepseek-v3.1-terminus": { id: "deepseek/deepseek-v3.1-terminus", name: "DeepSeek V3.1 Terminus", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -463,13 +463,13 @@ export const MODELS = { }, contextWindow: 131072, maxTokens: 65536, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "deepseek/deepseek-v3.2-exp": { id: "deepseek/deepseek-v3.2-exp", name: "DeepSeek V3.2 Exp", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -480,13 +480,13 @@ export const MODELS = { }, contextWindow: 163840, maxTokens: 163840, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "deepseek/deepseek-v3.2-thinking": { id: "deepseek/deepseek-v3.2-thinking", name: "DeepSeek V3.2 Thinking", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -497,13 +497,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 64000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "google/gemini-2.0-flash": { id: "google/gemini-2.0-flash", name: "Gemini 2.0 Flash", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -514,13 +514,13 @@ export const MODELS = { }, contextWindow: 1000000, maxTokens: 8192, - } satisfies Model<"openai-completions">, + } 
satisfies Model<"anthropic-messages">, "google/gemini-2.0-flash-lite": { id: "google/gemini-2.0-flash-lite", name: "Gemini 2.0 Flash Lite", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -531,13 +531,13 @@ export const MODELS = { }, contextWindow: 1048576, maxTokens: 8192, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "google/gemini-2.5-flash": { id: "google/gemini-2.5-flash", name: "Gemini 2.5 Flash", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -548,13 +548,13 @@ export const MODELS = { }, contextWindow: 1000000, maxTokens: 64000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "google/gemini-2.5-flash-lite": { id: "google/gemini-2.5-flash-lite", name: "Gemini 2.5 Flash Lite", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -565,13 +565,13 @@ export const MODELS = { }, contextWindow: 1048576, maxTokens: 65536, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "google/gemini-2.5-flash-lite-preview-09-2025": { id: "google/gemini-2.5-flash-lite-preview-09-2025", name: "Gemini 2.5 Flash Lite Preview 09-2025", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -582,13 +582,13 @@ export const MODELS = { }, contextWindow: 1048576, maxTokens: 65536, - } satisfies 
Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "google/gemini-2.5-flash-preview-09-2025": { id: "google/gemini-2.5-flash-preview-09-2025", name: "Gemini 2.5 Flash Preview 09-2025", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -599,13 +599,13 @@ export const MODELS = { }, contextWindow: 1000000, maxTokens: 65536, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "google/gemini-2.5-pro": { id: "google/gemini-2.5-pro", name: "Gemini 2.5 Pro", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -616,13 +616,13 @@ export const MODELS = { }, contextWindow: 1048576, maxTokens: 65536, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "google/gemini-3-flash": { id: "google/gemini-3-flash", name: "Gemini 3 Flash", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -633,13 +633,13 @@ export const MODELS = { }, contextWindow: 1000000, maxTokens: 64000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "google/gemini-3-pro-preview": { id: "google/gemini-3-pro-preview", name: "Gemini 3 Pro Preview", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -650,13 +650,13 @@ export const MODELS = { }, contextWindow: 1000000, maxTokens: 64000, - } satisfies 
Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "inception/mercury-coder-small": { id: "inception/mercury-coder-small", name: "Mercury Coder Small Beta", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -667,13 +667,13 @@ export const MODELS = { }, contextWindow: 32000, maxTokens: 16384, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "meituan/longcat-flash-chat": { id: "meituan/longcat-flash-chat", name: "LongCat Flash Chat", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -684,13 +684,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 8192, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "meituan/longcat-flash-thinking": { id: "meituan/longcat-flash-thinking", name: "LongCat Flash Thinking", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -701,13 +701,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 8192, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "meta/llama-3.1-70b": { id: "meta/llama-3.1-70b", name: "Llama 3.1 70B Instruct", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -718,13 +718,13 @@ export const MODELS = { }, contextWindow: 131072, maxTokens: 16384, - } satisfies Model<"openai-completions">, + } satisfies 
Model<"anthropic-messages">, "meta/llama-3.1-8b": { id: "meta/llama-3.1-8b", name: "Llama 3.1 8B Instruct", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -735,13 +735,13 @@ export const MODELS = { }, contextWindow: 131072, maxTokens: 16384, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "meta/llama-3.2-11b": { id: "meta/llama-3.2-11b", name: "Llama 3.2 11B Vision Instruct", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -752,13 +752,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 8192, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "meta/llama-3.2-90b": { id: "meta/llama-3.2-90b", name: "Llama 3.2 90B Vision Instruct", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -769,13 +769,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 8192, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "meta/llama-3.3-70b": { id: "meta/llama-3.3-70b", name: "Llama 3.3 70B Instruct", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -786,13 +786,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 8192, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "meta/llama-4-maverick": { id: "meta/llama-4-maverick", 
name: "Llama 4 Maverick 17B Instruct", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -803,13 +803,13 @@ export const MODELS = { }, contextWindow: 131072, maxTokens: 8192, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "meta/llama-4-scout": { id: "meta/llama-4-scout", name: "Llama 4 Scout 17B Instruct", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -820,13 +820,13 @@ export const MODELS = { }, contextWindow: 131072, maxTokens: 8192, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "minimax/minimax-m2": { id: "minimax/minimax-m2", name: "MiniMax M2", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -837,13 +837,13 @@ export const MODELS = { }, contextWindow: 262114, maxTokens: 262114, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "minimax/minimax-m2.1": { id: "minimax/minimax-m2.1", name: "MiniMax M2.1", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -854,13 +854,13 @@ export const MODELS = { }, contextWindow: 196608, maxTokens: 196608, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "minimax/minimax-m2.1-lightning": { id: "minimax/minimax-m2.1-lightning", name: "MiniMax M2.1 Lightning", - api: "openai-completions", + api: 
"anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -871,13 +871,13 @@ export const MODELS = { }, contextWindow: 204800, maxTokens: 131072, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "mistral/codestral": { id: "mistral/codestral", name: "Mistral Codestral", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -888,13 +888,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 4000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "mistral/devstral-2": { id: "mistral/devstral-2", name: "Devstral 2", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -905,13 +905,13 @@ export const MODELS = { }, contextWindow: 256000, maxTokens: 256000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "mistral/devstral-small": { id: "mistral/devstral-small", name: "Devstral Small 1.1", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -922,13 +922,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 64000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "mistral/devstral-small-2": { id: "mistral/devstral-small-2", name: "Devstral Small 2", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: 
"https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -939,13 +939,13 @@ export const MODELS = { }, contextWindow: 256000, maxTokens: 256000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "mistral/ministral-3b": { id: "mistral/ministral-3b", name: "Ministral 3B", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -956,13 +956,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 4000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "mistral/ministral-8b": { id: "mistral/ministral-8b", name: "Ministral 8B", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -973,13 +973,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 4000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "mistral/mistral-medium": { id: "mistral/mistral-medium", name: "Mistral Medium 3.1", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -990,13 +990,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 64000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "mistral/mistral-nemo": { id: "mistral/mistral-nemo", name: "Mistral Nemo", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -1007,13 +1007,13 @@ export const 
MODELS = { }, contextWindow: 60288, maxTokens: 16000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "mistral/mistral-small": { id: "mistral/mistral-small", name: "Mistral Small", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1024,13 +1024,13 @@ export const MODELS = { }, contextWindow: 32000, maxTokens: 4000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "mistral/pixtral-12b": { id: "mistral/pixtral-12b", name: "Pixtral 12B 2409", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1041,13 +1041,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 4000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "mistral/pixtral-large": { id: "mistral/pixtral-large", name: "Pixtral Large", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1058,13 +1058,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 4000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "moonshotai/kimi-k2": { id: "moonshotai/kimi-k2", name: "Kimi K2", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -1075,13 +1075,13 @@ export const MODELS = { }, contextWindow: 131072, maxTokens: 16384, - } satisfies Model<"openai-completions">, + } 
satisfies Model<"anthropic-messages">, "moonshotai/kimi-k2-thinking": { id: "moonshotai/kimi-k2-thinking", name: "Kimi K2 Thinking", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -1092,13 +1092,13 @@ export const MODELS = { }, contextWindow: 216144, maxTokens: 216144, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "moonshotai/kimi-k2-thinking-turbo": { id: "moonshotai/kimi-k2-thinking-turbo", name: "Kimi K2 Thinking Turbo", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -1109,13 +1109,13 @@ export const MODELS = { }, contextWindow: 262114, maxTokens: 262114, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "moonshotai/kimi-k2-turbo": { id: "moonshotai/kimi-k2-turbo", name: "Kimi K2 Turbo", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -1126,13 +1126,13 @@ export const MODELS = { }, contextWindow: 256000, maxTokens: 16384, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "nvidia/nemotron-nano-12b-v2-vl": { id: "nvidia/nemotron-nano-12b-v2-vl", name: "Nvidia Nemotron Nano 12B V2 VL", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1143,13 +1143,13 @@ export const MODELS = { }, contextWindow: 131072, maxTokens: 131072, - } satisfies Model<"openai-completions">, + } satisfies 
Model<"anthropic-messages">, "nvidia/nemotron-nano-9b-v2": { id: "nvidia/nemotron-nano-9b-v2", name: "Nvidia Nemotron Nano 9B V2", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -1160,13 +1160,13 @@ export const MODELS = { }, contextWindow: 131072, maxTokens: 131072, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/codex-mini": { id: "openai/codex-mini", name: "Codex Mini", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1177,13 +1177,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 100000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-4-turbo": { id: "openai/gpt-4-turbo", name: "GPT-4 Turbo", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1194,13 +1194,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 4096, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-4.1": { id: "openai/gpt-4.1", name: "GPT-4.1", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1211,13 +1211,13 @@ export const MODELS = { }, contextWindow: 1047576, maxTokens: 32768, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-4.1-mini": { id: "openai/gpt-4.1-mini", name: "GPT-4.1 mini", - 
api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1228,13 +1228,13 @@ export const MODELS = { }, contextWindow: 1047576, maxTokens: 32768, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-4.1-nano": { id: "openai/gpt-4.1-nano", name: "GPT-4.1 nano", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1245,13 +1245,13 @@ export const MODELS = { }, contextWindow: 1047576, maxTokens: 32768, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-4o": { id: "openai/gpt-4o", name: "GPT-4o", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1262,13 +1262,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 16384, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-4o-mini": { id: "openai/gpt-4o-mini", name: "GPT-4o mini", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1279,13 +1279,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 16384, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-5": { id: "openai/gpt-5", name: "GPT-5", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: 
"https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1296,13 +1296,13 @@ export const MODELS = { }, contextWindow: 400000, maxTokens: 128000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-5-chat": { id: "openai/gpt-5-chat", name: "GPT-5 Chat", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1313,13 +1313,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 16384, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-5-codex": { id: "openai/gpt-5-codex", name: "GPT-5-Codex", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1330,13 +1330,13 @@ export const MODELS = { }, contextWindow: 400000, maxTokens: 128000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-5-mini": { id: "openai/gpt-5-mini", name: "GPT-5 mini", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1347,13 +1347,13 @@ export const MODELS = { }, contextWindow: 400000, maxTokens: 128000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-5-nano": { id: "openai/gpt-5-nano", name: "GPT-5 nano", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1364,13 +1364,13 @@ export const 
MODELS = { }, contextWindow: 400000, maxTokens: 128000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-5-pro": { id: "openai/gpt-5-pro", name: "GPT-5 pro", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1381,13 +1381,13 @@ export const MODELS = { }, contextWindow: 400000, maxTokens: 272000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-5.1-codex": { id: "openai/gpt-5.1-codex", name: "GPT-5.1-Codex", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1398,13 +1398,13 @@ export const MODELS = { }, contextWindow: 400000, maxTokens: 128000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-5.1-codex-max": { id: "openai/gpt-5.1-codex-max", name: "GPT 5.1 Codex Max", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1415,13 +1415,13 @@ export const MODELS = { }, contextWindow: 400000, maxTokens: 128000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-5.1-codex-mini": { id: "openai/gpt-5.1-codex-mini", name: "GPT-5.1 Codex mini", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1432,13 +1432,13 @@ export const MODELS = { }, contextWindow: 400000, maxTokens: 128000, - } satisfies 
Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-5.1-instant": { id: "openai/gpt-5.1-instant", name: "GPT-5.1 Instant", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1449,13 +1449,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 16384, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-5.1-thinking": { id: "openai/gpt-5.1-thinking", name: "GPT 5.1 Thinking", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1466,13 +1466,13 @@ export const MODELS = { }, contextWindow: 400000, maxTokens: 128000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-5.2": { id: "openai/gpt-5.2", name: "GPT-5.2", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1483,13 +1483,13 @@ export const MODELS = { }, contextWindow: 400000, maxTokens: 128000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-5.2-chat": { id: "openai/gpt-5.2-chat", name: "GPT-5.2 Chat", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1500,13 +1500,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 16384, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-5.2-pro": { 
id: "openai/gpt-5.2-pro", name: "GPT 5.2 ", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1517,13 +1517,13 @@ export const MODELS = { }, contextWindow: 400000, maxTokens: 128000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-oss-120b": { id: "openai/gpt-oss-120b", name: "gpt-oss-120b", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -1534,13 +1534,13 @@ export const MODELS = { }, contextWindow: 131072, maxTokens: 131072, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-oss-20b": { id: "openai/gpt-oss-20b", name: "gpt-oss-20b", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -1551,13 +1551,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 8192, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/gpt-oss-safeguard-20b": { id: "openai/gpt-oss-safeguard-20b", name: "gpt-oss-safeguard-20b", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -1568,13 +1568,13 @@ export const MODELS = { }, contextWindow: 131072, maxTokens: 65536, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/o1": { id: "openai/o1", name: "o1", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - 
baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1585,13 +1585,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 100000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/o3": { id: "openai/o3", name: "o3", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1602,13 +1602,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 100000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/o3-deep-research": { id: "openai/o3-deep-research", name: "o3-deep-research", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1619,13 +1619,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 100000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/o3-mini": { id: "openai/o3-mini", name: "o3-mini", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -1636,13 +1636,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 100000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/o3-pro": { id: "openai/o3-pro", name: "o3 Pro", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1653,13 +1653,13 
@@ export const MODELS = { }, contextWindow: 200000, maxTokens: 100000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "openai/o4-mini": { id: "openai/o4-mini", name: "o4-mini", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1670,13 +1670,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 100000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "perplexity/sonar": { id: "perplexity/sonar", name: "Sonar", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1687,13 +1687,13 @@ export const MODELS = { }, contextWindow: 127000, maxTokens: 8000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "perplexity/sonar-pro": { id: "perplexity/sonar-pro", name: "Sonar Pro", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1704,13 +1704,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 8000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "prime-intellect/intellect-3": { id: "prime-intellect/intellect-3", name: "INTELLECT 3", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -1721,13 +1721,13 @@ export const MODELS = { }, contextWindow: 131072, maxTokens: 131072, - } satisfies Model<"openai-completions">, + } 
satisfies Model<"anthropic-messages">, "stealth/sonoma-dusk-alpha": { id: "stealth/sonoma-dusk-alpha", name: "Sonoma Dusk Alpha", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1738,13 +1738,13 @@ export const MODELS = { }, contextWindow: 2000000, maxTokens: 131072, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "stealth/sonoma-sky-alpha": { id: "stealth/sonoma-sky-alpha", name: "Sonoma Sky Alpha", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1755,13 +1755,13 @@ export const MODELS = { }, contextWindow: 2000000, maxTokens: 131072, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "vercel/v0-1.0-md": { id: "vercel/v0-1.0-md", name: "v0-1.0-md", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1772,13 +1772,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 32000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "vercel/v0-1.5-md": { id: "vercel/v0-1.5-md", name: "v0-1.5-md", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1789,13 +1789,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 32768, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "xai/grok-2-vision": { id: 
"xai/grok-2-vision", name: "Grok 2 Vision", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text", "image"], cost: { @@ -1806,13 +1806,13 @@ export const MODELS = { }, contextWindow: 32768, maxTokens: 32768, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "xai/grok-3": { id: "xai/grok-3", name: "Grok 3 Beta", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -1823,13 +1823,13 @@ export const MODELS = { }, contextWindow: 131072, maxTokens: 131072, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "xai/grok-3-fast": { id: "xai/grok-3-fast", name: "Grok 3 Fast Beta", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -1840,13 +1840,13 @@ export const MODELS = { }, contextWindow: 131072, maxTokens: 131072, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "xai/grok-3-mini": { id: "xai/grok-3-mini", name: "Grok 3 Mini Beta", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -1857,13 +1857,13 @@ export const MODELS = { }, contextWindow: 131072, maxTokens: 131072, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "xai/grok-3-mini-fast": { id: "xai/grok-3-mini-fast", name: "Grok 3 Mini Fast Beta", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: 
"https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -1874,13 +1874,13 @@ export const MODELS = { }, contextWindow: 131072, maxTokens: 131072, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "xai/grok-4": { id: "xai/grok-4", name: "Grok 4", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -1891,13 +1891,13 @@ export const MODELS = { }, contextWindow: 256000, maxTokens: 256000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "xai/grok-4-fast-non-reasoning": { id: "xai/grok-4-fast-non-reasoning", name: "Grok 4 Fast Non-Reasoning", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -1908,13 +1908,13 @@ export const MODELS = { }, contextWindow: 2000000, maxTokens: 256000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "xai/grok-4-fast-reasoning": { id: "xai/grok-4-fast-reasoning", name: "Grok 4 Fast Reasoning", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -1925,13 +1925,13 @@ export const MODELS = { }, contextWindow: 2000000, maxTokens: 256000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "xai/grok-4.1-fast-non-reasoning": { id: "xai/grok-4.1-fast-non-reasoning", name: "Grok 4.1 Fast Non-Reasoning", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: 
"https://ai-gateway.vercel.sh", reasoning: false, input: ["text"], cost: { @@ -1942,13 +1942,13 @@ export const MODELS = { }, contextWindow: 2000000, maxTokens: 30000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "xai/grok-4.1-fast-reasoning": { id: "xai/grok-4.1-fast-reasoning", name: "Grok 4.1 Fast Reasoning", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -1959,13 +1959,13 @@ export const MODELS = { }, contextWindow: 2000000, maxTokens: 30000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "xai/grok-code-fast-1": { id: "xai/grok-code-fast-1", name: "Grok Code Fast 1", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -1976,13 +1976,13 @@ export const MODELS = { }, contextWindow: 256000, maxTokens: 256000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "xiaomi/mimo-v2-flash": { id: "xiaomi/mimo-v2-flash", name: "MiMo V2 Flash", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -1993,13 +1993,13 @@ export const MODELS = { }, contextWindow: 262144, maxTokens: 32000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "zai/glm-4.5": { id: "zai/glm-4.5", name: "GLM-4.5", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -2010,13 +2010,13 @@ export const MODELS = 
{ }, contextWindow: 131072, maxTokens: 131072, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "zai/glm-4.5-air": { id: "zai/glm-4.5-air", name: "GLM 4.5 Air", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -2027,13 +2027,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 96000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "zai/glm-4.5v": { id: "zai/glm-4.5v", name: "GLM 4.5V", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -2044,13 +2044,13 @@ export const MODELS = { }, contextWindow: 65536, maxTokens: 66000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "zai/glm-4.6": { id: "zai/glm-4.6", name: "GLM 4.6", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -2061,13 +2061,13 @@ export const MODELS = { }, contextWindow: 200000, maxTokens: 96000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "zai/glm-4.6v": { id: "zai/glm-4.6v", name: "GLM-4.6V", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -2078,13 +2078,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 24000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "zai/glm-4.6v-flash": { id: "zai/glm-4.6v-flash", name: 
"GLM-4.6V-Flash", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text", "image"], cost: { @@ -2095,13 +2095,13 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 24000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, "zai/glm-4.7": { id: "zai/glm-4.7", name: "GLM 4.7", - api: "openai-completions", + api: "anthropic-messages", provider: "ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh/v1", + baseUrl: "https://ai-gateway.vercel.sh", reasoning: true, input: ["text"], cost: { @@ -2112,7 +2112,7 @@ export const MODELS = { }, contextWindow: 202752, maxTokens: 120000, - } satisfies Model<"openai-completions">, + } satisfies Model<"anthropic-messages">, }, "amazon-bedrock": { "anthropic.claude-3-5-haiku-20241022-v1:0": {