From e0c2745989520aaa72a3b718bd145a164c73185b Mon Sep 17 00:00:00 2001
From: Mario Zechner
Date: Sun, 16 Nov 2025 22:35:09 +0100
Subject: [PATCH] Add gpt-5.1-codex to model list

---
 packages/ai/scripts/generate-models.ts | 22 +++++++++++++++++++++-
 packages/ai/src/models.generated.ts    | 17 +++++++++++++++++
 2 files changed, 38 insertions(+), 1 deletion(-)

diff --git a/packages/ai/scripts/generate-models.ts b/packages/ai/scripts/generate-models.ts
index dce8df75..82055a31 100644
--- a/packages/ai/scripts/generate-models.ts
+++ b/packages/ai/scripts/generate-models.ts
@@ -295,7 +295,7 @@ async function generateModels() {
 	// Combine models (models.dev has priority)
 	const allModels = [...modelsDevModels, ...openRouterModels];
 
-	// Add missing gpt models (can't use tools)
+	// Add missing gpt models
 	if (!allModels.some(m => m.provider === "openai" && m.id === "gpt-5-chat-latest")) {
 		allModels.push({
 			id: "gpt-5-chat-latest",
@@ -316,6 +316,26 @@ async function generateModels() {
 		});
 	}
 
+	if (!allModels.some(m => m.provider === "openai" && m.id === "gpt-5.1-codex")) {
+		allModels.push({
+			id: "gpt-5.1-codex",
+			name: "GPT-5.1 Codex",
+			api: "openai-responses",
+			baseUrl: "https://api.openai.com/v1",
+			provider: "openai",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 1.25,
+				output: 5,
+				cacheRead: 0.125,
+				cacheWrite: 1.25,
+			},
+			contextWindow: 400000,
+			maxTokens: 128000,
+		});
+	}
+
 	// Add missing Grok models
 	if (!allModels.some(m => m.provider === "xai" && m.id === "grok-code-fast-1")) {
 		allModels.push({
diff --git a/packages/ai/src/models.generated.ts b/packages/ai/src/models.generated.ts
index d5e5e166..dab0f851 100644
--- a/packages/ai/src/models.generated.ts
+++ b/packages/ai/src/models.generated.ts
@@ -1097,6 +1097,23 @@ export const MODELS = {
 			contextWindow: 128000,
 			maxTokens: 16384,
 		} satisfies Model<"openai-responses">,
+		"gpt-5.1-codex": {
+			id: "gpt-5.1-codex",
+			name: "GPT-5.1 Codex",
+			api: "openai-responses",
+			provider: "openai",
+			baseUrl: "https://api.openai.com/v1",
+			reasoning: true,
+			input: ["text", "image"],
+			cost: {
+				input: 1.25,
+				output: 5,
+				cacheRead: 0.125,
+				cacheWrite: 1.25,
+			},
+			contextWindow: 400000,
+			maxTokens: 128000,
+		} satisfies Model<"openai-responses">,
 	},
 	groq: {
 		"llama-3.1-8b-instant": {
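
For context, a minimal sketch of how the newly generated `MODELS.openai["gpt-5.1-codex"]` entry could be consumed downstream. The relative import path and the per-million-token reading of the `cost` fields are assumptions for illustration; they are not established by the patch itself.

```ts
// Sketch only: the import path is hypothetical; the patch only shows that
// MODELS is exported from packages/ai/src/models.generated.ts.
import { MODELS } from "./models.generated";

// The entry added by this patch.
const codex = MODELS.openai["gpt-5.1-codex"];

// Field values taken directly from the diff.
console.log(codex.name);          // "GPT-5.1 Codex"
console.log(codex.api);           // "openai-responses"
console.log(codex.contextWindow); // 400000
console.log(codex.maxTokens);     // 128000
console.log(codex.cost.input);    // 1.25 (assumed USD per million input tokens)
```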