diff --git a/packages/ai/CHANGELOG.md b/packages/ai/CHANGELOG.md index 413a8729..86fd7ad3 100644 --- a/packages/ai/CHANGELOG.md +++ b/packages/ai/CHANGELOG.md @@ -2,10 +2,6 @@ ## [Unreleased] -### Added - -- Added `gpt-5.2-codex` to OpenAI provider model list. - ### Fixed - Fixed signature support for non-Anthropic models in Amazon Bedrock provider ([#727](https://github.com/badlogic/pi-mono/pull/727) by [@unexge](https://github.com/unexge)) diff --git a/packages/ai/scripts/generate-models.ts b/packages/ai/scripts/generate-models.ts index bcdefe9a..41e34de4 100644 --- a/packages/ai/scripts/generate-models.ts +++ b/packages/ai/scripts/generate-models.ts @@ -689,26 +689,6 @@ async function generateModels() { }); } - if (!allModels.some(m => m.provider === "openai" && m.id === "gpt-5.2-codex")) { - allModels.push({ - id: "gpt-5.2-codex", - name: "GPT-5.2 Codex", - api: "openai-responses", - baseUrl: "https://api.openai.com/v1", - provider: "openai", - reasoning: true, - input: ["text", "image"], - cost: { - input: 1.75, - output: 14, - cacheRead: 0.175, - cacheWrite: 0, - }, - contextWindow: 400000, - maxTokens: 128000, - }); - } - // OpenAI Codex (ChatGPT OAuth) models // NOTE: These are not fetched from models.dev; we keep a small, explicit list to avoid aliases. // Context window is based on observed server limits (400s above ~272k), not marketing numbers. 
diff --git a/packages/ai/src/models.generated.ts b/packages/ai/src/models.generated.ts index ead1c1c1..78945672 100644 --- a/packages/ai/src/models.generated.ts +++ b/packages/ai/src/models.generated.ts @@ -3645,23 +3645,6 @@ export const MODELS = { contextWindow: 128000, maxTokens: 16384, } satisfies Model<"openai-responses">, - "gpt-5.2-codex": { - id: "gpt-5.2-codex", - name: "GPT-5.2 Codex", - api: "openai-responses", - provider: "openai", - baseUrl: "https://api.openai.com/v1", - reasoning: true, - input: ["text", "image"], - cost: { - input: 1.75, - output: 14, - cacheRead: 0.175, - cacheWrite: 0, - }, - contextWindow: 400000, - maxTokens: 128000, - } satisfies Model<"openai-responses">, "gpt-5.2-pro": { id: "gpt-5.2-pro", name: "GPT-5.2 Pro", @@ -4552,6 +4535,23 @@ export const MODELS = { contextWindow: 200000, maxTokens: 8192, } satisfies Model<"openai-completions">, + "anthropic/claude-3.5-haiku-20241022": { + id: "anthropic/claude-3.5-haiku-20241022", + name: "Anthropic: Claude 3.5 Haiku (2024-10-22)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.7999999999999999, + output: 4, + cacheRead: 0.08, + cacheWrite: 1, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"openai-completions">, "anthropic/claude-3.5-sonnet": { id: "anthropic/claude-3.5-sonnet", name: "Anthropic: Claude 3.5 Sonnet", @@ -6932,23 +6932,6 @@ export const MODELS = { contextWindow: 128000, maxTokens: 16384, } satisfies Model<"openai-completions">, - "openai/gpt-5.2-codex": { - id: "openai/gpt-5.2-codex", - name: "OpenAI: GPT-5.2-Codex", - api: "openai-completions", - provider: "openrouter", - baseUrl: "https://openrouter.ai/api/v1", - reasoning: true, - input: ["text", "image"], - cost: { - input: 1.75, - output: 14, - cacheRead: 0.175, - cacheWrite: 0, - }, - contextWindow: 400000, - maxTokens: 128000, - } satisfies Model<"openai-completions">, 
"openai/gpt-5.2-pro": { id: "openai/gpt-5.2-pro", name: "OpenAI: GPT-5.2 Pro", @@ -9790,23 +9773,6 @@ export const MODELS = { contextWindow: 128000, maxTokens: 16384, } satisfies Model<"anthropic-messages">, - "openai/gpt-5.2-codex": { - id: "openai/gpt-5.2-codex", - name: "GPT-5.2-Codex", - api: "anthropic-messages", - provider: "vercel-ai-gateway", - baseUrl: "https://ai-gateway.vercel.sh", - reasoning: true, - input: ["text", "image"], - cost: { - input: 1.75, - output: 14, - cacheRead: 0.175, - cacheWrite: 0, - }, - contextWindow: 400000, - maxTokens: 128000, - } satisfies Model<"anthropic-messages">, "openai/gpt-5.2-pro": { id: "openai/gpt-5.2-pro", name: "GPT 5.2 ", diff --git a/packages/ai/src/models.ts b/packages/ai/src/models.ts index 7edb01e0..b6c91565 100644 --- a/packages/ai/src/models.ts +++ b/packages/ai/src/models.ts @@ -12,17 +12,12 @@ for (const [provider, models] of Object.entries(MODELS)) { modelRegistry.set(provider, providerModels); } -type ModelApi = TProvider extends keyof typeof MODELS - ? TModelId extends keyof (typeof MODELS)[TProvider] - ? (typeof MODELS)[TProvider][TModelId] extends { api: infer TApi } - ? TApi extends Api - ? TApi - : Api - : Api - : Api - : Api; +type ModelApi< + TProvider extends KnownProvider, + TModelId extends keyof (typeof MODELS)[TProvider], +> = (typeof MODELS)[TProvider][TModelId] extends { api: infer TApi } ? (TApi extends Api ? TApi : never) : never; -export function getModel( +export function getModel( provider: TProvider, modelId: TModelId, ): Model> { @@ -34,9 +29,11 @@ export function getProviders(): KnownProvider[] { return Array.from(modelRegistry.keys()) as KnownProvider[]; } -export function getModels(provider: TProvider): Model>[] { +export function getModels( + provider: TProvider, +): Model>[] { const models = modelRegistry.get(provider); - return models ? (Array.from(models.values()) as Model>[]) : []; + return models ? 
(Array.from(models.values()) as Model<ModelApi<TProvider, keyof (typeof MODELS)[TProvider]>>[]) : []; } export function calculateCost(model: Model, usage: Usage): Usage["cost"] {