Revert "feat(ai): add gpt-5.2-codex to OpenAI provider (#730)"

This reverts commit 5a795b9857.
This commit is contained in:
Mario Zechner 2026-01-14 22:22:55 +01:00
parent 5a795b9857
commit 0a7537bf86
4 changed files with 26 additions and 87 deletions

View file

@@ -2,10 +2,6 @@
## [Unreleased]
### Added
- Added `gpt-5.2-codex` to OpenAI provider model list.
### Fixed
- Fixed signature support for non-Anthropic models in Amazon Bedrock provider ([#727](https://github.com/badlogic/pi-mono/pull/727) by [@unexge](https://github.com/unexge))

View file

@@ -689,26 +689,6 @@ async function generateModels() {
});
}
if (!allModels.some(m => m.provider === "openai" && m.id === "gpt-5.2-codex")) {
allModels.push({
id: "gpt-5.2-codex",
name: "GPT-5.2 Codex",
api: "openai-responses",
baseUrl: "https://api.openai.com/v1",
provider: "openai",
reasoning: true,
input: ["text", "image"],
cost: {
input: 1.75,
output: 14,
cacheRead: 0.175,
cacheWrite: 0,
},
contextWindow: 400000,
maxTokens: 128000,
});
}
// OpenAI Codex (ChatGPT OAuth) models
// NOTE: These are not fetched from models.dev; we keep a small, explicit list to avoid aliases.
// Context window is based on observed server limits (400s above ~272k), not marketing numbers.

View file

@@ -3645,23 +3645,6 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"openai-responses">,
"gpt-5.2-codex": {
id: "gpt-5.2-codex",
name: "GPT-5.2 Codex",
api: "openai-responses",
provider: "openai",
baseUrl: "https://api.openai.com/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 1.75,
output: 14,
cacheRead: 0.175,
cacheWrite: 0,
},
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
"gpt-5.2-pro": {
id: "gpt-5.2-pro",
name: "GPT-5.2 Pro",
@@ -4552,6 +4535,23 @@ export const MODELS = {
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"openai-completions">,
"anthropic/claude-3.5-haiku-20241022": {
id: "anthropic/claude-3.5-haiku-20241022",
name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 0.7999999999999999,
output: 4,
cacheRead: 0.08,
cacheWrite: 1,
},
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"openai-completions">,
"anthropic/claude-3.5-sonnet": {
id: "anthropic/claude-3.5-sonnet",
name: "Anthropic: Claude 3.5 Sonnet",
@@ -6932,23 +6932,6 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"openai/gpt-5.2-codex": {
id: "openai/gpt-5.2-codex",
name: "OpenAI: GPT-5.2-Codex",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 1.75,
output: 14,
cacheRead: 0.175,
cacheWrite: 0,
},
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
"openai/gpt-5.2-pro": {
id: "openai/gpt-5.2-pro",
name: "OpenAI: GPT-5.2 Pro",
@@ -9790,23 +9773,6 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"anthropic-messages">,
"openai/gpt-5.2-codex": {
id: "openai/gpt-5.2-codex",
name: "GPT-5.2-Codex",
api: "anthropic-messages",
provider: "vercel-ai-gateway",
baseUrl: "https://ai-gateway.vercel.sh",
reasoning: true,
input: ["text", "image"],
cost: {
input: 1.75,
output: 14,
cacheRead: 0.175,
cacheWrite: 0,
},
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"anthropic-messages">,
"openai/gpt-5.2-pro": {
id: "openai/gpt-5.2-pro",
name: "GPT 5.2 ",

View file

@@ -12,17 +12,12 @@ for (const [provider, models] of Object.entries(MODELS)) {
modelRegistry.set(provider, providerModels);
}
type ModelApi<TProvider extends KnownProvider, TModelId extends string> = TProvider extends keyof typeof MODELS
? TModelId extends keyof (typeof MODELS)[TProvider]
? (typeof MODELS)[TProvider][TModelId] extends { api: infer TApi }
? TApi extends Api
? TApi
: Api
: Api
: Api
: Api;
type ModelApi<
TProvider extends KnownProvider,
TModelId extends keyof (typeof MODELS)[TProvider],
> = (typeof MODELS)[TProvider][TModelId] extends { api: infer TApi } ? (TApi extends Api ? TApi : never) : never;
export function getModel<TProvider extends KnownProvider, TModelId extends string>(
export function getModel<TProvider extends KnownProvider, TModelId extends keyof (typeof MODELS)[TProvider]>(
provider: TProvider,
modelId: TModelId,
): Model<ModelApi<TProvider, TModelId>> {
@@ -34,9 +29,11 @@ export function getProviders(): KnownProvider[] {
return Array.from(modelRegistry.keys()) as KnownProvider[];
}
export function getModels<TProvider extends KnownProvider>(provider: TProvider): Model<ModelApi<TProvider, string>>[] {
export function getModels<TProvider extends KnownProvider>(
provider: TProvider,
): Model<ModelApi<TProvider, keyof (typeof MODELS)[TProvider]>>[] {
const models = modelRegistry.get(provider);
return models ? (Array.from(models.values()) as Model<ModelApi<TProvider, string>>[]) : [];
return models ? (Array.from(models.values()) as Model<ModelApi<TProvider, keyof (typeof MODELS)[TProvider]>>[]) : [];
}
export function calculateCost<TApi extends Api>(model: Model<TApi>, usage: Usage): Usage["cost"] {