merge: add gpt-5.4 support and cap context windows to 272k

This commit is contained in:
Mario Zechner 2026-03-05 23:00:34 +01:00
commit 8e77bb3272
8 changed files with 129 additions and 10 deletions

View file

@@ -647,7 +647,10 @@ async function generateModels() {
const aiGatewayModels = await fetchAiGatewayModels();
// Combine models (models.dev has priority)
const allModels = [...modelsDevModels, ...openRouterModels, ...aiGatewayModels];
const allModels = [...modelsDevModels, ...openRouterModels, ...aiGatewayModels].filter(
(model) =>
!((model.provider === "opencode" || model.provider === "opencode-go") && model.id === "gpt-5.3-codex-spark"),
);
// Fix incorrect cache pricing for Claude Opus 4.5 from models.dev
// models.dev has 3x the correct pricing (1.5/18.75 instead of 0.5/6.25)
@@ -677,8 +680,29 @@ async function generateModels() {
) {
candidate.contextWindow = 200000;
}
if ((candidate.provider === "opencode" || candidate.provider === "opencode-go") && candidate.id === "gpt-5.4") {
candidate.contextWindow = 272000;
candidate.maxTokens = 128000;
}
if (candidate.provider === "openai" && candidate.id === "gpt-5.4") {
candidate.contextWindow = 272000;
candidate.maxTokens = 128000;
}
// Keep selected OpenRouter model metadata stable until upstream settles.
if (candidate.provider === "openrouter" && candidate.id === "moonshotai/kimi-k2.5") {
candidate.cost.input = 0.41;
candidate.cost.output = 2.06;
candidate.cost.cacheRead = 0.07;
candidate.maxTokens = 4096;
}
if (candidate.provider === "openrouter" && candidate.id === "z-ai/glm-5") {
candidate.cost.input = 0.6;
candidate.cost.output = 1.9;
candidate.cost.cacheRead = 0.119;
}
}
// Add missing EU Opus 4.6 profile
if (!allModels.some((m) => m.provider === "amazon-bedrock" && m.id === "eu.anthropic.claude-opus-4-6-v1")) {
allModels.push({
@@ -858,6 +882,26 @@ async function generateModels() {
}
}
if (!allModels.some((m) => m.provider === "openai" && m.id === "gpt-5.4")) {
allModels.push({
id: "gpt-5.4",
name: "GPT-5.4",
api: "openai-responses",
baseUrl: "https://api.openai.com/v1",
provider: "openai",
reasoning: true,
input: ["text", "image"],
cost: {
input: 2.5,
output: 15,
cacheRead: 0.25,
cacheWrite: 0,
},
contextWindow: 272000,
maxTokens: 128000,
});
}
// OpenAI Codex (ChatGPT OAuth) models
// NOTE: These are not fetched from models.dev; we keep a small, explicit list to avoid aliases.
// Context window is based on observed server limits (400s above ~272k), not marketing numbers.
@@ -937,6 +981,18 @@ async function generateModels() {
contextWindow: CODEX_CONTEXT,
maxTokens: CODEX_MAX_TOKENS,
},
{
id: "gpt-5.4",
name: "GPT-5.4",
api: "openai-codex-responses",
provider: "openai-codex",
baseUrl: CODEX_BASE_URL,
reasoning: true,
input: ["text", "image"],
cost: { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 },
contextWindow: CODEX_CONTEXT,
maxTokens: CODEX_MAX_TOKENS,
},
{
id: "gpt-5.3-codex-spark",
name: "GPT-5.3 Codex Spark",

View file

@@ -2287,6 +2287,23 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 32000,
} satisfies Model<"azure-openai-responses">,
"gpt-5.4": {
id: "gpt-5.4",
name: "GPT-5.4",
api: "azure-openai-responses",
provider: "azure-openai-responses",
baseUrl: "",
reasoning: true,
input: ["text", "image"],
cost: {
input: 2.5,
output: 15,
cacheRead: 0.25,
cacheWrite: 0,
},
contextWindow: 272000,
maxTokens: 128000,
} satisfies Model<"azure-openai-responses">,
"o1": {
id: "o1",
name: "o1",
@@ -5420,6 +5437,23 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 32000,
} satisfies Model<"openai-responses">,
"gpt-5.4": {
id: "gpt-5.4",
name: "GPT-5.4",
api: "openai-responses",
provider: "openai",
baseUrl: "https://api.openai.com/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 2.5,
output: 15,
cacheRead: 0.25,
cacheWrite: 0,
},
contextWindow: 272000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
"o1": {
id: "o1",
name: "o1",
@@ -5677,6 +5711,23 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 128000,
} satisfies Model<"openai-codex-responses">,
"gpt-5.4": {
id: "gpt-5.4",
name: "GPT-5.4",
api: "openai-codex-responses",
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
input: ["text", "image"],
cost: {
input: 2.5,
output: 15,
cacheRead: 0.25,
cacheWrite: 0,
},
contextWindow: 272000,
maxTokens: 128000,
} satisfies Model<"openai-codex-responses">,
},
"opencode": {
"big-pickle": {
@@ -6118,7 +6169,7 @@ export const MODELS = {
cacheRead: 0.25,
cacheWrite: 0,
},
contextWindow: 1050000,
contextWindow: 272000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
"kimi-k2.5": {

View file

@@ -49,11 +49,11 @@ export function calculateCost<TApi extends Api>(model: Model<TApi>, usage: Usage
* Check if a model supports xhigh thinking level.
*
* Supported today:
* - GPT-5.2 / GPT-5.3 model families
* - GPT-5.2 / GPT-5.3 / GPT-5.4 model families
* - Anthropic Messages API Opus 4.6 models (xhigh maps to adaptive effort "max")
*/
export function supportsXhigh<TApi extends Api>(model: Model<TApi>): boolean {
if (model.id.includes("gpt-5.2") || model.id.includes("gpt-5.3")) {
if (model.id.includes("gpt-5.2") || model.id.includes("gpt-5.3") || model.id.includes("gpt-5.4")) {
return true;
}

View file

@@ -324,7 +324,8 @@ function buildRequestBody(
function clampReasoningEffort(modelId: string, effort: string): string {
const id = modelId.includes("/") ? modelId.split("/").pop()! : modelId;
if ((id.startsWith("gpt-5.2") || id.startsWith("gpt-5.3")) && effort === "minimal") return "low";
if ((id.startsWith("gpt-5.2") || id.startsWith("gpt-5.3") || id.startsWith("gpt-5.4")) && effort === "minimal")
return "low";
if (id === "gpt-5.1" && effort === "xhigh") return "high";
if (id === "gpt-5.1-codex-mini") return effort === "high" || effort === "xhigh" ? "high" : "medium";
return effort;

View file

@@ -231,7 +231,7 @@ describe("openai-codex streaming", () => {
await streamResult.result();
});
it("clamps gpt-5.3-codex minimal reasoning effort to low", async () => {
it.each(["gpt-5.3-codex", "gpt-5.4"])("clamps %s minimal reasoning effort to low", async (modelId) => {
const tempDir = mkdtempSync(join(tmpdir(), "pi-codex-stream-"));
process.env.PI_CODING_AGENT_DIR = tempDir;
@@ -303,8 +303,8 @@ describe("openai-codex streaming", () => {
global.fetch = fetchMock as typeof fetch;
const model: Model<"openai-codex-responses"> = {
id: "gpt-5.3-codex",
name: "GPT-5.3 Codex",
id: modelId,
name: modelId,
api: "openai-codex-responses",
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api",

View file

@@ -14,6 +14,12 @@ describe("supportsXhigh", () => {
expect(supportsXhigh(model!)).toBe(false);
});
it("returns true for GPT-5.4 models", () => {
const model = getModel("openai-codex", "gpt-5.4");
expect(model).toBeDefined();
expect(supportsXhigh(model!)).toBe(true);
});
it("returns false for OpenRouter Opus 4.6 (openai-completions API)", () => {
const model = getModel("openrouter", "anthropic/claude-opus-4.6");
expect(model).toBeDefined();

View file

@@ -14,9 +14,9 @@ import type { ModelRegistry } from "./model-registry.js";
export const defaultModelPerProvider: Record<KnownProvider, string> = {
"amazon-bedrock": "us.anthropic.claude-opus-4-6-v1",
anthropic: "claude-opus-4-6",
openai: "gpt-5.1-codex",
openai: "gpt-5.4",
"azure-openai-responses": "gpt-5.2",
"openai-codex": "gpt-5.3-codex",
"openai-codex": "gpt-5.4",
google: "gemini-2.5-pro",
"google-gemini-cli": "gemini-2.5-pro",
"google-antigravity": "gemini-3.1-pro-high",

View file

@@ -373,6 +373,11 @@ describe("resolveCliModel", () => {
});
describe("default model selection", () => {
test("openai defaults are gpt-5.4", () => {
expect(defaultModelPerProvider.openai).toBe("gpt-5.4");
expect(defaultModelPerProvider["openai-codex"]).toBe("gpt-5.4");
});
test("ai-gateway default is opus 4.6", () => {
expect(defaultModelPerProvider["vercel-ai-gateway"]).toBe("anthropic/claude-opus-4-6");
});