fix(ai): clamp minimal reasoning for gpt-5.3-codex (fixes #1334)

This commit is contained in:
Mario Zechner 2026-02-06 18:45:08 +01:00
parent b0a8d79844
commit f5b9eeb514
2 changed files with 97 additions and 1 deletions

View file

@ -282,7 +282,7 @@ function buildRequestBody(
function clampReasoningEffort(modelId: string, effort: string): string {
const id = modelId.includes("/") ? modelId.split("/").pop()! : modelId;
if (id.startsWith("gpt-5.2") && effort === "minimal") return "low";
if ((id.startsWith("gpt-5.2") || id.startsWith("gpt-5.3")) && effort === "minimal") return "low";
if (id === "gpt-5.1" && effort === "xhigh") return "high";
if (id === "gpt-5.1-codex-mini") return effort === "high" || effort === "xhigh" ? "high" : "medium";
return effort;

View file

@ -231,6 +231,102 @@ describe("openai-codex streaming", () => {
await streamResult.result();
});
// Regression test (fixes #1334): a "minimal" reasoning effort requested for
// gpt-5.3-codex must be clamped to "low" in the outgoing request body.
it("clamps gpt-5.3-codex minimal reasoning effort to low", async () => {
// Point the agent at a throwaway directory so the test touches no real user state.
const tempDir = mkdtempSync(join(tmpdir(), "pi-codex-stream-"));
process.env.PI_CODING_AGENT_DIR = tempDir;
// Build a fake three-part JWT ("header.payload.signature"); only the base64
// payload matters — presumably the client reads chatgpt_account_id from the
// "https://api.openai.com/auth" claim (verify against the auth code).
const payload = Buffer.from(
JSON.stringify({ "https://api.openai.com/auth": { chatgpt_account_id: "acc_test" } }),
"utf8",
).toString("base64");
const token = `aaa.${payload}.bbb`;
// Minimal SSE transcript: assistant message added, one text delta ("Hello"),
// message done, then response.completed with usage. Events are separated by
// blank lines ("\n\n") per the SSE framing the parser expects.
const sse = `${[
`data: ${JSON.stringify({
type: "response.output_item.added",
item: { type: "message", id: "msg_1", role: "assistant", status: "in_progress", content: [] },
})}`,
`data: ${JSON.stringify({ type: "response.content_part.added", part: { type: "output_text", text: "" } })}`,
`data: ${JSON.stringify({ type: "response.output_text.delta", delta: "Hello" })}`,
`data: ${JSON.stringify({
type: "response.output_item.done",
item: {
type: "message",
id: "msg_1",
role: "assistant",
status: "completed",
content: [{ type: "output_text", text: "Hello" }],
},
})}`,
`data: ${JSON.stringify({
type: "response.completed",
response: {
status: "completed",
usage: {
input_tokens: 5,
output_tokens: 3,
total_tokens: 8,
input_tokens_details: { cached_tokens: 0 },
},
},
})}`,
].join("\n\n")}\n\n`;
// Wrap the SSE text in a one-shot ReadableStream to stand in for a fetch body.
const encoder = new TextEncoder();
const stream = new ReadableStream<Uint8Array>({
start(controller) {
controller.enqueue(encoder.encode(sse));
controller.close();
},
});
// Stub fetch: satisfy the codex release/prompt lookups, and intercept the
// responses endpoint to inspect the request body the client built.
const fetchMock = vi.fn(async (input: string | URL, init?: RequestInit) => {
const url = typeof input === "string" ? input : input.toString();
if (url === "https://api.github.com/repos/openai/codex/releases/latest") {
return new Response(JSON.stringify({ tag_name: "rust-v0.0.0" }), { status: 200 });
}
if (url.startsWith("https://raw.githubusercontent.com/openai/codex/")) {
return new Response("PROMPT", { status: 200, headers: { etag: '"etag"' } });
}
if (url === "https://chatgpt.com/backend-api/codex/responses") {
const body = typeof init?.body === "string" ? (JSON.parse(init.body) as Record<string, unknown>) : null;
// The actual assertion under test: "minimal" was clamped to "low".
expect(body?.reasoning).toEqual({ effort: "low", summary: "auto" });
return new Response(stream, {
status: 200,
headers: { "content-type": "text/event-stream" },
});
}
return new Response("not found", { status: 404 });
});
global.fetch = fetchMock as typeof fetch;
// Model under test: gpt-5.3-codex, the id the clamp must now cover.
const model: Model<"openai-codex-responses"> = {
id: "gpt-5.3-codex",
name: "GPT-5.3 Codex",
api: "openai-codex-responses",
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 400000,
maxTokens: 128000,
};
const context: Context = {
systemPrompt: "You are a helpful assistant.",
messages: [{ role: "user", content: "Say hello", timestamp: Date.now() }],
};
// Request "minimal" explicitly; the fetch intercept above verifies the clamp.
const streamResult = streamOpenAICodexResponses(model, context, {
apiKey: token,
reasoningEffort: "minimal",
});
// Drain the stream so the mocked request (and its expect) actually runs.
await streamResult.result();
});
it("does not set conversation_id/session_id headers when sessionId is not provided", async () => {
const tempDir = mkdtempSync(join(tmpdir(), "pi-codex-stream-"));
process.env.PI_CODING_AGENT_DIR = tempDir;