fix(ai): append system prompt to codex bridge message instead of converting to input

Previously the system prompt was converted to an input message in convertMessages,
then stripped out by filterPiSystemPrompts. Now the system prompt is passed directly
to transformRequestBody and appended after CODEX_PI_BRIDGE in the bridge message.
commit bb50738f7e
parent 9a147559c0
Author: Mario Zechner
Date:   2026-01-05 06:03:07 +01:00
15 changed files with 908 additions and 127 deletions

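The hunks below cover the context-overflow test file; the provider-side bridge change itself is not shown here. As a rough illustration of the flow the commit message describes, here is a minimal sketch — everything except the names CODEX_PI_BRIDGE, transformRequestBody, convertMessages, and filterPiSystemPrompts (which come from the commit message) is a hypothetical assumption:

```ts
// Minimal sketch of the new flow; parameter and field names are illustrative.
declare const CODEX_PI_BRIDGE: string; // real constant lives in the codex provider

function transformRequestBody(
	body: { instructions?: string },
	systemPrompt?: string,
): { instructions?: string } {
	// Before: convertMessages folded the system prompt into the input messages,
	// and filterPiSystemPrompts stripped it back out again.
	// Now: the system prompt rides along in the bridge message itself.
	const instructions = systemPrompt
		? `${CODEX_PI_BRIDGE}\n\n${systemPrompt}`
		: CODEX_PI_BRIDGE;
	return { ...body, instructions };
}
```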

@@ -25,8 +25,9 @@ const oauthTokens = await Promise.all([
 	resolveApiKey("github-copilot"),
 	resolveApiKey("google-gemini-cli"),
 	resolveApiKey("google-antigravity"),
+	resolveApiKey("openai-codex"),
 ]);
-const [githubCopilotToken, geminiCliToken, antigravityToken] = oauthTokens;
+const [githubCopilotToken, geminiCliToken, antigravityToken, openaiCodexToken] = oauthTokens;
 
 // Lorem ipsum paragraph for realistic token estimation
 const LOREM_IPSUM = `Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. `;
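The LOREM_IPSUM constant hints at how the overflow prompt is presumably assembled: repeat a fixed paragraph until the estimated size exceeds the model's context window. A minimal sketch under that assumption — the helper name and the ~4 characters-per-token heuristic are illustrative, not taken from this repo:

```ts
// Hypothetical helper: build a prompt large enough to overflow the context
// window, assuming a rough ~4 characters-per-token estimate.
function buildOverflowPrompt(paragraph: string, contextWindow: number): string {
	const charsPerToken = 4;
	const targetChars = contextWindow * charsPerToken * 2; // 2x margin to be safe
	return paragraph.repeat(Math.ceil(targetChars / paragraph.length));
}
```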
@@ -263,6 +264,26 @@ describe("Context overflow error handling", () => {
 		);
 	});
 
+	// =============================================================================
+	// OpenAI Codex (OAuth)
+	// Uses ChatGPT Plus/Pro subscription via OAuth
+	// =============================================================================
+	describe("OpenAI Codex (OAuth)", () => {
+		it.skipIf(!openaiCodexToken)(
+			"gpt-5.2-xhigh - should detect overflow via isContextOverflow",
+			async () => {
+				const model = getModel("openai-codex", "gpt-5.2-xhigh");
+				const result = await testContextOverflow(model, openaiCodexToken!);
+				logResult(result);
+				expect(result.stopReason).toBe("error");
+				expect(isContextOverflow(result.response, model.contextWindow)).toBe(true);
+			},
+			120000,
+		);
+	});
+
 	// =============================================================================
 	// xAI
 	// Expected pattern: "maximum prompt length is X but the request contains Y"
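For context, the gpt-5.2-xhigh test above only asserts that isContextOverflow recognizes the provider error. A minimal sketch of that kind of check, assuming it pattern-matches the error message — the xAI pattern is quoted from the comment above and the signature from the test; the real implementation may use more signals:

```ts
// Hypothetical sketch of message-pattern matching; not the repo's actual code.
function isContextOverflow(
	response: { error?: { message?: string } },
	contextWindow: number,
): boolean {
	const message = response.error?.message ?? "";
	// xAI-style: "maximum prompt length is X but the request contains Y"
	const m = message.match(/maximum prompt length is (\d+) but the request contains (\d+)/i);
	if (m) return Number(m[2]) > contextWindow;
	// Generic fallback: the provider names the context window or length.
	return /context (window|length)/i.test(message);
}
```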