fix(ai): append system prompt to codex bridge message instead of converting to input

Previously the system prompt was converted to an input message in convertMessages,
then stripped out by filterPiSystemPrompts. Now the system prompt is passed directly
to transformRequestBody and appended after CODEX_PI_BRIDGE in the bridge message.
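
A minimal sketch of the new flow in TypeScript. Only convertMessages, filterPiSystemPrompts, transformRequestBody, and CODEX_PI_BRIDGE are names taken from this commit; the request body shape and the instructions field are assumptions for illustration:

// Sketch only: body shape and field names are assumptions, not the real types.
type CodexRequestBody = { instructions?: string; input: unknown[] };

declare const CODEX_PI_BRIDGE: string; // bridge preamble defined elsewhere in the provider

// Old flow: convertMessages turned the system prompt into an input message,
// which filterPiSystemPrompts then stripped back out.
// New flow: the system prompt bypasses convertMessages and is appended to the
// bridge message here instead.
function transformRequestBody(body: CodexRequestBody, systemPrompt?: string): CodexRequestBody {
	const instructions = systemPrompt
		? `${CODEX_PI_BRIDGE}\n\n${systemPrompt}` // system prompt appended after the bridge text
		: CODEX_PI_BRIDGE;
	return { ...body, instructions };
}
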
Mario Zechner 2026-01-05 06:03:07 +01:00
parent 9a147559c0
commit bb50738f7e
15 changed files with 908 additions and 127 deletions

@@ -10,8 +10,9 @@ const oauthTokens = await Promise.all([
 resolveApiKey("github-copilot"),
 resolveApiKey("google-gemini-cli"),
 resolveApiKey("google-antigravity"),
+resolveApiKey("openai-codex"),
 ]);
-const [anthropicOAuthToken, githubCopilotToken, geminiCliToken, antigravityToken] = oauthTokens;
+const [anthropicOAuthToken, githubCopilotToken, geminiCliToken, antigravityToken, openaiCodexToken] = oauthTokens;
 
 async function testTokensOnAbort<TApi extends Api>(llm: Model<TApi>, options: OptionsForApi<TApi> = {}) {
 const context: Context = {
@@ -43,11 +44,12 @@ async function testTokensOnAbort<TApi extends Api>(llm: Model<TApi>, options: Op
 expect(msg.stopReason).toBe("aborted");
-// OpenAI providers, Gemini CLI, zai, and the GPT-OSS model on Antigravity only send usage in the final chunk,
+// OpenAI providers, OpenAI Codex, Gemini CLI, zai, and the GPT-OSS model on Antigravity only send usage in the final chunk,
 // so when aborted they have no token stats. Anthropic and Google send usage information early in the stream.
 if (
 llm.api === "openai-completions" ||
 llm.api === "openai-responses" ||
+llm.api === "openai-codex-responses" ||
 llm.provider === "google-gemini-cli" ||
 llm.provider === "zai" ||
 (llm.provider === "google-antigravity" && llm.id.includes("gpt-oss"))
@@ -217,4 +219,15 @@ describe("Token Statistics on Abort", () => {
 },
 );
 });
+
+describe("OpenAI Codex Provider", () => {
+it.skipIf(!openaiCodexToken)(
+"gpt-5.2-xhigh - should include token stats when aborted mid-stream",
+{ retry: 3, timeout: 30000 },
+async () => {
+const llm = getModel("openai-codex", "gpt-5.2-xhigh");
+await testTokensOnAbort(llm, { apiKey: openaiCodexToken });
+},
+);
+});
 });
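
To exercise just the new case locally, vitest's standard -t / --testNamePattern flag should work (the test self-skips via it.skipIf when resolveApiKey("openai-codex") yields no token):

npx vitest run -t "OpenAI Codex Provider"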