fix(ai): append system prompt to codex bridge message instead of converting to input

Previously the system prompt was converted to an input message in convertMessages,
then stripped out by filterPiSystemPrompts. Now the system prompt is passed directly
to transformRequestBody and appended after CODEX_PI_BRIDGE in the bridge message.
This commit is contained in:
Mario Zechner 2026-01-05 06:03:07 +01:00
parent 9a147559c0
commit bb50738f7e
15 changed files with 908 additions and 127 deletions

View file

@@ -10,8 +10,9 @@ const oauthTokens = await Promise.all([
resolveApiKey("github-copilot"),
resolveApiKey("google-gemini-cli"),
resolveApiKey("google-antigravity"),
resolveApiKey("openai-codex"),
]);
const [anthropicOAuthToken, githubCopilotToken, geminiCliToken, antigravityToken] = oauthTokens;
const [anthropicOAuthToken, githubCopilotToken, geminiCliToken, antigravityToken, openaiCodexToken] = oauthTokens;
async function testEmptyMessage<TApi extends Api>(llm: Model<TApi>, options: OptionsForApi<TApi> = {}) {
// Test with completely empty content array
@@ -573,4 +574,42 @@ describe("AI Providers Empty Message Tests", () => {
},
);
});
describe("OpenAI Codex Provider Empty Messages", () => {
  // Every case in this suite targets the same model and shares the same
  // retry budget and timeout, so registration is factored into one helper.
  const MODEL_ID = "gpt-5.2-xhigh";
  const TEST_OPTS = { retry: 3, timeout: 30000 };

  /**
   * Registers a single empty-message case against the codex model.
   * Skipped entirely when no OpenAI Codex OAuth token was resolved.
   */
  const codexCase = (title: string, run: () => Promise<void>) => {
    it.skipIf(!openaiCodexToken)(`${MODEL_ID} - ${title}`, TEST_OPTS, run);
  };

  codexCase("should handle empty content array", async () => {
    const llm = getModel("openai-codex", MODEL_ID);
    await testEmptyMessage(llm, { apiKey: openaiCodexToken });
  });

  codexCase("should handle empty string content", async () => {
    const llm = getModel("openai-codex", MODEL_ID);
    await testEmptyStringMessage(llm, { apiKey: openaiCodexToken });
  });

  codexCase("should handle whitespace-only content", async () => {
    const llm = getModel("openai-codex", MODEL_ID);
    await testWhitespaceOnlyMessage(llm, { apiKey: openaiCodexToken });
  });

  codexCase("should handle empty assistant message in conversation", async () => {
    const llm = getModel("openai-codex", MODEL_ID);
    await testEmptyAssistantMessage(llm, { apiKey: openaiCodexToken });
  });
});
});