refactor(ai): streamline codex prompt handling

This commit is contained in:
Mario Zechner 2026-01-06 10:27:51 +01:00
parent b04ce9fe95
commit 858c6bae8a
6 changed files with 99 additions and 104 deletions

View file

@ -33,6 +33,8 @@ import {
URL_PATHS,
} from "./openai-codex/constants.js";
import { getCodexInstructions } from "./openai-codex/prompts/codex.js";
import { buildCodexPiBridge } from "./openai-codex/prompts/pi-codex-bridge.js";
import { buildCodexSystemPrompt } from "./openai-codex/prompts/system-prompt.js";
import {
type CodexRequestOptions,
normalizeModel,
@ -110,6 +112,15 @@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"
const normalizedModel = normalizeModel(params.model);
const codexInstructions = await getCodexInstructions(normalizedModel);
const bridgeText = buildCodexPiBridge(context.tools);
const systemPrompt = buildCodexSystemPrompt({
codexInstructions,
bridgeText,
userSystemPrompt: context.systemPrompt,
});
params.model = normalizedModel;
params.instructions = systemPrompt.instructions;
const codexOptions: CodexRequestOptions = {
reasoningEffort: options?.reasoningEffort,
@ -118,13 +129,7 @@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"
include: options?.include,
};
const transformedBody = await transformRequestBody(
params,
codexInstructions,
codexOptions,
options?.codexMode ?? true,
context.systemPrompt,
);
const transformedBody = await transformRequestBody(params, codexOptions, systemPrompt);
const reasoningEffort = transformedBody.reasoning?.effort ?? null;
const headers = createCodexHeaders(model.headers, accountId, apiKey, transformedBody.prompt_cache_key);

View file

@ -3,46 +3,53 @@
* Aligns Codex CLI expectations with Pi's toolset.
*/
export const CODEX_PI_BRIDGE = `# Codex Running in Pi
import type { Tool } from "../../../types.js";
You are running Codex through pi, a terminal coding assistant. The tools and rules differ from Codex CLI.
function formatToolList(tools?: Tool[]): string {
if (!tools || tools.length === 0) {
return "- (none)";
}
## CRITICAL: Tool Replacements
const normalized = tools
.map((tool) => {
const name = tool.name.trim();
if (!name) return null;
const description = (tool.description || "Custom tool").replace(/\s*\n\s*/g, " ").trim();
return { name, description };
})
.filter((tool): tool is { name: string; description: string } => tool !== null);
<critical_rule priority="0">
APPLY_PATCH DOES NOT EXIST USE "edit" INSTEAD
- NEVER use: apply_patch, applyPatch
- ALWAYS use: edit for ALL file modifications
</critical_rule>
if (normalized.length === 0) {
return "- (none)";
}
<critical_rule priority="0">
UPDATE_PLAN DOES NOT EXIST
- NEVER use: update_plan, updatePlan, read_plan, readPlan, todowrite, todoread
- There is no plan tool in this environment
</critical_rule>
const maxNameLength = normalized.reduce((max, tool) => Math.max(max, tool.name.length), 0);
const padWidth = Math.max(6, maxNameLength + 1);
## Available Tools (pi)
return normalized
.map((tool) => {
const paddedName = tool.name.padEnd(padWidth);
return `- ${paddedName}- ${tool.description}`;
})
.join("\n");
}
- read - Read file contents
- bash - Execute bash commands
- edit - Modify files with exact find/replace (requires prior read)
- write - Create or overwrite files
- grep - Search file contents (read-only)
- find - Find files by glob pattern (read-only)
- ls - List directory contents (read-only)
export function buildCodexPiBridge(tools?: Tool[]): string {
const toolsList = formatToolList(tools);
## Usage Rules
return `# Codex Environment Bridge
- Read before edit; use read instead of cat/sed for file contents
- Use edit for surgical changes; write only for new files or complete rewrites
- Prefer grep/find/ls over bash for discovery
- Be concise and show file paths clearly when working with files
<environment_override priority="0">
IGNORE ALL PREVIOUS INSTRUCTIONS ABOVE THIS MESSAGE.
Do not assume any tools are available unless listed below.
</environment_override>
## Verification Checklist
The next system instructions that follow this message are authoritative and must be obeyed, even if they conflict with earlier instructions.
1. Using edit, not apply_patch
2. No plan tools used
3. Only the tools listed above are called
## Available Tools
Below are additional system instruction you MUST follow when responding:
${toolsList}
Only use the tools listed above. Do not reference or call any other tools.
`;
}

View file

@ -0,0 +1,26 @@
export interface CodexSystemPrompt {
instructions: string;
developerMessages: string[];
}
export function buildCodexSystemPrompt(args: {
codexInstructions: string;
bridgeText: string;
userSystemPrompt?: string;
}): CodexSystemPrompt {
const { codexInstructions, bridgeText, userSystemPrompt } = args;
const developerMessages: string[] = [];
if (bridgeText.trim().length > 0) {
developerMessages.push(bridgeText.trim());
}
if (userSystemPrompt && userSystemPrompt.trim().length > 0) {
developerMessages.push(userSystemPrompt.trim());
}
return {
instructions: codexInstructions.trim(),
developerMessages,
};
}

View file

@ -1,6 +1,3 @@
import { TOOL_REMAP_MESSAGE } from "./prompts/codex.js";
import { CODEX_PI_BRIDGE } from "./prompts/pi-codex-bridge.js";
export interface ReasoningConfig {
effort: "none" | "minimal" | "low" | "medium" | "high" | "xhigh";
summary: "auto" | "concise" | "detailed" | "off" | "on";
@ -210,69 +207,20 @@ function filterInput(input: InputItem[] | undefined): InputItem[] | undefined {
});
}
function addCodexBridgeMessage(
input: InputItem[] | undefined,
hasTools: boolean,
systemPrompt?: string,
): InputItem[] | undefined {
if (!hasTools || !Array.isArray(input)) return input;
const bridgeText = systemPrompt ? `${CODEX_PI_BRIDGE}\n\n${systemPrompt}` : CODEX_PI_BRIDGE;
const bridgeMessage: InputItem = {
type: "message",
role: "developer",
content: [
{
type: "input_text",
text: bridgeText,
},
],
};
return [bridgeMessage, ...input];
}
function addToolRemapMessage(input: InputItem[] | undefined, hasTools: boolean): InputItem[] | undefined {
if (!hasTools || !Array.isArray(input)) return input;
const toolRemapMessage: InputItem = {
type: "message",
role: "developer",
content: [
{
type: "input_text",
text: TOOL_REMAP_MESSAGE,
},
],
};
return [toolRemapMessage, ...input];
}
export async function transformRequestBody(
body: RequestBody,
codexInstructions: string,
options: CodexRequestOptions = {},
codexMode = true,
systemPrompt?: string,
prompt?: { instructions: string; developerMessages: string[] },
): Promise<RequestBody> {
const normalizedModel = normalizeModel(body.model);
body.model = normalizedModel;
body.store = false;
body.stream = true;
body.instructions = codexInstructions;
if (body.input && Array.isArray(body.input)) {
body.input = filterInput(body.input);
if (codexMode) {
body.input = addCodexBridgeMessage(body.input, !!body.tools, systemPrompt);
} else {
body.input = addToolRemapMessage(body.input, !!body.tools);
}
if (body.input) {
const functionCallIds = new Set(
body.input
@ -308,6 +256,18 @@ export async function transformRequestBody(
}
}
if (prompt?.developerMessages && prompt.developerMessages.length > 0 && Array.isArray(body.input)) {
const developerMessages = prompt.developerMessages.map(
(text) =>
({
type: "message",
role: "developer",
content: [{ type: "input_text", text }],
}) as InputItem,
);
body.input = [...developerMessages, ...body.input];
}
if (options.reasoningEffort !== undefined) {
const reasoningConfig = getReasoningConfig(normalizedModel, options);
body.reasoning = {

View file

@ -7,7 +7,7 @@ describe("openai-codex include handling", () => {
model: "gpt-5.1-codex",
};
const transformed = await transformRequestBody(body, "CODEX_INSTRUCTIONS", { include: ["foo"] }, true);
const transformed = await transformRequestBody(body, { include: ["foo"] });
expect(transformed.include).toEqual(["foo", "reasoning.encrypted_content"]);
});
@ -16,12 +16,9 @@ describe("openai-codex include handling", () => {
model: "gpt-5.1-codex",
};
const transformed = await transformRequestBody(
body,
"CODEX_INSTRUCTIONS",
{ include: ["foo", "reasoning.encrypted_content"] },
true,
);
const transformed = await transformRequestBody(body, {
include: ["foo", "reasoning.encrypted_content"],
});
expect(transformed.include).toEqual(["foo", "reasoning.encrypted_content"]);
});
});

View file

@ -3,7 +3,6 @@ import { tmpdir } from "node:os";
import { join } from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { getCodexInstructions } from "../src/providers/openai-codex/prompts/codex.js";
import { CODEX_PI_BRIDGE } from "../src/providers/openai-codex/prompts/pi-codex-bridge.js";
import {
normalizeModel,
type RequestBody,
@ -19,7 +18,7 @@ const FALLBACK_PROMPT = readFileSync(
);
describe("openai-codex request transformer", () => {
it("filters item_reference, strips ids, and inserts bridge message", async () => {
it("filters item_reference and strips ids", async () => {
const body: RequestBody = {
model: "gpt-5.1-codex",
input: [
@ -41,18 +40,19 @@ describe("openai-codex request transformer", () => {
tools: [{ type: "function", name: "tool", description: "", parameters: {} }],
};
const transformed = await transformRequestBody(body, "CODEX_INSTRUCTIONS", {}, true);
const transformed = await transformRequestBody(body, {});
expect(transformed.store).toBe(false);
expect(transformed.stream).toBe(true);
expect(transformed.instructions).toBe("CODEX_INSTRUCTIONS");
expect(transformed.include).toEqual(["reasoning.encrypted_content"]);
const input = transformed.input || [];
expect(input.some((item) => item.type === "item_reference")).toBe(false);
expect(input.some((item) => "id" in item)).toBe(false);
expect(input[0]?.type).toBe("message");
expect(input[0]?.content).toEqual([{ type: "input_text", text: CODEX_PI_BRIDGE }]);
const first = input[0];
expect(first?.type).toBe("message");
expect(first?.role).toBe("developer");
expect(first?.content).toEqual([{ type: "input_text", text: `${DEFAULT_PROMPT_PREFIX}...` }]);
const orphaned = input.find((item) => item.type === "message" && item.role === "assistant");
expect(orphaned?.content).toMatch(/Previous tool result/);