feat(ai): add OpenAI Codex OAuth + responses provider

This commit is contained in:
Ahmed Kamal 2026-01-04 21:11:19 +02:00
parent 6ddfd1be13
commit 1650041a63
22 changed files with 2705 additions and 5 deletions

View file

@ -15,7 +15,7 @@
"scripts": {
"clean": "rm -rf dist",
"generate-models": "npx tsx scripts/generate-models.ts",
"build": "npm run generate-models && tsgo -p tsconfig.build.json",
"build": "npm run generate-models && tsgo -p tsconfig.build.json && node scripts/copy-assets.js",
"dev": "tsgo -p tsconfig.build.json --watch --preserveWatchOutput",
"dev:tsc": "tsgo -p tsconfig.build.json --watch --preserveWatchOutput",
"test": "vitest --run",

View file

@ -0,0 +1,28 @@
import { copyFileSync, mkdirSync } from "node:fs";
import { dirname, join } from "node:path";
import { fileURLToPath } from "node:url";

// Copies the Codex instructions prompt from src/ into dist/ so the built
// package ships the markdown asset that the TypeScript compiler does not emit.
const scriptDir = dirname(fileURLToPath(import.meta.url));
const packageRoot = join(scriptDir, "..");

// Source and destination differ only in the top-level folder (src vs dist).
const assetSegments = ["providers", "openai-codex", "prompts", "codex-instructions.md"];
const source = join(packageRoot, "src", ...assetSegments);
const destination = join(packageRoot, "dist", ...assetSegments);

// Make sure the destination directory tree exists before copying.
mkdirSync(dirname(destination), { recursive: true });
copyFileSync(source, destination);
console.log(`[pi-ai] Copied ${source} -> ${destination}`);

View file

@ -442,6 +442,458 @@ async function generateModels() {
});
}
// OpenAI Codex (ChatGPT OAuth) models
//
// Every Codex model shares the same backend URL, capabilities and (zero)
// subscription pricing — only the id and display name differ — so the list is
// generated from an [id, name] table instead of 37 hand-written literals.
const CODEX_BASE_URL = "https://chatgpt.com/backend-api";
const CODEX_CONTEXT = 400000;
const CODEX_MAX_TOKENS = 128000;

/** Builds one Codex model entry; the display name defaults to the id itself. */
const codexModel = (id: string, name: string = id): Model<"openai-codex-responses"> => ({
	id,
	name,
	api: "openai-codex-responses",
	provider: "openai-codex",
	baseUrl: CODEX_BASE_URL,
	reasoning: true,
	input: ["text", "image"],
	cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
	contextWindow: CODEX_CONTEXT,
	maxTokens: CODEX_MAX_TOKENS,
});

const codexModels: Model<"openai-codex-responses">[] = [
	codexModel("gpt-5.2-codex", "GPT-5.2 Codex"),
	codexModel("gpt-5.2-codex-low"),
	codexModel("gpt-5.2-codex-medium"),
	codexModel("gpt-5.2-codex-high"),
	codexModel("gpt-5.2-codex-xhigh"),
	codexModel("gpt-5.2", "GPT-5.2"),
	codexModel("gpt-5.2-none"),
	codexModel("gpt-5.2-low"),
	codexModel("gpt-5.2-medium"),
	codexModel("gpt-5.2-high"),
	codexModel("gpt-5.2-xhigh"),
	codexModel("gpt-5.1-codex-max", "GPT-5.1 Codex Max"),
	codexModel("gpt-5.1-codex-max-low"),
	codexModel("gpt-5.1-codex-max-medium"),
	codexModel("gpt-5.1-codex-max-high"),
	codexModel("gpt-5.1-codex-max-xhigh"),
	codexModel("gpt-5.1-codex", "GPT-5.1 Codex"),
	codexModel("gpt-5.1-codex-low"),
	codexModel("gpt-5.1-codex-medium"),
	codexModel("gpt-5.1-codex-high"),
	codexModel("gpt-5.1-codex-mini", "GPT-5.1 Codex Mini"),
	codexModel("gpt-5.1-codex-mini-medium"),
	codexModel("gpt-5.1-codex-mini-high"),
	codexModel("codex-mini-latest", "Codex Mini Latest"),
	codexModel("gpt-5-codex-mini"),
	codexModel("gpt-5-codex-mini-medium"),
	codexModel("gpt-5-codex-mini-high"),
	codexModel("gpt-5-codex"),
	codexModel("gpt-5.1", "GPT-5.1"),
	codexModel("gpt-5.1-none"),
	codexModel("gpt-5.1-low"),
	codexModel("gpt-5.1-medium"),
	codexModel("gpt-5.1-high"),
	codexModel("gpt-5.1-chat-latest"),
	codexModel("gpt-5"),
	codexModel("gpt-5-mini"),
	codexModel("gpt-5-nano"),
];
allModels.push(...codexModels);
// Add missing Grok models
if (!allModels.some(m => m.provider === "xai" && m.id === "grok-code-fast-1")) {
allModels.push({

View file

@ -7,6 +7,7 @@ import { loginGitHubCopilot } from "./utils/oauth/github-copilot.js";
import { loginAntigravity } from "./utils/oauth/google-antigravity.js";
import { loginGeminiCli } from "./utils/oauth/google-gemini-cli.js";
import { getOAuthProviders } from "./utils/oauth/index.js";
import { loginOpenAICodex } from "./utils/oauth/openai-codex.js";
import type { OAuthCredentials, OAuthProvider } from "./utils/oauth/types.js";
const AUTH_FILE = "auth.json";
@ -84,6 +85,19 @@ async function login(provider: OAuthProvider): Promise<void> {
(msg) => console.log(msg),
);
break;
case "openai-codex":
credentials = await loginOpenAICodex({
onAuth: (info) => {
console.log(`\nOpen this URL in your browser:\n${info.url}`);
if (info.instructions) console.log(info.instructions);
console.log();
},
onPrompt: async (p) => {
return await promptFn(`${p.message}${p.placeholder ? ` (${p.placeholder})` : ""}:`);
},
onProgress: (msg) => console.log(msg),
});
break;
}
const auth = loadAuth();
@ -112,6 +126,7 @@ Providers:
github-copilot GitHub Copilot
google-gemini-cli Google Gemini CLI
google-antigravity Antigravity (Gemini 3, Claude, GPT-OSS)
openai-codex OpenAI Codex (ChatGPT Plus/Pro)
Examples:
npx @mariozechner/pi-ai login # interactive provider selection
@ -140,7 +155,7 @@ Examples:
}
console.log();
const choice = await prompt(rl, "Enter number (1-4): ");
const choice = await prompt(rl, `Enter number (1-${PROVIDERS.length}): `);
rl.close();
const index = parseInt(choice, 10) - 1;

View file

@ -0,0 +1,627 @@
import type {
ResponseFunctionToolCall,
ResponseInput,
ResponseInputContent,
ResponseInputImage,
ResponseInputText,
ResponseOutputMessage,
ResponseReasoningItem,
} from "openai/resources/responses/responses.js";
import { calculateCost } from "../models.js";
import { getEnvApiKey } from "../stream.js";
import type {
Api,
AssistantMessage,
Context,
Model,
StopReason,
StreamFunction,
StreamOptions,
TextContent,
ThinkingContent,
Tool,
ToolCall,
} from "../types.js";
import { AssistantMessageEventStream } from "../utils/event-stream.js";
import { parseStreamingJson } from "../utils/json-parse.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import {
CODEX_BASE_URL,
JWT_CLAIM_PATH,
OPENAI_HEADER_VALUES,
OPENAI_HEADERS,
URL_PATHS,
} from "./openai-codex/constants.js";
import { getCodexInstructions } from "./openai-codex/prompts/codex.js";
import {
type CodexRequestOptions,
normalizeModel,
type RequestBody,
transformRequestBody,
} from "./openai-codex/request-transformer.js";
import { parseCodexError, parseCodexSseStream } from "./openai-codex/response-handler.js";
import { transformMessages } from "./transorm-messages.js";
/**
 * Streaming options specific to the Codex (ChatGPT OAuth) Responses provider,
 * on top of the generic StreamOptions.
 */
export interface OpenAICodexResponsesOptions extends StreamOptions {
// Reasoning effort forwarded in the request; "xhigh" is Codex-specific.
reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh";
// Reasoning summary mode; null/undefined lets the request transformer decide.
reasoningSummary?: "auto" | "concise" | "detailed" | "off" | "on" | null;
textVerbosity?: "low" | "medium" | "high";
// Extra `include` fields passed through to the request body.
include?: string[];
// When true (the default — see `options?.codexMode ?? true` below), the
// request body is transformed with the Codex system instructions.
codexMode?: boolean;
}
// Debug logging is opt-in via PI_CODEX_DEBUG=1|true; output goes to stderr.
const CODEX_DEBUG = process.env.PI_CODEX_DEBUG === "1" || process.env.PI_CODEX_DEBUG === "true";
/**
 * Streams an assistant turn from the ChatGPT backend's Codex Responses
 * endpoint using a ChatGPT OAuth access token as the "API key".
 *
 * The event stream is returned immediately; all network work runs in a
 * detached async IIFE that pushes start / *_delta / *_end / done events into
 * it. Failures never reject — they surface as a single "error" event carrying
 * the partially-built assistant message.
 */
export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"> = (
model: Model<"openai-codex-responses">,
context: Context,
options?: OpenAICodexResponsesOptions,
): AssistantMessageEventStream => {
const stream = new AssistantMessageEventStream();
(async () => {
// Accumulator for the final assistant message; mutated as SSE events arrive.
const output: AssistantMessage = {
role: "assistant",
content: [],
api: "openai-codex-responses" as Api,
provider: model.provider,
model: model.id,
usage: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
totalTokens: 0,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
},
stopReason: "stop",
timestamp: Date.now(),
};
try {
// The "API key" here is the OAuth access token (a JWT).
const apiKey = options?.apiKey || getEnvApiKey(model.provider) || "";
if (!apiKey) {
throw new Error(`No API key for provider: ${model.provider}`);
}
// The ChatGPT account id is embedded in the JWT claims, not configured separately.
const accountId = getAccountId(apiKey);
const baseUrl = model.baseUrl || CODEX_BASE_URL;
const baseWithSlash = baseUrl.endsWith("/") ? baseUrl : `${baseUrl}/`;
// Resolve ".../responses" against the base, then swap in the Codex path.
const url = rewriteUrlForCodex(new URL(URL_PATHS.RESPONSES.slice(1), baseWithSlash).toString());
const messages = convertMessages(model, context);
const params: RequestBody = {
model: model.id,
input: messages,
stream: true,
};
if (options?.maxTokens) {
params.max_output_tokens = options.maxTokens;
}
if (options?.temperature !== undefined) {
params.temperature = options.temperature;
}
if (context.tools) {
params.tools = convertTools(context.tools);
}
// Codex mode (default on) injects the Codex system instructions for the
// normalized model id via the request transformer.
const normalizedModel = normalizeModel(params.model);
const codexInstructions = await getCodexInstructions(normalizedModel);
const codexOptions: CodexRequestOptions = {
reasoningEffort: options?.reasoningEffort,
reasoningSummary: options?.reasoningSummary ?? undefined,
textVerbosity: options?.textVerbosity,
include: options?.include,
};
const transformedBody = await transformRequestBody(
params,
codexInstructions,
codexOptions,
options?.codexMode ?? true,
);
const headers = createCodexHeaders(model.headers, accountId, apiKey, transformedBody.prompt_cache_key);
logCodexDebug("codex request", {
url,
model: params.model,
headers: redactHeaders(headers),
});
const response = await fetch(url, {
method: "POST",
headers,
body: JSON.stringify(transformedBody),
signal: options?.signal,
});
logCodexDebug("codex response", {
url: response.url,
status: response.status,
statusText: response.statusText,
contentType: response.headers.get("content-type") || null,
cfRay: response.headers.get("cf-ray") || null,
});
if (!response.ok) {
const info = await parseCodexError(response);
throw new Error(info.friendlyMessage || info.message);
}
if (!response.body) {
throw new Error("No response body");
}
stream.push({ type: "start", partial: output });
// currentItem mirrors the server-side output item currently streaming;
// currentBlock is the matching block in output.content being filled in.
let currentItem: ResponseReasoningItem | ResponseOutputMessage | ResponseFunctionToolCall | null = null;
let currentBlock: ThinkingContent | TextContent | (ToolCall & { partialJson: string }) | null = null;
const blocks = output.content;
const blockIndex = () => blocks.length - 1;
for await (const rawEvent of parseCodexSseStream(response)) {
const eventType = typeof rawEvent.type === "string" ? rawEvent.type : "";
if (!eventType) continue;
if (eventType === "response.output_item.added") {
// A new reasoning / message / function_call item begins.
const item = rawEvent.item as ResponseReasoningItem | ResponseOutputMessage | ResponseFunctionToolCall;
if (item.type === "reasoning") {
currentItem = item;
currentBlock = { type: "thinking", thinking: "" };
output.content.push(currentBlock);
stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output });
} else if (item.type === "message") {
currentItem = item;
currentBlock = { type: "text", text: "" };
output.content.push(currentBlock);
stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
} else if (item.type === "function_call") {
currentItem = item;
currentBlock = {
type: "toolCall",
// Composite id "<call_id>|<item id>": both halves are needed when the
// turn is replayed (split again in convertMessages).
id: `${item.call_id}|${item.id}`,
name: item.name,
arguments: {},
partialJson: item.arguments || "",
};
output.content.push(currentBlock);
stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output });
}
} else if (eventType === "response.reasoning_summary_part.added") {
if (currentItem && currentItem.type === "reasoning") {
currentItem.summary = currentItem.summary || [];
currentItem.summary.push((rawEvent as { part: ResponseReasoningItem["summary"][number] }).part);
}
} else if (eventType === "response.reasoning_summary_text.delta") {
// Append the delta both to the local thinking block and to the mirrored item.
if (currentItem && currentItem.type === "reasoning" && currentBlock?.type === "thinking") {
currentItem.summary = currentItem.summary || [];
const lastPart = currentItem.summary[currentItem.summary.length - 1];
if (lastPart) {
const delta = (rawEvent as { delta?: string }).delta || "";
currentBlock.thinking += delta;
lastPart.text += delta;
stream.push({
type: "thinking_delta",
contentIndex: blockIndex(),
delta,
partial: output,
});
}
}
} else if (eventType === "response.reasoning_summary_part.done") {
// Summary parts are separated with a blank line in the thinking text.
if (currentItem && currentItem.type === "reasoning" && currentBlock?.type === "thinking") {
currentItem.summary = currentItem.summary || [];
const lastPart = currentItem.summary[currentItem.summary.length - 1];
if (lastPart) {
currentBlock.thinking += "\n\n";
lastPart.text += "\n\n";
stream.push({
type: "thinking_delta",
contentIndex: blockIndex(),
delta: "\n\n",
partial: output,
});
}
}
} else if (eventType === "response.content_part.added") {
if (currentItem && currentItem.type === "message") {
currentItem.content = currentItem.content || [];
const part = (rawEvent as { part?: ResponseOutputMessage["content"][number] }).part;
if (part && (part.type === "output_text" || part.type === "refusal")) {
currentItem.content.push(part);
}
}
} else if (eventType === "response.output_text.delta") {
if (currentItem && currentItem.type === "message" && currentBlock?.type === "text") {
const lastPart = currentItem.content[currentItem.content.length - 1];
if (lastPart && lastPart.type === "output_text") {
const delta = (rawEvent as { delta?: string }).delta || "";
currentBlock.text += delta;
lastPart.text += delta;
stream.push({
type: "text_delta",
contentIndex: blockIndex(),
delta,
partial: output,
});
}
}
} else if (eventType === "response.refusal.delta") {
// Refusal text is surfaced through the same text block as normal output.
if (currentItem && currentItem.type === "message" && currentBlock?.type === "text") {
const lastPart = currentItem.content[currentItem.content.length - 1];
if (lastPart && lastPart.type === "refusal") {
const delta = (rawEvent as { delta?: string }).delta || "";
currentBlock.text += delta;
lastPart.refusal += delta;
stream.push({
type: "text_delta",
contentIndex: blockIndex(),
delta,
partial: output,
});
}
}
} else if (eventType === "response.function_call_arguments.delta") {
// Arguments stream as JSON fragments; keep a best-effort parsed view live.
if (currentItem && currentItem.type === "function_call" && currentBlock?.type === "toolCall") {
const delta = (rawEvent as { delta?: string }).delta || "";
currentBlock.partialJson += delta;
currentBlock.arguments = parseStreamingJson(currentBlock.partialJson);
stream.push({
type: "toolcall_delta",
contentIndex: blockIndex(),
delta,
partial: output,
});
}
} else if (eventType === "response.output_item.done") {
// The finished item is authoritative: overwrite the accumulated deltas.
const item = rawEvent.item as ResponseReasoningItem | ResponseOutputMessage | ResponseFunctionToolCall;
if (item.type === "reasoning" && currentBlock?.type === "thinking") {
currentBlock.thinking = item.summary?.map((s) => s.text).join("\n\n") || "";
// Preserve the raw reasoning item verbatim so it can be replayed to the
// API on the next turn (see convertMessages).
currentBlock.thinkingSignature = JSON.stringify(item);
stream.push({
type: "thinking_end",
contentIndex: blockIndex(),
content: currentBlock.thinking,
partial: output,
});
currentBlock = null;
} else if (item.type === "message" && currentBlock?.type === "text") {
currentBlock.text = item.content.map((c) => (c.type === "output_text" ? c.text : c.refusal)).join("");
currentBlock.textSignature = item.id;
stream.push({
type: "text_end",
contentIndex: blockIndex(),
content: currentBlock.text,
partial: output,
});
currentBlock = null;
} else if (item.type === "function_call") {
const toolCall: ToolCall = {
type: "toolCall",
id: `${item.call_id}|${item.id}`,
name: item.name,
arguments: JSON.parse(item.arguments),
};
stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
}
} else if (eventType === "response.completed" || eventType === "response.done") {
const response = (
rawEvent as {
response?: {
usage?: {
input_tokens?: number;
output_tokens?: number;
total_tokens?: number;
input_tokens_details?: { cached_tokens?: number };
};
status?: string;
};
}
).response;
if (response?.usage) {
// input_tokens includes cached tokens; report the cached share separately.
const cachedTokens = response.usage.input_tokens_details?.cached_tokens || 0;
output.usage = {
input: (response.usage.input_tokens || 0) - cachedTokens,
output: response.usage.output_tokens || 0,
cacheRead: cachedTokens,
cacheWrite: 0,
totalTokens: response.usage.total_tokens || 0,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
};
}
calculateCost(model, output.usage);
output.stopReason = mapStopReason(response?.status);
// A normal stop that produced tool calls is reported as "toolUse".
if (output.content.some((b) => b.type === "toolCall") && output.stopReason === "stop") {
output.stopReason = "toolUse";
}
} else if (eventType === "error") {
const code = (rawEvent as { code?: string }).code || "";
const message = (rawEvent as { message?: string }).message || "Unknown error";
throw new Error(code ? `Error Code ${code}: ${message}` : message);
} else if (eventType === "response.failed") {
throw new Error("Unknown error");
}
}
if (options?.signal?.aborted) {
throw new Error("Request was aborted");
}
if (output.stopReason === "aborted" || output.stopReason === "error") {
throw new Error("An unknown error occurred");
}
stream.push({ type: "done", reason: output.stopReason, message: output });
stream.end();
} catch (error) {
// Strip any transient per-block index fields before reporting the error.
for (const block of output.content) delete (block as { index?: number }).index;
output.stopReason = options?.signal?.aborted ? "aborted" : "error";
output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
stream.push({ type: "error", reason: output.stopReason, error: output });
stream.end();
}
})();
return stream;
};
/**
 * Builds the HTTP headers for a Codex backend request.
 *
 * Authentication is the OAuth bearer token plus the ChatGPT account id; any
 * `x-api-key` inherited from the model's configured headers is removed. When a
 * prompt cache key is available it doubles as the conversation/session id,
 * otherwise any stale ids are stripped.
 */
function createCodexHeaders(
initHeaders: Record<string, string> | undefined,
accountId: string,
accessToken: string,
promptCacheKey?: string,
): Headers {
	const result = new Headers(initHeaders ?? {});
	// The ChatGPT backend authenticates via the bearer token, not an API key.
	result.delete("x-api-key");
	result.set("Authorization", `Bearer ${accessToken}`);
	result.set(OPENAI_HEADERS.ACCOUNT_ID, accountId);
	result.set(OPENAI_HEADERS.BETA, OPENAI_HEADER_VALUES.BETA_RESPONSES);
	result.set(OPENAI_HEADERS.ORIGINATOR, OPENAI_HEADER_VALUES.ORIGINATOR_CODEX);
	const trackingHeaders = [OPENAI_HEADERS.CONVERSATION_ID, OPENAI_HEADERS.SESSION_ID];
	if (promptCacheKey) {
		for (const name of trackingHeaders) result.set(name, promptCacheKey);
	} else {
		for (const name of trackingHeaders) result.delete(name);
	}
	result.set("accept", "text/event-stream");
	result.set("content-type", "application/json");
	return result;
}
/** Writes a "[codex]"-prefixed debug line to stderr when PI_CODEX_DEBUG is enabled. */
function logCodexDebug(message: string, details?: Record<string, unknown>): void {
	if (!CODEX_DEBUG) return;
	const line = `[codex] ${message}`;
	if (details) {
		console.error(line, details);
	} else {
		console.error(line);
	}
}
/**
 * Returns a plain-object copy of `headers` with credential-bearing values
 * masked, suitable for debug logging.
 */
function redactHeaders(headers: Headers): Record<string, string> {
	const result: Record<string, string> = {};
	for (const [name, value] of headers.entries()) {
		const lower = name.toLowerCase();
		if (lower === "authorization") {
			// Keep the scheme visible but hide the token itself.
			result[name] = "Bearer [redacted]";
		} else if (
			lower === "cookie" ||
			lower.includes("account") ||
			lower.includes("session") ||
			lower.includes("conversation")
		) {
			result[name] = "[redacted]";
		} else {
			result[name] = value;
		}
	}
	return result;
}
/** Swaps the generic /responses path for the Codex-specific /codex/responses path. */
function rewriteUrlForCodex(url: string): string {
	const { RESPONSES, CODEX_RESPONSES } = URL_PATHS;
	return url.replace(RESPONSES, CODEX_RESPONSES);
}
// Shape of the JWT payload we care about: the OpenAI auth claim namespace
// carrying the ChatGPT account id, plus arbitrary other claims.
type JwtPayload = {
[JWT_CLAIM_PATH]?: {
chatgpt_account_id?: string;
};
[key: string]: unknown;
};

/**
 * Decodes the payload segment of a JWT without verifying its signature.
 * Returns null for anything that is not a three-segment token with a
 * base64-encoded JSON payload.
 */
function decodeJwt(token: string): JwtPayload | null {
	const segments = token.split(".");
	if (segments.length !== 3) return null;
	try {
		const json = Buffer.from(segments[1] ?? "", "base64").toString("utf-8");
		return JSON.parse(json) as JwtPayload;
	} catch {
		return null;
	}
}
/**
 * Extracts the ChatGPT account id claim from a Codex OAuth access token.
 * Throws when the token cannot be decoded or the claim is missing/empty.
 */
function getAccountId(accessToken: string): string {
	const claims = decodeJwt(accessToken)?.[JWT_CLAIM_PATH];
	const accountId = claims?.chatgpt_account_id;
	if (accountId) return accountId;
	throw new Error("Failed to extract accountId from token");
}
/**
 * Deterministic string hash (cyrb53-style double 32-bit mix) rendered as two
 * base-36 chunks; used to shorten overly long message ids.
 */
function shortHash(str: string): string {
	let a = 0xdeadbeef;
	let b = 0x41c6ce57;
	// Iterate UTF-16 code units so surrogate pairs hash the same as before.
	for (let i = 0; i < str.length; i++) {
		const code = str.charCodeAt(i);
		a = Math.imul(a ^ code, 2654435761);
		b = Math.imul(b ^ code, 1597334677);
	}
	// Final avalanche: cross-mix the two accumulators.
	a = Math.imul(a ^ (a >>> 16), 2246822507) ^ Math.imul(b ^ (b >>> 13), 3266489909);
	b = Math.imul(b ^ (b >>> 16), 2246822507) ^ Math.imul(a ^ (a >>> 13), 3266489909);
	return (b >>> 0).toString(36) + (a >>> 0).toString(36);
}
/**
 * Converts the provider-agnostic Context into the Responses API `input` list.
 *
 * - The system prompt becomes a "developer" message for reasoning models and
 *   a "system" message otherwise.
 * - Assistant thinking blocks are replayed verbatim from the stored
 *   thinkingSignature (the serialized reasoning item from a prior turn).
 * - Tool-call ids use the composite "<call_id>|<item id>" form produced by the
 *   streaming handler and are split back apart here.
 * - Image content is dropped for models whose input list lacks "image".
 */
function convertMessages(model: Model<"openai-codex-responses">, context: Context): ResponseInput {
const messages: ResponseInput = [];
const transformedMessages = transformMessages(context.messages, model);
if (context.systemPrompt) {
const role = model.reasoning ? "developer" : "system";
messages.push({
role,
content: sanitizeSurrogates(context.systemPrompt),
});
}
let msgIndex = 0;
for (const msg of transformedMessages) {
if (msg.role === "user") {
if (typeof msg.content === "string") {
messages.push({
role: "user",
content: [{ type: "input_text", text: sanitizeSurrogates(msg.content) }],
});
} else {
const content: ResponseInputContent[] = msg.content.map((item): ResponseInputContent => {
if (item.type === "text") {
return {
type: "input_text",
text: sanitizeSurrogates(item.text),
} satisfies ResponseInputText;
}
return {
type: "input_image",
detail: "auto",
image_url: `data:${item.mimeType};base64,${item.data}`,
} satisfies ResponseInputImage;
});
// Drop images for text-only models; skip the message entirely if nothing remains.
const filteredContent = !model.input.includes("image")
? content.filter((c) => c.type !== "input_image")
: content;
if (filteredContent.length === 0) continue;
messages.push({
role: "user",
content: filteredContent,
});
}
} else if (msg.role === "assistant") {
const output: ResponseInput = [];
for (const block of msg.content) {
// Thinking and tool-call blocks from errored turns are not replayed.
if (block.type === "thinking" && msg.stopReason !== "error") {
if (block.thinkingSignature) {
const reasoningItem = JSON.parse(block.thinkingSignature) as ResponseReasoningItem;
output.push(reasoningItem);
}
} else if (block.type === "text") {
const textBlock = block as TextContent;
// Message ids must be present and bounded in length: synthesize one
// when missing, and replace over-long ids with a deterministic hash.
let msgId = textBlock.textSignature;
if (!msgId) {
msgId = `msg_${msgIndex}`;
} else if (msgId.length > 64) {
msgId = `msg_${shortHash(msgId)}`;
}
output.push({
type: "message",
role: "assistant",
content: [{ type: "output_text", text: sanitizeSurrogates(textBlock.text), annotations: [] }],
status: "completed",
id: msgId,
} satisfies ResponseOutputMessage);
} else if (block.type === "toolCall" && msg.stopReason !== "error") {
const toolCall = block as ToolCall;
// Composite id round-trip: "<call_id>|<item id>" (see streaming handler).
output.push({
type: "function_call",
id: toolCall.id.split("|")[1],
call_id: toolCall.id.split("|")[0],
name: toolCall.name,
arguments: JSON.stringify(toolCall.arguments),
});
}
}
if (output.length === 0) continue;
messages.push(...output);
} else if (msg.role === "toolResult") {
// function_call_output carries text only; images are attached separately
// via a follow-up user message below.
const textResult = msg.content
.filter((c) => c.type === "text")
.map((c) => (c as { text: string }).text)
.join("\n");
const hasImages = msg.content.some((c) => c.type === "image");
const hasText = textResult.length > 0;
messages.push({
type: "function_call_output",
call_id: msg.toolCallId.split("|")[0],
output: sanitizeSurrogates(hasText ? textResult : "(see attached image)"),
});
if (hasImages && model.input.includes("image")) {
const contentParts: ResponseInputContent[] = [];
contentParts.push({
type: "input_text",
text: "Attached image(s) from tool result:",
} satisfies ResponseInputText);
for (const block of msg.content) {
if (block.type === "image") {
contentParts.push({
type: "input_image",
detail: "auto",
image_url: `data:${block.mimeType};base64,${block.data}`,
} satisfies ResponseInputImage);
}
}
messages.push({
role: "user",
content: contentParts,
});
}
}
msgIndex++;
}
return messages;
}
/**
 * Convert internal Tool definitions into the Responses API function-tool
 * format. `strict` is always emitted as null.
 */
function convertTools(
	tools: Tool[],
): Array<{ type: "function"; name: string; description: string; parameters: Record<string, unknown>; strict: null }> {
	type FunctionTool = {
		type: "function";
		name: string;
		description: string;
		parameters: Record<string, unknown>;
		strict: null;
	};
	const converted: FunctionTool[] = [];
	for (const tool of tools) {
		converted.push({
			type: "function",
			name: tool.name,
			description: tool.description,
			// Tool parameters are already a JSON-schema-like object; widen the type.
			parameters: tool.parameters as unknown as Record<string, unknown>,
			strict: null,
		});
	}
	return converted;
}
/**
 * Translate a Responses API status into the internal StopReason.
 * Truncation maps to "length", terminal failures to "error", and everything
 * else — completed, in-flight states, unknown, or missing — to "stop".
 */
function mapStopReason(status: string | undefined): StopReason {
	switch (status) {
		case "incomplete":
			return "length";
		case "failed":
		case "cancelled":
			return "error";
		default:
			return "stop";
	}
}

View file

@ -0,0 +1,25 @@
/**
* Constants for OpenAI Codex (ChatGPT OAuth) backend
*/
/** Base URL of the ChatGPT backend API used for Codex OAuth sessions. */
export const CODEX_BASE_URL = "https://chatgpt.com/backend-api";

/** Names of the custom HTTP headers sent with Codex backend requests. */
export const OPENAI_HEADERS = {
	BETA: "OpenAI-Beta",
	ACCOUNT_ID: "chatgpt-account-id",
	ORIGINATOR: "originator",
	SESSION_ID: "session_id",
	CONVERSATION_ID: "conversation_id",
} as const;

/** Fixed values for the headers declared above. */
export const OPENAI_HEADER_VALUES = {
	BETA_RESPONSES: "responses=experimental",
	// Identifies this client to the backend as the Codex CLI.
	ORIGINATOR_CODEX: "codex_cli_rs",
} as const;

/** URL path suffixes appended to CODEX_BASE_URL. */
export const URL_PATHS = {
	RESPONSES: "/responses",
	CODEX_RESPONSES: "/codex/responses",
} as const;

/** Namespace of the custom JWT claim carried by ChatGPT-issued tokens. */
export const JWT_CLAIM_PATH = "https://api.openai.com/auth" as const;

View file

@ -0,0 +1,105 @@
You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.
## General
- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.)
## Editing constraints
- Default to ASCII when editing or creating files. Only introduce non-ASCII or other Unicode characters when there is a clear justification and the file already uses them.
- Add succinct code comments that explain what is going on if code is not self-explanatory. You should not add comments like "Assigns the value to the variable", but a brief comment might be useful ahead of a complex code block that the user would otherwise have to spend time parsing out. Usage of these comments should be rare.
- Try to use apply_patch for single file edits, but it is fine to explore other options to make the edit if it does not work well. Do not use apply_patch for changes that are auto-generated (i.e. generating package.json or running a lint or format command like gofmt) or when scripting is more efficient (such as search and replacing a string across a codebase).
- You may be in a dirty git worktree.
* NEVER revert existing changes you did not make unless explicitly requested, since these changes were made by the user.
* If asked to make a commit or code edits and there are unrelated changes to your work or changes that you didn't make in those files, don't revert those changes.
* If the changes are in files you've touched recently, you should read carefully and understand how you can work with the changes rather than reverting them.
* If the changes are in unrelated files, just ignore them and don't revert them.
- Do not amend a commit unless explicitly requested to do so.
- While you are working, you might notice unexpected changes that you didn't make. If this happens, STOP IMMEDIATELY and ask the user how they would like to proceed.
- **NEVER** use destructive commands like `git reset --hard` or `git checkout --` unless specifically requested or approved by the user.
## Plan tool
When using the planning tool:
- Skip using the planning tool for straightforward tasks (roughly the easiest 25%).
- Do not make single-step plans.
- When you made a plan, update it after having performed one of the sub-tasks that you shared on the plan.
## Codex CLI harness, sandboxing, and approvals
The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from.
Filesystem sandboxing defines which files can be read or written. The options for `sandbox_mode` are:
- **read-only**: The sandbox only permits reading files.
- **workspace-write**: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval.
- **danger-full-access**: No filesystem sandboxing - all commands are permitted.
Network sandboxing defines whether network can be accessed without approval. Options for `network_access` are:
- **restricted**: Requires approval
- **enabled**: No approval needed
Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for `approval_policy` are
- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.
When you are running with `approval_policy == on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var)
- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `sandbox_permissions` and `justification` parameters - do not message the user before requesting approval for the command.
- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for
- (for all of these, you should weigh alternative paths that do not require approval)
When `sandbox_mode` is set to read-only, you'll need to request approval for any command that isn't a read.
You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure.
Although they introduce friction to the user because your work is paused until the user responds, you should leverage them when necessary to accomplish important work. If completing the task requires escalated permissions, do not let these settings or the sandbox deter you from attempting to accomplish the user's task unless it is set to "never", in which case never ask for approvals.
When requesting approval to execute a command that will require escalated privileges:
- Provide the `sandbox_permissions` parameter with the value `"require_escalated"`
- Include a short, 1 sentence explanation for why you need escalated permissions in the justification parameter
## Special user requests
- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as `date`), you should do so.
- If the user asks for a "review", default to a code review mindset: prioritise identifying bugs, risks, behavioural regressions, and missing tests. Findings must be the primary focus of the response - keep summaries or overviews brief and only after enumerating the issues. Present findings first (ordered by severity with file/line references), follow with open questions or assumptions, and offer a change-summary only as a secondary detail. If no findings are discovered, state that explicitly and mention any residual risks or testing gaps.
## Presenting your work and final message
You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value.
- Default: be very concise; friendly coding teammate tone.
- Ask only when needed; suggest ideas; mirror the user's style.
- For substantial work, summarize clearly; follow final-answer formatting.
- Skip heavy formatting for simple confirmations.
- Don't dump large files you've written; reference paths only.
- No "save/copy this file" - User is on the same machine.
- Offer logical next steps (tests, commits, build) briefly; add verify steps if you couldn't do something.
- For code changes:
* Lead with a quick explanation of the change, and then give more details on the context covering where and why a change was made. Do not start this explanation with "summary", just jump right in.
* If there are natural next steps the user may want to take, suggest them at the end of your response. Do not make suggestions if there are no natural next steps.
* When suggesting multiple options, use numeric lists for the suggestions so the user can quickly respond with a single number.
- The user does not see command execution outputs. When asked to show the output of a command (e.g. `git show`), relay the important details in your answer or summarize the key lines so the user understands the result.
### Final answer structure and style guidelines
- Plain text; CLI handles styling. Use structure only when it helps scanability.
- Headers: optional; short Title Case (1-3 words) wrapped in **…**; no blank line before the first bullet; add only if they truly help.
- Bullets: use - ; merge related points; keep to one line when possible; 4-6 per list ordered by importance; keep phrasing consistent.
- Monospace: backticks for commands/paths/env vars/code ids and inline examples; use for literal keyword bullets; never combine with **.
- Code samples or multi-line snippets should be wrapped in fenced code blocks; include an info string as often as possible.
- Structure: group related bullets; order sections general → specific → supporting; for subsections, start with a bolded keyword bullet, then items; match complexity to the task.
- Tone: collaborative, concise, factual; present tense, active voice; self-contained; no "above/below"; parallel wording.
- Don'ts: no nested bullets/hierarchies; no ANSI codes; don't cram unrelated keywords; keep keyword lists short—wrap/reformat if long; avoid naming formatting styles in answers.
- Adaptation: code explanations → precise, structured with code refs; simple tasks → lead with outcome; big changes → logical walkthrough + rationale + next actions; casual one-offs → plain sentences, no headers/bullets.
- File References: When referencing files in your response, make sure to include the relevant start line and always follow the below rules:
* Use inline code to make file paths clickable.
* Each reference should have a stand alone path. Even if it's the same file.
    * Accepted: absolute, workspace-relative, a/ or b/ diff prefixes, or bare filename/suffix.
    * Line/column (1-based, optional): :line[:column] or #Lline[Ccolumn] (column defaults to 1).
* Do not use URIs like file://, vscode://, or https://.
* Do not provide range of lines
* Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5

View file

@ -0,0 +1,227 @@
import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs";
import { homedir } from "node:os";
import { dirname, join } from "node:path";
import { fileURLToPath } from "node:url";
// GitHub endpoints used to discover the latest openai/codex release tag.
const GITHUB_API_RELEASES = "https://api.github.com/repos/openai/codex/releases/latest";
const GITHUB_HTML_RELEASES = "https://github.com/openai/codex/releases/latest";
// Default agent state directory (~/.pi/agent); overridable via PI_CODING_AGENT_DIR.
const DEFAULT_AGENT_DIR = join(homedir(), ".pi", "agent");
// Recreate __filename/__dirname, which do not exist in ESM modules.
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Bundled fallback prompt, used when both GitHub and the on-disk cache are unavailable.
const FALLBACK_PROMPT_PATH = join(__dirname, "codex-instructions.md");
/** Resolve the agent state directory, honoring the PI_CODING_AGENT_DIR override. */
function getAgentDir(): string {
	const override = process.env.PI_CODING_AGENT_DIR;
	return override ? override : DEFAULT_AGENT_DIR;
}
/** Directory under the agent dir where fetched Codex instructions are cached. */
function getCacheDir(): string {
	const base = getAgentDir();
	return join(base, "cache", "openai-codex");
}
/** Known Codex prompt families; each maps to a distinct upstream prompt file. */
export type ModelFamily = "gpt-5.2-codex" | "codex-max" | "codex" | "gpt-5.2" | "gpt-5.1";

// Upstream prompt file (in openai/codex under codex-rs/core/) per family.
const PROMPT_FILES: Record<ModelFamily, string> = {
	"gpt-5.2-codex": "gpt-5.2-codex_prompt.md",
	"codex-max": "gpt-5.1-codex-max_prompt.md",
	codex: "gpt_5_codex_prompt.md",
	"gpt-5.2": "gpt_5_2_prompt.md",
	"gpt-5.1": "gpt_5_1_prompt.md",
};

// Local cache file name (inside the cache dir) per family.
const CACHE_FILES: Record<ModelFamily, string> = {
	"gpt-5.2-codex": "gpt-5.2-codex-instructions.md",
	"codex-max": "codex-max-instructions.md",
	codex: "codex-instructions.md",
	"gpt-5.2": "gpt-5.2-instructions.md",
	"gpt-5.1": "gpt-5.1-instructions.md",
};

/** Sidecar metadata persisted next to each cached instructions file. */
export type CacheMetadata = {
	// ETag returned by GitHub, used for If-None-Match conditional refetches.
	etag: string | null;
	// Release tag the cached instructions were fetched from.
	tag: string;
	// Epoch milliseconds of the last freshness check.
	lastChecked: number;
	// Source URL the instructions were downloaded from.
	url: string;
};
/**
 * Map a normalized model name to its prompt family.
 *
 * Order matters: more specific families are matched before the generic
 * "codex" catch-all, and anything unrecognized falls back to "gpt-5.1".
 */
export function getModelFamily(normalizedModel: string): ModelFamily {
	if (normalizedModel.includes("gpt-5.2-codex") || normalizedModel.includes("gpt 5.2 codex")) {
		return "gpt-5.2-codex";
	}
	// Accept both hyphenated and spaced spellings, mirroring the check above.
	if (normalizedModel.includes("codex-max") || normalizedModel.includes("codex max")) {
		return "codex-max";
	}
	// Generic codex catch-all; includes() already covers any "codex-" prefix,
	// so the former redundant startsWith("codex-") check was dropped.
	if (normalizedModel.includes("codex")) {
		return "codex";
	}
	if (normalizedModel.includes("gpt-5.2")) {
		return "gpt-5.2";
	}
	return "gpt-5.1";
}
/**
 * Determine the latest openai/codex release tag.
 *
 * Tries the GitHub REST API first; if that fails (network error or non-OK
 * response, e.g. rate limiting), falls back to following the releases/latest
 * redirect and reading the tag from the final URL, and finally to scraping
 * the tag link out of the release page HTML.
 *
 * @throws if the tag cannot be determined by any strategy.
 */
async function getLatestReleaseTag(): Promise<string> {
	try {
		const response = await fetch(GITHUB_API_RELEASES);
		if (response.ok) {
			const data = (await response.json()) as { tag_name?: string };
			if (data.tag_name) {
				return data.tag_name;
			}
		}
	} catch {
		// fall through to the HTML-based strategies below
	}
	const htmlResponse = await fetch(GITHUB_HTML_RELEASES);
	if (!htmlResponse.ok) {
		throw new Error(`Failed to fetch latest release: ${htmlResponse.status}`);
	}
	// GitHub redirects /releases/latest to /releases/tag/<tag>; read the tag
	// off the final (post-redirect) URL when possible.
	const finalUrl = htmlResponse.url;
	if (finalUrl) {
		const parts = finalUrl.split("/tag/");
		const last = parts[parts.length - 1];
		if (last && !last.includes("/")) {
			return last;
		}
	}
	// Last resort: scrape the first tag link out of the page HTML.
	const html = await htmlResponse.text();
	const match = html.match(/\/openai\/codex\/releases\/tag\/([^"]+)/);
	if (match?.[1]) {
		return match[1];
	}
	throw new Error("Failed to determine latest release tag from GitHub");
}
/**
 * Load the base Codex system instructions for the given (normalized) model.
 *
 * Resolution order:
 *  1. On-disk cache, when it was checked within the last 15 minutes.
 *  2. GitHub (raw prompt file from the latest openai/codex release), using
 *     ETag-based conditional requests; results are written back to the cache.
 *  3. On any fetch error: stale on-disk cache, then the bundled fallback file.
 *
 * @throws when fetching fails and neither cached nor bundled instructions exist.
 */
export async function getCodexInstructions(normalizedModel = "gpt-5.1-codex"): Promise<string> {
	const modelFamily = getModelFamily(normalizedModel);
	const promptFile = PROMPT_FILES[modelFamily];
	const cacheDir = getCacheDir();
	const cacheFile = join(cacheDir, CACHE_FILES[modelFamily]);
	const cacheMetaFile = join(cacheDir, `${CACHE_FILES[modelFamily].replace(".md", "-meta.json")}`);
	try {
		let cachedETag: string | null = null;
		let cachedTag: string | null = null;
		let cachedTimestamp: number | null = null;
		if (existsSync(cacheMetaFile)) {
			const metadata = JSON.parse(readFileSync(cacheMetaFile, "utf8")) as CacheMetadata;
			cachedETag = metadata.etag;
			cachedTag = metadata.tag;
			cachedTimestamp = metadata.lastChecked;
		}
		// Within the TTL, trust the cached copy without touching the network.
		const CACHE_TTL_MS = 15 * 60 * 1000;
		if (cachedTimestamp && Date.now() - cachedTimestamp < CACHE_TTL_MS && existsSync(cacheFile)) {
			return readFileSync(cacheFile, "utf8");
		}
		const latestTag = await getLatestReleaseTag();
		const instructionsUrl = `https://raw.githubusercontent.com/openai/codex/${latestTag}/codex-rs/core/${promptFile}`;
		// A new release invalidates the ETag: it belonged to the previous tag's URL.
		if (cachedTag !== latestTag) {
			cachedETag = null;
		}
		const headers: Record<string, string> = {};
		if (cachedETag) {
			headers["If-None-Match"] = cachedETag;
		}
		const response = await fetch(instructionsUrl, { headers });
		// 304 Not Modified: the cached copy is still current.
		if (response.status === 304) {
			if (existsSync(cacheFile)) {
				return readFileSync(cacheFile, "utf8");
			}
		}
		if (response.ok) {
			const instructions = await response.text();
			const newETag = response.headers.get("etag");
			if (!existsSync(cacheDir)) {
				mkdirSync(cacheDir, { recursive: true });
			}
			// Persist the instructions plus metadata for future conditional fetches.
			writeFileSync(cacheFile, instructions, "utf8");
			writeFileSync(
				cacheMetaFile,
				JSON.stringify({
					etag: newETag,
					tag: latestTag,
					lastChecked: Date.now(),
					url: instructionsUrl,
				} satisfies CacheMetadata),
				"utf8",
			);
			return instructions;
		}
		// NOTE(review): a 304 response with a missing cache file also lands here (as "HTTP 304").
		throw new Error(`HTTP ${response.status}`);
	} catch (error) {
		console.error(
			`[openai-codex] Failed to fetch ${modelFamily} instructions from GitHub:`,
			error instanceof Error ? error.message : String(error),
		);
		// Prefer a stale cache over the bundled prompt — presumably newer; confirm if behavior matters.
		if (existsSync(cacheFile)) {
			console.error(`[openai-codex] Using cached ${modelFamily} instructions`);
			return readFileSync(cacheFile, "utf8");
		}
		if (existsSync(FALLBACK_PROMPT_PATH)) {
			console.error(`[openai-codex] Falling back to bundled instructions for ${modelFamily}`);
			return readFileSync(FALLBACK_PROMPT_PATH, "utf8");
		}
		throw new Error(`No cached Codex instructions available for ${modelFamily}`);
	}
}
/**
 * Developer message injected when tools are present (non-codex mode): remaps
 * Codex CLI's built-in tool names (apply_patch, update_plan, ...) onto Pi's
 * tool set. The string is sent verbatim to the model — do not reformat it.
 */
export const TOOL_REMAP_MESSAGE = `<user_instructions priority="0">
<environment_override priority="0">
YOU ARE IN A DIFFERENT ENVIRONMENT. These instructions override ALL previous tool references.
</environment_override>
<tool_replacements priority="0">
<critical_rule priority="0">
APPLY_PATCH DOES NOT EXIST USE "edit" INSTEAD
- NEVER use: apply_patch, applyPatch
- ALWAYS use: edit tool for ALL file modifications
</critical_rule>
<critical_rule priority="0">
UPDATE_PLAN DOES NOT EXIST
- NEVER use: update_plan, updatePlan, read_plan, readPlan, todowrite, todoread
- There is no plan tool in this environment
</critical_rule>
</tool_replacements>
<available_tools priority="0">
File Operations:
read - Read file contents
edit - Modify files with exact find/replace
write - Create or overwrite files
Search/Discovery:
grep - Search file contents for patterns (read-only)
find - Find files by glob pattern (read-only)
ls - List directory contents (read-only)
Execution:
bash - Run shell commands
</available_tools>
<verification_checklist priority="0">
Before file modifications:
1. Am I using "edit" NOT "apply_patch"?
2. Am I avoiding plan tools entirely?
3. Am I using only the tools listed above?
</verification_checklist>
</user_instructions>`;

View file

@ -0,0 +1,46 @@
/**
 * Codex-Pi bridge prompt.
 *
 * Injected as a developer message (codex mode) when tools are present; aligns
 * Codex CLI tool expectations with Pi's toolset. The string is sent verbatim
 * to the model — do not reformat it.
 */
export const CODEX_PI_BRIDGE = `# Codex Running in Pi
You are running Codex through pi, a terminal coding assistant. The tools and rules differ from Codex CLI.
## CRITICAL: Tool Replacements
<critical_rule priority="0">
APPLY_PATCH DOES NOT EXIST USE "edit" INSTEAD
- NEVER use: apply_patch, applyPatch
- ALWAYS use: edit for ALL file modifications
</critical_rule>
<critical_rule priority="0">
UPDATE_PLAN DOES NOT EXIST
- NEVER use: update_plan, updatePlan, read_plan, readPlan, todowrite, todoread
- There is no plan tool in this environment
</critical_rule>
## Available Tools (pi)
- read - Read file contents
- bash - Execute bash commands
- edit - Modify files with exact find/replace (requires prior read)
- write - Create or overwrite files
- grep - Search file contents (read-only)
- find - Find files by glob pattern (read-only)
- ls - List directory contents (read-only)
## Usage Rules
- Read before edit; use read instead of cat/sed for file contents
- Use edit for surgical changes; write only for new files or complete rewrites
- Prefer grep/find/ls over bash for discovery
- Be concise and show file paths clearly when working with files
## Verification Checklist
1. Using edit, not apply_patch
2. No plan tools used
3. Only the tools listed above are called
`;

View file

@ -0,0 +1,351 @@
import { TOOL_REMAP_MESSAGE } from "./prompts/codex.js";
import { CODEX_PI_BRIDGE } from "./prompts/pi-codex-bridge.js";
/** Reasoning settings forwarded to the Responses API. */
export interface ReasoningConfig {
	// Not every model supports every level; see getReasoningConfig's clamping.
	effort: "none" | "minimal" | "low" | "medium" | "high" | "xhigh";
	summary: "auto" | "concise" | "detailed" | "off" | "on";
}

/** Caller-tunable options applied while transforming a Codex request. */
export interface CodexRequestOptions {
	reasoningEffort?: ReasoningConfig["effort"];
	reasoningSummary?: ReasoningConfig["summary"] | null;
	textVerbosity?: "low" | "medium" | "high";
	// Extra response fields to request; defaults to ["reasoning.encrypted_content"].
	include?: string[];
}

/** Loosely-typed Responses API input item (message, function_call, function_call_output, ...). */
export interface InputItem {
	id?: string | null;
	type?: string | null;
	role?: string;
	content?: unknown;
	call_id?: string | null;
	name?: string;
	output?: unknown;
	arguments?: string;
}

/** Mutable request body sent to the Codex Responses endpoint. */
export interface RequestBody {
	model: string;
	store?: boolean;
	stream?: boolean;
	instructions?: string;
	input?: InputItem[];
	tools?: unknown;
	temperature?: number;
	reasoning?: Partial<ReasoningConfig>;
	text?: {
		verbosity?: "low" | "medium" | "high";
	};
	include?: string[];
	prompt_cache_key?: string;
	max_output_tokens?: number;
	max_completion_tokens?: number;
	// Pass through any additional Responses API fields untouched.
	[key: string]: unknown;
}
// Maps user-facing model ids (including reasoning-effort-suffixed aliases and
// legacy gpt-5 names) to the model ids actually sent to the backend.
const MODEL_MAP: Record<string, string> = {
	// gpt-5.1-codex family: effort suffixes collapse to the base model
	"gpt-5.1-codex": "gpt-5.1-codex",
	"gpt-5.1-codex-low": "gpt-5.1-codex",
	"gpt-5.1-codex-medium": "gpt-5.1-codex",
	"gpt-5.1-codex-high": "gpt-5.1-codex",
	// gpt-5.1-codex-max family
	"gpt-5.1-codex-max": "gpt-5.1-codex-max",
	"gpt-5.1-codex-max-low": "gpt-5.1-codex-max",
	"gpt-5.1-codex-max-medium": "gpt-5.1-codex-max",
	"gpt-5.1-codex-max-high": "gpt-5.1-codex-max",
	"gpt-5.1-codex-max-xhigh": "gpt-5.1-codex-max",
	// gpt-5.2 general-purpose family
	"gpt-5.2": "gpt-5.2",
	"gpt-5.2-none": "gpt-5.2",
	"gpt-5.2-low": "gpt-5.2",
	"gpt-5.2-medium": "gpt-5.2",
	"gpt-5.2-high": "gpt-5.2",
	"gpt-5.2-xhigh": "gpt-5.2",
	// gpt-5.2-codex family
	"gpt-5.2-codex": "gpt-5.2-codex",
	"gpt-5.2-codex-low": "gpt-5.2-codex",
	"gpt-5.2-codex-medium": "gpt-5.2-codex",
	"gpt-5.2-codex-high": "gpt-5.2-codex",
	"gpt-5.2-codex-xhigh": "gpt-5.2-codex",
	// gpt-5.1-codex-mini family
	"gpt-5.1-codex-mini": "gpt-5.1-codex-mini",
	"gpt-5.1-codex-mini-medium": "gpt-5.1-codex-mini",
	"gpt-5.1-codex-mini-high": "gpt-5.1-codex-mini",
	// gpt-5.1 general-purpose family
	"gpt-5.1": "gpt-5.1",
	"gpt-5.1-none": "gpt-5.1",
	"gpt-5.1-low": "gpt-5.1",
	"gpt-5.1-medium": "gpt-5.1",
	"gpt-5.1-high": "gpt-5.1",
	"gpt-5.1-chat-latest": "gpt-5.1",
	// legacy gpt-5 aliases, mapped forward to their 5.1 equivalents
	"gpt-5-codex": "gpt-5.1-codex",
	"codex-mini-latest": "gpt-5.1-codex-mini",
	"gpt-5-codex-mini": "gpt-5.1-codex-mini",
	"gpt-5-codex-mini-medium": "gpt-5.1-codex-mini",
	"gpt-5-codex-mini-high": "gpt-5.1-codex-mini",
	"gpt-5": "gpt-5.1",
	"gpt-5-mini": "gpt-5.1",
	"gpt-5-nano": "gpt-5.1",
};
/** Look up a model id in MODEL_MAP, exactly first, then case-insensitively. */
function getNormalizedModel(modelId: string): string | undefined {
	const direct = MODEL_MAP[modelId];
	if (direct) return direct;
	const wanted = modelId.toLowerCase();
	for (const key of Object.keys(MODEL_MAP)) {
		if (key.toLowerCase() === wanted) {
			return MODEL_MAP[key];
		}
	}
	return undefined;
}
/**
 * Normalize an arbitrary model identifier (possibly "provider/model" and/or a
 * legacy alias) into a model id the Codex backend accepts.
 *
 * Resolution order: exact/case-insensitive MODEL_MAP lookup, then substring
 * heuristics from most to least specific, finally the "gpt-5.1" default.
 */
export function normalizeModel(model: string | undefined): string {
	if (!model) return "gpt-5.1";
	// Strip a "provider/" prefix if present.
	const modelId = model.includes("/") ? model.split("/").pop()! : model;
	const mappedModel = getNormalizedModel(modelId);
	if (mappedModel) return mappedModel;
	const normalized = modelId.toLowerCase();
	if (normalized.includes("gpt-5.2-codex") || normalized.includes("gpt 5.2 codex")) {
		return "gpt-5.2-codex";
	}
	if (normalized.includes("gpt-5.2") || normalized.includes("gpt 5.2")) {
		return "gpt-5.2";
	}
	if (normalized.includes("gpt-5.1-codex-max") || normalized.includes("gpt 5.1 codex max")) {
		return "gpt-5.1-codex-max";
	}
	if (normalized.includes("gpt-5.1-codex-mini") || normalized.includes("gpt 5.1 codex mini")) {
		return "gpt-5.1-codex-mini";
	}
	if (
		normalized.includes("codex-mini-latest") ||
		normalized.includes("gpt-5-codex-mini") ||
		normalized.includes("gpt 5 codex mini")
	) {
		// Fix: previously returned the legacy alias "codex-mini-latest", which
		// MODEL_MAP itself translates to "gpt-5.1-codex-mini" on exact matches;
		// return the backend model id here too for consistency.
		return "gpt-5.1-codex-mini";
	}
	if (normalized.includes("gpt-5.1-codex") || normalized.includes("gpt 5.1 codex")) {
		return "gpt-5.1-codex";
	}
	if (normalized.includes("gpt-5.1") || normalized.includes("gpt 5.1")) {
		return "gpt-5.1";
	}
	if (normalized.includes("codex")) {
		return "gpt-5.1-codex";
	}
	if (normalized.includes("gpt-5") || normalized.includes("gpt 5")) {
		return "gpt-5.1";
	}
	return "gpt-5.1";
}
/**
 * Resolve the reasoning configuration for a normalized model name.
 *
 * Detects the model family, picks a sensible default effort for it, then
 * clamps the requested effort to what the family supports: "xhigh" only on
 * gpt-5.2 / gpt-5.2-codex / codex-max, "none" only on general-purpose
 * gpt-5.1/5.2, and codex-mini restricted to "medium"/"high".
 */
function getReasoningConfig(modelName: string | undefined, options: CodexRequestOptions = {}): ReasoningConfig {
	const name = modelName?.toLowerCase() ?? "";
	const has = (needle: string) => name.includes(needle);

	// Family detection (codex-mini is excluded from the generic codex flag).
	const gpt52Codex = has("gpt-5.2-codex") || has("gpt 5.2 codex");
	const gpt52 = (has("gpt-5.2") || has("gpt 5.2")) && !gpt52Codex;
	const codexMax = has("codex-max") || has("codex max");
	const codexMini = has("codex-mini") || has("codex mini") || has("codex_mini") || has("codex-mini-latest");
	const codex = has("codex") && !codexMini;
	const lightweight = !codexMini && (has("nano") || has("mini"));
	const gpt51 = (has("gpt-5.1") || has("gpt 5.1")) && !codex && !codexMax && !codexMini;

	const allowsXhigh = gpt52 || gpt52Codex || codexMax;
	const allowsNone = gpt52 || gpt51;

	// Family default, overridden by an explicit option.
	let effort: ReasoningConfig["effort"];
	if (codexMini) {
		effort = "medium";
	} else if (allowsXhigh) {
		effort = "high";
	} else if (lightweight) {
		effort = "minimal";
	} else {
		effort = "medium";
	}
	if (options.reasoningEffort) {
		effort = options.reasoningEffort;
	}

	// Clamp to the supported levels.
	if (codexMini) {
		// codex-mini only accepts "medium" or "high".
		if (effort === "xhigh") {
			effort = "high";
		} else if (effort !== "high") {
			effort = "medium";
		}
	}
	if (!allowsXhigh && effort === "xhigh") {
		effort = "high";
	}
	if (!allowsNone && effort === "none") {
		effort = "low";
	}
	if (codex && effort === "minimal") {
		effort = "low";
	}

	return { effort, summary: options.reasoningSummary ?? "auto" };
}
/**
 * Drop item_reference entries and strip server-assigned ids from the rest.
 * Non-array input (undefined) is returned unchanged.
 */
function filterInput(input: InputItem[] | undefined): InputItem[] | undefined {
	if (!Array.isArray(input)) return input;
	const result: InputItem[] = [];
	for (const item of input) {
		if (item.type === "item_reference") continue;
		if (item.id == null) {
			result.push(item);
		} else {
			const { id: _dropped, ...withoutId } = item;
			result.push(withoutId as InputItem);
		}
	}
	return result;
}
/**
 * Extract the plain text of an input item: either its string content, or the
 * newline-joined text of its input_text parts. Anything else yields "".
 */
function getContentText(item: InputItem): string {
	const { content } = item;
	if (typeof content === "string") return content;
	if (!Array.isArray(content)) return "";
	const texts: string[] = [];
	for (const part of content) {
		if (part && typeof part === "object" && (part as { type?: string }).type === "input_text") {
			const text = (part as { text?: string }).text;
			if (typeof text === "string") texts.push(text);
		}
	}
	return texts.join("\n");
}
function isPiSystemPrompt(item: InputItem): boolean {
const isSystemRole = item.role === "developer" || item.role === "system";
if (!isSystemRole) return false;
const contentText = getContentText(item).trim();
if (!contentText) return false;
return contentText.startsWith(
"You are an expert coding assistant. You help users with coding tasks by reading files, executing commands",
);
}
/** Remove Pi's own system prompts from the input; user items always pass through. */
async function filterPiSystemPrompts(input: InputItem[] | undefined): Promise<InputItem[] | undefined> {
	if (!Array.isArray(input)) return input;
	const kept: InputItem[] = [];
	for (const item of input) {
		if (item.role === "user" || !isPiSystemPrompt(item)) {
			kept.push(item);
		}
	}
	return kept;
}
function addCodexBridgeMessage(input: InputItem[] | undefined, hasTools: boolean): InputItem[] | undefined {
if (!hasTools || !Array.isArray(input)) return input;
const bridgeMessage: InputItem = {
type: "message",
role: "developer",
content: [
{
type: "input_text",
text: CODEX_PI_BRIDGE,
},
],
};
return [bridgeMessage, ...input];
}
function addToolRemapMessage(input: InputItem[] | undefined, hasTools: boolean): InputItem[] | undefined {
if (!hasTools || !Array.isArray(input)) return input;
const toolRemapMessage: InputItem = {
type: "message",
role: "developer",
content: [
{
type: "input_text",
text: TOOL_REMAP_MESSAGE,
},
],
};
return [toolRemapMessage, ...input];
}
/**
 * Rewrite an OpenAI Responses request body into the shape expected by the
 * ChatGPT Codex backend. Mutates and returns `body`.
 *
 * - Forces the normalized model, store=false, stream=true, and the Codex
 *   system instructions.
 * - Strips item_reference inputs and server-assigned item ids.
 * - codexMode: removes Pi's own system prompt and prepends the Codex-Pi
 *   bridge message; otherwise prepends the tool-remap message.
 * - Rewrites orphaned function_call_output items (whose function_call is no
 *   longer present in the input) into plain assistant messages.
 * - Applies reasoning/text defaults and strips the token-limit fields.
 */
export async function transformRequestBody(
	body: RequestBody,
	codexInstructions: string,
	options: CodexRequestOptions = {},
	codexMode = true,
): Promise<RequestBody> {
	const normalizedModel = normalizeModel(body.model);
	body.model = normalizedModel;
	body.store = false;
	body.stream = true;
	body.instructions = codexInstructions;
	if (body.input && Array.isArray(body.input)) {
		body.input = filterInput(body.input);
		if (codexMode) {
			body.input = await filterPiSystemPrompts(body.input);
			body.input = addCodexBridgeMessage(body.input, !!body.tools);
		} else {
			body.input = addToolRemapMessage(body.input, !!body.tools);
		}
		if (body.input) {
			// Collect the call_ids of all function_call items still present.
			const functionCallIds = new Set(
				body.input
					.filter((item) => item.type === "function_call" && typeof item.call_id === "string")
					.map((item) => item.call_id as string),
			);
			// Rewrite orphaned function_call_output items (no matching
			// function_call remaining) into plain assistant messages.
			body.input = body.input.map((item) => {
				if (item.type === "function_call_output" && typeof item.call_id === "string") {
					const callId = item.call_id as string;
					if (!functionCallIds.has(callId)) {
						const itemRecord = item as unknown as Record<string, unknown>;
						const toolName = typeof itemRecord.name === "string" ? itemRecord.name : "tool";
						let text = "";
						try {
							const output = itemRecord.output;
							text = typeof output === "string" ? output : JSON.stringify(output);
						} catch {
							text = String(itemRecord.output ?? "");
						}
						// Keep the synthesized message bounded in size.
						if (text.length > 16000) {
							text = `${text.slice(0, 16000)}\n...[truncated]`;
						}
						return {
							type: "message",
							role: "assistant",
							content: `[Previous ${toolName} result; call_id=${callId}]: ${text}`,
						} as InputItem;
					}
				}
				return item;
			});
		}
	}
	const reasoningConfig = getReasoningConfig(normalizedModel, options);
	body.reasoning = {
		...body.reasoning,
		...reasoningConfig,
	};
	body.text = {
		...body.text,
		verbosity: options.textVerbosity || "medium",
	};
	// Default to requesting encrypted reasoning content in responses.
	body.include = options.include || ["reasoning.encrypted_content"];
	// Token-limit fields are intentionally stripped before sending.
	delete body.max_output_tokens;
	delete body.max_completion_tokens;
	return body;
}

View file

@ -0,0 +1,133 @@
/** Snapshot of one rate-limit window reported by the Codex backend. */
export type CodexRateLimit = {
	// Percentage of the window's quota already consumed.
	used_percent?: number;
	// Length of the rate-limit window in minutes.
	window_minutes?: number;
	// Epoch seconds at which the window resets.
	resets_at?: number;
};

/** Primary and secondary rate-limit windows reported via response headers. */
export type CodexRateLimits = {
	primary?: CodexRateLimit;
	secondary?: CodexRateLimit;
};

/** Structured error information extracted from a failed Codex response. */
export type CodexErrorInfo = {
	// Best available message: API error message, friendly message, or raw body.
	message: string;
	// HTTP status code of the response.
	status: number;
	// Human-readable usage-limit message, set for quota/rate-limit errors.
	friendlyMessage?: string;
	// Rate-limit state parsed from x-codex-* response headers, if present.
	rateLimits?: CodexRateLimits;
	// Raw response body text.
	raw?: string;
};
/**
 * Extract structured error info from a failed Codex backend response.
 *
 * Rate-limit state is read from the x-codex-* response headers, and the body
 * is parsed as JSON when possible to recover the API's error message and
 * code. For usage/rate-limit errors (or any 429) a friendly message with the
 * estimated reset time is synthesized.
 *
 * Fix: header-derived rate limits and the 429 friendly message are now
 * produced even when the response body is not JSON; previously both were
 * silently skipped whenever JSON.parse threw.
 */
export async function parseCodexError(response: Response): Promise<CodexErrorInfo> {
	const raw = await response.text();
	let message = raw || response.statusText || "Request failed";
	let friendlyMessage: string | undefined;

	// Rate-limit info travels in headers, independent of the body format.
	const headers = response.headers;
	const primary = {
		used_percent: toNumber(headers.get("x-codex-primary-used-percent")),
		window_minutes: toInt(headers.get("x-codex-primary-window-minutes")),
		resets_at: toInt(headers.get("x-codex-primary-reset-at")),
	};
	const secondary = {
		used_percent: toNumber(headers.get("x-codex-secondary-used-percent")),
		window_minutes: toInt(headers.get("x-codex-secondary-window-minutes")),
		resets_at: toInt(headers.get("x-codex-secondary-reset-at")),
	};
	const rateLimits: CodexRateLimits | undefined =
		primary.used_percent !== undefined || secondary.used_percent !== undefined
			? { primary, secondary }
			: undefined;

	// The body is best-effort JSON; fall back to header-only info otherwise.
	let err: Record<string, unknown> = {};
	try {
		const parsed = JSON.parse(raw) as { error?: Record<string, unknown> };
		err = parsed?.error ?? {};
	} catch {
		// raw body not JSON
	}

	const code = String((err as { code?: string; type?: string }).code ?? (err as { type?: string }).type ?? "");
	const resetsAt = (err as { resets_at?: number }).resets_at ?? primary.resets_at ?? secondary.resets_at;
	const mins = resetsAt ? Math.max(0, Math.round((resetsAt * 1000 - Date.now()) / 60000)) : undefined;
	if (/usage_limit_reached|usage_not_included|rate_limit_exceeded/i.test(code) || response.status === 429) {
		const planType = (err as { plan_type?: string }).plan_type;
		const plan = planType ? ` (${String(planType).toLowerCase()} plan)` : "";
		const when = mins !== undefined ? ` Try again in ~${mins} min.` : "";
		friendlyMessage = `You have hit your ChatGPT usage limit${plan}.${when}`.trim();
	}
	const errMessage = (err as { message?: string }).message;
	message = errMessage || friendlyMessage || message;

	return {
		message,
		status: response.status,
		friendlyMessage,
		rateLimits,
		raw: raw,
	};
}
/**
 * Parse a streaming SSE response body into JSON event objects.
 *
 * Splits the decoded byte stream on blank lines (the "\n\n" event delimiter),
 * parses each chunk's data: lines via parseSseChunk, and yields every chunk
 * that parses to JSON. A trailing partial chunk left at EOF is parsed too.
 */
export async function* parseCodexSseStream(response: Response): AsyncGenerator<Record<string, unknown>> {
	if (!response.body) {
		return;
	}
	const reader = response.body.getReader();
	const decoder = new TextDecoder();
	// Carries the bytes of a not-yet-complete event across reads.
	let buffer = "";
	while (true) {
		const { done, value } = await reader.read();
		if (done) break;
		buffer += decoder.decode(value, { stream: true });
		let index = buffer.indexOf("\n\n");
		while (index !== -1) {
			const chunk = buffer.slice(0, index);
			buffer = buffer.slice(index + 2);
			const event = parseSseChunk(chunk);
			if (event) yield event;
			index = buffer.indexOf("\n\n");
		}
	}
	// Flush a final event that was not terminated by a blank line.
	if (buffer.trim()) {
		const event = parseSseChunk(buffer);
		if (event) yield event;
	}
}
function parseSseChunk(chunk: string): Record<string, unknown> | null {
const lines = chunk.split("\n");
const dataLines: string[] = [];
for (const line of lines) {
if (line.startsWith("data:")) {
dataLines.push(line.slice(5).trim());
}
}
if (dataLines.length === 0) return null;
const data = dataLines.join("\n").trim();
if (!data || data === "[DONE]") return null;
try {
return JSON.parse(data) as Record<string, unknown>;
} catch {
return null;
}
}
// Coerce a (possibly absent) header value to a finite number, else undefined.
function toNumber(v: string | null): number | undefined {
	const parsed = v == null ? Number.NaN : Number(v);
	return Number.isFinite(parsed) ? parsed : undefined;
}
// Coerce a (possibly absent) header value to an integer (base 10), else undefined.
function toInt(v: string | null): number | undefined {
	const parsed = v == null ? Number.NaN : parseInt(v, 10);
	return Number.isFinite(parsed) ? parsed : undefined;
}

View file

@ -7,6 +7,7 @@ import {
streamGoogleGeminiCli,
} from "./providers/google-gemini-cli.js";
import { type GoogleVertexOptions, streamGoogleVertex } from "./providers/google-vertex.js";
import { type OpenAICodexResponsesOptions, streamOpenAICodexResponses } from "./providers/openai-codex-responses.js";
import { type OpenAICompletionsOptions, streamOpenAICompletions } from "./providers/openai-completions.js";
import { type OpenAIResponsesOptions, streamOpenAIResponses } from "./providers/openai-responses.js";
import type {
@ -85,6 +86,9 @@ export function stream<TApi extends Api>(
case "openai-responses":
return streamOpenAIResponses(model as Model<"openai-responses">, context, providerOptions as any);
case "openai-codex-responses":
return streamOpenAICodexResponses(model as Model<"openai-codex-responses">, context, providerOptions as any);
case "google-generative-ai":
return streamGoogle(model as Model<"google-generative-ai">, context, providerOptions);
@ -202,6 +206,12 @@ function mapOptionsForApi<TApi extends Api>(
reasoningEffort: supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning),
} satisfies OpenAIResponsesOptions;
case "openai-codex-responses":
return {
...base,
reasoningEffort: supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning),
} satisfies OpenAICodexResponsesOptions;
case "google-generative-ai": {
// Explicitly disable thinking when reasoning is not specified
// This is needed because Gemini has "dynamic thinking" enabled by default

View file

@ -2,6 +2,7 @@ import type { AnthropicOptions } from "./providers/anthropic.js";
import type { GoogleOptions } from "./providers/google.js";
import type { GoogleGeminiCliOptions } from "./providers/google-gemini-cli.js";
import type { GoogleVertexOptions } from "./providers/google-vertex.js";
import type { OpenAICodexResponsesOptions } from "./providers/openai-codex-responses.js";
import type { OpenAICompletionsOptions } from "./providers/openai-completions.js";
import type { OpenAIResponsesOptions } from "./providers/openai-responses.js";
import type { AssistantMessageEventStream } from "./utils/event-stream.js";
@ -11,6 +12,7 @@ export type { AssistantMessageEventStream } from "./utils/event-stream.js";
export type Api =
| "openai-completions"
| "openai-responses"
| "openai-codex-responses"
| "anthropic-messages"
| "google-generative-ai"
| "google-gemini-cli"
@ -20,6 +22,7 @@ export interface ApiOptionsMap {
"anthropic-messages": AnthropicOptions;
"openai-completions": OpenAICompletionsOptions;
"openai-responses": OpenAIResponsesOptions;
"openai-codex-responses": OpenAICodexResponsesOptions;
"google-generative-ai": GoogleOptions;
"google-gemini-cli": GoogleGeminiCliOptions;
"google-vertex": GoogleVertexOptions;
@ -43,6 +46,7 @@ export type KnownProvider =
| "google-antigravity"
| "google-vertex"
| "openai"
| "openai-codex"
| "github-copilot"
| "xai"
| "groq"

View file

@ -28,6 +28,11 @@ export {
loginGeminiCli,
refreshGoogleCloudToken,
} from "./google-gemini-cli.js";
// OpenAI Codex (ChatGPT OAuth)
export {
loginOpenAICodex,
refreshOpenAICodexToken,
} from "./openai-codex.js";
export * from "./types.js";
@ -39,6 +44,7 @@ import { refreshAnthropicToken } from "./anthropic.js";
import { refreshGitHubCopilotToken } from "./github-copilot.js";
import { refreshAntigravityToken } from "./google-antigravity.js";
import { refreshGoogleCloudToken } from "./google-gemini-cli.js";
import { refreshOpenAICodexToken } from "./openai-codex.js";
import type { OAuthCredentials, OAuthProvider, OAuthProviderInfo } from "./types.js";
/**
@ -74,6 +80,9 @@ export async function refreshOAuthToken(
}
newCredentials = await refreshAntigravityToken(credentials.refresh, credentials.projectId);
break;
case "openai-codex":
newCredentials = await refreshOpenAICodexToken(credentials.refresh);
break;
default:
throw new Error(`Unknown OAuth provider: ${provider}`);
}
@ -139,5 +148,10 @@ export function getOAuthProviders(): OAuthProviderInfo[] {
name: "Antigravity (Gemini 3, Claude, GPT-OSS)",
available: true,
},
{
id: "openai-codex",
name: "ChatGPT Plus/Pro (Codex Subscription)",
available: true,
},
];
}

View file

@ -0,0 +1,342 @@
/**
* OpenAI Codex (ChatGPT OAuth) flow
*/
import { randomBytes } from "node:crypto";
import http from "node:http";
import { generatePKCE } from "./pkce.js";
import type { OAuthCredentials, OAuthPrompt } from "./types.js";
// OAuth client registered for the Codex CLI; all endpoints are on auth.openai.com.
const CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann";
const AUTHORIZE_URL = "https://auth.openai.com/oauth/authorize";
const TOKEN_URL = "https://auth.openai.com/oauth/token";
// Loopback callback the browser is redirected to; port 1455 matches the Codex CLI.
const REDIRECT_URI = "http://localhost:1455/auth/callback";
// offline_access is required so the token endpoint returns a refresh token.
const SCOPE = "openid profile email offline_access";
// Vendor-specific JWT claim under which the ChatGPT account id is nested.
const JWT_CLAIM_PATH = "https://api.openai.com/auth";
// Page served to the browser after a successful local callback.
const SUCCESS_HTML = `<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Authentication successful</title>
</head>
<body>
<p>Authentication successful. Return to your terminal to continue.</p>
</body>
</html>`;
// Result of a token-endpoint call: tokens plus absolute expiry (epoch ms), or failure.
type TokenSuccess = { type: "success"; access: string; refresh: string; expires: number };
type TokenFailure = { type: "failed" };
type TokenResult = TokenSuccess | TokenFailure;
// Decoded access-token payload; only the ChatGPT account claim is of interest.
type JwtPayload = {
	[JWT_CLAIM_PATH]?: {
		chatgpt_account_id?: string;
	};
	[key: string]: unknown;
};
// Generate the OAuth CSRF `state` value: 16 random bytes rendered as 32 hex chars.
function createState(): string {
	const bytes = randomBytes(16);
	return Array.from(bytes, (byte) => byte.toString(16).padStart(2, "0")).join("");
}
/**
 * Interpret whatever the user pasted after the OAuth redirect.
 * Accepts a full redirect URL, a "code#state" pair, a raw query string
 * containing "code=", or a bare authorization code. Returns {} for empty input.
 */
function parseAuthorizationInput(input: string): { code?: string; state?: string } {
	const trimmed = input.trim();
	if (trimmed.length === 0) {
		return {};
	}
	// Case 1: a full redirect URL — read code/state from the query string.
	try {
		const redirect = new URL(trimmed);
		const query = redirect.searchParams;
		return {
			code: query.get("code") ?? undefined,
			state: query.get("state") ?? undefined,
		};
	} catch {
		// Not an absolute URL; fall through to the other formats.
	}
	// Case 2: "code#state" shorthand.
	if (trimmed.includes("#")) {
		const [codePart, statePart] = trimmed.split("#", 2);
		return { code: codePart, state: statePart };
	}
	// Case 3: a raw query-string paste like "code=...&state=...".
	if (trimmed.includes("code=")) {
		const query = new URLSearchParams(trimmed);
		return {
			code: query.get("code") ?? undefined,
			state: query.get("state") ?? undefined,
		};
	}
	// Case 4: treat the whole input as the bare authorization code.
	return { code: trimmed };
}
/**
 * Decode a JWT payload without verifying the signature (we only read claims
 * from a token we just received from the issuer). Returns null when the token
 * does not have three segments or the payload is not valid JSON.
 */
function decodeJwt(token: string): JwtPayload | null {
	const segments = token.split(".");
	if (segments.length !== 3) return null;
	try {
		const body = Buffer.from(segments[1] ?? "", "base64").toString("utf-8");
		return JSON.parse(body) as JwtPayload;
	} catch {
		return null;
	}
}
/**
 * Exchange an authorization code (plus PKCE verifier) for access/refresh tokens.
 *
 * Returns a TokenSuccess with an absolute expiry (epoch ms) on success, or
 * { type: "failed" } after logging a sanitized error. Mirrors the error
 * handling of refreshAccessToken so network failures never throw.
 */
async function exchangeAuthorizationCode(
	code: string,
	verifier: string,
	redirectUri: string = REDIRECT_URI,
): Promise<TokenResult> {
	try {
		const response = await fetch(TOKEN_URL, {
			method: "POST",
			headers: { "Content-Type": "application/x-www-form-urlencoded" },
			body: new URLSearchParams({
				grant_type: "authorization_code",
				client_id: CLIENT_ID,
				code,
				code_verifier: verifier,
				redirect_uri: redirectUri,
			}),
		});
		if (!response.ok) {
			const text = await response.text().catch(() => "");
			console.error("[openai-codex] code->token failed:", response.status, text);
			return { type: "failed" };
		}
		const json = (await response.json()) as {
			access_token?: string;
			refresh_token?: string;
			expires_in?: number;
		};
		if (!json.access_token || !json.refresh_token || typeof json.expires_in !== "number") {
			// Log only the names of the missing fields — the raw payload may
			// contain a partial set of credentials and must not reach the logs.
			const missing = [
				!json.access_token && "access_token",
				!json.refresh_token && "refresh_token",
				typeof json.expires_in !== "number" && "expires_in",
			].filter(Boolean);
			console.error("[openai-codex] token response missing fields:", missing.join(", "));
			return { type: "failed" };
		}
		return {
			type: "success",
			access: json.access_token,
			refresh: json.refresh_token,
			expires: Date.now() + json.expires_in * 1000,
		};
	} catch (error) {
		console.error("[openai-codex] token exchange error:", error);
		return { type: "failed" };
	}
}
/**
 * Exchange a refresh token for a fresh access/refresh token pair.
 *
 * Returns { type: "failed" } after logging a sanitized error on any failure
 * (HTTP error, malformed response, or network exception); never throws.
 */
async function refreshAccessToken(refreshToken: string): Promise<TokenResult> {
	try {
		const response = await fetch(TOKEN_URL, {
			method: "POST",
			headers: { "Content-Type": "application/x-www-form-urlencoded" },
			body: new URLSearchParams({
				grant_type: "refresh_token",
				refresh_token: refreshToken,
				client_id: CLIENT_ID,
			}),
		});
		if (!response.ok) {
			const text = await response.text().catch(() => "");
			console.error("[openai-codex] Token refresh failed:", response.status, text);
			return { type: "failed" };
		}
		const json = (await response.json()) as {
			access_token?: string;
			refresh_token?: string;
			expires_in?: number;
		};
		if (!json.access_token || !json.refresh_token || typeof json.expires_in !== "number") {
			// Log only the names of the missing fields — the raw payload may
			// contain a partial set of credentials and must not reach the logs.
			const missing = [
				!json.access_token && "access_token",
				!json.refresh_token && "refresh_token",
				typeof json.expires_in !== "number" && "expires_in",
			].filter(Boolean);
			console.error("[openai-codex] Token refresh response missing fields:", missing.join(", "));
			return { type: "failed" };
		}
		return {
			type: "success",
			access: json.access_token,
			refresh: json.refresh_token,
			expires: Date.now() + json.expires_in * 1000,
		};
	} catch (error) {
		console.error("[openai-codex] Token refresh error:", error);
		return { type: "failed" };
	}
}
/**
 * Build the authorization URL for the Codex OAuth flow (PKCE + state),
 * returning the PKCE verifier and state so the caller can complete the
 * exchange and validate the callback.
 */
async function createAuthorizationFlow(): Promise<{ verifier: string; state: string; url: string }> {
	const { verifier, challenge } = await generatePKCE();
	const state = createState();
	const authUrl = new URL(AUTHORIZE_URL);
	// Query parameters mirror what the official Codex CLI sends.
	const queryParams: Record<string, string> = {
		response_type: "code",
		client_id: CLIENT_ID,
		redirect_uri: REDIRECT_URI,
		scope: SCOPE,
		code_challenge: challenge,
		code_challenge_method: "S256",
		state,
		id_token_add_organizations: "true",
		codex_cli_simplified_flow: "true",
		originator: "codex_cli_rs",
	};
	for (const [key, value] of Object.entries(queryParams)) {
		authUrl.searchParams.set(key, value);
	}
	return { verifier, state, url: authUrl.toString() };
}
// Handle to the short-lived local callback server.
type OAuthServerInfo = {
	close: () => void;
	// Resolves with the captured authorization code, or null on timeout /
	// when the port could not be bound (caller falls back to manual paste).
	waitForCode: () => Promise<{ code: string } | null>;
};
/**
 * Start a loopback HTTP server on 127.0.0.1:1455 that captures the OAuth
 * redirect (/auth/callback). Validates the `state` parameter before accepting
 * a code. Never rejects: if the port is already in use, resolves with a
 * handle whose waitForCode() immediately returns null.
 */
function startLocalOAuthServer(state: string): Promise<OAuthServerInfo> {
	let lastCode: string | null = null;
	const server = http.createServer((req, res) => {
		try {
			// Base origin is only needed so relative req.url parses; host is unused.
			const url = new URL(req.url || "", "http://localhost");
			if (url.pathname !== "/auth/callback") {
				res.statusCode = 404;
				res.end("Not found");
				return;
			}
			// Reject callbacks whose state does not match ours (CSRF protection).
			if (url.searchParams.get("state") !== state) {
				res.statusCode = 400;
				res.end("State mismatch");
				return;
			}
			const code = url.searchParams.get("code");
			if (!code) {
				res.statusCode = 400;
				res.end("Missing authorization code");
				return;
			}
			res.statusCode = 200;
			res.setHeader("Content-Type", "text/html; charset=utf-8");
			res.end(SUCCESS_HTML);
			lastCode = code;
		} catch {
			res.statusCode = 500;
			res.end("Internal error");
		}
	});
	return new Promise((resolve) => {
		server
			.listen(1455, "127.0.0.1", () => {
				resolve({
					close: () => server.close(),
					// Poll for the captured code every 100ms, up to 600 tries (~60s).
					waitForCode: async () => {
						const sleep = () => new Promise((r) => setTimeout(r, 100));
						for (let i = 0; i < 600; i += 1) {
							if (lastCode) return { code: lastCode };
							await sleep();
						}
						return null;
					},
				});
			})
			.on("error", (err: NodeJS.ErrnoException) => {
				// Bind failure (e.g. EADDRINUSE): degrade to manual code entry.
				console.error(
					"[openai-codex] Failed to bind http://127.0.0.1:1455 (",
					err.code,
					") Falling back to manual paste.",
				);
				resolve({
					close: () => {
						try {
							server.close();
						} catch {
							// ignore
						}
					},
					waitForCode: async () => null,
				});
			});
	});
}
// Extract the ChatGPT account id from the access token's vendor JWT claim;
// null when the token cannot be decoded or the claim is absent/empty.
function getAccountId(accessToken: string): string | null {
	const claims = decodeJwt(accessToken)?.[JWT_CLAIM_PATH];
	const id = claims?.chatgpt_account_id;
	if (typeof id !== "string" || id.length === 0) return null;
	return id;
}
/**
 * Login with OpenAI Codex OAuth (ChatGPT Plus/Pro subscription).
 *
 * Flow: build the PKCE authorization URL, start the loopback callback server,
 * hand the URL to the caller (onAuth) to open in a browser, then wait for the
 * redirect. If the server times out or could not bind, falls back to prompting
 * the user to paste the code or redirect URL (onPrompt).
 *
 * @param options.onAuth called with the authorization URL to open
 * @param options.onPrompt asks the user for the code when auto-capture fails
 * @param options.onProgress optional progress messages (currently unused here)
 * @returns OAuth credentials including the ChatGPT accountId
 * @throws on state mismatch, missing code, failed token exchange, or when the
 *         account id cannot be extracted from the access token
 */
export async function loginOpenAICodex(options: {
	onAuth: (info: { url: string; instructions?: string }) => void;
	onPrompt: (prompt: OAuthPrompt) => Promise<string>;
	onProgress?: (message: string) => void;
}): Promise<OAuthCredentials> {
	const { verifier, state, url } = await createAuthorizationFlow();
	const server = await startLocalOAuthServer(state);
	options.onAuth({ url, instructions: "A browser window should open. Complete login to finish." });
	let code: string | undefined;
	try {
		// Preferred path: the loopback server captures the redirect automatically.
		const result = await server.waitForCode();
		if (result?.code) {
			code = result.code;
		}
		if (!code) {
			// Fallback: manual paste of the code or the full redirect URL.
			const input = await options.onPrompt({
				message: "Paste the authorization code (or full redirect URL):",
			});
			const parsed = parseAuthorizationInput(input);
			if (parsed.state && parsed.state !== state) {
				throw new Error("State mismatch");
			}
			code = parsed.code;
		}
		if (!code) {
			throw new Error("Missing authorization code");
		}
		const tokenResult = await exchangeAuthorizationCode(code, verifier);
		if (tokenResult.type !== "success") {
			throw new Error("Token exchange failed");
		}
		// The account id is required for the chatgpt-account-id request header.
		const accountId = getAccountId(tokenResult.access);
		if (!accountId) {
			throw new Error("Failed to extract accountId from token");
		}
		return {
			access: tokenResult.access,
			refresh: tokenResult.refresh,
			expires: tokenResult.expires,
			accountId,
		};
	} finally {
		// Always release port 1455, even on error paths.
		server.close();
	}
}
/**
 * Refresh OpenAI Codex OAuth credentials from a refresh token.
 *
 * @throws when the refresh request fails or the new access token does not
 *         carry a ChatGPT account id claim
 */
export async function refreshOpenAICodexToken(refreshToken: string): Promise<OAuthCredentials> {
	const refreshed = await refreshAccessToken(refreshToken);
	if (refreshed.type === "failed") {
		throw new Error("Failed to refresh OpenAI Codex token");
	}
	const accountId = getAccountId(refreshed.access);
	if (accountId === null) {
		throw new Error("Failed to extract accountId from token");
	}
	const { access, refresh, expires } = refreshed;
	return { access, refresh, expires, accountId };
}

View file

@ -5,9 +5,15 @@ export type OAuthCredentials = {
enterpriseUrl?: string;
projectId?: string;
email?: string;
accountId?: string;
};
export type OAuthProvider = "anthropic" | "github-copilot" | "google-gemini-cli" | "google-antigravity";
export type OAuthProvider =
| "anthropic"
| "github-copilot"
| "google-gemini-cli"
| "google-antigravity"
| "openai-codex";
export type OAuthPrompt = {
message: string;

View file

@ -0,0 +1,132 @@
import { mkdtempSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { streamOpenAICodexResponses } from "../src/providers/openai-codex-responses.js";
import type { Context, Model } from "../src/types.js";
// Snapshot globals the tests mutate so every test restores a clean environment.
const originalFetch = global.fetch;
const originalAgentDir = process.env.PI_CODING_AGENT_DIR;
afterEach(() => {
	global.fetch = originalFetch;
	// Restore (or remove) the agent-dir override used to isolate the prompt cache.
	if (originalAgentDir === undefined) {
		delete process.env.PI_CODING_AGENT_DIR;
	} else {
		process.env.PI_CODING_AGENT_DIR = originalAgentDir;
	}
	vi.restoreAllMocks();
});
describe("openai-codex streaming", () => {
	it("streams SSE responses into AssistantMessageEventStream", async () => {
		// Isolate the prompt cache in a temp dir so the test never touches ~/.pi.
		const tempDir = mkdtempSync(join(tmpdir(), "pi-codex-stream-"));
		process.env.PI_CODING_AGENT_DIR = tempDir;
		// Forge a JWT whose payload carries the ChatGPT account-id claim the
		// provider reads; header/signature segments are irrelevant and faked.
		const payload = Buffer.from(
			JSON.stringify({ "https://api.openai.com/auth": { chatgpt_account_id: "acc_test" } }),
			"utf8",
		).toString("base64");
		const token = `aaa.${payload}.bbb`;
		// A minimal Responses-API SSE transcript: message added, text delta,
		// message done, then completion with usage.
		const sse = `${[
			`data: ${JSON.stringify({
				type: "response.output_item.added",
				item: { type: "message", id: "msg_1", role: "assistant", status: "in_progress", content: [] },
			})}`,
			`data: ${JSON.stringify({ type: "response.content_part.added", part: { type: "output_text", text: "" } })}`,
			`data: ${JSON.stringify({ type: "response.output_text.delta", delta: "Hello" })}`,
			`data: ${JSON.stringify({
				type: "response.output_item.done",
				item: {
					type: "message",
					id: "msg_1",
					role: "assistant",
					status: "completed",
					content: [{ type: "output_text", text: "Hello" }],
				},
			})}`,
			`data: ${JSON.stringify({
				type: "response.completed",
				response: {
					status: "completed",
					usage: {
						input_tokens: 5,
						output_tokens: 3,
						total_tokens: 8,
						input_tokens_details: { cached_tokens: 0 },
					},
				},
			})}`,
		].join("\n\n")}\n\n`;
		const encoder = new TextEncoder();
		const stream = new ReadableStream<Uint8Array>({
			start(controller) {
				controller.enqueue(encoder.encode(sse));
				controller.close();
			},
		});
		// Fake fetch: serves the GitHub release/prompt lookups and the Codex
		// responses endpoint, asserting the auth headers on the latter.
		const fetchMock = vi.fn(async (input: string | URL, init?: RequestInit) => {
			const url = typeof input === "string" ? input : input.toString();
			if (url === "https://api.github.com/repos/openai/codex/releases/latest") {
				return new Response(JSON.stringify({ tag_name: "rust-v0.0.0" }), { status: 200 });
			}
			if (url.startsWith("https://raw.githubusercontent.com/openai/codex/")) {
				return new Response("PROMPT", { status: 200, headers: { etag: '"etag"' } });
			}
			if (url === "https://chatgpt.com/backend-api/codex/responses") {
				const headers = init?.headers instanceof Headers ? init.headers : undefined;
				expect(headers?.get("Authorization")).toBe(`Bearer ${token}`);
				expect(headers?.get("chatgpt-account-id")).toBe("acc_test");
				expect(headers?.get("OpenAI-Beta")).toBe("responses=experimental");
				expect(headers?.get("originator")).toBe("codex_cli_rs");
				expect(headers?.get("accept")).toBe("text/event-stream");
				// OAuth flow must not leak an API-key header.
				expect(headers?.has("x-api-key")).toBe(false);
				return new Response(stream, {
					status: 200,
					headers: { "content-type": "text/event-stream" },
				});
			}
			return new Response("not found", { status: 404 });
		});
		global.fetch = fetchMock as typeof fetch;
		const model: Model<"openai-codex-responses"> = {
			id: "gpt-5.1-codex",
			name: "GPT-5.1 Codex",
			api: "openai-codex-responses",
			provider: "openai-codex",
			baseUrl: "https://chatgpt.com/backend-api",
			reasoning: true,
			input: ["text"],
			cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
			contextWindow: 400000,
			maxTokens: 128000,
		};
		const context: Context = {
			systemPrompt: "You are a helpful assistant.",
			messages: [{ role: "user", content: "Say hello", timestamp: Date.now() }],
		};
		const streamResult = streamOpenAICodexResponses(model, context, { apiKey: token });
		// Consume the event stream and verify both a delta and the final message.
		let sawTextDelta = false;
		let sawDone = false;
		for await (const event of streamResult) {
			if (event.type === "text_delta") {
				sawTextDelta = true;
			}
			if (event.type === "done") {
				sawDone = true;
				expect(event.message.content.find((c) => c.type === "text")?.text).toBe("Hello");
			}
		}
		expect(sawTextDelta).toBe(true);
		expect(sawDone).toBe(true);
	});
});

View file

@ -0,0 +1,165 @@
import { mkdtempSync, readFileSync, writeFileSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { getCodexInstructions } from "../src/providers/openai-codex/prompts/codex.js";
import { CODEX_PI_BRIDGE } from "../src/providers/openai-codex/prompts/pi-codex-bridge.js";
import {
normalizeModel,
type RequestBody,
transformRequestBody,
} from "../src/providers/openai-codex/request-transformer.js";
import { parseCodexError } from "../src/providers/openai-codex/response-handler.js";
// Opening of pi's stock system prompt; the transformer detects it to decide
// which developer message to replace with the Codex bridge message.
const DEFAULT_PROMPT_PREFIX =
	"You are an expert coding assistant. You help users with coding tasks by reading files, executing commands";
// Bundled Codex instructions shipped with the package, used when neither the
// on-disk cache nor the network can supply the upstream prompt.
const FALLBACK_PROMPT = readFileSync(
	new URL("../src/providers/openai-codex/prompts/codex-instructions.md", import.meta.url),
	"utf8",
);
describe("openai-codex request transformer", () => {
	it("filters item_reference, strips ids, and inserts bridge message", async () => {
		// Input mixes a developer prompt, a user turn, an item_reference (which
		// the Codex backend rejects), and a tool output with no matching call.
		const body: RequestBody = {
			model: "gpt-5.1-codex",
			input: [
				{
					type: "message",
					role: "developer",
					id: "sys-1",
					content: [{ type: "input_text", text: `${DEFAULT_PROMPT_PREFIX}...` }],
				},
				{
					type: "message",
					role: "user",
					id: "user-1",
					content: [{ type: "input_text", text: "hello" }],
				},
				{ type: "item_reference", id: "ref-1" },
				{ type: "function_call_output", call_id: "missing", name: "tool", output: "result" },
			],
			tools: [{ type: "function", name: "tool", description: "", parameters: {} }],
		};
		const transformed = await transformRequestBody(body, "CODEX_INSTRUCTIONS", {}, true);
		// Codex requires store=false, streaming, top-level instructions, and
		// encrypted reasoning content to be requested explicitly.
		expect(transformed.store).toBe(false);
		expect(transformed.stream).toBe(true);
		expect(transformed.instructions).toBe("CODEX_INSTRUCTIONS");
		expect(transformed.include).toEqual(["reasoning.encrypted_content"]);
		const input = transformed.input || [];
		// item_reference entries and all ids must be stripped from the input.
		expect(input.some((item) => item.type === "item_reference")).toBe(false);
		expect(input.some((item) => "id" in item)).toBe(false);
		// The default developer prompt is replaced by the pi->Codex bridge message.
		expect(input[0]?.type).toBe("message");
		expect(input[0]?.content).toEqual([{ type: "input_text", text: CODEX_PI_BRIDGE }]);
		// Orphaned tool output is converted into an assistant message.
		const orphaned = input.find((item) => item.type === "message" && item.role === "assistant");
		expect(orphaned?.content).toMatch(/Previous tool result/);
	});
});
describe("openai-codex model normalization", () => {
	// Human-entered names with spaces should resolve to the backend's alias.
	it("maps space-separated codex-mini names to codex-mini-latest", () => {
		expect(normalizeModel("gpt 5 codex mini")).toBe("codex-mini-latest");
	});
});
describe("openai-codex error parsing", () => {
	it("produces friendly usage-limit messages and rate limits", async () => {
		// Reset 10 minutes from now (the API reports epoch seconds).
		const resetAt = Math.floor(Date.now() / 1000) + 600;
		const response = new Response(
			JSON.stringify({
				error: { code: "usage_limit_reached", plan_type: "Plus", resets_at: resetAt },
			}),
			{
				status: 429,
				// Rate-limit state is carried in x-codex-* response headers.
				headers: {
					"x-codex-primary-used-percent": "99",
					"x-codex-primary-window-minutes": "60",
					"x-codex-primary-reset-at": String(resetAt),
				},
			},
		);
		const info = await parseCodexError(response);
		expect(info.friendlyMessage?.toLowerCase()).toContain("usage limit");
		expect(info.rateLimits?.primary?.used_percent).toBe(99);
	});
});
describe("openai-codex prompt caching", () => {
	// These tests mutate global.fetch and PI_CODING_AGENT_DIR; restore both.
	const originalFetch = global.fetch;
	const originalAgentDir = process.env.PI_CODING_AGENT_DIR;
	beforeEach(() => {
		vi.restoreAllMocks();
	});
	afterEach(() => {
		global.fetch = originalFetch;
		if (originalAgentDir === undefined) {
			delete process.env.PI_CODING_AGENT_DIR;
		} else {
			process.env.PI_CODING_AGENT_DIR = originalAgentDir;
		}
	});
	it("caches prompts with etag and reuses cache", async () => {
		const tempDir = mkdtempSync(join(tmpdir(), "pi-codex-"));
		process.env.PI_CODING_AGENT_DIR = tempDir;
		const tag = "rust-v0.0.0";
		const promptText = "PROMPT_CONTENT";
		const etag = '"etag-123"';
		// Fake fetch: release lookup + raw prompt download honoring If-None-Match.
		const fetchMock = vi.fn(async (input: string | URL, init?: RequestInit) => {
			const url = typeof input === "string" ? input : input.toString();
			if (url === "https://api.github.com/repos/openai/codex/releases/latest") {
				return new Response(JSON.stringify({ tag_name: tag }), { status: 200 });
			}
			if (url.startsWith("https://raw.githubusercontent.com/openai/codex/")) {
				const headerValue =
					init?.headers && typeof init.headers === "object" && "If-None-Match" in init.headers
						? String((init.headers as Record<string, string>)["If-None-Match"])
						: undefined;
				// 304 when the caller revalidates with the cached etag.
				if (headerValue === etag) {
					return new Response("", { status: 304, headers: { etag } });
				}
				return new Response(promptText, { status: 200, headers: { etag } });
			}
			return new Response("not found", { status: 404 });
		});
		global.fetch = fetchMock as typeof fetch;
		const first = await getCodexInstructions("gpt-5.1-codex");
		expect(first).toBe(promptText);
		// Force a revalidation by backdating lastChecked in the cache metadata.
		const metaPath = join(tempDir, "cache", "openai-codex", "codex-instructions-meta.json");
		const meta = JSON.parse(readFileSync(metaPath, "utf8")) as { etag: string; tag: string; lastChecked: number };
		writeFileSync(metaPath, JSON.stringify({ ...meta, lastChecked: 0 }), "utf8");
		const second = await getCodexInstructions("gpt-5.1-codex");
		expect(second).toBe(promptText);
		expect(fetchMock).toHaveBeenCalled();
		const rawCalls = fetchMock.mock.calls.filter((call) =>
			String(call[0]).startsWith("https://raw.githubusercontent.com/openai/codex/"),
		);
		expect(rawCalls.length).toBeGreaterThan(0);
	});
	it("falls back to bundled instructions when cache and network are unavailable", async () => {
		const tempDir = mkdtempSync(join(tmpdir(), "pi-codex-"));
		process.env.PI_CODING_AGENT_DIR = tempDir;
		// Simulate total network failure; the bundled prompt must be served.
		const fetchMock = vi.fn(async () => {
			throw new Error("network down");
		});
		global.fetch = fetchMock as typeof fetch;
		const instructions = await getCodexInstructions("gpt-5.1-codex");
		expect(instructions).toBe(FALLBACK_PROMPT);
	});
});

View file

@ -156,6 +156,7 @@ Use `/login` to authenticate with subscription-based or free-tier providers:
| GitHub Copilot | GPT-4o, Claude, Gemini via Copilot subscription | Subscription |
| Google Gemini CLI | Gemini 2.0/2.5 models | Free (Google account) |
| Google Antigravity | Gemini 3, Claude, GPT-OSS | Free (Google account) |
| OpenAI Codex (ChatGPT Plus/Pro) | Codex models via ChatGPT subscription | Subscription |
```bash
pi
@ -173,8 +174,18 @@ pi
- Antigravity uses a sandbox endpoint with access to Gemini 3, Claude (sonnet/opus thinking), and GPT-OSS models
- Both are free with any Google account, subject to rate limits
**OpenAI Codex notes:**
- Requires ChatGPT Plus/Pro OAuth (`/login openai-codex`)
- Prompt cache stored under `~/.pi/agent/cache/openai-codex/`
- Intended for personal use with your own subscription; not for resale or multi-user services. For production, use the OpenAI Platform API.
Credentials stored in `~/.pi/agent/auth.json`. Use `/logout` to clear.
**Troubleshooting (OAuth):**
- **Port 1455 in use:** Close the conflicting process or paste the auth code/URL when prompted.
- **Token expired / refresh failed:** Run `/login` again for the provider to refresh credentials.
- **Usage limits (429):** Wait for the reset window; pi will surface a friendly message with the approximate retry time.
### Quick Start
```bash
@ -525,7 +536,7 @@ Add custom models (Ollama, vLLM, LM Studio, etc.) via `~/.pi/agent/models.json`:
}
```
**Supported APIs:** `openai-completions`, `openai-responses`, `anthropic-messages`, `google-generative-ai`
**Supported APIs:** `openai-completions`, `openai-responses`, `openai-codex-responses`, `anthropic-messages`, `google-generative-ai`
**API key resolution:** The `apiKey` field is checked as environment variable name first, then used as literal value.
@ -913,7 +924,7 @@ pi [options] [@files...] [messages...]
| Option | Description |
|--------|-------------|
| `--provider <name>` | Provider: `anthropic`, `openai`, `google`, `mistral`, `xai`, `groq`, `cerebras`, `openrouter`, `zai`, `github-copilot`, `google-gemini-cli`, `google-antigravity`, or custom |
| `--provider <name>` | Provider: `anthropic`, `openai`, `openai-codex`, `google`, `mistral`, `xai`, `groq`, `cerebras`, `openrouter`, `zai`, `github-copilot`, `google-gemini-cli`, `google-antigravity`, or custom |
| `--model <id>` | Model ID |
| `--api-key <key>` | API key (overrides environment) |
| `--system-prompt <text\|file>` | Custom system prompt (text or file path) |

View file

@ -10,6 +10,7 @@ import {
loginAntigravity,
loginGeminiCli,
loginGitHubCopilot,
loginOpenAICodex,
type OAuthCredentials,
type OAuthProvider,
} from "@mariozechner/pi-ai";
@ -180,6 +181,9 @@ export class AuthStorage {
case "google-antigravity":
credentials = await loginAntigravity(callbacks.onAuth, callbacks.onProgress);
break;
case "openai-codex":
credentials = await loginOpenAICodex(callbacks);
break;
default:
throw new Error(`Unknown OAuth provider: ${provider}`);
}

View file

@ -34,6 +34,7 @@ const ModelDefinitionSchema = Type.Object({
Type.Union([
Type.Literal("openai-completions"),
Type.Literal("openai-responses"),
Type.Literal("openai-codex-responses"),
Type.Literal("anthropic-messages"),
Type.Literal("google-generative-ai"),
]),
@ -59,6 +60,7 @@ const ProviderConfigSchema = Type.Object({
Type.Union([
Type.Literal("openai-completions"),
Type.Literal("openai-responses"),
Type.Literal("openai-codex-responses"),
Type.Literal("anthropic-messages"),
Type.Literal("google-generative-ai"),
]),

View file

@ -13,6 +13,7 @@ import type { ModelRegistry } from "./model-registry.js";
export const defaultModelPerProvider: Record<KnownProvider, string> = {
anthropic: "claude-sonnet-4-5",
openai: "gpt-5.1-codex",
"openai-codex": "gpt-5.2-codex",
google: "gemini-2.5-pro",
"google-gemini-cli": "gemini-2.5-pro",
"google-antigravity": "gemini-3-pro-high",