// Mirror of https://github.com/harivansh-afk/clanker-agent.git
// Synced 2026-04-15 23:01:34 +00:00
import { mkdtempSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";

import { streamOpenAICodexResponses } from "../src/providers/openai-codex-responses.js";
import type { Context, Model } from "../src/types.js";
const originalFetch = global.fetch;
|
|
const originalAgentDir = process.env.PI_CODING_AGENT_DIR;
|
|
|
|
afterEach(() => {
|
|
global.fetch = originalFetch;
|
|
if (originalAgentDir === undefined) {
|
|
delete process.env.PI_CODING_AGENT_DIR;
|
|
} else {
|
|
process.env.PI_CODING_AGENT_DIR = originalAgentDir;
|
|
}
|
|
vi.restoreAllMocks();
|
|
});
|
|
|
|
describe("openai-codex streaming", () => {
|
|
it("streams SSE responses into AssistantMessageEventStream", async () => {
|
|
const tempDir = mkdtempSync(join(tmpdir(), "pi-codex-stream-"));
|
|
process.env.PI_CODING_AGENT_DIR = tempDir;
|
|
|
|
const payload = Buffer.from(
|
|
JSON.stringify({
|
|
"https://api.openai.com/auth": { chatgpt_account_id: "acc_test" },
|
|
}),
|
|
"utf8",
|
|
).toString("base64");
|
|
const token = `aaa.${payload}.bbb`;
|
|
|
|
const sse = `${[
|
|
`data: ${JSON.stringify({
|
|
type: "response.output_item.added",
|
|
item: {
|
|
type: "message",
|
|
id: "msg_1",
|
|
role: "assistant",
|
|
status: "in_progress",
|
|
content: [],
|
|
},
|
|
})}`,
|
|
`data: ${JSON.stringify({ type: "response.content_part.added", part: { type: "output_text", text: "" } })}`,
|
|
`data: ${JSON.stringify({ type: "response.output_text.delta", delta: "Hello" })}`,
|
|
`data: ${JSON.stringify({
|
|
type: "response.output_item.done",
|
|
item: {
|
|
type: "message",
|
|
id: "msg_1",
|
|
role: "assistant",
|
|
status: "completed",
|
|
content: [{ type: "output_text", text: "Hello" }],
|
|
},
|
|
})}`,
|
|
`data: ${JSON.stringify({
|
|
type: "response.completed",
|
|
response: {
|
|
status: "completed",
|
|
usage: {
|
|
input_tokens: 5,
|
|
output_tokens: 3,
|
|
total_tokens: 8,
|
|
input_tokens_details: { cached_tokens: 0 },
|
|
},
|
|
},
|
|
})}`,
|
|
].join("\n\n")}\n\n`;
|
|
|
|
const encoder = new TextEncoder();
|
|
const stream = new ReadableStream<Uint8Array>({
|
|
start(controller) {
|
|
controller.enqueue(encoder.encode(sse));
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
const fetchMock = vi.fn(async (input: string | URL, init?: RequestInit) => {
|
|
const url = typeof input === "string" ? input : input.toString();
|
|
if (url === "https://api.github.com/repos/openai/codex/releases/latest") {
|
|
return new Response(JSON.stringify({ tag_name: "rust-v0.0.0" }), {
|
|
status: 200,
|
|
});
|
|
}
|
|
if (url.startsWith("https://raw.githubusercontent.com/openai/codex/")) {
|
|
return new Response("PROMPT", {
|
|
status: 200,
|
|
headers: { etag: '"etag"' },
|
|
});
|
|
}
|
|
if (url === "https://chatgpt.com/backend-api/codex/responses") {
|
|
const headers =
|
|
init?.headers instanceof Headers ? init.headers : undefined;
|
|
expect(headers?.get("Authorization")).toBe(`Bearer ${token}`);
|
|
expect(headers?.get("chatgpt-account-id")).toBe("acc_test");
|
|
expect(headers?.get("OpenAI-Beta")).toBe("responses=experimental");
|
|
expect(headers?.get("originator")).toBe("pi");
|
|
expect(headers?.get("accept")).toBe("text/event-stream");
|
|
expect(headers?.has("x-api-key")).toBe(false);
|
|
return new Response(stream, {
|
|
status: 200,
|
|
headers: { "content-type": "text/event-stream" },
|
|
});
|
|
}
|
|
return new Response("not found", { status: 404 });
|
|
});
|
|
|
|
global.fetch = fetchMock as typeof fetch;
|
|
|
|
const model: Model<"openai-codex-responses"> = {
|
|
id: "gpt-5.1-codex",
|
|
name: "GPT-5.1 Codex",
|
|
api: "openai-codex-responses",
|
|
provider: "openai-codex",
|
|
baseUrl: "https://chatgpt.com/backend-api",
|
|
reasoning: true,
|
|
input: ["text"],
|
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
contextWindow: 400000,
|
|
maxTokens: 128000,
|
|
};
|
|
|
|
const context: Context = {
|
|
systemPrompt: "You are a helpful assistant.",
|
|
messages: [{ role: "user", content: "Say hello", timestamp: Date.now() }],
|
|
};
|
|
|
|
const streamResult = streamOpenAICodexResponses(model, context, {
|
|
apiKey: token,
|
|
});
|
|
let sawTextDelta = false;
|
|
let sawDone = false;
|
|
|
|
for await (const event of streamResult) {
|
|
if (event.type === "text_delta") {
|
|
sawTextDelta = true;
|
|
}
|
|
if (event.type === "done") {
|
|
sawDone = true;
|
|
expect(event.message.content.find((c) => c.type === "text")?.text).toBe(
|
|
"Hello",
|
|
);
|
|
}
|
|
}
|
|
|
|
expect(sawTextDelta).toBe(true);
|
|
expect(sawDone).toBe(true);
|
|
});
|
|
|
|
it("sets conversation_id/session_id headers and prompt_cache_key when sessionId is provided", async () => {
|
|
const tempDir = mkdtempSync(join(tmpdir(), "pi-codex-stream-"));
|
|
process.env.PI_CODING_AGENT_DIR = tempDir;
|
|
|
|
const payload = Buffer.from(
|
|
JSON.stringify({
|
|
"https://api.openai.com/auth": { chatgpt_account_id: "acc_test" },
|
|
}),
|
|
"utf8",
|
|
).toString("base64");
|
|
const token = `aaa.${payload}.bbb`;
|
|
|
|
const sse = `${[
|
|
`data: ${JSON.stringify({
|
|
type: "response.output_item.added",
|
|
item: {
|
|
type: "message",
|
|
id: "msg_1",
|
|
role: "assistant",
|
|
status: "in_progress",
|
|
content: [],
|
|
},
|
|
})}`,
|
|
`data: ${JSON.stringify({ type: "response.content_part.added", part: { type: "output_text", text: "" } })}`,
|
|
`data: ${JSON.stringify({ type: "response.output_text.delta", delta: "Hello" })}`,
|
|
`data: ${JSON.stringify({
|
|
type: "response.output_item.done",
|
|
item: {
|
|
type: "message",
|
|
id: "msg_1",
|
|
role: "assistant",
|
|
status: "completed",
|
|
content: [{ type: "output_text", text: "Hello" }],
|
|
},
|
|
})}`,
|
|
`data: ${JSON.stringify({
|
|
type: "response.completed",
|
|
response: {
|
|
status: "completed",
|
|
usage: {
|
|
input_tokens: 5,
|
|
output_tokens: 3,
|
|
total_tokens: 8,
|
|
input_tokens_details: { cached_tokens: 0 },
|
|
},
|
|
},
|
|
})}`,
|
|
].join("\n\n")}\n\n`;
|
|
|
|
const encoder = new TextEncoder();
|
|
const stream = new ReadableStream<Uint8Array>({
|
|
start(controller) {
|
|
controller.enqueue(encoder.encode(sse));
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
const sessionId = "test-session-123";
|
|
const fetchMock = vi.fn(async (input: string | URL, init?: RequestInit) => {
|
|
const url = typeof input === "string" ? input : input.toString();
|
|
if (url === "https://api.github.com/repos/openai/codex/releases/latest") {
|
|
return new Response(JSON.stringify({ tag_name: "rust-v0.0.0" }), {
|
|
status: 200,
|
|
});
|
|
}
|
|
if (url.startsWith("https://raw.githubusercontent.com/openai/codex/")) {
|
|
return new Response("PROMPT", {
|
|
status: 200,
|
|
headers: { etag: '"etag"' },
|
|
});
|
|
}
|
|
if (url === "https://chatgpt.com/backend-api/codex/responses") {
|
|
const headers =
|
|
init?.headers instanceof Headers ? init.headers : undefined;
|
|
// Verify sessionId is set in headers
|
|
expect(headers?.get("conversation_id")).toBe(sessionId);
|
|
expect(headers?.get("session_id")).toBe(sessionId);
|
|
|
|
// Verify sessionId is set in request body as prompt_cache_key
|
|
const body =
|
|
typeof init?.body === "string"
|
|
? (JSON.parse(init.body) as Record<string, unknown>)
|
|
: null;
|
|
expect(body?.prompt_cache_key).toBe(sessionId);
|
|
expect(body?.prompt_cache_retention).toBe("in-memory");
|
|
|
|
return new Response(stream, {
|
|
status: 200,
|
|
headers: { "content-type": "text/event-stream" },
|
|
});
|
|
}
|
|
return new Response("not found", { status: 404 });
|
|
});
|
|
|
|
global.fetch = fetchMock as typeof fetch;
|
|
|
|
const model: Model<"openai-codex-responses"> = {
|
|
id: "gpt-5.1-codex",
|
|
name: "GPT-5.1 Codex",
|
|
api: "openai-codex-responses",
|
|
provider: "openai-codex",
|
|
baseUrl: "https://chatgpt.com/backend-api",
|
|
reasoning: true,
|
|
input: ["text"],
|
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
contextWindow: 400000,
|
|
maxTokens: 128000,
|
|
};
|
|
|
|
const context: Context = {
|
|
systemPrompt: "You are a helpful assistant.",
|
|
messages: [{ role: "user", content: "Say hello", timestamp: Date.now() }],
|
|
};
|
|
|
|
const streamResult = streamOpenAICodexResponses(model, context, {
|
|
apiKey: token,
|
|
sessionId,
|
|
});
|
|
await streamResult.result();
|
|
});
|
|
|
|
it.each(["gpt-5.3-codex", "gpt-5.4"])(
|
|
"clamps %s minimal reasoning effort to low",
|
|
async (modelId) => {
|
|
const tempDir = mkdtempSync(join(tmpdir(), "pi-codex-stream-"));
|
|
process.env.PI_CODING_AGENT_DIR = tempDir;
|
|
|
|
const payload = Buffer.from(
|
|
JSON.stringify({
|
|
"https://api.openai.com/auth": { chatgpt_account_id: "acc_test" },
|
|
}),
|
|
"utf8",
|
|
).toString("base64");
|
|
const token = `aaa.${payload}.bbb`;
|
|
|
|
const sse = `${[
|
|
`data: ${JSON.stringify({
|
|
type: "response.output_item.added",
|
|
item: {
|
|
type: "message",
|
|
id: "msg_1",
|
|
role: "assistant",
|
|
status: "in_progress",
|
|
content: [],
|
|
},
|
|
})}`,
|
|
`data: ${JSON.stringify({ type: "response.content_part.added", part: { type: "output_text", text: "" } })}`,
|
|
`data: ${JSON.stringify({ type: "response.output_text.delta", delta: "Hello" })}`,
|
|
`data: ${JSON.stringify({
|
|
type: "response.output_item.done",
|
|
item: {
|
|
type: "message",
|
|
id: "msg_1",
|
|
role: "assistant",
|
|
status: "completed",
|
|
content: [{ type: "output_text", text: "Hello" }],
|
|
},
|
|
})}`,
|
|
`data: ${JSON.stringify({
|
|
type: "response.completed",
|
|
response: {
|
|
status: "completed",
|
|
usage: {
|
|
input_tokens: 5,
|
|
output_tokens: 3,
|
|
total_tokens: 8,
|
|
input_tokens_details: { cached_tokens: 0 },
|
|
},
|
|
},
|
|
})}`,
|
|
].join("\n\n")}\n\n`;
|
|
|
|
const encoder = new TextEncoder();
|
|
const stream = new ReadableStream<Uint8Array>({
|
|
start(controller) {
|
|
controller.enqueue(encoder.encode(sse));
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
const fetchMock = vi.fn(
|
|
async (input: string | URL, init?: RequestInit) => {
|
|
const url = typeof input === "string" ? input : input.toString();
|
|
if (
|
|
url === "https://api.github.com/repos/openai/codex/releases/latest"
|
|
) {
|
|
return new Response(JSON.stringify({ tag_name: "rust-v0.0.0" }), {
|
|
status: 200,
|
|
});
|
|
}
|
|
if (
|
|
url.startsWith("https://raw.githubusercontent.com/openai/codex/")
|
|
) {
|
|
return new Response("PROMPT", {
|
|
status: 200,
|
|
headers: { etag: '"etag"' },
|
|
});
|
|
}
|
|
if (url === "https://chatgpt.com/backend-api/codex/responses") {
|
|
const body =
|
|
typeof init?.body === "string"
|
|
? (JSON.parse(init.body) as Record<string, unknown>)
|
|
: null;
|
|
expect(body?.reasoning).toEqual({ effort: "low", summary: "auto" });
|
|
|
|
return new Response(stream, {
|
|
status: 200,
|
|
headers: { "content-type": "text/event-stream" },
|
|
});
|
|
}
|
|
return new Response("not found", { status: 404 });
|
|
},
|
|
);
|
|
|
|
global.fetch = fetchMock as typeof fetch;
|
|
|
|
const model: Model<"openai-codex-responses"> = {
|
|
id: modelId,
|
|
name: modelId,
|
|
api: "openai-codex-responses",
|
|
provider: "openai-codex",
|
|
baseUrl: "https://chatgpt.com/backend-api",
|
|
reasoning: true,
|
|
input: ["text"],
|
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
contextWindow: 400000,
|
|
maxTokens: 128000,
|
|
};
|
|
|
|
const context: Context = {
|
|
systemPrompt: "You are a helpful assistant.",
|
|
messages: [
|
|
{ role: "user", content: "Say hello", timestamp: Date.now() },
|
|
],
|
|
};
|
|
|
|
const streamResult = streamOpenAICodexResponses(model, context, {
|
|
apiKey: token,
|
|
reasoningEffort: "minimal",
|
|
});
|
|
await streamResult.result();
|
|
},
|
|
);
|
|
|
|
it("does not set conversation_id/session_id headers when sessionId is not provided", async () => {
|
|
const tempDir = mkdtempSync(join(tmpdir(), "pi-codex-stream-"));
|
|
process.env.PI_CODING_AGENT_DIR = tempDir;
|
|
|
|
const payload = Buffer.from(
|
|
JSON.stringify({
|
|
"https://api.openai.com/auth": { chatgpt_account_id: "acc_test" },
|
|
}),
|
|
"utf8",
|
|
).toString("base64");
|
|
const token = `aaa.${payload}.bbb`;
|
|
|
|
const sse = `${[
|
|
`data: ${JSON.stringify({
|
|
type: "response.output_item.added",
|
|
item: {
|
|
type: "message",
|
|
id: "msg_1",
|
|
role: "assistant",
|
|
status: "in_progress",
|
|
content: [],
|
|
},
|
|
})}`,
|
|
`data: ${JSON.stringify({ type: "response.content_part.added", part: { type: "output_text", text: "" } })}`,
|
|
`data: ${JSON.stringify({ type: "response.output_text.delta", delta: "Hello" })}`,
|
|
`data: ${JSON.stringify({
|
|
type: "response.output_item.done",
|
|
item: {
|
|
type: "message",
|
|
id: "msg_1",
|
|
role: "assistant",
|
|
status: "completed",
|
|
content: [{ type: "output_text", text: "Hello" }],
|
|
},
|
|
})}`,
|
|
`data: ${JSON.stringify({
|
|
type: "response.completed",
|
|
response: {
|
|
status: "completed",
|
|
usage: {
|
|
input_tokens: 5,
|
|
output_tokens: 3,
|
|
total_tokens: 8,
|
|
input_tokens_details: { cached_tokens: 0 },
|
|
},
|
|
},
|
|
})}`,
|
|
].join("\n\n")}\n\n`;
|
|
|
|
const encoder = new TextEncoder();
|
|
const stream = new ReadableStream<Uint8Array>({
|
|
start(controller) {
|
|
controller.enqueue(encoder.encode(sse));
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
const fetchMock = vi.fn(async (input: string | URL, init?: RequestInit) => {
|
|
const url = typeof input === "string" ? input : input.toString();
|
|
if (url === "https://api.github.com/repos/openai/codex/releases/latest") {
|
|
return new Response(JSON.stringify({ tag_name: "rust-v0.0.0" }), {
|
|
status: 200,
|
|
});
|
|
}
|
|
if (url.startsWith("https://raw.githubusercontent.com/openai/codex/")) {
|
|
return new Response("PROMPT", {
|
|
status: 200,
|
|
headers: { etag: '"etag"' },
|
|
});
|
|
}
|
|
if (url === "https://chatgpt.com/backend-api/codex/responses") {
|
|
const headers =
|
|
init?.headers instanceof Headers ? init.headers : undefined;
|
|
// Verify headers are not set when sessionId is not provided
|
|
expect(headers?.has("conversation_id")).toBe(false);
|
|
expect(headers?.has("session_id")).toBe(false);
|
|
|
|
return new Response(stream, {
|
|
status: 200,
|
|
headers: { "content-type": "text/event-stream" },
|
|
});
|
|
}
|
|
return new Response("not found", { status: 404 });
|
|
});
|
|
|
|
global.fetch = fetchMock as typeof fetch;
|
|
|
|
const model: Model<"openai-codex-responses"> = {
|
|
id: "gpt-5.1-codex",
|
|
name: "GPT-5.1 Codex",
|
|
api: "openai-codex-responses",
|
|
provider: "openai-codex",
|
|
baseUrl: "https://chatgpt.com/backend-api",
|
|
reasoning: true,
|
|
input: ["text"],
|
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
contextWindow: 400000,
|
|
maxTokens: 128000,
|
|
};
|
|
|
|
const context: Context = {
|
|
systemPrompt: "You are a helpful assistant.",
|
|
messages: [{ role: "user", content: "Say hello", timestamp: Date.now() }],
|
|
};
|
|
|
|
// No sessionId provided
|
|
const streamResult = streamOpenAICodexResponses(model, context, {
|
|
apiKey: token,
|
|
});
|
|
await streamResult.result();
|
|
});
|
|
});
|