mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-17 06:04:51 +00:00
feat(ai,agent,coding-agent): add sessionId for provider session-based caching
- Add sessionId to StreamOptions for providers that support session-based caching
- OpenAI Codex provider uses sessionId for prompt_cache_key and routing headers
- Agent class now accepts and forwards sessionId to stream functions
- coding-agent passes session ID from SessionManager and updates on session changes
- Update ai package README with table of contents, OpenAI Codex OAuth docs, and env vars table
- Increase Codex instructions cache TTL from 15 minutes to 24 hours
- Add tests for sessionId forwarding in ai and agent packages
This commit is contained in:
parent
858c6bae8a
commit
edb0da9611
14 changed files with 335 additions and 56 deletions
|
|
@ -129,4 +129,201 @@ describe("openai-codex streaming", () => {
|
|||
expect(sawTextDelta).toBe(true);
|
||||
expect(sawDone).toBe(true);
|
||||
});
|
||||
|
||||
it("sets conversation_id/session_id headers and prompt_cache_key when sessionId is provided", async () => {
|
||||
const tempDir = mkdtempSync(join(tmpdir(), "pi-codex-stream-"));
|
||||
process.env.PI_CODING_AGENT_DIR = tempDir;
|
||||
|
||||
const payload = Buffer.from(
|
||||
JSON.stringify({ "https://api.openai.com/auth": { chatgpt_account_id: "acc_test" } }),
|
||||
"utf8",
|
||||
).toString("base64");
|
||||
const token = `aaa.${payload}.bbb`;
|
||||
|
||||
const sse = `${[
|
||||
`data: ${JSON.stringify({
|
||||
type: "response.output_item.added",
|
||||
item: { type: "message", id: "msg_1", role: "assistant", status: "in_progress", content: [] },
|
||||
})}`,
|
||||
`data: ${JSON.stringify({ type: "response.content_part.added", part: { type: "output_text", text: "" } })}`,
|
||||
`data: ${JSON.stringify({ type: "response.output_text.delta", delta: "Hello" })}`,
|
||||
`data: ${JSON.stringify({
|
||||
type: "response.output_item.done",
|
||||
item: {
|
||||
type: "message",
|
||||
id: "msg_1",
|
||||
role: "assistant",
|
||||
status: "completed",
|
||||
content: [{ type: "output_text", text: "Hello" }],
|
||||
},
|
||||
})}`,
|
||||
`data: ${JSON.stringify({
|
||||
type: "response.completed",
|
||||
response: {
|
||||
status: "completed",
|
||||
usage: {
|
||||
input_tokens: 5,
|
||||
output_tokens: 3,
|
||||
total_tokens: 8,
|
||||
input_tokens_details: { cached_tokens: 0 },
|
||||
},
|
||||
},
|
||||
})}`,
|
||||
].join("\n\n")}\n\n`;
|
||||
|
||||
const encoder = new TextEncoder();
|
||||
const stream = new ReadableStream<Uint8Array>({
|
||||
start(controller) {
|
||||
controller.enqueue(encoder.encode(sse));
|
||||
controller.close();
|
||||
},
|
||||
});
|
||||
|
||||
const sessionId = "test-session-123";
|
||||
const fetchMock = vi.fn(async (input: string | URL, init?: RequestInit) => {
|
||||
const url = typeof input === "string" ? input : input.toString();
|
||||
if (url === "https://api.github.com/repos/openai/codex/releases/latest") {
|
||||
return new Response(JSON.stringify({ tag_name: "rust-v0.0.0" }), { status: 200 });
|
||||
}
|
||||
if (url.startsWith("https://raw.githubusercontent.com/openai/codex/")) {
|
||||
return new Response("PROMPT", { status: 200, headers: { etag: '"etag"' } });
|
||||
}
|
||||
if (url === "https://chatgpt.com/backend-api/codex/responses") {
|
||||
const headers = init?.headers instanceof Headers ? init.headers : undefined;
|
||||
// Verify sessionId is set in headers
|
||||
expect(headers?.get("conversation_id")).toBe(sessionId);
|
||||
expect(headers?.get("session_id")).toBe(sessionId);
|
||||
|
||||
// Verify sessionId is set in request body as prompt_cache_key
|
||||
const body = typeof init?.body === "string" ? (JSON.parse(init.body) as Record<string, unknown>) : null;
|
||||
expect(body?.prompt_cache_key).toBe(sessionId);
|
||||
expect(body?.prompt_cache_retention).toBe("in-memory");
|
||||
|
||||
return new Response(stream, {
|
||||
status: 200,
|
||||
headers: { "content-type": "text/event-stream" },
|
||||
});
|
||||
}
|
||||
return new Response("not found", { status: 404 });
|
||||
});
|
||||
|
||||
global.fetch = fetchMock as typeof fetch;
|
||||
|
||||
const model: Model<"openai-codex-responses"> = {
|
||||
id: "gpt-5.1-codex",
|
||||
name: "GPT-5.1 Codex",
|
||||
api: "openai-codex-responses",
|
||||
provider: "openai-codex",
|
||||
baseUrl: "https://chatgpt.com/backend-api",
|
||||
reasoning: true,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 400000,
|
||||
maxTokens: 128000,
|
||||
};
|
||||
|
||||
const context: Context = {
|
||||
systemPrompt: "You are a helpful assistant.",
|
||||
messages: [{ role: "user", content: "Say hello", timestamp: Date.now() }],
|
||||
};
|
||||
|
||||
const streamResult = streamOpenAICodexResponses(model, context, { apiKey: token, sessionId });
|
||||
await streamResult.result();
|
||||
});
|
||||
|
||||
it("does not set conversation_id/session_id headers when sessionId is not provided", async () => {
|
||||
const tempDir = mkdtempSync(join(tmpdir(), "pi-codex-stream-"));
|
||||
process.env.PI_CODING_AGENT_DIR = tempDir;
|
||||
|
||||
const payload = Buffer.from(
|
||||
JSON.stringify({ "https://api.openai.com/auth": { chatgpt_account_id: "acc_test" } }),
|
||||
"utf8",
|
||||
).toString("base64");
|
||||
const token = `aaa.${payload}.bbb`;
|
||||
|
||||
const sse = `${[
|
||||
`data: ${JSON.stringify({
|
||||
type: "response.output_item.added",
|
||||
item: { type: "message", id: "msg_1", role: "assistant", status: "in_progress", content: [] },
|
||||
})}`,
|
||||
`data: ${JSON.stringify({ type: "response.content_part.added", part: { type: "output_text", text: "" } })}`,
|
||||
`data: ${JSON.stringify({ type: "response.output_text.delta", delta: "Hello" })}`,
|
||||
`data: ${JSON.stringify({
|
||||
type: "response.output_item.done",
|
||||
item: {
|
||||
type: "message",
|
||||
id: "msg_1",
|
||||
role: "assistant",
|
||||
status: "completed",
|
||||
content: [{ type: "output_text", text: "Hello" }],
|
||||
},
|
||||
})}`,
|
||||
`data: ${JSON.stringify({
|
||||
type: "response.completed",
|
||||
response: {
|
||||
status: "completed",
|
||||
usage: {
|
||||
input_tokens: 5,
|
||||
output_tokens: 3,
|
||||
total_tokens: 8,
|
||||
input_tokens_details: { cached_tokens: 0 },
|
||||
},
|
||||
},
|
||||
})}`,
|
||||
].join("\n\n")}\n\n`;
|
||||
|
||||
const encoder = new TextEncoder();
|
||||
const stream = new ReadableStream<Uint8Array>({
|
||||
start(controller) {
|
||||
controller.enqueue(encoder.encode(sse));
|
||||
controller.close();
|
||||
},
|
||||
});
|
||||
|
||||
const fetchMock = vi.fn(async (input: string | URL, init?: RequestInit) => {
|
||||
const url = typeof input === "string" ? input : input.toString();
|
||||
if (url === "https://api.github.com/repos/openai/codex/releases/latest") {
|
||||
return new Response(JSON.stringify({ tag_name: "rust-v0.0.0" }), { status: 200 });
|
||||
}
|
||||
if (url.startsWith("https://raw.githubusercontent.com/openai/codex/")) {
|
||||
return new Response("PROMPT", { status: 200, headers: { etag: '"etag"' } });
|
||||
}
|
||||
if (url === "https://chatgpt.com/backend-api/codex/responses") {
|
||||
const headers = init?.headers instanceof Headers ? init.headers : undefined;
|
||||
// Verify headers are not set when sessionId is not provided
|
||||
expect(headers?.has("conversation_id")).toBe(false);
|
||||
expect(headers?.has("session_id")).toBe(false);
|
||||
|
||||
return new Response(stream, {
|
||||
status: 200,
|
||||
headers: { "content-type": "text/event-stream" },
|
||||
});
|
||||
}
|
||||
return new Response("not found", { status: 404 });
|
||||
});
|
||||
|
||||
global.fetch = fetchMock as typeof fetch;
|
||||
|
||||
const model: Model<"openai-codex-responses"> = {
|
||||
id: "gpt-5.1-codex",
|
||||
name: "GPT-5.1 Codex",
|
||||
api: "openai-codex-responses",
|
||||
provider: "openai-codex",
|
||||
baseUrl: "https://chatgpt.com/backend-api",
|
||||
reasoning: true,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 400000,
|
||||
maxTokens: 128000,
|
||||
};
|
||||
|
||||
const context: Context = {
|
||||
systemPrompt: "You are a helpful assistant.",
|
||||
messages: [{ role: "user", content: "Say hello", timestamp: Date.now() }],
|
||||
};
|
||||
|
||||
// No sessionId provided
|
||||
const streamResult = streamOpenAICodexResponses(model, context, { apiKey: token });
|
||||
await streamResult.result();
|
||||
});
|
||||
});
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue