/*
 * co-mono/packages/ai/test/abort.test.ts
 *
 * Mario Zechner bb50738f7e fix(ai): append system prompt to codex bridge message instead of converting to input
 * Previously the system prompt was converted to an input message in convertMessages,
 * then stripped out by filterPiSystemPrompts. Now the system prompt is passed directly
 * to transformRequestBody and appended after CODEX_PI_BRIDGE in the bridge message.
 * 2026-01-05 06:03:07 +01:00
 *
 * 157 lines | 5.1 KiB | TypeScript
 */
import { describe, expect, it } from "vitest";
import { getModel } from "../src/models.js";
import { complete, stream } from "../src/stream.js";
import type { Api, Context, Model, OptionsForApi } from "../src/types.js";
import { resolveApiKey } from "./oauth.js";
// Resolve OAuth credentials before any test registers. Both lookups are
// kicked off first so they run concurrently; top-level await guarantees the
// tokens are settled before the describe blocks below evaluate their skipIf
// conditions.
const geminiCliTokenPromise = resolveApiKey("google-gemini-cli");
const openaiCodexTokenPromise = resolveApiKey("openai-codex");
const geminiCliToken = await geminiCliTokenPromise;
const openaiCodexToken = await openaiCodexTokenPromise;
/**
 * Verifies mid-stream aborting for a provider:
 *  1. Start a streamed completion and accumulate text/thinking deltas.
 *  2. Once at least 50 characters have arrived, fire the AbortController.
 *  3. Assert the resulting message has stopReason "aborted" with partial content.
 *  4. Push the aborted message back into the context and confirm a normal
 *     follow-up completion still succeeds (partial messages must be resumable).
 *
 * Bug fix: the original loop did `return` when an event arrived after the
 * abort fired, which exited the test helper and silently skipped every
 * assertion below the loop — the test passed vacuously whenever the stream
 * emitted even one buffered event post-abort (the common case, since
 * `controller.abort()` is followed by at least one more loop iteration).
 * Using `break` stops consumption but still runs the post-abort checks.
 */
async function testAbortSignal<TApi extends Api>(llm: Model<TApi>, options: OptionsForApi<TApi> = {}) {
  const context: Context = {
    messages: [
      {
        role: "user",
        // Prompt chosen to produce a long response so there is something to abort.
        content: "What is 15 + 27? Think step by step. Then list 50 first names.",
        timestamp: Date.now(),
      },
    ],
  };
  let abortFired = false;
  let text = "";
  const controller = new AbortController();
  const response = await stream(llm, context, { ...options, signal: controller.signal });
  for await (const event of response) {
    // Stop consuming once the abort has fired, but fall through to the
    // assertions below instead of returning early.
    if (abortFired) break;
    if (event.type === "text_delta" || event.type === "thinking_delta") {
      text += event.delta;
    }
    if (text.length >= 50) {
      controller.abort();
      abortFired = true;
    }
  }
  const msg = await response.result();
  // The aborted stream must still yield a usable partial message.
  expect(msg.stopReason).toBe("aborted");
  expect(msg.content.length).toBeGreaterThan(0);
  // The partial message must be valid conversation history for a follow-up.
  context.messages.push(msg);
  context.messages.push({
    role: "user",
    content: "Please continue, but only generate 5 names.",
    timestamp: Date.now(),
  });
  const followUp = await complete(llm, context, options);
  expect(followUp.stopReason).toBe("stop");
  expect(followUp.content.length).toBeGreaterThan(0);
}
/**
 * A request whose signal is already aborted before the call must resolve
 * (not hang or throw) with stopReason "aborted".
 */
async function testImmediateAbort<TApi extends Api>(llm: Model<TApi>, options: OptionsForApi<TApi> = {}) {
  const context: Context = {
    messages: [{ role: "user", content: "Hello", timestamp: Date.now() }],
  };
  // Abort before the request is even issued.
  const preAborted = new AbortController();
  preAborted.abort();
  const result = await complete(llm, context, { ...options, signal: preAborted.signal });
  expect(result.stopReason).toBe("aborted");
}
// Per-provider abort suites. Each suite is skipped unless the matching
// credential is available (env var or resolved OAuth token), and each test
// retries up to 3 times to absorb network/provider flakiness.
describe("AI Providers Abort Tests", () => {
describe.skipIf(!process.env.GEMINI_API_KEY)("Google Provider Abort", () => {
const llm = getModel("google", "gemini-2.5-flash");
it("should abort mid-stream", { retry: 3 }, async () => {
await testAbortSignal(llm, { thinking: { enabled: true } });
});
it("should handle immediate abort", { retry: 3 }, async () => {
await testImmediateAbort(llm, { thinking: { enabled: true } });
});
});
describe.skipIf(!process.env.OPENAI_API_KEY)("OpenAI Completions Provider Abort", () => {
// Force gpt-4o-mini onto the completions API (the registry default may be
// a different api); the non-null assertion assumes the model is registered.
const llm: Model<"openai-completions"> = {
...getModel("openai", "gpt-4o-mini")!,
api: "openai-completions",
};
it("should abort mid-stream", { retry: 3 }, async () => {
await testAbortSignal(llm);
});
it("should handle immediate abort", { retry: 3 }, async () => {
await testImmediateAbort(llm);
});
});
describe.skipIf(!process.env.OPENAI_API_KEY)("OpenAI Responses Provider Abort", () => {
const llm = getModel("openai", "gpt-5-mini");
it("should abort mid-stream", { retry: 3 }, async () => {
await testAbortSignal(llm);
});
it("should handle immediate abort", { retry: 3 }, async () => {
await testImmediateAbort(llm);
});
});
describe.skipIf(!process.env.ANTHROPIC_OAUTH_TOKEN)("Anthropic Provider Abort", () => {
const llm = getModel("anthropic", "claude-opus-4-1-20250805");
// Anthropic uses flat thinking options, unlike Google's nested `thinking` object.
it("should abort mid-stream", { retry: 3 }, async () => {
await testAbortSignal(llm, { thinkingEnabled: true, thinkingBudgetTokens: 2048 });
});
it("should handle immediate abort", { retry: 3 }, async () => {
await testImmediateAbort(llm, { thinkingEnabled: true, thinkingBudgetTokens: 2048 });
});
});
describe.skipIf(!process.env.MISTRAL_API_KEY)("Mistral Provider Abort", () => {
const llm = getModel("mistral", "devstral-medium-latest");
it("should abort mid-stream", { retry: 3 }, async () => {
await testAbortSignal(llm);
});
it("should handle immediate abort", { retry: 3 }, async () => {
await testImmediateAbort(llm);
});
});
// Google Gemini CLI / Antigravity share the same provider, so one test covers both
// Skips are per-test here because the token is resolved at module load, not via env vars.
describe("Google Gemini CLI Provider Abort", () => {
it.skipIf(!geminiCliToken)("should abort mid-stream", { retry: 3 }, async () => {
const llm = getModel("google-gemini-cli", "gemini-2.5-flash");
await testAbortSignal(llm, { apiKey: geminiCliToken });
});
it.skipIf(!geminiCliToken)("should handle immediate abort", { retry: 3 }, async () => {
const llm = getModel("google-gemini-cli", "gemini-2.5-flash");
await testImmediateAbort(llm, { apiKey: geminiCliToken });
});
});
describe("OpenAI Codex Provider Abort", () => {
it.skipIf(!openaiCodexToken)("should abort mid-stream", { retry: 3 }, async () => {
const llm = getModel("openai-codex", "gpt-5.2-xhigh");
await testAbortSignal(llm, { apiKey: openaiCodexToken });
});
it.skipIf(!openaiCodexToken)("should handle immediate abort", { retry: 3 }, async () => {
const llm = getModel("openai-codex", "gpt-5.2-xhigh");
await testImmediateAbort(llm, { apiKey: openaiCodexToken });
});
});
});