clanker-agent/packages/ai/test/xhigh.test.ts

import { describe, expect, it } from "vitest";
import { getModel } from "../src/models.js";
import { stream } from "../src/stream.js";
import type { Context, Model } from "../src/types.js";
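
// Build a fresh single-message user context; the operands are randomized
// so each run sends a unique prompt to the model.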
function makeContext(): Context {
  return {
    messages: [
      {
        role: "user",
        content: `What is ${(Math.random() * 100) | 0} + ${(Math.random() * 100) | 0}? Think step by step.`,
        timestamp: Date.now(),
      },
    ],
  };
}
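
// Live-API suite: skipped entirely when OPENAI_API_KEY is not set.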
describe.skipIf(!process.env.OPENAI_API_KEY)("xhigh reasoning", () => {
  describe("codex-max (supports xhigh)", () => {
    // Note: codex models only support the responses API, not chat completions
    it("should work with openai-responses", async () => {
      const model = getModel("openai", "gpt-5.1-codex-max");
      const s = stream(model, makeContext(), { reasoningEffort: "xhigh" });
      let hasThinking = false;
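      // Thinking may surface as stream events here or as a "thinking"
      // content block on the final response; either satisfies the test.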
      for await (const event of s) {
        if (
          event.type === "thinking_start" ||
          event.type === "thinking_delta"
        ) {
          hasThinking = true;
        }
      }
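      // The aggregated result is only read once the stream has been drained.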
      const response = await s.result();
      expect(response.stopReason, `Error: ${response.errorMessage}`).toBe(
        "stop",
      );
      expect(response.content.some((b) => b.type === "text")).toBe(true);
      expect(
        hasThinking || response.content.some((b) => b.type === "thinking"),
      ).toBe(true);
    });
  });
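
  // Negative path: a model without xhigh support should report an error
  // whose message names the rejected effort level.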
  describe("gpt-5-mini (does not support xhigh)", () => {
    it("should error with openai-responses when using xhigh", async () => {
      const model = getModel("openai", "gpt-5-mini");
      const s = stream(model, makeContext(), { reasoningEffort: "xhigh" });
      for await (const _ of s) {
        // drain events
      }
      const response = await s.result();
      expect(response.stopReason).toBe("error");
      expect(response.errorMessage).toContain("xhigh");
    });

    it("should error with openai-completions when using xhigh", async () => {
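      // Strip the model's compat field and override `api` so the request
      // takes the chat completions path rather than the responses API.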
      const { compat: _compat, ...baseModel } = getModel(
        "openai",
        "gpt-5-mini",
      );
      void _compat;
      const model: Model<"openai-completions"> = {
        ...baseModel,
        api: "openai-completions",
      };
      const s = stream(model, makeContext(), { reasoningEffort: "xhigh" });
      for await (const _ of s) {
        // drain events
      }
      const response = await s.result();
      expect(response.stopReason).toBe("error");
      expect(response.errorMessage).toContain("xhigh");
    });
  });
});