/*
 * Mirror of https://github.com/getcompanion-ai/co-mono.git
 * Synced 2026-04-15 17:00:59 +00:00
 *
 * Major changes:
 * - Replace monolithic SessionEvent with reason discriminator with individual event types:
 *   session_start, session_before_switch, session_switch, session_before_new, session_new,
 *   session_before_branch, session_branch, session_before_compact, session_compact,
 *   session_shutdown
 * - Each event has dedicated result type (SessionBeforeSwitchResult, etc.)
 * - HookHandler type now allows bare return statements (void in return type)
 * - HookAPI.on() has proper overloads for each event with correct typing
 *
 * Additional fixes:
 * - AgentSession now always subscribes to agent in constructor (was only subscribing
 *   when external subscribe() called, breaking internal handlers)
 * - Standardize on undefined over null throughout codebase
 * - HookUIContext methods return undefined instead of null
 * - SessionManager methods return undefined instead of null
 * - Simplify hook exports to 'export type * from types.js'
 * - Add detailed JSDoc for skipConversationRestore vs cancel
 * - Fix createBranchedSession to rebuild index in persist mode
 * - newSession() now returns the session file path
 *
 * Updated all example hooks, tests, and emission sites to use new event types.
 *
 * 202 lines, 7.4 KiB, TypeScript
 */
import type { Model } from "@mariozechner/pi-ai";
|
|
import { describe, expect, test } from "vitest";
|
|
import { parseModelPattern } from "../src/core/model-resolver.js";
|
|
|
|
// Mock models for testing
|
|
const mockModels: Model<"anthropic-messages">[] = [
|
|
{
|
|
id: "claude-sonnet-4-5",
|
|
name: "Claude Sonnet 4.5",
|
|
api: "anthropic-messages",
|
|
provider: "anthropic",
|
|
baseUrl: "https://api.anthropic.com",
|
|
reasoning: true,
|
|
input: ["text", "image"],
|
|
cost: { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
|
|
contextWindow: 200000,
|
|
maxTokens: 8192,
|
|
},
|
|
{
|
|
id: "gpt-4o",
|
|
name: "GPT-4o",
|
|
api: "anthropic-messages", // Using same type for simplicity
|
|
provider: "openai",
|
|
baseUrl: "https://api.openai.com",
|
|
reasoning: false,
|
|
input: ["text", "image"],
|
|
cost: { input: 5, output: 15, cacheRead: 0.5, cacheWrite: 5 },
|
|
contextWindow: 128000,
|
|
maxTokens: 4096,
|
|
},
|
|
];
|
|
|
|
// Mock OpenRouter models with colons in IDs
|
|
const mockOpenRouterModels: Model<"anthropic-messages">[] = [
|
|
{
|
|
id: "qwen/qwen3-coder:exacto",
|
|
name: "Qwen3 Coder Exacto",
|
|
api: "anthropic-messages",
|
|
provider: "openrouter",
|
|
baseUrl: "https://openrouter.ai/api/v1",
|
|
reasoning: true,
|
|
input: ["text"],
|
|
cost: { input: 1, output: 2, cacheRead: 0.1, cacheWrite: 1 },
|
|
contextWindow: 128000,
|
|
maxTokens: 8192,
|
|
},
|
|
{
|
|
id: "openai/gpt-4o:extended",
|
|
name: "GPT-4o Extended",
|
|
api: "anthropic-messages",
|
|
provider: "openrouter",
|
|
baseUrl: "https://openrouter.ai/api/v1",
|
|
reasoning: false,
|
|
input: ["text", "image"],
|
|
cost: { input: 5, output: 15, cacheRead: 0.5, cacheWrite: 5 },
|
|
contextWindow: 128000,
|
|
maxTokens: 4096,
|
|
},
|
|
];
|
|
|
|
const allModels = [...mockModels, ...mockOpenRouterModels];
|
|
|
|
describe("parseModelPattern", () => {
|
|
describe("simple patterns without colons", () => {
|
|
test("exact match returns model with off thinking level", () => {
|
|
const result = parseModelPattern("claude-sonnet-4-5", allModels);
|
|
expect(result.model?.id).toBe("claude-sonnet-4-5");
|
|
expect(result.thinkingLevel).toBe("off");
|
|
expect(result.warning).toBeUndefined();
|
|
});
|
|
|
|
test("partial match returns best model", () => {
|
|
const result = parseModelPattern("sonnet", allModels);
|
|
expect(result.model?.id).toBe("claude-sonnet-4-5");
|
|
expect(result.thinkingLevel).toBe("off");
|
|
expect(result.warning).toBeUndefined();
|
|
});
|
|
|
|
test("no match returns null model", () => {
|
|
const result = parseModelPattern("nonexistent", allModels);
|
|
expect(result.model).toBeUndefined();
|
|
expect(result.thinkingLevel).toBe("off");
|
|
expect(result.warning).toBeUndefined();
|
|
});
|
|
});
|
|
|
|
describe("patterns with valid thinking levels", () => {
|
|
test("sonnet:high returns sonnet with high thinking level", () => {
|
|
const result = parseModelPattern("sonnet:high", allModels);
|
|
expect(result.model?.id).toBe("claude-sonnet-4-5");
|
|
expect(result.thinkingLevel).toBe("high");
|
|
expect(result.warning).toBeUndefined();
|
|
});
|
|
|
|
test("gpt-4o:medium returns gpt-4o with medium thinking level", () => {
|
|
const result = parseModelPattern("gpt-4o:medium", allModels);
|
|
expect(result.model?.id).toBe("gpt-4o");
|
|
expect(result.thinkingLevel).toBe("medium");
|
|
expect(result.warning).toBeUndefined();
|
|
});
|
|
|
|
test("all valid thinking levels work", () => {
|
|
for (const level of ["off", "minimal", "low", "medium", "high", "xhigh"]) {
|
|
const result = parseModelPattern(`sonnet:${level}`, allModels);
|
|
expect(result.model?.id).toBe("claude-sonnet-4-5");
|
|
expect(result.thinkingLevel).toBe(level);
|
|
expect(result.warning).toBeUndefined();
|
|
}
|
|
});
|
|
});
|
|
|
|
describe("patterns with invalid thinking levels", () => {
|
|
test("sonnet:random returns sonnet with off and warning", () => {
|
|
const result = parseModelPattern("sonnet:random", allModels);
|
|
expect(result.model?.id).toBe("claude-sonnet-4-5");
|
|
expect(result.thinkingLevel).toBe("off");
|
|
expect(result.warning).toContain("Invalid thinking level");
|
|
expect(result.warning).toContain("random");
|
|
});
|
|
|
|
test("gpt-4o:invalid returns gpt-4o with off and warning", () => {
|
|
const result = parseModelPattern("gpt-4o:invalid", allModels);
|
|
expect(result.model?.id).toBe("gpt-4o");
|
|
expect(result.thinkingLevel).toBe("off");
|
|
expect(result.warning).toContain("Invalid thinking level");
|
|
});
|
|
});
|
|
|
|
describe("OpenRouter models with colons in IDs", () => {
|
|
test("qwen3-coder:exacto matches the model with off", () => {
|
|
const result = parseModelPattern("qwen/qwen3-coder:exacto", allModels);
|
|
expect(result.model?.id).toBe("qwen/qwen3-coder:exacto");
|
|
expect(result.thinkingLevel).toBe("off");
|
|
expect(result.warning).toBeUndefined();
|
|
});
|
|
|
|
test("openrouter/qwen/qwen3-coder:exacto matches with provider prefix", () => {
|
|
const result = parseModelPattern("openrouter/qwen/qwen3-coder:exacto", allModels);
|
|
expect(result.model?.id).toBe("qwen/qwen3-coder:exacto");
|
|
expect(result.model?.provider).toBe("openrouter");
|
|
expect(result.thinkingLevel).toBe("off");
|
|
expect(result.warning).toBeUndefined();
|
|
});
|
|
|
|
test("qwen3-coder:exacto:high matches model with high thinking level", () => {
|
|
const result = parseModelPattern("qwen/qwen3-coder:exacto:high", allModels);
|
|
expect(result.model?.id).toBe("qwen/qwen3-coder:exacto");
|
|
expect(result.thinkingLevel).toBe("high");
|
|
expect(result.warning).toBeUndefined();
|
|
});
|
|
|
|
test("openrouter/qwen/qwen3-coder:exacto:high matches with provider and thinking level", () => {
|
|
const result = parseModelPattern("openrouter/qwen/qwen3-coder:exacto:high", allModels);
|
|
expect(result.model?.id).toBe("qwen/qwen3-coder:exacto");
|
|
expect(result.model?.provider).toBe("openrouter");
|
|
expect(result.thinkingLevel).toBe("high");
|
|
expect(result.warning).toBeUndefined();
|
|
});
|
|
|
|
test("gpt-4o:extended matches the extended model", () => {
|
|
const result = parseModelPattern("openai/gpt-4o:extended", allModels);
|
|
expect(result.model?.id).toBe("openai/gpt-4o:extended");
|
|
expect(result.thinkingLevel).toBe("off");
|
|
expect(result.warning).toBeUndefined();
|
|
});
|
|
});
|
|
|
|
describe("invalid thinking levels with OpenRouter models", () => {
|
|
test("qwen3-coder:exacto:random returns model with off and warning", () => {
|
|
const result = parseModelPattern("qwen/qwen3-coder:exacto:random", allModels);
|
|
expect(result.model?.id).toBe("qwen/qwen3-coder:exacto");
|
|
expect(result.thinkingLevel).toBe("off");
|
|
expect(result.warning).toContain("Invalid thinking level");
|
|
expect(result.warning).toContain("random");
|
|
});
|
|
|
|
test("qwen3-coder:exacto:high:random returns model with off and warning", () => {
|
|
const result = parseModelPattern("qwen/qwen3-coder:exacto:high:random", allModels);
|
|
expect(result.model?.id).toBe("qwen/qwen3-coder:exacto");
|
|
expect(result.thinkingLevel).toBe("off");
|
|
expect(result.warning).toContain("Invalid thinking level");
|
|
expect(result.warning).toContain("random");
|
|
});
|
|
});
|
|
|
|
describe("edge cases", () => {
|
|
test("empty pattern matches via partial matching", () => {
|
|
// Empty string is included in all model IDs, so partial matching finds a match
|
|
const result = parseModelPattern("", allModels);
|
|
expect(result.model).not.toBeNull();
|
|
expect(result.thinkingLevel).toBe("off");
|
|
});
|
|
|
|
test("pattern ending with colon treats empty suffix as invalid", () => {
|
|
const result = parseModelPattern("sonnet:", allModels);
|
|
// Empty string after colon is not a valid thinking level
|
|
// So it tries to match "sonnet:" which won't match, then tries "sonnet"
|
|
expect(result.model?.id).toBe("claude-sonnet-4-5");
|
|
expect(result.warning).toContain("Invalid thinking level");
|
|
});
|
|
});
|
|
});
|