Add xhigh thinking level for OpenAI codex-max models

- Add 'xhigh' to ThinkingLevel type in ai and agent packages
- Map xhigh to reasoning_effort: 'max' for OpenAI providers
- Add thinkingXhigh color token to theme schema and built-in themes
- Show xhigh option only when using codex-max models
- Update CHANGELOG for both ai and coding-agent packages

closes #143
This commit is contained in:
Mario Zechner 2025-12-08 21:12:54 +01:00
parent 87a1a9ded4
commit 00370cab39
19 changed files with 300 additions and 54 deletions

79
package-lock.json generated
View file

@ -45,12 +45,32 @@
}
},
"node_modules/@anthropic-ai/sdk": {
"version": "0.61.0",
"resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.61.0.tgz",
"integrity": "sha512-GnlOXrPxow0uoaVB3DGNh9EJBU1MyagCBCLpU+bwDVlj/oOPYIwoiasMWlykkfYcQOrDP2x/zHnRD0xN7PeZPw==",
"version": "0.71.2",
"resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.71.2.tgz",
"integrity": "sha512-TGNDEUuEstk/DKu0/TflXAEt+p+p/WhTlFzEnoosvbaDU2LTjm42igSdlL0VijrKpWejtOKxX0b8A7uc+XiSAQ==",
"license": "MIT",
"dependencies": {
"json-schema-to-ts": "^3.1.1"
},
"bin": {
"anthropic-ai-sdk": "bin/cli"
},
"peerDependencies": {
"zod": "^3.25.0 || ^4.0.0"
},
"peerDependenciesMeta": {
"zod": {
"optional": true
}
}
},
"node_modules/@babel/runtime": {
"version": "7.28.4",
"resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz",
"integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==",
"license": "MIT",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@biomejs/biome": {
@ -3879,6 +3899,19 @@
"bignumber.js": "^9.0.0"
}
},
"node_modules/json-schema-to-ts": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/json-schema-to-ts/-/json-schema-to-ts-3.1.1.tgz",
"integrity": "sha512-+DWg8jCJG2TEnpy7kOm/7/AxaYoaRbjVB4LFZLySZlWn8exGs3A4OLJR966cVvU26N7X9TWxl+Jsw7dzAqKT6g==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.18.3",
"ts-algebra": "^2.0.0"
},
"engines": {
"node": ">=16"
}
},
"node_modules/json-schema-traverse": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
@ -4550,16 +4583,16 @@
}
},
"node_modules/openai": {
"version": "5.21.0",
"resolved": "https://registry.npmjs.org/openai/-/openai-5.21.0.tgz",
"integrity": "sha512-E9LuV51vgvwbahPJaZu2x4V6SWMq9g3X6Bj2/wnFiNfV7lmAxYVxPxcQNZqCWbAVMaEoers9HzIxpOp6Vvgn8w==",
"version": "6.10.0",
"resolved": "https://registry.npmjs.org/openai/-/openai-6.10.0.tgz",
"integrity": "sha512-ITxOGo7rO3XRMiKA5l7tQ43iNNu+iXGFAcf2t+aWVzzqRaS0i7m1K2BhxNdaveB+5eENhO0VY1FkiZzhBk4v3A==",
"license": "Apache-2.0",
"bin": {
"openai": "bin/cli"
},
"peerDependencies": {
"ws": "^8.18.0",
"zod": "^3.23.8"
"zod": "^3.25 || ^4.0"
},
"peerDependenciesMeta": {
"ws": {
@ -5465,6 +5498,12 @@
"tree-kill": "cli.js"
}
},
"node_modules/ts-algebra": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ts-algebra/-/ts-algebra-2.0.0.tgz",
"integrity": "sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw==",
"license": "MIT"
},
"node_modules/tslib": {
"version": "2.8.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
@ -6462,8 +6501,8 @@
"version": "0.13.2",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-ai": "^0.13.1",
"@mariozechner/pi-tui": "^0.13.1"
"@mariozechner/pi-ai": "^0.13.2",
"@mariozechner/pi-tui": "^0.13.2"
},
"devDependencies": {
"@types/node": "^24.3.0",
@ -6496,13 +6535,13 @@
"version": "0.13.2",
"license": "MIT",
"dependencies": {
"@anthropic-ai/sdk": "^0.61.0",
"@google/genai": "^1.30.0",
"@anthropic-ai/sdk": "0.71.2",
"@google/genai": "1.31.0",
"@sinclair/typebox": "^0.34.41",
"ajv": "^8.17.1",
"ajv-formats": "^3.0.1",
"chalk": "^5.6.2",
"openai": "5.21.0",
"openai": "6.10.0",
"partial-json": "^0.1.7",
"zod-to-json-schema": "^3.24.6"
},
@ -6537,9 +6576,9 @@
"version": "0.13.2",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-agent-core": "^0.13.1",
"@mariozechner/pi-ai": "^0.13.1",
"@mariozechner/pi-tui": "^0.13.1",
"@mariozechner/pi-agent-core": "^0.13.2",
"@mariozechner/pi-ai": "^0.13.2",
"@mariozechner/pi-tui": "^0.13.2",
"chalk": "^5.5.0",
"diff": "^8.0.2",
"glob": "^11.0.3"
@ -6580,8 +6619,8 @@
"license": "MIT",
"dependencies": {
"@anthropic-ai/sandbox-runtime": "^0.0.16",
"@mariozechner/pi-agent-core": "^0.13.1",
"@mariozechner/pi-ai": "^0.13.1",
"@mariozechner/pi-agent-core": "^0.13.2",
"@mariozechner/pi-ai": "^0.13.2",
"@sinclair/typebox": "^0.34.0",
"@slack/socket-mode": "^2.0.0",
"@slack/web-api": "^7.0.0",
@ -6622,7 +6661,7 @@
"version": "0.13.2",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-agent-core": "^0.13.1",
"@mariozechner/pi-agent-core": "^0.13.2",
"chalk": "^5.5.0"
},
"bin": {
@ -6699,8 +6738,8 @@
"license": "MIT",
"dependencies": {
"@lmstudio/sdk": "^1.5.0",
"@mariozechner/pi-ai": "^0.13.1",
"@mariozechner/pi-tui": "^0.13.1",
"@mariozechner/pi-ai": "^0.13.2",
"@mariozechner/pi-tui": "^0.13.2",
"docx-preview": "^0.3.7",
"jszip": "^3.10.1",
"lucide": "^0.544.0",

View file

@ -7,7 +7,7 @@ export interface AgentRunConfig {
systemPrompt: string;
tools: AgentTool<any>[];
model: Model<any>;
reasoning?: "low" | "medium" | "high";
reasoning?: "low" | "medium" | "high" | "xhigh";
getQueuedMessages?: <T>() => Promise<QueuedMessage<T>[]>;
}

View file

@ -24,8 +24,9 @@ export interface Attachment {
/**
* Thinking/reasoning level for models that support it.
* Note: "xhigh" is only supported by OpenAI codex-max models.
*/
export type ThinkingLevel = "off" | "minimal" | "low" | "medium" | "high";
export type ThinkingLevel = "off" | "minimal" | "low" | "medium" | "high" | "xhigh";
/**
* User message with optional attachments.

View file

@ -12,6 +12,12 @@
- **OpenAI compatibility overrides**: Added `compat` field to `Model` for `openai-completions` API, allowing explicit configuration of provider quirks (`supportsStore`, `supportsDeveloperRole`, `supportsReasoningEffort`, `maxTokensField`). Falls back to URL-based detection if not set. Useful for LiteLLM, custom proxies, and other non-standard endpoints. ([#133](https://github.com/badlogic/pi-mono/issues/133), thanks @fink-andreas for the initial idea and PR)
- **xhigh reasoning level**: Added `xhigh` to `ReasoningEffort` type for OpenAI codex-max models. For non-OpenAI providers (Anthropic, Google), `xhigh` is automatically mapped to `high`. ([#143](https://github.com/badlogic/pi-mono/issues/143))
### Changed
- **Updated SDK versions**: OpenAI SDK 5.21.0 → 6.10.0, Anthropic SDK 0.61.0 → 0.71.2, Google GenAI SDK 1.30.0 → 1.31.0
## [0.13.0] - 2025-12-06
### Breaking Changes

View file

@ -387,7 +387,7 @@ if (model.reasoning) {
const response = await completeSimple(model, {
messages: [{ role: 'user', content: 'Solve: 2x + 5 = 13' }]
}, {
reasoning: 'medium' // 'minimal' | 'low' | 'medium' | 'high'
reasoning: 'medium' // 'minimal' | 'low' | 'medium' | 'high' | 'xhigh' (xhigh maps to high on non-OpenAI providers)
});
// Access thinking and text blocks

View file

@ -20,13 +20,13 @@
"prepublishOnly": "npm run clean && npm run build"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.61.0",
"@google/genai": "^1.30.0",
"@anthropic-ai/sdk": "0.71.2",
"@google/genai": "1.31.0",
"@sinclair/typebox": "^0.34.41",
"ajv": "^8.17.1",
"ajv-formats": "^3.0.1",
"chalk": "^5.6.2",
"openai": "5.21.0",
"openai": "6.10.0",
"partial-json": "^0.1.7",
"zod-to-json-schema": "^3.24.6"
},

View file

@ -29,7 +29,7 @@ import { transformMessages } from "./transorm-messages.js";
export interface OpenAICompletionsOptions extends StreamOptions {
toolChoice?: "auto" | "none" | "required" | { type: "function"; function: { name: string } };
reasoningEffort?: "minimal" | "low" | "medium" | "high";
reasoningEffort?: "minimal" | "low" | "medium" | "high" | "xhigh";
}
export const streamOpenAICompletions: StreamFunction<"openai-completions"> = (

View file

@ -32,7 +32,7 @@ import { transformMessages } from "./transorm-messages.js";
// OpenAI Responses-specific options
export interface OpenAIResponsesOptions extends StreamOptions {
reasoningEffort?: "minimal" | "low" | "medium" | "high";
reasoningEffort?: "minimal" | "low" | "medium" | "high" | "xhigh";
reasoningSummary?: "auto" | "detailed" | "concise" | null;
}
@ -158,7 +158,10 @@ export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
else if (event.type === "response.content_part.added") {
if (currentItem && currentItem.type === "message") {
currentItem.content = currentItem.content || [];
currentItem.content.push(event.part);
// Filter out ReasoningText, only accept output_text and refusal
if (event.part.type === "output_text" || event.part.type === "refusal") {
currentItem.content.push(event.part);
}
}
} else if (event.type === "response.output_text.delta") {
if (currentItem && currentItem.type === "message" && currentBlock && currentBlock.type === "text") {

View file

@ -122,6 +122,9 @@ function mapOptionsForApi<TApi extends Api>(
apiKey: apiKey || options?.apiKey,
};
// Helper to clamp xhigh to high for providers that don't support it
const clampReasoning = (effort: ReasoningEffort | undefined) => (effort === "xhigh" ? "high" : effort);
switch (model.api) {
case "anthropic-messages": {
if (!options?.reasoning) return base satisfies AnthropicOptions;
@ -136,7 +139,7 @@ function mapOptionsForApi<TApi extends Api>(
return {
...base,
thinkingEnabled: true,
thinkingBudgetTokens: anthropicBudgets[options.reasoning],
thinkingBudgetTokens: anthropicBudgets[clampReasoning(options.reasoning)!],
} satisfies AnthropicOptions;
}
@ -155,7 +158,10 @@ function mapOptionsForApi<TApi extends Api>(
case "google-generative-ai": {
if (!options?.reasoning) return base as any;
const googleBudget = getGoogleBudget(model as Model<"google-generative-ai">, options.reasoning);
const googleBudget = getGoogleBudget(
model as Model<"google-generative-ai">,
clampReasoning(options.reasoning)!,
);
return {
...base,
thinking: {
@ -173,10 +179,12 @@ function mapOptionsForApi<TApi extends Api>(
}
}
function getGoogleBudget(model: Model<"google-generative-ai">, effort: ReasoningEffort): number {
type ClampedReasoningEffort = Exclude<ReasoningEffort, "xhigh">;
function getGoogleBudget(model: Model<"google-generative-ai">, effort: ClampedReasoningEffort): number {
// See https://ai.google.dev/gemini-api/docs/thinking#set-budget
if (model.id.includes("2.5-pro")) {
const budgets = {
const budgets: Record<ClampedReasoningEffort, number> = {
minimal: 128,
low: 2048,
medium: 8192,
@ -187,7 +195,7 @@ function getGoogleBudget(model: Model<"google-generative-ai">, effort: Reasoning
if (model.id.includes("2.5-flash")) {
// Covers 2.5-flash-lite as well
const budgets = {
const budgets: Record<ClampedReasoningEffort, number> = {
minimal: 128,
low: 2048,
medium: 8192,

View file

@ -29,7 +29,7 @@ export type OptionsForApi<TApi extends Api> = ApiOptionsMap[TApi];
export type KnownProvider = "anthropic" | "google" | "openai" | "xai" | "groq" | "cerebras" | "openrouter" | "zai";
export type Provider = KnownProvider | string;
export type ReasoningEffort = "minimal" | "low" | "medium" | "high";
export type ReasoningEffort = "minimal" | "low" | "medium" | "high" | "xhigh";
// Base options all providers share
export interface StreamOptions {

View file

@ -0,0 +1,69 @@
import { describe, expect, it } from "vitest";
import { getModel } from "../src/models.js";
import { stream } from "../src/stream.js";
import type { Context, Model } from "../src/types.js";
// Builds a one-message Context asking the model to add two small random
// integers; the randomness keeps each run's prompt unique (avoids any
// provider-side response caching skewing the reasoning tests).
function makeContext(): Context {
	const a = Math.floor(Math.random() * 100);
	const b = Math.floor(Math.random() * 100);
	return {
		messages: [
			{
				role: "user",
				content: `What is ${a} + ${b}? Think step by step.`,
				timestamp: Date.now(),
			},
		],
	};
}
// Live integration tests for the "xhigh" reasoning effort level.
// Skipped entirely unless OPENAI_API_KEY is set, since every case hits the real API.
// Contract under test: xhigh is accepted by codex-max models and rejected by others.
describe.skipIf(!process.env.OPENAI_API_KEY)("xhigh reasoning", () => {
	describe("codex-max (supports xhigh)", () => {
		// Note: codex models only support the responses API, not chat completions
		it("should work with openai-responses", async () => {
			const model = getModel("openai", "gpt-5.1-codex-max");
			const s = stream(model, makeContext(), { reasoningEffort: "xhigh" });
			// Track whether any thinking events arrive while draining the stream.
			let hasThinking = false;
			for await (const event of s) {
				if (event.type === "thinking_start" || event.type === "thinking_delta") {
					hasThinking = true;
				}
			}
			const response = await s.result();
			// Surface the provider error message in the assertion output for easier debugging.
			expect(response.stopReason, `Error: ${response.errorMessage}`).toBe("stop");
			expect(response.content.some((b) => b.type === "text")).toBe(true);
			// Thinking may surface as stream events or as content blocks — accept either.
			expect(hasThinking || response.content.some((b) => b.type === "thinking")).toBe(true);
		});
	});

	describe("gpt-5-mini (does not support xhigh)", () => {
		it("should error with openai-responses when using xhigh", async () => {
			const model = getModel("openai", "gpt-5-mini");
			const s = stream(model, makeContext(), { reasoningEffort: "xhigh" });
			for await (const _ of s) {
				// drain events
			}
			const response = await s.result();
			// The run must fail, and the error text should name the unsupported level.
			expect(response.stopReason).toBe("error");
			expect(response.errorMessage).toContain("xhigh");
		});

		it("should error with openai-completions when using xhigh", async () => {
			// Override the model's api field to force the chat-completions code path.
			const model: Model<"openai-completions"> = {
				...getModel("openai", "gpt-5-mini"),
				api: "openai-completions",
			};
			const s = stream(model, makeContext(), { reasoningEffort: "xhigh" });
			for await (const _ of s) {
				// drain events
			}
			const response = await s.result();
			expect(response.stopReason).toBe("error");
			expect(response.errorMessage).toContain("xhigh");
		});
	});
});

View file

@ -2,10 +2,18 @@
## [Unreleased]
### Breaking Changes
- **Custom themes require new color tokens**: Themes must now include `thinkingXhigh` and `bashMode` color tokens. The theme loader provides helpful error messages listing missing tokens. See built-in themes (dark.json, light.json) for reference values.
### Added
- **OpenAI compatibility overrides in models.json**: Custom models using `openai-completions` API can now specify a `compat` object to override provider quirks (`supportsStore`, `supportsDeveloperRole`, `supportsReasoningEffort`, `maxTokensField`). Useful for LiteLLM, custom proxies, and other non-standard endpoints. ([#133](https://github.com/badlogic/pi-mono/issues/133), thanks @fink-andreas for the initial idea and PR)
- **xhigh thinking level**: Added `xhigh` thinking level for OpenAI codex-max models. Cycle through thinking levels with Shift+Tab; `xhigh` appears only when using a codex-max model. ([#143](https://github.com/badlogic/pi-mono/issues/143))
- **Collapse changelog setting**: Add `"collapseChangelog": true` to `~/.pi/agent/settings.json` to show a condensed "Updated to vX.Y.Z" message instead of the full changelog after updates. Use `/changelog` to view the full changelog. ([#148](https://github.com/badlogic/pi-mono/issues/148))
## [0.13.2] - 2025-12-07
### Changed

View file

@ -112,12 +112,19 @@ function parseArgs(args: string[]): Args {
result.tools = validTools;
} else if (arg === "--thinking" && i + 1 < args.length) {
const level = args[++i];
if (level === "off" || level === "minimal" || level === "low" || level === "medium" || level === "high") {
if (
level === "off" ||
level === "minimal" ||
level === "low" ||
level === "medium" ||
level === "high" ||
level === "xhigh"
) {
result.thinking = level;
} else {
console.error(
chalk.yellow(
`Warning: Invalid thinking level "${level}". Valid values: off, minimal, low, medium, high`,
`Warning: Invalid thinking level "${level}". Valid values: off, minimal, low, medium, high, xhigh`,
),
);
}
@ -248,7 +255,7 @@ ${chalk.bold("Options:")}
--models <patterns> Comma-separated model patterns for quick cycling with Ctrl+P
--tools <tools> Comma-separated list of tools to enable (default: read,bash,edit,write)
Available: read, bash, edit, write, grep, find, ls
--thinking <level> Set thinking level: off, minimal, low, medium, high
--thinking <level> Set thinking level: off, minimal, low, medium, high, xhigh
--export <file> Export session file to HTML and exit
--help, -h Show this help
@ -593,7 +600,14 @@ async function resolveModelScope(
if (parts.length > 1) {
const level = parts[1];
if (level === "off" || level === "minimal" || level === "low" || level === "medium" || level === "high") {
if (
level === "off" ||
level === "minimal" ||
level === "low" ||
level === "medium" ||
level === "high" ||
level === "xhigh"
) {
thinkingLevel = level;
} else {
console.warn(
@ -716,6 +730,7 @@ async function runInteractiveMode(
settingsManager: SettingsManager,
version: string,
changelogMarkdown: string | null = null,
collapseChangelog = false,
modelFallbackMessage: string | null = null,
versionCheckPromise: Promise<string | null>,
scopedModels: Array<{ model: Model<Api>; thinkingLevel: ThinkingLevel }> = [],
@ -730,6 +745,7 @@ async function runInteractiveMode(
settingsManager,
version,
changelogMarkdown,
collapseChangelog,
scopedModels,
fdPath,
);
@ -1385,12 +1401,14 @@ export async function main(args: string[]) {
const fdPath = await ensureTool("fd");
// Interactive mode - use TUI (may have initial messages from CLI args)
const collapseChangelog = settingsManager.getCollapseChangelog();
await runInteractiveMode(
agent,
sessionManager,
settingsManager,
VERSION,
changelogMarkdown,
collapseChangelog,
modelFallbackMessage,
versionCheckPromise,
scopedModels,

View file

@ -12,12 +12,13 @@ export interface Settings {
lastChangelogVersion?: string;
defaultProvider?: string;
defaultModel?: string;
defaultThinkingLevel?: "off" | "minimal" | "low" | "medium" | "high";
defaultThinkingLevel?: "off" | "minimal" | "low" | "medium" | "high" | "xhigh";
queueMode?: "all" | "one-at-a-time";
theme?: string;
compaction?: CompactionSettings;
hideThinkingBlock?: boolean;
shellPath?: string; // Custom shell path (e.g., for Cygwin users on Windows)
collapseChangelog?: boolean; // Show condensed changelog after update (use /changelog for full)
}
export class SettingsManager {
@ -109,11 +110,11 @@ export class SettingsManager {
this.save();
}
getDefaultThinkingLevel(): "off" | "minimal" | "low" | "medium" | "high" | undefined {
getDefaultThinkingLevel(): "off" | "minimal" | "low" | "medium" | "high" | "xhigh" | undefined {
return this.settings.defaultThinkingLevel;
}
setDefaultThinkingLevel(level: "off" | "minimal" | "low" | "medium" | "high"): void {
setDefaultThinkingLevel(level: "off" | "minimal" | "low" | "medium" | "high" | "xhigh"): void {
this.settings.defaultThinkingLevel = level;
this.save();
}
@ -163,4 +164,13 @@ export class SettingsManager {
this.settings.shellPath = path;
this.save();
}
/** Whether the post-update changelog should be shown condensed. Defaults to false when unset. */
getCollapseChangelog(): boolean {
	const { collapseChangelog } = this.settings;
	return collapseChangelog ?? false;
}
/** Persist the user's preference for the condensed post-update changelog. */
setCollapseChangelog(collapse: boolean): void {
	// Mutates the existing settings object in place (same identity), then writes to disk.
	Object.assign(this.settings, { collapseChangelog: collapse });
	this.save();
}
}

View file

@ -65,6 +65,9 @@
"thinkingMinimal": "#6e6e6e",
"thinkingLow": "#5f87af",
"thinkingMedium": "#81a2be",
"thinkingHigh": "#b294bb"
"thinkingHigh": "#b294bb",
"thinkingXhigh": "#d183e8",
"bashMode": "green"
}
}

View file

@ -64,6 +64,9 @@
"thinkingMinimal": "#9e9e9e",
"thinkingLow": "#5f87af",
"thinkingMedium": "#5f8787",
"thinkingHigh": "#875f87"
"thinkingHigh": "#875f87",
"thinkingXhigh": "#8b008b",
"bashMode": "green"
}
}

View file

@ -221,6 +221,34 @@
"syntaxPunctuation": {
"$ref": "#/$defs/colorValue",
"description": "Syntax highlighting: punctuation"
},
"thinkingOff": {
"$ref": "#/$defs/colorValue",
"description": "Thinking level border: off"
},
"thinkingMinimal": {
"$ref": "#/$defs/colorValue",
"description": "Thinking level border: minimal"
},
"thinkingLow": {
"$ref": "#/$defs/colorValue",
"description": "Thinking level border: low"
},
"thinkingMedium": {
"$ref": "#/$defs/colorValue",
"description": "Thinking level border: medium"
},
"thinkingHigh": {
"$ref": "#/$defs/colorValue",
"description": "Thinking level border: high"
},
"thinkingXhigh": {
"$ref": "#/$defs/colorValue",
"description": "Thinking level border: xhigh (OpenAI codex-max only)"
},
"bashMode": {
"$ref": "#/$defs/colorValue",
"description": "Editor border color in bash mode"
}
},
"additionalProperties": false

View file

@ -66,12 +66,15 @@ const ThemeJsonSchema = Type.Object({
syntaxType: ColorValueSchema,
syntaxOperator: ColorValueSchema,
syntaxPunctuation: ColorValueSchema,
// Thinking Level Borders (5 colors)
// Thinking Level Borders (6 colors)
thinkingOff: ColorValueSchema,
thinkingMinimal: ColorValueSchema,
thinkingLow: ColorValueSchema,
thinkingMedium: ColorValueSchema,
thinkingHigh: ColorValueSchema,
thinkingXhigh: ColorValueSchema,
// Bash Mode (1 color)
bashMode: ColorValueSchema,
}),
});
@ -119,7 +122,9 @@ export type ThemeColor =
| "thinkingMinimal"
| "thinkingLow"
| "thinkingMedium"
| "thinkingHigh";
| "thinkingHigh"
| "thinkingXhigh"
| "bashMode";
export type ThemeBg = "userMessageBg" | "toolPendingBg" | "toolSuccessBg" | "toolErrorBg";
@ -295,7 +300,7 @@ export class Theme {
return this.mode;
}
getThinkingBorderColor(level: "off" | "minimal" | "low" | "medium" | "high"): (str: string) => string {
getThinkingBorderColor(level: "off" | "minimal" | "low" | "medium" | "high" | "xhigh"): (str: string) => string {
// Map thinking levels to dedicated theme colors
switch (level) {
case "off":
@ -308,10 +313,16 @@ export class Theme {
return (str: string) => this.fg("thinkingMedium", str);
case "high":
return (str: string) => this.fg("thinkingHigh", str);
case "xhigh":
return (str: string) => this.fg("thinkingXhigh", str);
default:
return (str: string) => this.fg("thinkingOff", str);
}
}
/** Returns a colorizer that paints text with the editor's bash-mode border color. */
getBashModeBorderColor(): (str: string) => string {
	return (text: string): string => this.fg("bashMode", text);
}
}
// ============================================================================
@ -366,8 +377,31 @@ function loadThemeJson(name: string): ThemeJson {
}
if (!validateThemeJson.Check(json)) {
const errors = Array.from(validateThemeJson.Errors(json));
const errorMessages = errors.map((e) => ` - ${e.path}: ${e.message}`).join("\n");
throw new Error(`Invalid theme ${name}:\n${errorMessages}`);
const missingColors: string[] = [];
const otherErrors: string[] = [];
for (const e of errors) {
// Check for missing required color properties
const match = e.path.match(/^\/colors\/(\w+)$/);
if (match && e.message.includes("Required")) {
missingColors.push(match[1]);
} else {
otherErrors.push(` - ${e.path}: ${e.message}`);
}
}
let errorMessage = `Invalid theme "${name}":\n`;
if (missingColors.length > 0) {
errorMessage += `\nMissing required color tokens:\n`;
errorMessage += missingColors.map((c) => ` - ${c}`).join("\n");
errorMessage += `\n\nPlease add these colors to your theme's "colors" object.`;
errorMessage += `\nSee the built-in themes (dark.json, light.json) for reference values.`;
}
if (otherErrors.length > 0) {
errorMessage += `\n\nOther errors:\n${otherErrors.join("\n")}`;
}
throw new Error(errorMessage);
}
return json as ThemeJson;
}

View file

@ -71,6 +71,7 @@ export class TuiRenderer {
private lastSigintTime = 0;
private lastEscapeTime = 0;
private changelogMarkdown: string | null = null;
private collapseChangelog = false;
// Message queueing
private queuedMessages: string[] = [];
@ -126,6 +127,7 @@ export class TuiRenderer {
settingsManager: SettingsManager,
version: string,
changelogMarkdown: string | null = null,
collapseChangelog = false,
scopedModels: Array<{ model: Model<any>; thinkingLevel: ThinkingLevel }> = [],
fdPath: string | null = null,
) {
@ -134,6 +136,7 @@ export class TuiRenderer {
this.settingsManager = settingsManager;
this.version = version;
this.changelogMarkdown = changelogMarkdown;
this.collapseChangelog = collapseChangelog;
this.scopedModels = scopedModels;
this.ui = new TUI(new ProcessTerminal());
this.chatContainer = new Container();
@ -304,10 +307,18 @@ export class TuiRenderer {
// Add changelog if provided
if (this.changelogMarkdown) {
this.ui.addChild(new DynamicBorder());
this.ui.addChild(new Text(theme.bold(theme.fg("accent", "What's New")), 1, 0));
this.ui.addChild(new Spacer(1));
this.ui.addChild(new Markdown(this.changelogMarkdown.trim(), 1, 0, getMarkdownTheme()));
this.ui.addChild(new Spacer(1));
if (this.collapseChangelog) {
// Show condensed version with hint to use /changelog
const versionMatch = this.changelogMarkdown.match(/##\s+\[?(\d+\.\d+\.\d+)\]?/);
const latestVersion = versionMatch ? versionMatch[1] : this.version;
const condensedText = `Updated to v${latestVersion}. Use ${theme.bold("/changelog")} to view full changelog.`;
this.ui.addChild(new Text(condensedText, 1, 0));
} else {
this.ui.addChild(new Text(theme.bold(theme.fg("accent", "What's New")), 1, 0));
this.ui.addChild(new Spacer(1));
this.ui.addChild(new Markdown(this.changelogMarkdown.trim(), 1, 0, getMarkdownTheme()));
this.ui.addChild(new Spacer(1));
}
this.ui.addChild(new DynamicBorder());
}
@ -1019,7 +1030,12 @@ export class TuiRenderer {
return;
}
const levels: ThinkingLevel[] = ["off", "minimal", "low", "medium", "high"];
// xhigh is only available for codex-max models
const modelId = this.agent.state.model?.id || "";
const supportsXhigh = modelId.includes("codex-max");
const levels: ThinkingLevel[] = supportsXhigh
? ["off", "minimal", "low", "medium", "high", "xhigh"]
: ["off", "minimal", "low", "medium", "high"];
const currentLevel = this.agent.state.thinkingLevel || "off";
const currentIndex = levels.indexOf(currentLevel);
const nextIndex = (currentIndex + 1) % levels.length;