From d893ba7f207e8b8a0b2f24558eff199347e8082d Mon Sep 17 00:00:00 2001 From: Zhou Rui Date: Wed, 7 Jan 2026 23:09:20 +0800 Subject: [PATCH 1/8] fix(ai): clean up openai-codex models and token limits --- packages/ai/CHANGELOG.md | 4 + packages/ai/scripts/generate-models.ts | 123 ++----------- packages/ai/src/models.generated.ts | 173 +++--------------- .../src/providers/openai-codex-responses.ts | 5 +- .../providers/openai-codex/prompts/codex.ts | 14 +- .../openai-codex/request-transformer.ts | 160 ++-------------- packages/ai/test/openai-codex.test.ts | 24 ++- 7 files changed, 84 insertions(+), 419 deletions(-) diff --git a/packages/ai/CHANGELOG.md b/packages/ai/CHANGELOG.md index ed94d524..48e5af4f 100644 --- a/packages/ai/CHANGELOG.md +++ b/packages/ai/CHANGELOG.md @@ -2,6 +2,10 @@ ## [Unreleased] +### Fixed + +- Fixed OpenAI Codex OAuth model list (removed aliases), aligned context window/maxTokens with observed backend limits, and refined reasoning effort clamping. + ## [0.37.8] - 2026-01-07 ## [0.37.7] - 2026-01-07 diff --git a/packages/ai/scripts/generate-models.ts b/packages/ai/scripts/generate-models.ts index 4f52d8a2..431c8678 100644 --- a/packages/ai/scripts/generate-models.ts +++ b/packages/ai/scripts/generate-models.ts @@ -443,37 +443,16 @@ async function generateModels() { } // OpenAI Codex (ChatGPT OAuth) models + // NOTE: These are not fetched from models.dev; we keep a small, explicit list to avoid aliases. + // Context window is based on observed server limits (400s above ~272k), not marketing numbers. const CODEX_BASE_URL = "https://chatgpt.com/backend-api"; - const CODEX_CONTEXT = 400000; - const CODEX_MAX_TOKENS = 128000; + const CODEX_CONTEXT = 272000; + // Use the same max output token budget as Codex CLI. 
+ const CODEX_MAX_TOKENS = 10000; const codexModels: Model<"openai-codex-responses">[] = [ { - id: "gpt-5.2-codex", - name: "GPT-5.2 Codex", - api: "openai-codex-responses", - provider: "openai-codex", - baseUrl: CODEX_BASE_URL, - reasoning: true, - input: ["text", "image"], - cost: { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 }, - contextWindow: CODEX_CONTEXT, - maxTokens: CODEX_MAX_TOKENS, - }, - { - id: "gpt-5.2", - name: "GPT-5.2", - api: "openai-codex-responses", - provider: "openai-codex", - baseUrl: CODEX_BASE_URL, - reasoning: true, - input: ["text", "image"], - cost: { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 }, - contextWindow: CODEX_CONTEXT, - maxTokens: CODEX_MAX_TOKENS, - }, - { - id: "gpt-5.1-codex-max", - name: "GPT-5.1 Codex Max", + id: "gpt-5.1", + name: "GPT-5.1", api: "openai-codex-responses", provider: "openai-codex", baseUrl: CODEX_BASE_URL, @@ -484,8 +463,8 @@ async function generateModels() { maxTokens: CODEX_MAX_TOKENS, }, { - id: "gpt-5.1-codex", - name: "GPT-5.1 Codex", + id: "gpt-5.1-codex-max", + name: "GPT-5.1 Codex Max", api: "openai-codex-responses", provider: "openai-codex", baseUrl: CODEX_BASE_URL, @@ -508,98 +487,26 @@ async function generateModels() { maxTokens: CODEX_MAX_TOKENS, }, { - id: "codex-mini-latest", - name: "Codex Mini Latest", + id: "gpt-5.2", + name: "GPT-5.2", api: "openai-codex-responses", provider: "openai-codex", baseUrl: CODEX_BASE_URL, reasoning: true, input: ["text", "image"], - cost: { input: 1.5, output: 6, cacheRead: 0.375, cacheWrite: 0 }, + cost: { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 }, contextWindow: CODEX_CONTEXT, maxTokens: CODEX_MAX_TOKENS, }, { - id: "gpt-5-codex-mini", - name: "gpt-5-codex-mini", + id: "gpt-5.2-codex", + name: "GPT-5.2 Codex", api: "openai-codex-responses", provider: "openai-codex", baseUrl: CODEX_BASE_URL, reasoning: true, input: ["text", "image"], - cost: { input: 0.25, output: 2, cacheRead: 0.025, cacheWrite: 0 }, - 
contextWindow: CODEX_CONTEXT, - maxTokens: CODEX_MAX_TOKENS, - }, - { - id: "gpt-5-codex", - name: "gpt-5-codex", - api: "openai-codex-responses", - provider: "openai-codex", - baseUrl: CODEX_BASE_URL, - reasoning: true, - input: ["text", "image"], - cost: { input: 1.25, output: 10, cacheRead: 0.125, cacheWrite: 0 }, - contextWindow: CODEX_CONTEXT, - maxTokens: CODEX_MAX_TOKENS, - }, - { - id: "gpt-5.1", - name: "GPT-5.1", - api: "openai-codex-responses", - provider: "openai-codex", - baseUrl: CODEX_BASE_URL, - reasoning: true, - input: ["text", "image"], - cost: { input: 1.25, output: 10, cacheRead: 0.125, cacheWrite: 0 }, - contextWindow: CODEX_CONTEXT, - maxTokens: CODEX_MAX_TOKENS, - }, - { - id: "gpt-5.1-chat-latest", - name: "gpt-5.1-chat-latest", - api: "openai-codex-responses", - provider: "openai-codex", - baseUrl: CODEX_BASE_URL, - reasoning: true, - input: ["text", "image"], - cost: { input: 1.25, output: 10, cacheRead: 0.125, cacheWrite: 0 }, - contextWindow: CODEX_CONTEXT, - maxTokens: CODEX_MAX_TOKENS, - }, - { - id: "gpt-5", - name: "gpt-5", - api: "openai-codex-responses", - provider: "openai-codex", - baseUrl: CODEX_BASE_URL, - reasoning: true, - input: ["text", "image"], - cost: { input: 1.25, output: 10, cacheRead: 0.125, cacheWrite: 0 }, - contextWindow: CODEX_CONTEXT, - maxTokens: CODEX_MAX_TOKENS, - }, - { - id: "gpt-5-mini", - name: "gpt-5-mini", - api: "openai-codex-responses", - provider: "openai-codex", - baseUrl: CODEX_BASE_URL, - reasoning: true, - input: ["text", "image"], - cost: { input: 0.25, output: 2, cacheRead: 0.025, cacheWrite: 0 }, - contextWindow: CODEX_CONTEXT, - maxTokens: CODEX_MAX_TOKENS, - }, - { - id: "gpt-5-nano", - name: "gpt-5-nano", - api: "openai-codex-responses", - provider: "openai-codex", - baseUrl: CODEX_BASE_URL, - reasoning: true, - input: ["text", "image"], - cost: { input: 0.05, output: 0.4, cacheRead: 0.005, cacheWrite: 0 }, + cost: { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 }, 
contextWindow: CODEX_CONTEXT, maxTokens: CODEX_MAX_TOKENS, }, diff --git a/packages/ai/src/models.generated.ts b/packages/ai/src/models.generated.ts index 82bebbc7..4e86ee0d 100644 --- a/packages/ai/src/models.generated.ts +++ b/packages/ai/src/models.generated.ts @@ -415,6 +415,23 @@ export const MODELS = { contextWindow: 131072, maxTokens: 40960, } satisfies Model<"openai-completions">, + "zai-glm-4.7": { + id: "zai-glm-4.7", + name: "Z.AI GLM-4.7", + api: "openai-completions", + provider: "cerebras", + baseUrl: "https://api.cerebras.ai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 40000, + } satisfies Model<"openai-completions">, }, "github-copilot": { "claude-haiku-4.5": { @@ -2774,108 +2791,6 @@ export const MODELS = { } satisfies Model<"openai-responses">, }, "openai-codex": { - "codex-mini-latest": { - id: "codex-mini-latest", - name: "Codex Mini Latest", - api: "openai-codex-responses", - provider: "openai-codex", - baseUrl: "https://chatgpt.com/backend-api", - reasoning: true, - input: ["text", "image"], - cost: { - input: 1.5, - output: 6, - cacheRead: 0.375, - cacheWrite: 0, - }, - contextWindow: 400000, - maxTokens: 128000, - } satisfies Model<"openai-codex-responses">, - "gpt-5": { - id: "gpt-5", - name: "gpt-5", - api: "openai-codex-responses", - provider: "openai-codex", - baseUrl: "https://chatgpt.com/backend-api", - reasoning: true, - input: ["text", "image"], - cost: { - input: 1.25, - output: 10, - cacheRead: 0.125, - cacheWrite: 0, - }, - contextWindow: 400000, - maxTokens: 128000, - } satisfies Model<"openai-codex-responses">, - "gpt-5-codex": { - id: "gpt-5-codex", - name: "gpt-5-codex", - api: "openai-codex-responses", - provider: "openai-codex", - baseUrl: "https://chatgpt.com/backend-api", - reasoning: true, - input: ["text", "image"], - cost: { - input: 1.25, - output: 10, - cacheRead: 0.125, - cacheWrite: 0, - }, - contextWindow: 
400000, - maxTokens: 128000, - } satisfies Model<"openai-codex-responses">, - "gpt-5-codex-mini": { - id: "gpt-5-codex-mini", - name: "gpt-5-codex-mini", - api: "openai-codex-responses", - provider: "openai-codex", - baseUrl: "https://chatgpt.com/backend-api", - reasoning: true, - input: ["text", "image"], - cost: { - input: 0.25, - output: 2, - cacheRead: 0.025, - cacheWrite: 0, - }, - contextWindow: 400000, - maxTokens: 128000, - } satisfies Model<"openai-codex-responses">, - "gpt-5-mini": { - id: "gpt-5-mini", - name: "gpt-5-mini", - api: "openai-codex-responses", - provider: "openai-codex", - baseUrl: "https://chatgpt.com/backend-api", - reasoning: true, - input: ["text", "image"], - cost: { - input: 0.25, - output: 2, - cacheRead: 0.025, - cacheWrite: 0, - }, - contextWindow: 400000, - maxTokens: 128000, - } satisfies Model<"openai-codex-responses">, - "gpt-5-nano": { - id: "gpt-5-nano", - name: "gpt-5-nano", - api: "openai-codex-responses", - provider: "openai-codex", - baseUrl: "https://chatgpt.com/backend-api", - reasoning: true, - input: ["text", "image"], - cost: { - input: 0.05, - output: 0.4, - cacheRead: 0.005, - cacheWrite: 0, - }, - contextWindow: 400000, - maxTokens: 128000, - } satisfies Model<"openai-codex-responses">, "gpt-5.1": { id: "gpt-5.1", name: "GPT-5.1", @@ -2890,42 +2805,8 @@ export const MODELS = { cacheRead: 0.125, cacheWrite: 0, }, - contextWindow: 400000, - maxTokens: 128000, - } satisfies Model<"openai-codex-responses">, - "gpt-5.1-chat-latest": { - id: "gpt-5.1-chat-latest", - name: "gpt-5.1-chat-latest", - api: "openai-codex-responses", - provider: "openai-codex", - baseUrl: "https://chatgpt.com/backend-api", - reasoning: true, - input: ["text", "image"], - cost: { - input: 1.25, - output: 10, - cacheRead: 0.125, - cacheWrite: 0, - }, - contextWindow: 400000, - maxTokens: 128000, - } satisfies Model<"openai-codex-responses">, - "gpt-5.1-codex": { - id: "gpt-5.1-codex", - name: "GPT-5.1 Codex", - api: "openai-codex-responses", - 
provider: "openai-codex", - baseUrl: "https://chatgpt.com/backend-api", - reasoning: true, - input: ["text", "image"], - cost: { - input: 1.25, - output: 10, - cacheRead: 0.125, - cacheWrite: 0, - }, - contextWindow: 400000, - maxTokens: 128000, + contextWindow: 272000, + maxTokens: 10000, } satisfies Model<"openai-codex-responses">, "gpt-5.1-codex-max": { id: "gpt-5.1-codex-max", @@ -2941,8 +2822,8 @@ export const MODELS = { cacheRead: 0.125, cacheWrite: 0, }, - contextWindow: 400000, - maxTokens: 128000, + contextWindow: 272000, + maxTokens: 10000, } satisfies Model<"openai-codex-responses">, "gpt-5.1-codex-mini": { id: "gpt-5.1-codex-mini", @@ -2958,8 +2839,8 @@ export const MODELS = { cacheRead: 0.025, cacheWrite: 0, }, - contextWindow: 400000, - maxTokens: 128000, + contextWindow: 272000, + maxTokens: 10000, } satisfies Model<"openai-codex-responses">, "gpt-5.2": { id: "gpt-5.2", @@ -2975,8 +2856,8 @@ export const MODELS = { cacheRead: 0.175, cacheWrite: 0, }, - contextWindow: 400000, - maxTokens: 128000, + contextWindow: 272000, + maxTokens: 10000, } satisfies Model<"openai-codex-responses">, "gpt-5.2-codex": { id: "gpt-5.2-codex", @@ -2992,8 +2873,8 @@ export const MODELS = { cacheRead: 0.175, cacheWrite: 0, }, - contextWindow: 400000, - maxTokens: 128000, + contextWindow: 272000, + maxTokens: 10000, } satisfies Model<"openai-codex-responses">, }, "openrouter": { diff --git a/packages/ai/src/providers/openai-codex-responses.ts b/packages/ai/src/providers/openai-codex-responses.ts index 0f74b635..74cb2206 100644 --- a/packages/ai/src/providers/openai-codex-responses.ts +++ b/packages/ai/src/providers/openai-codex-responses.ts @@ -37,7 +37,6 @@ import { buildCodexPiBridge } from "./openai-codex/prompts/pi-codex-bridge.js"; import { buildCodexSystemPrompt } from "./openai-codex/prompts/system-prompt.js"; import { type CodexRequestOptions, - normalizeModel, type RequestBody, transformRequestBody, } from "./openai-codex/request-transformer.js"; @@ -111,8 +110,7 
@@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses" params.tools = convertTools(context.tools); } - const normalizedModel = normalizeModel(params.model); - const codexInstructions = await getCodexInstructions(normalizedModel); + const codexInstructions = await getCodexInstructions(params.model); const bridgeText = buildCodexPiBridge(context.tools); const systemPrompt = buildCodexSystemPrompt({ codexInstructions, @@ -120,7 +118,6 @@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses" userSystemPrompt: context.systemPrompt, }); - params.model = normalizedModel; params.instructions = systemPrompt.instructions; const codexOptions: CodexRequestOptions = { diff --git a/packages/ai/src/providers/openai-codex/prompts/codex.ts b/packages/ai/src/providers/openai-codex/prompts/codex.ts index db94e780..869add2e 100644 --- a/packages/ai/src/providers/openai-codex/prompts/codex.ts +++ b/packages/ai/src/providers/openai-codex/prompts/codex.ts @@ -44,17 +44,17 @@ export type CacheMetadata = { url: string; }; -export function getModelFamily(normalizedModel: string): ModelFamily { - if (normalizedModel.includes("gpt-5.2-codex") || normalizedModel.includes("gpt 5.2 codex")) { +export function getModelFamily(model: string): ModelFamily { + if (model.includes("gpt-5.2-codex") || model.includes("gpt 5.2 codex")) { return "gpt-5.2-codex"; } - if (normalizedModel.includes("codex-max")) { + if (model.includes("codex-max")) { return "codex-max"; } - if (normalizedModel.includes("codex") || normalizedModel.startsWith("codex-")) { + if (model.includes("codex") || model.startsWith("codex-")) { return "codex"; } - if (normalizedModel.includes("gpt-5.2")) { + if (model.includes("gpt-5.2")) { return "gpt-5.2"; } return "gpt-5.1"; @@ -96,8 +96,8 @@ async function getLatestReleaseTag(): Promise { throw new Error("Failed to determine latest release tag from GitHub"); } -export async function getCodexInstructions(normalizedModel = 
"gpt-5.1-codex"): Promise { - const modelFamily = getModelFamily(normalizedModel); +export async function getCodexInstructions(model = "gpt-5.1-codex"): Promise { + const modelFamily = getModelFamily(model); const promptFile = PROMPT_FILES[modelFamily]; const cacheDir = getCacheDir(); const cacheFile = join(cacheDir, CACHE_FILES[modelFamily]); diff --git a/packages/ai/src/providers/openai-codex/request-transformer.ts b/packages/ai/src/providers/openai-codex/request-transformer.ts index 32b21e59..bd3f9926 100644 --- a/packages/ai/src/providers/openai-codex/request-transformer.ts +++ b/packages/ai/src/providers/openai-codex/request-transformer.ts @@ -41,155 +41,26 @@ export interface RequestBody { [key: string]: unknown; } -const MODEL_MAP: Record = { - "gpt-5.1-codex": "gpt-5.1-codex", - "gpt-5.1-codex-low": "gpt-5.1-codex", - "gpt-5.1-codex-medium": "gpt-5.1-codex", - "gpt-5.1-codex-high": "gpt-5.1-codex", - "gpt-5.1-codex-max": "gpt-5.1-codex-max", - "gpt-5.1-codex-max-low": "gpt-5.1-codex-max", - "gpt-5.1-codex-max-medium": "gpt-5.1-codex-max", - "gpt-5.1-codex-max-high": "gpt-5.1-codex-max", - "gpt-5.1-codex-max-xhigh": "gpt-5.1-codex-max", - "gpt-5.2": "gpt-5.2", - "gpt-5.2-none": "gpt-5.2", - "gpt-5.2-low": "gpt-5.2", - "gpt-5.2-medium": "gpt-5.2", - "gpt-5.2-high": "gpt-5.2", - "gpt-5.2-xhigh": "gpt-5.2", - "gpt-5.2-codex": "gpt-5.2-codex", - "gpt-5.2-codex-low": "gpt-5.2-codex", - "gpt-5.2-codex-medium": "gpt-5.2-codex", - "gpt-5.2-codex-high": "gpt-5.2-codex", - "gpt-5.2-codex-xhigh": "gpt-5.2-codex", - "gpt-5.1-codex-mini": "gpt-5.1-codex-mini", - "gpt-5.1-codex-mini-medium": "gpt-5.1-codex-mini", - "gpt-5.1-codex-mini-high": "gpt-5.1-codex-mini", - "gpt-5.1": "gpt-5.1", - "gpt-5.1-none": "gpt-5.1", - "gpt-5.1-low": "gpt-5.1", - "gpt-5.1-medium": "gpt-5.1", - "gpt-5.1-high": "gpt-5.1", - "gpt-5.1-chat-latest": "gpt-5.1", - "gpt-5-codex": "gpt-5.1-codex", - "codex-mini-latest": "gpt-5.1-codex-mini", - "gpt-5-codex-mini": "gpt-5.1-codex-mini", - 
"gpt-5-codex-mini-medium": "gpt-5.1-codex-mini", - "gpt-5-codex-mini-high": "gpt-5.1-codex-mini", - "gpt-5": "gpt-5.1", - "gpt-5-mini": "gpt-5.1", - "gpt-5-nano": "gpt-5.1", -}; - -function getNormalizedModel(modelId: string): string | undefined { - if (MODEL_MAP[modelId]) return MODEL_MAP[modelId]; - const lowerModelId = modelId.toLowerCase(); - const match = Object.keys(MODEL_MAP).find((key) => key.toLowerCase() === lowerModelId); - return match ? MODEL_MAP[match] : undefined; -} - -export function normalizeModel(model: string | undefined): string { - if (!model) return "gpt-5.1"; - +function clampReasoningEffort(model: string, effort: ReasoningConfig["effort"]): ReasoningConfig["effort"] { + // Codex backend expects exact model IDs. Do not normalize model names here. const modelId = model.includes("/") ? model.split("/").pop()! : model; - const mappedModel = getNormalizedModel(modelId); - if (mappedModel) return mappedModel; - const normalized = modelId.toLowerCase(); - - if (normalized.includes("gpt-5.2-codex") || normalized.includes("gpt 5.2 codex")) { - return "gpt-5.2-codex"; - } - if (normalized.includes("gpt-5.2") || normalized.includes("gpt 5.2")) { - return "gpt-5.2"; - } - if (normalized.includes("gpt-5.1-codex-max") || normalized.includes("gpt 5.1 codex max")) { - return "gpt-5.1-codex-max"; - } - if (normalized.includes("gpt-5.1-codex-mini") || normalized.includes("gpt 5.1 codex mini")) { - return "gpt-5.1-codex-mini"; - } - if ( - normalized.includes("codex-mini-latest") || - normalized.includes("gpt-5-codex-mini") || - normalized.includes("gpt 5 codex mini") - ) { - return "codex-mini-latest"; - } - if (normalized.includes("gpt-5.1-codex") || normalized.includes("gpt 5.1 codex")) { - return "gpt-5.1-codex"; - } - if (normalized.includes("gpt-5.1") || normalized.includes("gpt 5.1")) { - return "gpt-5.1"; - } - if (normalized.includes("codex")) { - return "gpt-5.1-codex"; - } - if (normalized.includes("gpt-5") || normalized.includes("gpt 5")) { - 
return "gpt-5.1"; + // gpt-5.1 does not support xhigh. + if (modelId === "gpt-5.1" && effort === "xhigh") { + return "high"; } - return "gpt-5.1"; + // gpt-5.1-codex-mini only supports medium/high. + if (modelId === "gpt-5.1-codex-mini") { + return effort === "high" || effort === "xhigh" ? "high" : "medium"; + } + + return effort; } -function getReasoningConfig(modelName: string | undefined, options: CodexRequestOptions = {}): ReasoningConfig { - const normalizedName = modelName?.toLowerCase() ?? ""; - - const isGpt52Codex = normalizedName.includes("gpt-5.2-codex") || normalizedName.includes("gpt 5.2 codex"); - const isGpt52General = (normalizedName.includes("gpt-5.2") || normalizedName.includes("gpt 5.2")) && !isGpt52Codex; - const isCodexMax = normalizedName.includes("codex-max") || normalizedName.includes("codex max"); - const isCodexMini = - normalizedName.includes("codex-mini") || - normalizedName.includes("codex mini") || - normalizedName.includes("codex_mini") || - normalizedName.includes("codex-mini-latest"); - const isCodex = normalizedName.includes("codex") && !isCodexMini; - const isLightweight = !isCodexMini && (normalizedName.includes("nano") || normalizedName.includes("mini")); - const isGpt51General = - (normalizedName.includes("gpt-5.1") || normalizedName.includes("gpt 5.1")) && - !isCodex && - !isCodexMax && - !isCodexMini; - - const supportsXhigh = isGpt52General || isGpt52Codex || isCodexMax; - const supportsNone = isGpt52General || isGpt51General; - - const defaultEffort: ReasoningConfig["effort"] = isCodexMini - ? "medium" - : supportsXhigh - ? "high" - : isLightweight - ? 
"minimal" - : "medium"; - - let effort = options.reasoningEffort || defaultEffort; - - if (isCodexMini) { - if (effort === "minimal" || effort === "low" || effort === "none") { - effort = "medium"; - } - if (effort === "xhigh") { - effort = "high"; - } - if (effort !== "high" && effort !== "medium") { - effort = "medium"; - } - } - - if (!supportsXhigh && effort === "xhigh") { - effort = "high"; - } - - if (!supportsNone && effort === "none") { - effort = "low"; - } - - if (isCodex && effort === "minimal") { - effort = "low"; - } - +function getReasoningConfig(model: string, options: CodexRequestOptions): ReasoningConfig { return { - effort, + effort: clampReasoningEffort(model, options.reasoningEffort as ReasoningConfig["effort"]), summary: options.reasoningSummary ?? "auto", }; } @@ -213,9 +84,6 @@ export async function transformRequestBody( options: CodexRequestOptions = {}, prompt?: { instructions: string; developerMessages: string[] }, ): Promise { - const normalizedModel = normalizeModel(body.model); - - body.model = normalizedModel; body.store = false; body.stream = true; @@ -270,7 +138,7 @@ export async function transformRequestBody( } if (options.reasoningEffort !== undefined) { - const reasoningConfig = getReasoningConfig(normalizedModel, options); + const reasoningConfig = getReasoningConfig(body.model, options); body.reasoning = { ...body.reasoning, ...reasoningConfig, diff --git a/packages/ai/test/openai-codex.test.ts b/packages/ai/test/openai-codex.test.ts index c2b91dd8..9252656b 100644 --- a/packages/ai/test/openai-codex.test.ts +++ b/packages/ai/test/openai-codex.test.ts @@ -3,11 +3,7 @@ import { tmpdir } from "node:os"; import { join } from "node:path"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { getCodexInstructions } from "../src/providers/openai-codex/prompts/codex.js"; -import { - normalizeModel, - type RequestBody, - transformRequestBody, -} from "../src/providers/openai-codex/request-transformer.js"; 
+import { type RequestBody, transformRequestBody } from "../src/providers/openai-codex/request-transformer.js"; import { parseCodexError } from "../src/providers/openai-codex/response-handler.js"; const DEFAULT_PROMPT_PREFIX = @@ -59,9 +55,21 @@ describe("openai-codex request transformer", () => { }); }); -describe("openai-codex model normalization", () => { - it("maps space-separated codex-mini names to codex-mini-latest", () => { - expect(normalizeModel("gpt 5 codex mini")).toBe("codex-mini-latest"); +describe("openai-codex reasoning effort clamping", () => { + it("clamps gpt-5.1 xhigh to high", async () => { + const body: RequestBody = { model: "gpt-5.1", input: [] }; + const transformed = await transformRequestBody(body, { reasoningEffort: "xhigh" }); + expect(transformed.reasoning?.effort).toBe("high"); + }); + + it("clamps gpt-5.1-codex-mini to medium/high only", async () => { + const body: RequestBody = { model: "gpt-5.1-codex-mini", input: [] }; + + const low = await transformRequestBody({ ...body }, { reasoningEffort: "low" }); + expect(low.reasoning?.effort).toBe("medium"); + + const xhigh = await transformRequestBody({ ...body }, { reasoningEffort: "xhigh" }); + expect(xhigh.reasoning?.effort).toBe("high"); }); }); From 09471ebc7d28bc7d65546f368e38320e7030bb8c Mon Sep 17 00:00:00 2001 From: Mario Zechner Date: Wed, 7 Jan 2026 16:11:49 +0100 Subject: [PATCH 2/8] feat(coding-agent): add ctx.ui.setEditorComponent() extension API - Add setEditorComponent() to ctx.ui for custom editor components - Add CustomEditor base class for extensions (handles app keybindings) - Add keybindings parameter to ctx.ui.custom() factory (breaking change) - Add modal-editor.ts example (vim-like modes) - Add rainbow-editor.ts example (animated text highlighting) - Update docs: extensions.md, tui.md Pattern 7 - Clean up terminal on TUI render errors --- .pi/prompts/pr.md | 7 +- packages/coding-agent/CHANGELOG.md | 5 + packages/coding-agent/docs/extensions.md | 51 +++++- 
packages/coding-agent/docs/tui.md | 85 +++++++++- .../examples/extensions/README.md | 1 + .../examples/extensions/handoff.ts | 2 +- .../examples/extensions/modal-editor.ts | 85 ++++++++++ .../examples/extensions/preset.ts | 2 +- .../coding-agent/examples/extensions/qna.ts | 2 +- .../examples/extensions/rainbow-editor.ts | 95 +++++++++++ .../coding-agent/examples/extensions/snake.ts | 2 +- .../coding-agent/examples/extensions/todo.ts | 2 +- .../coding-agent/examples/extensions/tools.ts | 2 +- .../coding-agent/src/core/extensions/index.ts | 3 + .../src/core/extensions/loader.ts | 1 + .../src/core/extensions/runner.ts | 1 + .../coding-agent/src/core/extensions/types.ts | 42 ++++- packages/coding-agent/src/core/sdk.ts | 1 + packages/coding-agent/src/index.ts | 2 + .../interactive/components/custom-editor.ts | 2 +- .../src/modes/interactive/interactive-mode.ts | 158 +++++++++++++----- .../coding-agent/src/modes/rpc/rpc-mode.ts | 4 + .../test/compaction-extensions.test.ts | 1 + packages/tui/CHANGELOG.md | 4 + packages/tui/src/editor-component.ts | 65 +++++++ packages/tui/src/index.ts | 2 + packages/tui/src/tui.ts | 14 +- 27 files changed, 578 insertions(+), 63 deletions(-) create mode 100644 packages/coding-agent/examples/extensions/modal-editor.ts create mode 100644 packages/coding-agent/examples/extensions/rainbow-editor.ts create mode 100644 packages/tui/src/editor-component.ts diff --git a/.pi/prompts/pr.md b/.pi/prompts/pr.md index e5c41884..f7e2a378 100644 --- a/.pi/prompts/pr.md +++ b/.pi/prompts/pr.md @@ -8,12 +8,13 @@ For each PR URL, do the following in order: 2. Identify any linked issues referenced in the PR body, comments, commit messages, or cross links. Read each issue in full, including all comments. 3. Analyze the PR diff. Read all relevant code files in full with no truncation. Include related code paths that are not in the diff but are required to validate behavior. 4. Check for a changelog entry in the relevant `packages/*/CHANGELOG.md` files. 
Report whether an entry exists. If missing, state that a changelog entry is required before merge and that you will add it if the user decides to merge. Follow the changelog format rules in AGENTS.md. -5. Provide a structured review with these sections: +5. Check if packages/coding-agent/README.md, packages/coding-agent/docs/*.md, packages/coding-agent/examples/**/*.md require modification. This is usually the case when existing features have been changed, or new features have been added. +6. Provide a structured review with these sections: - Good: solid choices or improvements - Bad: concrete issues, regressions, missing tests, or risks - Ugly: subtle or high impact problems -6. Add Questions or Assumptions if anything is unclear. -7. Add Change summary and Tests. +7. Add Questions or Assumptions if anything is unclear. +8. Add Change summary and Tests. Output format per PR: PR: diff --git a/packages/coding-agent/CHANGELOG.md b/packages/coding-agent/CHANGELOG.md index 8eecb31e..92561529 100644 --- a/packages/coding-agent/CHANGELOG.md +++ b/packages/coding-agent/CHANGELOG.md @@ -2,9 +2,14 @@ ## [Unreleased] +### Breaking Changes + +- `ctx.ui.custom()` factory signature changed from `(tui, theme, done)` to `(tui, theme, keybindings, done)` for consistency with other input-handling factories + ### Added - Extension UI dialogs (`ctx.ui.select()`, `ctx.ui.confirm()`, `ctx.ui.input()`) now support a `timeout` option that auto-dismisses the dialog with a live countdown display. Simpler alternative to `AbortSignal` for timed dialogs. +- Extensions can now provide custom editor components via `ctx.ui.setEditorComponent((tui, theme, keybindings) => ...)`. Extend `CustomEditor` for full app keybinding support (escape, ctrl+d, model switching, etc.). See `examples/extensions/modal-editor.ts`, `examples/extensions/rainbow-editor.ts`, and `docs/tui.md` Pattern 7. 
## [0.37.8] - 2026-01-07 diff --git a/packages/coding-agent/docs/extensions.md b/packages/coding-agent/docs/extensions.md index a85e8d7f..75718389 100644 --- a/packages/coding-agent/docs/extensions.md +++ b/packages/coding-agent/docs/extensions.md @@ -1170,6 +1170,10 @@ ctx.ui.setTitle("pi - my-project"); // Editor text ctx.ui.setEditorText("Prefill text"); const current = ctx.ui.getEditorText(); + +// Custom editor (vim mode, emacs mode, etc.) +ctx.ui.setEditorComponent((tui, theme, keybindings) => new VimEditor(tui, theme, keybindings)); +ctx.ui.setEditorComponent(undefined); // Restore default editor ``` **Examples:** @@ -1177,6 +1181,7 @@ const current = ctx.ui.getEditorText(); - `ctx.ui.setWidget()`: [plan-mode.ts](../examples/extensions/plan-mode.ts) - `ctx.ui.setFooter()`: [custom-footer.ts](../examples/extensions/custom-footer.ts) - `ctx.ui.setHeader()`: [custom-header.ts](../examples/extensions/custom-header.ts) +- `ctx.ui.setEditorComponent()`: [modal-editor.ts](../examples/extensions/modal-editor.ts) ### Custom Components @@ -1185,7 +1190,7 @@ For complex UI, use `ctx.ui.custom()`. This temporarily replaces the editor with ```typescript import { Text, Component } from "@mariozechner/pi-tui"; -const result = await ctx.ui.custom((tui, theme, done) => { +const result = await ctx.ui.custom((tui, theme, keybindings, done) => { const text = new Text("Press Enter to confirm, Escape to cancel", 1, 1); text.onKey = (key) => { @@ -1205,12 +1210,56 @@ if (result) { The callback receives: - `tui` - TUI instance (for screen dimensions, focus management) - `theme` - Current theme for styling +- `keybindings` - App keybinding manager (for checking shortcuts) - `done(value)` - Call to close component and return value See [tui.md](tui.md) for the full component API. 
**Examples:** [handoff.ts](../examples/extensions/handoff.ts), [plan-mode.ts](../examples/extensions/plan-mode.ts), [preset.ts](../examples/extensions/preset.ts), [qna.ts](../examples/extensions/qna.ts), [snake.ts](../examples/extensions/snake.ts), [todo.ts](../examples/extensions/todo.ts), [tools.ts](../examples/extensions/tools.ts) +### Custom Editor + +Replace the main input editor with a custom implementation (vim mode, emacs mode, etc.): + +```typescript +import { CustomEditor, type ExtensionAPI } from "@mariozechner/pi-coding-agent"; +import { matchesKey } from "@mariozechner/pi-tui"; + +class VimEditor extends CustomEditor { + private mode: "normal" | "insert" = "insert"; + + handleInput(data: string): void { + if (matchesKey(data, "escape") && this.mode === "insert") { + this.mode = "normal"; + return; + } + if (this.mode === "normal" && data === "i") { + this.mode = "insert"; + return; + } + super.handleInput(data); // App keybindings + text editing + } +} + +export default function (pi: ExtensionAPI) { + pi.on("session_start", (_event, ctx) => { + ctx.ui.setEditorComponent((_tui, theme, keybindings) => + new VimEditor(theme, keybindings) + ); + }); +} +``` + +**Key points:** +- Extend `CustomEditor` (not base `Editor`) to get app keybindings (escape to abort, ctrl+d, model switching) +- Call `super.handleInput(data)` for keys you don't handle +- Factory receives `theme` and `keybindings` from the app +- Pass `undefined` to restore default: `ctx.ui.setEditorComponent(undefined)` + +See [tui.md](tui.md) Pattern 7 for a complete example with mode indicator. 
+ +**Examples:** [modal-editor.ts](../examples/extensions/modal-editor.ts) + ### Message Rendering Register a custom renderer for messages with your `customType`: diff --git a/packages/coding-agent/docs/tui.md b/packages/coding-agent/docs/tui.md index 0db6d97d..b0c3651f 100644 --- a/packages/coding-agent/docs/tui.md +++ b/packages/coding-agent/docs/tui.md @@ -361,7 +361,7 @@ pi.registerCommand("pick", { { value: "opt3", label: "Option 3" }, // description is optional ]; - const result = await ctx.ui.custom((tui, theme, done) => { + const result = await ctx.ui.custom((tui, theme, _kb, done) => { const container = new Container(); // Top border @@ -413,7 +413,7 @@ import { BorderedLoader } from "@mariozechner/pi-coding-agent"; pi.registerCommand("fetch", { handler: async (_args, ctx) => { - const result = await ctx.ui.custom((tui, theme, done) => { + const result = await ctx.ui.custom((tui, theme, _kb, done) => { const loader = new BorderedLoader(tui, theme, "Fetching data..."); loader.onAbort = () => done(null); @@ -451,7 +451,7 @@ pi.registerCommand("settings", { { id: "color", label: "Color output", currentValue: "on", values: ["on", "off"] }, ]; - await ctx.ui.custom((_tui, theme, done) => { + await ctx.ui.custom((_tui, theme, _kb, done) => { const container = new Container(); container.addChild(new Text(theme.fg("accent", theme.bold("Settings")), 1, 1)); @@ -541,9 +541,85 @@ ctx.ui.setFooter(undefined); **Examples:** [custom-footer.ts](../examples/extensions/custom-footer.ts) +### Pattern 7: Custom Editor (vim mode, etc.) + +Replace the main input editor with a custom implementation. Useful for modal editing (vim), different keybindings (emacs), or specialized input handling. 
+ +```typescript +import { CustomEditor, type ExtensionAPI } from "@mariozechner/pi-coding-agent"; +import { matchesKey, truncateToWidth } from "@mariozechner/pi-tui"; + +type Mode = "normal" | "insert"; + +class VimEditor extends CustomEditor { + private mode: Mode = "insert"; + + handleInput(data: string): void { + // Escape: switch to normal mode, or pass through for app handling + if (matchesKey(data, "escape")) { + if (this.mode === "insert") { + this.mode = "normal"; + return; + } + // In normal mode, escape aborts agent (handled by CustomEditor) + super.handleInput(data); + return; + } + + // Insert mode: pass everything to CustomEditor + if (this.mode === "insert") { + super.handleInput(data); + return; + } + + // Normal mode: vim-style navigation + switch (data) { + case "i": this.mode = "insert"; return; + case "h": super.handleInput("\x1b[D"); return; // Left + case "j": super.handleInput("\x1b[B"); return; // Down + case "k": super.handleInput("\x1b[A"); return; // Up + case "l": super.handleInput("\x1b[C"); return; // Right + } + // Pass unhandled keys to super (ctrl+c, etc.), but filter printable chars + if (data.length === 1 && data.charCodeAt(0) >= 32) return; + super.handleInput(data); + } + + render(width: number): string[] { + const lines = super.render(width); + // Add mode indicator to bottom border (use truncateToWidth for ANSI-safe truncation) + if (lines.length > 0) { + const label = this.mode === "normal" ? " NORMAL " : " INSERT "; + const lastLine = lines[lines.length - 1]!; + // Pass "" as ellipsis to avoid adding "..." 
when truncating + lines[lines.length - 1] = truncateToWidth(lastLine, width - label.length, "") + label; + } + return lines; + } +} + +export default function (pi: ExtensionAPI) { + pi.on("session_start", (_event, ctx) => { + // Factory receives theme and keybindings from the app + ctx.ui.setEditorComponent((tui, theme, keybindings) => + new VimEditor(theme, keybindings) + ); + }); +} +``` + +**Key points:** + +- **Extend `CustomEditor`** (not base `Editor`) to get app keybindings (escape to abort, ctrl+d to exit, model switching, etc.) +- **Call `super.handleInput(data)`** for keys you don't handle +- **Factory pattern**: `setEditorComponent` receives a factory function that gets `tui`, `theme`, and `keybindings` +- **Pass `undefined`** to restore the default editor: `ctx.ui.setEditorComponent(undefined)` + +**Examples:** [modal-editor.ts](../examples/extensions/modal-editor.ts) + ## Key Rules -1. **Always use theme from callback** - Don't import theme directly. Use `theme` from the `ctx.ui.custom((tui, theme, done) => ...)` callback. +1. **Always use theme from callback** - Don't import theme directly. Use `theme` from the `ctx.ui.custom((tui, theme, keybindings, done) => ...)` callback. 2. **Always type DynamicBorder color param** - Write `(s: string) => theme.fg("accent", s)`, not `(s) => theme.fg("accent", s)`. 
@@ -560,5 +636,6 @@ ctx.ui.setFooter(undefined); - **Settings toggles**: [examples/extensions/tools.ts](../examples/extensions/tools.ts) - SettingsList for tool enable/disable - **Status indicators**: [examples/extensions/plan-mode.ts](../examples/extensions/plan-mode.ts) - setStatus and setWidget - **Custom footer**: [examples/extensions/custom-footer.ts](../examples/extensions/custom-footer.ts) - setFooter with stats +- **Custom editor**: [examples/extensions/modal-editor.ts](../examples/extensions/modal-editor.ts) - Vim-like modal editing - **Snake game**: [examples/extensions/snake.ts](../examples/extensions/snake.ts) - Full game with keyboard input, game loop - **Custom tool rendering**: [examples/extensions/todo.ts](../examples/extensions/todo.ts) - renderCall and renderResult diff --git a/packages/coding-agent/examples/extensions/README.md b/packages/coding-agent/examples/extensions/README.md index 91ce0918..dd696aef 100644 --- a/packages/coding-agent/examples/extensions/README.md +++ b/packages/coding-agent/examples/extensions/README.md @@ -45,6 +45,7 @@ cp permission-gate.ts ~/.pi/agent/extensions/ | `snake.ts` | Snake game with custom UI, keyboard handling, and session persistence | | `send-user-message.ts` | Demonstrates `pi.sendUserMessage()` for sending user messages from extensions | | `timed-confirm.ts` | Demonstrates AbortSignal for auto-dismissing `ctx.ui.confirm()` and `ctx.ui.select()` dialogs | +| `modal-editor.ts` | Custom vim-like modal editor via `ctx.ui.setEditorComponent()` | ### Git Integration diff --git a/packages/coding-agent/examples/extensions/handoff.ts b/packages/coding-agent/examples/extensions/handoff.ts index 09c7227c..f8559a87 100644 --- a/packages/coding-agent/examples/extensions/handoff.ts +++ b/packages/coding-agent/examples/extensions/handoff.ts @@ -75,7 +75,7 @@ export default function (pi: ExtensionAPI) { const currentSessionFile = ctx.sessionManager.getSessionFile(); // Generate the handoff prompt with loader UI - const 
result = await ctx.ui.custom((tui, theme, done) => { + const result = await ctx.ui.custom((tui, theme, _kb, done) => { const loader = new BorderedLoader(tui, theme, `Generating handoff prompt...`); loader.onAbort = () => done(null); diff --git a/packages/coding-agent/examples/extensions/modal-editor.ts b/packages/coding-agent/examples/extensions/modal-editor.ts new file mode 100644 index 00000000..ad060269 --- /dev/null +++ b/packages/coding-agent/examples/extensions/modal-editor.ts @@ -0,0 +1,85 @@ +/** + * Modal Editor - vim-like modal editing example + * + * Usage: pi --extension ./examples/extensions/modal-editor.ts + * + * - Escape: insert → normal mode (in normal mode, aborts agent) + * - i: normal → insert mode + * - hjkl: navigation in normal mode + * - ctrl+c, ctrl+d, etc. work in both modes + */ + +import { CustomEditor, type ExtensionAPI } from "@mariozechner/pi-coding-agent"; +import { matchesKey, truncateToWidth, visibleWidth } from "@mariozechner/pi-tui"; + +// Normal mode key mappings: key -> escape sequence (or null for mode switch) +const NORMAL_KEYS: Record = { + h: "\x1b[D", // left + j: "\x1b[B", // down + k: "\x1b[A", // up + l: "\x1b[C", // right + "0": "\x01", // line start + $: "\x05", // line end + x: "\x1b[3~", // delete char + i: null, // insert mode + a: null, // append (insert + right) +}; + +class ModalEditor extends CustomEditor { + private mode: "normal" | "insert" = "insert"; + + handleInput(data: string): void { + // Escape toggles to normal mode, or passes through for app handling + if (matchesKey(data, "escape")) { + if (this.mode === "insert") { + this.mode = "normal"; + } else { + super.handleInput(data); // abort agent, etc. 
+ } + return; + } + + // Insert mode: pass everything through + if (this.mode === "insert") { + super.handleInput(data); + return; + } + + // Normal mode: check mapped keys + if (data in NORMAL_KEYS) { + const seq = NORMAL_KEYS[data]; + if (data === "i") { + this.mode = "insert"; + } else if (data === "a") { + this.mode = "insert"; + super.handleInput("\x1b[C"); // move right first + } else if (seq) { + super.handleInput(seq); + } + return; + } + + // Pass control sequences (ctrl+c, etc.) to super, ignore printable chars + if (data.length === 1 && data.charCodeAt(0) >= 32) return; + super.handleInput(data); + } + + render(width: number): string[] { + const lines = super.render(width); + if (lines.length === 0) return lines; + + // Add mode indicator to bottom border + const label = this.mode === "normal" ? " NORMAL " : " INSERT "; + const last = lines.length - 1; + if (visibleWidth(lines[last]!) >= label.length) { + lines[last] = truncateToWidth(lines[last]!, width - label.length, "") + label; + } + return lines; + } +} + +export default function (pi: ExtensionAPI) { + pi.on("session_start", (_event, ctx) => { + ctx.ui.setEditorComponent((_tui, theme, kb) => new ModalEditor(theme, kb)); + }); +} diff --git a/packages/coding-agent/examples/extensions/preset.ts b/packages/coding-agent/examples/extensions/preset.ts index befc7560..32e02eb5 100644 --- a/packages/coding-agent/examples/extensions/preset.ts +++ b/packages/coding-agent/examples/extensions/preset.ts @@ -206,7 +206,7 @@ export default function presetExtension(pi: ExtensionAPI) { description: "Clear active preset, restore defaults", }); - const result = await ctx.ui.custom((tui, theme, done) => { + const result = await ctx.ui.custom((tui, theme, _kb, done) => { const container = new Container(); container.addChild(new DynamicBorder((str) => theme.fg("accent", str))); diff --git a/packages/coding-agent/examples/extensions/qna.ts b/packages/coding-agent/examples/extensions/qna.ts index 39ae902d..fc80c41f 100644 
--- a/packages/coding-agent/examples/extensions/qna.ts +++ b/packages/coding-agent/examples/extensions/qna.ts @@ -71,7 +71,7 @@ export default function (pi: ExtensionAPI) { } // Run extraction with loader UI - const result = await ctx.ui.custom((tui, theme, done) => { + const result = await ctx.ui.custom((tui, theme, _kb, done) => { const loader = new BorderedLoader(tui, theme, `Extracting questions using ${ctx.model!.id}...`); loader.onAbort = () => done(null); diff --git a/packages/coding-agent/examples/extensions/rainbow-editor.ts b/packages/coding-agent/examples/extensions/rainbow-editor.ts new file mode 100644 index 00000000..060a393c --- /dev/null +++ b/packages/coding-agent/examples/extensions/rainbow-editor.ts @@ -0,0 +1,95 @@ +/** + * Rainbow Editor - highlights "ultrathink" with animated shine effect + * + * Usage: pi --extension ./examples/extensions/rainbow-editor.ts + */ + +import { CustomEditor, type ExtensionAPI, type KeybindingsManager } from "@mariozechner/pi-coding-agent"; +import type { EditorTheme, TUI } from "@mariozechner/pi-tui"; + +// Base colors (coral → yellow → green → teal → blue → purple → pink) +const COLORS: [number, number, number][] = [ + [233, 137, 115], // coral + [228, 186, 103], // yellow + [141, 192, 122], // green + [102, 194, 179], // teal + [121, 157, 207], // blue + [157, 134, 195], // purple + [206, 130, 172], // pink +]; +const RESET = "\x1b[0m"; + +function brighten(rgb: [number, number, number], factor: number): string { + const [r, g, b] = rgb.map((c) => Math.round(c + (255 - c) * factor)); + return `\x1b[38;2;${r};${g};${b}m`; +} + +function colorize(text: string, shinePos: number): string { + return ( + [...text] + .map((c, i) => { + const baseColor = COLORS[i % COLORS.length]!; + // 3-letter shine: center bright, adjacent dimmer + let factor = 0; + if (shinePos >= 0) { + const dist = Math.abs(i - shinePos); + if (dist === 0) factor = 0.7; + else if (dist === 1) factor = 0.35; + } + return `${brighten(baseColor, 
factor)}${c}`; + }) + .join("") + RESET + ); +} + +class RainbowEditor extends CustomEditor { + private animationTimer?: ReturnType; + private tui: TUI; + private frame = 0; + + constructor(tui: TUI, theme: EditorTheme, keybindings: KeybindingsManager) { + super(theme, keybindings); + this.tui = tui; + } + + private hasUltrathink(): boolean { + return /ultrathink/i.test(this.getText()); + } + + private startAnimation(): void { + if (this.animationTimer) return; + this.animationTimer = setInterval(() => { + this.frame++; + this.tui.requestRender(); + }, 60); + } + + private stopAnimation(): void { + if (this.animationTimer) { + clearInterval(this.animationTimer); + this.animationTimer = undefined; + } + } + + handleInput(data: string): void { + super.handleInput(data); + if (this.hasUltrathink()) { + this.startAnimation(); + } else { + this.stopAnimation(); + } + } + + render(width: number): string[] { + // Cycle: 10 shine positions + 10 pause frames + const cycle = this.frame % 20; + const shinePos = cycle < 10 ? 
cycle : -1; // -1 means no shine (pause) + return super.render(width).map((line) => line.replace(/ultrathink/gi, (m) => colorize(m, shinePos))); + } +} + +export default function (pi: ExtensionAPI) { + pi.on("session_start", (_event, ctx) => { + ctx.ui.setEditorComponent((tui, theme, kb) => new RainbowEditor(tui, theme, kb)); + }); +} diff --git a/packages/coding-agent/examples/extensions/snake.ts b/packages/coding-agent/examples/extensions/snake.ts index 7f0d3cdc..4378f758 100644 --- a/packages/coding-agent/examples/extensions/snake.ts +++ b/packages/coding-agent/examples/extensions/snake.ts @@ -327,7 +327,7 @@ export default function (pi: ExtensionAPI) { } } - await ctx.ui.custom((tui, _theme, done) => { + await ctx.ui.custom((tui, _theme, _kb, done) => { return new SnakeComponent( tui, () => done(undefined), diff --git a/packages/coding-agent/examples/extensions/todo.ts b/packages/coding-agent/examples/extensions/todo.ts index 8b85582e..346ab93e 100644 --- a/packages/coding-agent/examples/extensions/todo.ts +++ b/packages/coding-agent/examples/extensions/todo.ts @@ -291,7 +291,7 @@ export default function (pi: ExtensionAPI) { return; } - await ctx.ui.custom((_tui, theme, done) => { + await ctx.ui.custom((_tui, theme, _kb, done) => { return new TodoListComponent(todos, theme, () => done()); }); }, diff --git a/packages/coding-agent/examples/extensions/tools.ts b/packages/coding-agent/examples/extensions/tools.ts index 7a79bb3f..dbd47377 100644 --- a/packages/coding-agent/examples/extensions/tools.ts +++ b/packages/coding-agent/examples/extensions/tools.ts @@ -69,7 +69,7 @@ export default function toolsExtension(pi: ExtensionAPI) { // Refresh tool list allTools = pi.getAllTools(); - await ctx.ui.custom((tui, theme, done) => { + await ctx.ui.custom((tui, theme, _kb, done) => { // Build settings items for each tool const items: SettingItem[] = allTools.map((tool) => ({ id: tool, diff --git a/packages/coding-agent/src/core/extensions/index.ts 
b/packages/coding-agent/src/core/extensions/index.ts index d62ef0a0..66f8756f 100644 --- a/packages/coding-agent/src/core/extensions/index.ts +++ b/packages/coding-agent/src/core/extensions/index.ts @@ -11,6 +11,8 @@ export type { // Re-exports AgentToolResult, AgentToolUpdateCallback, + // App keybindings (for custom editors) + AppAction, AppendEntryHandler, BashToolResultEvent, BeforeAgentStartEvent, @@ -42,6 +44,7 @@ export type { GetAllToolsHandler, GetThinkingLevelHandler, GrepToolResultEvent, + KeybindingsManager, LoadExtensionsResult, // Loaded Extension LoadedExtension, diff --git a/packages/coding-agent/src/core/extensions/loader.ts b/packages/coding-agent/src/core/extensions/loader.ts index 03042a87..16460b16 100644 --- a/packages/coding-agent/src/core/extensions/loader.ts +++ b/packages/coding-agent/src/core/extensions/loader.ts @@ -99,6 +99,7 @@ function createNoOpUIContext(): ExtensionUIContext { setEditorText: () => {}, getEditorText: () => "", editor: async () => undefined, + setEditorComponent: () => {}, get theme() { return theme; }, diff --git a/packages/coding-agent/src/core/extensions/runner.ts b/packages/coding-agent/src/core/extensions/runner.ts index fffe126d..79ddcd65 100644 --- a/packages/coding-agent/src/core/extensions/runner.ts +++ b/packages/coding-agent/src/core/extensions/runner.ts @@ -74,6 +74,7 @@ const noOpUIContext: ExtensionUIContext = { setEditorText: () => {}, getEditorText: () => "", editor: async () => undefined, + setEditorComponent: () => {}, get theme() { return theme; }, diff --git a/packages/coding-agent/src/core/extensions/types.ts b/packages/coding-agent/src/core/extensions/types.ts index 01c2f9ab..dd5de3ab 100644 --- a/packages/coding-agent/src/core/extensions/types.ts +++ b/packages/coding-agent/src/core/extensions/types.ts @@ -15,12 +15,13 @@ import type { ThinkingLevel, } from "@mariozechner/pi-agent-core"; import type { ImageContent, Model, TextContent, ToolResultMessage } from "@mariozechner/pi-ai"; -import type 
{ Component, KeyId, TUI } from "@mariozechner/pi-tui"; +import type { Component, EditorComponent, EditorTheme, KeyId, TUI } from "@mariozechner/pi-tui"; import type { Static, TSchema } from "@sinclair/typebox"; import type { Theme } from "../../modes/interactive/theme/theme.js"; import type { CompactionPreparation, CompactionResult } from "../compaction/index.js"; import type { EventBus } from "../event-bus.js"; import type { ExecOptions, ExecResult } from "../exec.js"; +import type { AppAction, KeybindingsManager } from "../keybindings.js"; import type { CustomMessage } from "../messages.js"; import type { ModelRegistry } from "../model-registry.js"; import type { @@ -41,6 +42,7 @@ import type { export type { ExecOptions, ExecResult } from "../exec.js"; export type { AgentToolResult, AgentToolUpdateCallback }; +export type { AppAction, KeybindingsManager } from "../keybindings.js"; // ============================================================================ // UI Context @@ -92,6 +94,7 @@ export interface ExtensionUIContext { factory: ( tui: TUI, theme: Theme, + keybindings: KeybindingsManager, done: (result: T) => void, ) => (Component & { dispose?(): void }) | Promise, ): Promise; @@ -105,6 +108,43 @@ export interface ExtensionUIContext { /** Show a multi-line editor for text editing. */ editor(title: string, prefill?: string): Promise; + /** + * Set a custom editor component via factory function. + * Pass undefined to restore the default editor. + * + * The factory receives: + * - `theme`: EditorTheme for styling borders and autocomplete + * - `keybindings`: KeybindingsManager for app-level keybindings + * + * For full app keybinding support (escape, ctrl+d, model switching, etc.), + * extend `CustomEditor` from `@mariozechner/pi-coding-agent` and call + * `super.handleInput(data)` for keys you don't handle. 
+ * + * @example + * ```ts + * import { CustomEditor } from "@mariozechner/pi-coding-agent"; + * + * class VimEditor extends CustomEditor { + * private mode: "normal" | "insert" = "insert"; + * + * handleInput(data: string): void { + * if (this.mode === "normal") { + * // Handle vim normal mode keys... + * if (data === "i") { this.mode = "insert"; return; } + * } + * super.handleInput(data); // App keybindings + text editing + * } + * } + * + * ctx.ui.setEditorComponent((tui, theme, keybindings) => + * new VimEditor(theme, keybindings) + * ); + * ``` + */ + setEditorComponent( + factory: ((tui: TUI, theme: EditorTheme, keybindings: KeybindingsManager) => EditorComponent) | undefined, + ): void; + /** Get the current theme for styling. */ readonly theme: Theme; } diff --git a/packages/coding-agent/src/core/sdk.ts b/packages/coding-agent/src/core/sdk.ts index 34b456ad..97812b58 100644 --- a/packages/coding-agent/src/core/sdk.ts +++ b/packages/coding-agent/src/core/sdk.ts @@ -531,6 +531,7 @@ export async function createAgentSession(options: CreateAgentSessionOptions = {} setEditorText: () => {}, getEditorText: () => "", editor: async () => undefined, + setEditorComponent: () => {}, get theme() { return {} as any; }, diff --git a/packages/coding-agent/src/index.ts b/packages/coding-agent/src/index.ts index 3fc4ae39..7b81f39c 100644 --- a/packages/coding-agent/src/index.ts +++ b/packages/coding-agent/src/index.ts @@ -40,6 +40,7 @@ export type { AgentStartEvent, AgentToolResult, AgentToolUpdateCallback, + AppAction, BeforeAgentStartEvent, ContextEvent, ExecOptions, @@ -55,6 +56,7 @@ export type { ExtensionShortcut, ExtensionUIContext, ExtensionUIDialogOptions, + KeybindingsManager, LoadExtensionsResult, LoadedExtension, MessageRenderer, diff --git a/packages/coding-agent/src/modes/interactive/components/custom-editor.ts b/packages/coding-agent/src/modes/interactive/components/custom-editor.ts index b9dc765b..93f5af78 100644 ---
a/packages/coding-agent/src/modes/interactive/components/custom-editor.ts +++ b/packages/coding-agent/src/modes/interactive/components/custom-editor.ts @@ -6,7 +6,7 @@ import type { AppAction, KeybindingsManager } from "../../../core/keybindings.js */ export class CustomEditor extends Editor { private keybindings: KeybindingsManager; - private actionHandlers: Map void> = new Map(); + public actionHandlers: Map void> = new Map(); // Special handlers that can be dynamically replaced public onEscape?: () => void; diff --git a/packages/coding-agent/src/modes/interactive/interactive-mode.ts b/packages/coding-agent/src/modes/interactive/interactive-mode.ts index 823d1c1c..6ae1f24f 100644 --- a/packages/coding-agent/src/modes/interactive/interactive-mode.ts +++ b/packages/coding-agent/src/modes/interactive/interactive-mode.ts @@ -9,7 +9,7 @@ import * as os from "node:os"; import * as path from "node:path"; import type { AgentMessage } from "@mariozechner/pi-agent-core"; import { type AssistantMessage, getOAuthProviders, type Message, type OAuthProvider } from "@mariozechner/pi-ai"; -import type { KeyId, SlashCommand } from "@mariozechner/pi-tui"; +import type { EditorComponent, EditorTheme, KeyId, SlashCommand } from "@mariozechner/pi-tui"; import { CombinedAutocompleteProvider, type Component, @@ -96,7 +96,9 @@ export class InteractiveMode { private chatContainer: Container; private pendingMessagesContainer: Container; private statusContainer: Container; - private editor: CustomEditor; + private defaultEditor: CustomEditor; + private editor: EditorComponent; + private autocompleteProvider: CombinedAutocompleteProvider | undefined; private editorContainer: Container; private footer: FooterComponent; private keybindings: KeybindingsManager; @@ -195,9 +197,10 @@ export class InteractiveMode { this.statusContainer = new Container(); this.widgetContainer = new Container(); this.keybindings = KeybindingsManager.create(); - this.editor = new CustomEditor(getEditorTheme(), 
this.keybindings); + this.defaultEditor = new CustomEditor(getEditorTheme(), this.keybindings); + this.editor = this.defaultEditor; this.editorContainer = new Container(); - this.editorContainer.addChild(this.editor); + this.editorContainer.addChild(this.editor as Component); this.footer = new FooterComponent(session); this.footer.setAutoCompactEnabled(session.autoCompactionEnabled); @@ -238,12 +241,12 @@ export class InteractiveMode { ); // Setup autocomplete - const autocompleteProvider = new CombinedAutocompleteProvider( + this.autocompleteProvider = new CombinedAutocompleteProvider( [...slashCommands, ...templateCommands, ...extensionCommands], process.cwd(), fdPath, ); - this.editor.setAutocompleteProvider(autocompleteProvider); + this.defaultEditor.setAutocompleteProvider(this.autocompleteProvider); } async init(): Promise { @@ -595,8 +598,8 @@ export class InteractiveMode { hasPendingMessages: () => this.session.pendingMessageCount > 0, }); - // Set up the extension shortcut handler on the editor - this.editor.onExtensionShortcut = (data: string) => { + // Set up the extension shortcut handler on the default editor + this.defaultEditor.onExtensionShortcut = (data: string) => { for (const [shortcutStr, shortcut] of shortcuts) { // Cast to KeyId - extension shortcuts use the same format if (matchesKey(data, shortcutStr as KeyId)) { @@ -753,6 +756,7 @@ export class InteractiveMode { setEditorText: (text) => this.editor.setText(text), getEditorText: () => this.editor.getText(), editor: (title, prefill) => this.showExtensionEditor(title, prefill), + setEditorComponent: (factory) => this.setCustomEditorComponent(factory), get theme() { return theme; }, @@ -918,6 +922,65 @@ export class InteractiveMode { this.ui.requestRender(); } + /** + * Set a custom editor component from an extension. + * Pass undefined to restore the default editor. 
+ */ + private setCustomEditorComponent( + factory: ((tui: TUI, theme: EditorTheme, keybindings: KeybindingsManager) => EditorComponent) | undefined, + ): void { + // Save text from current editor before switching + const currentText = this.editor.getText(); + + this.editorContainer.clear(); + + if (factory) { + // Create the custom editor with tui, theme, and keybindings + const newEditor = factory(this.ui, getEditorTheme(), this.keybindings); + + // Wire up callbacks from the default editor + newEditor.onSubmit = this.defaultEditor.onSubmit; + newEditor.onChange = this.defaultEditor.onChange; + + // Copy text from previous editor + newEditor.setText(currentText); + + // Copy appearance settings if supported + if (newEditor.borderColor !== undefined) { + newEditor.borderColor = this.defaultEditor.borderColor; + } + + // Set autocomplete if supported + if (newEditor.setAutocompleteProvider && this.autocompleteProvider) { + newEditor.setAutocompleteProvider(this.autocompleteProvider); + } + + // If extending CustomEditor, copy app-level handlers + // Use duck typing since instanceof fails across jiti module boundaries + const customEditor = newEditor as unknown as Record; + if ("actionHandlers" in customEditor && customEditor.actionHandlers instanceof Map) { + customEditor.onEscape = this.defaultEditor.onEscape; + customEditor.onCtrlD = this.defaultEditor.onCtrlD; + customEditor.onPasteImage = this.defaultEditor.onPasteImage; + customEditor.onExtensionShortcut = this.defaultEditor.onExtensionShortcut; + // Copy action handlers (clear, suspend, model switching, etc.) 
+ for (const [action, handler] of this.defaultEditor.actionHandlers) { + (customEditor.actionHandlers as Map void>).set(action, handler); + } + } + + this.editor = newEditor; + } else { + // Restore default editor with text from custom editor + this.defaultEditor.setText(currentText); + this.editor = this.defaultEditor; + } + + this.editorContainer.addChild(this.editor as Component); + this.ui.setFocus(this.editor as Component); + this.ui.requestRender(); + } + /** * Show a notification for extensions. */ @@ -938,6 +1001,7 @@ export class InteractiveMode { factory: ( tui: TUI, theme: Theme, + keybindings: KeybindingsManager, done: (result: T) => void, ) => (Component & { dispose?(): void }) | Promise, ): Promise { @@ -956,7 +1020,7 @@ export class InteractiveMode { resolve(result); }; - Promise.resolve(factory(this.ui, theme, close)).then((c) => { + Promise.resolve(factory(this.ui, theme, this.keybindings, close)).then((c) => { component = c; this.editorContainer.clear(); this.editorContainer.addChild(component); @@ -992,7 +1056,9 @@ export class InteractiveMode { // ========================================================================= private setupKeyHandlers(): void { - this.editor.onEscape = () => { + // Set up handlers on defaultEditor - they use this.editor for text access + // so they work correctly regardless of which editor is active + this.defaultEditor.onEscape = () => { if (this.loadingAnimation) { // Abort and restore queued messages to editor const { steering, followUp } = this.session.clearQueue(); @@ -1026,22 +1092,22 @@ export class InteractiveMode { }; // Register app action handlers - this.editor.onAction("clear", () => this.handleCtrlC()); - this.editor.onCtrlD = () => this.handleCtrlD(); - this.editor.onAction("suspend", () => this.handleCtrlZ()); - this.editor.onAction("cycleThinkingLevel", () => this.cycleThinkingLevel()); - this.editor.onAction("cycleModelForward", () => this.cycleModel("forward")); - 
this.editor.onAction("cycleModelBackward", () => this.cycleModel("backward")); + this.defaultEditor.onAction("clear", () => this.handleCtrlC()); + this.defaultEditor.onCtrlD = () => this.handleCtrlD(); + this.defaultEditor.onAction("suspend", () => this.handleCtrlZ()); + this.defaultEditor.onAction("cycleThinkingLevel", () => this.cycleThinkingLevel()); + this.defaultEditor.onAction("cycleModelForward", () => this.cycleModel("forward")); + this.defaultEditor.onAction("cycleModelBackward", () => this.cycleModel("backward")); // Global debug handler on TUI (works regardless of focus) this.ui.onDebug = () => this.handleDebugCommand(); - this.editor.onAction("selectModel", () => this.showModelSelector()); - this.editor.onAction("expandTools", () => this.toggleToolOutputExpansion()); - this.editor.onAction("toggleThinking", () => this.toggleThinkingBlockVisibility()); - this.editor.onAction("externalEditor", () => this.openExternalEditor()); - this.editor.onAction("followUp", () => this.handleFollowUp()); + this.defaultEditor.onAction("selectModel", () => this.showModelSelector()); + this.defaultEditor.onAction("expandTools", () => this.toggleToolOutputExpansion()); + this.defaultEditor.onAction("toggleThinking", () => this.toggleThinkingBlockVisibility()); + this.defaultEditor.onAction("externalEditor", () => this.openExternalEditor()); + this.defaultEditor.onAction("followUp", () => this.handleFollowUp()); - this.editor.onChange = (text: string) => { + this.defaultEditor.onChange = (text: string) => { const wasBashMode = this.isBashMode; this.isBashMode = text.trimStart().startsWith("!"); if (wasBashMode !== this.isBashMode) { @@ -1050,7 +1116,7 @@ export class InteractiveMode { }; // Handle clipboard image paste (triggered on Ctrl+V) - this.editor.onPasteImage = () => { + this.defaultEditor.onPasteImage = () => { this.handleClipboardImagePaste(); }; } @@ -1070,7 +1136,7 @@ export class InteractiveMode { fs.writeFileSync(filePath, Buffer.from(image.bytes)); // Insert 
file path directly - this.editor.insertTextAtCursor(filePath); + this.editor.insertTextAtCursor?.(filePath); this.ui.requestRender(); } catch { // Silently ignore clipboard errors (may not have permission, etc.) @@ -1078,7 +1144,7 @@ export class InteractiveMode { } private setupEditorSubmitHandler(): void { - this.editor.onSubmit = async (text: string) => { + this.defaultEditor.onSubmit = async (text: string) => { text = text.trim(); if (!text) return; @@ -1185,7 +1251,7 @@ export class InteractiveMode { this.editor.setText(text); return; } - this.editor.addToHistory(text); + this.editor.addToHistory?.(text); await this.handleBashCommand(command, isExcluded); this.isBashMode = false; this.updateEditorBorderColor(); @@ -1196,7 +1262,7 @@ export class InteractiveMode { // Queue input during compaction (extension commands execute immediately) if (this.session.isCompacting) { if (this.isExtensionCommand(text)) { - this.editor.addToHistory(text); + this.editor.addToHistory?.(text); this.editor.setText(""); await this.session.prompt(text); } else { @@ -1208,7 +1274,7 @@ export class InteractiveMode { // If streaming, use prompt() with steer behavior // This handles extension commands (execute immediately), prompt template expansion, and queueing if (this.session.isStreaming) { - this.editor.addToHistory(text); + this.editor.addToHistory?.(text); this.editor.setText(""); await this.session.prompt(text, { streamingBehavior: "steer" }); this.updatePendingMessagesDisplay(); @@ -1223,7 +1289,7 @@ export class InteractiveMode { if (this.onInputCallback) { this.onInputCallback(text); } - this.editor.addToHistory(text); + this.editor.addToHistory?.(text); }; } @@ -1393,8 +1459,8 @@ export class InteractiveMode { case "auto_compaction_start": { // Keep editor active; submissions are queued during compaction. 
// Set up escape to abort auto-compaction - this.autoCompactionEscapeHandler = this.editor.onEscape; - this.editor.onEscape = () => { + this.autoCompactionEscapeHandler = this.defaultEditor.onEscape; + this.defaultEditor.onEscape = () => { this.session.abortCompaction(); }; // Show compacting indicator with reason @@ -1414,7 +1480,7 @@ export class InteractiveMode { case "auto_compaction_end": { // Restore escape handler if (this.autoCompactionEscapeHandler) { - this.editor.onEscape = this.autoCompactionEscapeHandler; + this.defaultEditor.onEscape = this.autoCompactionEscapeHandler; this.autoCompactionEscapeHandler = undefined; } // Stop loader @@ -1446,8 +1512,8 @@ export class InteractiveMode { case "auto_retry_start": { // Set up escape to abort retry - this.retryEscapeHandler = this.editor.onEscape; - this.editor.onEscape = () => { + this.retryEscapeHandler = this.defaultEditor.onEscape; + this.defaultEditor.onEscape = () => { this.session.abortRetry(); }; // Show retry indicator @@ -1467,7 +1533,7 @@ export class InteractiveMode { case "auto_retry_end": { // Restore escape handler if (this.retryEscapeHandler) { - this.editor.onEscape = this.retryEscapeHandler; + this.defaultEditor.onEscape = this.retryEscapeHandler; this.retryEscapeHandler = undefined; } // Stop loader @@ -1565,7 +1631,7 @@ export class InteractiveMode { const userComponent = new UserMessageComponent(textContent); this.chatContainer.addChild(userComponent); if (options?.populateHistory) { - this.editor.addToHistory(textContent); + this.editor.addToHistory?.(textContent); } } break; @@ -1734,7 +1800,7 @@ export class InteractiveMode { // Queue input during compaction (extension commands execute immediately) if (this.session.isCompacting) { if (this.isExtensionCommand(text)) { - this.editor.addToHistory(text); + this.editor.addToHistory?.(text); this.editor.setText(""); await this.session.prompt(text); } else { @@ -1746,7 +1812,7 @@ export class InteractiveMode { // Alt+Enter queues a follow-up 
message (waits until agent finishes) // This handles extension commands (execute immediately), prompt template expansion, and queueing if (this.session.isStreaming) { - this.editor.addToHistory(text); + this.editor.addToHistory?.(text); this.editor.setText(""); await this.session.prompt(text, { streamingBehavior: "followUp" }); this.updatePendingMessagesDisplay(); @@ -1833,7 +1899,7 @@ export class InteractiveMode { return; } - const currentText = this.editor.getExpandedText(); + const currentText = this.editor.getExpandedText?.() ?? this.editor.getText(); const tmpFile = path.join(os.tmpdir(), `pi-editor-${Date.now()}.pi.md`); try { @@ -1934,7 +2000,7 @@ export class InteractiveMode { private queueCompactionMessage(text: string, mode: "steer" | "followUp"): void { this.compactionQueuedMessages.push({ text, mode }); - this.editor.addToHistory(text); + this.editor.addToHistory?.(text); this.editor.setText(""); this.updatePendingMessagesDisplay(); this.showStatus("Queued message for after compaction"); @@ -2253,10 +2319,10 @@ export class InteractiveMode { // Set up escape handler and loader if summarizing let summaryLoader: Loader | undefined; - const originalOnEscape = this.editor.onEscape; + const originalOnEscape = this.defaultEditor.onEscape; if (wantsSummary) { - this.editor.onEscape = () => { + this.defaultEditor.onEscape = () => { this.session.abortBranchSummary(); }; this.chatContainer.addChild(new Spacer(1)); @@ -2298,7 +2364,7 @@ export class InteractiveMode { summaryLoader.stop(); this.statusContainer.clear(); } - this.editor.onEscape = originalOnEscape; + this.defaultEditor.onEscape = originalOnEscape; } }, () => { @@ -2921,8 +2987,8 @@ export class InteractiveMode { this.statusContainer.clear(); // Set up escape handler during compaction - const originalOnEscape = this.editor.onEscape; - this.editor.onEscape = () => { + const originalOnEscape = this.defaultEditor.onEscape; + this.defaultEditor.onEscape = () => { this.session.abortCompaction(); }; @@ 
-2959,7 +3025,7 @@ export class InteractiveMode { } finally { compactingLoader.stop(); this.statusContainer.clear(); - this.editor.onEscape = originalOnEscape; + this.defaultEditor.onEscape = originalOnEscape; } void this.flushCompactionQueue({ willRetry: false }); } diff --git a/packages/coding-agent/src/modes/rpc/rpc-mode.ts b/packages/coding-agent/src/modes/rpc/rpc-mode.ts index 928d18f7..4906b695 100644 --- a/packages/coding-agent/src/modes/rpc/rpc-mode.ts +++ b/packages/coding-agent/src/modes/rpc/rpc-mode.ts @@ -219,6 +219,10 @@ export async function runRpcMode(session: AgentSession): Promise { }); }, + setEditorComponent(): void { + // Custom editor components not supported in RPC mode + }, + get theme() { return theme; }, diff --git a/packages/coding-agent/test/compaction-extensions.test.ts b/packages/coding-agent/test/compaction-extensions.test.ts index c062d806..bf906256 100644 --- a/packages/coding-agent/test/compaction-extensions.test.ts +++ b/packages/coding-agent/test/compaction-extensions.test.ts @@ -137,6 +137,7 @@ describe.skipIf(!API_KEY)("Compaction extensions", () => { setEditorText: () => {}, getEditorText: () => "", editor: async () => undefined, + setEditorComponent: () => {}, get theme() { return theme; }, diff --git a/packages/tui/CHANGELOG.md b/packages/tui/CHANGELOG.md index 2dbedd4d..843ce36a 100644 --- a/packages/tui/CHANGELOG.md +++ b/packages/tui/CHANGELOG.md @@ -2,6 +2,10 @@ ## [Unreleased] +### Added + +- `EditorComponent` interface for custom editor implementations + ## [0.37.8] - 2026-01-07 ### Added diff --git a/packages/tui/src/editor-component.ts b/packages/tui/src/editor-component.ts new file mode 100644 index 00000000..d59d2e29 --- /dev/null +++ b/packages/tui/src/editor-component.ts @@ -0,0 +1,65 @@ +import type { AutocompleteProvider } from "./autocomplete.js"; +import type { Component } from "./tui.js"; + +/** + * Interface for custom editor components. 
+ * + * This allows extensions to provide their own editor implementation + * (e.g., vim mode, emacs mode, custom keybindings) while maintaining + * compatibility with the core application. + */ +export interface EditorComponent extends Component { + // ========================================================================= + // Core text access (required) + // ========================================================================= + + /** Get the current text content */ + getText(): string; + + /** Set the text content */ + setText(text: string): void; + + // ========================================================================= + // Callbacks (required) + // ========================================================================= + + /** Called when user submits (e.g., Enter key) */ + onSubmit?: (text: string) => void; + + /** Called when text changes */ + onChange?: (text: string) => void; + + // ========================================================================= + // History support (optional) + // ========================================================================= + + /** Add text to history for up/down navigation */ + addToHistory?(text: string): void; + + // ========================================================================= + // Advanced text manipulation (optional) + // ========================================================================= + + /** Insert text at current cursor position */ + insertTextAtCursor?(text: string): void; + + /** + * Get text with any markers expanded (e.g., paste markers). + * Falls back to getText() if not implemented. 
+ */ + getExpandedText?(): string; + + // ========================================================================= + // Autocomplete support (optional) + // ========================================================================= + + /** Set the autocomplete provider */ + setAutocompleteProvider?(provider: AutocompleteProvider): void; + + // ========================================================================= + // Appearance (optional) + // ========================================================================= + + /** Border color function */ + borderColor?: (str: string) => string; +} diff --git a/packages/tui/src/index.ts b/packages/tui/src/index.ts index 23efc663..3a7944ca 100644 --- a/packages/tui/src/index.ts +++ b/packages/tui/src/index.ts @@ -20,6 +20,8 @@ export { type SettingItem, SettingsList, type SettingsListTheme } from "./compon export { Spacer } from "./components/spacer.js"; export { Text } from "./components/text.js"; export { TruncatedText } from "./components/truncated-text.js"; +// Editor component interface (for custom editors) +export type { EditorComponent } from "./editor-component.js"; // Keybindings export { DEFAULT_EDITOR_KEYBINDINGS, diff --git a/packages/tui/src/tui.ts b/packages/tui/src/tui.ts index fdce33d5..a7c086a7 100644 --- a/packages/tui/src/tui.ts +++ b/packages/tui/src/tui.ts @@ -332,7 +332,19 @@ export class TUI extends Container { ].join("\n"); fs.mkdirSync(path.dirname(crashLogPath), { recursive: true }); fs.writeFileSync(crashLogPath, crashData); - throw new Error(`Rendered line ${i} exceeds terminal width. 
Debug log written to ${crashLogPath}`); + + // Clean up terminal state before throwing + this.stop(); + + const errorMsg = [ + `Rendered line ${i} exceeds terminal width (${visibleWidth(line)} > ${width}).`, + "", + "This is likely caused by a custom TUI component not truncating its output.", + "Use visibleWidth() to measure and truncateToWidth() to truncate lines.", + "", + `Debug log written to: ${crashLogPath}`, + ].join("\n"); + throw new Error(errorMsg); } buffer += line; } From 615ed0ae2e926f8bc6fa484a22a77e3bdf72f178 Mon Sep 17 00:00:00 2001 From: Armin Ronacher Date: Wed, 7 Jan 2026 16:50:18 +0100 Subject: [PATCH 3/8] Fix compaction UX oddities when model switching (#535) --- .../coding-agent/src/core/agent-session.ts | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/packages/coding-agent/src/core/agent-session.ts b/packages/coding-agent/src/core/agent-session.ts index 9deedc53..88c66a61 100644 --- a/packages/coding-agent/src/core/agent-session.ts +++ b/packages/coding-agent/src/core/agent-session.ts @@ -1262,8 +1262,24 @@ export class AgentSession { const contextWindow = this.model?.contextWindow ?? 0; + // Skip overflow check if the message came from a different model. + // This handles the case where user switched from a smaller-context model (e.g. opus) + // to a larger-context model (e.g. codex) - the overflow error from the old model + // shouldn't trigger compaction for the new model. + const sameModel = + this.model && assistantMessage.provider === this.model.provider && assistantMessage.model === this.model.id; + + // Skip overflow check if the error is from before a compaction in the current path. + // This handles the case where an error was kept after compaction (in the "kept" region). + // The error shouldn't trigger another compaction since we already compacted. 
+ // Example: opus fails → switch to codex → compact → switch back to opus → opus error + // is still in context but shouldn't trigger compaction again. + const compactionEntry = this.sessionManager.getBranch().find((e) => e.type === "compaction"); + const errorIsFromBeforeCompaction = + compactionEntry && assistantMessage.timestamp < new Date(compactionEntry.timestamp).getTime(); + // Case 1: Overflow - LLM returned context overflow error - if (isContextOverflow(assistantMessage, contextWindow)) { + if (sameModel && !errorIsFromBeforeCompaction && isContextOverflow(assistantMessage, contextWindow)) { // Remove the error message from agent state (it IS saved to session for history, // but we don't want it in context for the retry) const messages = this.agent.state.messages; From f3b7b0b1790c84a9d76069c1d81acd0b0fb63b62 Mon Sep 17 00:00:00 2001 From: Mario Zechner Date: Wed, 7 Jan 2026 17:50:06 +0100 Subject: [PATCH 4/8] fix(tui): handle batched input over SSH with StdinBuffer Adds StdinBuffer class (adapted from OpenTUI, MIT license) to split batched stdin into individual sequences before they reach components. This fixes key presses being dropped when batched with release events, which commonly occurs over SSH due to network buffering. 
- Each handleInput() call now receives a single event - matchesKey() and isKeyRelease() work correctly without batching awareness - Properly buffers incomplete escape sequences across chunks - Handles bracketed paste mode Addresses #538 --- packages/tui/CHANGELOG.md | 5 + packages/tui/src/index.ts | 2 + packages/tui/src/stdin-buffer.ts | 386 ++++++++++++++++++++++ packages/tui/src/terminal.ts | 82 +++-- packages/tui/test/stdin-buffer.test.ts | 422 +++++++++++++++++++++++++ 5 files changed, 877 insertions(+), 20 deletions(-) create mode 100644 packages/tui/src/stdin-buffer.ts create mode 100644 packages/tui/test/stdin-buffer.test.ts diff --git a/packages/tui/CHANGELOG.md b/packages/tui/CHANGELOG.md index 843ce36a..68da443b 100644 --- a/packages/tui/CHANGELOG.md +++ b/packages/tui/CHANGELOG.md @@ -5,6 +5,11 @@ ### Added - `EditorComponent` interface for custom editor implementations +- `StdinBuffer` class to split batched stdin into individual sequences (adapted from [OpenTUI](https://github.com/anomalyco/opentui), MIT license) + +### Fixed + +- Key presses no longer dropped when batched with other events over SSH ([#538](https://github.com/badlogic/pi-mono/pull/538)) ## [0.37.8] - 2026-01-07 diff --git a/packages/tui/src/index.ts b/packages/tui/src/index.ts index 3a7944ca..7e87d71a 100644 --- a/packages/tui/src/index.ts +++ b/packages/tui/src/index.ts @@ -43,6 +43,8 @@ export { parseKey, setKittyProtocolActive, } from "./keys.js"; +// Input buffering for batch splitting +export { StdinBuffer, type StdinBufferEventMap, type StdinBufferOptions } from "./stdin-buffer.js"; // Terminal interface and implementations export { ProcessTerminal, type Terminal } from "./terminal.js"; // Terminal image support diff --git a/packages/tui/src/stdin-buffer.ts b/packages/tui/src/stdin-buffer.ts new file mode 100644 index 00000000..5b2f977b --- /dev/null +++ b/packages/tui/src/stdin-buffer.ts @@ -0,0 +1,386 @@ +/** + * StdinBuffer buffers input and emits complete sequences. 
+ * + * This is necessary because stdin data events can arrive in partial chunks, + * especially for escape sequences like mouse events. Without buffering, + * partial sequences can be misinterpreted as regular keypresses. + * + * For example, the mouse SGR sequence `\x1b[<35;20;5m` might arrive as: + * - Event 1: `\x1b` + * - Event 2: `[<35` + * - Event 3: `;20;5m` + * + * The buffer accumulates these until a complete sequence is detected. + * Call the `process()` method to feed input data. + * + * Based on code from OpenTUI (https://github.com/anomalyco/opentui) + * MIT License - Copyright (c) 2025 opentui + */ + +import { EventEmitter } from "events"; + +const ESC = "\x1b"; +const BRACKETED_PASTE_START = "\x1b[200~"; +const BRACKETED_PASTE_END = "\x1b[201~"; + +/** + * Check if a string is a complete escape sequence or needs more data + */ +function isCompleteSequence(data: string): "complete" | "incomplete" | "not-escape" { + if (!data.startsWith(ESC)) { + return "not-escape"; + } + + if (data.length === 1) { + return "incomplete"; + } + + const afterEsc = data.slice(1); + + // CSI sequences: ESC [ + if (afterEsc.startsWith("[")) { + // Check for old-style mouse sequence: ESC[M + 3 bytes + if (afterEsc.startsWith("[M")) { + // Old-style mouse needs ESC[M + 3 bytes = 6 total + return data.length >= 6 ? "complete" : "incomplete"; + } + return isCompleteCsiSequence(data); + } + + // OSC sequences: ESC ] + if (afterEsc.startsWith("]")) { + return isCompleteOscSequence(data); + } + + // DCS sequences: ESC P ... ESC \ (includes XTVersion responses) + if (afterEsc.startsWith("P")) { + return isCompleteDcsSequence(data); + } + + // APC sequences: ESC _ ... ESC \ (includes Kitty graphics responses) + if (afterEsc.startsWith("_")) { + return isCompleteApcSequence(data); + } + + // SS3 sequences: ESC O + if (afterEsc.startsWith("O")) { + // ESC O followed by a single character + return afterEsc.length >= 2 ? 
"complete" : "incomplete"; + } + + // Meta key sequences: ESC followed by a single character + if (afterEsc.length === 1) { + return "complete"; + } + + // Unknown escape sequence - treat as complete + return "complete"; +} + +/** + * Check if CSI sequence is complete + * CSI sequences: ESC [ ... followed by a final byte (0x40-0x7E) + */ +function isCompleteCsiSequence(data: string): "complete" | "incomplete" { + if (!data.startsWith(`${ESC}[`)) { + return "complete"; + } + + // Need at least ESC [ and one more character + if (data.length < 3) { + return "incomplete"; + } + + const payload = data.slice(2); + + // CSI sequences end with a byte in the range 0x40-0x7E (@-~) + // This includes all letters and several special characters + const lastChar = payload[payload.length - 1]; + const lastCharCode = lastChar.charCodeAt(0); + + if (lastCharCode >= 0x40 && lastCharCode <= 0x7e) { + // Special handling for SGR mouse sequences + // Format: ESC[ /^\d+$/.test(p))) { + return "complete"; + } + } + + return "incomplete"; + } + + return "complete"; + } + + return "incomplete"; +} + +/** + * Check if OSC sequence is complete + * OSC sequences: ESC ] ... ST (where ST is ESC \ or BEL) + */ +function isCompleteOscSequence(data: string): "complete" | "incomplete" { + if (!data.startsWith(`${ESC}]`)) { + return "complete"; + } + + // OSC sequences end with ST (ESC \) or BEL (\x07) + if (data.endsWith(`${ESC}\\`) || data.endsWith("\x07")) { + return "complete"; + } + + return "incomplete"; +} + +/** + * Check if DCS (Device Control String) sequence is complete + * DCS sequences: ESC P ... ST (where ST is ESC \) + * Used for XTVersion responses like ESC P >| ... 
ESC \ + */ +function isCompleteDcsSequence(data: string): "complete" | "incomplete" { + if (!data.startsWith(`${ESC}P`)) { + return "complete"; + } + + // DCS sequences end with ST (ESC \) + if (data.endsWith(`${ESC}\\`)) { + return "complete"; + } + + return "incomplete"; +} + +/** + * Check if APC (Application Program Command) sequence is complete + * APC sequences: ESC _ ... ST (where ST is ESC \) + * Used for Kitty graphics responses like ESC _ G ... ESC \ + */ +function isCompleteApcSequence(data: string): "complete" | "incomplete" { + if (!data.startsWith(`${ESC}_`)) { + return "complete"; + } + + // APC sequences end with ST (ESC \) + if (data.endsWith(`${ESC}\\`)) { + return "complete"; + } + + return "incomplete"; +} + +/** + * Split accumulated buffer into complete sequences + */ +function extractCompleteSequences(buffer: string): { sequences: string[]; remainder: string } { + const sequences: string[] = []; + let pos = 0; + + while (pos < buffer.length) { + const remaining = buffer.slice(pos); + + // Try to extract a sequence starting at this position + if (remaining.startsWith(ESC)) { + // Find the end of this escape sequence + let seqEnd = 1; + while (seqEnd <= remaining.length) { + const candidate = remaining.slice(0, seqEnd); + const status = isCompleteSequence(candidate); + + if (status === "complete") { + sequences.push(candidate); + pos += seqEnd; + break; + } else if (status === "incomplete") { + seqEnd++; + } else { + // Should not happen when starting with ESC + sequences.push(candidate); + pos += seqEnd; + break; + } + } + + if (seqEnd > remaining.length) { + return { sequences, remainder: remaining }; + } + } else { + // Not an escape sequence - take a single character + sequences.push(remaining[0]!); + pos++; + } + } + + return { sequences, remainder: "" }; +} + +export type StdinBufferOptions = { + /** + * Maximum time to wait for sequence completion (default: 10ms) + * After this time, the buffer is flushed even if incomplete + */ + 
timeout?: number; +}; + +export type StdinBufferEventMap = { + data: [string]; + paste: [string]; +}; + +/** + * Buffers stdin input and emits complete sequences via the 'data' event. + * Handles partial escape sequences that arrive across multiple chunks. + */ +export class StdinBuffer extends EventEmitter { + private buffer: string = ""; + private timeout: ReturnType | null = null; + private readonly timeoutMs: number; + private pasteMode: boolean = false; + private pasteBuffer: string = ""; + + constructor(options: StdinBufferOptions = {}) { + super(); + this.timeoutMs = options.timeout ?? 10; + } + + public process(data: string | Buffer): void { + // Clear any pending timeout + if (this.timeout) { + clearTimeout(this.timeout); + this.timeout = null; + } + + // Handle high-byte conversion (for compatibility with parseKeypress) + // If buffer has single byte > 127, convert to ESC + (byte - 128) + let str: string; + if (Buffer.isBuffer(data)) { + if (data.length === 1 && data[0]! > 127) { + const byte = data[0]! 
- 128; + str = `\x1b${String.fromCharCode(byte)}`; + } else { + str = data.toString(); + } + } else { + str = data; + } + + if (str.length === 0 && this.buffer.length === 0) { + this.emit("data", ""); + return; + } + + this.buffer += str; + + if (this.pasteMode) { + this.pasteBuffer += this.buffer; + this.buffer = ""; + + const endIndex = this.pasteBuffer.indexOf(BRACKETED_PASTE_END); + if (endIndex !== -1) { + const pastedContent = this.pasteBuffer.slice(0, endIndex); + const remaining = this.pasteBuffer.slice(endIndex + BRACKETED_PASTE_END.length); + + this.pasteMode = false; + this.pasteBuffer = ""; + + this.emit("paste", pastedContent); + + if (remaining.length > 0) { + this.process(remaining); + } + } + return; + } + + const startIndex = this.buffer.indexOf(BRACKETED_PASTE_START); + if (startIndex !== -1) { + if (startIndex > 0) { + const beforePaste = this.buffer.slice(0, startIndex); + const result = extractCompleteSequences(beforePaste); + for (const sequence of result.sequences) { + this.emit("data", sequence); + } + } + + this.buffer = this.buffer.slice(startIndex + BRACKETED_PASTE_START.length); + this.pasteMode = true; + this.pasteBuffer = this.buffer; + this.buffer = ""; + + const endIndex = this.pasteBuffer.indexOf(BRACKETED_PASTE_END); + if (endIndex !== -1) { + const pastedContent = this.pasteBuffer.slice(0, endIndex); + const remaining = this.pasteBuffer.slice(endIndex + BRACKETED_PASTE_END.length); + + this.pasteMode = false; + this.pasteBuffer = ""; + + this.emit("paste", pastedContent); + + if (remaining.length > 0) { + this.process(remaining); + } + } + return; + } + + const result = extractCompleteSequences(this.buffer); + this.buffer = result.remainder; + + for (const sequence of result.sequences) { + this.emit("data", sequence); + } + + if (this.buffer.length > 0) { + this.timeout = setTimeout(() => { + const flushed = this.flush(); + + for (const sequence of flushed) { + this.emit("data", sequence); + } + }, this.timeoutMs); + } + } + + 
flush(): string[] { + if (this.timeout) { + clearTimeout(this.timeout); + this.timeout = null; + } + + if (this.buffer.length === 0) { + return []; + } + + const sequences = [this.buffer]; + this.buffer = ""; + return sequences; + } + + clear(): void { + if (this.timeout) { + clearTimeout(this.timeout); + this.timeout = null; + } + this.buffer = ""; + this.pasteMode = false; + this.pasteBuffer = ""; + } + + getBuffer(): string { + return this.buffer; + } + + destroy(): void { + this.clear(); + } +} diff --git a/packages/tui/src/terminal.ts b/packages/tui/src/terminal.ts index 65411880..557d54b6 100644 --- a/packages/tui/src/terminal.ts +++ b/packages/tui/src/terminal.ts @@ -1,4 +1,5 @@ import { setKittyProtocolActive } from "./keys.js"; +import { StdinBuffer } from "./stdin-buffer.js"; /** * Minimal terminal interface for TUI @@ -44,6 +45,8 @@ export class ProcessTerminal implements Terminal { private inputHandler?: (data: string) => void; private resizeHandler?: () => void; private _kittyProtocolActive = false; + private stdinBuffer?: StdinBuffer; + private stdinDataHandler?: (data: string) => void; get kittyProtocolActive(): boolean { return this._kittyProtocolActive; @@ -73,6 +76,35 @@ export class ProcessTerminal implements Terminal { this.queryAndEnableKittyProtocol(); } + /** + * Set up StdinBuffer to split batched input into individual sequences. + * This ensures components receive single events, making matchesKey/isKeyRelease work correctly. + * Note: Does NOT register the stdin handler - that's done after the Kitty protocol query. 
+ */ + private setupStdinBuffer(): void { + this.stdinBuffer = new StdinBuffer({ timeout: 10 }); + + // Forward individual sequences to the input handler + this.stdinBuffer.on("data", (sequence) => { + if (this.inputHandler) { + this.inputHandler(sequence); + } + }); + + // Re-wrap paste content with bracketed paste markers for existing editor handling + this.stdinBuffer.on("paste", (content) => { + if (this.inputHandler) { + this.inputHandler(`\x1b[200~${content}\x1b[201~`); + } + }); + + // Handler that pipes stdin data through the buffer + // Registration happens after Kitty protocol query completes + this.stdinDataHandler = (data: string) => { + this.stdinBuffer!.process(data); + }; + } + /** * Query terminal for Kitty keyboard protocol support and enable if available. * @@ -91,9 +123,9 @@ export class ProcessTerminal implements Terminal { const queryHandler = (data: string) => { if (resolved) { - // Query phase done, forward to user handler - if (this.inputHandler) { - this.inputHandler(data); + // Query phase done, forward to StdinBuffer + if (this.stdinBuffer) { + this.stdinBuffer.process(data); } return; } @@ -112,21 +144,24 @@ export class ProcessTerminal implements Terminal { // Flag 2 = report event types (press/repeat/release) process.stdout.write("\x1b[>3u"); - // Remove the response from buffer, forward any remaining input + // Remove the response from buffer, forward any remaining input through StdinBuffer const remaining = buffer.replace(kittyResponsePattern, ""); - if (remaining && this.inputHandler) { - this.inputHandler(remaining); + if (remaining && this.stdinBuffer) { + this.stdinBuffer.process(remaining); } - // Replace with user handler + // Replace query handler with StdinBuffer handler process.stdin.removeListener("data", queryHandler); - if (this.inputHandler) { - process.stdin.on("data", this.inputHandler); + if (this.stdinDataHandler) { + process.stdin.on("data", this.stdinDataHandler); } } }; - // Temporarily intercept input for the 
query + // Set up StdinBuffer before query (it will receive input after query completes) + this.setupStdinBuffer(); + + // Temporarily intercept input for the query (before StdinBuffer) process.stdin.on("data", queryHandler); // Send query @@ -139,15 +174,15 @@ export class ProcessTerminal implements Terminal { this._kittyProtocolActive = false; setKittyProtocolActive(false); - // Forward any buffered input that wasn't a Kitty response - if (buffer && this.inputHandler) { - this.inputHandler(buffer); + // Forward any buffered input that wasn't a Kitty response through StdinBuffer + if (buffer && this.stdinBuffer) { + this.stdinBuffer.process(buffer); } - // Replace with user handler + // Replace query handler with StdinBuffer handler process.stdin.removeListener("data", queryHandler); - if (this.inputHandler) { - process.stdin.on("data", this.inputHandler); + if (this.stdinDataHandler) { + process.stdin.on("data", this.stdinDataHandler); } } }, QUERY_TIMEOUT_MS); @@ -164,11 +199,18 @@ export class ProcessTerminal implements Terminal { setKittyProtocolActive(false); } - // Remove event handlers - if (this.inputHandler) { - process.stdin.removeListener("data", this.inputHandler); - this.inputHandler = undefined; + // Clean up StdinBuffer + if (this.stdinBuffer) { + this.stdinBuffer.destroy(); + this.stdinBuffer = undefined; } + + // Remove event handlers + if (this.stdinDataHandler) { + process.stdin.removeListener("data", this.stdinDataHandler); + this.stdinDataHandler = undefined; + } + this.inputHandler = undefined; if (this.resizeHandler) { process.stdout.removeListener("resize", this.resizeHandler); this.resizeHandler = undefined; diff --git a/packages/tui/test/stdin-buffer.test.ts b/packages/tui/test/stdin-buffer.test.ts new file mode 100644 index 00000000..5fb0d6ff --- /dev/null +++ b/packages/tui/test/stdin-buffer.test.ts @@ -0,0 +1,422 @@ +/** + * Tests for StdinBuffer + * + * Based on code from OpenTUI (https://github.com/anomalyco/opentui) + * MIT License 
- Copyright (c) 2025 opentui + */ + +import assert from "node:assert"; +import { beforeEach, describe, it } from "node:test"; +import { StdinBuffer } from "../src/stdin-buffer.js"; + +describe("StdinBuffer", () => { + let buffer: StdinBuffer; + let emittedSequences: string[]; + + beforeEach(() => { + buffer = new StdinBuffer({ timeout: 10 }); + + // Collect emitted sequences + emittedSequences = []; + buffer.on("data", (sequence) => { + emittedSequences.push(sequence); + }); + }); + + // Helper to process data through the buffer + function processInput(data: string | Buffer): void { + buffer.process(data); + } + + // Helper to wait for async operations + async function wait(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); + } + + describe("Regular Characters", () => { + it("should pass through regular characters immediately", () => { + processInput("a"); + assert.deepStrictEqual(emittedSequences, ["a"]); + }); + + it("should pass through multiple regular characters", () => { + processInput("abc"); + assert.deepStrictEqual(emittedSequences, ["a", "b", "c"]); + }); + + it("should handle unicode characters", () => { + processInput("hello 世界"); + assert.deepStrictEqual(emittedSequences, ["h", "e", "l", "l", "o", " ", "世", "界"]); + }); + }); + + describe("Complete Escape Sequences", () => { + it("should pass through complete mouse SGR sequences", () => { + const mouseSeq = "\x1b[<35;20;5m"; + processInput(mouseSeq); + assert.deepStrictEqual(emittedSequences, [mouseSeq]); + }); + + it("should pass through complete arrow key sequences", () => { + const upArrow = "\x1b[A"; + processInput(upArrow); + assert.deepStrictEqual(emittedSequences, [upArrow]); + }); + + it("should pass through complete function key sequences", () => { + const f1 = "\x1b[11~"; + processInput(f1); + assert.deepStrictEqual(emittedSequences, [f1]); + }); + + it("should pass through meta key sequences", () => { + const metaA = "\x1ba"; + processInput(metaA); + 
assert.deepStrictEqual(emittedSequences, [metaA]); + }); + + it("should pass through SS3 sequences", () => { + const ss3 = "\x1bOA"; + processInput(ss3); + assert.deepStrictEqual(emittedSequences, [ss3]); + }); + }); + + describe("Partial Escape Sequences", () => { + it("should buffer incomplete mouse SGR sequence", async () => { + processInput("\x1b"); + assert.deepStrictEqual(emittedSequences, []); + assert.strictEqual(buffer.getBuffer(), "\x1b"); + + processInput("[<35"); + assert.deepStrictEqual(emittedSequences, []); + assert.strictEqual(buffer.getBuffer(), "\x1b[<35"); + + processInput(";20;5m"); + assert.deepStrictEqual(emittedSequences, ["\x1b[<35;20;5m"]); + assert.strictEqual(buffer.getBuffer(), ""); + }); + + it("should buffer incomplete CSI sequence", () => { + processInput("\x1b["); + assert.deepStrictEqual(emittedSequences, []); + + processInput("1;"); + assert.deepStrictEqual(emittedSequences, []); + + processInput("5H"); + assert.deepStrictEqual(emittedSequences, ["\x1b[1;5H"]); + }); + + it("should buffer split across many chunks", () => { + processInput("\x1b"); + processInput("["); + processInput("<"); + processInput("3"); + processInput("5"); + processInput(";"); + processInput("2"); + processInput("0"); + processInput(";"); + processInput("5"); + processInput("m"); + + assert.deepStrictEqual(emittedSequences, ["\x1b[<35;20;5m"]); + }); + + it("should flush incomplete sequence after timeout", async () => { + processInput("\x1b[<35"); + assert.deepStrictEqual(emittedSequences, []); + + // Wait for timeout + await wait(15); + + assert.deepStrictEqual(emittedSequences, ["\x1b[<35"]); + }); + }); + + describe("Mixed Content", () => { + it("should handle characters followed by escape sequence", () => { + processInput("abc\x1b[A"); + assert.deepStrictEqual(emittedSequences, ["a", "b", "c", "\x1b[A"]); + }); + + it("should handle escape sequence followed by characters", () => { + processInput("\x1b[Aabc"); + assert.deepStrictEqual(emittedSequences, 
["\x1b[A", "a", "b", "c"]); + }); + + it("should handle multiple complete sequences", () => { + processInput("\x1b[A\x1b[B\x1b[C"); + assert.deepStrictEqual(emittedSequences, ["\x1b[A", "\x1b[B", "\x1b[C"]); + }); + + it("should handle partial sequence with preceding characters", () => { + processInput("abc\x1b[<35"); + assert.deepStrictEqual(emittedSequences, ["a", "b", "c"]); + assert.strictEqual(buffer.getBuffer(), "\x1b[<35"); + + processInput(";20;5m"); + assert.deepStrictEqual(emittedSequences, ["a", "b", "c", "\x1b[<35;20;5m"]); + }); + }); + + describe("Kitty Keyboard Protocol", () => { + it("should handle Kitty CSI u press events", () => { + // Press 'a' in Kitty protocol + processInput("\x1b[97u"); + assert.deepStrictEqual(emittedSequences, ["\x1b[97u"]); + }); + + it("should handle Kitty CSI u release events", () => { + // Release 'a' in Kitty protocol + processInput("\x1b[97;1:3u"); + assert.deepStrictEqual(emittedSequences, ["\x1b[97;1:3u"]); + }); + + it("should handle batched Kitty press and release", () => { + // Press 'a', release 'a' batched together (common over SSH) + processInput("\x1b[97u\x1b[97;1:3u"); + assert.deepStrictEqual(emittedSequences, ["\x1b[97u", "\x1b[97;1:3u"]); + }); + + it("should handle multiple batched Kitty events", () => { + // Press 'a', release 'a', press 'b', release 'b' + processInput("\x1b[97u\x1b[97;1:3u\x1b[98u\x1b[98;1:3u"); + assert.deepStrictEqual(emittedSequences, ["\x1b[97u", "\x1b[97;1:3u", "\x1b[98u", "\x1b[98;1:3u"]); + }); + + it("should handle Kitty arrow keys with event type", () => { + // Up arrow press with event type + processInput("\x1b[1;1:1A"); + assert.deepStrictEqual(emittedSequences, ["\x1b[1;1:1A"]); + }); + + it("should handle Kitty functional keys with event type", () => { + // Delete key release + processInput("\x1b[3;1:3~"); + assert.deepStrictEqual(emittedSequences, ["\x1b[3;1:3~"]); + }); + + it("should handle plain characters mixed with Kitty sequences", () => { + // Plain 'a' followed by 
Kitty release + processInput("a\x1b[97;1:3u"); + assert.deepStrictEqual(emittedSequences, ["a", "\x1b[97;1:3u"]); + }); + + it("should handle Kitty sequence followed by plain characters", () => { + processInput("\x1b[97ua"); + assert.deepStrictEqual(emittedSequences, ["\x1b[97u", "a"]); + }); + + it("should handle rapid typing simulation with Kitty protocol", () => { + // Simulates typing "hi" quickly with releases interleaved + processInput("\x1b[104u\x1b[104;1:3u\x1b[105u\x1b[105;1:3u"); + assert.deepStrictEqual(emittedSequences, ["\x1b[104u", "\x1b[104;1:3u", "\x1b[105u", "\x1b[105;1:3u"]); + }); + }); + + describe("Mouse Events", () => { + it("should handle mouse press event", () => { + processInput("\x1b[<0;10;5M"); + assert.deepStrictEqual(emittedSequences, ["\x1b[<0;10;5M"]); + }); + + it("should handle mouse release event", () => { + processInput("\x1b[<0;10;5m"); + assert.deepStrictEqual(emittedSequences, ["\x1b[<0;10;5m"]); + }); + + it("should handle mouse move event", () => { + processInput("\x1b[<35;20;5m"); + assert.deepStrictEqual(emittedSequences, ["\x1b[<35;20;5m"]); + }); + + it("should handle split mouse events", () => { + processInput("\x1b[<3"); + processInput("5;1"); + processInput("5;"); + processInput("10m"); + assert.deepStrictEqual(emittedSequences, ["\x1b[<35;15;10m"]); + }); + + it("should handle multiple mouse events", () => { + processInput("\x1b[<35;1;1m\x1b[<35;2;2m\x1b[<35;3;3m"); + assert.deepStrictEqual(emittedSequences, ["\x1b[<35;1;1m", "\x1b[<35;2;2m", "\x1b[<35;3;3m"]); + }); + + it("should handle old-style mouse sequence (ESC[M + 3 bytes)", () => { + processInput("\x1b[M abc"); + assert.deepStrictEqual(emittedSequences, ["\x1b[M ab", "c"]); + }); + + it("should buffer incomplete old-style mouse sequence", () => { + processInput("\x1b[M"); + assert.strictEqual(buffer.getBuffer(), "\x1b[M"); + + processInput(" a"); + assert.strictEqual(buffer.getBuffer(), "\x1b[M a"); + + processInput("b"); + 
assert.deepStrictEqual(emittedSequences, ["\x1b[M ab"]); + }); + }); + + describe("Edge Cases", () => { + it("should handle empty input", () => { + processInput(""); + // Empty string emits an empty data event + assert.deepStrictEqual(emittedSequences, [""]); + }); + + it("should handle lone escape character with timeout", async () => { + processInput("\x1b"); + assert.deepStrictEqual(emittedSequences, []); + + // After timeout, should emit + await wait(15); + assert.deepStrictEqual(emittedSequences, ["\x1b"]); + }); + + it("should handle lone escape character with explicit flush", () => { + processInput("\x1b"); + assert.deepStrictEqual(emittedSequences, []); + + const flushed = buffer.flush(); + assert.deepStrictEqual(flushed, ["\x1b"]); + }); + + it("should handle buffer input", () => { + processInput(Buffer.from("\x1b[A")); + assert.deepStrictEqual(emittedSequences, ["\x1b[A"]); + }); + + it("should handle very long sequences", () => { + const longSeq = `\x1b[${"1;".repeat(50)}H`; + processInput(longSeq); + assert.deepStrictEqual(emittedSequences, [longSeq]); + }); + }); + + describe("Flush", () => { + it("should flush incomplete sequences", () => { + processInput("\x1b[<35"); + const flushed = buffer.flush(); + assert.deepStrictEqual(flushed, ["\x1b[<35"]); + assert.strictEqual(buffer.getBuffer(), ""); + }); + + it("should return empty array if nothing to flush", () => { + const flushed = buffer.flush(); + assert.deepStrictEqual(flushed, []); + }); + + it("should emit flushed data via timeout", async () => { + processInput("\x1b[<35"); + assert.deepStrictEqual(emittedSequences, []); + + // Wait for timeout to flush + await wait(15); + + assert.deepStrictEqual(emittedSequences, ["\x1b[<35"]); + }); + }); + + describe("Clear", () => { + it("should clear buffered content without emitting", () => { + processInput("\x1b[<35"); + assert.strictEqual(buffer.getBuffer(), "\x1b[<35"); + + buffer.clear(); + assert.strictEqual(buffer.getBuffer(), ""); + 
assert.deepStrictEqual(emittedSequences, []); + }); + }); + + describe("Bracketed Paste", () => { + let emittedPaste: string[] = []; + + beforeEach(() => { + buffer = new StdinBuffer({ timeout: 10 }); + + // Collect emitted sequences + emittedSequences = []; + buffer.on("data", (sequence) => { + emittedSequences.push(sequence); + }); + + // Collect paste events + emittedPaste = []; + buffer.on("paste", (data) => { + emittedPaste.push(data); + }); + }); + + it("should emit paste event for complete bracketed paste", () => { + const pasteStart = "\x1b[200~"; + const pasteEnd = "\x1b[201~"; + const content = "hello world"; + + processInput(pasteStart + content + pasteEnd); + + assert.deepStrictEqual(emittedPaste, ["hello world"]); + assert.deepStrictEqual(emittedSequences, []); // No data events during paste + }); + + it("should handle paste arriving in chunks", () => { + processInput("\x1b[200~"); + assert.deepStrictEqual(emittedPaste, []); + + processInput("hello "); + assert.deepStrictEqual(emittedPaste, []); + + processInput("world\x1b[201~"); + assert.deepStrictEqual(emittedPaste, ["hello world"]); + assert.deepStrictEqual(emittedSequences, []); + }); + + it("should handle paste with input before and after", () => { + processInput("a"); + processInput("\x1b[200~pasted\x1b[201~"); + processInput("b"); + + assert.deepStrictEqual(emittedSequences, ["a", "b"]); + assert.deepStrictEqual(emittedPaste, ["pasted"]); + }); + + it("should handle paste with newlines", () => { + processInput("\x1b[200~line1\nline2\nline3\x1b[201~"); + + assert.deepStrictEqual(emittedPaste, ["line1\nline2\nline3"]); + assert.deepStrictEqual(emittedSequences, []); + }); + + it("should handle paste with unicode", () => { + processInput("\x1b[200~Hello 世界 🎉\x1b[201~"); + + assert.deepStrictEqual(emittedPaste, ["Hello 世界 🎉"]); + assert.deepStrictEqual(emittedSequences, []); + }); + }); + + describe("Destroy", () => { + it("should clear buffer on destroy", () => { + processInput("\x1b[<35"); + 
assert.strictEqual(buffer.getBuffer(), "\x1b[<35"); + + buffer.destroy(); + assert.strictEqual(buffer.getBuffer(), ""); + }); + + it("should clear pending timeouts on destroy", async () => { + processInput("\x1b[<35"); + buffer.destroy(); + + // Wait longer than timeout + await wait(15); + + // Should not have emitted anything + assert.deepStrictEqual(emittedSequences, []); + }); + }); +}); From cbd3a8cb87da2e24b4998c19c6a6f9db4f3335d1 Mon Sep 17 00:00:00 2001 From: Fero Date: Wed, 7 Jan 2026 18:11:03 +0100 Subject: [PATCH 5/8] fix: use defaultThinkingLevel from settings when enabledModels lacks explicit suffix (#540) When enabledModels is configured without thinking level suffixes (e.g., 'claude-opus-4-5' instead of 'claude-opus-4-5:high'), the scoped model's default 'off' thinking level was overriding defaultThinkingLevel from settings. Now thinkingLevel in ScopedModel is optional (undefined means 'not explicitly specified'). When passing to SDK, undefined values are filled with defaultThinkingLevel from settings. --- packages/coding-agent/CHANGELOG.md | 4 ++ .../coding-agent/src/core/model-resolver.ts | 23 ++++++----- packages/coding-agent/src/main.ts | 24 ++++++++--- .../coding-agent/test/model-resolver.test.ts | 40 +++++++++---------- 4 files changed, 55 insertions(+), 36 deletions(-) diff --git a/packages/coding-agent/CHANGELOG.md b/packages/coding-agent/CHANGELOG.md index 92561529..145af044 100644 --- a/packages/coding-agent/CHANGELOG.md +++ b/packages/coding-agent/CHANGELOG.md @@ -11,6 +11,10 @@ - Extension UI dialogs (`ctx.ui.select()`, `ctx.ui.confirm()`, `ctx.ui.input()`) now support a `timeout` option that auto-dismisses the dialog with a live countdown display. Simpler alternative to `AbortSignal` for timed dialogs. - Extensions can now provide custom editor components via `ctx.ui.setEditorComponent((tui, theme, keybindings) => ...)`. Extend `CustomEditor` for full app keybinding support (escape, ctrl+d, model switching, etc.). 
See `examples/extensions/modal-editor.ts`, `examples/extensions/rainbow-editor.ts`, and `docs/tui.md` Pattern 7. +### Fixed + +- Default thinking level from settings now applies correctly when `enabledModels` is configured. Previously, models without explicit thinking level suffixes (e.g., `claude-opus-4-5` instead of `claude-opus-4-5:high`) would override `defaultThinkingLevel` with "off" + ## [0.37.8] - 2026-01-07 ## [0.37.7] - 2026-01-07 diff --git a/packages/coding-agent/src/core/model-resolver.ts b/packages/coding-agent/src/core/model-resolver.ts index 98c8fd60..b00e213a 100644 --- a/packages/coding-agent/src/core/model-resolver.ts +++ b/packages/coding-agent/src/core/model-resolver.ts @@ -29,7 +29,8 @@ export const defaultModelPerProvider: Record = { export interface ScopedModel { model: Model; - thinkingLevel: ThinkingLevel; + /** Thinking level if explicitly specified in pattern (e.g., "model:high"), undefined otherwise */ + thinkingLevel?: ThinkingLevel; } /** @@ -98,7 +99,8 @@ function tryMatchModel(modelPattern: string, availableModels: Model[]): Mod export interface ParsedModelResult { model: Model | undefined; - thinkingLevel: ThinkingLevel; + /** Thinking level if explicitly specified in pattern, undefined otherwise */ + thinkingLevel?: ThinkingLevel; warning: string | undefined; } @@ -119,14 +121,14 @@ export function parseModelPattern(pattern: string, availableModels: Model[] // Try exact match first const exactMatch = tryMatchModel(pattern, availableModels); if (exactMatch) { - return { model: exactMatch, thinkingLevel: "off", warning: undefined }; + return { model: exactMatch, thinkingLevel: undefined, warning: undefined }; } // No match - try splitting on last colon if present const lastColonIndex = pattern.lastIndexOf(":"); if (lastColonIndex === -1) { // No colons, pattern simply doesn't match any model - return { model: undefined, thinkingLevel: "off", warning: undefined }; + return { model: undefined, thinkingLevel: undefined, warning: 
undefined }; } const prefix = pattern.substring(0, lastColonIndex); @@ -137,22 +139,21 @@ export function parseModelPattern(pattern: string, availableModels: Model[] const result = parseModelPattern(prefix, availableModels); if (result.model) { // Only use this thinking level if no warning from inner recursion - // (if there was an invalid suffix deeper, we already have "off") return { model: result.model, - thinkingLevel: result.warning ? "off" : suffix, + thinkingLevel: result.warning ? undefined : suffix, warning: result.warning, }; } return result; } else { - // Invalid suffix - recurse on prefix with "off" and warn + // Invalid suffix - recurse on prefix and warn const result = parseModelPattern(prefix, availableModels); if (result.model) { return { model: result.model, - thinkingLevel: "off", - warning: `Invalid thinking level "${suffix}" in pattern "${pattern}". Using "off" instead.`, + thinkingLevel: undefined, + warning: `Invalid thinking level "${suffix}" in pattern "${pattern}". Using default instead.`, }; } return result; @@ -180,7 +181,7 @@ export async function resolveModelScope(patterns: string[], modelRegistry: Model // Extract optional thinking level suffix (e.g., "provider/*:high") const colonIdx = pattern.lastIndexOf(":"); let globPattern = pattern; - let thinkingLevel: ThinkingLevel = "off"; + let thinkingLevel: ThinkingLevel | undefined; if (colonIdx !== -1) { const suffix = pattern.substring(colonIdx + 1); @@ -282,7 +283,7 @@ export async function findInitialModel(options: { if (scopedModels.length > 0 && !isContinuing) { return { model: scopedModels[0].model, - thinkingLevel: scopedModels[0].thinkingLevel, + thinkingLevel: scopedModels[0].thinkingLevel ?? defaultThinkingLevel ?? 
"off", fallbackMessage: undefined, }; } diff --git a/packages/coding-agent/src/main.ts b/packages/coding-agent/src/main.ts index da8d0e6c..a05c46a8 100644 --- a/packages/coding-agent/src/main.ts +++ b/packages/coding-agent/src/main.ts @@ -235,6 +235,7 @@ function buildSessionOptions( scopedModels: ScopedModel[], sessionManager: SessionManager | undefined, modelRegistry: ModelRegistry, + settingsManager: SettingsManager, preloadedExtensions?: LoadedExtension[], ): CreateAgentSessionOptions { const options: CreateAgentSessionOptions = {}; @@ -261,15 +262,21 @@ function buildSessionOptions( } // Thinking level + // Only use scoped model's thinking level if it was explicitly specified (e.g., "model:high") + // Otherwise, let the SDK use defaultThinkingLevel from settings if (parsed.thinking) { options.thinkingLevel = parsed.thinking; - } else if (scopedModels.length > 0 && !parsed.continue && !parsed.resume) { + } else if (scopedModels.length > 0 && scopedModels[0].thinkingLevel && !parsed.continue && !parsed.resume) { options.thinkingLevel = scopedModels[0].thinkingLevel; } - // Scoped models for Ctrl+P cycling + // Scoped models for Ctrl+P cycling - fill in default thinking level for models without explicit level if (scopedModels.length > 0) { - options.scopedModels = scopedModels; + const defaultThinkingLevel = settingsManager.getDefaultThinkingLevel() ?? "off"; + options.scopedModels = scopedModels.map((sm) => ({ + model: sm.model, + thinkingLevel: sm.thinkingLevel ?? 
defaultThinkingLevel, + })); } // API key from CLI - set in authStorage @@ -423,7 +430,14 @@ export async function main(args: string[]) { sessionManager = SessionManager.open(selectedPath); } - const sessionOptions = buildSessionOptions(parsed, scopedModels, sessionManager, modelRegistry, loadedExtensions); + const sessionOptions = buildSessionOptions( + parsed, + scopedModels, + sessionManager, + modelRegistry, + settingsManager, + loadedExtensions, + ); sessionOptions.authStorage = authStorage; sessionOptions.modelRegistry = modelRegistry; sessionOptions.eventBus = eventBus; @@ -471,7 +485,7 @@ export async function main(args: string[]) { if (scopedModels.length > 0) { const modelList = scopedModels .map((sm) => { - const thinkingStr = sm.thinkingLevel !== "off" ? `:${sm.thinkingLevel}` : ""; + const thinkingStr = sm.thinkingLevel ? `:${sm.thinkingLevel}` : ""; return `${sm.model.id}${thinkingStr}`; }) .join(", "); diff --git a/packages/coding-agent/test/model-resolver.test.ts b/packages/coding-agent/test/model-resolver.test.ts index 0b7b47ca..fdef4160 100644 --- a/packages/coding-agent/test/model-resolver.test.ts +++ b/packages/coding-agent/test/model-resolver.test.ts @@ -62,24 +62,24 @@ const allModels = [...mockModels, ...mockOpenRouterModels]; describe("parseModelPattern", () => { describe("simple patterns without colons", () => { - test("exact match returns model with off thinking level", () => { + test("exact match returns model with undefined thinking level", () => { const result = parseModelPattern("claude-sonnet-4-5", allModels); expect(result.model?.id).toBe("claude-sonnet-4-5"); - expect(result.thinkingLevel).toBe("off"); + expect(result.thinkingLevel).toBeUndefined(); expect(result.warning).toBeUndefined(); }); - test("partial match returns best model", () => { + test("partial match returns best model with undefined thinking level", () => { const result = parseModelPattern("sonnet", allModels); expect(result.model?.id).toBe("claude-sonnet-4-5"); - 
expect(result.thinkingLevel).toBe("off"); + expect(result.thinkingLevel).toBeUndefined(); expect(result.warning).toBeUndefined(); }); - test("no match returns null model", () => { + test("no match returns undefined model and thinking level", () => { const result = parseModelPattern("nonexistent", allModels); expect(result.model).toBeUndefined(); - expect(result.thinkingLevel).toBe("off"); + expect(result.thinkingLevel).toBeUndefined(); expect(result.warning).toBeUndefined(); }); }); @@ -110,27 +110,27 @@ describe("parseModelPattern", () => { }); describe("patterns with invalid thinking levels", () => { - test("sonnet:random returns sonnet with off and warning", () => { + test("sonnet:random returns sonnet with undefined thinking level and warning", () => { const result = parseModelPattern("sonnet:random", allModels); expect(result.model?.id).toBe("claude-sonnet-4-5"); - expect(result.thinkingLevel).toBe("off"); + expect(result.thinkingLevel).toBeUndefined(); expect(result.warning).toContain("Invalid thinking level"); expect(result.warning).toContain("random"); }); - test("gpt-4o:invalid returns gpt-4o with off and warning", () => { + test("gpt-4o:invalid returns gpt-4o with undefined thinking level and warning", () => { const result = parseModelPattern("gpt-4o:invalid", allModels); expect(result.model?.id).toBe("gpt-4o"); - expect(result.thinkingLevel).toBe("off"); + expect(result.thinkingLevel).toBeUndefined(); expect(result.warning).toContain("Invalid thinking level"); }); }); describe("OpenRouter models with colons in IDs", () => { - test("qwen3-coder:exacto matches the model with off", () => { + test("qwen3-coder:exacto matches the model with undefined thinking level", () => { const result = parseModelPattern("qwen/qwen3-coder:exacto", allModels); expect(result.model?.id).toBe("qwen/qwen3-coder:exacto"); - expect(result.thinkingLevel).toBe("off"); + expect(result.thinkingLevel).toBeUndefined(); expect(result.warning).toBeUndefined(); }); @@ -138,7 +138,7 @@ 
describe("parseModelPattern", () => { const result = parseModelPattern("openrouter/qwen/qwen3-coder:exacto", allModels); expect(result.model?.id).toBe("qwen/qwen3-coder:exacto"); expect(result.model?.provider).toBe("openrouter"); - expect(result.thinkingLevel).toBe("off"); + expect(result.thinkingLevel).toBeUndefined(); expect(result.warning).toBeUndefined(); }); @@ -157,27 +157,27 @@ describe("parseModelPattern", () => { expect(result.warning).toBeUndefined(); }); - test("gpt-4o:extended matches the extended model", () => { + test("gpt-4o:extended matches the extended model with undefined thinking level", () => { const result = parseModelPattern("openai/gpt-4o:extended", allModels); expect(result.model?.id).toBe("openai/gpt-4o:extended"); - expect(result.thinkingLevel).toBe("off"); + expect(result.thinkingLevel).toBeUndefined(); expect(result.warning).toBeUndefined(); }); }); describe("invalid thinking levels with OpenRouter models", () => { - test("qwen3-coder:exacto:random returns model with off and warning", () => { + test("qwen3-coder:exacto:random returns model with undefined thinking level and warning", () => { const result = parseModelPattern("qwen/qwen3-coder:exacto:random", allModels); expect(result.model?.id).toBe("qwen/qwen3-coder:exacto"); - expect(result.thinkingLevel).toBe("off"); + expect(result.thinkingLevel).toBeUndefined(); expect(result.warning).toContain("Invalid thinking level"); expect(result.warning).toContain("random"); }); - test("qwen3-coder:exacto:high:random returns model with off and warning", () => { + test("qwen3-coder:exacto:high:random returns model with undefined thinking level and warning", () => { const result = parseModelPattern("qwen/qwen3-coder:exacto:high:random", allModels); expect(result.model?.id).toBe("qwen/qwen3-coder:exacto"); - expect(result.thinkingLevel).toBe("off"); + expect(result.thinkingLevel).toBeUndefined(); expect(result.warning).toContain("Invalid thinking level"); expect(result.warning).toContain("random"); 
}); @@ -188,7 +188,7 @@ describe("parseModelPattern", () => { // Empty string is included in all model IDs, so partial matching finds a match const result = parseModelPattern("", allModels); expect(result.model).not.toBeNull(); - expect(result.thinkingLevel).toBe("off"); + expect(result.thinkingLevel).toBeUndefined(); }); test("pattern ending with colon treats empty suffix as invalid", () => { From 7042a5f61d49549da8929997e7b4bdcb43ed3580 Mon Sep 17 00:00:00 2001 From: Fero Date: Wed, 7 Jan 2026 18:15:12 +0100 Subject: [PATCH 6/8] docs: add changelog entry for settings preservation fix (#527) (#541) --- packages/coding-agent/CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/coding-agent/CHANGELOG.md b/packages/coding-agent/CHANGELOG.md index 145af044..4363836a 100644 --- a/packages/coding-agent/CHANGELOG.md +++ b/packages/coding-agent/CHANGELOG.md @@ -14,6 +14,7 @@ ### Fixed - Default thinking level from settings now applies correctly when `enabledModels` is configured. Previously, models without explicit thinking level suffixes (e.g., `claude-opus-4-5` instead of `claude-opus-4-5:high`) would override `defaultThinkingLevel` with "off" +- External edits to `settings.json` while pi is running are now preserved when pi saves settings (e.g., when changing thinking level via Shift+Tab) ## [0.37.8] - 2026-01-07 From 8bf2b975a52390177974f72500bb384dcc543645 Mon Sep 17 00:00:00 2001 From: Mario Zechner Date: Wed, 7 Jan 2026 20:20:40 +0100 Subject: [PATCH 7/8] docs: add changelog citation format check to PR review prompt --- .pi/prompts/pr.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.pi/prompts/pr.md b/.pi/prompts/pr.md index f7e2a378..7ccc7543 100644 --- a/.pi/prompts/pr.md +++ b/.pi/prompts/pr.md @@ -7,7 +7,10 @@ For each PR URL, do the following in order: 1. Read the PR page in full. Include description, all comments, all commits, and all changed files. 2. 
Identify any linked issues referenced in the PR body, comments, commit messages, or cross links. Read each issue in full, including all comments. 3. Analyze the PR diff. Read all relevant code files in full with no truncation. Include related code paths that are not in the diff but are required to validate behavior. -4. Check for a changelog entry in the relevant `packages/*/CHANGELOG.md` files. Report whether an entry exists. If missing, state that a changelog entry is required before merge and that you will add it if the user decides to merge. Follow the changelog format rules in AGENTS.md. +4. Check for a changelog entry in the relevant `packages/*/CHANGELOG.md` files. Report whether an entry exists. If missing, state that a changelog entry is required before merge and that you will add it if the user decides to merge. Follow the changelog format rules in AGENTS.md. Verify: + - Entry uses correct section (`### Breaking Changes`, `### Added`, `### Fixed`, etc.) + - External contributions include PR link and author: `Fixed foo ([#123](https://github.com/badlogic/pi-mono/pull/123) by [@user](https://github.com/user))` + - Breaking changes are in `### Breaking Changes`, not just `### Fixed` 5. Check if packages/coding-agent/README.md, packages/coding-agent/docs/*.md, packages/coding-agent/examples/**/*.md require modification. This is usually the case when existing features have been changed, or new features have been added. 6. 
Provide a structured review with these sections: - Good: solid choices or improvements From 39fa25eb672f72b8f4461a4418d0525217c8c32b Mon Sep 17 00:00:00 2001 From: Mario Zechner Date: Wed, 7 Jan 2026 20:39:46 +0100 Subject: [PATCH 8/8] fix(ai): clean up openai-codex models and token limits - Remove model aliases (gpt-5, gpt-5-mini, gpt-5-nano, codex-mini-latest, gpt-5-codex, gpt-5.1-codex, gpt-5.1-chat-latest) - Fix context window from 400k to 272k tokens to match Codex CLI defaults - Keep maxTokens at 128k (original value) - Simplify reasoning effort clamping closes #536 --- packages/ai/CHANGELOG.md | 6 +++++- packages/ai/scripts/generate-models.ts | 3 +-- packages/ai/src/models.generated.ts | 10 +++++----- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/packages/ai/CHANGELOG.md b/packages/ai/CHANGELOG.md index 48e5af4f..8ff82bf4 100644 --- a/packages/ai/CHANGELOG.md +++ b/packages/ai/CHANGELOG.md @@ -2,9 +2,13 @@ ## [Unreleased] +### Breaking Changes + +- Removed OpenAI Codex model aliases (`gpt-5`, `gpt-5-mini`, `gpt-5-nano`, `codex-mini-latest`, `gpt-5-codex`, `gpt-5.1-codex`, `gpt-5.1-chat-latest`). Use canonical model IDs: `gpt-5.1`, `gpt-5.1-codex-max`, `gpt-5.1-codex-mini`, `gpt-5.2`, `gpt-5.2-codex`. ([#536](https://github.com/badlogic/pi-mono/pull/536) by [@ghoulr](https://github.com/ghoulr)) + ### Fixed -- Fixed OpenAI Codex OAuth model list (removed aliases), aligned context window/maxTokens with observed backend limits, and refined reasoning effort clamping. +- Fixed OpenAI Codex context window from 400,000 to 272,000 tokens to match Codex CLI defaults and prevent 400 errors. 
([#536](https://github.com/badlogic/pi-mono/pull/536) by [@ghoulr](https://github.com/ghoulr)) ## [0.37.8] - 2026-01-07 diff --git a/packages/ai/scripts/generate-models.ts b/packages/ai/scripts/generate-models.ts index 431c8678..48be0fdb 100644 --- a/packages/ai/scripts/generate-models.ts +++ b/packages/ai/scripts/generate-models.ts @@ -447,8 +447,7 @@ async function generateModels() { // Context window is based on observed server limits (400s above ~272k), not marketing numbers. const CODEX_BASE_URL = "https://chatgpt.com/backend-api"; const CODEX_CONTEXT = 272000; - // Use the same max output token budget as Codex CLI. - const CODEX_MAX_TOKENS = 10000; + const CODEX_MAX_TOKENS = 128000; const codexModels: Model<"openai-codex-responses">[] = [ { id: "gpt-5.1", diff --git a/packages/ai/src/models.generated.ts b/packages/ai/src/models.generated.ts index 4e86ee0d..5c7a069b 100644 --- a/packages/ai/src/models.generated.ts +++ b/packages/ai/src/models.generated.ts @@ -2806,7 +2806,7 @@ export const MODELS = { cacheWrite: 0, }, contextWindow: 272000, - maxTokens: 10000, + maxTokens: 128000, } satisfies Model<"openai-codex-responses">, "gpt-5.1-codex-max": { id: "gpt-5.1-codex-max", @@ -2823,7 +2823,7 @@ export const MODELS = { cacheWrite: 0, }, contextWindow: 272000, - maxTokens: 10000, + maxTokens: 128000, } satisfies Model<"openai-codex-responses">, "gpt-5.1-codex-mini": { id: "gpt-5.1-codex-mini", @@ -2840,7 +2840,7 @@ export const MODELS = { cacheWrite: 0, }, contextWindow: 272000, - maxTokens: 10000, + maxTokens: 128000, } satisfies Model<"openai-codex-responses">, "gpt-5.2": { id: "gpt-5.2", @@ -2857,7 +2857,7 @@ export const MODELS = { cacheWrite: 0, }, contextWindow: 272000, - maxTokens: 10000, + maxTokens: 128000, } satisfies Model<"openai-codex-responses">, "gpt-5.2-codex": { id: "gpt-5.2-codex", @@ -2874,7 +2874,7 @@ export const MODELS = { cacheWrite: 0, }, contextWindow: 272000, - maxTokens: 10000, + maxTokens: 128000, } satisfies 
Model<"openai-codex-responses">, }, "openrouter": {