feat: add xhigh thinking level support for gpt-5.2 models

- Add supportsXhigh() function to ai package for checking xhigh support
- Clamp xhigh to high for OpenAI models that don't support it
- Update coding-agent to use centralized supportsXhigh()
- gpt-5.2, gpt-5.2-codex now show xhigh in thinking selector

Closes #236
This commit is contained in:
Mario Zechner 2025-12-19 20:07:24 +01:00
parent 4f981d8ebc
commit 7e38897673
5 changed files with 24 additions and 8 deletions

View file

@@ -2,6 +2,10 @@
## [Unreleased] ## [Unreleased]
### Added
- **xhigh thinking level support**: Added `supportsXhigh()` function to check if a model supports xhigh reasoning level. Also clamps xhigh to high for OpenAI models that don't support it. ([#236](https://github.com/badlogic/pi-mono/pull/236) by [@theBucky](https://github.com/theBucky))
## [0.23.5] - 2025-12-19 ## [0.23.5] - 2025-12-19
### Added ### Added

View file

@@ -43,3 +43,14 @@ export function calculateCost<TApi extends Api>(model: Model<TApi>, usage: Usage
usage.cost.total = usage.cost.input + usage.cost.output + usage.cost.cacheRead + usage.cost.cacheWrite; usage.cost.total = usage.cost.input + usage.cost.output + usage.cost.cacheRead + usage.cost.cacheWrite;
return usage.cost; return usage.cost;
} }
/** Models that support xhigh thinking level */
const XHIGH_MODELS = new Set(["gpt-5.1-codex-max", "gpt-5.2", "gpt-5.2-codex"]);
/**
 * Check if a model supports xhigh thinking level.
 * Currently only certain OpenAI models support this.
 *
 * Matches both bare model ids ("gpt-5.2") and provider-prefixed ids
 * ("some-provider/gpt-5.2"), preserving the matching behavior of the
 * coding-agent check this function centralizes (which accepted
 * `modelId === m || modelId.endsWith("/" + m)`).
 *
 * @param model - The model to check.
 * @returns true if the model's id (or its segment after the last "/")
 *          is one of the known xhigh-capable models.
 */
export function supportsXhigh<TApi extends Api>(model: Model<TApi>): boolean {
	// Fast path: exact id match.
	if (XHIGH_MODELS.has(model.id)) return true;
	// Provider-prefixed id, e.g. "openrouter/gpt-5.2" — compare the final segment.
	const slash = model.id.lastIndexOf("/");
	return slash !== -1 && XHIGH_MODELS.has(model.id.slice(slash + 1));
}

View file

@@ -1,4 +1,5 @@
import { ThinkingLevel } from "@google/genai"; import { ThinkingLevel } from "@google/genai";
import { supportsXhigh } from "./models.js";
import { type AnthropicOptions, streamAnthropic } from "./providers/anthropic.js"; import { type AnthropicOptions, streamAnthropic } from "./providers/anthropic.js";
import { type GoogleOptions, streamGoogle } from "./providers/google.js"; import { type GoogleOptions, streamGoogle } from "./providers/google.js";
import { type OpenAICompletionsOptions, streamOpenAICompletions } from "./providers/openai-completions.js"; import { type OpenAICompletionsOptions, streamOpenAICompletions } from "./providers/openai-completions.js";
@@ -155,13 +156,13 @@ function mapOptionsForApi<TApi extends Api>(
case "openai-completions": case "openai-completions":
return { return {
...base, ...base,
reasoningEffort: options?.reasoning, reasoningEffort: supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning),
} satisfies OpenAICompletionsOptions; } satisfies OpenAICompletionsOptions;
case "openai-responses": case "openai-responses":
return { return {
...base, ...base,
reasoningEffort: options?.reasoning, reasoningEffort: supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning),
} satisfies OpenAIResponsesOptions; } satisfies OpenAIResponsesOptions;
case "google-generative-ai": { case "google-generative-ai": {

View file

@@ -2,6 +2,10 @@
## [Unreleased] ## [Unreleased]
### Added
- **xhigh thinking level for gpt-5.2 models**: The thinking level selector and shift+tab cycling now show xhigh option for gpt-5.2 and gpt-5.2-codex models (in addition to gpt-5.1-codex-max). ([#236](https://github.com/badlogic/pi-mono/pull/236) by [@theBucky](https://github.com/theBucky))
### Fixed ### Fixed
- **Hooks wrap custom tools**: Custom tools are now executed through the hook wrapper, so `tool_call`/`tool_result` hooks can observe, block, and modify custom tool executions (consistent with hook type docs). - **Hooks wrap custom tools**: Custom tools are now executed through the hook wrapper, so `tool_call`/`tool_result` hooks can observe, block, and modify custom tool executions (consistent with hook type docs).

View file

@@ -15,7 +15,7 @@
import type { Agent, AgentEvent, AgentState, AppMessage, Attachment, ThinkingLevel } from "@mariozechner/pi-agent-core"; import type { Agent, AgentEvent, AgentState, AppMessage, Attachment, ThinkingLevel } from "@mariozechner/pi-agent-core";
import type { AssistantMessage, Message, Model, TextContent } from "@mariozechner/pi-ai"; import type { AssistantMessage, Message, Model, TextContent } from "@mariozechner/pi-ai";
import { isContextOverflow } from "@mariozechner/pi-ai"; import { isContextOverflow, supportsXhigh } from "@mariozechner/pi-ai";
import { getModelsPath } from "../config.js"; import { getModelsPath } from "../config.js";
import { type BashResult, executeBash as executeBashCommand } from "./bash-executor.js"; import { type BashResult, executeBash as executeBashCommand } from "./bash-executor.js";
import { calculateContextTokens, compact, shouldCompact } from "./compaction.js"; import { calculateContextTokens, compact, shouldCompact } from "./compaction.js";
@@ -102,9 +102,6 @@ export interface SessionStats {
// Constants // Constants
// ============================================================================ // ============================================================================
/** Models that support xhigh thinking level */
const XHIGH_MODELS = ["gpt-5.1-codex-max", "gpt-5.2", "gpt-5.2-codex"];
/** Standard thinking levels */ /** Standard thinking levels */
const THINKING_LEVELS: ThinkingLevel[] = ["off", "minimal", "low", "medium", "high"]; const THINKING_LEVELS: ThinkingLevel[] = ["off", "minimal", "low", "medium", "high"];
@@ -670,8 +667,7 @@ export class AgentSession {
* Check if current model supports xhigh thinking level. * Check if current model supports xhigh thinking level.
*/ */
supportsXhighThinking(): boolean { supportsXhighThinking(): boolean {
const modelId = this.model?.id || ""; return this.model ? supportsXhigh(this.model) : false;
return XHIGH_MODELS.some((m) => modelId === m || modelId.endsWith(`/${m}`));
} }
/** /**