From 4793f7c92d74234db422fad08774013cce097e98 Mon Sep 17 00:00:00 2001 From: Mario Zechner Date: Thu, 12 Feb 2026 19:04:51 +0100 Subject: [PATCH] fix(coding-agent): make resolveCliModel sync, update docs and changelog --- packages/coding-agent/CHANGELOG.md | 1 + packages/coding-agent/README.md | 8 +++++- packages/coding-agent/docs/rpc.md | 2 +- packages/coding-agent/src/cli/args.ts | 6 ++++ .../coding-agent/src/core/model-resolver.ts | 4 +-- packages/coding-agent/src/main.ts | 8 +++--- .../coding-agent/test/model-resolver.test.ts | 28 +++++++++---------- 7 files changed, 35 insertions(+), 22 deletions(-) diff --git a/packages/coding-agent/CHANGELOG.md b/packages/coding-agent/CHANGELOG.md index e3102891..facc3cab 100644 --- a/packages/coding-agent/CHANGELOG.md +++ b/packages/coding-agent/CHANGELOG.md @@ -10,6 +10,7 @@ - Fixed context usage percentage in footer showing stale pre-compaction values. After compaction the footer now shows `?/200k` until the next LLM response provides accurate usage ([#1382](https://github.com/badlogic/pi-mono/pull/1382) by [@ferologics](https://github.com/ferologics)) - Fixed `_checkCompaction()` using the first compaction entry instead of the latest, which could cause incorrect overflow detection with multiple compactions ([#1382](https://github.com/badlogic/pi-mono/pull/1382) by [@ferologics](https://github.com/ferologics)) +- `--model` now works without `--provider`, supports `provider/id` syntax, fuzzy matching, and `:` suffix (e.g., `--model sonnet:high`, `--model openai/gpt-4o`) ([#1350](https://github.com/badlogic/pi-mono/pull/1350) by [@mitsuhiko](https://github.com/mitsuhiko)) ## [0.52.9] - 2026-02-08 diff --git a/packages/coding-agent/README.md b/packages/coding-agent/README.md index fd2ed77e..98f632c6 100644 --- a/packages/coding-agent/README.md +++ b/packages/coding-agent/README.md @@ -452,7 +452,7 @@ pi config # Enable/disable package resources | Option | Description | |--------|-------------| | `--provider ` | Provider 
(anthropic, openai, google, etc.) | -| `--model <id>` | Model ID | +| `--model <pattern>` | Model pattern or ID (supports `provider/id` and optional `:<level>`) | | `--api-key <key>` | API key (overrides env vars) | | `--thinking <level>` | `off`, `minimal`, `low`, `medium`, `high`, `xhigh` | | `--models <patterns>` | Comma-separated patterns for Ctrl+P cycling | @@ -524,6 +524,12 @@ pi -p "Summarize this codebase" # Different model pi --provider openai --model gpt-4o "Help me refactor" +# Model with provider prefix (no --provider needed) +pi --model openai/gpt-4o "Help me refactor" + +# Model with thinking level shorthand +pi --model sonnet:high "Solve this complex problem" + # Limit model cycling pi --models "claude-*,gpt-4o" diff --git a/packages/coding-agent/docs/rpc.md b/packages/coding-agent/docs/rpc.md index e7ba7c7e..b41d60de 100644 --- a/packages/coding-agent/docs/rpc.md +++ b/packages/coding-agent/docs/rpc.md @@ -12,7 +12,7 @@ pi --mode rpc [options] Common options: - `--provider <name>`: Set the LLM provider (anthropic, openai, google, etc.) 
-- `--model <id>`: Set the model ID +- `--model <pattern>`: Model pattern or ID (supports `provider/id` and optional `:<level>`) - `--no-session`: Disable session persistence - `--session-dir <dir>`: Custom session storage directory diff --git a/packages/coding-agent/src/cli/args.ts b/packages/coding-agent/src/cli/args.ts index 93ae848b..03f2a565 100644 --- a/packages/coding-agent/src/cli/args.ts +++ b/packages/coding-agent/src/cli/args.ts @@ -244,6 +244,12 @@ ${chalk.bold("Examples:")} # Use different model ${APP_NAME} --provider openai --model gpt-4o-mini "Help me refactor this code" + # Use model with provider prefix (no --provider needed) + ${APP_NAME} --model openai/gpt-4o "Help me refactor this code" + + # Use model with thinking level shorthand + ${APP_NAME} --model sonnet:high "Solve this complex problem" + # Limit model cycling to specific models ${APP_NAME} --models claude-sonnet,claude-haiku,gpt-4o diff --git a/packages/coding-agent/src/core/model-resolver.ts b/packages/coding-agent/src/core/model-resolver.ts index f6a2a24a..bd74c206 100644 --- a/packages/coding-agent/src/core/model-resolver.ts +++ b/packages/coding-agent/src/core/model-resolver.ts @@ -274,11 +274,11 @@ export interface ResolveCliModelResult { * Note: This does not apply the thinking level by itself, but it may *parse* and * return a thinking level from ":<level>" so the caller can apply it. 
*/ -export async function resolveCliModel(options: { +export function resolveCliModel(options: { cliProvider?: string; cliModel?: string; modelRegistry: ModelRegistry; -}): Promise<ResolveCliModelResult> { +}): ResolveCliModelResult { const { cliProvider, cliModel, modelRegistry } = options; if (!cliModel) { diff --git a/packages/coding-agent/src/main.ts b/packages/coding-agent/src/main.ts index 5d5b1bf6..17ae99e1 100644 --- a/packages/coding-agent/src/main.ts +++ b/packages/coding-agent/src/main.ts @@ -403,13 +403,13 @@ async function createSessionManager(parsed: Args, cwd: string): Promise<SessionManager> { -): Promise<{ options: CreateAgentSessionOptions; cliThinkingFromModel: boolean }> { +): { options: CreateAgentSessionOptions; cliThinkingFromModel: boolean } { const options: CreateAgentSessionOptions = {}; let cliThinkingFromModel = false; @@ -421,7 +421,7 @@ async function buildSessionOptions( // - supports --provider <provider> --model <model> // - supports --model <provider>/<model> if (parsed.model) { - const resolved = await resolveCliModel({ + const resolved = resolveCliModel({ cliProvider: parsed.provider, cliModel: parsed.model, modelRegistry, @@ -670,7 +670,7 @@ export async function main(args: string[]) { sessionManager = SessionManager.open(selectedPath); } - const { options: sessionOptions, cliThinkingFromModel } = await buildSessionOptions( + const { options: sessionOptions, cliThinkingFromModel } = buildSessionOptions( parsed, scopedModels, sessionManager, diff --git a/packages/coding-agent/test/model-resolver.test.ts b/packages/coding-agent/test/model-resolver.test.ts index 7649d9a5..c14d524b 100644 --- a/packages/coding-agent/test/model-resolver.test.ts +++ b/packages/coding-agent/test/model-resolver.test.ts @@ -207,12 +207,12 @@ describe("parseModelPattern", () => { }); describe("resolveCliModel", () => { - test("resolves --model provider/id without --provider", async () => { + test("resolves --model provider/id without --provider", () => { const registry = { getAll: () => allModels, } as unknown as Parameters<typeof resolveCliModel>[0]["modelRegistry"]; - const result = await resolveCliModel({ + const result = 
resolveCliModel({ cliModel: "openai/gpt-4o", modelRegistry: registry, }); @@ -222,12 +222,12 @@ describe("resolveCliModel", () => { expect(result.model?.id).toBe("gpt-4o"); }); - test("resolves fuzzy patterns within an explicit provider", async () => { + test("resolves fuzzy patterns within an explicit provider", () => { const registry = { getAll: () => allModels, } as unknown as Parameters<typeof resolveCliModel>[0]["modelRegistry"]; - const result = await resolveCliModel({ + const result = resolveCliModel({ cliProvider: "openai", cliModel: "4o", modelRegistry: registry, @@ -238,12 +238,12 @@ describe("resolveCliModel", () => { expect(result.model?.id).toBe("gpt-4o"); }); - test("supports --model <model>:<level> (without explicit --thinking)", async () => { + test("supports --model <model>:<level> (without explicit --thinking)", () => { const registry = { getAll: () => allModels, } as unknown as Parameters<typeof resolveCliModel>[0]["modelRegistry"]; - const result = await resolveCliModel({ + const result = resolveCliModel({ cliModel: "sonnet:high", modelRegistry: registry, }); @@ -253,12 +253,12 @@ describe("resolveCliModel", () => { expect(result.thinkingLevel).toBe("high"); }); - test("prefers exact model id match over provider inference (OpenRouter-style ids)", async () => { + test("prefers exact model id match over provider inference (OpenRouter-style ids)", () => { const registry = { getAll: () => allModels, } as unknown as Parameters<typeof resolveCliModel>[0]["modelRegistry"]; - const result = await resolveCliModel({ + const result = resolveCliModel({ cliModel: "openai/gpt-4o:extended", modelRegistry: registry, }); @@ -268,12 +268,12 @@ describe("resolveCliModel", () => { expect(result.model?.id).toBe("openai/gpt-4o:extended"); }); - test("does not strip invalid :suffix as thinking level in --model (fail fast)", async () => { + test("does not strip invalid :suffix as thinking level in --model (fail fast)", () => { const registry = { getAll: () => allModels, } as unknown as Parameters<typeof resolveCliModel>[0]["modelRegistry"]; - const result = await resolveCliModel({ + const 
result = resolveCliModel({ cliProvider: "openai", cliModel: "gpt-4o:extended", modelRegistry: registry, @@ -283,12 +283,12 @@ describe("resolveCliModel", () => { expect(result.error).toContain("not found"); }); - test("returns a clear error when there are no models", async () => { + test("returns a clear error when there are no models", () => { const registry = { getAll: () => [], } as unknown as Parameters<typeof resolveCliModel>[0]["modelRegistry"]; - const result = await resolveCliModel({ + const result = resolveCliModel({ cliProvider: "openai", cliModel: "gpt-4o", modelRegistry: registry, @@ -298,12 +298,12 @@ describe("resolveCliModel", () => { expect(result.error).toContain("No models available"); }); - test("resolves provider-prefixed fuzzy patterns (openrouter/qwen -> openrouter model)", async () => { + test("resolves provider-prefixed fuzzy patterns (openrouter/qwen -> openrouter model)", () => { const registry = { getAll: () => allModels, } as unknown as Parameters<typeof resolveCliModel>[0]["modelRegistry"]; - const result = await resolveCliModel({ + const result = resolveCliModel({ cliModel: "openrouter/qwen", modelRegistry: registry, });