fix(coding-agent): make resolveCliModel sync, update docs and changelog

This commit is contained in:
Mario Zechner 2026-02-12 19:04:51 +01:00
parent 56342258e1
commit 4793f7c92d
7 changed files with 35 additions and 22 deletions

View file

@@ -10,6 +10,7 @@
- Fixed context usage percentage in footer showing stale pre-compaction values. After compaction the footer now shows `?/200k` until the next LLM response provides accurate usage ([#1382](https://github.com/badlogic/pi-mono/pull/1382) by [@ferologics](https://github.com/ferologics))
- Fixed `_checkCompaction()` using the first compaction entry instead of the latest, which could cause incorrect overflow detection with multiple compactions ([#1382](https://github.com/badlogic/pi-mono/pull/1382) by [@ferologics](https://github.com/ferologics))
- `--model` now works without `--provider`, supports `provider/id` syntax, fuzzy matching, and `:<thinking>` suffix (e.g., `--model sonnet:high`, `--model openai/gpt-4o`) ([#1350](https://github.com/badlogic/pi-mono/pull/1350) by [@mitsuhiko](https://github.com/mitsuhiko))
## [0.52.9] - 2026-02-08

View file

@@ -452,7 +452,7 @@ pi config # Enable/disable package resources
| Option | Description |
|--------|-------------|
| `--provider <name>` | Provider (anthropic, openai, google, etc.) |
| `--model <id>` | Model ID |
| `--model <pattern>` | Model pattern or ID (supports `provider/id` and optional `:<thinking>`) |
| `--api-key <key>` | API key (overrides env vars) |
| `--thinking <level>` | `off`, `minimal`, `low`, `medium`, `high`, `xhigh` |
| `--models <patterns>` | Comma-separated patterns for Ctrl+P cycling |
@@ -524,6 +524,12 @@ pi -p "Summarize this codebase"
# Different model
pi --provider openai --model gpt-4o "Help me refactor"
# Model with provider prefix (no --provider needed)
pi --model openai/gpt-4o "Help me refactor"
# Model with thinking level shorthand
pi --model sonnet:high "Solve this complex problem"
# Limit model cycling
pi --models "claude-*,gpt-4o"

View file

@@ -12,7 +12,7 @@ pi --mode rpc [options]
Common options:
- `--provider <name>`: Set the LLM provider (anthropic, openai, google, etc.)
- `--model <id>`: Set the model ID
- `--model <pattern>`: Model pattern or ID (supports `provider/id` and optional `:<thinking>`)
- `--no-session`: Disable session persistence
- `--session-dir <path>`: Custom session storage directory

View file

@@ -244,6 +244,12 @@ ${chalk.bold("Examples:")}
# Use different model
${APP_NAME} --provider openai --model gpt-4o-mini "Help me refactor this code"
# Use model with provider prefix (no --provider needed)
${APP_NAME} --model openai/gpt-4o "Help me refactor this code"
# Use model with thinking level shorthand
${APP_NAME} --model sonnet:high "Solve this complex problem"
# Limit model cycling to specific models
${APP_NAME} --models claude-sonnet,claude-haiku,gpt-4o

View file

@@ -274,11 +274,11 @@ export interface ResolveCliModelResult {
* Note: This does not apply the thinking level by itself, but it may *parse* and
* return a thinking level from "<pattern>:<thinking>" so the caller can apply it.
*/
export async function resolveCliModel(options: {
export function resolveCliModel(options: {
cliProvider?: string;
cliModel?: string;
modelRegistry: ModelRegistry;
}): Promise<ResolveCliModelResult> {
}): ResolveCliModelResult {
const { cliProvider, cliModel, modelRegistry } = options;
if (!cliModel) {

View file

@@ -403,13 +403,13 @@ async function createSessionManager(parsed: Args, cwd: string): Promise<SessionM
return undefined;
}
async function buildSessionOptions(
function buildSessionOptions(
parsed: Args,
scopedModels: ScopedModel[],
sessionManager: SessionManager | undefined,
modelRegistry: ModelRegistry,
settingsManager: SettingsManager,
): Promise<{ options: CreateAgentSessionOptions; cliThinkingFromModel: boolean }> {
): { options: CreateAgentSessionOptions; cliThinkingFromModel: boolean } {
const options: CreateAgentSessionOptions = {};
let cliThinkingFromModel = false;
@@ -421,7 +421,7 @@ async function buildSessionOptions(
// - supports --provider <name> --model <pattern>
// - supports --model <provider>/<pattern>
if (parsed.model) {
const resolved = await resolveCliModel({
const resolved = resolveCliModel({
cliProvider: parsed.provider,
cliModel: parsed.model,
modelRegistry,
@@ -670,7 +670,7 @@ export async function main(args: string[]) {
sessionManager = SessionManager.open(selectedPath);
}
const { options: sessionOptions, cliThinkingFromModel } = await buildSessionOptions(
const { options: sessionOptions, cliThinkingFromModel } = buildSessionOptions(
parsed,
scopedModels,
sessionManager,

View file

@@ -207,12 +207,12 @@ describe("parseModelPattern", () => {
});
describe("resolveCliModel", () => {
test("resolves --model provider/id without --provider", async () => {
test("resolves --model provider/id without --provider", () => {
const registry = {
getAll: () => allModels,
} as unknown as Parameters<typeof resolveCliModel>[0]["modelRegistry"];
const result = await resolveCliModel({
const result = resolveCliModel({
cliModel: "openai/gpt-4o",
modelRegistry: registry,
});
@@ -222,12 +222,12 @@ describe("resolveCliModel", () => {
expect(result.model?.id).toBe("gpt-4o");
});
test("resolves fuzzy patterns within an explicit provider", async () => {
test("resolves fuzzy patterns within an explicit provider", () => {
const registry = {
getAll: () => allModels,
} as unknown as Parameters<typeof resolveCliModel>[0]["modelRegistry"];
const result = await resolveCliModel({
const result = resolveCliModel({
cliProvider: "openai",
cliModel: "4o",
modelRegistry: registry,
@@ -238,12 +238,12 @@ describe("resolveCliModel", () => {
expect(result.model?.id).toBe("gpt-4o");
});
test("supports --model <pattern>:<thinking> (without explicit --thinking)", async () => {
test("supports --model <pattern>:<thinking> (without explicit --thinking)", () => {
const registry = {
getAll: () => allModels,
} as unknown as Parameters<typeof resolveCliModel>[0]["modelRegistry"];
const result = await resolveCliModel({
const result = resolveCliModel({
cliModel: "sonnet:high",
modelRegistry: registry,
});
@@ -253,12 +253,12 @@ describe("resolveCliModel", () => {
expect(result.thinkingLevel).toBe("high");
});
test("prefers exact model id match over provider inference (OpenRouter-style ids)", async () => {
test("prefers exact model id match over provider inference (OpenRouter-style ids)", () => {
const registry = {
getAll: () => allModels,
} as unknown as Parameters<typeof resolveCliModel>[0]["modelRegistry"];
const result = await resolveCliModel({
const result = resolveCliModel({
cliModel: "openai/gpt-4o:extended",
modelRegistry: registry,
});
@@ -268,12 +268,12 @@ describe("resolveCliModel", () => {
expect(result.model?.id).toBe("openai/gpt-4o:extended");
});
test("does not strip invalid :suffix as thinking level in --model (fail fast)", async () => {
test("does not strip invalid :suffix as thinking level in --model (fail fast)", () => {
const registry = {
getAll: () => allModels,
} as unknown as Parameters<typeof resolveCliModel>[0]["modelRegistry"];
const result = await resolveCliModel({
const result = resolveCliModel({
cliProvider: "openai",
cliModel: "gpt-4o:extended",
modelRegistry: registry,
@@ -283,12 +283,12 @@ describe("resolveCliModel", () => {
expect(result.error).toContain("not found");
});
test("returns a clear error when there are no models", async () => {
test("returns a clear error when there are no models", () => {
const registry = {
getAll: () => [],
} as unknown as Parameters<typeof resolveCliModel>[0]["modelRegistry"];
const result = await resolveCliModel({
const result = resolveCliModel({
cliProvider: "openai",
cliModel: "gpt-4o",
modelRegistry: registry,
@@ -298,12 +298,12 @@ describe("resolveCliModel", () => {
expect(result.error).toContain("No models available");
});
test("resolves provider-prefixed fuzzy patterns (openrouter/qwen -> openrouter model)", async () => {
test("resolves provider-prefixed fuzzy patterns (openrouter/qwen -> openrouter model)", () => {
const registry = {
getAll: () => allModels,
} as unknown as Parameters<typeof resolveCliModel>[0]["modelRegistry"];
const result = await resolveCliModel({
const result = resolveCliModel({
cliModel: "openrouter/qwen",
modelRegistry: registry,
});