diff --git a/packages/ai/src/providers/openai-completions.ts b/packages/ai/src/providers/openai-completions.ts
index 2518ea4a..3cd370cd 100644
--- a/packages/ai/src/providers/openai-completions.ts
+++ b/packages/ai/src/providers/openai-completions.ts
@@ -428,7 +428,7 @@ function buildParams(model: Model<"openai-completions">, context: Context, optio
     (params as any).enable_thinking = !!options?.reasoningEffort;
   } else if (options?.reasoningEffort && model.reasoning && compat.supportsReasoningEffort) {
     // OpenAI-style reasoning_effort
-    params.reasoning_effort = options.reasoningEffort;
+    (params as any).reasoning_effort = mapReasoningEffort(options.reasoningEffort, compat.reasoningEffortMap);
   }
 
   // OpenRouter provider routing preferences
@@ -450,6 +450,13 @@ function buildParams(model: Model<"openai-completions">, context: Context, optio
   return params;
 }
 
+function mapReasoningEffort(
+  effort: NonNullable,
+  reasoningEffortMap: Partial, string>>,
+): string {
+  return reasoningEffortMap[effort] ?? effort;
+}
+
 function maybeAddOpenRouterAnthropicCacheControl(
   model: Model<"openai-completions">,
   messages: ChatCompletionMessageParam[],
@@ -777,13 +784,26 @@ function detectCompat(model: Model<"openai-completions">): Required): Required>;
   /** Whether the provider supports `stream_options: { include_usage: true }` for token usage in streaming responses. Default: true. */
   supportsUsageInStreaming?: boolean;
   /** Which field to use for max tokens. Default: auto-detected from URL. */
diff --git a/packages/ai/test/openai-completions-tool-choice.test.ts b/packages/ai/test/openai-completions-tool-choice.test.ts
index 747d88a6..339e3f21 100644
--- a/packages/ai/test/openai-completions-tool-choice.test.ts
+++ b/packages/ai/test/openai-completions-tool-choice.test.ts
@@ -119,4 +119,60 @@ describe("openai-completions tool_choice", () => {
     expect(tool?.strict).toBeUndefined();
     expect("strict" in (tool ?? {})).toBe(false);
   });
+
+  it("maps groq qwen3 reasoning levels to default reasoning_effort", async () => {
+    const model = getModel("groq", "qwen/qwen3-32b")!;
+    let payload: unknown;
+
+    await streamSimple(
+      model,
+      {
+        messages: [
+          {
+            role: "user",
+            content: "Hi",
+            timestamp: Date.now(),
+          },
+        ],
+      },
+      {
+        apiKey: "test",
+        reasoning: "medium",
+        onPayload: (params: unknown) => {
+          payload = params;
+        },
+      },
+    ).result();
+
+    const params = (payload ?? mockState.lastParams) as { reasoning_effort?: string };
+    expect(params.reasoning_effort).toBe("default");
+  });
+
+  it("keeps normal reasoning_effort for groq models without compat mapping", async () => {
+    const model = getModel("groq", "openai/gpt-oss-20b")!;
+    let payload: unknown;
+
+    await streamSimple(
+      model,
+      {
+        messages: [
+          {
+            role: "user",
+            content: "Hi",
+            timestamp: Date.now(),
+          },
+        ],
+      },
+      {
+        apiKey: "test",
+        reasoning: "medium",
+        onPayload: (params: unknown) => {
+          payload = params;
+        },
+      },
+    ).result();
+
+    const params = (payload ?? mockState.lastParams) as { reasoning_effort?: string };
+    expect(params.reasoning_effort).toBe("medium");
+  });
 });
diff --git a/packages/ai/test/openai-completions-tool-result-images.test.ts b/packages/ai/test/openai-completions-tool-result-images.test.ts
index 2a2860fc..1e06746a 100644
--- a/packages/ai/test/openai-completions-tool-result-images.test.ts
+++ b/packages/ai/test/openai-completions-tool-result-images.test.ts
@@ -23,6 +23,7 @@ const compat: Required = {
   supportsStore: true,
   supportsDeveloperRole: true,
   supportsReasoningEffort: true,
+  reasoningEffortMap: {},
   supportsUsageInStreaming: true,
   maxTokensField: "max_completion_tokens",
   requiresToolResultName: false,
diff --git a/packages/coding-agent/docs/custom-provider.md b/packages/coding-agent/docs/custom-provider.md
index 4a7e41be..8556e96e 100644
--- a/packages/coding-agent/docs/custom-provider.md
+++ b/packages/coding-agent/docs/custom-provider.md
@@ -172,10 +172,17 @@ models: [{
   // ...
   compat: {
     supportsDeveloperRole: false, // use "system" instead of "developer"
-    supportsReasoningEffort: false, // disable reasoning_effort param
+    supportsReasoningEffort: true,
+    reasoningEffortMap: { // map pi-ai levels to provider values
+      minimal: "default",
+      low: "default",
+      medium: "default",
+      high: "default",
+      xhigh: "default"
+    },
     maxTokensField: "max_tokens", // instead of "max_completion_tokens"
     requiresToolResultName: true, // tool results need name field
-    requiresMistralToolIds: true // tool IDs must be 9 alphanumeric chars
+    requiresMistralToolIds: true, thinkingFormat: "qwen" // uses enable_thinking: true
   }
 }]
@@ -568,6 +575,7 @@ interface ProviderModelConfig {
   supportsStore?: boolean;
   supportsDeveloperRole?: boolean;
   supportsReasoningEffort?: boolean;
+  reasoningEffortMap?: Partial>;
   supportsUsageInStreaming?: boolean;
   maxTokensField?: "max_completion_tokens" | "max_tokens";
   requiresToolResultName?: boolean;
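
The mapping itself is a plain lookup with pass-through: `mapReasoningEffort(effort, map)` returns `map[effort]` when the compat entry defines it and otherwise forwards the level unchanged, which is why the gpt-oss test above still sees `reasoning_effort: "medium"`. Below is a minimal standalone sketch of that behavior; the `ReasoningEffort` union and `qwen3Map` names are illustrative only, not the package's actual exports.

// Illustrative sketch: the union mirrors the levels listed in custom-provider.md;
// the real package may use a different type name for them.
type ReasoningEffort = "minimal" | "low" | "medium" | "high" | "xhigh";

// Same lookup-with-fallback logic as the patched provider code.
function mapReasoningEffort(
  effort: ReasoningEffort,
  reasoningEffortMap: Partial<Record<ReasoningEffort, string>>,
): string {
  return reasoningEffortMap[effort] ?? effort;
}

// Hypothetical compat map matching the docs example for groq's qwen/qwen3-32b,
// which collapses every pi-ai level to the provider's "default" value.
const qwen3Map: Partial<Record<ReasoningEffort, string>> = {
  minimal: "default",
  low: "default",
  medium: "default",
  high: "default",
  xhigh: "default",
};

console.log(mapReasoningEffort("medium", qwen3Map)); // "default"
console.log(mapReasoningEffort("medium", {}));       // "medium" (no mapping configured)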