From ff0eb3ecd41b18548b555888b1cdc04c3bb80609 Mon Sep 17 00:00:00 2001 From: Mario Zechner Date: Mon, 2 Feb 2026 00:44:55 +0100 Subject: [PATCH] fix(ai): omit strict for unsupported openai completions --- packages/ai/CHANGELOG.md | 1 + packages/ai/README.md | 8 ++ .../ai/src/providers/openai-completions.ts | 12 ++- packages/ai/src/types.ts | 2 + .../openai-completions-tool-choice.test.ts | 99 ++++++++++++++----- ...nai-completions-tool-result-images.test.ts | 1 + 6 files changed, 94 insertions(+), 29 deletions(-) diff --git a/packages/ai/CHANGELOG.md b/packages/ai/CHANGELOG.md index 4bb5d27e..61592c01 100644 --- a/packages/ai/CHANGELOG.md +++ b/packages/ai/CHANGELOG.md @@ -6,6 +6,7 @@ - Fixed `cacheRetention` option not being passed through in `buildBaseOptions` ([#1154](https://github.com/badlogic/pi-mono/issues/1154)) - Fixed OAuth login/refresh not using HTTP proxy settings (`HTTP_PROXY`, `HTTPS_PROXY` env vars) ([#1132](https://github.com/badlogic/pi-mono/issues/1132)) +- Fixed OpenAI-compatible completions to omit unsupported `strict` tool fields for providers that reject them ([#1172](https://github.com/badlogic/pi-mono/issues/1172)) ## [0.50.9] - 2026-02-01 diff --git a/packages/ai/README.md b/packages/ai/README.md index 6f040737..9d982a23 100644 --- a/packages/ai/README.md +++ b/packages/ai/README.md @@ -731,8 +731,16 @@ interface OpenAICompletionsCompat { supportsStore?: boolean; // Whether provider supports the `store` field (default: true) supportsDeveloperRole?: boolean; // Whether provider supports `developer` role vs `system` (default: true) supportsReasoningEffort?: boolean; // Whether provider supports `reasoning_effort` (default: true) + supportsUsageInStreaming?: boolean; // Whether provider supports `stream_options: { include_usage: true }` (default: true) + supportsStrictMode?: boolean; // Whether provider supports `strict` in tool definitions (default: true) maxTokensField?: 'max_completion_tokens' | 'max_tokens'; // Which field name to use 
(default: max_completion_tokens) + requiresToolResultName?: boolean; // Whether tool results require the `name` field (default: false) + requiresAssistantAfterToolResult?: boolean; // Whether tool results must be followed by an assistant message (default: false) + requiresThinkingAsText?: boolean; // Whether thinking blocks must be converted to text (default: false) + requiresMistralToolIds?: boolean; // Whether tool call IDs must be normalized to Mistral format (default: false) thinkingFormat?: 'openai' | 'zai' | 'qwen'; // Format for reasoning param: 'openai' uses reasoning_effort, 'zai' uses thinking: { type: "enabled" }, 'qwen' uses enable_thinking: boolean (default: openai) + openRouterRouting?: OpenRouterRouting; // OpenRouter routing preferences (default: {}) + vercelGatewayRouting?: VercelGatewayRouting; // Vercel AI Gateway routing preferences (default: {}) } interface OpenAIResponsesCompat { diff --git a/packages/ai/src/providers/openai-completions.ts b/packages/ai/src/providers/openai-completions.ts index 1c3538af..d8e008e5 100644 --- a/packages/ai/src/providers/openai-completions.ts +++ b/packages/ai/src/providers/openai-completions.ts @@ -428,7 +428,7 @@ function buildParams(model: Model<"openai-completions">, context: Context, optio } if (context.tools) { - params.tools = convertTools(context.tools); + params.tools = convertTools(context.tools, compat); } else if (hasToolHistory(context.messages)) { // Anthropic (via LiteLLM/proxy) requires tools param when conversation has tool_calls/tool_results params.tools = []; @@ -738,14 +738,18 @@ export function convertMessages( return params; } -function convertTools(tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool[] { +function convertTools( + tools: Tool[], + compat: Required<OpenAICompletionsCompat>, +): OpenAI.Chat.Completions.ChatCompletionTool[] { return tools.map((tool) => ({ type: "function", function: { name: tool.name, description: tool.description, parameters: tool.parameters as any, // TypeBox already generates
JSON Schema - strict: false, // Disable strict mode to allow optional parameters without null unions + // Only include strict if provider supports it. Some reject unknown fields. + ...(compat.supportsStrictMode !== false && { strict: false }), }, })); } @@ -812,6 +816,7 @@ function detectCompat(model: Model<"openai-completions">): Required): Required ({ lastParams: undefined as unknown })); -class FakeOpenAI { - chat = { - completions: { - create: async (params: unknown) => { - lastParams = params; - return { - async *[Symbol.asyncIterator]() { - yield { - choices: [{ delta: {}, finish_reason: "stop" }], - usage: { - prompt_tokens: 1, - completion_tokens: 1, - prompt_tokens_details: { cached_tokens: 0 }, - completion_tokens_details: { reasoning_tokens: 0 }, - }, - }; - }, - }; +vi.mock("openai", () => { + class FakeOpenAI { + chat = { + completions: { + create: async (params: unknown) => { + mockState.lastParams = params; + return { + async *[Symbol.asyncIterator]() { + yield { + choices: [{ delta: {}, finish_reason: "stop" }], + usage: { + prompt_tokens: 1, + completion_tokens: 1, + prompt_tokens_details: { cached_tokens: 0 }, + completion_tokens_details: { reasoning_tokens: 0 }, + }, + }; + }, + }; + }, }, - }, - }; -} + }; + } -vi.mock("openai", () => ({ default: FakeOpenAI })); + return { default: FakeOpenAI }; +}); describe("openai-completions tool_choice", () => { it("forwards toolChoice from simple options to payload", async () => { - const { streamSimple } = await import("../src/stream.js"); - const { getModel } = await import("../src/models.js"); const { compat: _compat, ...baseModel } = getModel("openai", "gpt-4o-mini")!; const model = { ...baseModel, api: "openai-completions" } as const; const tools: Tool[] = [ @@ -67,9 +69,54 @@ describe("openai-completions tool_choice", () => { } as unknown as Parameters<typeof streamSimple>[2], ).result(); - const params = (payload ?? lastParams) as { tool_choice?: string; tools?: unknown[] }; + const params = (payload ??
mockState.lastParams) as { tool_choice?: string; tools?: unknown[] }; expect(params.tool_choice).toBe("required"); expect(Array.isArray(params.tools)).toBe(true); expect(params.tools?.length ?? 0).toBeGreaterThan(0); }); + + it("omits strict when compat disables strict mode", async () => { + const { compat: _compat, ...baseModel } = getModel("openai", "gpt-4o-mini")!; + const model = { + ...baseModel, + api: "openai-completions", + compat: { supportsStrictMode: false }, + } as const; + const tools: Tool[] = [ + { + name: "ping", + description: "Ping tool", + parameters: Type.Object({ + ok: Type.Boolean(), + }), + }, + ]; + let payload: unknown; + + await streamSimple( + model, + { + messages: [ + { + role: "user", + content: "Call ping with ok=true", + timestamp: Date.now(), + }, + ], + tools, + }, + { + apiKey: "test", + onPayload: (params: unknown) => { + payload = params; + }, + } as unknown as Parameters<typeof streamSimple>[2], + ).result(); + + const params = (payload ?? mockState.lastParams) as { tools?: Array<{ function?: Record<string, unknown> }> }; + const tool = params.tools?.[0]?.function; + expect(tool).toBeTruthy(); + expect(tool?.strict).toBeUndefined(); + expect("strict" in (tool ?? {})).toBe(false); + }); }); diff --git a/packages/ai/test/openai-completions-tool-result-images.test.ts b/packages/ai/test/openai-completions-tool-result-images.test.ts index 3330e776..2a2860fc 100644 --- a/packages/ai/test/openai-completions-tool-result-images.test.ts +++ b/packages/ai/test/openai-completions-tool-result-images.test.ts @@ -32,6 +32,7 @@ const compat: Required<OpenAICompletionsCompat> = { thinkingFormat: "openai", openRouterRouting: {}, vercelGatewayRouting: {}, + supportsStrictMode: true, }; function buildToolResult(toolCallId: string, timestamp: number): ToolResultMessage {