Mirror of https://github.com/getcompanion-ai/co-mono.git (synced 2026-04-15 11:02:17 +00:00)
fix(ai): omit strict for unsupported openai completions
parent 5d6a7d6c34
commit ff0eb3ecd4
6 changed files with 94 additions and 29 deletions

@@ -6,6 +6,7 @@
- Fixed `cacheRetention` option not being passed through in `buildBaseOptions` ([#1154](https://github.com/badlogic/pi-mono/issues/1154))
- Fixed OAuth login/refresh not using HTTP proxy settings (`HTTP_PROXY`, `HTTPS_PROXY` env vars) ([#1132](https://github.com/badlogic/pi-mono/issues/1132))
- Fixed OpenAI-compatible completions to omit unsupported `strict` tool fields for providers that reject them ([#1172](https://github.com/badlogic/pi-mono/issues/1172))

## [0.50.9] - 2026-02-01

@@ -731,8 +731,16 @@ interface OpenAICompletionsCompat {
	supportsStore?: boolean; // Whether provider supports the `store` field (default: true)
	supportsDeveloperRole?: boolean; // Whether provider supports `developer` role vs `system` (default: true)
	supportsReasoningEffort?: boolean; // Whether provider supports `reasoning_effort` (default: true)
	supportsUsageInStreaming?: boolean; // Whether provider supports `stream_options: { include_usage: true }` (default: true)
	supportsStrictMode?: boolean; // Whether provider supports `strict` in tool definitions (default: true)
	maxTokensField?: 'max_completion_tokens' | 'max_tokens'; // Which field name to use (default: max_completion_tokens)
	requiresToolResultName?: boolean; // Whether tool results require the `name` field (default: false)
	requiresAssistantAfterToolResult?: boolean; // Whether tool results must be followed by an assistant message (default: false)
	requiresThinkingAsText?: boolean; // Whether thinking blocks must be converted to text (default: false)
	requiresMistralToolIds?: boolean; // Whether tool call IDs must be normalized to Mistral format (default: false)
	thinkingFormat?: 'openai' | 'zai' | 'qwen'; // Format for reasoning param: 'openai' uses reasoning_effort, 'zai' uses thinking: { type: "enabled" }, 'qwen' uses enable_thinking: boolean (default: openai)
	openRouterRouting?: OpenRouterRouting; // OpenRouter routing preferences (default: {})
	vercelGatewayRouting?: VercelGatewayRouting; // Vercel AI Gateway routing preferences (default: {})
}

interface OpenAIResponsesCompat {
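
For providers that reject unknown fields in tool definitions, the fix lets callers turn `strict` off through the model's `compat` block instead of patching payloads by hand. A minimal sketch of that configuration (it mirrors the new test further down; the provider and model id are illustrative, not prescriptive):

const base = getModel("openai", "gpt-4o-mini")!;
const model = {
	...base,
	api: "openai-completions",
	compat: { ...base.compat, supportsStrictMode: false },
} as const;
// Tool definitions built for this model will omit `strict` entirely.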

@@ -428,7 +428,7 @@ function buildParams(model: Model<"openai-completions">, context: Context, optio
	}

	if (context.tools) {
		params.tools = convertTools(context.tools);
		params.tools = convertTools(context.tools, compat);
	} else if (hasToolHistory(context.messages)) {
		// Anthropic (via LiteLLM/proxy) requires tools param when conversation has tool_calls/tool_results
		params.tools = [];

@@ -738,14 +738,18 @@ export function convertMessages(
	return params;
}

function convertTools(tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool[] {
function convertTools(
	tools: Tool[],
	compat: Required<OpenAICompletionsCompat>,
): OpenAI.Chat.Completions.ChatCompletionTool[] {
	return tools.map((tool) => ({
		type: "function",
		function: {
			name: tool.name,
			description: tool.description,
			parameters: tool.parameters as any, // TypeBox already generates JSON Schema
			strict: false, // Disable strict mode to allow optional parameters without null unions
			// Only include strict if provider supports it. Some reject unknown fields.
			...(compat.supportsStrictMode !== false && { strict: false }),
		},
	}));
}
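
The spread-with-short-circuit idiom above adds `strict` only when the guard is truthy; when a provider opts out, the key is absent from the object rather than sent as `undefined`, which is what strict-validating backends reject. A stand-alone illustration of the pattern (the values are made up):

const include = false;
const fn = { name: "ping", ...(include && { strict: false }) };
console.log("strict" in fn); // false: the key was never added
const fn2 = { name: "ping", ...(true && { strict: false }) };
console.log(fn2); // { name: "ping", strict: false }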

@@ -812,6 +816,7 @@ function detectCompat(model: Model<"openai-completions">): Required<OpenAIComple
		thinkingFormat: isZai ? "zai" : "openai",
		openRouterRouting: {},
		vercelGatewayRouting: {},
		supportsStrictMode: true,
	};
}

@@ -837,5 +842,6 @@ function getCompat(model: Model<"openai-completions">): Required<OpenAICompletio
		thinkingFormat: model.compat.thinkingFormat ?? detected.thinkingFormat,
		openRouterRouting: model.compat.openRouterRouting ?? {},
		vercelGatewayRouting: model.compat.vercelGatewayRouting ?? detected.vercelGatewayRouting,
		supportsStrictMode: model.compat.supportsStrictMode ?? detected.supportsStrictMode,
	};
}
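
Layering works in two steps: `detectCompat` supplies the built-in defaults (including `supportsStrictMode: true`), and `getCompat` lets an explicit `model.compat` value win through `??`, so only a missing (`undefined`/`null`) setting falls back to the detected default. A reduced sketch of that precedence (trimmed down to the one field under discussion):

const detected = { supportsStrictMode: true };
const merged = { supportsStrictMode: model.compat.supportsStrictMode ?? detected.supportsStrictMode };
// model.compat.supportsStrictMode === false      -> merged.supportsStrictMode === false
// model.compat.supportsStrictMode === undefined  -> merged.supportsStrictMode === true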

@@ -235,6 +235,8 @@ export interface OpenAICompletionsCompat {
	openRouterRouting?: OpenRouterRouting;
	/** Vercel AI Gateway routing preferences. Only used when baseUrl points to Vercel AI Gateway. */
	vercelGatewayRouting?: VercelGatewayRouting;
	/** Whether the provider supports the `strict` field in tool definitions. Default: true. */
	supportsStrictMode?: boolean;
}

/** Compatibility settings for OpenAI Responses APIs. */
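
In terms of what the provider actually receives, leaving `supportsStrictMode` at its default keeps `strict: false` on every tool entry, while disabling it drops the field altogether. Roughly (a trimmed sketch of one entry in the Chat Completions `tools` array, not a full request):

// supportsStrictMode: true (default)
{ type: "function", function: { name: "ping", description: "Ping tool", parameters: { /* JSON Schema */ }, strict: false } }
// supportsStrictMode: false
{ type: "function", function: { name: "ping", description: "Ping tool", parameters: { /* JSON Schema */ } } }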

@@ -1,38 +1,40 @@
import { Type } from "@sinclair/typebox";
import { describe, expect, it, vi } from "vitest";
import { getModel } from "../src/models.js";
import { streamSimple } from "../src/stream.js";
import type { Tool } from "../src/types.js";

let lastParams: unknown;
const mockState = vi.hoisted(() => ({ lastParams: undefined as unknown }));

class FakeOpenAI {
	chat = {
		completions: {
			create: async (params: unknown) => {
				lastParams = params;
				return {
					async *[Symbol.asyncIterator]() {
						yield {
							choices: [{ delta: {}, finish_reason: "stop" }],
							usage: {
								prompt_tokens: 1,
								completion_tokens: 1,
								prompt_tokens_details: { cached_tokens: 0 },
								completion_tokens_details: { reasoning_tokens: 0 },
							},
						};
					},
				};
vi.mock("openai", () => {
	class FakeOpenAI {
		chat = {
			completions: {
				create: async (params: unknown) => {
					mockState.lastParams = params;
					return {
						async *[Symbol.asyncIterator]() {
							yield {
								choices: [{ delta: {}, finish_reason: "stop" }],
								usage: {
									prompt_tokens: 1,
									completion_tokens: 1,
									prompt_tokens_details: { cached_tokens: 0 },
									completion_tokens_details: { reasoning_tokens: 0 },
								},
							};
						},
					};
				},
			},
		},
	};
}
		};
	}

vi.mock("openai", () => ({ default: FakeOpenAI }));
	return { default: FakeOpenAI };
});

describe("openai-completions tool_choice", () => {
	it("forwards toolChoice from simple options to payload", async () => {
		const { streamSimple } = await import("../src/stream.js");
		const { getModel } = await import("../src/models.js");
		const { compat: _compat, ...baseModel } = getModel("openai", "gpt-4o-mini")!;
		const model = { ...baseModel, api: "openai-completions" } as const;
		const tools: Tool[] = [

@@ -67,9 +69,54 @@ describe("openai-completions tool_choice", () => {
			} as unknown as Parameters<typeof streamSimple>[2],
		).result();

		const params = (payload ?? lastParams) as { tool_choice?: string; tools?: unknown[] };
		const params = (payload ?? mockState.lastParams) as { tool_choice?: string; tools?: unknown[] };
		expect(params.tool_choice).toBe("required");
		expect(Array.isArray(params.tools)).toBe(true);
		expect(params.tools?.length ?? 0).toBeGreaterThan(0);
	});

	it("omits strict when compat disables strict mode", async () => {
		const { compat: _compat, ...baseModel } = getModel("openai", "gpt-4o-mini")!;
		const model = {
			...baseModel,
			api: "openai-completions",
			compat: { supportsStrictMode: false },
		} as const;
		const tools: Tool[] = [
			{
				name: "ping",
				description: "Ping tool",
				parameters: Type.Object({
					ok: Type.Boolean(),
				}),
			},
		];
		let payload: unknown;

		await streamSimple(
			model,
			{
				messages: [
					{
						role: "user",
						content: "Call ping with ok=true",
						timestamp: Date.now(),
					},
				],
				tools,
			},
			{
				apiKey: "test",
				onPayload: (params: unknown) => {
					payload = params;
				},
			} as unknown as Parameters<typeof streamSimple>[2],
		).result();

		const params = (payload ?? mockState.lastParams) as { tools?: Array<{ function?: Record<string, unknown> }> };
		const tool = params.tools?.[0]?.function;
		expect(tool).toBeTruthy();
		expect(tool?.strict).toBeUndefined();
		expect("strict" in (tool ?? {})).toBe(false);
	});
});
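
The test now records the captured request through `mockState` created with `vi.hoisted`, because Vitest hoists `vi.mock` factories above the module's imports, so a plain top-level `let` would not exist yet when the factory is evaluated. The shape of the pattern, reduced to its essentials (same mocked module name as above):

const mockState = vi.hoisted(() => ({ lastParams: undefined as unknown }));
vi.mock("openai", () => {
	class FakeOpenAI { /* stores the create() params into mockState.lastParams */ }
	return { default: FakeOpenAI };
});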

@@ -32,6 +32,7 @@ const compat: Required<OpenAICompletionsCompat> = {
	thinkingFormat: "openai",
	openRouterRouting: {},
	vercelGatewayRouting: {},
	supportsStrictMode: true,
};

function buildToolResult(toolCallId: string, timestamp: number): ToolResultMessage {