feat(ai): add strictResponsesPairing for Azure OpenAI Responses API

Split OpenAICompat into OpenAICompletionsCompat and OpenAIResponsesCompat
for type-safe API-specific compat settings. Added strictResponsesPairing
option to suppress orphaned reasoning/tool calls on incomplete turns,
fixing 400 errors on Azure's Responses API which requires strict pairing.

Closes #768
This commit is contained in:
Mario Zechner 2026-01-18 20:15:26 +01:00
parent def9e4e9a9
commit d43930c818
17 changed files with 112 additions and 23 deletions

View file

@ -15,7 +15,7 @@ import type {
Context,
Message,
Model,
OpenAICompat,
OpenAICompletionsCompat,
StopReason,
StreamFunction,
StreamOptions,
@ -452,7 +452,7 @@ function maybeAddOpenRouterAnthropicCacheControl(
function convertMessages(
model: Model<"openai-completions">,
context: Context,
compat: Required<OpenAICompat>,
compat: Required<OpenAICompletionsCompat>,
): ChatCompletionMessageParam[] {
const params: ChatCompletionMessageParam[] = [];
@ -681,9 +681,9 @@ function mapStopReason(reason: ChatCompletionChunk.Choice["finish_reason"]): Sto
/**
* Detect compatibility settings from provider and baseUrl for known providers.
* Provider takes precedence over URL-based detection since it's explicitly configured.
* Returns a fully resolved OpenAICompat object with all fields set.
* Returns a fully resolved OpenAICompletionsCompat object with all fields set.
*/
function detectCompat(model: Model<"openai-completions">): Required<OpenAICompat> {
function detectCompat(model: Model<"openai-completions">): Required<OpenAICompletionsCompat> {
const provider = model.provider;
const baseUrl = model.baseUrl;
@ -725,7 +725,7 @@ function detectCompat(model: Model<"openai-completions">): Required<OpenAICompat
* Get resolved compatibility settings for a model.
* Uses explicit model.compat if provided, otherwise auto-detects from provider/URL.
*/
function getCompat(model: Model<"openai-completions">): Required<OpenAICompat> {
function getCompat(model: Model<"openai-completions">): Required<OpenAICompletionsCompat> {
const detected = detectCompat(model);
if (!model.compat) return detected;

View file

@ -461,10 +461,22 @@ function convertMessages(model: Model<"openai-responses">, context: Context): Re
}
} else if (msg.role === "assistant") {
const output: ResponseInput = [];
const strictResponsesPairing = model.compat?.strictResponsesPairing ?? false;
let isIncomplete = false;
let shouldReplayReasoning = msg.stopReason !== "error";
let allowToolCalls = msg.stopReason !== "error";
if (strictResponsesPairing) {
isIncomplete = msg.stopReason === "error" || msg.stopReason === "aborted";
const hasPairedContent = msg.content.some(
(b) => b.type === "toolCall" || (b.type === "text" && (b as TextContent).text.trim().length > 0),
);
shouldReplayReasoning = !isIncomplete && hasPairedContent;
allowToolCalls = !isIncomplete;
}
for (const block of msg.content) {
// Do not replay thinking blocks for errored turns — or, under strict pairing, for incomplete turns or turns with no paired message/tool-call content
if (block.type === "thinking" && msg.stopReason !== "error") {
if (block.type === "thinking" && shouldReplayReasoning) {
if (block.thinkingSignature) {
const reasoningItem = JSON.parse(block.thinkingSignature);
output.push(reasoningItem);
@ -475,6 +487,11 @@ function convertMessages(model: Model<"openai-responses">, context: Context): Re
let msgId = textBlock.textSignature;
if (!msgId) {
msgId = `msg_${msgIndex}`;
}
// For incomplete turns, never replay the original message id (if any).
// Generate a stable synthetic id so strict pairing providers do not expect a paired reasoning item.
if (strictResponsesPairing && isIncomplete) {
msgId = `msg_${msgIndex}_${shortHash(textBlock.text)}`;
} else if (msgId.length > 64) {
msgId = `msg_${shortHash(msgId)}`;
}
@ -486,7 +503,7 @@ function convertMessages(model: Model<"openai-responses">, context: Context): Re
id: msgId,
} satisfies ResponseOutputMessage);
// Do not submit toolcall blocks for errored turns — or, under strict pairing, for aborted (incomplete) turns either
} else if (block.type === "toolCall" && msg.stopReason !== "error") {
} else if (block.type === "toolCall" && allowToolCalls) {
const toolCall = block as ToolCall;
output.push({
type: "function_call",

View file

@ -204,10 +204,10 @@ export type AssistantMessageEvent =
| { type: "error"; reason: Extract<StopReason, "aborted" | "error">; error: AssistantMessage };
/**
* Compatibility settings for openai-completions API.
* Compatibility settings for OpenAI-compatible completions APIs.
* Use this to override URL-based auto-detection for custom providers.
*/
export interface OpenAICompat {
export interface OpenAICompletionsCompat {
/** Whether the provider supports the `store` field. Default: auto-detected from URL. */
supportsStore?: boolean;
/** Whether the provider supports the `developer` role (vs `system`). Default: auto-detected from URL. */
@ -230,6 +230,12 @@ export interface OpenAICompat {
thinkingFormat?: "openai" | "zai";
}
/**
 * Compatibility settings for OpenAI Responses APIs.
 * Use this to override behavior for providers whose Responses implementation
 * deviates from OpenAI's (e.g. Azure).
 */
export interface OpenAIResponsesCompat {
/**
 * Whether history replay must strictly pair reasoning items with their
 * follow-up message/tool-call items. When true, reasoning and tool-call
 * blocks from incomplete turns (stopReason "error" or "aborted") are
 * suppressed on replay, and synthetic message ids are generated for such
 * turns, avoiding 400 errors from providers (like Azure) that reject
 * orphaned items. Default: false.
 */
strictResponsesPairing?: boolean;
}
// Model interface for the unified model system
export interface Model<TApi extends Api> {
id: string;
@ -248,6 +254,10 @@ export interface Model<TApi extends Api> {
contextWindow: number;
maxTokens: number;
headers?: Record<string, string>;
/** Compatibility overrides for openai-completions API. If not set, auto-detected from baseUrl. */
compat?: TApi extends "openai-completions" ? OpenAICompat : never;
/** Compatibility overrides for OpenAI-compatible APIs. If not set, auto-detected from baseUrl. */
compat?: TApi extends "openai-completions"
? OpenAICompletionsCompat
: TApi extends "openai-responses"
? OpenAIResponsesCompat
: never;
}