mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-19 23:01:32 +00:00
fix(ai): skip errored/aborted assistant messages in transform-messages
Fixes OpenAI Responses 400 error 'reasoning without following item' by skipping errored/aborted assistant messages entirely rather than filtering at the provider level. This covers openai-responses, openai-codex-responses, and future providers. Removes strictResponsesPairing compat option (no longer needed). Closes #838
This commit is contained in:
parent
abb1775ff7
commit
2d27a2c728
10 changed files with 109 additions and 52 deletions
|
|
@@ -329,7 +329,7 @@ function convertAssistantMessage(msg: AssistantMessage): unknown[] {
|
|||
const output: unknown[] = [];
|
||||
|
||||
for (const block of msg.content) {
|
||||
if (block.type === "thinking" && msg.stopReason !== "error" && block.thinkingSignature) {
|
||||
if (block.type === "thinking" && block.thinkingSignature) {
|
||||
output.push(JSON.parse(block.thinkingSignature));
|
||||
} else if (block.type === "text") {
|
||||
output.push({
|
||||
|
|
@@ -338,7 +338,7 @@ function convertAssistantMessage(msg: AssistantMessage): unknown[] {
|
|||
content: [{ type: "output_text", text: sanitizeSurrogates(block.text), annotations: [] }],
|
||||
status: "completed",
|
||||
});
|
||||
} else if (block.type === "toolCall" && msg.stopReason !== "error") {
|
||||
} else if (block.type === "toolCall") {
|
||||
const [callId, id] = block.id.split("|");
|
||||
output.push({
|
||||
type: "function_call",
|
||||
|
|
|
|||
|
|
@@ -478,22 +478,9 @@ function convertMessages(model: Model<"openai-responses">, context: Context): Re
|
|||
}
|
||||
} else if (msg.role === "assistant") {
|
||||
const output: ResponseInput = [];
|
||||
const strictResponsesPairing = model.compat?.strictResponsesPairing ?? false;
|
||||
let isIncomplete = false;
|
||||
let shouldReplayReasoning = msg.stopReason !== "error";
|
||||
let allowToolCalls = msg.stopReason !== "error";
|
||||
if (strictResponsesPairing) {
|
||||
isIncomplete = msg.stopReason === "error" || msg.stopReason === "aborted";
|
||||
const hasPairedContent = msg.content.some(
|
||||
(b) => b.type === "toolCall" || (b.type === "text" && (b as TextContent).text.trim().length > 0),
|
||||
);
|
||||
shouldReplayReasoning = !isIncomplete && hasPairedContent;
|
||||
allowToolCalls = !isIncomplete;
|
||||
}
|
||||
|
||||
for (const block of msg.content) {
|
||||
// Do not submit thinking blocks if the completion had an error (i.e. abort)
|
||||
if (block.type === "thinking" && shouldReplayReasoning) {
|
||||
if (block.type === "thinking") {
|
||||
if (block.thinkingSignature) {
|
||||
const reasoningItem = JSON.parse(block.thinkingSignature);
|
||||
output.push(reasoningItem);
|
||||
|
|
@@ -504,11 +491,6 @@ function convertMessages(model: Model<"openai-responses">, context: Context): Re
|
|||
let msgId = textBlock.textSignature;
|
||||
if (!msgId) {
|
||||
msgId = `msg_${msgIndex}`;
|
||||
}
|
||||
// For incomplete turns, never replay the original message id (if any).
|
||||
// Generate a stable synthetic id so strict pairing providers do not expect a paired reasoning item.
|
||||
if (strictResponsesPairing && isIncomplete) {
|
||||
msgId = `msg_${msgIndex}_${shortHash(textBlock.text)}`;
|
||||
} else if (msgId.length > 64) {
|
||||
msgId = `msg_${shortHash(msgId)}`;
|
||||
}
|
||||
|
|
@@ -519,8 +501,7 @@ function convertMessages(model: Model<"openai-responses">, context: Context): Re
|
|||
status: "completed",
|
||||
id: msgId,
|
||||
} satisfies ResponseOutputMessage);
|
||||
// Do not submit toolcall blocks if the completion had an error (i.e. abort)
|
||||
} else if (block.type === "toolCall" && allowToolCalls) {
|
||||
} else if (block.type === "toolCall") {
|
||||
const toolCall = block as ToolCall;
|
||||
output.push({
|
||||
type: "function_call",
|
||||
|
|
|
|||
|
|
@@ -118,27 +118,23 @@ export function transformMessages<TApi extends Api>(
|
|||
existingToolResultIds = new Set();
|
||||
}
|
||||
|
||||
// Track tool calls from this assistant message
|
||||
// Don't track tool calls from errored messages - they will be dropped by
|
||||
// provider-specific converters, so we shouldn't create synthetic results for them
|
||||
// Skip errored/aborted assistant messages entirely.
|
||||
// These are incomplete turns that shouldn't be replayed:
|
||||
// - May have partial content (reasoning without message, incomplete tool calls)
|
||||
// - Replaying them can cause API errors (e.g., OpenAI "reasoning without following item")
|
||||
// - The model should retry from the last valid state
|
||||
const assistantMsg = msg as AssistantMessage;
|
||||
const toolCalls =
|
||||
assistantMsg.stopReason === "error"
|
||||
? []
|
||||
: (assistantMsg.content.filter((b) => b.type === "toolCall") as ToolCall[]);
|
||||
if (assistantMsg.stopReason === "error" || assistantMsg.stopReason === "aborted") {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Track tool calls from this assistant message
|
||||
const toolCalls = assistantMsg.content.filter((b) => b.type === "toolCall") as ToolCall[];
|
||||
if (toolCalls.length > 0) {
|
||||
pendingToolCalls = toolCalls;
|
||||
existingToolResultIds = new Set();
|
||||
}
|
||||
|
||||
// Skip empty assistant messages (no content and no tool calls)
|
||||
// This handles error responses (e.g., 429/500) that produced no content
|
||||
// All providers already filter these in convertMessages, but we do it here
|
||||
// centrally to prevent issues with the tool_use -> tool_result chain
|
||||
if (assistantMsg.content.length === 0 && toolCalls.length === 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
result.push(msg);
|
||||
} else if (msg.role === "toolResult") {
|
||||
existingToolResultIds.add(msg.toolCallId);
|
||||
|
|
|
|||
|
|
@@ -236,8 +236,7 @@ export interface OpenAICompletionsCompat {
|
|||
|
||||
/** Compatibility settings for OpenAI Responses APIs. */
|
||||
export interface OpenAIResponsesCompat {
|
||||
/** Whether OpenAI Responses history replay requires strict reasoning/message pairing (for providers like Azure). */
|
||||
strictResponsesPairing?: boolean;
|
||||
// Reserved for future use
|
||||
}
|
||||
|
||||
// Model interface for the unified model system
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue