fix(ai): handle call arguments done on OpenAI-compatible endpoints

Fixes a bug encountered when running GLM-4.7-Flash hosted by LM Studio, in
which the provider sends tool call arguments via
`response.function_call_arguments.done` events instead of streaming them
via `response.function_call_arguments.delta` events. The final
`response.output_item.done` event then contains empty `{}` arguments.
The code only handled delta events, so tool calls failed with validation
errors like `"must have required property 'command'"`.

Full disclosure: Opus triaged the bug and provided the fix (by adding
logging statements around the request/response exchange with the upstream
provider, LM Studio). I can provide prompts/transcripts on request, and I
acknowledge that I'm not an expert in Pi internals at this time.
This commit is contained in:
Willi Ballenthin 2026-01-23 13:48:54 +01:00
parent 73734a23a1
commit fb364c89bf

View file

@@ -231,6 +231,13 @@ export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
});
}
}
// Handle function call arguments done (some providers send this instead of deltas)
else if (event.type === "response.function_call_arguments.done") {
if (currentItem?.type === "function_call" && currentBlock?.type === "toolCall") {
currentBlock.partialJson = event.arguments;
currentBlock.arguments = parseStreamingJson(currentBlock.partialJson);
}
}
// Handle output item completion
else if (event.type === "response.output_item.done") {
const item = event.item;
@@ -256,13 +263,17 @@ export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
});
currentBlock = null;
} else if (item.type === "function_call") {
const args =
currentBlock?.type === "toolCall" && currentBlock.partialJson
? JSON.parse(currentBlock.partialJson)
: JSON.parse(item.arguments);
const toolCall: ToolCall = {
type: "toolCall",
id: `${item.call_id}|${item.id}`,
name: item.name,
arguments: JSON.parse(item.arguments),
arguments: args,
};
currentBlock = null;
stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
}
}