Add image support in tool results across all providers

Tool results now use content blocks and can include both text and images.
All providers (Anthropic, Google, OpenAI Completions, OpenAI Responses)
correctly pass images from tool results to LLMs.

- Update ToolResultMessage type to use content blocks
- Add placeholder text for image-only tool results in Google/Anthropic
- OpenAI providers send tool result + follow-up user message with images
- Fix Anthropic JSON parsing for empty tool arguments
- Add comprehensive tests for image-only and text+image tool results
- Update README with tool result content blocks API
This commit is contained in:
Mario Zechner 2025-11-12 10:45:56 +01:00
parent 9dac37d836
commit 84dcab219b
37 changed files with 720 additions and 544 deletions

View file

@@ -377,20 +377,44 @@ function convertMessages(model: Model<"google-generative-ai">, context: Context)
parts,
});
} else if (msg.role === "toolResult") {
// Build parts array with functionResponse and/or images
const parts: Part[] = [];
// Extract text and image content
const textResult = msg.content
.filter((c) => c.type === "text")
.map((c) => (c as any).text)
.join("\n");
const imageBlocks = model.input.includes("image") ? msg.content.filter((c) => c.type === "image") : [];
// Always add functionResponse with text result (or placeholder if only images)
const hasText = textResult.length > 0;
const hasImages = imageBlocks.length > 0;
parts.push({
functionResponse: {
id: msg.toolCallId,
name: msg.toolName,
response: {
result: hasText ? sanitizeSurrogates(textResult) : hasImages ? "(see attached image)" : "",
isError: msg.isError,
},
},
});
// Add any images as inlineData parts
for (const imageBlock of imageBlocks) {
parts.push({
inlineData: {
mimeType: (imageBlock as any).mimeType,
data: (imageBlock as any).data,
},
});
}
contents.push({
role: "user",
parts: [
{
functionResponse: {
id: msg.toolCallId,
name: msg.toolName,
response: {
result: sanitizeSurrogates(msg.output),
isError: msg.isError,
},
},
},
],
parts,
});
}
}