clanker-agent/packages/ai/test/openai-completions-tool-result-images.test.ts

import { describe, expect, it } from "vitest";
import { getModel } from "../src/models.js";
import { convertMessages } from "../src/providers/openai-completions.js";
import type {
  AssistantMessage,
  Context,
  Model,
  OpenAICompletionsCompat,
  ToolResultMessage,
  Usage,
} from "../src/types.js";

const emptyUsage: Usage = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
  totalTokens: 0,
  cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
};

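// Every OpenAICompletionsCompat field is spelled out (Required<...>) so the
// conversion under test does not depend on any compat defaults.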
const compat: Required<OpenAICompletionsCompat> = {
  supportsStore: true,
  supportsDeveloperRole: true,
  supportsReasoningEffort: true,
  reasoningEffortMap: {},
  supportsUsageInStreaming: true,
  maxTokensField: "max_completion_tokens",
  requiresToolResultName: false,
  requiresAssistantAfterToolResult: false,
  requiresThinkingAsText: false,
  thinkingFormat: "openai",
  openRouterRouting: {},
  vercelGatewayRouting: {},
  supportsStrictMode: true,
};

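// Builds a tool result whose content mixes a plain-text part with a
// base64-encoded placeholder payload declared as image/png.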
function buildToolResult(
  toolCallId: string,
  timestamp: number,
): ToolResultMessage {
  return {
    role: "toolResult",
    toolCallId,
    toolName: "read",
    content: [
      { type: "text", text: "Read image file [image/png]" },
      { type: "image", data: "ZmFrZQ==", mimeType: "image/png" },
    ],
    isError: false,
    timestamp,
  };
}

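// Chat Completions `tool` role messages cannot carry image parts, so
// convertMessages is expected to collect tool-result images and append them
// as a trailing `user` message containing `image_url` parts (asserted below).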
describe("openai-completions convertMessages", () => {
  it("batches tool-result images after consecutive tool results", () => {
    const baseModel = getModel("openai", "gpt-4o-mini");
    const model: Model<"openai-completions"> = {
      ...baseModel,
      api: "openai-completions",
      input: ["text", "image"],
    };

    const now = Date.now();
    const assistantMessage: AssistantMessage = {
      role: "assistant",
      content: [
        {
          type: "toolCall",
          id: "tool-1",
          name: "read",
          arguments: { path: "img-1.png" },
        },
        {
          type: "toolCall",
          id: "tool-2",
          name: "read",
          arguments: { path: "img-2.png" },
        },
      ],
      api: model.api,
      provider: model.provider,
      model: model.id,
      usage: emptyUsage,
      stopReason: "toolUse",
      timestamp: now,
    };

    const context: Context = {
      messages: [
        { role: "user", content: "Read the images", timestamp: now - 2 },
        assistantMessage,
        buildToolResult("tool-1", now + 1),
        buildToolResult("tool-2", now + 2),
      ],
    };

    const messages = convertMessages(model, context, compat);
    const roles = messages.map((message) => message.role);
    expect(roles).toEqual(["user", "assistant", "tool", "tool", "user"]);

    const imageMessage = messages[messages.length - 1];
    expect(imageMessage.role).toBe("user");
    expect(Array.isArray(imageMessage.content)).toBe(true);

    const imageParts = (
      imageMessage.content as Array<{ type?: string }>
    ).filter((part) => part?.type === "image_url");
    expect(imageParts.length).toBe(2);
  });
});