mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-17 10:02:23 +00:00
fix(ai): normalize pipe-separated tool call IDs for cross-provider handoff
- Handle pipe-separated IDs from OpenAI Responses API in openai-completions provider. - Strip trailing underscores after truncation in openai-responses-shared (OpenAI Codex rejects them). - Add regression tests for tool call ID normalization. Fixes #1022.
This commit is contained in:
parent
4f004adefa
commit
605f6f494b
4 changed files with 308 additions and 2 deletions
|
|
@ -4,6 +4,7 @@
|
||||||
|
|
||||||
### Fixed
|
### Fixed
|
||||||
|
|
||||||
|
- Fixed cross-provider handoff failing when switching from OpenAI Responses API providers (github-copilot, openai-codex) to other providers due to pipe-separated tool call IDs not being normalized, and trailing underscores in truncated IDs being rejected by OpenAI Codex ([#1022](https://github.com/badlogic/pi-mono/issues/1022))
|
||||||
- Fixed 429 rate limit errors incorrectly triggering auto-compaction instead of retry with backoff ([#1038](https://github.com/badlogic/pi-mono/issues/1038))
|
- Fixed 429 rate limit errors incorrectly triggering auto-compaction instead of retry with backoff ([#1038](https://github.com/badlogic/pi-mono/issues/1038))
|
||||||
- Fixed Anthropic provider to handle `sensitive` stop_reason returned by API ([#978](https://github.com/badlogic/pi-mono/issues/978))
|
- Fixed Anthropic provider to handle `sensitive` stop_reason returned by API ([#978](https://github.com/badlogic/pi-mono/issues/978))
|
||||||
- Fixed DeepSeek API compatibility by detecting `deepseek.com` URLs and disabling unsupported `developer` role ([#1048](https://github.com/badlogic/pi-mono/issues/1048))
|
- Fixed DeepSeek API compatibility by detecting `deepseek.com` URLs and disabling unsupported `developer` role ([#1048](https://github.com/badlogic/pi-mono/issues/1048))
|
||||||
|
|
|
||||||
|
|
@ -497,6 +497,17 @@ export function convertMessages(
|
||||||
|
|
||||||
const normalizeToolCallId = (id: string): string => {
|
const normalizeToolCallId = (id: string): string => {
|
||||||
if (compat.requiresMistralToolIds) return normalizeMistralToolId(id);
|
if (compat.requiresMistralToolIds) return normalizeMistralToolId(id);
|
||||||
|
|
||||||
|
// Handle pipe-separated IDs from OpenAI Responses API
|
||||||
|
// Format: {call_id}|{id} where {id} can be 400+ chars with special chars (+, /, =)
|
||||||
|
// These come from providers like github-copilot, openai-codex, opencode
|
||||||
|
// Extract just the call_id part and normalize it
|
||||||
|
if (id.includes("|")) {
|
||||||
|
const [callId] = id.split("|");
|
||||||
|
// Sanitize to allowed chars and truncate to 40 chars (OpenAI limit)
|
||||||
|
return callId.replace(/[^a-zA-Z0-9_-]/g, "_").slice(0, 40);
|
||||||
|
}
|
||||||
|
|
||||||
if (model.provider === "openai") return id.length > 40 ? id.slice(0, 40) : id;
|
if (model.provider === "openai") return id.length > 40 ? id.slice(0, 40) : id;
|
||||||
// Copilot Claude models route to Claude backend which requires Anthropic ID format
|
// Copilot Claude models route to Claude backend which requires Anthropic ID format
|
||||||
if (model.provider === "github-copilot" && model.id.toLowerCase().includes("claude")) {
|
if (model.provider === "github-copilot" && model.id.toLowerCase().includes("claude")) {
|
||||||
|
|
|
||||||
|
|
@ -86,8 +86,11 @@ export function convertResponsesMessages<TApi extends Api>(
|
||||||
if (!sanitizedItemId.startsWith("fc")) {
|
if (!sanitizedItemId.startsWith("fc")) {
|
||||||
sanitizedItemId = `fc_${sanitizedItemId}`;
|
sanitizedItemId = `fc_${sanitizedItemId}`;
|
||||||
}
|
}
|
||||||
const normalizedCallId = sanitizedCallId.length > 64 ? sanitizedCallId.slice(0, 64) : sanitizedCallId;
|
// Truncate to 64 chars and strip trailing underscores (OpenAI Codex rejects them)
|
||||||
const normalizedItemId = sanitizedItemId.length > 64 ? sanitizedItemId.slice(0, 64) : sanitizedItemId;
|
let normalizedCallId = sanitizedCallId.length > 64 ? sanitizedCallId.slice(0, 64) : sanitizedCallId;
|
||||||
|
let normalizedItemId = sanitizedItemId.length > 64 ? sanitizedItemId.slice(0, 64) : sanitizedItemId;
|
||||||
|
normalizedCallId = normalizedCallId.replace(/_+$/, "");
|
||||||
|
normalizedItemId = normalizedItemId.replace(/_+$/, "");
|
||||||
return `${normalizedCallId}|${normalizedItemId}`;
|
return `${normalizedCallId}|${normalizedItemId}`;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
||||||
291
packages/ai/test/tool-call-id-normalization.test.ts
Normal file
291
packages/ai/test/tool-call-id-normalization.test.ts
Normal file
|
|
@ -0,0 +1,291 @@
|
||||||
|
/**
|
||||||
|
* Tool Call ID Normalization Tests
|
||||||
|
*
|
||||||
|
* Tests that tool call IDs from OpenAI Responses API (github-copilot, openai-codex, opencode)
|
||||||
|
* are properly normalized when sent to other providers.
|
||||||
|
*
|
||||||
|
* OpenAI Responses API generates IDs in format: {call_id}|{id}
|
||||||
|
* where {id} can be 400+ chars with special characters (+, /, =).
|
||||||
|
*
|
||||||
|
* Regression test for: https://github.com/badlogic/pi-mono/issues/1022
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { Type } from "@sinclair/typebox";
import { describe, expect, it } from "vitest";
import { getModel } from "../src/models.js";
import { completeSimple, getEnvApiKey } from "../src/stream.js";
import type { AssistantMessage, Message, Tool, ToolResultMessage } from "../src/types.js";
import { resolveApiKey } from "./oauth.js";

// Resolve API keys at module load (top-level await).
// When a credential is unavailable these resolve to a falsy value, and the
// it.skipIf(...) guards below skip the corresponding live tests.
const copilotToken = await resolveApiKey("github-copilot");
const openrouterKey = getEnvApiKey("openrouter");
const codexToken = await resolveApiKey("openai-codex");

// Simple echo tool for testing: a minimal tool definition whose only purpose
// is to make the model emit a tool call so we can observe the tool call ID.
const echoToolSchema = Type.Object({
	message: Type.String({ description: "Message to echo back" }),
});

const echoTool: Tool<typeof echoToolSchema> = {
	name: "echo",
	description: "Echoes the message back",
	parameters: echoToolSchema,
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test 1: Live cross-provider handoff
|
||||||
|
*
|
||||||
|
* 1. Use github-copilot gpt-5.2-codex to generate a tool call
|
||||||
|
* 2. Switch to openrouter openai/gpt-5.2-codex and complete
|
||||||
|
* 3. Switch to openai-codex gpt-5.2-codex and complete
|
||||||
|
*
|
||||||
|
* Both should succeed without "call_id too long" errors.
|
||||||
|
*/
|
||||||
|
// NOTE(review): these are live integration tests — they call real provider
// APIs and are skipped unless the relevant credentials resolved above.
describe("Tool Call ID Normalization - Live Handoff", () => {
	it.skipIf(!copilotToken || !openrouterKey)(
		"github-copilot -> openrouter should normalize pipe-separated IDs",
		async () => {
			const copilotModel = getModel("github-copilot", "gpt-5.2-codex");
			const openrouterModel = getModel("openrouter", "openai/gpt-5.2-codex");

			// Step 1: Generate tool call with github-copilot
			const userMessage: Message = {
				role: "user",
				content: "Use the echo tool to echo 'hello world'",
				timestamp: Date.now(),
			};

			const assistantResponse = await completeSimple(
				copilotModel,
				{
					systemPrompt: "You are a helpful assistant. Use the echo tool when asked.",
					messages: [userMessage],
					tools: [echoTool],
				},
				{ apiKey: copilotToken },
			);

			expect(assistantResponse.stopReason, `Copilot error: ${assistantResponse.errorMessage}`).toBe("toolUse");

			const toolCall = assistantResponse.content.find((c) => c.type === "toolCall");
			expect(toolCall).toBeDefined();
			expect(toolCall!.type).toBe("toolCall");

			// Verify it's a pipe-separated ID (OpenAI Responses format)
			if (toolCall?.type === "toolCall") {
				expect(toolCall.id).toContain("|");
				console.log(`Tool call ID from github-copilot: ${toolCall.id.slice(0, 80)}...`);
			}

			// Create tool result echoing the (un-normalized) pipe-separated ID
			// back into the conversation history.
			const toolResult: ToolResultMessage = {
				role: "toolResult",
				toolCallId: (toolCall as any).id,
				toolName: "echo",
				content: [{ type: "text", text: "hello world" }],
				isError: false,
				timestamp: Date.now(),
			};

			// Step 2: Complete with openrouter (uses openai-completions API)
			const openrouterResponse = await completeSimple(
				openrouterModel,
				{
					systemPrompt: "You are a helpful assistant.",
					messages: [
						userMessage,
						assistantResponse,
						toolResult,
						{ role: "user", content: "Say hi", timestamp: Date.now() },
					],
					tools: [echoTool],
				},
				{ apiKey: openrouterKey },
			);

			// Should NOT fail with "call_id too long" error
			expect(openrouterResponse.stopReason, `OpenRouter error: ${openrouterResponse.errorMessage}`).not.toBe(
				"error",
			);
			expect(openrouterResponse.errorMessage).toBeUndefined();
		},
		60000,
	);

	it.skipIf(!copilotToken || !codexToken)(
		"github-copilot -> openai-codex should normalize pipe-separated IDs",
		async () => {
			const copilotModel = getModel("github-copilot", "gpt-5.2-codex");
			const codexModel = getModel("openai-codex", "gpt-5.2-codex");

			// Step 1: Generate tool call with github-copilot
			const userMessage: Message = {
				role: "user",
				content: "Use the echo tool to echo 'test message'",
				timestamp: Date.now(),
			};

			const assistantResponse = await completeSimple(
				copilotModel,
				{
					systemPrompt: "You are a helpful assistant. Use the echo tool when asked.",
					messages: [userMessage],
					tools: [echoTool],
				},
				{ apiKey: copilotToken },
			);

			expect(assistantResponse.stopReason, `Copilot error: ${assistantResponse.errorMessage}`).toBe("toolUse");

			const toolCall = assistantResponse.content.find((c) => c.type === "toolCall");
			expect(toolCall).toBeDefined();

			// Create tool result carrying the raw copilot tool call ID
			const toolResult: ToolResultMessage = {
				role: "toolResult",
				toolCallId: (toolCall as any).id,
				toolName: "echo",
				content: [{ type: "text", text: "test message" }],
				isError: false,
				timestamp: Date.now(),
			};

			// Step 2: Complete with openai-codex (uses openai-codex-responses API)
			const codexResponse = await completeSimple(
				codexModel,
				{
					systemPrompt: "You are a helpful assistant.",
					messages: [
						userMessage,
						assistantResponse,
						toolResult,
						{ role: "user", content: "Say hi", timestamp: Date.now() },
					],
					tools: [echoTool],
				},
				{ apiKey: codexToken },
			);

			// Should NOT fail with ID validation error
			expect(codexResponse.stopReason, `Codex error: ${codexResponse.errorMessage}`).not.toBe("error");
			expect(codexResponse.errorMessage).toBeUndefined();
		},
		60000,
	);
});
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test 2: Prefilled context with exact failing IDs from issue #1022
|
||||||
|
*
|
||||||
|
* Uses the exact tool call ID format that caused the error:
|
||||||
|
* "call_xxx|very_long_base64_with_special_chars+/="
|
||||||
|
*/
|
||||||
|
// Deterministic regression tests: instead of generating a tool call live,
// prefill the conversation with the exact ID that failed in issue #1022.
describe("Tool Call ID Normalization - Prefilled Context", () => {
	// Exact tool call ID from issue #1022 JSONL.
	// Format is {call_id}|{id}: a short call_id followed by a very long
	// base64-like segment containing '+', '/', '=' — characters downstream
	// providers reject.
	const FAILING_TOOL_CALL_ID =
		"call_pAYbIr76hXIjncD9UE4eGfnS|t5nnb2qYMFWGSsr13fhCd1CaCu3t3qONEPuOudu4HSVEtA8YJSL6FAZUxvoOoD792VIJWl91g87EdqsCWp9krVsdBysQoDaf9lMCLb8BS4EYi4gQd5kBQBYLlgD71PYwvf+TbMD9J9/5OMD42oxSRj8H+vRf78/l2Xla33LWz4nOgsddBlbvabICRs8GHt5C9PK5keFtzyi3lsyVKNlfduK3iphsZqs4MLv4zyGJnvZo/+QzShyk5xnMSQX/f98+aEoNflEApCdEOXipipgeiNWnpFSHbcwmMkZoJhURNu+JEz3xCh1mrXeYoN5o+trLL3IXJacSsLYXDrYTipZZbJFRPAucgbnjYBC+/ZzJOfkwCs+Gkw7EoZR7ZQgJ8ma+9586n4tT4cI8DEhBSZsWMjrCt8dxKg==";

	// Build prefilled context with the failing ID: user request, an
	// assistant message that "came from" github-copilot via the
	// openai-responses API, the matching tool result, and a follow-up turn.
	function buildPrefilledMessages(): Message[] {
		const userMessage: Message = {
			role: "user",
			content: "Use the echo tool to echo 'hello'",
			timestamp: Date.now() - 2000,
		};

		const assistantMessage: AssistantMessage = {
			role: "assistant",
			content: [
				{
					type: "toolCall",
					id: FAILING_TOOL_CALL_ID,
					name: "echo",
					arguments: { message: "hello" },
				},
			],
			api: "openai-responses",
			provider: "github-copilot",
			model: "gpt-5.2-codex",
			// Plausible token usage; values are arbitrary and not asserted on.
			usage: {
				input: 100,
				output: 50,
				cacheRead: 0,
				cacheWrite: 0,
				totalTokens: 150,
				cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
			},
			stopReason: "toolUse",
			timestamp: Date.now() - 1500,
		};

		const toolResult: ToolResultMessage = {
			role: "toolResult",
			toolCallId: FAILING_TOOL_CALL_ID,
			toolName: "echo",
			content: [{ type: "text", text: "hello" }],
			isError: false,
			timestamp: Date.now() - 1000,
		};

		const followUpUser: Message = {
			role: "user",
			content: "Say hi",
			timestamp: Date.now(),
		};

		return [userMessage, assistantMessage, toolResult, followUpUser];
	}

	it.skipIf(!openrouterKey)(
		"openrouter should handle prefilled context with long pipe-separated IDs",
		async () => {
			const model = getModel("openrouter", "openai/gpt-5.2-codex");
			const messages = buildPrefilledMessages();

			const response = await completeSimple(
				model,
				{
					systemPrompt: "You are a helpful assistant.",
					messages,
					tools: [echoTool],
				},
				{ apiKey: openrouterKey },
			);

			// Should NOT fail with "call_id too long" error
			expect(response.stopReason, `OpenRouter error: ${response.errorMessage}`).not.toBe("error");
			if (response.errorMessage) {
				expect(response.errorMessage).not.toContain("call_id");
				expect(response.errorMessage).not.toContain("too long");
			}
		},
		30000,
	);

	it.skipIf(!codexToken)(
		"openai-codex should handle prefilled context with long pipe-separated IDs",
		async () => {
			const model = getModel("openai-codex", "gpt-5.2-codex");
			const messages = buildPrefilledMessages();

			const response = await completeSimple(
				model,
				{
					systemPrompt: "You are a helpful assistant.",
					messages,
					tools: [echoTool],
				},
				{ apiKey: codexToken },
			);

			// Should NOT fail with ID validation error
			expect(response.stopReason, `Codex error: ${response.errorMessage}`).not.toBe("error");
			if (response.errorMessage) {
				expect(response.errorMessage).not.toContain("id");
				expect(response.errorMessage).not.toContain("additional characters");
			}
		},
		30000,
	);
});
|
||||||
Loading…
Add table
Add a link
Reference in a new issue