mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-17 07:03:25 +00:00
Fix Mistral 400 errors after aborted assistant messages
- Skip empty assistant messages (no content, no tool calls) to avoid Mistral's "Assistant message must have either content or tool_calls" error
- Remove synthetic assistant bridge message after tool results (Mistral no longer requires this as of Dec 2024)
- Add test for empty assistant message handling

Follow-up to #165
This commit is contained in:
parent
99b4b1aca0
commit
76312ea7e8
2 changed files with 138 additions and 11 deletions
|
|
@ -369,15 +369,6 @@ function convertMessages(
|
||||||
let lastRole: string | null = null;
|
let lastRole: string | null = null;
|
||||||
|
|
||||||
for (const msg of transformedMessages) {
|
for (const msg of transformedMessages) {
|
||||||
// Some providers (e.g. Mistral) don't allow user messages directly after tool results
|
|
||||||
// Insert a synthetic assistant message to bridge the gap
|
|
||||||
if (compat.requiresAssistantAfterToolResult && lastRole === "toolResult" && msg.role === "user") {
|
|
||||||
params.push({
|
|
||||||
role: "assistant",
|
|
||||||
content: "I have processed the tool results.",
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
if (msg.role === "user") {
|
if (msg.role === "user") {
|
||||||
if (typeof msg.content === "string") {
|
if (typeof msg.content === "string") {
|
||||||
params.push({
|
params.push({
|
||||||
|
|
@ -455,7 +446,16 @@ function convertMessages(
|
||||||
},
|
},
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
if (assistantMsg.content === null && !assistantMsg.tool_calls) {
|
// Skip assistant messages that have no content and no tool calls.
|
||||||
|
// Mistral explicitly requires "either content or tool_calls, but not none".
|
||||||
|
// Other providers also don't accept empty assistant messages.
|
||||||
|
// This handles aborted assistant responses that got no content.
|
||||||
|
const content = assistantMsg.content;
|
||||||
|
const hasContent =
|
||||||
|
content !== null &&
|
||||||
|
content !== undefined &&
|
||||||
|
(typeof content === "string" ? content.length > 0 : content.length > 0);
|
||||||
|
if (!hasContent && !assistantMsg.tool_calls) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
params.push(assistantMsg);
|
params.push(assistantMsg);
|
||||||
|
|
@ -570,7 +570,7 @@ function detectCompatFromUrl(baseUrl: string): Required<OpenAICompat> {
|
||||||
supportsReasoningEffort: !isGrok,
|
supportsReasoningEffort: !isGrok,
|
||||||
maxTokensField: useMaxTokens ? "max_tokens" : "max_completion_tokens",
|
maxTokensField: useMaxTokens ? "max_tokens" : "max_completion_tokens",
|
||||||
requiresToolResultName: isMistral,
|
requiresToolResultName: isMistral,
|
||||||
requiresAssistantAfterToolResult: isMistral,
|
requiresAssistantAfterToolResult: false, // Mistral no longer requires this as of Dec 2024
|
||||||
requiresThinkingAsText: isMistral,
|
requiresThinkingAsText: isMistral,
|
||||||
requiresMistralToolIds: isMistral,
|
requiresMistralToolIds: isMistral,
|
||||||
};
|
};
|
||||||
|
|
|
||||||
127
packages/ai/test/mistral-empty-assistant.test.ts
Normal file
127
packages/ai/test/mistral-empty-assistant.test.ts
Normal file
|
|
@ -0,0 +1,127 @@
|
||||||
|
import { Mistral } from "@mistralai/mistralai";
|
||||||
|
import { Type } from "@sinclair/typebox";
|
||||||
|
import { describe, expect, it } from "vitest";
|
||||||
|
import { getModel } from "../src/models.js";
|
||||||
|
import { streamSimple } from "../src/stream.js";
|
||||||
|
import type { AssistantMessage, Context, ToolCall, ToolResultMessage, UserMessage } from "../src/types.js";
|
||||||
|
|
||||||
|
describe.skipIf(!process.env.MISTRAL_API_KEY)("Mistral Empty Assistant Message", () => {
|
||||||
|
it("verifies SDK rejects empty assistant messages", async () => {
|
||||||
|
// Verify the raw SDK behavior - empty assistant messages fail
|
||||||
|
const client = new Mistral({ apiKey: process.env.MISTRAL_API_KEY });
|
||||||
|
|
||||||
|
// This should fail - empty assistant message
|
||||||
|
try {
|
||||||
|
await client.chat.complete({
|
||||||
|
model: "devstral-medium-latest",
|
||||||
|
messages: [
|
||||||
|
{ role: "user", content: "Hello" },
|
||||||
|
{ role: "assistant", content: "" }, // Empty - should fail
|
||||||
|
{ role: "user", content: "Are you there?" },
|
||||||
|
],
|
||||||
|
});
|
||||||
|
expect.fail("Should have thrown an error");
|
||||||
|
} catch (error: any) {
|
||||||
|
expect(error.message).toContain("Assistant message must have either content or tool_calls");
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it("skips empty assistant messages to avoid 400 errors", async () => {
|
||||||
|
const model = getModel("mistral", "devstral-medium-latest");
|
||||||
|
if (!model) throw new Error("Model not found");
|
||||||
|
|
||||||
|
// Build a context with an aborted assistant message
|
||||||
|
const messages: (UserMessage | AssistantMessage | ToolResultMessage)[] = [
|
||||||
|
{
|
||||||
|
role: "user",
|
||||||
|
content: "Hello, read a file for me",
|
||||||
|
timestamp: Date.now(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
role: "assistant",
|
||||||
|
content: [
|
||||||
|
{
|
||||||
|
type: "toolCall",
|
||||||
|
id: "test12345",
|
||||||
|
name: "read",
|
||||||
|
arguments: { path: "/test.txt" },
|
||||||
|
} as ToolCall,
|
||||||
|
],
|
||||||
|
api: "openai-completions",
|
||||||
|
provider: "mistral",
|
||||||
|
model: "devstral-medium-latest",
|
||||||
|
usage: {
|
||||||
|
input: 100,
|
||||||
|
output: 20,
|
||||||
|
cacheRead: 0,
|
||||||
|
cacheWrite: 0,
|
||||||
|
totalTokens: 120,
|
||||||
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
|
||||||
|
},
|
||||||
|
stopReason: "toolUse",
|
||||||
|
timestamp: Date.now(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
role: "toolResult",
|
||||||
|
toolCallId: "test12345",
|
||||||
|
toolName: "read",
|
||||||
|
content: [{ type: "text", text: "File content here..." }],
|
||||||
|
isError: false,
|
||||||
|
timestamp: Date.now(),
|
||||||
|
},
|
||||||
|
// This is the aborted assistant message - empty content, no tool calls
|
||||||
|
{
|
||||||
|
role: "assistant",
|
||||||
|
content: [], // Empty - simulates aborted
|
||||||
|
api: "openai-completions",
|
||||||
|
provider: "mistral",
|
||||||
|
model: "devstral-medium-latest",
|
||||||
|
usage: {
|
||||||
|
input: 0,
|
||||||
|
output: 0,
|
||||||
|
cacheRead: 0,
|
||||||
|
cacheWrite: 0,
|
||||||
|
totalTokens: 0,
|
||||||
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
|
||||||
|
},
|
||||||
|
stopReason: "aborted",
|
||||||
|
timestamp: Date.now(),
|
||||||
|
errorMessage: "Request was aborted.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
role: "user",
|
||||||
|
content: "Are you still there?",
|
||||||
|
timestamp: Date.now(),
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
const context: Context = {
|
||||||
|
systemPrompt: "You are a helpful assistant.",
|
||||||
|
messages,
|
||||||
|
tools: [
|
||||||
|
{
|
||||||
|
name: "read",
|
||||||
|
description: "Read file contents",
|
||||||
|
parameters: Type.Object({
|
||||||
|
path: Type.String(),
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
],
|
||||||
|
};
|
||||||
|
|
||||||
|
// This should NOT fail with 400 after our fix
|
||||||
|
const response = await streamSimple(model, context);
|
||||||
|
const result = await response.result();
|
||||||
|
|
||||||
|
console.log("Result:", JSON.stringify(result, null, 2));
|
||||||
|
|
||||||
|
expect(result.stopReason).not.toBe("error");
|
||||||
|
expect(result.errorMessage).toBeUndefined();
|
||||||
|
|
||||||
|
// Verify the assistant can respond
|
||||||
|
const textContent = result.content.find((c) => c.type === "text");
|
||||||
|
expect(textContent).toBeDefined();
|
||||||
|
|
||||||
|
console.log("Test passed - pi-ai provider handled aborted message correctly");
|
||||||
|
}, 60000);
|
||||||
|
});
|
||||||
Loading…
Add table
Add a link
Reference in a new issue