Merge branch 'feat/use-mistral-sdk'

This commit is contained in:
Mario Zechner 2026-03-05 23:49:29 +01:00
commit a31065166d
17 changed files with 728 additions and 171 deletions

View file

@@ -413,7 +413,6 @@ describe("Context overflow error handling", () => {
// =============================================================================
// Mistral
// Expected pattern: TBD - need to test actual error message
// =============================================================================
describe.skipIf(!process.env.MISTRAL_API_KEY)("Mistral", () => {
@@ -423,6 +422,7 @@ describe("Context overflow error handling", () => {
logResult(result);
expect(result.stopReason).toBe("error");
expect(result.errorMessage).toMatch(/too large for model with \d+ maximum context length/i);
expect(isContextOverflow(result.response, model.contextWindow)).toBe(true);
}, 120000);
});

View file

@@ -291,11 +291,11 @@ describe("Tool Results with Images", () => {
describe.skipIf(!process.env.MISTRAL_API_KEY)("Mistral Provider (pixtral-12b)", () => {
const llm = getModel("mistral", "pixtral-12b");
it("should handle tool result with only image", { retry: 3, timeout: 30000 }, async () => {
it("should handle tool result with only image", { retry: 5, timeout: 30000 }, async () => {
await handleToolWithImageResult(llm);
});
it("should handle tool result with text and image", { retry: 3, timeout: 30000 }, async () => {
it("should handle tool result with text and image", { retry: 5, timeout: 30000 }, async () => {
await handleToolWithTextAndImageResult(llm);
});
});

View file

@@ -29,7 +29,6 @@ const compat: Required<OpenAICompletionsCompat> = {
requiresToolResultName: false,
requiresAssistantAfterToolResult: false,
requiresThinkingAsText: false,
requiresMistralToolIds: false,
thinkingFormat: "openai",
openRouterRouting: {},
vercelGatewayRouting: {},

View file

@@ -745,34 +745,30 @@ describe("Generate E2E Tests", () => {
});
});
describe.skipIf(!process.env.MISTRAL_API_KEY)(
"Mistral Provider (devstral-medium-latest via OpenAI Completions)",
() => {
const llm = getModel("mistral", "devstral-medium-latest");
describe.skipIf(!process.env.MISTRAL_API_KEY)("Mistral Provider (devstral-medium-latest)", () => {
const llm = getModel("mistral", "devstral-medium-latest");
it("should complete basic text generation", { retry: 3 }, async () => {
await basicTextGeneration(llm);
});
it("should complete basic text generation", { retry: 3 }, async () => {
await basicTextGeneration(llm);
});
it("should handle tool calling", { retry: 3 }, async () => {
await handleToolCall(llm);
});
it("should handle tool calling", { retry: 3 }, async () => {
await handleToolCall(llm);
});
it("should handle streaming", { retry: 3 }, async () => {
await handleStreaming(llm);
});
it("should handle streaming", { retry: 3 }, async () => {
await handleStreaming(llm);
});
it("should handle thinking mode", { retry: 3 }, async () => {
// FIXME Skip for now, getting a 422 status code, need to test with official SDK
// const llm = getModel("mistral", "magistral-medium-latest");
// await handleThinking(llm, { reasoningEffort: "medium" });
});
it("should handle thinking mode", { retry: 3 }, async () => {
const llm = getModel("mistral", "magistral-medium-latest");
await handleThinking(llm, { reasoningEffort: "medium" });
});
it("should handle multi-turn with thinking and tools", { retry: 3 }, async () => {
await multiTurn(llm, { reasoningEffort: "medium" });
});
},
);
it("should handle multi-turn with thinking and tools", { retry: 3 }, async () => {
await multiTurn(llm, { reasoningEffort: "medium" });
});
});
describe.skipIf(!process.env.MISTRAL_API_KEY)("Mistral Provider (pixtral-12b with image support)", () => {
const llm = getModel("mistral", "pixtral-12b");

View file

@@ -55,6 +55,7 @@ async function testTokensOnAbort<TApi extends Api>(llm: Model<TApi>, options: St
// MiniMax reports input tokens but not output tokens when aborted.
if (
llm.api === "openai-completions" ||
llm.api === "mistral-conversations" ||
llm.api === "openai-responses" ||
llm.api === "azure-openai-responses" ||
llm.api === "openai-codex-responses" ||