refactor(ai): migrate mistral provider to conversations sdk

This commit is contained in:
Abdeslam Yassine Agmar 2026-03-03 11:33:17 +00:00
parent 9a4fe52654
commit eb9f1183ad
No known key found for this signature in database
GPG key ID: 146F5C7179A49F57
17 changed files with 723 additions and 171 deletions

View file

@ -626,6 +626,7 @@ The library uses a registry of API implementations. Built-in APIs include:
- **`google-generative-ai`**: Google Generative AI API (`streamGoogle`, `GoogleOptions`)
- **`google-gemini-cli`**: Google Cloud Code Assist API (`streamGoogleGeminiCli`, `GoogleGeminiCliOptions`)
- **`google-vertex`**: Google Vertex AI API (`streamGoogleVertex`, `GoogleVertexOptions`)
- **`mistral-conversations`**: Mistral Conversations API (`streamMistral`, `MistralOptions`)
- **`openai-completions`**: OpenAI Chat Completions API (`streamOpenAICompletions`, `OpenAICompletionsOptions`)
- **`openai-responses`**: OpenAI Responses API (`streamOpenAIResponses`, `OpenAIResponsesOptions`)
- **`openai-codex-responses`**: OpenAI Codex Responses API (`streamOpenAICodexResponses`, `OpenAICodexResponsesOptions`)
@ -638,7 +639,8 @@ A **provider** offers models through a specific API. For example:
- **Anthropic** models use the `anthropic-messages` API
- **Google** models use the `google-generative-ai` API
- **OpenAI** models use the `openai-responses` API
- **Mistral, xAI, Cerebras, Groq, etc.** models use the `openai-completions` API (OpenAI-compatible)
- **Mistral** models use the `mistral-conversations` API
- **xAI, Cerebras, Groq, etc.** models use the `openai-completions` API (OpenAI-compatible)
### Querying Providers and Models
@ -728,7 +730,7 @@ const response = await stream(ollamaModel, context, {
### OpenAI Compatibility Settings
The `openai-completions` API is implemented by many providers with minor differences. By default, the library auto-detects compatibility settings based on `baseUrl` for known providers (Cerebras, xAI, Mistral, Chutes, etc.). For custom proxies or unknown endpoints, you can override these settings via the `compat` field. For `openai-responses` models, the compat field only supports Responses-specific flags.
The `openai-completions` API is implemented by many providers with minor differences. By default, the library auto-detects compatibility settings based on `baseUrl` for a small set of known OpenAI-compatible providers (Cerebras, xAI, Chutes, DeepSeek, zAi, OpenCode, etc.). For custom proxies or unknown endpoints, you can override these settings via the `compat` field. For `openai-responses` models, the compat field only supports Responses-specific flags.
```typescript
interface OpenAICompletionsCompat {
@ -741,7 +743,6 @@ interface OpenAICompletionsCompat {
requiresToolResultName?: boolean; // Whether tool results require the `name` field (default: false)
requiresAssistantAfterToolResult?: boolean; // Whether tool results must be followed by an assistant message (default: false)
requiresThinkingAsText?: boolean; // Whether thinking blocks must be converted to text (default: false)
requiresMistralToolIds?: boolean; // Whether tool call IDs must be normalized to Mistral format (default: false)
thinkingFormat?: 'openai' | 'zai' | 'qwen'; // Format for reasoning param: 'openai' uses reasoning_effort, 'zai' uses thinking: { type: "enabled" }, 'qwen' uses enable_thinking: boolean (default: openai)
openRouterRouting?: OpenRouterRouting; // OpenRouter routing preferences (default: {})
vercelGatewayRouting?: VercelGatewayRouting; // Vercel AI Gateway routing preferences (default: {})

View file

@ -25,7 +25,7 @@
"@anthropic-ai/sdk": "^0.73.0",
"@aws-sdk/client-bedrock-runtime": "^3.983.0",
"@google/genai": "^1.40.0",
"@mistralai/mistralai": "1.10.0",
"@mistralai/mistralai": "1.14.1",
"@sinclair/typebox": "^0.34.41",
"ajv": "^8.17.1",
"ajv-formats": "^3.0.1",

View file

@ -414,9 +414,9 @@ async function loadModelsDevData(): Promise<Model<any>[]> {
models.push({
id: modelId,
name: m.name || modelId,
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: m.reasoning === true,
input: m.modalities?.input?.includes("image") ? ["text", "image"] : ["text"],
cost: {

View file

@ -9,6 +9,7 @@ export * from "./providers/azure-openai-responses.js";
export * from "./providers/google.js";
export * from "./providers/google-gemini-cli.js";
export * from "./providers/google-vertex.js";
export * from "./providers/mistral.js";
export * from "./providers/openai-completions.js";
export * from "./providers/openai-responses.js";
export * from "./providers/register-builtins.js";

View file

@ -4485,9 +4485,9 @@ export const MODELS = {
"codestral-latest": {
id: "codestral-latest",
name: "Codestral",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text"],
cost: {
@ -4498,13 +4498,13 @@ export const MODELS = {
},
contextWindow: 256000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"devstral-2512": {
id: "devstral-2512",
name: "Devstral 2",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text"],
cost: {
@ -4515,13 +4515,13 @@ export const MODELS = {
},
contextWindow: 262144,
maxTokens: 262144,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"devstral-medium-2507": {
id: "devstral-medium-2507",
name: "Devstral Medium",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text"],
cost: {
@ -4532,13 +4532,13 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"devstral-medium-latest": {
id: "devstral-medium-latest",
name: "Devstral 2",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text"],
cost: {
@ -4549,13 +4549,13 @@ export const MODELS = {
},
contextWindow: 262144,
maxTokens: 262144,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"devstral-small-2505": {
id: "devstral-small-2505",
name: "Devstral Small 2505",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text"],
cost: {
@ -4566,13 +4566,13 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"devstral-small-2507": {
id: "devstral-small-2507",
name: "Devstral Small",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text"],
cost: {
@ -4583,13 +4583,13 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"labs-devstral-small-2512": {
id: "labs-devstral-small-2512",
name: "Devstral Small 2",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text", "image"],
cost: {
@ -4600,13 +4600,13 @@ export const MODELS = {
},
contextWindow: 256000,
maxTokens: 256000,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"magistral-medium-latest": {
id: "magistral-medium-latest",
name: "Magistral Medium",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: true,
input: ["text"],
cost: {
@ -4617,13 +4617,13 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"magistral-small": {
id: "magistral-small",
name: "Magistral Small",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: true,
input: ["text"],
cost: {
@ -4634,13 +4634,13 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"ministral-3b-latest": {
id: "ministral-3b-latest",
name: "Ministral 3B",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text"],
cost: {
@ -4651,13 +4651,13 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"ministral-8b-latest": {
id: "ministral-8b-latest",
name: "Ministral 8B",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text"],
cost: {
@ -4668,13 +4668,13 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"mistral-large-2411": {
id: "mistral-large-2411",
name: "Mistral Large 2.1",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text"],
cost: {
@ -4685,13 +4685,13 @@ export const MODELS = {
},
contextWindow: 131072,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"mistral-large-2512": {
id: "mistral-large-2512",
name: "Mistral Large 3",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text", "image"],
cost: {
@ -4702,13 +4702,13 @@ export const MODELS = {
},
contextWindow: 262144,
maxTokens: 262144,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"mistral-large-latest": {
id: "mistral-large-latest",
name: "Mistral Large",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text", "image"],
cost: {
@ -4719,13 +4719,13 @@ export const MODELS = {
},
contextWindow: 262144,
maxTokens: 262144,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"mistral-medium-2505": {
id: "mistral-medium-2505",
name: "Mistral Medium 3",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text", "image"],
cost: {
@ -4736,13 +4736,13 @@ export const MODELS = {
},
contextWindow: 131072,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"mistral-medium-2508": {
id: "mistral-medium-2508",
name: "Mistral Medium 3.1",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text", "image"],
cost: {
@ -4753,13 +4753,13 @@ export const MODELS = {
},
contextWindow: 262144,
maxTokens: 262144,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"mistral-medium-latest": {
id: "mistral-medium-latest",
name: "Mistral Medium",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text", "image"],
cost: {
@ -4770,13 +4770,13 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"mistral-nemo": {
id: "mistral-nemo",
name: "Mistral Nemo",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text"],
cost: {
@ -4787,13 +4787,13 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"mistral-small-2506": {
id: "mistral-small-2506",
name: "Mistral Small 3.2",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text", "image"],
cost: {
@ -4804,13 +4804,13 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"mistral-small-latest": {
id: "mistral-small-latest",
name: "Mistral Small",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text", "image"],
cost: {
@ -4821,13 +4821,13 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"open-mistral-7b": {
id: "open-mistral-7b",
name: "Mistral 7B",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text"],
cost: {
@ -4838,13 +4838,13 @@ export const MODELS = {
},
contextWindow: 8000,
maxTokens: 8000,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"open-mixtral-8x22b": {
id: "open-mixtral-8x22b",
name: "Mixtral 8x22B",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text"],
cost: {
@ -4855,13 +4855,13 @@ export const MODELS = {
},
contextWindow: 64000,
maxTokens: 64000,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"open-mixtral-8x7b": {
id: "open-mixtral-8x7b",
name: "Mixtral 8x7B",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text"],
cost: {
@ -4872,13 +4872,13 @@ export const MODELS = {
},
contextWindow: 32000,
maxTokens: 32000,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"pixtral-12b": {
id: "pixtral-12b",
name: "Pixtral 12B",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text", "image"],
cost: {
@ -4889,13 +4889,13 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
"pixtral-large-latest": {
id: "pixtral-large-latest",
name: "Pixtral Large",
api: "openai-completions",
api: "mistral-conversations",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
baseUrl: "https://api.mistral.ai",
reasoning: false,
input: ["text", "image"],
cost: {
@ -4906,7 +4906,7 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
} satisfies Model<"mistral-conversations">,
},
"openai": {
"codex-mini-latest": {

View file

@ -0,0 +1,572 @@
import { createHash } from "node:crypto";
import { Mistral } from "@mistralai/mistralai";
import type { RequestOptions } from "@mistralai/mistralai/lib/sdks.js";
import type {
ChatCompletionStreamRequest,
ChatCompletionStreamRequestMessages,
CompletionEvent,
ContentChunk,
FunctionTool,
} from "@mistralai/mistralai/models/components/index.js";
import { getEnvApiKey } from "../env-api-keys.js";
import { calculateCost } from "../models.js";
import type {
AssistantMessage,
Context,
Message,
Model,
SimpleStreamOptions,
StopReason,
StreamFunction,
StreamOptions,
TextContent,
ThinkingContent,
Tool,
ToolCall,
} from "../types.js";
import { AssistantMessageEventStream } from "../utils/event-stream.js";
import { parseStreamingJson } from "../utils/json-parse.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { buildBaseOptions, clampReasoning } from "./simple-options.js";
import { transformMessages } from "./transform-messages.js";
// Mistral requires tool call IDs to be exactly this many alphanumeric characters.
const MISTRAL_TOOL_CALL_ID_LENGTH = 9;
// Cap on how much of an error response body is echoed into surfaced error messages.
const MAX_MISTRAL_ERROR_BODY_CHARS = 4000;
/**
 * Provider-specific options for the Mistral API.
 */
export interface MistralOptions extends StreamOptions {
  // Tool invocation policy forwarded to the API: one of the string policies,
  // or a specific function to force.
  toolChoice?: "auto" | "none" | "any" | "required" | { type: "function"; function: { name: string } };
  // When set to "reasoning", the request asks the model for reasoning-mode output.
  promptMode?: "reasoning";
}
/**
 * Stream responses from Mistral using `chat.stream`.
 *
 * Returns an `AssistantMessageEventStream` immediately; the request itself runs
 * in a detached async task that pushes start/delta/done (or error) events onto
 * the stream. Historical tool call IDs are normalized to Mistral's required
 * format before the payload is built.
 */
export const streamMistral: StreamFunction<"mistral-conversations", MistralOptions> = (
  model: Model<"mistral-conversations">,
  context: Context,
  options?: MistralOptions,
): AssistantMessageEventStream => {
  const stream = new AssistantMessageEventStream();
  (async () => {
    const output = createOutput(model);
    try {
      const apiKey = options?.apiKey || getEnvApiKey(model.provider);
      if (!apiKey) {
        throw new Error(`No API key for provider: ${model.provider}`);
      }
      // Intentionally per-request: avoids shared SDK mutable state across concurrent consumers.
      const mistral = new Mistral({
        apiKey,
        serverURL: model.baseUrl,
      });
      // Normalizer is per-request so the original-ID -> normalized-ID mapping
      // stays consistent within one conversation payload.
      const normalizeMistralToolCallId = createMistralToolCallIdNormalizer();
      const transformedMessages = transformMessages(context.messages, model, (id) => normalizeMistralToolCallId(id));
      const payload = buildChatPayload(model, context, transformedMessages, options);
      options?.onPayload?.(payload);
      const mistralStream = await mistral.chat.stream(payload, buildRequestOptions(model, options));
      // "start" is only emitted once the request has been accepted.
      stream.push({ type: "start", partial: output });
      await consumeChatStream(model, output, stream, mistralStream);
      // Route aborts and provider-reported errors through the catch path below
      // so a single code path emits the terminal error event.
      if (options?.signal?.aborted) {
        throw new Error("Request was aborted");
      }
      if (output.stopReason === "aborted" || output.stopReason === "error") {
        throw new Error("An unknown error occurred");
      }
      stream.push({ type: "done", reason: output.stopReason, message: output });
      stream.end();
    } catch (error) {
      output.stopReason = options?.signal?.aborted ? "aborted" : "error";
      output.errorMessage = formatMistralError(error);
      stream.push({ type: "error", reason: output.stopReason, error: output });
      stream.end();
    }
  })();
  return stream;
};
/**
 * Maps provider-agnostic `SimpleStreamOptions` to Mistral options and delegates
 * to `streamMistral`. Reasoning mode is requested only when the model supports
 * reasoning AND the caller asked for a (clamped) reasoning effort.
 */
export const streamSimpleMistral: StreamFunction<"mistral-conversations", SimpleStreamOptions> = (
  model: Model<"mistral-conversations">,
  context: Context,
  options?: SimpleStreamOptions,
): AssistantMessageEventStream => {
  const key = options?.apiKey || getEnvApiKey(model.provider);
  if (!key) {
    throw new Error(`No API key for provider: ${model.provider}`);
  }
  const effort = clampReasoning(options?.reasoning);
  const mistralOptions: MistralOptions = {
    ...buildBaseOptions(model, options, key),
    promptMode: model.reasoning && effort ? "reasoning" : undefined,
  };
  return streamMistral(model, context, mistralOptions);
};
/**
 * Builds an empty assistant message for the given model: no content, zeroed
 * usage/cost counters, default stop reason "stop", timestamped at creation.
 */
function createOutput(model: Model<"mistral-conversations">): AssistantMessage {
  const emptyUsage = {
    input: 0,
    output: 0,
    cacheRead: 0,
    cacheWrite: 0,
    totalTokens: 0,
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
  };
  return {
    role: "assistant",
    content: [],
    api: model.api,
    provider: model.provider,
    model: model.id,
    usage: emptyUsage,
    stopReason: "stop",
    timestamp: Date.now(),
  };
}
/**
 * Returns a stateful normalizer that maps arbitrary tool call IDs to Mistral's
 * 9-character format. The same input always yields the same output within one
 * normalizer instance, and two distinct inputs never collide: on collision the
 * derivation is retried with an incremented attempt counter.
 */
function createMistralToolCallIdNormalizer(): (id: string) => string {
  const assigned = new Map<string, string>(); // original id -> normalized id
  const taken = new Map<string, string>(); // normalized id -> owning original id
  return (id: string): string => {
    const prior = assigned.get(id);
    if (prior !== undefined) return prior;
    for (let attempt = 0; ; attempt++) {
      const candidate = deriveMistralToolCallId(id, attempt);
      const owner = taken.get(candidate);
      // Skip candidates already claimed by a different original id.
      if (owner !== undefined && owner !== id) continue;
      assigned.set(id, candidate);
      taken.set(candidate, id);
      return candidate;
    }
  };
}
/**
 * Derives a deterministic Mistral-format tool call ID (9 alphanumeric chars)
 * from an arbitrary ID. An ID that is already exactly 9 alphanumerics passes
 * through unchanged on the first attempt; otherwise a SHA-256 prefix of the
 * (attempt-salted) seed is used. `attempt > 0` produces alternate candidates
 * for collision resolution.
 */
function deriveMistralToolCallId(id: string, attempt: number): string {
  const alnum = id.replace(/[^a-zA-Z0-9]/g, "");
  if (attempt === 0 && alnum.length === MISTRAL_TOOL_CALL_ID_LENGTH) {
    return alnum;
  }
  // Fall back to the raw id when stripping left nothing to hash.
  const base = alnum.length > 0 ? alnum : id;
  const seed = attempt > 0 ? `${base}:${attempt}` : base;
  return createHash("sha256").update(seed).digest("hex").slice(0, MISTRAL_TOOL_CALL_ID_LENGTH);
}
/**
 * Produces a human-readable message from a thrown value. Mistral SDK errors
 * may carry `statusCode` and a string `body`; when present, both are folded
 * into the message (body truncated). Non-Error values are JSON-stringified.
 */
function formatMistralError(error: unknown): string {
  if (!(error instanceof Error)) return safeJsonStringify(error);
  const sdkError = error as Error & { statusCode?: unknown; body?: unknown };
  const status = typeof sdkError.statusCode === "number" ? sdkError.statusCode : undefined;
  const body = typeof sdkError.body === "string" ? sdkError.body.trim() : undefined;
  if (status === undefined) return error.message;
  if (body) {
    return `Mistral API error (${status}): ${truncateErrorText(body, MAX_MISTRAL_ERROR_BODY_CHARS)}`;
  }
  return `Mistral API error (${status}): ${error.message}`;
}
/**
 * Caps `text` at `maxChars`, appending a note with the number of dropped
 * characters when truncation occurs.
 */
function truncateErrorText(text: string, maxChars: number): string {
  const overflow = text.length - maxChars;
  return overflow > 0 ? `${text.slice(0, maxChars)}... [truncated ${overflow} chars]` : text;
}
/**
 * JSON-stringifies a value, falling back to `String(value)` when
 * serialization yields `undefined` (e.g. `undefined`, functions) or throws
 * (e.g. circular structures, BigInt).
 */
function safeJsonStringify(value: unknown): string {
  try {
    const json = JSON.stringify(value);
    if (json !== undefined) return json;
  } catch {
    // Fall through to the String() fallback.
  }
  return String(value);
}
/**
 * Builds per-request SDK options: abort signal, disabled SDK retries, and the
 * merged header set (model headers overridden by call-time headers). When a
 * session ID is present and no caller set `x-affinity`, the session ID is used
 * for it — Mistral infrastructure uses `x-affinity` for KV-cache reuse
 * (prefix caching).
 */
function buildRequestOptions(model: Model<"mistral-conversations">, options?: MistralOptions): RequestOptions {
  const headers: Record<string, string> = { ...model.headers, ...options?.headers };
  if (options?.sessionId && !headers["x-affinity"]) {
    headers["x-affinity"] = options.sessionId;
  }
  const requestOptions: RequestOptions = { retries: { strategy: "none" } };
  if (options?.signal) {
    requestOptions.signal = options.signal;
  }
  if (Object.keys(headers).length > 0) {
    requestOptions.headers = headers;
  }
  return requestOptions;
}
function buildChatPayload(
model: Model<"mistral-conversations">,
context: Context,
messages: Message[],
options?: MistralOptions,
): ChatCompletionStreamRequest {
const payload: ChatCompletionStreamRequest = {
model: model.id,
stream: true,
messages: toChatMessages(messages, model.input.includes("image")),
};
if (context.tools?.length) payload.tools = toFunctionTools(context.tools);
if (options?.temperature !== undefined) payload.temperature = options.temperature;
if (options?.maxTokens !== undefined) payload.maxTokens = options.maxTokens;
if (options?.toolChoice) payload.toolChoice = mapToolChoice(options.toolChoice);
if (options?.promptMode) payload.promptMode = options.promptMode as any;
if (context.systemPrompt) {
payload.messages.unshift({
role: "system",
content: sanitizeSurrogates(context.systemPrompt),
});
}
return payload;
}
/**
 * Consumes the Mistral completion event stream, accumulating text, thinking,
 * and tool-call content blocks into `output` while emitting incremental
 * start/delta/end events on `stream`. Usage totals and cost are updated as
 * usage chunks arrive; the stop reason is taken from the last finishReason.
 */
async function consumeChatStream(
  model: Model<"mistral-conversations">,
  output: AssistantMessage,
  stream: AssistantMessageEventStream,
  mistralStream: AsyncIterable<CompletionEvent>,
): Promise<void> {
  // Text/thinking block currently being appended to; tool calls never become current.
  let currentBlock: TextContent | ThinkingContent | null = null;
  const blocks = output.content;
  // Index of the most recently pushed content block.
  const blockIndex = () => blocks.length - 1;
  // Maps "<callId>:<index>" to the content index of its tool-call block, so
  // argument deltas for the same call append to one block.
  const toolBlocksByKey = new Map<string, number>();
  // Emits the matching *_end event for an open text/thinking block.
  const finishCurrentBlock = (block?: typeof currentBlock) => {
    if (!block) return;
    if (block.type === "text") {
      stream.push({
        type: "text_end",
        contentIndex: blockIndex(),
        content: block.text,
        partial: output,
      });
      return;
    }
    if (block.type === "thinking") {
      stream.push({
        type: "thinking_end",
        contentIndex: blockIndex(),
        content: block.thinking,
        partial: output,
      });
    }
  };
  for await (const event of mistralStream) {
    const chunk = event.data;
    if (chunk.usage) {
      output.usage.input = chunk.usage.promptTokens || 0;
      output.usage.output = chunk.usage.completionTokens || 0;
      // NOTE(review): cache metrics are zeroed — presumably the API does not
      // report prompt-cache usage here; confirm against the SDK response shape.
      output.usage.cacheRead = 0;
      output.usage.cacheWrite = 0;
      output.usage.totalTokens = chunk.usage.totalTokens || output.usage.input + output.usage.output;
      calculateCost(model, output.usage);
    }
    const choice = chunk.choices[0];
    if (!choice) continue;
    if (choice.finishReason) {
      output.stopReason = mapChatStopReason(choice.finishReason);
    }
    const delta = choice.delta;
    if (delta.content !== null && delta.content !== undefined) {
      // Delta content may be a bare string or a list of typed content chunks.
      const contentItems = typeof delta.content === "string" ? [delta.content] : delta.content;
      for (const item of contentItems) {
        if (typeof item === "string") {
          const textDelta = sanitizeSurrogates(item);
          // Open a new text block unless one is already current.
          if (!currentBlock || currentBlock.type !== "text") {
            finishCurrentBlock(currentBlock);
            currentBlock = { type: "text", text: "" };
            output.content.push(currentBlock);
            stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
          }
          currentBlock.text += textDelta;
          stream.push({
            type: "text_delta",
            contentIndex: blockIndex(),
            delta: textDelta,
            partial: output,
          });
          continue;
        }
        if (item.type === "thinking") {
          // Thinking chunks carry a list of parts; only text-bearing parts contribute.
          const deltaText = item.thinking
            .map((part) => ("text" in part ? part.text : ""))
            .filter((text) => text.length > 0)
            .join("");
          const thinkingDelta = sanitizeSurrogates(deltaText);
          if (!thinkingDelta) continue;
          if (!currentBlock || currentBlock.type !== "thinking") {
            finishCurrentBlock(currentBlock);
            currentBlock = { type: "thinking", thinking: "" };
            output.content.push(currentBlock);
            stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output });
          }
          currentBlock.thinking += thinkingDelta;
          stream.push({
            type: "thinking_delta",
            contentIndex: blockIndex(),
            delta: thinkingDelta,
            partial: output,
          });
          continue;
        }
        if (item.type === "text") {
          const textDelta = sanitizeSurrogates(item.text);
          if (!currentBlock || currentBlock.type !== "text") {
            finishCurrentBlock(currentBlock);
            currentBlock = { type: "text", text: "" };
            output.content.push(currentBlock);
            stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
          }
          currentBlock.text += textDelta;
          stream.push({
            type: "text_delta",
            contentIndex: blockIndex(),
            delta: textDelta,
            partial: output,
          });
        }
      }
    }
    const toolCalls = delta.toolCalls || [];
    for (const toolCall of toolCalls) {
      // A tool call terminates any open text/thinking block.
      if (currentBlock) {
        finishCurrentBlock(currentBlock);
        currentBlock = null;
      }
      // Some chunks carry no usable ID (missing or the literal string "null");
      // derive a deterministic one from the call's index instead.
      const callId =
        toolCall.id && toolCall.id !== "null"
          ? toolCall.id
          : deriveMistralToolCallId(`toolcall:${toolCall.index ?? 0}`, 0);
      const key = `${callId}:${toolCall.index || 0}`;
      const existingIndex = toolBlocksByKey.get(key);
      let block: (ToolCall & { partialArgs?: string }) | undefined;
      if (existingIndex !== undefined) {
        const existing = output.content[existingIndex];
        if (existing?.type === "toolCall") {
          block = existing as ToolCall & { partialArgs?: string };
        }
      }
      if (!block) {
        block = {
          type: "toolCall",
          id: callId,
          name: toolCall.function.name,
          arguments: {},
          partialArgs: "",
        };
        output.content.push(block);
        toolBlocksByKey.set(key, output.content.length - 1);
        stream.push({ type: "toolcall_start", contentIndex: output.content.length - 1, partial: output });
      }
      // Arguments may stream as text fragments or arrive as a complete object.
      const argsDelta =
        typeof toolCall.function.arguments === "string"
          ? toolCall.function.arguments
          : JSON.stringify(toolCall.function.arguments || {});
      // Accumulate raw argument text; keep a best-effort parse available to
      // consumers of partial events.
      block.partialArgs = (block.partialArgs || "") + argsDelta;
      block.arguments = parseStreamingJson<Record<string, unknown>>(block.partialArgs);
      stream.push({
        type: "toolcall_delta",
        contentIndex: toolBlocksByKey.get(key)!,
        delta: argsDelta,
        partial: output,
      });
    }
  }
  finishCurrentBlock(currentBlock);
  // Finalize tool-call blocks: parse accumulated JSON once more, strip the
  // internal partialArgs scratch field, and emit toolcall_end for each.
  for (const index of toolBlocksByKey.values()) {
    const block = output.content[index];
    if (block.type !== "toolCall") continue;
    const toolBlock = block as ToolCall & { partialArgs?: string };
    toolBlock.arguments = parseStreamingJson<Record<string, unknown>>(toolBlock.partialArgs);
    delete toolBlock.partialArgs;
    stream.push({
      type: "toolcall_end",
      contentIndex: index,
      toolCall: toolBlock,
      partial: output,
    });
  }
}
/**
 * Converts library tool definitions into Mistral function-tool descriptors.
 * Strict schema validation is disabled.
 */
function toFunctionTools(tools: Tool[]): Array<FunctionTool & { type: "function" }> {
  const converted: Array<FunctionTool & { type: "function" }> = [];
  for (const tool of tools) {
    converted.push({
      type: "function",
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.parameters as unknown as Record<string, unknown>,
        strict: false,
      },
    });
  }
  return converted;
}
/**
 * Converts library messages into Mistral chat messages.
 *
 * - User messages: text parts are sanitized; image parts become data-URL image
 *   chunks when the model supports images, otherwise they are dropped (with a
 *   textual placeholder when nothing else remains).
 * - Assistant messages: non-empty text and thinking blocks become text parts
 *   (thinking is flattened to text); remaining blocks become function tool
 *   calls. Empty assistant messages are skipped.
 * - Tool results: rendered via `buildToolResultText`, with image attachments
 *   forwarded only when supported.
 */
function toChatMessages(messages: Message[], supportsImages: boolean): ChatCompletionStreamRequestMessages[] {
  const converted: ChatCompletionStreamRequestMessages[] = [];
  for (const message of messages) {
    if (message.role === "user") {
      if (typeof message.content === "string") {
        converted.push({ role: "user", content: sanitizeSurrogates(message.content) });
        continue;
      }
      const containsImages = message.content.some((part) => part.type === "image");
      const chunks: ContentChunk[] = [];
      for (const part of message.content) {
        if (part.type === "text") {
          chunks.push({ type: "text", text: sanitizeSurrogates(part.text) });
        } else if (supportsImages) {
          chunks.push({ type: "image_url", imageUrl: `data:${part.mimeType};base64,${part.data}` });
        }
      }
      if (chunks.length > 0) {
        converted.push({ role: "user", content: chunks });
      } else if (containsImages && !supportsImages) {
        // Everything was dropped; leave a marker so the turn isn't lost.
        converted.push({ role: "user", content: "(image omitted: model does not support images)" });
      }
      continue;
    }
    if (message.role === "assistant") {
      const textParts: Array<{ type: "text"; text: string }> = [];
      const toolCalls: Array<{ id: string; type: "function"; function: { name: string; arguments: string } }> = [];
      for (const part of message.content) {
        if (part.type === "text" || part.type === "thinking") {
          const raw = part.type === "text" ? part.text : part.thinking;
          if (raw.trim().length > 0) {
            textParts.push({ type: "text", text: sanitizeSurrogates(raw) });
          }
        } else {
          toolCalls.push({
            id: part.id,
            type: "function",
            function: { name: part.name, arguments: JSON.stringify(part.arguments || {}) },
          });
        }
      }
      if (textParts.length === 0 && toolCalls.length === 0) continue;
      const assistantMessage: ChatCompletionStreamRequestMessages = { role: "assistant" };
      if (textParts.length > 0) assistantMessage.content = textParts;
      if (toolCalls.length > 0) assistantMessage.toolCalls = toolCalls;
      converted.push(assistantMessage);
      continue;
    }
    // Tool result message.
    const textBody = message.content
      .flatMap((part) => (part.type === "text" ? [sanitizeSurrogates(part.text)] : []))
      .join("\n");
    const resultHasImages = message.content.some((part) => part.type === "image");
    const toolChunks: ContentChunk[] = [
      { type: "text", text: buildToolResultText(textBody, resultHasImages, supportsImages, message.isError) },
    ];
    if (supportsImages) {
      for (const part of message.content) {
        if (part.type !== "image") continue;
        toolChunks.push({
          type: "image_url",
          imageUrl: `data:${part.mimeType};base64,${part.data}`,
        });
      }
    }
    converted.push({
      role: "tool",
      toolCallId: message.toolCallId,
      name: message.toolName,
      content: toolChunks,
    });
  }
  return converted;
}
/**
 * Renders the textual part of a tool result. Errors get a "[tool error] "
 * prefix. When there is no text, a placeholder describes the image situation
 * (attached, omitted, or no output at all); when there is text and images were
 * dropped, an omission note is appended.
 */
function buildToolResultText(text: string, hasImages: boolean, supportsImages: boolean, isError: boolean): string {
  const prefix = isError ? "[tool error] " : "";
  const trimmed = text.trim();
  if (trimmed.length > 0) {
    const suffix = hasImages && !supportsImages ? "\n[tool image omitted: model does not support images]" : "";
    return prefix + trimmed + suffix;
  }
  let placeholder: string;
  if (!hasImages) {
    placeholder = "(no tool output)";
  } else if (supportsImages) {
    placeholder = "(see attached image)";
  } else {
    placeholder = "(image omitted: model does not support images)";
  }
  return prefix + placeholder;
}
/**
 * Map the library's tool-choice option to Mistral's wire format.
 *
 * String modes ("auto" | "none" | "any" | "required") pass through unchanged;
 * a specific-function choice is rewrapped into Mistral's
 * `{ type: "function", function: { name } }` shape.
 *
 * @param choice - Tool choice from {@link MistralOptions}; `undefined`/falsy means "no preference".
 * @returns The Mistral tool_choice value, or `undefined` when no choice was given.
 */
function mapToolChoice(
	choice: MistralOptions["toolChoice"],
): "auto" | "none" | "any" | "required" | { type: "function"; function: { name: string } } | undefined {
	if (!choice) return undefined;
	if (choice === "auto" || choice === "none" || choice === "any" || choice === "required") {
		// The equality checks narrow `choice` to a literal already present in the
		// return union, so no `as any` cast is needed here.
		return choice;
	}
	return {
		type: "function",
		function: { name: choice.function.name },
	};
}
/**
 * Translate a Mistral finish reason into the library's {@link StopReason}.
 *
 * Unknown reasons (and a missing reason) fall back to "stop", matching the
 * provider's default behavior for normal completion.
 */
function mapChatStopReason(reason: string | null): StopReason {
	// Both "length" and "model_length" indicate the token limit was hit.
	if (reason === "length" || reason === "model_length") return "length";
	if (reason === "tool_calls") return "toolUse";
	if (reason === "error") return "error";
	// null, "stop", and anything unrecognized all map to a normal stop.
	return "stop";
}

View file

@ -33,24 +33,6 @@ import { buildCopilotDynamicHeaders, hasCopilotVisionInput } from "./github-copi
import { buildBaseOptions, clampReasoning } from "./simple-options.js";
import { transformMessages } from "./transform-messages.js";
/**
* Normalize tool call ID for Mistral.
* Mistral requires tool IDs to be exactly 9 alphanumeric characters (a-z, A-Z, 0-9).
*/
function normalizeMistralToolId(id: string): string {
// Remove non-alphanumeric characters
let normalized = id.replace(/[^a-zA-Z0-9]/g, "");
// Mistral requires exactly 9 characters
if (normalized.length < 9) {
    // Pad with a fixed deterministic suffix so the same input always maps to the same 9-char ID
const padding = "ABCDEFGHI";
normalized = normalized + padding.slice(0, 9 - normalized.length);
} else if (normalized.length > 9) {
normalized = normalized.slice(0, 9);
}
return normalized;
}
/**
* Check if conversation messages contain tool calls or tool results.
* This is needed because Anthropic (via proxy) requires the tools param
@ -296,7 +278,6 @@ export const streamOpenAICompletions: StreamFunction<"openai-completions", OpenA
}
finishCurrentBlock(currentBlock);
if (options?.signal?.aborted) {
throw new Error("Request was aborted");
}
@ -498,8 +479,6 @@ export function convertMessages(
const params: ChatCompletionMessageParam[] = [];
const normalizeToolCallId = (id: string): string => {
if (compat.requiresMistralToolIds) return normalizeMistralToolId(id);
// Handle pipe-separated IDs from OpenAI Responses API
// Format: {call_id}|{id} where {id} can be 400+ chars with special chars (+, /, =)
// These come from providers like github-copilot, openai-codex, opencode
@ -526,7 +505,7 @@ export function convertMessages(
for (let i = 0; i < transformedMessages.length; i++) {
const msg = transformedMessages[i];
// Some providers (e.g. Mistral/Devstral) don't allow user messages directly after tool results
// Some providers don't allow user messages directly after tool results
// Insert a synthetic assistant message to bridge the gap
if (compat.requiresAssistantAfterToolResult && lastRole === "toolResult" && msg.role === "user") {
params.push({
@ -567,7 +546,7 @@ export function convertMessages(
});
}
} else if (msg.role === "assistant") {
// Some providers (e.g. Mistral) don't accept null content, use empty string instead
// Some providers don't accept null content, use empty string instead
const assistantMsg: ChatCompletionAssistantMessageParam = {
role: "assistant",
content: compat.requiresAssistantAfterToolResult ? "" : null,
@ -636,7 +615,7 @@ export function convertMessages(
}
}
// Skip assistant messages that have no content and no tool calls.
// Mistral explicitly requires "either content or tool_calls, but not none".
// Some providers require "either content or tool_calls, but not none".
// Other providers also don't accept empty assistant messages.
// This handles aborted assistant responses that got no content.
const content = assistantMsg.content;
@ -664,7 +643,7 @@ export function convertMessages(
// Always send tool result with text (or placeholder if only images)
const hasText = textResult.length > 0;
// Some providers (e.g. Mistral) require the 'name' field in tool results
// Some providers require the 'name' field in tool results
const toolResultMsg: ChatCompletionToolMessageParam = {
role: "tool",
content: sanitizeSurrogates(hasText ? textResult : "(see attached image)"),
@ -773,21 +752,17 @@ function detectCompat(model: Model<"openai-completions">): Required<OpenAIComple
baseUrl.includes("cerebras.ai") ||
provider === "xai" ||
baseUrl.includes("api.x.ai") ||
provider === "mistral" ||
baseUrl.includes("mistral.ai") ||
baseUrl.includes("chutes.ai") ||
baseUrl.includes("deepseek.com") ||
isZai ||
provider === "opencode" ||
baseUrl.includes("opencode.ai");
const useMaxTokens = provider === "mistral" || baseUrl.includes("mistral.ai") || baseUrl.includes("chutes.ai");
const useMaxTokens = baseUrl.includes("chutes.ai");
const isGrok = provider === "xai" || baseUrl.includes("api.x.ai");
const isGroq = provider === "groq" || baseUrl.includes("groq.com");
const isMistral = provider === "mistral" || baseUrl.includes("mistral.ai");
const reasoningEffortMap =
isGroq && model.id === "qwen/qwen3-32b"
? {
@ -798,7 +773,6 @@ function detectCompat(model: Model<"openai-completions">): Required<OpenAIComple
xhigh: "default",
}
: {};
return {
supportsStore: !isNonStandard,
supportsDeveloperRole: !isNonStandard,
@ -806,10 +780,9 @@ function detectCompat(model: Model<"openai-completions">): Required<OpenAIComple
reasoningEffortMap,
supportsUsageInStreaming: true,
maxTokensField: useMaxTokens ? "max_tokens" : "max_completion_tokens",
requiresToolResultName: isMistral,
requiresAssistantAfterToolResult: false, // Mistral no longer requires this as of Dec 2024
requiresThinkingAsText: isMistral,
requiresMistralToolIds: isMistral,
requiresToolResultName: false,
requiresAssistantAfterToolResult: false,
requiresThinkingAsText: false,
thinkingFormat: isZai ? "zai" : "openai",
openRouterRouting: {},
vercelGatewayRouting: {},
@ -836,7 +809,6 @@ function getCompat(model: Model<"openai-completions">): Required<OpenAICompletio
requiresAssistantAfterToolResult:
model.compat.requiresAssistantAfterToolResult ?? detected.requiresAssistantAfterToolResult,
requiresThinkingAsText: model.compat.requiresThinkingAsText ?? detected.requiresThinkingAsText,
requiresMistralToolIds: model.compat.requiresMistralToolIds ?? detected.requiresMistralToolIds,
thinkingFormat: model.compat.thinkingFormat ?? detected.thinkingFormat,
openRouterRouting: model.compat.openRouterRouting ?? {},
vercelGatewayRouting: model.compat.vercelGatewayRouting ?? detected.vercelGatewayRouting,

View file

@ -5,6 +5,7 @@ import { streamAzureOpenAIResponses, streamSimpleAzureOpenAIResponses } from "./
import { streamGoogle, streamSimpleGoogle } from "./google.js";
import { streamGoogleGeminiCli, streamSimpleGoogleGeminiCli } from "./google-gemini-cli.js";
import { streamGoogleVertex, streamSimpleGoogleVertex } from "./google-vertex.js";
import { streamMistral, streamSimpleMistral } from "./mistral.js";
import { streamOpenAICodexResponses, streamSimpleOpenAICodexResponses } from "./openai-codex-responses.js";
import { streamOpenAICompletions, streamSimpleOpenAICompletions } from "./openai-completions.js";
import { streamOpenAIResponses, streamSimpleOpenAIResponses } from "./openai-responses.js";
@ -22,6 +23,12 @@ export function registerBuiltInApiProviders(): void {
streamSimple: streamSimpleOpenAICompletions,
});
registerApiProvider({
api: "mistral-conversations",
stream: streamMistral,
streamSimple: streamSimpleMistral,
});
registerApiProvider({
api: "openai-responses",
stream: streamOpenAIResponses,

View file

@ -4,6 +4,7 @@ export type { AssistantMessageEventStream } from "./utils/event-stream.js";
export type KnownApi =
| "openai-completions"
| "mistral-conversations"
| "openai-responses"
| "azure-openai-responses"
| "openai-codex-responses"
@ -247,8 +248,6 @@ export interface OpenAICompletionsCompat {
requiresAssistantAfterToolResult?: boolean;
/** Whether thinking blocks must be converted to text blocks with <thinking> delimiters. Default: auto-detected from URL. */
requiresThinkingAsText?: boolean;
/** Whether tool call IDs must be normalized to Mistral format (exactly 9 alphanumeric chars). Default: auto-detected from URL. */
requiresMistralToolIds?: boolean;
/** Format for reasoning/thinking parameter. "openai" uses reasoning_effort, "zai" uses thinking: { type: "enabled" }, "qwen" uses enable_thinking: boolean. Default: "openai". */
thinkingFormat?: "openai" | "zai" | "qwen";
/** OpenRouter-specific routing preferences. Only used when baseUrl points to OpenRouter. */

View file

@ -20,7 +20,7 @@ import type { AssistantMessage } from "../types.js";
* - MiniMax: "invalid params, context window exceeds limit"
* - Kimi For Coding: "Your request exceeded model token limit: X (requested: Y)"
* - Cerebras: Returns "400/413 status code (no body)" - handled separately below
* - Mistral: Returns "400/413 status code (no body)" - handled separately below
* - Mistral: "Prompt contains X tokens ... too large for model with Y maximum context length"
* - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow
* - Ollama: Silently truncates input - not detectable via error message
*/
@ -37,6 +37,7 @@ const OVERFLOW_PATTERNS = [
/greater than the context length/i, // LM Studio
/context window exceeds limit/i, // MiniMax
/exceeded model token limit/i, // Kimi For Coding
/too large for model with \d+ maximum context length/i, // Mistral
/context[_ ]length[_ ]exceeded/i, // Generic fallback
/too many tokens/i, // Generic fallback
/token limit exceeded/i, // Generic fallback
@ -60,7 +61,7 @@ const OVERFLOW_PATTERNS = [
* - xAI (Grok): "maximum prompt length is X but request contains Y"
* - Groq: "reduce the length of the messages"
* - Cerebras: 400/413 status code (no body)
* - Mistral: 400/413 status code (no body)
* - Mistral: "Prompt contains X tokens ... too large for model with Y maximum context length"
* - OpenRouter (all backends): "maximum context length is X tokens"
* - llama.cpp: "exceeds the available context size"
* - LM Studio: "greater than the context length"
@ -95,7 +96,7 @@ export function isContextOverflow(message: AssistantMessage, contextWindow?: num
return true;
}
// Cerebras and Mistral return 400/413 with no body for context overflow
// Cerebras returns 400/413 with no body for context overflow
// Note: 429 is rate limiting (requests/tokens per time), NOT context overflow
if (/^4(00|13)\s*(status code)?\s*\(no body\)/i.test(message.errorMessage)) {
return true;

View file

@ -413,7 +413,6 @@ describe("Context overflow error handling", () => {
// =============================================================================
// Mistral
// Expected pattern: TBD - need to test actual error message
// =============================================================================
describe.skipIf(!process.env.MISTRAL_API_KEY)("Mistral", () => {
@ -423,6 +422,7 @@ describe("Context overflow error handling", () => {
logResult(result);
expect(result.stopReason).toBe("error");
expect(result.errorMessage).toMatch(/too large for model with \d+ maximum context length/i);
expect(isContextOverflow(result.response, model.contextWindow)).toBe(true);
}, 120000);
});

View file

@ -291,11 +291,11 @@ describe("Tool Results with Images", () => {
describe.skipIf(!process.env.MISTRAL_API_KEY)("Mistral Provider (pixtral-12b)", () => {
const llm = getModel("mistral", "pixtral-12b");
it("should handle tool result with only image", { retry: 3, timeout: 30000 }, async () => {
it("should handle tool result with only image", { retry: 5, timeout: 30000 }, async () => {
await handleToolWithImageResult(llm);
});
it("should handle tool result with text and image", { retry: 3, timeout: 30000 }, async () => {
it("should handle tool result with text and image", { retry: 5, timeout: 30000 }, async () => {
await handleToolWithTextAndImageResult(llm);
});
});

View file

@ -29,7 +29,6 @@ const compat: Required<OpenAICompletionsCompat> = {
requiresToolResultName: false,
requiresAssistantAfterToolResult: false,
requiresThinkingAsText: false,
requiresMistralToolIds: false,
thinkingFormat: "openai",
openRouterRouting: {},
vercelGatewayRouting: {},

View file

@ -765,34 +765,30 @@ describe("Generate E2E Tests", () => {
});
});
describe.skipIf(!process.env.MISTRAL_API_KEY)(
"Mistral Provider (devstral-medium-latest via OpenAI Completions)",
() => {
const llm = getModel("mistral", "devstral-medium-latest");
describe.skipIf(!process.env.MISTRAL_API_KEY)("Mistral Provider (devstral-medium-latest)", () => {
const llm = getModel("mistral", "devstral-medium-latest");
it("should complete basic text generation", { retry: 3 }, async () => {
await basicTextGeneration(llm);
});
it("should complete basic text generation", { retry: 3 }, async () => {
await basicTextGeneration(llm);
});
it("should handle tool calling", { retry: 3 }, async () => {
await handleToolCall(llm);
});
it("should handle tool calling", { retry: 3 }, async () => {
await handleToolCall(llm);
});
it("should handle streaming", { retry: 3 }, async () => {
await handleStreaming(llm);
});
it("should handle streaming", { retry: 3 }, async () => {
await handleStreaming(llm);
});
it("should handle thinking mode", { retry: 3 }, async () => {
// FIXME Skip for now, getting a 422 status code, need to test with official SDK
// const llm = getModel("mistral", "magistral-medium-latest");
// await handleThinking(llm, { reasoningEffort: "medium" });
});
it("should handle thinking mode", { retry: 3 }, async () => {
const llm = getModel("mistral", "magistral-medium-latest");
await handleThinking(llm, { reasoningEffort: "medium" });
});
it("should handle multi-turn with thinking and tools", { retry: 3 }, async () => {
await multiTurn(llm, { reasoningEffort: "medium" });
});
},
);
it("should handle multi-turn with thinking and tools", { retry: 3 }, async () => {
await multiTurn(llm, { reasoningEffort: "medium" });
});
});
describe.skipIf(!process.env.MISTRAL_API_KEY)("Mistral Provider (pixtral-12b with image support)", () => {
const llm = getModel("mistral", "pixtral-12b");

View file

@ -55,6 +55,7 @@ async function testTokensOnAbort<TApi extends Api>(llm: Model<TApi>, options: St
// MiniMax reports input tokens but not output tokens when aborted.
if (
llm.api === "openai-completions" ||
llm.api === "mistral-conversations" ||
llm.api === "openai-responses" ||
llm.api === "azure-openai-responses" ||
llm.api === "openai-codex-responses" ||

View file

@ -159,6 +159,7 @@ The `api` field determines which streaming implementation is used:
| `openai-responses` | OpenAI Responses API |
| `azure-openai-responses` | Azure OpenAI Responses API |
| `openai-codex-responses` | OpenAI Codex Responses API |
| `mistral-conversations` | Mistral SDK Conversations/Chat streaming |
| `google-generative-ai` | Google Generative AI API |
| `google-gemini-cli` | Google Cloud Code Assist API |
| `google-vertex` | Google Vertex AI API |
@ -180,14 +181,17 @@ models: [{
high: "default",
xhigh: "default"
},
maxTokensField: "max_tokens", // instead of "max_completion_tokens"
requiresToolResultName: true, // tool results need name field
requiresMistralToolIds: true,
thinkingFormat: "qwen" // uses enable_thinking: true
}
}]
maxTokensField: "max_tokens", // instead of "max_completion_tokens"
requiresToolResultName: true, // tool results need name field
thinkingFormat: "qwen" // uses enable_thinking: true
}
}]
```
> Migration note: Mistral moved from `openai-completions` to `mistral-conversations`.
> Use `mistral-conversations` for native Mistral models.
> If you intentionally route Mistral-compatible/custom endpoints through `openai-completions`, set `compat` flags explicitly as needed.
### Auth Header
If your provider expects `Authorization: Bearer <key>` but doesn't use a standard API, set `authHeader: true`:
@ -301,6 +305,7 @@ For providers with non-standard APIs, implement `streamSimple`. Study the existi
**Reference implementations:**
- [anthropic.ts](https://github.com/badlogic/pi-mono/blob/main/packages/ai/src/providers/anthropic.ts) - Anthropic Messages API
- [mistral.ts](https://github.com/badlogic/pi-mono/blob/main/packages/ai/src/providers/mistral.ts) - Mistral Conversations API
- [openai-completions.ts](https://github.com/badlogic/pi-mono/blob/main/packages/ai/src/providers/openai-completions.ts) - OpenAI Chat Completions
- [openai-responses.ts](https://github.com/badlogic/pi-mono/blob/main/packages/ai/src/providers/openai-responses.ts) - OpenAI Responses API
- [google.ts](https://github.com/badlogic/pi-mono/blob/main/packages/ai/src/providers/google.ts) - Google Generative AI
@ -581,7 +586,6 @@ interface ProviderModelConfig {
requiresToolResultName?: boolean;
requiresAssistantAfterToolResult?: boolean;
requiresThinkingAsText?: boolean;
requiresMistralToolIds?: boolean;
thinkingFormat?: "openai" | "zai" | "qwen";
};
}