Add MiniMax provider support (#656 by @dannote)

- Add minimax to KnownProvider and Api types
- Add MINIMAX_API_KEY to getEnvApiKey()
- Generate MiniMax-M2 and MiniMax-M2.1 models
- Add context overflow detection pattern
- Add tests to all required test files
- Update README and CHANGELOG with attribution

Also fixes:
- Bedrock duplicate toolResult ID when content has multiple blocks
- Sandbox extension unused parameter lint warning
This commit is contained in:
Mario Zechner 2026-01-13 02:27:09 +01:00
parent edc576024f
commit 8af8d0d672
20 changed files with 233 additions and 31 deletions

View file

@@ -2686,6 +2686,42 @@ export const MODELS = {
maxTokens: 16384,
} satisfies Model<"openai-completions">,
},
"minimax": {
"MiniMax-M2": {
id: "MiniMax-M2",
name: "MiniMax-M2",
api: "anthropic-messages",
provider: "minimax",
baseUrl: "https://api.minimax.io/anthropic",
reasoning: true,
input: ["text"],
cost: {
input: 0.3,
output: 1.2,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 196608,
maxTokens: 128000,
} satisfies Model<"anthropic-messages">,
"MiniMax-M2.1": {
id: "MiniMax-M2.1",
name: "MiniMax-M2.1",
api: "anthropic-messages",
provider: "minimax",
baseUrl: "https://api.minimax.io/anthropic",
reasoning: true,
input: ["text"],
cost: {
input: 0.3,
output: 1.2,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"anthropic-messages">,
},
"mistral": {
"codestral-latest": {
id: "codestral-latest",
@@ -4529,7 +4565,7 @@ export const MODELS = {
cacheWrite: 18.75,
},
contextWindow: 200000,
maxTokens: 4096,
maxTokens: 32000,
} satisfies Model<"openai-completions">,
"anthropic/claude-opus-4.5": {
id: "anthropic/claude-opus-4.5",

View file

@@ -378,38 +378,34 @@ function convertMessages(context: Context, model: Model<"bedrock-converse-stream
// Bedrock requires all tool results to be in one message
const toolResults: ContentBlock.ToolResultMember[] = [];
// Add current tool result
for (const c of m.content) {
toolResults.push({
toolResult: {
toolUseId: m.toolCallId,
content: [
c.type === "image"
? { image: createImageBlock(c.mimeType, c.data) }
: { text: sanitizeSurrogates(c.text) },
],
status: m.isError ? ToolResultStatus.ERROR : ToolResultStatus.SUCCESS,
},
});
}
// Add current tool result with all content blocks combined
toolResults.push({
toolResult: {
toolUseId: m.toolCallId,
content: m.content.map((c) =>
c.type === "image"
? { image: createImageBlock(c.mimeType, c.data) }
: { text: sanitizeSurrogates(c.text) },
),
status: m.isError ? ToolResultStatus.ERROR : ToolResultStatus.SUCCESS,
},
});
// Look ahead for consecutive toolResult messages
let j = i + 1;
while (j < messages.length && messages[j].role === "toolResult") {
const nextMsg = messages[j] as ToolResultMessage;
for (const c of nextMsg.content) {
toolResults.push({
toolResult: {
toolUseId: nextMsg.toolCallId,
content: [
c.type === "image"
? { image: createImageBlock(c.mimeType, c.data) }
: { text: sanitizeSurrogates(c.text) },
],
status: nextMsg.isError ? ToolResultStatus.ERROR : ToolResultStatus.SUCCESS,
},
});
}
toolResults.push({
toolResult: {
toolUseId: nextMsg.toolCallId,
content: nextMsg.content.map((c) =>
c.type === "image"
? { image: createImageBlock(c.mimeType, c.data) }
: { text: sanitizeSurrogates(c.text) },
),
status: nextMsg.isError ? ToolResultStatus.ERROR : ToolResultStatus.SUCCESS,
},
});
j++;
}

View file

@@ -98,6 +98,7 @@ export function getEnvApiKey(provider: any): string | undefined {
openrouter: "OPENROUTER_API_KEY",
zai: "ZAI_API_KEY",
mistral: "MISTRAL_API_KEY",
minimax: "MINIMAX_API_KEY",
opencode: "OPENCODE_API_KEY",
};

View file

@@ -58,6 +58,7 @@ export type KnownProvider =
| "openrouter"
| "zai"
| "mistral"
| "minimax"
| "opencode";
export type Provider = KnownProvider | string;

View file

@@ -17,6 +17,7 @@ import type { AssistantMessage } from "../types.js";
* - llama.cpp: "the request exceeds the available context size, try increasing it"
* - LM Studio: "tokens to keep from the initial prompt is greater than the context length"
* - GitHub Copilot: "prompt token count of X exceeds the limit of Y"
* - MiniMax: "invalid params, context window exceeds limit"
* - Cerebras: Returns "400 status code (no body)" - handled separately below
* - Mistral: Returns "400 status code (no body)" - handled separately below
* - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow
@@ -33,6 +34,7 @@ const OVERFLOW_PATTERNS = [
/exceeds the limit of \d+/i, // GitHub Copilot
/exceeds the available context size/i, // llama.cpp server
/greater than the context length/i, // LM Studio
/context window exceeds limit/i, // MiniMax
/context[_ ]length[_ ]exceeded/i, // Generic fallback
/too many tokens/i, // Generic fallback
/token limit exceeded/i, // Generic fallback