chore(models): add MiniMax M2.5 entries across providers

This commit is contained in:
Mario Zechner 2026-02-13 21:57:21 +01:00
parent bd040072d3
commit 130c23e6c7
3 changed files with 98 additions and 5 deletions

View file

@ -2,6 +2,10 @@
## [Unreleased]
### Added
- Added MiniMax M2.5 model entries for `minimax`, `minimax-cn`, `openrouter`, and `vercel-ai-gateway` providers, plus `minimax-m2.5-free` for `opencode`.
## [0.52.10] - 2026-02-12
### Added

View file

@ -1263,6 +1263,40 @@ export const MODELS = {
contextWindow: 200000,
maxTokens: 64000,
} satisfies Model<"bedrock-converse-stream">,
// Writer Palmyra X4 served through Amazon Bedrock's Converse streaming API.
// NOTE(review): cost figures are presumably USD per 1M tokens, matching the
// sibling Bedrock entries' convention — confirm against the price sheet.
"writer.palmyra-x4-v1:0": {
id: "writer.palmyra-x4-v1:0",
name: "Palmyra X4",
api: "bedrock-converse-stream",
provider: "amazon-bedrock",
// Region-pinned endpoint (us-east-1), same as the other Bedrock entries here.
baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com",
reasoning: true,
input: ["text"],
cost: {
input: 2.5,
output: 10,
// Zero cache rates — presumably prompt caching is unsupported/unpriced
// for this model on Bedrock; verify before relying on cache accounting.
cacheRead: 0,
cacheWrite: 0,
},
// 122880 = 120K context; 8192 max output tokens.
contextWindow: 122880,
maxTokens: 8192,
} satisfies Model<"bedrock-converse-stream">,
// Writer Palmyra X5 on Amazon Bedrock (Converse streaming API).
// Cheaper per input token than X4 above but with a much larger context
// window. NOTE(review): costs presumably USD per 1M tokens — confirm.
"writer.palmyra-x5-v1:0": {
id: "writer.palmyra-x5-v1:0",
name: "Palmyra X5",
api: "bedrock-converse-stream",
provider: "amazon-bedrock",
baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com",
reasoning: true,
input: ["text"],
cost: {
input: 0.6,
output: 6,
// Zero cache rates — presumably no cache pricing for this model; verify.
cacheRead: 0,
cacheWrite: 0,
},
// ~1.04M-token context; output capped at 8192 tokens.
contextWindow: 1040000,
maxTokens: 8192,
} satisfies Model<"bedrock-converse-stream">,
"zai.glm-4.7": {
id: "zai.glm-4.7",
name: "GLM-4.7",
@ -4064,6 +4098,23 @@ export const MODELS = {
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"anthropic-messages">,
// MiniMax M2.5 via the international MiniMax endpoint (Anthropic-compatible
// Messages API). A parallel entry exists under "minimax-cn" with the
// mainland-China endpoint; keep the two in sync except for baseUrl/provider.
"MiniMax-M2.5": {
id: "MiniMax-M2.5",
name: "MiniMax-M2.5",
api: "anthropic-messages",
provider: "minimax",
// International host (api.minimax.io) — the CN twin uses api.minimaxi.com.
baseUrl: "https://api.minimax.io/anthropic",
reasoning: true,
input: ["text"],
// Presumably USD per 1M tokens. cacheWrite (0.375) = 1.25x input and
// cacheRead (0.03) = 0.1x input, consistent with the CN entry — TODO confirm
// against MiniMax's published pricing.
cost: {
input: 0.3,
output: 1.2,
cacheRead: 0.03,
cacheWrite: 0.375,
},
// 204800 = 200K context; 131072 = 128K max output tokens.
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"anthropic-messages">,
},
"minimax-cn": {
"MiniMax-M2": {
@ -4100,6 +4151,23 @@ export const MODELS = {
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"anthropic-messages">,
// MiniMax M2.5 via the mainland-China MiniMax endpoint (Anthropic-compatible
// Messages API). Identical limits and pricing to the international entry in
// the "minimax" provider block — only provider and baseUrl differ.
"MiniMax-M2.5": {
id: "MiniMax-M2.5",
name: "MiniMax-M2.5",
api: "anthropic-messages",
provider: "minimax-cn",
// CN host (api.minimaxi.com — note the extra "i") vs api.minimax.io intl.
baseUrl: "https://api.minimaxi.com/anthropic",
reasoning: true,
input: ["text"],
// Presumably USD per 1M tokens; same rates as the international entry
// (cacheWrite = 1.25x input, cacheRead = 0.1x input) — TODO confirm.
cost: {
input: 0.3,
output: 1.2,
cacheRead: 0.03,
cacheWrite: 0.375,
},
// 204800 = 200K context; 131072 = 128K max output tokens.
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"anthropic-messages">,
},
"mistral": {
"codestral-latest": {
@ -8658,13 +8726,13 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.11,
output: 0.6,
cacheRead: 0.055,
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 262144,
contextWindow: 131072,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"qwen/qwen3-30b-a3b": {
id: "qwen/qwen3-30b-a3b",
@ -8734,6 +8802,23 @@ export const MODELS = {
contextWindow: 40960,
maxTokens: 40960,
} satisfies Model<"openai-completions">,
// Qwen3 4B (paid tier) routed through OpenRouter's OpenAI-compatible
// completions API. A ":free" variant of the same model follows this entry.
"qwen/qwen3-4b": {
id: "qwen/qwen3-4b",
name: "Qwen: Qwen3 4B",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: true,
input: ["text"],
// Presumably USD per 1M tokens (OpenRouter pass-through pricing).
// Zero cache rates — presumably no cache pricing exposed for this route;
// TODO confirm against OpenRouter's model listing.
cost: {
input: 0.0715,
output: 0.273,
cacheRead: 0,
cacheWrite: 0,
},
// 131072 = 128K context; output capped at 8192 tokens.
contextWindow: 131072,
maxTokens: 8192,
} satisfies Model<"openai-completions">,
"qwen/qwen3-4b:free": {
id: "qwen/qwen3-4b:free",
name: "Qwen: Qwen3 4B (free)",

View file

@ -2,6 +2,10 @@
## [Unreleased]
### Added
- Added MiniMax M2.5 model entries for `minimax`, `minimax-cn`, `openrouter`, and `vercel-ai-gateway` providers, plus `minimax-m2.5-free` for `opencode`.
## [0.52.10] - 2026-02-12
### New Features