mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-22 02:03:42 +00:00
Add MiniMax provider support (#656 by @dannote)
- Add minimax to KnownProvider and Api types
- Add MINIMAX_API_KEY to getEnvApiKey()
- Generate MiniMax-M2 and MiniMax-M2.1 models
- Add context overflow detection pattern
- Add tests to all required test files
- Update README and CHANGELOG with attribution

Also fixes:
- Bedrock duplicate toolResult ID when content has multiple blocks
- Sandbox extension unused parameter lint warning
This commit is contained in:
parent
edc576024f
commit
8af8d0d672
20 changed files with 233 additions and 31 deletions
|
|
@ -46,7 +46,8 @@ async function testTokensOnAbort<TApi extends Api>(llm: Model<TApi>, options: Op
|
|||
expect(msg.stopReason).toBe("aborted");
|
||||
|
||||
// OpenAI providers, OpenAI Codex, Gemini CLI, zai, Amazon Bedrock, and the GPT-OSS model on Antigravity only send usage in the final chunk,
|
||||
// so when aborted they have no token stats Anthropic and Google send usage information early in the stream
|
||||
// so when aborted they have no token stats. Anthropic and Google send usage information early in the stream.
|
||||
// MiniMax reports input tokens but not output tokens when aborted.
|
||||
if (
|
||||
llm.api === "openai-completions" ||
|
||||
llm.api === "openai-responses" ||
|
||||
|
|
@ -58,6 +59,10 @@ async function testTokensOnAbort<TApi extends Api>(llm: Model<TApi>, options: Op
|
|||
) {
|
||||
expect(msg.usage.input).toBe(0);
|
||||
expect(msg.usage.output).toBe(0);
|
||||
} else if (llm.provider === "minimax") {
|
||||
// MiniMax reports input tokens early but output tokens only in final chunk
|
||||
expect(msg.usage.input).toBeGreaterThan(0);
|
||||
expect(msg.usage.output).toBe(0);
|
||||
} else {
|
||||
expect(msg.usage.input).toBeGreaterThan(0);
|
||||
expect(msg.usage.output).toBeGreaterThan(0);
|
||||
|
|
@ -146,6 +151,14 @@ describe("Token Statistics on Abort", () => {
|
|||
});
|
||||
});
|
||||
|
||||
describe.skipIf(!process.env.MINIMAX_API_KEY)("MiniMax Provider", () => {
|
||||
const llm = getModel("minimax", "MiniMax-M2.1");
|
||||
|
||||
it("should include token stats when aborted mid-stream", { retry: 3, timeout: 30000 }, async () => {
|
||||
await testTokensOnAbort(llm);
|
||||
});
|
||||
});
|
||||
|
||||
// =========================================================================
|
||||
// OAuth-based providers (credentials from ~/.pi/agent/oauth.json)
|
||||
// =========================================================================
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue