feat(ai): add Kimi For Coding provider support

- Add kimi-coding provider using Anthropic Messages API
- API endpoint: https://api.kimi.com/coding/v1
- Environment variable: KIMI_API_KEY
- Models: kimi-k2-thinking (text), k2p5 (text + image)
- Add context overflow detection pattern for Kimi errors
- Add tests for all standard test suites
Author: Mario Zechner
Date: 2026-01-29 04:12:28 +01:00
Parent: d1e33599f6
Commit: 87ab5c5c3b
22 changed files with 262 additions and 1 deletion
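For reference, a minimal usage sketch of the new provider, pieced together from the tests below (the import path is an assumption; only getModel, the provider/model ids, the endpoint, and KIMI_API_KEY come from this commit):

// Hypothetical import path — adjust to wherever getModel lives in this repo.
import { getModel } from "@mariozechner/pi-ai";

// Requires KIMI_API_KEY in the environment; requests go to https://api.kimi.com/coding/v1
// via the Anthropic Messages API, per the commit message.
const thinkingModel = getModel("kimi-coding", "kimi-k2-thinking"); // text only, supports thinking
const visionModel = getModel("kimi-coding", "k2p5"); // text + image input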


@@ -193,6 +193,18 @@ describe("AI Providers Abort Tests", () => {
	});
});

describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding Provider Abort", () => {
	const llm = getModel("kimi-coding", "kimi-k2-thinking");

	it("should abort mid-stream", { retry: 3 }, async () => {
		await testAbortSignal(llm);
	});

	it("should handle immediate abort", { retry: 3 }, async () => {
		await testImmediateAbort(llm);
	});
});

describe.skipIf(!process.env.AI_GATEWAY_API_KEY)("Vercel AI Gateway Provider Abort", () => {
	const llm = getModel("vercel-ai-gateway", "google/gemini-2.5-flash");


@@ -443,6 +443,21 @@ describe("Context overflow error handling", () => {
	}, 120000);
});
// =============================================================================
// Kimi For Coding
// =============================================================================
describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding", () => {
	it("kimi-k2-thinking - should detect overflow via isContextOverflow", async () => {
		const model = getModel("kimi-coding", "kimi-k2-thinking");
		const result = await testContextOverflow(model, process.env.KIMI_API_KEY!);
		logResult(result);

		expect(result.stopReason).toBe("error");
		expect(isContextOverflow(result.response, model.contextWindow)).toBe(true);
	}, 120000);
});
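The isContextOverflow assertion above relies on the Kimi error pattern mentioned in the commit message, which is not shown in this hunk; the sketch below is only an illustration of that kind of pattern check (the regex wording and function name are assumptions, not the committed code, and the real helper also receives model.contextWindow).

// Illustrative sketch only — the actual Kimi error wording in this commit is not visible here.
const KIMI_OVERFLOW_PATTERN = /exceed.*context|context (length|window)|too many tokens/i; // assumed wording
function looksLikeContextOverflow(errorText: string): boolean {
	return KIMI_OVERFLOW_PATTERN.test(errorText);
}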
// =============================================================================
// Vercel AI Gateway - Unified API for multiple providers
// =============================================================================


@@ -88,6 +88,8 @@ const PROVIDER_MODEL_PAIRS: ProviderModelPair[] = [
	{ provider: "groq", model: "openai/gpt-oss-120b", label: "groq-gpt-oss-120b" },
	// Hugging Face
	{ provider: "huggingface", model: "moonshotai/Kimi-K2.5", label: "huggingface-kimi-k2.5" },
	// Kimi For Coding
	{ provider: "kimi-coding", model: "kimi-k2-thinking", label: "kimi-coding-k2-thinking" },
	// Mistral
	{ provider: "mistral", model: "devstral-medium-latest", label: "mistral-devstral-medium" },
	// MiniMax


@@ -388,6 +388,26 @@ describe("AI Providers Empty Message Tests", () => {
	});
});

describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding Provider Empty Messages", () => {
	const llm = getModel("kimi-coding", "kimi-k2-thinking");

	it("should handle empty content array", { retry: 3, timeout: 30000 }, async () => {
		await testEmptyMessage(llm);
	});

	it("should handle empty string content", { retry: 3, timeout: 30000 }, async () => {
		await testEmptyStringMessage(llm);
	});

	it("should handle whitespace-only content", { retry: 3, timeout: 30000 }, async () => {
		await testWhitespaceOnlyMessage(llm);
	});

	it("should handle empty assistant message in conversation", { retry: 3, timeout: 30000 }, async () => {
		await testEmptyAssistantMessage(llm);
	});
});

describe.skipIf(!process.env.AI_GATEWAY_API_KEY)("Vercel AI Gateway Provider Empty Messages", () => {
	const llm = getModel("vercel-ai-gateway", "google/gemini-2.5-flash");


@@ -300,6 +300,18 @@ describe("Tool Results with Images", () => {
	});
});

describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding Provider (k2p5)", () => {
	const llm = getModel("kimi-coding", "k2p5");

	it("should handle tool result with only image", { retry: 3, timeout: 30000 }, async () => {
		await handleToolWithImageResult(llm);
	});

	it("should handle tool result with text and image", { retry: 3, timeout: 30000 }, async () => {
		await handleToolWithTextAndImageResult(llm);
	});
});

describe.skipIf(!process.env.AI_GATEWAY_API_KEY)("Vercel AI Gateway Provider (google/gemini-2.5-flash)", () => {
	const llm = getModel("vercel-ai-gateway", "google/gemini-2.5-flash");


@@ -862,6 +862,33 @@ describe("Generate E2E Tests", () => {
	});
});

describe.skipIf(!process.env.KIMI_API_KEY)(
	"Kimi For Coding Provider (kimi-k2-thinking via Anthropic Messages)",
	() => {
		const llm = getModel("kimi-coding", "kimi-k2-thinking");

		it("should complete basic text generation", { retry: 3 }, async () => {
			await basicTextGeneration(llm);
		});

		it("should handle tool calling", { retry: 3 }, async () => {
			await handleToolCall(llm);
		});

		it("should handle streaming", { retry: 3 }, async () => {
			await handleStreaming(llm);
		});

		it("should handle thinking mode", { retry: 3 }, async () => {
			await handleThinking(llm, { thinkingEnabled: true, thinkingBudgetTokens: 2048 });
		});

		it("should handle multi-turn with thinking and tools", { retry: 3 }, async () => {
			await multiTurn(llm, { thinkingEnabled: true, thinkingBudgetTokens: 2048 });
		});
	},
);
// =========================================================================
// OAuth-based providers (credentials from ~/.pi/agent/oauth.json)
// Tokens are resolved at module level (see oauthTokens above)


@@ -186,6 +186,14 @@ describe("Token Statistics on Abort", () => {
	});
});

describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding Provider", () => {
	const llm = getModel("kimi-coding", "kimi-k2-thinking");

	it("should include token stats when aborted mid-stream", { retry: 3, timeout: 30000 }, async () => {
		await testTokensOnAbort(llm);
	});
});

describe.skipIf(!process.env.AI_GATEWAY_API_KEY)("Vercel AI Gateway Provider", () => {
	const llm = getModel("vercel-ai-gateway", "google/gemini-2.5-flash");


@@ -200,6 +200,14 @@ describe("Tool Call Without Result Tests", () => {
	});
});

describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding Provider", () => {
	const model = getModel("kimi-coding", "kimi-k2-thinking");

	it("should filter out tool calls without corresponding tool results", { retry: 3, timeout: 30000 }, async () => {
		await testToolCallWithoutResult(model);
	});
});

describe.skipIf(!process.env.AI_GATEWAY_API_KEY)("Vercel AI Gateway Provider", () => {
	const model = getModel("vercel-ai-gateway", "google/gemini-2.5-flash");


@@ -394,6 +394,29 @@ describe("totalTokens field", () => {
	);
});
// =========================================================================
// Kimi For Coding
// =========================================================================
describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding", () => {
	it(
		"kimi-k2-thinking - should return totalTokens equal to sum of components",
		{ retry: 3, timeout: 60000 },
		async () => {
			const llm = getModel("kimi-coding", "kimi-k2-thinking");
			console.log(`\nKimi For Coding / ${llm.id}:`);

			const { first, second } = await testTotalTokensWithCache(llm, { apiKey: process.env.KIMI_API_KEY });

			logUsage("First request", first);
			logUsage("Second request", second);

			assertTotalTokensEqualsComponents(first);
			assertTotalTokensEqualsComponents(second);
		},
	);
});
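assertTotalTokensEqualsComponents is defined elsewhere in this file; the invariant it enforces, sketched with assumed usage field names (only the "total equals sum of components" relationship comes from the test name), is roughly:

// Assumed field names — illustrative only.
expect(usage.totalTokens).toBe(usage.input + usage.output + usage.cacheRead + usage.cacheWrite);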
// =========================================================================
// Vercel AI Gateway
// =========================================================================


@@ -675,6 +675,22 @@ describe("AI Providers Unicode Surrogate Pair Tests", () => {
	});
});

describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding Provider Unicode Handling", () => {
	const llm = getModel("kimi-coding", "kimi-k2-thinking");

	it("should handle emoji in tool results", { retry: 3, timeout: 30000 }, async () => {
		await testEmojiInToolResults(llm);
	});

	it("should handle real-world LinkedIn comment data with emoji", { retry: 3, timeout: 30000 }, async () => {
		await testRealWorldLinkedInData(llm);
	});

	it("should handle unpaired high surrogate (0xD83D) in tool results", { retry: 3, timeout: 30000 }, async () => {
		await testUnpairedHighSurrogate(llm);
	});
});

describe.skipIf(!process.env.AI_GATEWAY_API_KEY)("Vercel AI Gateway Provider Unicode Handling", () => {
	const llm = getModel("vercel-ai-gateway", "google/gemini-2.5-flash");