feat(ai): add Kimi For Coding provider support

- Add kimi-coding provider using Anthropic Messages API
- API endpoint: https://api.kimi.com/coding/v1
- Environment variable: KIMI_API_KEY
- Models: kimi-k2-thinking (text), k2p5 (text + image)
- Add context overflow detection pattern for Kimi errors
- Add tests for all standard test suites
This commit is contained in:
Mario Zechner 2026-01-29 04:12:28 +01:00
parent d1e33599f6
commit 87ab5c5c3b
22 changed files with 262 additions and 1 deletion

View file

@ -2,6 +2,10 @@
## [Unreleased]
### Added
- Added Kimi For Coding provider support (Moonshot AI's Anthropic-compatible coding API)
## [0.50.2] - 2026-01-29
### Added

View file

@ -63,6 +63,7 @@ Unified LLM API with automatic model discovery, provider configuration, token an
- **Google Gemini CLI** (requires OAuth, see below)
- **Antigravity** (requires OAuth, see below)
- **Amazon Bedrock**
- **Kimi For Coding** (Moonshot AI, uses Anthropic-compatible API)
- **Any OpenAI-compatible API**: Ollama, vLLM, LM Studio, etc.
## Installation
@ -894,6 +895,7 @@ In Node.js environments, you can set environment variables to avoid passing API
| Vercel AI Gateway | `AI_GATEWAY_API_KEY` |
| zAI | `ZAI_API_KEY` |
| MiniMax | `MINIMAX_API_KEY` |
| Kimi For Coding | `KIMI_API_KEY` |
| GitHub Copilot | `COPILOT_GITHUB_TOKEN` or `GH_TOKEN` or `GITHUB_TOKEN` |
When set, the library automatically uses these keys:

View file

@ -630,6 +630,33 @@ async function loadModelsDevData(): Promise<Model<any>[]> {
}
}
// Process Kimi For Coding models
// Maps models.dev's "kimi-for-coding" provider entries onto our "kimi-coding"
// provider id. Only tool-capable models are kept, matching the rest of the loader.
if (data["kimi-for-coding"]?.models) {
	for (const [modelId, model] of Object.entries(data["kimi-for-coding"].models)) {
		const m = model as ModelsDevModel;
		// Registry only tracks tool-capable models; skip the rest.
		if (m.tool_call !== true) continue;
		models.push({
			id: modelId,
			name: m.name || modelId,
			api: "anthropic-messages",
			provider: "kimi-coding",
			// Kimi For Coding's Anthropic-compatible API - SDK appends /v1/messages
			baseUrl: "https://api.kimi.com/coding",
			reasoning: m.reasoning === true,
			// Image support is driven by the models.dev modality list.
			input: m.modalities?.input?.includes("image") ? ["text", "image"] : ["text"],
			cost: {
				input: m.cost?.input || 0,
				output: m.cost?.output || 0,
				cacheRead: m.cost?.cache_read || 0,
				cacheWrite: m.cost?.cache_write || 0,
			},
			// Conservative 4096 fallback when models.dev omits limits — TODO confirm
			contextWindow: m.limit?.context || 4096,
			maxTokens: m.limit?.output || 4096,
		});
	}
}
console.log(`Loaded ${models.length} tool-capable models from models.dev`);
return models;
} catch (error) {
@ -1130,6 +1157,42 @@ async function generateModels() {
];
allModels.push(...vertexModels);
// Kimi For Coding models (Moonshot AI's Anthropic-compatible coding API)
// Static fallback in case models.dev doesn't have them yet
const KIMI_CODING_BASE_URL = "https://api.kimi.com/coding";
const kimiCodingModels: Model<"anthropic-messages">[] = [
	{
		id: "kimi-k2-thinking",
		name: "Kimi K2 Thinking",
		api: "anthropic-messages",
		provider: "kimi-coding",
		baseUrl: KIMI_CODING_BASE_URL,
		reasoning: true,
		// Text-only model.
		input: ["text"],
		// Zero cost — presumably flat-rate/subscription pricing; confirm if per-token billing is added.
		cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
		contextWindow: 262144,
		maxTokens: 32768,
	},
	{
		id: "k2p5",
		name: "Kimi K2.5",
		api: "anthropic-messages",
		provider: "kimi-coding",
		baseUrl: KIMI_CODING_BASE_URL,
		reasoning: true,
		// Fix: k2p5 supports image input ("k2p5 (text + image)"), matching the
		// generated MODELS entry and the image tool-result tests that use this model.
		input: ["text", "image"],
		cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
		contextWindow: 262144,
		maxTokens: 32768,
	},
];
// Only add if not already present from models.dev
for (const model of kimiCodingModels) {
	if (!allModels.some((m) => m.provider === "kimi-coding" && m.id === model.id)) {
		allModels.push(model);
	}
}
const azureOpenAiModels: Model<Api>[] = allModels
.filter((model) => model.provider === "openai" && model.api === "openai-responses")
.map((model) => ({

View file

@ -107,6 +107,7 @@ export function getEnvApiKey(provider: any): string | undefined {
"minimax-cn": "MINIMAX_CN_API_KEY",
huggingface: "HF_TOKEN",
opencode: "OPENCODE_API_KEY",
"kimi-coding": "KIMI_API_KEY",
};
const envVar = envMap[provider];

View file

@ -3536,6 +3536,42 @@ export const MODELS = {
maxTokens: 128000,
} satisfies Model<"openai-completions">,
},
// Kimi For Coding (Moonshot AI) — served via the Anthropic Messages API.
// NOTE(review): all cost fields are zero — presumably subscription/flat-rate
// pricing rather than per-token; confirm before relying on cost accounting.
"kimi-coding": {
	"k2p5": {
		id: "k2p5",
		name: "Kimi K2.5",
		api: "anthropic-messages",
		provider: "kimi-coding",
		// SDK appends /v1/messages to this base URL.
		baseUrl: "https://api.kimi.com/coding",
		reasoning: true,
		// k2p5 accepts both text and image input.
		input: ["text", "image"],
		cost: {
			input: 0,
			output: 0,
			cacheRead: 0,
			cacheWrite: 0,
		},
		contextWindow: 262144,
		maxTokens: 32768,
	} satisfies Model<"anthropic-messages">,
	"kimi-k2-thinking": {
		id: "kimi-k2-thinking",
		name: "Kimi K2 Thinking",
		api: "anthropic-messages",
		provider: "kimi-coding",
		baseUrl: "https://api.kimi.com/coding",
		reasoning: true,
		// Text-only model.
		input: ["text"],
		cost: {
			input: 0,
			output: 0,
			cacheRead: 0,
			cacheWrite: 0,
		},
		contextWindow: 262144,
		maxTokens: 32768,
	} satisfies Model<"anthropic-messages">,
},
"minimax": {
"MiniMax-M2": {
id: "MiniMax-M2",

View file

@ -36,7 +36,8 @@ export type KnownProvider =
| "minimax"
| "minimax-cn"
| "huggingface"
| "opencode";
| "opencode"
| "kimi-coding";
export type Provider = KnownProvider | string;
export type ThinkingLevel = "minimal" | "low" | "medium" | "high" | "xhigh";

View file

@ -18,6 +18,7 @@ import type { AssistantMessage } from "../types.js";
* - LM Studio: "tokens to keep from the initial prompt is greater than the context length"
* - GitHub Copilot: "prompt token count of X exceeds the limit of Y"
* - MiniMax: "invalid params, context window exceeds limit"
* - Kimi For Coding: "Your request exceeded model token limit: X (requested: Y)"
* - Cerebras: Returns "400/413 status code (no body)" - handled separately below
* - Mistral: Returns "400/413 status code (no body)" - handled separately below
* - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow
@ -35,6 +36,7 @@ const OVERFLOW_PATTERNS = [
/exceeds the available context size/i, // llama.cpp server
/greater than the context length/i, // LM Studio
/context window exceeds limit/i, // MiniMax
/exceeded model token limit/i, // Kimi For Coding
/context[_ ]length[_ ]exceeded/i, // Generic fallback
/too many tokens/i, // Generic fallback
/token limit exceeded/i, // Generic fallback
@ -62,6 +64,7 @@ const OVERFLOW_PATTERNS = [
* - OpenRouter (all backends): "maximum context length is X tokens"
* - llama.cpp: "exceeds the available context size"
* - LM Studio: "greater than the context length"
* - Kimi For Coding: "exceeded model token limit: X (requested: Y)"
*
* **Unreliable detection:**
* - z.ai: Sometimes accepts overflow silently (detectable via usage.input > contextWindow),

View file

@ -193,6 +193,18 @@ describe("AI Providers Abort Tests", () => {
});
});
// Abort behavior for Kimi For Coding; skipped unless KIMI_API_KEY is set.
describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding Provider Abort", () => {
	const model = getModel("kimi-coding", "kimi-k2-thinking");

	it("should abort mid-stream", { retry: 3 }, async () => {
		await testAbortSignal(model);
	});

	it("should handle immediate abort", { retry: 3 }, async () => {
		await testImmediateAbort(model);
	});
});
describe.skipIf(!process.env.AI_GATEWAY_API_KEY)("Vercel AI Gateway Provider Abort", () => {
const llm = getModel("vercel-ai-gateway", "google/gemini-2.5-flash");

View file

@ -443,6 +443,21 @@ describe("Context overflow error handling", () => {
}, 120000);
});
// =============================================================================
// Kimi For Coding
// =============================================================================
describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding", () => {
it("kimi-k2-thinking - should detect overflow via isContextOverflow", async () => {
const model = getModel("kimi-coding", "kimi-k2-thinking");
const result = await testContextOverflow(model, process.env.KIMI_API_KEY!);
logResult(result);
expect(result.stopReason).toBe("error");
expect(isContextOverflow(result.response, model.contextWindow)).toBe(true);
}, 120000);
});
// =============================================================================
// Vercel AI Gateway - Unified API for multiple providers
// =============================================================================

View file

@ -88,6 +88,8 @@ const PROVIDER_MODEL_PAIRS: ProviderModelPair[] = [
{ provider: "groq", model: "openai/gpt-oss-120b", label: "groq-gpt-oss-120b" },
// Hugging Face
{ provider: "huggingface", model: "moonshotai/Kimi-K2.5", label: "huggingface-kimi-k2.5" },
// Kimi For Coding
{ provider: "kimi-coding", model: "kimi-k2-thinking", label: "kimi-coding-k2-thinking" },
// Mistral
{ provider: "mistral", model: "devstral-medium-latest", label: "mistral-devstral-medium" },
// MiniMax

View file

@ -388,6 +388,26 @@ describe("AI Providers Empty Message Tests", () => {
});
});
// Empty-message edge cases for Kimi For Coding; skipped unless KIMI_API_KEY is set.
describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding Provider Empty Messages", () => {
	const model = getModel("kimi-coding", "kimi-k2-thinking");

	it("should handle empty content array", { retry: 3, timeout: 30000 }, async () => {
		await testEmptyMessage(model);
	});

	it("should handle empty string content", { retry: 3, timeout: 30000 }, async () => {
		await testEmptyStringMessage(model);
	});

	it("should handle whitespace-only content", { retry: 3, timeout: 30000 }, async () => {
		await testWhitespaceOnlyMessage(model);
	});

	it("should handle empty assistant message in conversation", { retry: 3, timeout: 30000 }, async () => {
		await testEmptyAssistantMessage(model);
	});
});
describe.skipIf(!process.env.AI_GATEWAY_API_KEY)("Vercel AI Gateway Provider Empty Messages", () => {
const llm = getModel("vercel-ai-gateway", "google/gemini-2.5-flash");

View file

@ -300,6 +300,18 @@ describe("Tool Results with Images", () => {
});
});
// Image tool-result handling; uses k2p5 because it accepts image input.
describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding Provider (k2p5)", () => {
	const model = getModel("kimi-coding", "k2p5");

	it("should handle tool result with only image", { retry: 3, timeout: 30000 }, async () => {
		await handleToolWithImageResult(model);
	});

	it("should handle tool result with text and image", { retry: 3, timeout: 30000 }, async () => {
		await handleToolWithTextAndImageResult(model);
	});
});
describe.skipIf(!process.env.AI_GATEWAY_API_KEY)("Vercel AI Gateway Provider (google/gemini-2.5-flash)", () => {
const llm = getModel("vercel-ai-gateway", "google/gemini-2.5-flash");

View file

@ -862,6 +862,33 @@ describe("Generate E2E Tests", () => {
});
});
// End-to-end generation suite for Kimi For Coding; skipped unless KIMI_API_KEY is set.
describe.skipIf(!process.env.KIMI_API_KEY)(
	"Kimi For Coding Provider (kimi-k2-thinking via Anthropic Messages)",
	() => {
		const model = getModel("kimi-coding", "kimi-k2-thinking");

		it("should complete basic text generation", { retry: 3 }, async () => {
			await basicTextGeneration(model);
		});

		it("should handle tool calling", { retry: 3 }, async () => {
			await handleToolCall(model);
		});

		it("should handle streaming", { retry: 3 }, async () => {
			await handleStreaming(model);
		});

		it("should handle thinking mode", { retry: 3 }, async () => {
			await handleThinking(model, { thinkingEnabled: true, thinkingBudgetTokens: 2048 });
		});

		it("should handle multi-turn with thinking and tools", { retry: 3 }, async () => {
			await multiTurn(model, { thinkingEnabled: true, thinkingBudgetTokens: 2048 });
		});
	},
);
// =========================================================================
// OAuth-based providers (credentials from ~/.pi/agent/oauth.json)
// Tokens are resolved at module level (see oauthTokens above)

View file

@ -186,6 +186,14 @@ describe("Token Statistics on Abort", () => {
});
});
// Usage accounting must survive an aborted stream.
describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding Provider", () => {
	const model = getModel("kimi-coding", "kimi-k2-thinking");

	it("should include token stats when aborted mid-stream", { retry: 3, timeout: 30000 }, async () => {
		await testTokensOnAbort(model);
	});
});
describe.skipIf(!process.env.AI_GATEWAY_API_KEY)("Vercel AI Gateway Provider", () => {
const llm = getModel("vercel-ai-gateway", "google/gemini-2.5-flash");

View file

@ -200,6 +200,14 @@ describe("Tool Call Without Result Tests", () => {
});
});
// Dangling tool calls (no matching result) must be filtered before resending.
describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding Provider", () => {
	const llm = getModel("kimi-coding", "kimi-k2-thinking");

	it("should filter out tool calls without corresponding tool results", { retry: 3, timeout: 30000 }, async () => {
		await testToolCallWithoutResult(llm);
	});
});
describe.skipIf(!process.env.AI_GATEWAY_API_KEY)("Vercel AI Gateway Provider", () => {
const model = getModel("vercel-ai-gateway", "google/gemini-2.5-flash");

View file

@ -394,6 +394,29 @@ describe("totalTokens field", () => {
);
});
// =========================================================================
// Kimi For Coding
// =========================================================================
describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding", () => {
	it(
		"kimi-k2-thinking - should return totalTokens equal to sum of components",
		{ retry: 3, timeout: 60000 },
		async () => {
			const llm = getModel("kimi-coding", "kimi-k2-thinking");
			console.log(`\nKimi For Coding / ${llm.id}:`);
			// Helper issues two requests; verify the invariant holds on both usages.
			const usage = await testTotalTokensWithCache(llm, { apiKey: process.env.KIMI_API_KEY });
			logUsage("First request", usage.first);
			logUsage("Second request", usage.second);
			assertTotalTokensEqualsComponents(usage.first);
			assertTotalTokensEqualsComponents(usage.second);
		},
	);
});
// =========================================================================
// Vercel AI Gateway
// =========================================================================

View file

@ -675,6 +675,22 @@ describe("AI Providers Unicode Surrogate Pair Tests", () => {
});
});
// Unicode / surrogate-pair robustness for Kimi For Coding; skipped unless KIMI_API_KEY is set.
describe.skipIf(!process.env.KIMI_API_KEY)("Kimi For Coding Provider Unicode Handling", () => {
	const model = getModel("kimi-coding", "kimi-k2-thinking");

	it("should handle emoji in tool results", { retry: 3, timeout: 30000 }, async () => {
		await testEmojiInToolResults(model);
	});

	it("should handle real-world LinkedIn comment data with emoji", { retry: 3, timeout: 30000 }, async () => {
		await testRealWorldLinkedInData(model);
	});

	it("should handle unpaired high surrogate (0xD83D) in tool results", { retry: 3, timeout: 30000 }, async () => {
		await testUnpairedHighSurrogate(model);
	});
});
describe.skipIf(!process.env.AI_GATEWAY_API_KEY)("Vercel AI Gateway Provider Unicode Handling", () => {
const llm = getModel("vercel-ai-gateway", "google/gemini-2.5-flash");

View file

@ -2,6 +2,10 @@
## [Unreleased]
### Added
- Added Kimi For Coding provider support (Moonshot AI's Anthropic-compatible coding API). Set `KIMI_API_KEY` environment variable. See [README.md#kimi-for-coding](README.md#kimi-for-coding).
## [0.50.2] - 2026-01-29
### New Features

View file

@ -94,6 +94,7 @@ For each built-in provider, pi maintains a list of tool-capable models, updated
- ZAI
- OpenCode Zen
- Hugging Face
- Kimi For Coding
- MiniMax
See [docs/providers.md](docs/providers.md) for detailed setup instructions.

View file

@ -63,6 +63,7 @@ pi
| ZAI | `ZAI_API_KEY` |
| OpenCode Zen | `OPENCODE_API_KEY` |
| Hugging Face | `HF_TOKEN` |
| Kimi For Coding | `KIMI_API_KEY` |
| MiniMax | `MINIMAX_API_KEY` |
| MiniMax (China) | `MINIMAX_CN_API_KEY` |

View file

@ -280,6 +280,7 @@ ${chalk.bold("Environment Variables:")}
ZAI_API_KEY - ZAI API key
MISTRAL_API_KEY - Mistral API key
MINIMAX_API_KEY - MiniMax API key
KIMI_API_KEY - Kimi For Coding API key
AWS_PROFILE - AWS profile for Amazon Bedrock
AWS_ACCESS_KEY_ID - AWS access key for Amazon Bedrock
AWS_SECRET_ACCESS_KEY - AWS secret key for Amazon Bedrock

View file

@ -33,6 +33,7 @@ export const defaultModelPerProvider: Record<KnownProvider, string> = {
"minimax-cn": "MiniMax-M2.1",
huggingface: "moonshotai/Kimi-K2.5",
opencode: "claude-opus-4-5",
"kimi-coding": "kimi-k2-thinking",
};
export interface ScopedModel {