feat(ai,coding-agent): add OpenCode Go provider support closes #1757

This commit is contained in:
Mario Zechner 2026-03-03 16:02:29 +01:00
parent 1912f0336b
commit 42579dd923
11 changed files with 107 additions and 22 deletions

View file

@@ -63,6 +63,8 @@ Unified LLM API with automatic model discovery, provider configuration, token an
- **Google Gemini CLI** (requires OAuth, see below) - **Google Gemini CLI** (requires OAuth, see below)
- **Antigravity** (requires OAuth, see below) - **Antigravity** (requires OAuth, see below)
- **Amazon Bedrock** - **Amazon Bedrock**
- **OpenCode Zen**
- **OpenCode Go**
- **Kimi For Coding** (Moonshot AI, uses Anthropic-compatible API) - **Kimi For Coding** (Moonshot AI, uses Anthropic-compatible API)
- **Any OpenAI-compatible API**: Ollama, vLLM, LM Studio, etc. - **Any OpenAI-compatible API**: Ollama, vLLM, LM Studio, etc.
@@ -905,6 +907,7 @@ In Node.js environments, you can set environment variables to avoid passing API
| Vercel AI Gateway | `AI_GATEWAY_API_KEY` | | Vercel AI Gateway | `AI_GATEWAY_API_KEY` |
| zAI | `ZAI_API_KEY` | | zAI | `ZAI_API_KEY` |
| MiniMax | `MINIMAX_API_KEY` | | MiniMax | `MINIMAX_API_KEY` |
| OpenCode Zen / OpenCode Go | `OPENCODE_API_KEY` |
| Kimi For Coding | `KIMI_API_KEY` | | Kimi For Coding | `KIMI_API_KEY` |
| GitHub Copilot | `COPILOT_GITHUB_TOKEN` or `GH_TOKEN` or `GITHUB_TOKEN` | | GitHub Copilot | `COPILOT_GITHUB_TOKEN` or `GH_TOKEN` or `GITHUB_TOKEN` |

View file

@@ -460,14 +460,21 @@ async function loadModelsDevData(): Promise<Model<any>[]> {
} }
} }
// Process OpenCode Zen models // Process OpenCode models (Zen and Go)
// API mapping based on provider.npm field: // API mapping based on provider.npm field:
// - @ai-sdk/openai → openai-responses // - @ai-sdk/openai → openai-responses
// - @ai-sdk/anthropic → anthropic-messages // - @ai-sdk/anthropic → anthropic-messages
// - @ai-sdk/google → google-generative-ai // - @ai-sdk/google → google-generative-ai
// - null/undefined/@ai-sdk/openai-compatible → openai-completions // - null/undefined/@ai-sdk/openai-compatible → openai-completions
if (data.opencode?.models) { const opencodeVariants = [
for (const [modelId, model] of Object.entries(data.opencode.models)) { { key: "opencode", provider: "opencode", basePath: "https://opencode.ai/zen" },
{ key: "opencode-go", provider: "opencode-go", basePath: "https://opencode.ai/zen/go" },
] as const;
for (const variant of opencodeVariants) {
if (!data[variant.key]?.models) continue;
for (const [modelId, model] of Object.entries(data[variant.key].models)) {
const m = model as ModelsDevModel & { status?: string }; const m = model as ModelsDevModel & { status?: string };
if (m.tool_call !== true) continue; if (m.tool_call !== true) continue;
if (m.status === "deprecated") continue; if (m.status === "deprecated") continue;
@@ -478,25 +485,25 @@ async function loadModelsDevData(): Promise<Model<any>[]> {
if (npm === "@ai-sdk/openai") { if (npm === "@ai-sdk/openai") {
api = "openai-responses"; api = "openai-responses";
baseUrl = "https://opencode.ai/zen/v1"; baseUrl = `${variant.basePath}/v1`;
} else if (npm === "@ai-sdk/anthropic") { } else if (npm === "@ai-sdk/anthropic") {
api = "anthropic-messages"; api = "anthropic-messages";
// Anthropic SDK appends /v1/messages to baseURL // Anthropic SDK appends /v1/messages to baseURL
baseUrl = "https://opencode.ai/zen"; baseUrl = variant.basePath;
} else if (npm === "@ai-sdk/google") { } else if (npm === "@ai-sdk/google") {
api = "google-generative-ai"; api = "google-generative-ai";
baseUrl = "https://opencode.ai/zen/v1"; baseUrl = `${variant.basePath}/v1`;
} else { } else {
// null, undefined, or @ai-sdk/openai-compatible // null, undefined, or @ai-sdk/openai-compatible
api = "openai-completions"; api = "openai-completions";
baseUrl = "https://opencode.ai/zen/v1"; baseUrl = `${variant.basePath}/v1`;
} }
models.push({ models.push({
id: modelId, id: modelId,
name: m.name || modelId, name: m.name || modelId,
api, api,
provider: "opencode", provider: variant.provider,
baseUrl, baseUrl,
reasoning: m.reasoning === true, reasoning: m.reasoning === true,
input: m.modalities?.input?.includes("image") ? ["text", "image"] : ["text"], input: m.modalities?.input?.includes("image") ? ["text", "image"] : ["text"],
@@ -657,11 +664,17 @@ async function generateModels() {
candidate.cost.cacheWrite = 6.25; candidate.cost.cacheWrite = 6.25;
candidate.contextWindow = 200000; candidate.contextWindow = 200000;
} }
if ((candidate.provider === "anthropic" || candidate.provider === "opencode") && candidate.id === "claude-opus-4-6") { if (
(candidate.provider === "anthropic" || candidate.provider === "opencode" || candidate.provider === "opencode-go") &&
candidate.id === "claude-opus-4-6"
) {
candidate.contextWindow = 200000; candidate.contextWindow = 200000;
} }
// opencode lists Claude Sonnet 4/4.5 with 1M context, actual limit is 200K // OpenCode variants list Claude Sonnet 4/4.5 with 1M context, actual limit is 200K
if (candidate.provider === "opencode" && (candidate.id === "claude-sonnet-4-5" || candidate.id === "claude-sonnet-4")) { if (
(candidate.provider === "opencode" || candidate.provider === "opencode-go") &&
(candidate.id === "claude-sonnet-4-5" || candidate.id === "claude-sonnet-4")
) {
candidate.contextWindow = 200000; candidate.contextWindow = 200000;
} }
} }

View file

@@ -113,6 +113,7 @@ export function getEnvApiKey(provider: any): string | undefined {
"minimax-cn": "MINIMAX_CN_API_KEY", "minimax-cn": "MINIMAX_CN_API_KEY",
huggingface: "HF_TOKEN", huggingface: "HF_TOKEN",
opencode: "OPENCODE_API_KEY", opencode: "OPENCODE_API_KEY",
"opencode-go": "OPENCODE_API_KEY",
"kimi-coding": "KIMI_API_KEY", "kimi-coding": "KIMI_API_KEY",
}; };

View file

@@ -6155,6 +6155,59 @@ export const MODELS = {
maxTokens: 131072, maxTokens: 131072,
} satisfies Model<"openai-completions">, } satisfies Model<"openai-completions">,
}, },
"opencode-go": {
"glm-5": {
id: "glm-5",
name: "GLM-5",
api: "openai-completions",
provider: "opencode-go",
baseUrl: "https://opencode.ai/zen/go/v1",
reasoning: true,
input: ["text"],
cost: {
input: 1,
output: 3.2,
cacheRead: 0.2,
cacheWrite: 0,
},
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"kimi-k2.5": {
id: "kimi-k2.5",
name: "Kimi K2.5",
api: "openai-completions",
provider: "opencode-go",
baseUrl: "https://opencode.ai/zen/go/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.6,
output: 3,
cacheRead: 0.1,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 65536,
} satisfies Model<"openai-completions">,
"minimax-m2.5": {
id: "minimax-m2.5",
name: "MiniMax M2.5",
api: "anthropic-messages",
provider: "opencode-go",
baseUrl: "https://opencode.ai/zen/go",
reasoning: true,
input: ["text"],
cost: {
input: 0.3,
output: 1.2,
cacheRead: 0.03,
cacheWrite: 0,
},
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"anthropic-messages">,
},
"openrouter": { "openrouter": {
"ai21/jamba-large-1.7": { "ai21/jamba-large-1.7": {
id: "ai21/jamba-large-1.7", id: "ai21/jamba-large-1.7",

View file

@@ -37,6 +37,7 @@ export type KnownProvider =
| "minimax-cn" | "minimax-cn"
| "huggingface" | "huggingface"
| "opencode" | "opencode"
| "opencode-go"
| "kimi-coding"; | "kimi-coding";
export type Provider = KnownProvider | string; export type Provider = KnownProvider | string;

View file

@@ -101,6 +101,9 @@ const PROVIDER_MODEL_PAIRS: ProviderModelPair[] = [
{ provider: "opencode", model: "glm-4.7-free", label: "zen-glm-4.7-free" }, { provider: "opencode", model: "glm-4.7-free", label: "zen-glm-4.7-free" },
{ provider: "opencode", model: "gpt-5.2-codex", label: "zen-gpt-5.2-codex" }, { provider: "opencode", model: "gpt-5.2-codex", label: "zen-gpt-5.2-codex" },
{ provider: "opencode", model: "minimax-m2.1-free", label: "zen-minimax-m2.1-free" }, { provider: "opencode", model: "minimax-m2.1-free", label: "zen-minimax-m2.1-free" },
// OpenCode Go
{ provider: "opencode-go", model: "kimi-k2.5", label: "go-kimi-k2.5" },
{ provider: "opencode-go", model: "minimax-m2.5", label: "go-minimax-m2.5" },
]; ];
// Cached context structure // Cached context structure

View file

@@ -3,17 +3,23 @@ import { MODELS } from "../src/models.generated.js";
import { complete } from "../src/stream.js"; import { complete } from "../src/stream.js";
import type { Model } from "../src/types.js"; import type { Model } from "../src/types.js";
describe.skipIf(!process.env.OPENCODE_API_KEY)("OpenCode Zen Models Smoke Test", () => { describe.skipIf(!process.env.OPENCODE_API_KEY)("OpenCode Models Smoke Test", () => {
const zenModels = Object.values(MODELS.opencode); const providers = [
{ key: "opencode", label: "OpenCode Zen" },
{ key: "opencode-go", label: "OpenCode Go" },
] as const;
zenModels.forEach((model) => { providers.forEach(({ key, label }) => {
it(`${model.id}`, async () => { const providerModels = Object.values(MODELS[key]);
const response = await complete(model as Model<any>, { providerModels.forEach((model) => {
messages: [{ role: "user", content: "Say hello.", timestamp: Date.now() }], it(`${label}: ${model.id}`, async () => {
}); const response = await complete(model as Model<any>, {
messages: [{ role: "user", content: "Say hello.", timestamp: Date.now() }],
});
expect(response.content).toBeTruthy(); expect(response.content).toBeTruthy();
expect(response.stopReason).toBe("stop"); expect(response.stopReason).toBe("stop");
}, 60000); }, 60000);
});
}); });
}); });

View file

@@ -98,6 +98,7 @@ For each built-in provider, pi maintains a list of tool-capable models, updated
- Vercel AI Gateway - Vercel AI Gateway
- ZAI - ZAI
- OpenCode Zen - OpenCode Zen
- OpenCode Go
- Hugging Face - Hugging Face
- Kimi For Coding - Kimi For Coding
- MiniMax - MiniMax

View file

@@ -65,6 +65,7 @@ pi
| Vercel AI Gateway | `AI_GATEWAY_API_KEY` | `vercel-ai-gateway` | | Vercel AI Gateway | `AI_GATEWAY_API_KEY` | `vercel-ai-gateway` |
| ZAI | `ZAI_API_KEY` | `zai` | | ZAI | `ZAI_API_KEY` | `zai` |
| OpenCode Zen | `OPENCODE_API_KEY` | `opencode` | | OpenCode Zen | `OPENCODE_API_KEY` | `opencode` |
| OpenCode Go | `OPENCODE_API_KEY` | `opencode-go` |
| Hugging Face | `HF_TOKEN` | `huggingface` | | Hugging Face | `HF_TOKEN` | `huggingface` |
| Kimi For Coding | `KIMI_API_KEY` | `kimi-coding` | | Kimi For Coding | `KIMI_API_KEY` | `kimi-coding` |
| MiniMax | `MINIMAX_API_KEY` | `minimax` | | MiniMax | `MINIMAX_API_KEY` | `minimax` |
@@ -81,7 +82,8 @@ Store credentials in `~/.pi/agent/auth.json`:
"anthropic": { "type": "api_key", "key": "sk-ant-..." }, "anthropic": { "type": "api_key", "key": "sk-ant-..." },
"openai": { "type": "api_key", "key": "sk-..." }, "openai": { "type": "api_key", "key": "sk-..." },
"google": { "type": "api_key", "key": "..." }, "google": { "type": "api_key", "key": "..." },
"opencode": { "type": "api_key", "key": "..." } "opencode": { "type": "api_key", "key": "..." },
"opencode-go": { "type": "api_key", "key": "..." }
} }
``` ```

View file

@@ -291,6 +291,7 @@ ${chalk.bold("Environment Variables:")}
ZAI_API_KEY - ZAI API key ZAI_API_KEY - ZAI API key
MISTRAL_API_KEY - Mistral API key MISTRAL_API_KEY - Mistral API key
MINIMAX_API_KEY - MiniMax API key MINIMAX_API_KEY - MiniMax API key
OPENCODE_API_KEY - OpenCode Zen/OpenCode Go API key
KIMI_API_KEY - Kimi For Coding API key KIMI_API_KEY - Kimi For Coding API key
AWS_PROFILE - AWS profile for Amazon Bedrock AWS_PROFILE - AWS profile for Amazon Bedrock
AWS_ACCESS_KEY_ID - AWS access key for Amazon Bedrock AWS_ACCESS_KEY_ID - AWS access key for Amazon Bedrock

View file

@@ -33,6 +33,7 @@ export const defaultModelPerProvider: Record<KnownProvider, string> = {
"minimax-cn": "MiniMax-M2.1", "minimax-cn": "MiniMax-M2.1",
huggingface: "moonshotai/Kimi-K2.5", huggingface: "moonshotai/Kimi-K2.5",
opencode: "claude-opus-4-6", opencode: "claude-opus-4-6",
"opencode-go": "kimi-k2.5",
"kimi-coding": "kimi-k2-thinking", "kimi-coding": "kimi-k2-thinking",
}; };