Add OpenCode Zen provider support

This commit is contained in:
Mario Zechner 2026-01-09 06:58:20 +01:00
parent 98b25baf4d
commit 97d0189eae
8 changed files with 529 additions and 1 deletions

View file

@@ -2877,6 +2877,450 @@ export const MODELS = {
maxTokens: 128000,
} satisfies Model<"openai-codex-responses">,
},
// OpenCode Zen model catalog (provider id: "opencode").
// NOTE(review): entries served over the Anthropic Messages protocol use the
// bare https://opencode.ai/zen base URL, while OpenAI-/Google-protocol entries
// use https://opencode.ai/zen/v1 — the split is consistent below, but confirm
// it matches the gateway's routing rules.
// Cost figures are presumably USD per million tokens, matching the sibling
// provider entries in MODELS — TODO confirm against OpenCode Zen pricing.
"opencode": {
// Alpha/preview model exposed via the Anthropic Messages protocol.
"alpha-gd4": {
id: "alpha-gd4",
name: "Alpha GD4",
api: "anthropic-messages",
provider: "opencode",
baseUrl: "https://opencode.ai/zen",
reasoning: true,
input: ["text"],
cost: {
input: 0.5,
output: 2,
cacheRead: 0.15,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 32768,
} satisfies Model<"anthropic-messages">,
// Alpha/preview model exposed via the OpenAI-completions protocol.
"alpha-glm-4.7": {
id: "alpha-glm-4.7",
name: "Alpha GLM-4.7",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0.6,
output: 2.2,
// NOTE(review): cacheRead equals the base input rate here (0.6),
// unlike "glm-4.6" below where cached reads are discounted — confirm.
cacheRead: 0.6,
cacheWrite: 0,
},
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
// Free-tier entry: all four cost fields are zero.
"big-pickle": {
id: "big-pickle",
name: "Big Pickle",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 200000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
// Non-reasoning Claude model; only Claude entries below accept image input.
"claude-3-5-haiku": {
id: "claude-3-5-haiku",
name: "Claude Haiku 3.5",
api: "anthropic-messages",
provider: "opencode",
baseUrl: "https://opencode.ai/zen",
reasoning: false,
input: ["text", "image"],
cost: {
input: 0.8,
output: 4,
cacheRead: 0.08,
cacheWrite: 1,
},
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"anthropic-messages">,
"claude-haiku-4-5": {
id: "claude-haiku-4-5",
name: "Claude Haiku 4.5",
api: "anthropic-messages",
provider: "opencode",
baseUrl: "https://opencode.ai/zen",
reasoning: true,
input: ["text", "image"],
cost: {
input: 1,
output: 5,
cacheRead: 0.1,
cacheWrite: 1.25,
},
contextWindow: 200000,
maxTokens: 64000,
} satisfies Model<"anthropic-messages">,
"claude-opus-4-1": {
id: "claude-opus-4-1",
name: "Claude Opus 4.1",
api: "anthropic-messages",
provider: "opencode",
baseUrl: "https://opencode.ai/zen",
reasoning: true,
input: ["text", "image"],
cost: {
input: 15,
output: 75,
cacheRead: 1.5,
cacheWrite: 18.75,
},
contextWindow: 200000,
maxTokens: 32000,
} satisfies Model<"anthropic-messages">,
"claude-opus-4-5": {
id: "claude-opus-4-5",
name: "Claude Opus 4.5",
api: "anthropic-messages",
provider: "opencode",
baseUrl: "https://opencode.ai/zen",
reasoning: true,
input: ["text", "image"],
cost: {
input: 5,
output: 25,
cacheRead: 0.5,
cacheWrite: 6.25,
},
contextWindow: 200000,
maxTokens: 64000,
} satisfies Model<"anthropic-messages">,
// NOTE(review): 1M-token context on the Sonnet entries (vs 200k elsewhere)
// — presumably the long-context tier; confirm the gateway enables it.
"claude-sonnet-4": {
id: "claude-sonnet-4",
name: "Claude Sonnet 4",
api: "anthropic-messages",
provider: "opencode",
baseUrl: "https://opencode.ai/zen",
reasoning: true,
input: ["text", "image"],
cost: {
input: 3,
output: 15,
cacheRead: 0.3,
cacheWrite: 3.75,
},
contextWindow: 1000000,
maxTokens: 64000,
} satisfies Model<"anthropic-messages">,
"claude-sonnet-4-5": {
id: "claude-sonnet-4-5",
name: "Claude Sonnet 4.5",
api: "anthropic-messages",
provider: "opencode",
baseUrl: "https://opencode.ai/zen",
reasoning: true,
input: ["text", "image"],
cost: {
input: 3,
output: 15,
cacheRead: 0.3,
cacheWrite: 3.75,
},
contextWindow: 1000000,
maxTokens: 64000,
} satisfies Model<"anthropic-messages">,
// Gemini entries use the Google Generative AI protocol but the /zen/v1 base.
"gemini-3-flash": {
id: "gemini-3-flash",
name: "Gemini 3 Flash",
api: "google-generative-ai",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.5,
output: 3,
cacheRead: 0.05,
cacheWrite: 0,
},
contextWindow: 1048576,
maxTokens: 65536,
} satisfies Model<"google-generative-ai">,
"gemini-3-pro": {
id: "gemini-3-pro",
name: "Gemini 3 Pro",
api: "google-generative-ai",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 2,
output: 12,
cacheRead: 0.2,
cacheWrite: 0,
},
contextWindow: 1048576,
maxTokens: 65536,
} satisfies Model<"google-generative-ai">,
"glm-4.6": {
id: "glm-4.6",
name: "GLM-4.6",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0.6,
output: 2.2,
cacheRead: 0.1,
cacheWrite: 0,
},
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
// Free-tier entry; display name omits the "-free" suffix of the id.
"glm-4.7-free": {
id: "glm-4.7-free",
name: "GLM-4.7",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
// GPT entries use the OpenAI Responses protocol.
"gpt-5": {
id: "gpt-5",
name: "GPT-5",
api: "openai-responses",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 1.07,
output: 8.5,
cacheRead: 0.107,
cacheWrite: 0,
},
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
"gpt-5-codex": {
id: "gpt-5-codex",
name: "GPT-5 Codex",
api: "openai-responses",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 1.07,
output: 8.5,
cacheRead: 0.107,
cacheWrite: 0,
},
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
// NOTE(review): all-zero cost on a Nano tier — verify it is actually free
// here rather than a placeholder.
"gpt-5-nano": {
id: "gpt-5-nano",
name: "GPT-5 Nano",
api: "openai-responses",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
"gpt-5.1": {
id: "gpt-5.1",
name: "GPT-5.1",
api: "openai-responses",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 1.07,
output: 8.5,
cacheRead: 0.107,
cacheWrite: 0,
},
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
"gpt-5.1-codex": {
id: "gpt-5.1-codex",
name: "GPT-5.1 Codex",
api: "openai-responses",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 1.07,
output: 8.5,
cacheRead: 0.107,
cacheWrite: 0,
},
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
"gpt-5.1-codex-max": {
id: "gpt-5.1-codex-max",
name: "GPT-5.1 Codex Max",
api: "openai-responses",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 1.25,
output: 10,
cacheRead: 0.125,
cacheWrite: 0,
},
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
"gpt-5.1-codex-mini": {
id: "gpt-5.1-codex-mini",
name: "GPT-5.1 Codex Mini",
api: "openai-responses",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.25,
output: 2,
cacheRead: 0.025,
cacheWrite: 0,
},
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
"gpt-5.2": {
id: "gpt-5.2",
name: "GPT-5.2",
api: "openai-responses",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 1.75,
output: 14,
cacheRead: 0.175,
cacheWrite: 0,
},
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-responses">,
// Free-tier entry. NOTE(review): maxTokens equals contextWindow (256000)
// — unusual; most entries cap output well below the window. Confirm.
"grok-code": {
id: "grok-code",
name: "Grok Code Fast 1",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 256000,
maxTokens: 256000,
} satisfies Model<"openai-completions">,
// NOTE(review): both Kimi entries also set maxTokens === contextWindow
// (262144) and charge cacheRead at the full input rate — confirm both.
"kimi-k2": {
id: "kimi-k2",
name: "Kimi K2",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.4,
output: 2.5,
cacheRead: 0.4,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 262144,
} satisfies Model<"openai-completions">,
"kimi-k2-thinking": {
id: "kimi-k2-thinking",
name: "Kimi K2 Thinking",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0.4,
output: 2.5,
cacheRead: 0.4,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 262144,
} satisfies Model<"openai-completions">,
// Free-tier entry served over the Anthropic Messages protocol.
"minimax-m2.1-free": {
id: "minimax-m2.1-free",
name: "MiniMax M2.1",
api: "anthropic-messages",
provider: "opencode",
baseUrl: "https://opencode.ai/zen",
reasoning: true,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"anthropic-messages">,
"qwen3-coder": {
id: "qwen3-coder",
name: "Qwen3 Coder",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.45,
output: 1.8,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 65536,
} satisfies Model<"openai-completions">,
},
"openrouter": {
"ai21/jamba-large-1.7": {
id: "ai21/jamba-large-1.7",