From 9f3eef65f83656d770f683edba6cfedc7d807539 Mon Sep 17 00:00:00 2001
From: Daniel Tatarkin
Date: Mon, 26 Jan 2026 17:56:13 -0500
Subject: [PATCH] fix(ai): filter deprecated OpenCode models from generation
 (#970)

Add status === 'deprecated' check for OpenCode Zen models, matching the
existing pattern used for GitHub Copilot models. This removes deprecated
models like glm-4.7-free and minimax-m2.1-free from the generated model
catalog.
---
 packages/ai/scripts/generate-models.ts |  3 +-
 packages/ai/src/models.generated.ts    | 55 +-------------------------
 2 files changed, 4 insertions(+), 54 deletions(-)

diff --git a/packages/ai/scripts/generate-models.ts b/packages/ai/scripts/generate-models.ts
index f6e7af09..caa43423 100644
--- a/packages/ai/scripts/generate-models.ts
+++ b/packages/ai/scripts/generate-models.ts
@@ -482,8 +482,9 @@ async function loadModelsDevData(): Promise[]> {
   // - null/undefined/@ai-sdk/openai-compatible → openai-completions
   if (data.opencode?.models) {
     for (const [modelId, model] of Object.entries(data.opencode.models)) {
-      const m = model as ModelsDevModel;
+      const m = model as ModelsDevModel & { status?: string };
       if (m.tool_call !== true) continue;
+      if (m.status === "deprecated") continue;
       const npm = m.provider?.npm;
       let api: Api;
 
diff --git a/packages/ai/src/models.generated.ts b/packages/ai/src/models.generated.ts
index 4aa08418..777edd2d 100644
--- a/packages/ai/src/models.generated.ts
+++ b/packages/ai/src/models.generated.ts
@@ -4636,23 +4636,6 @@ export const MODELS = {
       contextWindow: 204800,
       maxTokens: 131072,
     } satisfies Model<"openai-completions">,
-    "glm-4.7-free": {
-      id: "glm-4.7-free",
-      name: "GLM-4.7",
-      api: "openai-completions",
-      provider: "opencode",
-      baseUrl: "https://opencode.ai/zen/v1",
-      reasoning: true,
-      input: ["text"],
-      cost: {
-        input: 0,
-        output: 0,
-        cacheRead: 0,
-        cacheWrite: 0,
-      },
-      contextWindow: 204800,
-      maxTokens: 131072,
-    } satisfies Model<"openai-completions">,
     "gpt-5": {
       id: "gpt-5",
       name: "GPT-5",
@@ -4806,23 +4789,6 @@ export const MODELS = {
       contextWindow: 400000,
       maxTokens: 128000,
     } satisfies Model<"openai-responses">,
-    "grok-code": {
-      id: "grok-code",
-      name: "Grok Code Fast 1",
-      api: "openai-completions",
-      provider: "opencode",
-      baseUrl: "https://opencode.ai/zen/v1",
-      reasoning: true,
-      input: ["text"],
-      cost: {
-        input: 0,
-        output: 0,
-        cacheRead: 0,
-        cacheWrite: 0,
-      },
-      contextWindow: 256000,
-      maxTokens: 256000,
-    } satisfies Model<"openai-completions">,
     "kimi-k2": {
       id: "kimi-k2",
       name: "Kimi K2",
@@ -4857,23 +4823,6 @@ export const MODELS = {
       contextWindow: 262144,
       maxTokens: 262144,
     } satisfies Model<"openai-completions">,
-    "minimax-m2.1-free": {
-      id: "minimax-m2.1-free",
-      name: "MiniMax M2.1",
-      api: "anthropic-messages",
-      provider: "opencode",
-      baseUrl: "https://opencode.ai/zen",
-      reasoning: true,
-      input: ["text"],
-      cost: {
-        input: 0,
-        output: 0,
-        cacheRead: 0,
-        cacheWrite: 0,
-      },
-      contextWindow: 204800,
-      maxTokens: 131072,
-    } satisfies Model<"anthropic-messages">,
     "qwen3-coder": {
       id: "qwen3-coder",
       name: "Qwen3 Coder",
@@ -5074,8 +5023,8 @@ export const MODELS = {
       cost: {
         input: 0.7999999999999999,
         output: 4,
-        cacheRead: 0,
-        cacheWrite: 0,
+        cacheRead: 0.08,
+        cacheWrite: 1,
       },
       contextWindow: 200000,
       maxTokens: 8192,
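
Reviewer note (not part of the patch): below is a minimal sketch of the filtering behavior the new check introduces, assuming the models.dev payload exposes an optional status field on OpenCode Zen entries, as the widened cast in the first hunk implies. ZenModel and pickActiveModels are hypothetical stand-ins, not identifiers from this repo; the real logic is the loop inside loadModelsDevData() in packages/ai/scripts/generate-models.ts.

// Illustrative sketch only: ZenModel and pickActiveModels are hypothetical
// stand-ins, not identifiers from this repo.
interface ZenModel {
  tool_call?: boolean;
  status?: string; // models.dev marks retired entries as "deprecated"
}

function pickActiveModels(models: Record<string, ZenModel>): string[] {
  const kept: string[] = [];
  for (const [modelId, m] of Object.entries(models)) {
    if (m.tool_call !== true) continue;      // existing filter: tool-calling models only
    if (m.status === "deprecated") continue; // new filter added by this patch
    kept.push(modelId);
  }
  return kept;
}

// Example: the deprecated free models disappear, active ones survive.
console.log(
  pickActiveModels({
    "glm-4.7-free": { tool_call: true, status: "deprecated" },
    "minimax-m2.1-free": { tool_call: true, status: "deprecated" },
    "kimi-k2": { tool_call: true },
  }),
); // ["kimi-k2"]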