diff --git a/packages/ai/scripts/generate-models.ts b/packages/ai/scripts/generate-models.ts index 8d3c86fd..dd51a992 100644 --- a/packages/ai/scripts/generate-models.ts +++ b/packages/ai/scripts/generate-models.ts @@ -260,28 +260,34 @@ async function loadModelsDevData(): Promise[]> { // Process xAi models if (data.zai?.models) { - for (const [modelId, model] of Object.entries(data.zai.models)) { - const m = model as ModelsDevModel; - if (m.tool_call !== true) continue; + for (const [modelId, model] of Object.entries(data.zai.models)) { + const m = model as ModelsDevModel; + if (m.tool_call !== true) continue; + const supportsImage = m.modalities?.input?.includes("image"); - models.push({ - id: modelId, - name: m.name || modelId, - api: "anthropic-messages", - provider: "zai", - baseUrl: "https://api.z.ai/api/anthropic", - reasoning: m.reasoning === true, - input: m.modalities?.input?.includes("image") ? ["text", "image"] : ["text"], - cost: { - input: m.cost?.input || 0, - output: m.cost?.output || 0, - cacheRead: m.cost?.cache_read || 0, - cacheWrite: m.cost?.cache_write || 0, - }, - contextWindow: m.limit?.context || 4096, - maxTokens: m.limit?.output || 4096, - }); - } + models.push({ + id: modelId, + name: m.name || modelId, + api: supportsImage ? "openai-completions" : "anthropic-messages", + provider: "zai", + baseUrl: supportsImage ? "https://api.z.ai/api/coding/paas/v4" : "https://api.z.ai/api/anthropic", + reasoning: m.reasoning === true, + input: supportsImage ? ["text", "image"] : ["text"], + cost: { + input: m.cost?.input || 0, + output: m.cost?.output || 0, + cacheRead: m.cost?.cache_read || 0, + cacheWrite: m.cost?.cache_write || 0, + }, + ...(supportsImage ? 
{ + compat: { + supportsDeveloperRole: false, + }, + } : {}), + contextWindow: m.limit?.context || 4096, + maxTokens: m.limit?.output || 4096, + }); + } } // Process Mistral models diff --git a/packages/ai/src/models.generated.ts b/packages/ai/src/models.generated.ts index 579eab84..4fd67bf1 100644 --- a/packages/ai/src/models.generated.ts +++ b/packages/ai/src/models.generated.ts @@ -7029,9 +7029,10 @@ export const MODELS = { "glm-4.5v": { id: "glm-4.5v", name: "GLM-4.5V", - api: "anthropic-messages", + api: "openai-completions", provider: "zai", - baseUrl: "https://api.z.ai/api/anthropic", + baseUrl: "https://api.z.ai/api/coding/paas/v4", + compat: {"supportsDeveloperRole":false}, reasoning: true, input: ["text", "image"], cost: { @@ -7042,7 +7043,7 @@ export const MODELS = { }, contextWindow: 64000, maxTokens: 16384, - } satisfies Model<"anthropic-messages">, + } satisfies Model<"openai-completions">, "glm-4.6": { id: "glm-4.6", name: "GLM-4.6", @@ -7063,9 +7064,10 @@ export const MODELS = { "glm-4.6v": { id: "glm-4.6v", name: "GLM-4.6V", - api: "anthropic-messages", + api: "openai-completions", provider: "zai", - baseUrl: "https://api.z.ai/api/anthropic", + baseUrl: "https://api.z.ai/api/coding/paas/v4", + compat: {"supportsDeveloperRole":false}, reasoning: true, input: ["text", "image"], cost: { @@ -7076,7 +7078,7 @@ export const MODELS = { }, contextWindow: 128000, maxTokens: 32768, - } satisfies Model<"anthropic-messages">, + } satisfies Model<"openai-completions">, "glm-4.7": { id: "glm-4.7", name: "GLM-4.7", diff --git a/packages/ai/src/providers/openai-completions.ts b/packages/ai/src/providers/openai-completions.ts index b8bc98eb..e2d74cb3 100644 --- a/packages/ai/src/providers/openai-completions.ts +++ b/packages/ai/src/providers/openai-completions.ts @@ -474,10 +474,14 @@ function convertMessages( // Handle thinking blocks const thinkingBlocks = msg.content.filter((b) => b.type === "thinking") as ThinkingContent[]; - if (thinkingBlocks.length > 0) { + 
// Filter out empty thinking blocks to avoid API validation errors + const nonEmptyThinkingBlocks = thinkingBlocks.filter((b) => b.thinking && b.thinking.trim().length > 0); + if (nonEmptyThinkingBlocks.length > 0) { if (compat.requiresThinkingAsText) { // Convert thinking blocks to text with delimiters - const thinkingText = thinkingBlocks.map((b) => `\n${b.thinking}\n`).join("\n"); + const thinkingText = nonEmptyThinkingBlocks + .map((b) => `\n${b.thinking}\n`) + .join("\n"); const textContent = assistantMsg.content as Array<{ type: "text"; text: string }> | null; if (textContent) { textContent.unshift({ type: "text", text: thinkingText }); @@ -486,9 +490,9 @@ function convertMessages( } } else { // Use the signature from the first thinking block if available (for llama.cpp server + gpt-oss) - const signature = thinkingBlocks[0].thinkingSignature; + const signature = nonEmptyThinkingBlocks[0].thinkingSignature; if (signature && signature.length > 0) { - (assistantMsg as any)[signature] = thinkingBlocks.map((b) => b.thinking).join("\n"); + (assistantMsg as any)[signature] = nonEmptyThinkingBlocks.map((b) => b.thinking).join("\n"); } } } diff --git a/packages/ai/test/stream.test.ts b/packages/ai/test/stream.test.ts index 3da9b055..6389f49b 100644 --- a/packages/ai/test/stream.test.ts +++ b/packages/ai/test/stream.test.ts @@ -571,9 +571,8 @@ describe("Generate E2E Tests", () => { await handleStreaming(llm); }); - it("should handle thinking", { retry: 3 }, async () => { - // Prompt doesn't trigger thinking - // await handleThinking(llm, { thinkingEnabled: true, thinkingBudgetTokens: 2048 }); + it.skip("should handle thinking mode", { retry: 3 }, async () => { + await handleThinking(llm, { thinkingEnabled: true, thinkingBudgetTokens: 2048 }); }); it("should handle multi-turn with thinking and tools", { retry: 3 }, async () => { @@ -581,7 +580,7 @@ describe("Generate E2E Tests", () => { }); }); - describe.skipIf(!process.env.ZAI_API_KEY)("zAI Provider (glm-4.5v via 
Anthropic Messages)", () => { + describe.skipIf(!process.env.ZAI_API_KEY)("zAI Provider (glm-4.5v via OpenAI Completions)", () => { const llm = getModel("zai", "glm-4.5v"); it("should complete basic text generation", { retry: 3 }, async () => { @@ -596,18 +595,16 @@ describe("Generate E2E Tests", () => { await handleStreaming(llm); }); - it("should handle thinking", { retry: 3 }, async () => { - // Prompt doesn't trigger thinking - // await handleThinking(llm, { thinkingEnabled: true, thinkingBudgetTokens: 2048 }); + it("should handle thinking mode", { retry: 3 }, async () => { + await handleThinking(llm, { reasoningEffort: "medium" }); }); it("should handle multi-turn with thinking and tools", { retry: 3 }, async () => { - await multiTurn(llm, { thinkingEnabled: true, thinkingBudgetTokens: 2048 }); + await multiTurn(llm, { reasoningEffort: "medium" }); }); it("should handle image input", { retry: 3 }, async () => { - // Can't see image for some reason? - // await handleImage(llm); + await handleImage(llm); }); });