From cf35215686b84b07ef68c4b3430708d8d0e57504 Mon Sep 17 00:00:00 2001 From: Mario Zechner Date: Mon, 1 Sep 2025 21:46:22 +0200 Subject: [PATCH] fix(ai): Fix browser compatibility for Anthropic OAuth tokens - Check if process exists before modifying process.env - Prevents errors in browser environments - Maintains OAuth token functionality in Node.js --- packages/ai/src/models.generated.ts | 170 ++++++++++++------------- packages/ai/src/providers/anthropic.ts | 5 +- 2 files changed, 89 insertions(+), 86 deletions(-) diff --git a/packages/ai/src/models.generated.ts b/packages/ai/src/models.generated.ts index f29f39ce..54fe681e 100644 --- a/packages/ai/src/models.generated.ts +++ b/packages/ai/src/models.generated.ts @@ -1598,22 +1598,6 @@ export const PROVIDERS = { contextWindow: 131072, maxTokens: 16384, } satisfies Model, - "meta-llama/llama-3.1-70b-instruct": { - id: "meta-llama/llama-3.1-70b-instruct", - name: "Meta: Llama 3.1 70B Instruct", - provider: "openrouter", - baseUrl: "https://openrouter.ai/api/v1", - reasoning: false, - input: ["text"], - cost: { - input: 0.09999999999999999, - output: 0.28, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 131072, - maxTokens: 16384, - } satisfies Model, "meta-llama/llama-3.1-405b-instruct": { id: "meta-llama/llama-3.1-405b-instruct", name: "Meta: Llama 3.1 405B Instruct", @@ -1630,6 +1614,22 @@ export const PROVIDERS = { contextWindow: 32768, maxTokens: 16384, } satisfies Model, + "meta-llama/llama-3.1-70b-instruct": { + id: "meta-llama/llama-3.1-70b-instruct", + name: "Meta: Llama 3.1 70B Instruct", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.09999999999999999, + output: 0.28, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 16384, + } satisfies Model, "mistralai/mistral-nemo": { id: "mistralai/mistral-nemo", name: "Mistral: Mistral Nemo", @@ -1646,22 +1646,6 @@ export const PROVIDERS = { 
contextWindow: 32000, maxTokens: 4096, } satisfies Model, - "mistralai/mistral-7b-instruct-v0.3": { - id: "mistralai/mistral-7b-instruct-v0.3", - name: "Mistral: Mistral 7B Instruct v0.3", - provider: "openrouter", - baseUrl: "https://openrouter.ai/api/v1", - reasoning: false, - input: ["text"], - cost: { - input: 0.028, - output: 0.054, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 32768, - maxTokens: 16384, - } satisfies Model, "mistralai/mistral-7b-instruct:free": { id: "mistralai/mistral-7b-instruct:free", name: "Mistral: Mistral 7B Instruct (free)", @@ -1694,6 +1678,22 @@ export const PROVIDERS = { contextWindow: 32768, maxTokens: 16384, } satisfies Model, + "mistralai/mistral-7b-instruct-v0.3": { + id: "mistralai/mistral-7b-instruct-v0.3", + name: "Mistral: Mistral 7B Instruct v0.3", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.028, + output: 0.054, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 16384, + } satisfies Model, "microsoft/phi-3-mini-128k-instruct": { id: "microsoft/phi-3-mini-128k-instruct", name: "Microsoft: Phi-3 Mini 128K Instruct", @@ -1726,22 +1726,6 @@ export const PROVIDERS = { contextWindow: 128000, maxTokens: 4096, } satisfies Model, - "meta-llama/llama-3-70b-instruct": { - id: "meta-llama/llama-3-70b-instruct", - name: "Meta: Llama 3 70B Instruct", - provider: "openrouter", - baseUrl: "https://openrouter.ai/api/v1", - reasoning: false, - input: ["text"], - cost: { - input: 0.3, - output: 0.39999999999999997, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 8192, - maxTokens: 16384, - } satisfies Model, "meta-llama/llama-3-8b-instruct": { id: "meta-llama/llama-3-8b-instruct", name: "Meta: Llama 3 8B Instruct", @@ -1758,6 +1742,22 @@ export const PROVIDERS = { contextWindow: 8192, maxTokens: 16384, } satisfies Model, + "meta-llama/llama-3-70b-instruct": { + id: "meta-llama/llama-3-70b-instruct", + name: "Meta: 
Llama 3 70B Instruct", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.3, + output: 0.39999999999999997, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 8192, + maxTokens: 16384, + } satisfies Model, "mistralai/mixtral-8x22b-instruct": { id: "mistralai/mixtral-8x22b-instruct", name: "Mistral: Mixtral 8x22B Instruct", @@ -1854,22 +1854,6 @@ export const PROVIDERS = { contextWindow: 128000, maxTokens: 4096, } satisfies Model, - "mistralai/mistral-tiny": { - id: "mistralai/mistral-tiny", - name: "Mistral Tiny", - provider: "openrouter", - baseUrl: "https://openrouter.ai/api/v1", - reasoning: false, - input: ["text"], - cost: { - input: 0.25, - output: 0.25, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 32768, - maxTokens: 4096, - } satisfies Model, "mistralai/mistral-small": { id: "mistralai/mistral-small", name: "Mistral Small", @@ -1886,6 +1870,22 @@ export const PROVIDERS = { contextWindow: 32768, maxTokens: 4096, } satisfies Model, + "mistralai/mistral-tiny": { + id: "mistralai/mistral-tiny", + name: "Mistral Tiny", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.25, + output: 0.25, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 4096, + } satisfies Model, "mistralai/mixtral-8x7b-instruct": { id: "mistralai/mixtral-8x7b-instruct", name: "Mistral: Mixtral 8x7B Instruct", @@ -2473,6 +2473,21 @@ export const PROVIDERS = { contextWindow: 16385, maxTokens: 4096, } satisfies Model, + "gpt-3.5-turbo": { + id: "gpt-3.5-turbo", + name: "OpenAI: GPT-3.5 Turbo", + provider: "openai", + reasoning: false, + input: ["text"], + cost: { + input: 0.5, + output: 1.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 16385, + maxTokens: 4096, + } satisfies Model, "gpt-4": { id: "gpt-4", name: "OpenAI: GPT-4", @@ -2503,21 +2518,6 @@ export const PROVIDERS = { contextWindow: 
8191, maxTokens: 4096, } satisfies Model, - "gpt-3.5-turbo": { - id: "gpt-3.5-turbo", - name: "OpenAI: GPT-3.5 Turbo", - provider: "openai", - reasoning: false, - input: ["text"], - cost: { - input: 0.5, - output: 1.5, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 16385, - maxTokens: 4096, - } satisfies Model, }, }, anthropic: { @@ -2597,9 +2597,9 @@ export const PROVIDERS = { contextWindow: 200000, maxTokens: 64000, } satisfies Model, - "claude-3-5-haiku-20241022": { - id: "claude-3-5-haiku-20241022", - name: "Anthropic: Claude 3.5 Haiku (2024-10-22)", + "claude-3-5-haiku-latest": { + id: "claude-3-5-haiku-latest", + name: "Anthropic: Claude 3.5 Haiku", provider: "anthropic", reasoning: false, input: ["text", "image"], @@ -2612,9 +2612,9 @@ export const PROVIDERS = { contextWindow: 200000, maxTokens: 8192, } satisfies Model, - "claude-3-5-haiku-latest": { - id: "claude-3-5-haiku-latest", - name: "Anthropic: Claude 3.5 Haiku", + "claude-3-5-haiku-20241022": { + id: "claude-3-5-haiku-20241022", + name: "Anthropic: Claude 3.5 Haiku (2024-10-22)", provider: "anthropic", reasoning: false, input: ["text", "image"], diff --git a/packages/ai/src/providers/anthropic.ts b/packages/ai/src/providers/anthropic.ts index e2f47c57..b2022097 100644 --- a/packages/ai/src/providers/anthropic.ts +++ b/packages/ai/src/providers/anthropic.ts @@ -48,7 +48,10 @@ export class AnthropicLLM implements LLM { "anthropic-beta": "oauth-2025-04-20,fine-grained-tool-streaming-2025-05-14", }; - process.env.ANTHROPIC_API_KEY = undefined; + // Clear the env var if we're in Node.js to prevent SDK from using it + if (typeof process !== "undefined" && process.env) { + delete process.env.ANTHROPIC_API_KEY; + } this.client = new Anthropic({ apiKey: null, authToken: apiKey, baseURL: model.baseUrl, defaultHeaders }); this.isOAuthToken = true; } else {