diff --git a/packages/ai/README.md b/packages/ai/README.md
index ad4bb21f..c633fa76 100644
--- a/packages/ai/README.md
+++ b/packages/ai/README.md
@@ -576,6 +576,28 @@ const ollamaModel: Model<'openai-completions'> = {
   maxTokens: 32000
 };
 
+// Example: Custom endpoint with headers (bypassing Cloudflare bot detection)
+const proxyModel: Model<'anthropic-messages'> = {
+  id: 'claude-sonnet-4',
+  name: 'Claude Sonnet 4 (Proxied)',
+  api: 'anthropic-messages',
+  provider: 'custom-proxy',
+  baseUrl: 'https://proxy.example.com/v1',
+  reasoning: true,
+  input: ['text', 'image'],
+  cost: { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
+  contextWindow: 200000,
+  maxTokens: 8192,
+  headers: {
+    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36',
+    'X-Custom-Auth': 'bearer-token-here'
+  }
+};
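+
+// Streaming with the proxied model works the same way; the custom headers
+// are attached to every request (illustrative, mirrors the call below)
+const proxied = await stream(proxyModel, context, { apiKey: 'YOUR_API_KEY' });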
+
 // Use the custom model
 const response = await stream(ollamaModel, context, {
   apiKey: 'dummy' // Ollama doesn't need a real key
diff --git a/packages/ai/src/providers/anthropic.ts b/packages/ai/src/providers/anthropic.ts
index fe249db5..39523886 100644
--- a/packages/ai/src/providers/anthropic.ts
+++ b/packages/ai/src/providers/anthropic.ts
@@ -288,11 +288,12 @@ function createClient(
       accept: "application/json",
       "anthropic-dangerous-direct-browser-access": "true",
       "anthropic-beta": "oauth-2025-04-20,fine-grained-tool-streaming-2025-05-14",
+      ...(model.headers || {}),
     };
 
     // Clear the env var if we're in Node.js to prevent SDK from using it
     if (typeof process !== "undefined" && process.env) {
-      process.env.ANTHROPIC_API_KEY = undefined;
+      delete process.env.ANTHROPIC_API_KEY;
     }
 
     const client = new Anthropic({
@@ -309,6 +310,7 @@
       accept: "application/json",
       "anthropic-dangerous-direct-browser-access": "true",
       "anthropic-beta": "fine-grained-tool-streaming-2025-05-14",
+      ...(model.headers || {}),
     };
 
     const client = new Anthropic({
diff --git a/packages/ai/src/providers/google.ts b/packages/ai/src/providers/google.ts
index c83cbdbd..078bac7b 100644
--- a/packages/ai/src/providers/google.ts
+++ b/packages/ai/src/providers/google.ts
@@ -63,7 +63,7 @@ export const streamGoogle: StreamFunction<"google-generative-ai"> = (
   };
 
   try {
-    const client = createClient(options?.apiKey);
+    const client = createClient(model, options?.apiKey);
     const params = buildParams(model, context, options);
 
     const googleStream = await client.models.generateContentStream(params);
@@ -252,7 +252,7 @@ export const streamGoogle: StreamFunction<"google-generative-ai"> = (
   return stream;
 };
 
-function createClient(apiKey?: string): GoogleGenAI {
+function createClient(model: Model<"google-generative-ai">, apiKey?: string): GoogleGenAI {
   if (!apiKey) {
     if (!process.env.GEMINI_API_KEY) {
       throw new Error(
@@ -261,7 +261,10 @@
     }
     apiKey = process.env.GEMINI_API_KEY;
   }
-  return new GoogleGenAI({ apiKey });
+  return new GoogleGenAI({
+    apiKey,
+    httpOptions: model.headers ? { headers: model.headers } : undefined,
+  });
 }
 
 function buildParams(
diff --git a/packages/ai/src/providers/openai-completions.ts b/packages/ai/src/providers/openai-completions.ts
index 21e225de..5d4aaa9a 100644
--- a/packages/ai/src/providers/openai-completions.ts
+++ b/packages/ai/src/providers/openai-completions.ts
@@ -260,7 +260,12 @@ function createClient(model: Model<"openai-completions">, apiKey?: string) {
     }
     apiKey = process.env.OPENAI_API_KEY;
   }
-  return new OpenAI({ apiKey, baseURL: model.baseUrl, dangerouslyAllowBrowser: true });
+  return new OpenAI({
+    apiKey,
+    baseURL: model.baseUrl,
+    dangerouslyAllowBrowser: true,
+    defaultHeaders: model.headers,
+  });
 }
 
 function buildParams(model: Model<"openai-completions">, context: Context, options?: OpenAICompletionsOptions) {
@@ -285,7 +290,7 @@ function buildParams(model: Model<"openai-completions">, context: Context, optio
 
   if (options?.maxTokens) {
     // Mistral/Chutes uses max_tokens instead of max_completion_tokens
-    iif (model.baseUrl.includes("mistral.ai") || model.baseUrl.includes("chutes.ai")) {
+    if (model.baseUrl.includes("mistral.ai") || model.baseUrl.includes("chutes.ai")) {
       (params as any).max_tokens = options?.maxTokens;
     } else {
       params.max_completion_tokens = options?.maxTokens;
diff --git a/packages/ai/src/providers/openai-responses.ts b/packages/ai/src/providers/openai-responses.ts
index accf5092..59d8cba1 100644
--- a/packages/ai/src/providers/openai-responses.ts
+++ b/packages/ai/src/providers/openai-responses.ts
@@ -307,7 +307,12 @@ function createClient(model: Model<"openai-responses">, apiKey?: string) {
     }
     apiKey = process.env.OPENAI_API_KEY;
   }
-  return new OpenAI({ apiKey, baseURL: model.baseUrl, dangerouslyAllowBrowser: true });
+  return new OpenAI({
+    apiKey,
+    baseURL: model.baseUrl,
+    dangerouslyAllowBrowser: true,
+    defaultHeaders: model.headers,
+  });
 }
 
 function buildParams(model: Model<"openai-responses">, context: Context, options?: OpenAIResponsesOptions) {
diff --git a/packages/ai/src/types.ts b/packages/ai/src/types.ts
index 4c8c7a6f..f53b4366 100644
--- a/packages/ai/src/types.ts
+++ b/packages/ai/src/types.ts
@@ -168,4 +168,5 @@ export interface Model {
   };
   contextWindow: number;
   maxTokens: number;
+  headers?: Record<string, string>;
 }
diff --git a/packages/coding-agent/CHANGELOG.md b/packages/coding-agent/CHANGELOG.md
index 3c6175bf..9e810109 100644
--- a/packages/coding-agent/CHANGELOG.md
+++ b/packages/coding-agent/CHANGELOG.md
@@ -2,6 +2,15 @@
 
 ## [Unreleased]
 
+### Added
+
+- **Custom Headers**: Added support for custom HTTP headers in the `models.json` configuration. Headers can be set at both the provider and the model level, with model-level headers overriding provider-level ones. This enables bypassing Cloudflare bot detection and satisfying other proxy requirements. ([#39](https://github.com/badlogic/pi-mono/issues/39))
+
+### Fixed
+
+- **Chutes AI Provider**: Fixed 400 errors when using the Chutes AI provider by excluding the `store` field, sending `max_tokens` instead of `max_completion_tokens`, and adjusting system prompt role handling. ([#42](https://github.com/badlogic/pi-mono/pull/42) by [@butelo](https://github.com/butelo))
+- **Mistral/Chutes Syntax Error**: Fixed a syntax error (`iif` instead of `if`) introduced by the merged Chutes PR.
+
 ## [0.7.25] - 2025-11-20
 
 ### Added
diff --git a/packages/coding-agent/README.md b/packages/coding-agent/README.md
index 85ae94cf..fa826998 100644
--- a/packages/coding-agent/README.md
+++ b/packages/coding-agent/README.md
@@ -206,6 +206,54 @@ This allows both secure env var usage and literal keys for local servers.
 This is useful when a provider supports multiple API standards through the same base URL.
 
+### Custom Headers
+
+You can set custom HTTP headers to bypass Cloudflare bot detection, attach authentication tokens, or meet other proxy requirements:
+
+```json
+{
+  "providers": {
+    "custom-proxy": {
+      "baseUrl": "https://proxy.example.com/v1",
+      "apiKey": "YOUR_API_KEY",
+      "api": "anthropic-messages",
+      "headers": {
+        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36",
+        "X-Custom-Auth": "bearer-token-here"
+      },
+      "models": [
+        {
+          "id": "claude-sonnet-4",
+          "name": "Claude Sonnet 4 (Proxied)",
+          "reasoning": true,
+          "input": ["text", "image"],
+          "cost": {"input": 3, "output": 15, "cacheRead": 0.3, "cacheWrite": 3.75},
+          "contextWindow": 200000,
+          "maxTokens": 8192,
+          "headers": {
+            "X-Model-Specific-Header": "value"
+          }
+        }
+      ]
+    }
+  }
+}
+```
+
+- **Provider-level `headers`**: Applied to all requests for models in that provider
+- **Model-level `headers`**: Additional headers for specific models (merged with provider headers)
+- Model headers override provider headers when keys conflict (see the merged result below)
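+
+With this configuration, requests for `claude-sonnet-4` would carry the merged result of both levels (an illustrative sketch; the model's value wins when a key appears at both levels):
+
+```json
+{
+  "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36",
+  "X-Custom-Auth": "bearer-token-here",
+  "X-Model-Specific-Header": "value"
+}
+```
+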
 ### Model Selection Priority
 
 When starting `pi`, models are selected in this order:
 
diff --git a/packages/coding-agent/src/model-config.ts b/packages/coding-agent/src/model-config.ts
index 513eaa72..36a5e355 100644
--- a/packages/coding-agent/src/model-config.ts
+++ b/packages/coding-agent/src/model-config.ts
@@ -31,6 +31,7 @@ const ModelDefinitionSchema = Type.Object({
   }),
   contextWindow: Type.Number(),
   maxTokens: Type.Number(),
+  headers: Type.Optional(Type.Record(Type.String(), Type.String())),
 });
 
 const ProviderConfigSchema = Type.Object({
@@ -44,6 +45,7 @@
       Type.Literal("google-generative-ai"),
     ]),
   ),
+  headers: Type.Optional(Type.Record(Type.String(), Type.String())),
   models: Type.Array(ModelDefinitionSchema),
 });
 
@@ -174,6 +176,10 @@ function parseModels(config: ModelsConfig): Model[] {
       continue;
     }
 
+    // Merge headers: provider headers are the base, model headers override
+    const headers =
+      providerConfig.headers || modelDef.headers ? { ...providerConfig.headers, ...modelDef.headers } : undefined;
+
     models.push({
       id: modelDef.id,
       name: modelDef.name,
@@ -185,6 +191,7 @@
       cost: modelDef.cost,
       contextWindow: modelDef.contextWindow,
       maxTokens: modelDef.maxTokens,
+      headers,
     });
   }
 }