Add Vercel AI Gateway support

This commit is contained in:
Timo Lins 2026-01-13 12:51:45 +01:00 committed by Mario Zechner
parent 907fa937e6
commit 164a69a601
12 changed files with 2254 additions and 3 deletions

View file

@ -2,6 +2,10 @@
## [Unreleased]
### Added
- Added Vercel AI Gateway provider with model discovery and `AI_GATEWAY_API_KEY` env support.
## [0.45.3] - 2026-01-13
## [0.45.2] - 2026-01-13

View file

@ -56,6 +56,7 @@ Unified LLM API with automatic model discovery, provider configuration, token an
- **Cerebras**
- **xAI**
- **OpenRouter**
- **Vercel AI Gateway**
- **MiniMax**
- **GitHub Copilot** (requires OAuth, see below)
- **Google Gemini CLI** (requires OAuth, see below)
@ -862,6 +863,7 @@ In Node.js environments, you can set environment variables to avoid passing API
| Cerebras | `CEREBRAS_API_KEY` |
| xAI | `XAI_API_KEY` |
| OpenRouter | `OPENROUTER_API_KEY` |
| Vercel AI Gateway | `AI_GATEWAY_API_KEY` |
| zAI | `ZAI_API_KEY` |
| MiniMax | `MINIMAX_API_KEY` |
| GitHub Copilot | `COPILOT_GITHUB_TOKEN` or `GH_TOKEN` or `GITHUB_TOKEN` |

View file

@ -32,6 +32,20 @@ interface ModelsDevModel {
};
}
/**
 * Shape of a single entry returned by the Vercel AI Gateway `/v1/models`
 * endpoint. Only the fields this generator actually reads are declared.
 */
interface AiGatewayModel {
	/** Gateway model identifier, e.g. "anthropic/claude-opus-4.5". */
	id: string;
	/** Human-readable display name; the generator falls back to `id` when absent. */
	name?: string;
	// Presumably the context window size in tokens — TODO confirm against gateway docs.
	context_window?: number;
	// Presumably the maximum output tokens — TODO confirm against gateway docs.
	max_tokens?: number;
	/** Capability tags; the generator checks for "tool-use", "vision", and "reasoning". */
	tags?: string[];
	/**
	 * Prices, serialized as strings or numbers. Downstream code multiplies
	 * these by 1,000,000, i.e. it assumes per-token prices converted to
	 * cost per million tokens — NOTE(review): confirm units with gateway docs.
	 */
	pricing?: {
		input?: string | number;
		output?: string | number;
		input_cache_read?: string | number;
		input_cache_write?: string | number;
	};
}
const COPILOT_STATIC_HEADERS = {
"User-Agent": "GitHubCopilotChat/0.35.0",
"Editor-Version": "vscode/1.107.0",
@ -39,6 +53,8 @@ const COPILOT_STATIC_HEADERS = {
"Copilot-Integration-Id": "vscode-chat",
} as const;
// Base URL of the Vercel AI Gateway's OpenAI-compatible API; used both for
// model discovery (`/models`) and as the `baseUrl` of every generated model.
const AI_GATEWAY_BASE_URL = "https://ai-gateway.vercel.sh/v1";
async function fetchOpenRouterModels(): Promise<Model<any>[]> {
try {
console.log("Fetching models from OpenRouter API...");
@ -97,6 +113,64 @@ async function fetchOpenRouterModels(): Promise<Model<any>[]> {
}
}
/**
 * Fetch the Vercel AI Gateway model catalog and convert tool-capable entries
 * into pi-ai Model descriptors.
 *
 * - Only models tagged "tool-use" are kept.
 * - The "vision" tag adds image input; "reasoning" marks reasoning models.
 * - Pricing fields arrive as strings or numbers and are multiplied by
 *   1,000,000 (per-token price -> cost per million tokens); unparsable
 *   values fall back to 0.
 *
 * @returns Tool-capable gateway models, or an empty array on any failure
 *          (network error, non-2xx response, malformed payload).
 */
async function fetchAiGatewayModels(): Promise<Model<any>[]> {
	try {
		console.log("Fetching models from Vercel AI Gateway API...");
		const response = await fetch(`${AI_GATEWAY_BASE_URL}/models`);
		// Surface HTTP errors explicitly; otherwise an error body parses to a
		// payload without `data`, and we would silently log "Fetched 0 models".
		if (!response.ok) {
			throw new Error(`HTTP ${response.status} ${response.statusText}`);
		}
		const data = await response.json();

		// Pricing values are typically serialized as strings (e.g. "0.000003");
		// coerce defensively and treat NaN/Infinity as "no price".
		const toNumber = (value: string | number | undefined): number => {
			if (typeof value === "number") {
				return Number.isFinite(value) ? value : 0;
			}
			const parsed = parseFloat(value ?? "0");
			return Number.isFinite(parsed) ? parsed : 0;
		};

		const items = Array.isArray(data.data) ? (data.data as AiGatewayModel[]) : [];
		const models: Model<any>[] = [];
		for (const model of items) {
			const tags = Array.isArray(model.tags) ? model.tags : [];
			// Only include models that support tools
			if (!tags.includes("tool-use")) continue;

			const input: ("text" | "image")[] = ["text"];
			if (tags.includes("vision")) {
				input.push("image");
			}

			models.push({
				id: model.id,
				name: model.name || model.id,
				api: "openai-completions",
				baseUrl: AI_GATEWAY_BASE_URL,
				provider: "ai-gateway",
				reasoning: tags.includes("reasoning"),
				input,
				// Per-token prices converted to cost per million tokens.
				cost: {
					input: toNumber(model.pricing?.input) * 1_000_000,
					output: toNumber(model.pricing?.output) * 1_000_000,
					cacheRead: toNumber(model.pricing?.input_cache_read) * 1_000_000,
					cacheWrite: toNumber(model.pricing?.input_cache_write) * 1_000_000,
				},
				// Conservative fallbacks when the catalog omits limits.
				contextWindow: model.context_window || 4096,
				maxTokens: model.max_tokens || 4096,
			});
		}

		console.log(`Fetched ${models.length} tool-capable models from Vercel AI Gateway`);
		return models;
	} catch (error) {
		console.error("Failed to fetch Vercel AI Gateway models:", error);
		return [];
	}
}
async function loadModelsDevData(): Promise<Model<any>[]> {
try {
console.log("Fetching models from models.dev API...");
@ -529,11 +603,13 @@ async function generateModels() {
// Fetch models from both sources
// models.dev: Anthropic, Google, OpenAI, Groq, Cerebras
// OpenRouter: xAI and other providers (excluding Anthropic, Google, OpenAI)
// AI Gateway: OpenAI-compatible catalog with tool-capable models
const modelsDevModels = await loadModelsDevData();
const openRouterModels = await fetchOpenRouterModels();
const aiGatewayModels = await fetchAiGatewayModels();
// Combine models (models.dev has priority)
const allModels = [...modelsDevModels, ...openRouterModels];
const allModels = [...modelsDevModels, ...openRouterModels, ...aiGatewayModels];
// Fix incorrect cache pricing for Claude Opus 4.5 from models.dev
// models.dev has 3x the correct pricing (1.5/18.75 instead of 0.5/6.25)

File diff suppressed because it is too large Load diff

View file

@ -96,6 +96,7 @@ export function getEnvApiKey(provider: any): string | undefined {
cerebras: "CEREBRAS_API_KEY",
xai: "XAI_API_KEY",
openrouter: "OPENROUTER_API_KEY",
"ai-gateway": "AI_GATEWAY_API_KEY",
zai: "ZAI_API_KEY",
mistral: "MISTRAL_API_KEY",
minimax: "MINIMAX_API_KEY",

View file

@ -56,6 +56,7 @@ export type KnownProvider =
| "groq"
| "cerebras"
| "openrouter"
| "ai-gateway"
| "zai"
| "mistral"
| "minimax"

View file

@ -598,6 +598,25 @@ describe("Generate E2E Tests", () => {
});
});
// E2E coverage for the Vercel AI Gateway provider. The whole suite is skipped
// unless AI_GATEWAY_API_KEY is set, so CI without credentials stays green.
// Routes google/gemini-2.5-flash through the gateway's OpenAI-compatible API.
describe.skipIf(!process.env.AI_GATEWAY_API_KEY)(
	"Vercel AI Gateway Provider (google/gemini-2.5-flash via OpenAI Completions)",
	() => {
		const llm = getModel("ai-gateway", "google/gemini-2.5-flash");
		// Each test retries up to 3 times to tolerate transient provider flakiness.
		it("should complete basic text generation", { retry: 3 }, async () => {
			await basicTextGeneration(llm);
		});
		it("should handle tool calling", { retry: 3 }, async () => {
			await handleToolCall(llm);
		});
		it("should handle streaming", { retry: 3 }, async () => {
			await handleStreaming(llm);
		});
	},
);
describe.skipIf(!process.env.ZAI_API_KEY)("zAI Provider (glm-4.5-air via OpenAI Completions)", () => {
const llm = getModel("zai", "glm-4.5-air");

View file

@ -166,6 +166,7 @@ Add API keys to `~/.pi/agent/auth.json`:
| Cerebras | `cerebras` | `CEREBRAS_API_KEY` |
| xAI | `xai` | `XAI_API_KEY` |
| OpenRouter | `openrouter` | `OPENROUTER_API_KEY` |
| Vercel AI Gateway | `ai-gateway` | `AI_GATEWAY_API_KEY` |
| ZAI | `zai` | `ZAI_API_KEY` |
| MiniMax | `minimax` | `MINIMAX_API_KEY` |
@ -1143,7 +1144,7 @@ pi [options] [@files...] [messages...]
| Option | Description |
|--------|-------------|
| `--provider <name>` | Provider: `anthropic`, `openai`, `openai-codex`, `google`, `google-vertex`, `amazon-bedrock`, `mistral`, `xai`, `groq`, `cerebras`, `openrouter`, `zai`, `minimax`, `github-copilot`, `google-gemini-cli`, `google-antigravity`, or custom |
| `--provider <name>` | Provider: `anthropic`, `openai`, `openai-codex`, `google`, `google-vertex`, `amazon-bedrock`, `mistral`, `xai`, `groq`, `cerebras`, `openrouter`, `ai-gateway`, `zai`, `minimax`, `github-copilot`, `google-gemini-cli`, `google-antigravity`, or custom |
| `--model <id>` | Model ID |
| `--api-key <key>` | API key (overrides environment) |
| `--system-prompt <text\|file>` | Custom system prompt (text or file path) |

View file

@ -242,6 +242,7 @@ ${chalk.bold("Environment Variables:")}
CEREBRAS_API_KEY - Cerebras API key
XAI_API_KEY - xAI Grok API key
OPENROUTER_API_KEY - OpenRouter API key
AI_GATEWAY_API_KEY - Vercel AI Gateway API key
ZAI_API_KEY - ZAI API key
MISTRAL_API_KEY - Mistral API key
MINIMAX_API_KEY - MiniMax API key

View file

@ -21,6 +21,7 @@ export const defaultModelPerProvider: Record<KnownProvider, string> = {
"google-vertex": "gemini-3-pro-preview",
"github-copilot": "gpt-4o",
openrouter: "openai/gpt-5.1-codex",
"ai-gateway": "anthropic/claude-opus-4.5",
xai: "grok-4-fast-non-reasoning",
groq: "openai/gpt-oss-120b",
cerebras: "zai-glm-4.6",

View file

@ -1,6 +1,6 @@
import type { Model } from "@mariozechner/pi-ai";
import { describe, expect, test } from "vitest";
import { parseModelPattern } from "../src/core/model-resolver.js";
import { defaultModelPerProvider, findInitialModel, parseModelPattern } from "../src/core/model-resolver.js";
// Mock models for testing
const mockModels: Model<"anthropic-messages">[] = [
@ -200,3 +200,37 @@ describe("parseModelPattern", () => {
});
});
});
// Verifies the ai-gateway provider's configured default model, and that
// findInitialModel actually selects it when a registry offers it.
describe("default model selection", () => {
	test("ai-gateway default is opus 4.5", () => {
		expect(defaultModelPerProvider["ai-gateway"]).toBe("anthropic/claude-opus-4.5");
	});

	test("findInitialModel selects ai-gateway default when available", async () => {
		// Minimal gateway-hosted model whose id matches the configured default.
		const gatewayDefault: Model<"openai-completions"> = {
			id: "anthropic/claude-opus-4.5",
			name: "Claude Opus 4.5",
			api: "openai-completions",
			provider: "ai-gateway",
			baseUrl: "https://ai-gateway.vercel.sh/v1",
			reasoning: true,
			input: ["text", "image"],
			cost: { input: 5, output: 15, cacheRead: 0.5, cacheWrite: 5 },
			contextWindow: 200000,
			maxTokens: 8192,
		};

		// Stub registry exposing exactly this one available model.
		const stubRegistry = {
			getAvailable: async () => [gatewayDefault],
		} as unknown as Parameters<typeof findInitialModel>[0]["modelRegistry"];

		const { model } = await findInitialModel({
			scopedModels: [],
			isContinuing: false,
			modelRegistry: stubRegistry,
		});

		expect(model?.provider).toBe("ai-gateway");
		expect(model?.id).toBe("anthropic/claude-opus-4.5");
	});
});

View file

@ -15,6 +15,7 @@ const TEST_MODELS: Record<string, string> = {
google: "gemini-2.5-flash",
groq: "openai/gpt-oss-20b",
openrouter: "z-ai/glm-4.6",
"ai-gateway": "anthropic/claude-opus-4.5",
cerebras: "gpt-oss-120b",
xai: "grok-4-fast-non-reasoning",
zai: "glm-4.5-air",