Add Vercel AI Gateway support

This commit is contained in:
Timo Lins 2026-01-13 12:51:45 +01:00 committed by Mario Zechner
parent 907fa937e6
commit 164a69a601
12 changed files with 2254 additions and 3 deletions

View file

@@ -2,6 +2,10 @@
## [Unreleased]
### Added
- Added Vercel AI Gateway provider with model discovery and `AI_GATEWAY_API_KEY` env support.
## [0.45.3] - 2026-01-13
## [0.45.2] - 2026-01-13

View file

@@ -56,6 +56,7 @@ Unified LLM API with automatic model discovery, provider configuration, token an
- **Cerebras**
- **xAI**
- **OpenRouter**
- **Vercel AI Gateway**
- **MiniMax**
- **GitHub Copilot** (requires OAuth, see below)
- **Google Gemini CLI** (requires OAuth, see below)
@@ -862,6 +863,7 @@ In Node.js environments, you can set environment variables to avoid passing API
| Cerebras | `CEREBRAS_API_KEY` |
| xAI | `XAI_API_KEY` |
| OpenRouter | `OPENROUTER_API_KEY` |
| Vercel AI Gateway | `AI_GATEWAY_API_KEY` |
| zAI | `ZAI_API_KEY` |
| MiniMax | `MINIMAX_API_KEY` |
| GitHub Copilot | `COPILOT_GITHUB_TOKEN` or `GH_TOKEN` or `GITHUB_TOKEN` |

View file

@@ -32,6 +32,20 @@ interface ModelsDevModel {
};
}
// Shape of one model entry returned by the Vercel AI Gateway
// GET /v1/models endpoint (only the fields this script consumes).
interface AiGatewayModel {
// Gateway model identifier, e.g. "google/gemini-2.5-flash".
id: string;
// Human-readable display name; consumers fall back to `id` when absent.
name?: string;
// Context window size — presumably in tokens; TODO confirm against gateway docs.
context_window?: number;
// Maximum output size — presumably in tokens; TODO confirm against gateway docs.
max_tokens?: number;
// Capability tags such as "tool-use", "vision", "reasoning".
tags?: string[];
// Prices as serialized by the API (sometimes strings). The consumer scales
// them by 1e6, so these are presumably per-token USD — TODO confirm.
pricing?: {
input?: string | number;
output?: string | number;
input_cache_read?: string | number;
input_cache_write?: string | number;
};
}
const COPILOT_STATIC_HEADERS = {
"User-Agent": "GitHubCopilotChat/0.35.0",
"Editor-Version": "vscode/1.107.0",
@@ -39,6 +53,8 @@ const COPILOT_STATIC_HEADERS = {
"Copilot-Integration-Id": "vscode-chat",
} as const;
// Base URL of the Vercel AI Gateway's OpenAI-compatible API; used both for
// model discovery (`/models`) and as the `baseUrl` of generated model entries.
const AI_GATEWAY_BASE_URL = "https://ai-gateway.vercel.sh/v1";
async function fetchOpenRouterModels(): Promise<Model<any>[]> {
try {
console.log("Fetching models from OpenRouter API...");
@@ -97,6 +113,64 @@ async function fetchOpenRouterModels(): Promise<Model<any>[]> {
}
}
/**
 * Fetches the model catalog from the Vercel AI Gateway and converts each
 * tool-capable entry into the internal Model shape.
 *
 * Only models tagged "tool-use" are kept; the "vision" tag adds image input
 * and the "reasoning" tag marks reasoning support. Pricing values arrive as
 * per-token strings/numbers and are scaled to cost per million tokens.
 *
 * @returns The discovered models, or an empty array when the request fails
 *          (discovery is best-effort; the error is logged).
 */
async function fetchAiGatewayModels(): Promise<Model<any>[]> {
	try {
		console.log("Fetching models from Vercel AI Gateway API...");
		const response = await fetch(`${AI_GATEWAY_BASE_URL}/models`);
		// Fail fast on HTTP errors: without this check an error payload would
		// just yield an empty catalog logged as a success.
		if (!response.ok) {
			throw new Error(`Vercel AI Gateway returned ${response.status} ${response.statusText}`);
		}
		const data = await response.json();

		// Pricing fields may be serialized as strings; coerce defensively to
		// a finite number, defaulting to 0 for missing/invalid values.
		const toNumber = (value: string | number | undefined): number => {
			if (typeof value === "number") {
				return Number.isFinite(value) ? value : 0;
			}
			const parsed = parseFloat(value ?? "0");
			return Number.isFinite(parsed) ? parsed : 0;
		};

		const items = Array.isArray(data.data) ? (data.data as AiGatewayModel[]) : [];
		const models: Model<any>[] = [];
		// Per-token price -> cost per million tokens.
		const PER_MILLION = 1_000_000;
		for (const model of items) {
			const tags = Array.isArray(model.tags) ? model.tags : [];
			// Only include models that support tools
			if (!tags.includes("tool-use")) continue;

			const input: ("text" | "image")[] = ["text"];
			if (tags.includes("vision")) {
				input.push("image");
			}

			models.push({
				id: model.id,
				name: model.name || model.id,
				api: "openai-completions",
				baseUrl: AI_GATEWAY_BASE_URL,
				provider: "ai-gateway",
				reasoning: tags.includes("reasoning"),
				input,
				cost: {
					input: toNumber(model.pricing?.input) * PER_MILLION,
					output: toNumber(model.pricing?.output) * PER_MILLION,
					cacheRead: toNumber(model.pricing?.input_cache_read) * PER_MILLION,
					cacheWrite: toNumber(model.pricing?.input_cache_write) * PER_MILLION,
				},
				// Conservative fallbacks when the API omits the limits.
				contextWindow: model.context_window || 4096,
				maxTokens: model.max_tokens || 4096,
			});
		}
		console.log(`Fetched ${models.length} tool-capable models from Vercel AI Gateway`);
		return models;
	} catch (error) {
		console.error("Failed to fetch Vercel AI Gateway models:", error);
		return [];
	}
}
async function loadModelsDevData(): Promise<Model<any>[]> {
try {
console.log("Fetching models from models.dev API...");
@@ -529,11 +603,13 @@ async function generateModels() {
// Fetch models from both sources
// models.dev: Anthropic, Google, OpenAI, Groq, Cerebras
// OpenRouter: xAI and other providers (excluding Anthropic, Google, OpenAI)
// AI Gateway: OpenAI-compatible catalog with tool-capable models
const modelsDevModels = await loadModelsDevData();
const openRouterModels = await fetchOpenRouterModels();
const aiGatewayModels = await fetchAiGatewayModels();
// Combine models (models.dev has priority)
const allModels = [...modelsDevModels, ...openRouterModels];
const allModels = [...modelsDevModels, ...openRouterModels, ...aiGatewayModels];
// Fix incorrect cache pricing for Claude Opus 4.5 from models.dev
// models.dev has 3x the correct pricing (1.5/18.75 instead of 0.5/6.25)

File diff suppressed because it is too large Load diff

View file

@@ -96,6 +96,7 @@ export function getEnvApiKey(provider: any): string | undefined {
cerebras: "CEREBRAS_API_KEY",
xai: "XAI_API_KEY",
openrouter: "OPENROUTER_API_KEY",
"ai-gateway": "AI_GATEWAY_API_KEY",
zai: "ZAI_API_KEY",
mistral: "MISTRAL_API_KEY",
minimax: "MINIMAX_API_KEY",

View file

@@ -56,6 +56,7 @@ export type KnownProvider =
| "groq"
| "cerebras"
| "openrouter"
| "ai-gateway"
| "zai"
| "mistral"
| "minimax"

View file

@@ -598,6 +598,25 @@ describe("Generate E2E Tests", () => {
});
});
// E2E suite for the Vercel AI Gateway provider; skipped entirely unless an
// AI_GATEWAY_API_KEY is present in the environment.
describe.skipIf(!process.env.AI_GATEWAY_API_KEY)(
"Vercel AI Gateway Provider (google/gemini-2.5-flash via OpenAI Completions)",
() => {
// Routed through the gateway's OpenAI-compatible completions endpoint.
const llm = getModel("ai-gateway", "google/gemini-2.5-flash");
// Live-network calls against a remote provider can be flaky, hence retry: 3.
it("should complete basic text generation", { retry: 3 }, async () => {
await basicTextGeneration(llm);
});
it("should handle tool calling", { retry: 3 }, async () => {
await handleToolCall(llm);
});
it("should handle streaming", { retry: 3 }, async () => {
await handleStreaming(llm);
});
},
);
describe.skipIf(!process.env.ZAI_API_KEY)("zAI Provider (glm-4.5-air via OpenAI Completions)", () => {
const llm = getModel("zai", "glm-4.5-air");