mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-19 20:00:41 +00:00
- Generate models.generated.ts from models.json with proper types - Categorize providers: OpenAI (Responses), OpenAI-compatible, Anthropic, Gemini - Create createLLM() factory with TypeScript overloads for type safety - Auto-detect base URLs and environment variables for providers - Support 353 models across 39 providers with full autocompletion - Exclude generated file from git (rebuilt on npm build)
265 lines
No EOL
9.5 KiB
TypeScript
#!/usr/bin/env tsx
|
|
|
|
import { readFileSync, writeFileSync } from "fs";
|
|
import { join } from "path";
|
|
|
|
// Load the models.json file
|
|
const data = JSON.parse(readFileSync(join(process.cwd(), "src/models.json"), "utf-8"));
|
|
|
|
// Categorize providers by their API type
|
|
const openaiModels: Record<string, any> = {};
|
|
const openaiCompatibleProviders: Record<string, any> = {};
|
|
const anthropicModels: Record<string, any> = {};
|
|
const geminiModels: Record<string, any> = {};
|
|
|
|
for (const [providerId, provider] of Object.entries(data)) {
|
|
const p = provider as any;
|
|
|
|
if (providerId === "openai") {
|
|
// All OpenAI models use the Responses API
|
|
openaiModels[providerId] = p;
|
|
} else if (providerId === "anthropic" || providerId === "google-vertex-anthropic") {
|
|
// Anthropic direct and via Vertex
|
|
anthropicModels[providerId] = p;
|
|
} else if (providerId === "google" || providerId === "google-vertex") {
|
|
// Google Gemini models
|
|
geminiModels[providerId] = p;
|
|
} else if (p.npm === "@ai-sdk/openai-compatible" ||
|
|
p.npm === "@ai-sdk/groq" ||
|
|
p.npm === "@ai-sdk/cerebras" ||
|
|
p.npm === "@ai-sdk/fireworks" ||
|
|
p.npm === "@ai-sdk/openrouter" ||
|
|
p.npm === "@ai-sdk/openai" && providerId !== "openai" ||
|
|
p.api?.includes("/v1") ||
|
|
["together", "ollama", "llama", "github-models", "groq", "cerebras", "openrouter", "fireworks"].includes(providerId)) {
|
|
// OpenAI-compatible providers - they all speak the OpenAI completions API
|
|
// Set default base URLs for known providers
|
|
if (!p.api) {
|
|
switch (providerId) {
|
|
case "groq": p.api = "https://api.groq.com/openai/v1"; break;
|
|
case "cerebras": p.api = "https://api.cerebras.com/v1"; break;
|
|
case "together": p.api = "https://api.together.xyz/v1"; break;
|
|
case "fireworks": p.api = "https://api.fireworks.ai/v1"; break;
|
|
}
|
|
}
|
|
openaiCompatibleProviders[providerId] = p;
|
|
}
|
|
}
|
|
|
|
// Generate the TypeScript file.
// This first chunk is the header of src/models.generated.ts: a "do not edit"
// banner plus the ModelData / ProviderData shapes that type every table
// emitted below.  Everything between the backticks is written verbatim into
// the generated file, so no comments can be placed inside the template.
let output = `// This file is auto-generated by scripts/generate-models.ts
// Do not edit manually - run 'npm run generate-models' to update

import type { ModalityInput, ModalityOutput } from "./models.js";

export interface ModelData {
  id: string;
  name: string;
  reasoning: boolean;
  tool_call: boolean;
  attachment: boolean;
  temperature: boolean;
  knowledge?: string;
  release_date: string;
  last_updated: string;
  modalities: {
    input: ModalityInput[];
    output: ModalityOutput[];
  };
  open_weights: boolean;
  limit: {
    context: number;
    output: number;
  };
  cost?: {
    input: number;
    output: number;
    cache_read?: number;
    cache_write?: number;
  };
}

export interface ProviderData {
  id: string;
  name: string;
  baseUrl?: string;
  env?: string[];
  models: Record<string, ModelData>;
}

`;
|
|
|
|
// Generate OpenAI models
|
|
output += `// OpenAI models - all use OpenAIResponsesLLM\n`;
|
|
output += `export const OPENAI_MODELS = {\n`;
|
|
for (const [providerId, provider] of Object.entries(openaiModels)) {
|
|
const p = provider as any;
|
|
for (const [modelId, model] of Object.entries(p.models || {})) {
|
|
const m = model as any;
|
|
output += ` "${modelId}": ${JSON.stringify(m, null, 8).split('\n').join('\n ')},\n`;
|
|
}
|
|
}
|
|
output += `} as const;\n\n`;
|
|
|
|
// Generate OpenAI-compatible providers
|
|
output += `// OpenAI-compatible providers - use OpenAICompletionsLLM\n`;
|
|
output += `export const OPENAI_COMPATIBLE_PROVIDERS = {\n`;
|
|
for (const [providerId, provider] of Object.entries(openaiCompatibleProviders)) {
|
|
const p = provider as any;
|
|
output += ` "${providerId}": {\n`;
|
|
output += ` id: "${providerId}",\n`;
|
|
output += ` name: "${p.name}",\n`;
|
|
if (p.api) {
|
|
output += ` baseUrl: "${p.api}",\n`;
|
|
}
|
|
if (p.env) {
|
|
output += ` env: ${JSON.stringify(p.env)},\n`;
|
|
}
|
|
output += ` models: {\n`;
|
|
for (const [modelId, model] of Object.entries(p.models || {})) {
|
|
const m = model as any;
|
|
output += ` "${modelId}": ${JSON.stringify(m, null, 12).split('\n').join('\n ')},\n`;
|
|
}
|
|
output += ` }\n`;
|
|
output += ` },\n`;
|
|
}
|
|
output += `} as const;\n\n`;
|
|
|
|
// Generate Anthropic models (avoiding duplicates)
|
|
output += `// Anthropic models - use AnthropicLLM\n`;
|
|
output += `export const ANTHROPIC_MODELS = {\n`;
|
|
const seenAnthropicModels = new Set<string>();
|
|
for (const [providerId, provider] of Object.entries(anthropicModels)) {
|
|
const p = provider as any;
|
|
for (const [modelId, model] of Object.entries(p.models || {})) {
|
|
if (!seenAnthropicModels.has(modelId)) {
|
|
seenAnthropicModels.add(modelId);
|
|
const m = model as any;
|
|
output += ` "${modelId}": ${JSON.stringify(m, null, 8).split('\n').join('\n ')},\n`;
|
|
}
|
|
}
|
|
}
|
|
output += `} as const;\n\n`;
|
|
|
|
// Generate Gemini models (avoiding duplicates)
|
|
output += `// Gemini models - use GeminiLLM\n`;
|
|
output += `export const GEMINI_MODELS = {\n`;
|
|
const seenGeminiModels = new Set<string>();
|
|
for (const [providerId, provider] of Object.entries(geminiModels)) {
|
|
const p = provider as any;
|
|
for (const [modelId, model] of Object.entries(p.models || {})) {
|
|
if (!seenGeminiModels.has(modelId)) {
|
|
seenGeminiModels.add(modelId);
|
|
const m = model as any;
|
|
output += ` "${modelId}": ${JSON.stringify(m, null, 8).split('\n').join('\n ')},\n`;
|
|
}
|
|
}
|
|
}
|
|
output += `} as const;\n\n`;
|
|
|
|
// Generate type helpers
|
|
output += `// Type helpers\n`;
|
|
output += `export type OpenAIModel = keyof typeof OPENAI_MODELS;\n`;
|
|
output += `export type OpenAICompatibleProvider = keyof typeof OPENAI_COMPATIBLE_PROVIDERS;\n`;
|
|
output += `export type AnthropicModel = keyof typeof ANTHROPIC_MODELS;\n`;
|
|
output += `export type GeminiModel = keyof typeof GEMINI_MODELS;\n\n`;
|
|
|
|
// Generate the factory function.
// Everything between backticks below is written verbatim into the generated
// file; note the escaped \` and \${...} sequences, which belong to the
// *generated* code, not to this script.
output += `// Factory function implementation\n`;
// Runtime client classes, one per wire protocol, plus the shared LLM types.
output += `import { OpenAIResponsesLLM } from "./providers/openai-responses.js";\n`;
output += `import { OpenAICompletionsLLM } from "./providers/openai-completions.js";\n`;
output += `import { AnthropicLLM } from "./providers/anthropic.js";\n`;
output += `import { GeminiLLM } from "./providers/gemini.js";\n`;
output += `import type { LLM, LLMOptions } from "./types.js";\n\n`;

// The factory itself: one typed overload per provider family, then the
// untyped implementation that dispatches on the provider id and resolves the
// API key from options or the provider's conventional environment variable.
output += `export interface CreateLLMOptions {
  apiKey?: string;
  baseUrl?: string;
}

// Overloads for type safety
export function createLLM(
  provider: "openai",
  model: OpenAIModel,
  options?: CreateLLMOptions
): OpenAIResponsesLLM;

export function createLLM(
  provider: OpenAICompatibleProvider,
  model: string, // We'll validate at runtime
  options?: CreateLLMOptions
): OpenAICompletionsLLM;

export function createLLM(
  provider: "anthropic",
  model: AnthropicModel,
  options?: CreateLLMOptions
): AnthropicLLM;

export function createLLM(
  provider: "gemini",
  model: GeminiModel,
  options?: CreateLLMOptions
): GeminiLLM;

// Implementation
export function createLLM(
  provider: string,
  model: string,
  options?: CreateLLMOptions
): LLM<LLMOptions> {
  const apiKey = options?.apiKey || process.env[getEnvVar(provider)];

  if (provider === "openai") {
    return new OpenAIResponsesLLM(model, apiKey);
  }

  if (provider === "anthropic") {
    return new AnthropicLLM(model, apiKey);
  }

  if (provider === "gemini") {
    return new GeminiLLM(model, apiKey);
  }

  // OpenAI-compatible providers
  if (provider in OPENAI_COMPATIBLE_PROVIDERS) {
    const providerData = OPENAI_COMPATIBLE_PROVIDERS[provider as OpenAICompatibleProvider];
    const baseUrl = options?.baseUrl || providerData.baseUrl;
    return new OpenAICompletionsLLM(model, apiKey, baseUrl);
  }

  throw new Error(\`Unknown provider: \${provider}\`);
}

// Helper to get the default environment variable for a provider
function getEnvVar(provider: string): string {
  switch (provider) {
    case "openai": return "OPENAI_API_KEY";
    case "anthropic": return "ANTHROPIC_API_KEY";
    case "gemini": return "GEMINI_API_KEY";
    case "groq": return "GROQ_API_KEY";
    case "cerebras": return "CEREBRAS_API_KEY";
    case "together": return "TOGETHER_API_KEY";
    case "openrouter": return "OPENROUTER_API_KEY";
    default: return \`\${provider.toUpperCase()}_API_KEY\`;
  }
}
`;
|
|
|
|
// Write the generated file
|
|
writeFileSync(join(process.cwd(), "src/models.generated.ts"), output);
|
|
console.log("✅ Generated src/models.generated.ts");
|
|
|
|
// Count statistics
|
|
const openaiCount = Object.values(openaiModels).reduce((acc, p: any) => acc + Object.keys(p.models || {}).length, 0);
|
|
const compatCount = Object.values(openaiCompatibleProviders).reduce((acc, p: any) => acc + Object.keys(p.models || {}).length, 0);
|
|
const anthropicCount = Object.values(anthropicModels).reduce((acc, p: any) => acc + Object.keys(p.models || {}).length, 0);
|
|
const geminiCount = Object.values(geminiModels).reduce((acc, p: any) => acc + Object.keys(p.models || {}).length, 0);
|
|
|
|
console.log(`\nModel counts:`);
|
|
console.log(` OpenAI (Responses API): ${openaiCount} models`);
|
|
console.log(` OpenAI-compatible: ${compatCount} models across ${Object.keys(openaiCompatibleProviders).length} providers`);
|
|
console.log(` Anthropic: ${anthropicCount} models`);
|
|
console.log(` Gemini: ${geminiCount} models`);
|
|
console.log(` Total: ${openaiCount + compatCount + anthropicCount + geminiCount} models`); |