mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-15 13:03:42 +00:00
feat(ai): Add auto-generated TypeScript models with factory function
- Generate models.generated.ts from models.json with proper types
- Categorize providers: OpenAI (Responses), OpenAI-compatible, Anthropic, Gemini
- Create createLLM() factory with TypeScript overloads for type safety
- Auto-detect base URLs and environment variables for providers
- Support 353 models across 39 providers with full autocompletion
- Exclude generated file from git (rebuilt on npm build)
This commit is contained in:
parent
9b8ea585bd
commit
da66a97ea7
6 changed files with 414 additions and 2 deletions
5
.gitignore
vendored
5
.gitignore
vendored
|
|
@ -21,4 +21,7 @@ packages/*/dist/
|
|||
coverage/
|
||||
.nyc_output/
|
||||
.pi_config/
|
||||
tui-debug.log
|
||||
tui-debug.log
|
||||
|
||||
# Generated files
|
||||
packages/ai/src/models.generated.ts
|
||||
68
package-lock.json
generated
68
package-lock.json
generated
|
|
@ -1062,6 +1062,53 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"node_modules/playwright": {
|
||||
"version": "1.55.0",
|
||||
"resolved": "https://registry.npmjs.org/playwright/-/playwright-1.55.0.tgz",
|
||||
"integrity": "sha512-sdCWStblvV1YU909Xqx0DhOjPZE4/5lJsIS84IfN9dAZfcl/CIZ5O8l3o0j7hPMjDvqoTF8ZUcc+i/GL5erstA==",
|
||||
"dev": true,
|
||||
"license": "Apache-2.0",
|
||||
"dependencies": {
|
||||
"playwright-core": "1.55.0"
|
||||
},
|
||||
"bin": {
|
||||
"playwright": "cli.js"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"fsevents": "2.3.2"
|
||||
}
|
||||
},
|
||||
"node_modules/playwright-core": {
|
||||
"version": "1.55.0",
|
||||
"resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.55.0.tgz",
|
||||
"integrity": "sha512-GvZs4vU3U5ro2nZpeiwyb0zuFaqb9sUiAJuyrWpcGouD8y9/HLgGbNRjIph7zU9D3hnPaisMl9zG9CgFi/biIg==",
|
||||
"dev": true,
|
||||
"license": "Apache-2.0",
|
||||
"bin": {
|
||||
"playwright-core": "cli.js"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
}
|
||||
},
|
||||
"node_modules/playwright/node_modules/fsevents": {
|
||||
"version": "2.3.2",
|
||||
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
|
||||
"integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
|
||||
"dev": true,
|
||||
"hasInstallScript": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"darwin"
|
||||
],
|
||||
"engines": {
|
||||
"node": "^8.16.0 || ^10.6.0 || >=11.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/resolve-pkg-maps": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz",
|
||||
|
|
@ -1607,10 +1654,31 @@
|
|||
"chalk": "^5.5.0",
|
||||
"openai": "^5.15.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^24.3.0",
|
||||
"playwright": "^1.55.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=20.0.0"
|
||||
}
|
||||
},
|
||||
"packages/ai/node_modules/@types/node": {
|
||||
"version": "24.3.0",
|
||||
"resolved": "https://registry.npmjs.org/@types/node/-/node-24.3.0.tgz",
|
||||
"integrity": "sha512-aPTXCrfwnDLj4VvXrm+UUCQjNEvJgNA8s5F1cvwQU+3KNltTOkBm1j30uNLyqqPNe7gE3KFzImYoZEfLhp4Yow==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"undici-types": "~7.10.0"
|
||||
}
|
||||
},
|
||||
"packages/ai/node_modules/undici-types": {
|
||||
"version": "7.10.0",
|
||||
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.10.0.tgz",
|
||||
"integrity": "sha512-t5Fy/nfn+14LuOc2KNYg75vZqClpAiqscVvMygNnlsHBFpSXdJaYtXMcdNLpl/Qvc3P2cB3s6lOV51nqsFq4ag==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"packages/pods": {
|
||||
"name": "@mariozechner/pi",
|
||||
"version": "0.5.8",
|
||||
|
|
|
|||
56
packages/ai/docs/models.md
Normal file
56
packages/ai/docs/models.md
Normal file
|
|
@ -0,0 +1,56 @@
|
|||
# OpenAI Models
|
||||
|
||||
## All Models
|
||||
|
||||
- [ ] [GPT-5](https://platform.openai.com/docs/models/gpt-5)
|
||||
- [ ] [GPT-5 mini](https://platform.openai.com/docs/models/gpt-5-mini)
|
||||
- [ ] [GPT-5 nano](https://platform.openai.com/docs/models/gpt-5-nano)
|
||||
- [ ] [o3-deep-research](https://platform.openai.com/docs/models/o3-deep-research)
|
||||
- [ ] [o4-mini-deep-research](https://platform.openai.com/docs/models/o4-mini-deep-research)
|
||||
- [ ] [o3-pro](https://platform.openai.com/docs/models/o3-pro)
|
||||
- [ ] [GPT-4o Audio](https://platform.openai.com/docs/models/gpt-4o-audio-preview)
|
||||
- [ ] [GPT-4o Realtime](https://platform.openai.com/docs/models/gpt-4o-realtime-preview)
|
||||
- [ ] [o3](https://platform.openai.com/docs/models/o3)
|
||||
- [ ] [o4-mini](https://platform.openai.com/docs/models/o4-mini)
|
||||
- [ ] [GPT-4.1](https://platform.openai.com/docs/models/gpt-4.1)
|
||||
- [ ] [GPT-4.1 mini](https://platform.openai.com/docs/models/gpt-4.1-mini)
|
||||
- [ ] [GPT-4.1 nano](https://platform.openai.com/docs/models/gpt-4.1-nano)
|
||||
- [ ] [o1-pro](https://platform.openai.com/docs/models/o1-pro)
|
||||
- [ ] [computer-use-preview](https://platform.openai.com/docs/models/computer-use-preview)
|
||||
- [ ] [GPT-4o mini Search Preview](https://platform.openai.com/docs/models/gpt-4o-mini-search-preview)
|
||||
- [ ] [GPT-4o Search Preview](https://platform.openai.com/docs/models/gpt-4o-search-preview)
|
||||
- [ ] [GPT-4.5 Preview (Deprecated)](https://platform.openai.com/docs/models/gpt-4.5-preview)
|
||||
- [ ] [o3-mini](https://platform.openai.com/docs/models/o3-mini)
|
||||
- [ ] [GPT-4o mini Audio](https://platform.openai.com/docs/models/gpt-4o-mini-audio-preview)
|
||||
- [ ] [GPT-4o mini Realtime](https://platform.openai.com/docs/models/gpt-4o-mini-realtime-preview)
|
||||
- [ ] [o1](https://platform.openai.com/docs/models/o1)
|
||||
- [ ] [omni-moderation](https://platform.openai.com/docs/models/omni-moderation-latest)
|
||||
- [ ] [o1-mini](https://platform.openai.com/docs/models/o1-mini)
|
||||
- [ ] [o1 Preview](https://platform.openai.com/docs/models/o1-preview)
|
||||
- [ ] [GPT-4o](https://platform.openai.com/docs/models/gpt-4o)
|
||||
- [ ] [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini)
|
||||
- [ ] [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo)
|
||||
- [ ] [babbage-002](https://platform.openai.com/docs/models/babbage-002)
|
||||
- [ ] [ChatGPT-4o](https://platform.openai.com/docs/models/chatgpt-4o-latest)
|
||||
- [ ] [codex-mini-latest](https://platform.openai.com/docs/models/codex-mini-latest)
|
||||
- [ ] [DALL·E 2](https://platform.openai.com/docs/models/dall-e-2)
|
||||
- [ ] [DALL·E 3](https://platform.openai.com/docs/models/dall-e-3)
|
||||
- [ ] [davinci-002](https://platform.openai.com/docs/models/davinci-002)
|
||||
- [ ] [GPT-3.5 Turbo](https://platform.openai.com/docs/models/gpt-3.5-turbo)
|
||||
- [ ] [GPT-4](https://platform.openai.com/docs/models/gpt-4)
|
||||
- [ ] [GPT-4 Turbo Preview](https://platform.openai.com/docs/models/gpt-4-turbo-preview)
|
||||
- [ ] [GPT-4o mini Transcribe](https://platform.openai.com/docs/models/gpt-4o-mini-transcribe)
|
||||
- [ ] [GPT-4o mini TTS](https://platform.openai.com/docs/models/gpt-4o-mini-tts)
|
||||
- [ ] [GPT-4o Transcribe](https://platform.openai.com/docs/models/gpt-4o-transcribe)
|
||||
- [ ] [GPT-5 Chat](https://platform.openai.com/docs/models/gpt-5-chat-latest)
|
||||
- [ ] [GPT Image 1](https://platform.openai.com/docs/models/gpt-image-1)
|
||||
- [ ] [gpt-oss-120b](https://platform.openai.com/docs/models/gpt-oss-120b)
|
||||
- [ ] [gpt-oss-20b](https://platform.openai.com/docs/models/gpt-oss-20b)
|
||||
- [ ] [text-embedding-3-large](https://platform.openai.com/docs/models/text-embedding-3-large)
|
||||
- [ ] [text-embedding-3-small](https://platform.openai.com/docs/models/text-embedding-3-small)
|
||||
- [ ] [text-embedding-ada-002](https://platform.openai.com/docs/models/text-embedding-ada-002)
|
||||
- [ ] [text-moderation](https://platform.openai.com/docs/models/text-moderation-latest)
|
||||
- [ ] [text-moderation-stable](https://platform.openai.com/docs/models/text-moderation-stable)
|
||||
- [ ] [TTS-1](https://platform.openai.com/docs/models/tts-1)
|
||||
- [ ] [TTS-1 HD](https://platform.openai.com/docs/models/tts-1-hd)
|
||||
- [ ] [Whisper](https://platform.openai.com/docs/models/whisper-1)
|
||||
|
|
@ -12,9 +12,11 @@
|
|||
"scripts": {
|
||||
"clean": "rm -rf dist",
|
||||
"models": "curl -s https://models.dev/api.json -o src/models.json",
|
||||
"build": "tsc -p tsconfig.build.json && cp src/models.json dist/models.json",
|
||||
"generate-models": "npx tsx scripts/generate-models.ts",
|
||||
"build": "npm run generate-models && tsc -p tsconfig.build.json && cp src/models.json dist/models.json",
|
||||
"check": "biome check --write .",
|
||||
"test": "npx tsx --test test/providers.test.ts",
|
||||
"extract-models": "npx tsx scripts/extract-openai-models.ts",
|
||||
"prepublishOnly": "npm run clean && npm run models && npm run build"
|
||||
},
|
||||
"dependencies": {
|
||||
|
|
@ -41,5 +43,8 @@
|
|||
},
|
||||
"engines": {
|
||||
"node": ">=20.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^24.3.0"
|
||||
}
|
||||
}
|
||||
|
|
|
|||
265
packages/ai/scripts/generate-models.ts
Normal file
265
packages/ai/scripts/generate-models.ts
Normal file
|
|
@ -0,0 +1,265 @@
|
|||
#!/usr/bin/env tsx
|
||||
|
||||
import { readFileSync, writeFileSync } from "fs";
|
||||
import { join } from "path";
|
||||
|
||||
// Load the models.json file
|
||||
const data = JSON.parse(readFileSync(join(process.cwd(), "src/models.json"), "utf-8"));
|
||||
|
||||
// Categorize providers by their API type
|
||||
const openaiModels: Record<string, any> = {};
|
||||
const openaiCompatibleProviders: Record<string, any> = {};
|
||||
const anthropicModels: Record<string, any> = {};
|
||||
const geminiModels: Record<string, any> = {};
|
||||
|
||||
for (const [providerId, provider] of Object.entries(data)) {
|
||||
const p = provider as any;
|
||||
|
||||
if (providerId === "openai") {
|
||||
// All OpenAI models use the Responses API
|
||||
openaiModels[providerId] = p;
|
||||
} else if (providerId === "anthropic" || providerId === "google-vertex-anthropic") {
|
||||
// Anthropic direct and via Vertex
|
||||
anthropicModels[providerId] = p;
|
||||
} else if (providerId === "google" || providerId === "google-vertex") {
|
||||
// Google Gemini models
|
||||
geminiModels[providerId] = p;
|
||||
} else if (p.npm === "@ai-sdk/openai-compatible" ||
|
||||
p.npm === "@ai-sdk/groq" ||
|
||||
p.npm === "@ai-sdk/cerebras" ||
|
||||
p.npm === "@ai-sdk/fireworks" ||
|
||||
p.npm === "@ai-sdk/openrouter" ||
|
||||
p.npm === "@ai-sdk/openai" && providerId !== "openai" ||
|
||||
p.api?.includes("/v1") ||
|
||||
["together", "ollama", "llama", "github-models", "groq", "cerebras", "openrouter", "fireworks"].includes(providerId)) {
|
||||
// OpenAI-compatible providers - they all speak the OpenAI completions API
|
||||
// Set default base URLs for known providers
|
||||
if (!p.api) {
|
||||
switch (providerId) {
|
||||
case "groq": p.api = "https://api.groq.com/openai/v1"; break;
|
||||
case "cerebras": p.api = "https://api.cerebras.com/v1"; break;
|
||||
case "together": p.api = "https://api.together.xyz/v1"; break;
|
||||
case "fireworks": p.api = "https://api.fireworks.ai/v1"; break;
|
||||
}
|
||||
}
|
||||
openaiCompatibleProviders[providerId] = p;
|
||||
}
|
||||
}
|
||||
|
||||
// Generate the TypeScript file
|
||||
let output = `// This file is auto-generated by scripts/generate-models.ts
|
||||
// Do not edit manually - run 'npm run generate-models' to update
|
||||
|
||||
import type { ModalityInput, ModalityOutput } from "./models.js";
|
||||
|
||||
export interface ModelData {
|
||||
id: string;
|
||||
name: string;
|
||||
reasoning: boolean;
|
||||
tool_call: boolean;
|
||||
attachment: boolean;
|
||||
temperature: boolean;
|
||||
knowledge?: string;
|
||||
release_date: string;
|
||||
last_updated: string;
|
||||
modalities: {
|
||||
input: ModalityInput[];
|
||||
output: ModalityOutput[];
|
||||
};
|
||||
open_weights: boolean;
|
||||
limit: {
|
||||
context: number;
|
||||
output: number;
|
||||
};
|
||||
cost?: {
|
||||
input: number;
|
||||
output: number;
|
||||
cache_read?: number;
|
||||
cache_write?: number;
|
||||
};
|
||||
}
|
||||
|
||||
export interface ProviderData {
|
||||
id: string;
|
||||
name: string;
|
||||
baseUrl?: string;
|
||||
env?: string[];
|
||||
models: Record<string, ModelData>;
|
||||
}
|
||||
|
||||
`;
|
||||
|
||||
// Generate OpenAI models
|
||||
output += `// OpenAI models - all use OpenAIResponsesLLM\n`;
|
||||
output += `export const OPENAI_MODELS = {\n`;
|
||||
for (const [providerId, provider] of Object.entries(openaiModels)) {
|
||||
const p = provider as any;
|
||||
for (const [modelId, model] of Object.entries(p.models || {})) {
|
||||
const m = model as any;
|
||||
output += ` "${modelId}": ${JSON.stringify(m, null, 8).split('\n').join('\n ')},\n`;
|
||||
}
|
||||
}
|
||||
output += `} as const;\n\n`;
|
||||
|
||||
// Generate OpenAI-compatible providers
|
||||
output += `// OpenAI-compatible providers - use OpenAICompletionsLLM\n`;
|
||||
output += `export const OPENAI_COMPATIBLE_PROVIDERS = {\n`;
|
||||
for (const [providerId, provider] of Object.entries(openaiCompatibleProviders)) {
|
||||
const p = provider as any;
|
||||
output += ` "${providerId}": {\n`;
|
||||
output += ` id: "${providerId}",\n`;
|
||||
output += ` name: "${p.name}",\n`;
|
||||
if (p.api) {
|
||||
output += ` baseUrl: "${p.api}",\n`;
|
||||
}
|
||||
if (p.env) {
|
||||
output += ` env: ${JSON.stringify(p.env)},\n`;
|
||||
}
|
||||
output += ` models: {\n`;
|
||||
for (const [modelId, model] of Object.entries(p.models || {})) {
|
||||
const m = model as any;
|
||||
output += ` "${modelId}": ${JSON.stringify(m, null, 12).split('\n').join('\n ')},\n`;
|
||||
}
|
||||
output += ` }\n`;
|
||||
output += ` },\n`;
|
||||
}
|
||||
output += `} as const;\n\n`;
|
||||
|
||||
// Generate Anthropic models (avoiding duplicates)
|
||||
output += `// Anthropic models - use AnthropicLLM\n`;
|
||||
output += `export const ANTHROPIC_MODELS = {\n`;
|
||||
const seenAnthropicModels = new Set<string>();
|
||||
for (const [providerId, provider] of Object.entries(anthropicModels)) {
|
||||
const p = provider as any;
|
||||
for (const [modelId, model] of Object.entries(p.models || {})) {
|
||||
if (!seenAnthropicModels.has(modelId)) {
|
||||
seenAnthropicModels.add(modelId);
|
||||
const m = model as any;
|
||||
output += ` "${modelId}": ${JSON.stringify(m, null, 8).split('\n').join('\n ')},\n`;
|
||||
}
|
||||
}
|
||||
}
|
||||
output += `} as const;\n\n`;
|
||||
|
||||
// Generate Gemini models (avoiding duplicates)
|
||||
output += `// Gemini models - use GeminiLLM\n`;
|
||||
output += `export const GEMINI_MODELS = {\n`;
|
||||
const seenGeminiModels = new Set<string>();
|
||||
for (const [providerId, provider] of Object.entries(geminiModels)) {
|
||||
const p = provider as any;
|
||||
for (const [modelId, model] of Object.entries(p.models || {})) {
|
||||
if (!seenGeminiModels.has(modelId)) {
|
||||
seenGeminiModels.add(modelId);
|
||||
const m = model as any;
|
||||
output += ` "${modelId}": ${JSON.stringify(m, null, 8).split('\n').join('\n ')},\n`;
|
||||
}
|
||||
}
|
||||
}
|
||||
output += `} as const;\n\n`;
|
||||
|
||||
// Generate type helpers
|
||||
output += `// Type helpers\n`;
|
||||
output += `export type OpenAIModel = keyof typeof OPENAI_MODELS;\n`;
|
||||
output += `export type OpenAICompatibleProvider = keyof typeof OPENAI_COMPATIBLE_PROVIDERS;\n`;
|
||||
output += `export type AnthropicModel = keyof typeof ANTHROPIC_MODELS;\n`;
|
||||
output += `export type GeminiModel = keyof typeof GEMINI_MODELS;\n\n`;
|
||||
|
||||
// Generate the factory function
|
||||
output += `// Factory function implementation\n`;
|
||||
output += `import { OpenAIResponsesLLM } from "./providers/openai-responses.js";\n`;
|
||||
output += `import { OpenAICompletionsLLM } from "./providers/openai-completions.js";\n`;
|
||||
output += `import { AnthropicLLM } from "./providers/anthropic.js";\n`;
|
||||
output += `import { GeminiLLM } from "./providers/gemini.js";\n`;
|
||||
output += `import type { LLM, LLMOptions } from "./types.js";\n\n`;
|
||||
|
||||
output += `export interface CreateLLMOptions {
|
||||
apiKey?: string;
|
||||
baseUrl?: string;
|
||||
}
|
||||
|
||||
// Overloads for type safety
|
||||
export function createLLM(
|
||||
provider: "openai",
|
||||
model: OpenAIModel,
|
||||
options?: CreateLLMOptions
|
||||
): OpenAIResponsesLLM;
|
||||
|
||||
export function createLLM(
|
||||
provider: OpenAICompatibleProvider,
|
||||
model: string, // We'll validate at runtime
|
||||
options?: CreateLLMOptions
|
||||
): OpenAICompletionsLLM;
|
||||
|
||||
export function createLLM(
|
||||
provider: "anthropic",
|
||||
model: AnthropicModel,
|
||||
options?: CreateLLMOptions
|
||||
): AnthropicLLM;
|
||||
|
||||
export function createLLM(
|
||||
provider: "gemini",
|
||||
model: GeminiModel,
|
||||
options?: CreateLLMOptions
|
||||
): GeminiLLM;
|
||||
|
||||
// Implementation
|
||||
export function createLLM(
|
||||
provider: string,
|
||||
model: string,
|
||||
options?: CreateLLMOptions
|
||||
): LLM<LLMOptions> {
|
||||
const apiKey = options?.apiKey || process.env[getEnvVar(provider)];
|
||||
|
||||
if (provider === "openai") {
|
||||
return new OpenAIResponsesLLM(model, apiKey);
|
||||
}
|
||||
|
||||
if (provider === "anthropic") {
|
||||
return new AnthropicLLM(model, apiKey);
|
||||
}
|
||||
|
||||
if (provider === "gemini") {
|
||||
return new GeminiLLM(model, apiKey);
|
||||
}
|
||||
|
||||
// OpenAI-compatible providers
|
||||
if (provider in OPENAI_COMPATIBLE_PROVIDERS) {
|
||||
const providerData = OPENAI_COMPATIBLE_PROVIDERS[provider as OpenAICompatibleProvider];
|
||||
const baseUrl = options?.baseUrl || providerData.baseUrl;
|
||||
return new OpenAICompletionsLLM(model, apiKey, baseUrl);
|
||||
}
|
||||
|
||||
throw new Error(\`Unknown provider: \${provider}\`);
|
||||
}
|
||||
|
||||
// Helper to get the default environment variable for a provider
|
||||
function getEnvVar(provider: string): string {
|
||||
switch (provider) {
|
||||
case "openai": return "OPENAI_API_KEY";
|
||||
case "anthropic": return "ANTHROPIC_API_KEY";
|
||||
case "gemini": return "GEMINI_API_KEY";
|
||||
case "groq": return "GROQ_API_KEY";
|
||||
case "cerebras": return "CEREBRAS_API_KEY";
|
||||
case "together": return "TOGETHER_API_KEY";
|
||||
case "openrouter": return "OPENROUTER_API_KEY";
|
||||
default: return \`\${provider.toUpperCase()}_API_KEY\`;
|
||||
}
|
||||
}
|
||||
`;
|
||||
|
||||
// Write the generated file
|
||||
writeFileSync(join(process.cwd(), "src/models.generated.ts"), output);
|
||||
console.log("✅ Generated src/models.generated.ts");
|
||||
|
||||
// Count statistics
|
||||
const openaiCount = Object.values(openaiModels).reduce((acc, p: any) => acc + Object.keys(p.models || {}).length, 0);
|
||||
const compatCount = Object.values(openaiCompatibleProviders).reduce((acc, p: any) => acc + Object.keys(p.models || {}).length, 0);
|
||||
const anthropicCount = Object.values(anthropicModels).reduce((acc, p: any) => acc + Object.keys(p.models || {}).length, 0);
|
||||
const geminiCount = Object.values(geminiModels).reduce((acc, p: any) => acc + Object.keys(p.models || {}).length, 0);
|
||||
|
||||
console.log(`\nModel counts:`);
|
||||
console.log(` OpenAI (Responses API): ${openaiCount} models`);
|
||||
console.log(` OpenAI-compatible: ${compatCount} models across ${Object.keys(openaiCompatibleProviders).length} providers`);
|
||||
console.log(` Anthropic: ${anthropicCount} models`);
|
||||
console.log(` Gemini: ${geminiCount} models`);
|
||||
console.log(` Total: ${openaiCount + compatCount + anthropicCount + geminiCount} models`);
|
||||
|
|
@ -3,6 +3,21 @@
|
|||
|
||||
// Package version; keep in sync with package.json on release.
export const version = "0.5.8";

// Export generated models and factory.
// NOTE: models.generated.js is built by scripts/generate-models.ts during
// `npm run build` and is not checked into git — run `npm run generate-models`
// after a fresh clone before type-checking.
export {
	ANTHROPIC_MODELS,
	type AnthropicModel,
	type CreateLLMOptions,
	createLLM,
	GEMINI_MODELS,
	type GeminiModel,
	type ModelData,
	OPENAI_COMPATIBLE_PROVIDERS,
	OPENAI_MODELS,
	type OpenAICompatibleProvider,
	type OpenAIModel,
	type ProviderData,
} from "./models.generated.js";
|
||||
// Export models utilities
|
||||
export {
|
||||
getAllProviders,
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue