feat(ai): Add models.dev data integration

- Add models script to download latest model information
- Create models.ts module to query model capabilities
- Include models.json in package distribution
- Export utilities to check model features (reasoning, tools)
- Update build process to copy models.json to dist
This commit is contained in:
Mario Zechner 2025-08-25 20:10:54 +02:00
parent 4bb3a5ad02
commit 02a9b4f09f
6 changed files with 8516 additions and 6 deletions

View file

@@ -16,9 +16,9 @@ import { OpenAICompletionsLLM } from '@mariozechner/ai/providers/openai-completi
import { GeminiLLM } from '@mariozechner/ai/providers/gemini';
// Pick your provider - same API for all
const llm = new AnthropicLLM('claude-3-5-sonnet-20241022');
// const llm = new OpenAICompletionsLLM('gpt-4o');
// const llm = new GeminiLLM('gemini-2.0-flash-exp');
const llm = new AnthropicLLM('claude-sonnet-4-0');
// const llm = new OpenAICompletionsLLM('gpt-5-mini');
// const llm = new GeminiLLM('gemini-2.5-flash');
// Basic completion
const response = await llm.complete({

View file

@@ -11,10 +11,11 @@
],
"scripts": {
"clean": "rm -rf dist",
"build": "tsc -p tsconfig.build.json",
"models": "curl -s https://models.dev/api.json -o src/models.json",
"build": "tsc -p tsconfig.build.json && cp src/models.json dist/models.json",
"check": "biome check --write .",
"test": "npx tsx --test test/providers.test.ts",
"prepublishOnly": "npm run clean && npm run build"
"prepublishOnly": "npm run clean && npm run models && npm run build"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.60.0",

View file

@@ -1,5 +1,26 @@
// @mariozechner/ai - Unified API for OpenAI, Anthropic, and Google Gemini
// This package provides a common interface for working with multiple LLM providers
// TODO: Export types and implementations once defined
export const version = "0.5.8";
// Export models utilities
export {
getAllProviders,
getModelInfo,
getProviderInfo,
getProviderModels,
loadModels,
type ModelInfo,
type ModelsData,
type ProviderInfo,
supportsThinking,
supportsTools,
} from "./models.js";
// Export providers
export { AnthropicLLM } from "./providers/anthropic.js";
export { GeminiLLM } from "./providers/gemini.js";
export { OpenAICompletionsLLM } from "./providers/openai-completions.js";
export { OpenAIResponsesLLM } from "./providers/openai-responses.js";
// Export types
export type * from "./types.js";

8314
packages/ai/src/models.json Normal file

File diff suppressed because it is too large Load diff

131
packages/ai/src/models.ts Normal file
View file

@@ -0,0 +1,131 @@
import { readFileSync } from "fs";
import { dirname, join } from "path";
import { fileURLToPath } from "url";
/**
 * Metadata for a single model as published by the models.dev dataset.
 * Only id/name are guaranteed; everything else is optional, and the
 * index signature keeps us forward-compatible with new upstream fields.
 */
export interface ModelInfo {
  /** Model identifier, e.g. "gpt-4o". */
  id: string;
  /** Human-readable display name. */
  name: string;
  // Presumably: whether file/image attachments are accepted — confirm against models.dev docs.
  attachment?: boolean;
  /** Whether the model supports reasoning/thinking (see supportsThinking). */
  reasoning?: boolean;
  // Presumably: whether the temperature parameter is honored — confirm.
  temperature?: boolean;
  /** Whether the model supports tool calling (see supportsTools). */
  tool_call?: boolean;
  // Presumably: knowledge-cutoff date string — confirm format against upstream data.
  knowledge?: string;
  release_date?: string;
  last_updated?: string;
  /** Supported input/output modalities (e.g. "text", "image") — per upstream schema. */
  modalities?: {
    input?: string[];
    output?: string[];
  };
  /** True for open-weight models. */
  open_weights?: boolean;
  // Pricing info; units are defined by models.dev (presumably USD per million tokens) — confirm.
  cost?: {
    input?: number;
    output?: number;
    cache_read?: number;
    cache_write?: number;
  };
  /** Token limits: context window and max output. */
  limit?: {
    context?: number;
    output?: number;
  };
  // Catch-all for fields the upstream dataset adds that we don't model yet.
  [key: string]: any;
}
/**
 * Metadata for one provider (OpenAI, Anthropic, Gemini, ...) from models.dev.
 */
export interface ProviderInfo {
  /** Provider identifier, e.g. "openai". */
  id: string;
  // Presumably: environment variable names the provider's SDK reads (API keys) — confirm.
  env?: string[];
  /** npm package name of the provider's SDK. */
  npm?: string;
  // Presumably: base API URL — confirm against upstream data.
  api?: string;
  /** Human-readable provider name. */
  name: string;
  /** Documentation URL. */
  doc?: string;
  /** Models offered by this provider, keyed by model id. */
  models: Record<string, ModelInfo>;
}
/** Full models.dev dataset: provider id -> provider record. */
export type ModelsData = Record<string, ProviderInfo>;
// Module-level cache; populated by the first successful loadModels() read.
let cachedModels: ModelsData | null = null;
/**
 * Load models data from models.json.
 *
 * The file is resolved relative to this module's location, so the build must
 * copy models.json next to the compiled output (the package.json "build"
 * script does this). The result is cached after the first successful read;
 * a failed read is deliberately NOT cached, so a later call can retry.
 *
 * @returns The full provider/model map, or an empty object when models.json
 *          is missing or unparseable.
 */
export function loadModels(): ModelsData {
  if (cachedModels) {
    return cachedModels;
  }
  try {
    // Resolve models.json next to this module (works from src/ and dist/).
    const currentDir = dirname(fileURLToPath(import.meta.url));
    const modelsPath = join(currentDir, "models.json");
    const data = readFileSync(modelsPath, "utf-8");
    // NOTE(review): JSON.parse returns `any`; the cast assumes the file matches
    // the models.dev schema. Consider runtime validation if the source is ever
    // untrusted. Binding to a typed local avoids the non-null assertion on the
    // cache that the previous version needed.
    const parsed = JSON.parse(data) as ModelsData;
    cachedModels = parsed;
    return parsed;
  } catch (error) {
    console.error("Failed to load models.json:", error);
    // Return empty providers object as fallback (not cached — see above).
    return {};
  }
}
/**
 * Look up a model by id across every provider in the dataset.
 *
 * @param modelId - Model identifier, e.g. "gpt-4o".
 * @returns The first matching ModelInfo, or undefined if no provider lists it.
 */
export function getModelInfo(modelId: string): ModelInfo | undefined {
  const providers = Object.values(loadModels());
  // First provider whose models map contains this id wins.
  const owner = providers.find((p) => p.models && p.models[modelId]);
  return owner ? owner.models[modelId] : undefined;
}
/**
 * List every model offered by a single provider.
 *
 * @param providerId - Provider identifier, e.g. "openai".
 * @returns All of the provider's models; empty array for unknown providers
 *          or providers without a models map.
 */
export function getProviderModels(providerId: string): ModelInfo[] {
  const provider = loadModels()[providerId];
  return provider?.models ? Object.values(provider.models) : [];
}
/**
 * Fetch the raw provider record (name, env vars, docs, models) by id.
 *
 * @returns The ProviderInfo, or undefined for an unknown provider.
 */
export function getProviderInfo(providerId: string): ProviderInfo | undefined {
  return loadModels()[providerId];
}
/**
 * Check if a model supports thinking/reasoning.
 * Unknown models report false.
 */
export function supportsThinking(modelId: string): boolean {
  return getModelInfo(modelId)?.reasoning === true;
}
/**
 * Check if a model supports tool calling.
 * Unknown models report false.
 */
export function supportsTools(modelId: string): boolean {
  return getModelInfo(modelId)?.tool_call === true;
}
/**
 * Every provider record in the dataset, in object-key order.
 */
export function getAllProviders(): ProviderInfo[] {
  return Object.values(loadModels());
}

View file

@@ -0,0 +1,43 @@
#!/usr/bin/env tsx
// Manual smoke test for the models.dev utilities in src/models.ts.
// Exercises every exported helper and prints the results for eyeballing.
import { loadModels, getModelInfo, getProviderModels, getProviderInfo, getAllProviders, supportsThinking, supportsTools } from "../src/models.js";

// Test loading models
console.log("Loading models data...");
// Warm the cache; the previous version bound this to an unused `data` local.
loadModels();
const providers = getAllProviders();
console.log(`Loaded ${providers.length} providers\n`);

// Test getting provider info
console.log("OpenAI provider info:");
const openai = getProviderInfo("openai");
if (openai) {
  console.log(`  Name: ${openai.name}`);
  console.log(`  NPM: ${openai.npm}`);
  console.log(`  Models: ${Object.keys(openai.models).length}`);
}

// Test getting a specific model
console.log("\nGetting info for gpt-4o:");
const gpt4o = getModelInfo("gpt-4o");
if (gpt4o) {
  console.log(`  Name: ${gpt4o.name}`);
  console.log(`  Context: ${gpt4o.limit?.context}`);
  console.log(`  Max Output: ${gpt4o.limit?.output}`);
  console.log(`  Reasoning: ${gpt4o.reasoning}`);
  console.log(`  Tool Call: ${gpt4o.tool_call}`);
}

// Test getting provider models
console.log("\nOpenAI models:");
const openaiModels = getProviderModels("openai");
console.log(`  Found ${openaiModels.length} OpenAI models`);
console.log(`  First 5: ${openaiModels.slice(0, 5).map(m => m.id).join(", ")}`);

// Test checking capabilities
console.log("\nModel capabilities:");
console.log(`  gpt-4o supports thinking: ${supportsThinking("gpt-4o")}`);
console.log(`  gpt-4o supports tools: ${supportsTools("gpt-4o")}`);
console.log(`  o1 supports thinking: ${supportsThinking("o1")}`);
console.log(`  o1 supports tools: ${supportsTools("o1")}`);
console.log(`  claude-3-5-sonnet-20241022 supports tools: ${supportsTools("claude-3-5-sonnet-20241022")}`);