mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-20 07:04:34 +00:00
refactor(ai): Implement unified model system with type-safe createLLM
- Add Model interface to types.ts with normalized structure
- Create type-safe generic createLLM function with provider-specific model constraints
- Generate models from OpenRouter API and models.dev data
- Strip provider prefixes for direct providers (google, openai, anthropic, xai)
- Keep full model IDs for OpenRouter-proxied models
- Clean separation: types.ts (Model interface), models.ts (factory logic), models.generated.ts (data)
- Remove old model scripts and unused dependencies
- Rename GeminiLLM to GoogleLLM for consistency
- Add tests for new providers (xAI, Groq, Cerebras, OpenRouter)
- Support 181 tool-capable models across 7 providers with full type safety
This commit is contained in:
parent
3f36051bc6
commit
c7618db3f7
8 changed files with 409 additions and 418 deletions
|
|
@ -3,41 +3,30 @@
|
|||
|
||||
export const version = "0.5.8";
|
||||
|
||||
// Export generated models and factory
|
||||
// Export generated models data
|
||||
export { PROVIDERS } from "./models.generated.js";
|
||||
|
||||
// Export models utilities and types
|
||||
export {
|
||||
ANTHROPIC_MODELS,
|
||||
type AnthropicModel,
|
||||
type CreateLLMOptions,
|
||||
type CerebrasModel,
|
||||
createLLM,
|
||||
GEMINI_MODELS,
|
||||
type GeminiModel,
|
||||
type ModelData,
|
||||
OPENAI_COMPATIBLE_PROVIDERS,
|
||||
OPENAI_MODELS,
|
||||
type OpenAICompatibleProvider,
|
||||
type GoogleModel,
|
||||
type GroqModel,
|
||||
type Model,
|
||||
type OpenAIModel,
|
||||
type ProviderData,
|
||||
} from "./models.generated.js";
|
||||
// Export models utilities
|
||||
export {
|
||||
getAllProviders,
|
||||
getModelInfo,
|
||||
getProviderInfo,
|
||||
getProviderModels,
|
||||
loadModels,
|
||||
type ModalityInput,
|
||||
type ModalityOutput,
|
||||
type ModelInfo,
|
||||
type ModelsData,
|
||||
type ProviderInfo,
|
||||
supportsThinking,
|
||||
supportsTools,
|
||||
type OpenRouterModel,
|
||||
PROVIDER_CONFIG,
|
||||
type ProviderModels,
|
||||
type ProviderToLLM,
|
||||
type XAIModel,
|
||||
} from "./models.js";
|
||||
|
||||
// Export providers
|
||||
export { AnthropicLLM } from "./providers/anthropic.js";
|
||||
export { GeminiLLM } from "./providers/gemini.js";
|
||||
export { GoogleLLM } from "./providers/gemini.js";
|
||||
export { OpenAICompletionsLLM } from "./providers/openai-completions.js";
|
||||
export { OpenAIResponsesLLM } from "./providers/openai-responses.js";
|
||||
|
||||
// Export types
|
||||
export type * from "./types.js";
|
||||
|
|
|
|||
|
|
@ -1,133 +1,97 @@
|
|||
import { readFileSync } from "fs";
|
||||
import { dirname, join } from "path";
|
||||
import { fileURLToPath } from "url";
|
||||
import { PROVIDERS } from "./models.generated.js";
|
||||
import { AnthropicLLM } from "./providers/anthropic.js";
|
||||
import { GoogleLLM } from "./providers/gemini.js";
|
||||
import { OpenAICompletionsLLM } from "./providers/openai-completions.js";
|
||||
import { OpenAIResponsesLLM } from "./providers/openai-responses.js";
|
||||
import type { Model } from "./types.js";
|
||||
|
||||
export type ModalityInput = "text" | "image" | "audio" | "video" | "pdf";
|
||||
export type ModalityOutput = "text" | "image" | "audio";
|
||||
// Provider configuration with factory functions
|
||||
export const PROVIDER_CONFIG = {
|
||||
google: {
|
||||
envKey: "GEMINI_API_KEY",
|
||||
create: (model: string, apiKey: string) => new GoogleLLM(model, apiKey),
|
||||
},
|
||||
openai: {
|
||||
envKey: "OPENAI_API_KEY",
|
||||
create: (model: string, apiKey: string) => new OpenAIResponsesLLM(model, apiKey),
|
||||
},
|
||||
anthropic: {
|
||||
envKey: "ANTHROPIC_API_KEY",
|
||||
create: (model: string, apiKey: string) => new AnthropicLLM(model, apiKey),
|
||||
},
|
||||
xai: {
|
||||
envKey: "XAI_API_KEY",
|
||||
create: (model: string, apiKey: string) => new OpenAICompletionsLLM(model, apiKey, "https://api.x.ai/v1"),
|
||||
},
|
||||
groq: {
|
||||
envKey: "GROQ_API_KEY",
|
||||
create: (model: string, apiKey: string) =>
|
||||
new OpenAICompletionsLLM(model, apiKey, "https://api.groq.com/openai/v1"),
|
||||
},
|
||||
cerebras: {
|
||||
envKey: "CEREBRAS_API_KEY",
|
||||
create: (model: string, apiKey: string) => new OpenAICompletionsLLM(model, apiKey, "https://api.cerebras.ai/v1"),
|
||||
},
|
||||
openrouter: {
|
||||
envKey: "OPENROUTER_API_KEY",
|
||||
create: (model: string, apiKey: string) =>
|
||||
new OpenAICompletionsLLM(model, apiKey, "https://openrouter.ai/api/v1"),
|
||||
},
|
||||
} as const;
|
||||
|
||||
export interface ModelInfo {
|
||||
id: string;
|
||||
name: string;
|
||||
attachment: boolean;
|
||||
reasoning: boolean;
|
||||
temperature: boolean;
|
||||
tool_call: boolean;
|
||||
release_date: string;
|
||||
last_updated: string;
|
||||
modalities: {
|
||||
input: ModalityInput[];
|
||||
output: ModalityOutput[];
|
||||
};
|
||||
open_weights: boolean;
|
||||
limit: {
|
||||
context: number;
|
||||
output: number;
|
||||
};
|
||||
knowledge?: string; // Optional - knowledge cutoff date
|
||||
cost?: {
|
||||
input: number;
|
||||
output: number;
|
||||
cache_read?: number;
|
||||
cache_write?: number;
|
||||
};
|
||||
// Type mapping from provider to LLM implementation
|
||||
export type ProviderToLLM = {
|
||||
google: GoogleLLM;
|
||||
openai: OpenAIResponsesLLM;
|
||||
anthropic: AnthropicLLM;
|
||||
xai: OpenAICompletionsLLM;
|
||||
groq: OpenAICompletionsLLM;
|
||||
cerebras: OpenAICompletionsLLM;
|
||||
openrouter: OpenAICompletionsLLM;
|
||||
};
|
||||
|
||||
// Extract model types for each provider
|
||||
export type GoogleModel = keyof typeof PROVIDERS.google.models;
|
||||
export type OpenAIModel = keyof typeof PROVIDERS.openai.models;
|
||||
export type AnthropicModel = keyof typeof PROVIDERS.anthropic.models;
|
||||
export type XAIModel = keyof typeof PROVIDERS.xai.models;
|
||||
export type GroqModel = keyof typeof PROVIDERS.groq.models;
|
||||
export type CerebrasModel = keyof typeof PROVIDERS.cerebras.models;
|
||||
export type OpenRouterModel = keyof typeof PROVIDERS.openrouter.models;
|
||||
|
||||
// Map providers to their model types
|
||||
export type ProviderModels = {
|
||||
google: GoogleModel;
|
||||
openai: OpenAIModel;
|
||||
anthropic: AnthropicModel;
|
||||
xai: XAIModel;
|
||||
groq: GroqModel;
|
||||
cerebras: CerebrasModel;
|
||||
openrouter: OpenRouterModel;
|
||||
};
|
||||
|
||||
// Single generic factory function
|
||||
export function createLLM<P extends keyof typeof PROVIDERS, M extends keyof (typeof PROVIDERS)[P]["models"]>(
|
||||
provider: P,
|
||||
model: M,
|
||||
apiKey?: string,
|
||||
): ProviderToLLM[P] {
|
||||
const config = PROVIDER_CONFIG[provider as keyof typeof PROVIDER_CONFIG];
|
||||
if (!config) throw new Error(`Unknown provider: ${provider}`);
|
||||
|
||||
const providerData = PROVIDERS[provider];
|
||||
if (!providerData) throw new Error(`Unknown provider: ${provider}`);
|
||||
|
||||
// Type-safe model lookup
|
||||
const models = providerData.models as Record<string, Model>;
|
||||
const modelData = models[model as string];
|
||||
if (!modelData) throw new Error(`Unknown model: ${String(model)} for provider ${provider}`);
|
||||
|
||||
const key = apiKey || process.env[config.envKey];
|
||||
if (!key) throw new Error(`No API key provided for ${provider}. Set ${config.envKey} or pass apiKey.`);
|
||||
|
||||
return config.create(model as string, key) as ProviderToLLM[P];
|
||||
}
|
||||
|
||||
export interface ProviderInfo {
|
||||
id: string;
|
||||
env?: string[];
|
||||
npm?: string;
|
||||
api?: string;
|
||||
name: string;
|
||||
doc?: string;
|
||||
models: Record<string, ModelInfo>;
|
||||
}
|
||||
|
||||
export type ModelsData = Record<string, ProviderInfo>;
|
||||
|
||||
let cachedModels: ModelsData | null = null;
|
||||
|
||||
/**
|
||||
* Load models data from models.json
|
||||
* The file is loaded relative to this module's location
|
||||
*/
|
||||
export function loadModels(): ModelsData {
|
||||
if (cachedModels) {
|
||||
return cachedModels;
|
||||
}
|
||||
|
||||
try {
|
||||
// Get the directory of this module
|
||||
const currentDir = dirname(fileURLToPath(import.meta.url));
|
||||
const modelsPath = join(currentDir, "models.json");
|
||||
|
||||
const data = readFileSync(modelsPath, "utf-8");
|
||||
cachedModels = JSON.parse(data);
|
||||
return cachedModels!;
|
||||
} catch (error) {
|
||||
console.error("Failed to load models.json:", error);
|
||||
// Return empty providers object as fallback
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get information about a specific model
|
||||
*/
|
||||
export function getModelInfo(modelId: string): ModelInfo | undefined {
|
||||
const data = loadModels();
|
||||
|
||||
// Search through all providers
|
||||
for (const provider of Object.values(data)) {
|
||||
if (provider.models && provider.models[modelId]) {
|
||||
return provider.models[modelId];
|
||||
}
|
||||
}
|
||||
|
||||
return undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all models for a specific provider
|
||||
*/
|
||||
export function getProviderModels(providerId: string): ModelInfo[] {
|
||||
const data = loadModels();
|
||||
const provider = data[providerId];
|
||||
|
||||
if (!provider || !provider.models) {
|
||||
return [];
|
||||
}
|
||||
|
||||
return Object.values(provider.models);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get provider information
|
||||
*/
|
||||
export function getProviderInfo(providerId: string): ProviderInfo | undefined {
|
||||
const data = loadModels();
|
||||
return data[providerId];
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a model supports thinking/reasoning
|
||||
*/
|
||||
export function supportsThinking(modelId: string): boolean {
|
||||
const model = getModelInfo(modelId);
|
||||
return model?.reasoning === true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a model supports tool calling
|
||||
*/
|
||||
export function supportsTools(modelId: string): boolean {
|
||||
const model = getModelInfo(modelId);
|
||||
return model?.tool_call === true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all available providers
|
||||
*/
|
||||
export function getAllProviders(): ProviderInfo[] {
|
||||
const data = loadModels();
|
||||
return Object.values(data);
|
||||
}
|
||||
// Re-export Model type for convenience
|
||||
export type { Model };
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ import type {
|
|||
ToolCall,
|
||||
} from "../types.js";
|
||||
|
||||
export interface GeminiLLMOptions extends LLMOptions {
|
||||
export interface GoogleLLMOptions extends LLMOptions {
|
||||
toolChoice?: "auto" | "none" | "any";
|
||||
thinking?: {
|
||||
enabled: boolean;
|
||||
|
|
@ -25,7 +25,7 @@ export interface GeminiLLMOptions extends LLMOptions {
|
|||
};
|
||||
}
|
||||
|
||||
export class GeminiLLM implements LLM<GeminiLLMOptions> {
|
||||
export class GoogleLLM implements LLM<GoogleLLMOptions> {
|
||||
private client: GoogleGenAI;
|
||||
private model: string;
|
||||
|
||||
|
|
@ -42,7 +42,7 @@ export class GeminiLLM implements LLM<GeminiLLMOptions> {
|
|||
this.model = model;
|
||||
}
|
||||
|
||||
async complete(context: Context, options?: GeminiLLMOptions): Promise<AssistantMessage> {
|
||||
async complete(context: Context, options?: GoogleLLMOptions): Promise<AssistantMessage> {
|
||||
try {
|
||||
const contents = this.convertMessages(context.messages);
|
||||
|
||||
|
|
|
|||
|
|
@ -106,3 +106,20 @@ export interface TokenUsage {
|
|||
}
|
||||
|
||||
export type StopReason = "stop" | "length" | "toolUse" | "safety" | "error";
|
||||
|
||||
// Model interface for the unified model system
|
||||
export interface Model {
|
||||
id: string;
|
||||
name: string;
|
||||
provider: string;
|
||||
reasoning: boolean;
|
||||
input: ("text" | "image")[];
|
||||
cost: {
|
||||
input: number; // $/million tokens
|
||||
output: number; // $/million tokens
|
||||
cacheRead: number; // $/million tokens
|
||||
cacheWrite: number; // $/million tokens
|
||||
};
|
||||
contextWindow: number;
|
||||
maxTokens: number;
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue