mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-21 01:01:42 +00:00
feat(ai): Add cost tracking to LLM implementations
- Track input/output token costs for all providers
- Calculate costs based on Model pricing information
- Include cost information in AssistantMessage responses
- Add Usage interface with detailed cost breakdown
- Implement calculateCost utility function for cost calculations
This commit is contained in:
parent
f9d688d577
commit
550da5e47c
6 changed files with 61 additions and 14 deletions
|
|
@@ -3,7 +3,7 @@ import { AnthropicLLM } from "./providers/anthropic.js";
|
|||
import { GoogleLLM } from "./providers/google.js";
|
||||
import { OpenAICompletionsLLM } from "./providers/openai-completions.js";
|
||||
import { OpenAIResponsesLLM } from "./providers/openai-responses.js";
|
||||
import type { Model } from "./types.js";
|
||||
import type { Model, Usage } from "./types.js";
|
||||
|
||||
// Provider configuration with factory functions
|
||||
export const PROVIDER_CONFIG = {
|
||||
|
|
@@ -102,5 +102,14 @@ export function getModel<P extends keyof typeof PROVIDERS>(
|
|||
return models[modelId as string];
|
||||
}
|
||||
|
||||
export function calculateCost(model: Model, usage: Usage) {
|
||||
usage.cost.input = (model.cost.input / 1000000) * usage.input;
|
||||
usage.cost.output = (model.cost.output / 1000000) * usage.output;
|
||||
usage.cost.cacheRead = (model.cost.cacheRead / 1000000) * usage.cacheRead;
|
||||
usage.cost.cacheWrite = (model.cost.cacheWrite / 1000000) * usage.cacheWrite;
|
||||
usage.cost.total = usage.cost.input + usage.cost.output + usage.cost.cacheRead + usage.cost.cacheWrite;
|
||||
return usage.cost;
|
||||
}
|
||||
|
||||
// Re-export the Model type for convenience, so consumers can import it from
// this module instead of reaching into ./types.js directly.
export type { Model };
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue