feat(ai): Add cost tracking to LLM implementations

- Track input/output token costs for all providers
- Calculate costs based on Model pricing information
- Include cost information in AssistantMessage responses
- Add Usage interface with detailed cost breakdown
- Implement calculateCost utility function for cost calculations
This commit is contained in:
Mario Zechner 2025-08-30 00:45:08 +02:00
parent f9d688d577
commit 550da5e47c
6 changed files with 61 additions and 14 deletions

View file

@@ -5,6 +5,7 @@ import type {
MessageParam,
Tool,
} from "@anthropic-ai/sdk/resources/messages.js";
import { calculateCost } from "../models.js";
import type {
AssistantMessage,
Context,
@@ -13,8 +14,8 @@ import type {
Message,
Model,
StopReason,
TokenUsage,
ToolCall,
Usage,
} from "../types.js";
export interface AnthropicLLMOptions extends LLMOptions {
@@ -186,13 +187,20 @@ export class AnthropicLLM implements LLM<AnthropicLLMOptions> {
name: block.name,
arguments: block.input as Record<string, any>,
}));
const usage: TokenUsage = {
const usage: Usage = {
input: msg.usage.input_tokens,
output: msg.usage.output_tokens,
cacheRead: msg.usage.cache_read_input_tokens || 0,
cacheWrite: msg.usage.cache_creation_input_tokens || 0,
// TODO add cost
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
total: 0,
},
};
calculateCost(this.modelInfo, usage);
return {
role: "assistant",
@@ -215,6 +223,7 @@ export class AnthropicLLM implements LLM<AnthropicLLMOptions> {
output: 0,
cacheRead: 0,
cacheWrite: 0,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
},
stopReason: "error",
error: error instanceof Error ? error.message : String(error),