Mirror of https://github.com/getcompanion-ai/co-mono.git (synced 2026-04-21 18:05:11 +00:00)
Fix token counts in google-gemini-cli.ts for anthropic models.
commit 2a0283ecfd (parent 24f89cf070)
1 changed file with 5 additions and 2 deletions
google-gemini-cli.ts
@@ -335,12 +335,15 @@ export const streamGoogleGeminiCli: StreamFunction<"google-gemini-cli"> = (
     }

     if (responseData.usageMetadata) {
+      // promptTokenCount includes cachedContentTokenCount, so subtract to get fresh input
+      const promptTokens = responseData.usageMetadata.promptTokenCount || 0;
+      const cacheReadTokens = responseData.usageMetadata.cachedContentTokenCount || 0;
       output.usage = {
-        input: responseData.usageMetadata.promptTokenCount || 0,
+        input: promptTokens - cacheReadTokens,
         output:
           (responseData.usageMetadata.candidatesTokenCount || 0) +
           (responseData.usageMetadata.thoughtsTokenCount || 0),
-        cacheRead: responseData.usageMetadata.cachedContentTokenCount || 0,
+        cacheRead: cacheReadTokens,
         cacheWrite: 0,
         totalTokens: responseData.usageMetadata.totalTokenCount || 0,
         cost: {
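Per the comment in the diff, Gemini's promptTokenCount already includes cachedContentTokenCount, so the old code reported cached tokens twice: once inside input and again as cacheRead. The change subtracts the cached portion so input reflects only fresh (uncached) input tokens. Below is a minimal standalone sketch of the corrected accounting; the UsageMetadata fields mirror those used in the diff, while NormalizedUsage and normalizeUsage are hypothetical names introduced here for illustration and are not part of the repository.

// Hypothetical standalone sketch of the corrected token accounting.
// Field names follow the usageMetadata fields referenced in the diff.

interface UsageMetadata {
  promptTokenCount?: number;        // includes cachedContentTokenCount
  cachedContentTokenCount?: number; // tokens served from the context cache
  candidatesTokenCount?: number;    // completion tokens
  thoughtsTokenCount?: number;      // reasoning ("thoughts") tokens
  totalTokenCount?: number;
}

interface NormalizedUsage {
  input: number;      // fresh (non-cached) input tokens
  output: number;     // completion + reasoning tokens
  cacheRead: number;  // cached input tokens
  cacheWrite: number; // not reported in this metadata, so always 0 here
  totalTokens: number;
}

function normalizeUsage(meta: UsageMetadata): NormalizedUsage {
  const promptTokens = meta.promptTokenCount ?? 0;
  const cacheReadTokens = meta.cachedContentTokenCount ?? 0;
  return {
    // promptTokenCount includes cachedContentTokenCount, so subtract it
    // to avoid counting cached tokens as fresh input a second time.
    input: promptTokens - cacheReadTokens,
    output: (meta.candidatesTokenCount ?? 0) + (meta.thoughtsTokenCount ?? 0),
    cacheRead: cacheReadTokens,
    cacheWrite: 0,
    totalTokens: meta.totalTokenCount ?? 0,
  };
}

// Example: 1,200 prompt tokens, of which 1,000 were read from cache.
// Before the fix the usage would have shown input: 1200 plus cacheRead: 1000;
// after the fix the fresh input is 200.
console.log(normalizeUsage({
  promptTokenCount: 1200,
  cachedContentTokenCount: 1000,
  candidatesTokenCount: 350,
  thoughtsTokenCount: 120,
  totalTokenCount: 1670,
}));
// -> { input: 200, output: 470, cacheRead: 1000, cacheWrite: 0, totalTokens: 1670 }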