Mirror of https://github.com/getcompanion-ai/co-mono.git, synced 2026-04-18 12:03:03 +00:00
Fix token statistics on abort for Anthropic provider
- Add handling for the message_start event to capture initial token usage
- Fix message_delta to use assignment (=) instead of addition (+=), since Anthropic sends cumulative token counts, not incremental ones
- Add comprehensive tests for all providers (Google, OpenAI Completions, OpenAI Responses, Anthropic)
- Document OpenAI limitation: token stats only available at stream end

Fixes issue where aborted streams had zero token counts despite Anthropic sending input tokens in the initial message_start event.
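For context, here is a minimal TypeScript sketch of the cumulative-counts fix described above. The `Usage` shape, event types, and `applyUsage` helper are illustrative assumptions rather than the repo's actual code; the event field names follow Anthropic's documented streaming format, where message_start carries `message.usage` and message_delta carries a cumulative `usage.output_tokens`.

```ts
// Sketch only: type names and the surrounding stream plumbing are assumptions,
// not the repo's actual implementation.
interface Usage {
  input: number;
  output: number;
}

type AnthropicStreamEvent =
  | { type: "message_start"; message: { usage: { input_tokens: number; output_tokens: number } } }
  | { type: "message_delta"; usage: { output_tokens: number } };

function applyUsage(usage: Usage, event: AnthropicStreamEvent): void {
  if (event.type === "message_start") {
    // Capture input tokens up front so an aborted stream still reports them.
    usage.input = event.message.usage.input_tokens;
    usage.output = event.message.usage.output_tokens;
  } else if (event.type === "message_delta") {
    // Anthropic reports cumulative totals, so assign (=) rather than
    // accumulate (+=); adding would inflate the count on every delta.
    usage.output = event.usage.output_tokens;
  }
}
```

Because the totals are cumulative, whatever event was processed last before an abort already holds a correct running count, with no end-of-stream event required; this is the contrast with OpenAI noted above, where usage arrives only at stream end.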
parent 23be934a9a · commit bc8d994a7b
3 changed files with 161 additions and 73 deletions
@@ -3810,23 +3810,6 @@ export const MODELS = {
     contextWindow: 32768,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
-  "cohere/command-r-08-2024": {
-    id: "cohere/command-r-08-2024",
-    name: "Cohere: Command R (08-2024)",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 0.15,
-      output: 0.6,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 128000,
-    maxTokens: 4000,
-  } satisfies Model<"openai-completions">,
   "cohere/command-r-plus-08-2024": {
     id: "cohere/command-r-plus-08-2024",
     name: "Cohere: Command R+ (08-2024)",
@@ -3844,6 +3827,23 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 4000,
   } satisfies Model<"openai-completions">,
+  "cohere/command-r-08-2024": {
+    id: "cohere/command-r-08-2024",
+    name: "Cohere: Command R (08-2024)",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 0.15,
+      output: 0.6,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 128000,
+    maxTokens: 4000,
+  } satisfies Model<"openai-completions">,
   "sao10k/l3.1-euryale-70b": {
     id: "sao10k/l3.1-euryale-70b",
     name: "Sao10K: Llama 3.1 Euryale 70B v2.2",
@@ -3912,23 +3912,6 @@ export const MODELS = {
     contextWindow: 16384,
     maxTokens: 16384,
   } satisfies Model<"openai-completions">,
-  "meta-llama/llama-3.1-70b-instruct": {
-    id: "meta-llama/llama-3.1-70b-instruct",
-    name: "Meta: Llama 3.1 70B Instruct",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 0.39999999999999997,
-      output: 0.39999999999999997,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 131072,
-    maxTokens: 4096,
-  } satisfies Model<"openai-completions">,
   "meta-llama/llama-3.1-405b-instruct": {
     id: "meta-llama/llama-3.1-405b-instruct",
     name: "Meta: Llama 3.1 405B Instruct",
@@ -3946,6 +3929,23 @@ export const MODELS = {
     contextWindow: 32768,
     maxTokens: 16384,
   } satisfies Model<"openai-completions">,
+  "meta-llama/llama-3.1-70b-instruct": {
+    id: "meta-llama/llama-3.1-70b-instruct",
+    name: "Meta: Llama 3.1 70B Instruct",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 0.39999999999999997,
+      output: 0.39999999999999997,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 131072,
+    maxTokens: 4096,
+  } satisfies Model<"openai-completions">,
   "mistralai/mistral-nemo": {
     id: "mistralai/mistral-nemo",
     name: "Mistral: Mistral Nemo",
@@ -4065,23 +4065,6 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
-  "meta-llama/llama-3-70b-instruct": {
-    id: "meta-llama/llama-3-70b-instruct",
-    name: "Meta: Llama 3 70B Instruct",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 0.3,
-      output: 0.39999999999999997,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 8192,
-    maxTokens: 16384,
-  } satisfies Model<"openai-completions">,
   "meta-llama/llama-3-8b-instruct": {
     id: "meta-llama/llama-3-8b-instruct",
     name: "Meta: Llama 3 8B Instruct",
@@ -4099,6 +4082,23 @@ export const MODELS = {
     contextWindow: 8192,
     maxTokens: 16384,
   } satisfies Model<"openai-completions">,
+  "meta-llama/llama-3-70b-instruct": {
+    id: "meta-llama/llama-3-70b-instruct",
+    name: "Meta: Llama 3 70B Instruct",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 0.3,
+      output: 0.39999999999999997,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 8192,
+    maxTokens: 16384,
+  } satisfies Model<"openai-completions">,
   "mistralai/mixtral-8x22b-instruct": {
     id: "mistralai/mixtral-8x22b-instruct",
     name: "Mistral: Mixtral 8x22B Instruct",
@@ -4133,23 +4133,6 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
-  "mistralai/mistral-tiny": {
-    id: "mistralai/mistral-tiny",
-    name: "Mistral Tiny",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 0.25,
-      output: 0.25,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 32768,
-    maxTokens: 4096,
-  } satisfies Model<"openai-completions">,
   "mistralai/mistral-small": {
     id: "mistralai/mistral-small",
     name: "Mistral Small",
@@ -4167,6 +4150,23 @@ export const MODELS = {
     contextWindow: 32768,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
+  "mistralai/mistral-tiny": {
+    id: "mistralai/mistral-tiny",
+    name: "Mistral Tiny",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 0.25,
+      output: 0.25,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 32768,
+    maxTokens: 4096,
+  } satisfies Model<"openai-completions">,
   "mistralai/mixtral-8x7b-instruct": {
     id: "mistralai/mixtral-8x7b-instruct",
     name: "Mistral: Mixtral 8x7B Instruct",