Merge pull request #57 from badlogic/feature/footer-cost-dollar-sign

Add $ prefix to cost in footer
This commit is contained in:
Mario Zechner 2025-11-25 21:35:18 +01:00 committed by GitHub
commit a7423b954e
4 changed files with 68 additions and 79 deletions

package-lock.json (generated) — 16 lines changed
View file

@ -3264,8 +3264,8 @@
"version": "0.9.3",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-ai": "^0.9.2",
"@mariozechner/pi-tui": "^0.9.2"
"@mariozechner/pi-ai": "^0.9.3",
"@mariozechner/pi-tui": "^0.9.3"
},
"devDependencies": {
"@types/node": "^24.3.0",
@ -3331,9 +3331,9 @@
"version": "0.9.3",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-agent-core": "^0.9.2",
"@mariozechner/pi-ai": "^0.9.2",
"@mariozechner/pi-tui": "^0.9.2",
"@mariozechner/pi-agent-core": "^0.9.3",
"@mariozechner/pi-ai": "^0.9.3",
"@mariozechner/pi-tui": "^0.9.3",
"chalk": "^5.5.0",
"diff": "^8.0.2",
"glob": "^11.0.3"
@ -3369,7 +3369,7 @@
"version": "0.9.3",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-agent-core": "^0.9.2",
"@mariozechner/pi-agent-core": "^0.9.3",
"chalk": "^5.5.0"
},
"bin": {
@ -3435,8 +3435,8 @@
"license": "MIT",
"dependencies": {
"@lmstudio/sdk": "^1.5.0",
"@mariozechner/pi-ai": "^0.9.2",
"@mariozechner/pi-tui": "^0.9.2",
"@mariozechner/pi-ai": "^0.9.3",
"@mariozechner/pi-tui": "^0.9.3",
"docx-preview": "^0.3.7",
"jszip": "^3.10.1",
"lucide": "^0.544.0",

View file

@ -109,7 +109,7 @@ export const MODELS = {
} satisfies Model<"anthropic-messages">,
"claude-opus-4-5": {
id: "claude-opus-4-5",
name: "Claude Opus 4.5",
name: "Claude Opus 4.5 (latest)",
api: "anthropic-messages",
provider: "anthropic",
baseUrl: "https://api.anthropic.com",
@ -2125,7 +2125,7 @@ export const MODELS = {
cacheWrite: 0,
},
contextWindow: 256000,
maxTokens: 32000,
maxTokens: 32768,
} satisfies Model<"openai-completions">,
"moonshotai/kimi-k2-thinking": {
id: "moonshotai/kimi-k2-thinking",
@ -3445,13 +3445,13 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 1,
output: 4,
input: 0.38,
output: 1.53,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 32768,
maxTokens: 262144,
} satisfies Model<"openai-completions">,
"google/gemini-2.5-flash-lite": {
id: "google/gemini-2.5-flash-lite",
@ -3887,8 +3887,8 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.15,
output: 0.59,
input: 0.049999999999999996,
output: 0.19999999999999998,
cacheRead: 0,
cacheWrite: 0,
},
@ -4830,9 +4830,9 @@ export const MODELS = {
contextWindow: 32768,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"anthropic/claude-3.5-haiku-20241022": {
id: "anthropic/claude-3.5-haiku-20241022",
name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
"anthropic/claude-3.5-haiku": {
id: "anthropic/claude-3.5-haiku",
name: "Anthropic: Claude 3.5 Haiku",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
@ -4847,9 +4847,9 @@ export const MODELS = {
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"openai-completions">,
"anthropic/claude-3.5-haiku": {
id: "anthropic/claude-3.5-haiku",
name: "Anthropic: Claude 3.5 Haiku",
"anthropic/claude-3.5-haiku-20241022": {
id: "anthropic/claude-3.5-haiku-20241022",
name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
@ -4915,23 +4915,6 @@ export const MODELS = {
contextWindow: 131072,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"qwen/qwen-2.5-7b-instruct": {
id: "qwen/qwen-2.5-7b-instruct",
name: "Qwen: Qwen2.5 7B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.04,
output: 0.09999999999999999,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 32768,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"nvidia/llama-3.1-nemotron-70b-instruct": {
id: "nvidia/llama-3.1-nemotron-70b-instruct",
name: "NVIDIA: Llama 3.1 Nemotron 70B Instruct",
@ -5187,9 +5170,9 @@ export const MODELS = {
contextWindow: 131072,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-mini-2024-07-18": {
id: "openai/gpt-4o-mini-2024-07-18",
name: "OpenAI: GPT-4o-mini (2024-07-18)",
"openai/gpt-4o-mini": {
id: "openai/gpt-4o-mini",
name: "OpenAI: GPT-4o-mini",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
@ -5204,9 +5187,9 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-mini": {
id: "openai/gpt-4o-mini",
name: "OpenAI: GPT-4o-mini",
"openai/gpt-4o-mini-2024-07-18": {
id: "openai/gpt-4o-mini-2024-07-18",
name: "OpenAI: GPT-4o-mini (2024-07-18)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
@ -5306,23 +5289,6 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-2024-05-13": {
id: "openai/gpt-4o-2024-05-13",
name: "OpenAI: GPT-4o (2024-05-13)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 5,
output: 15,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4o": {
id: "openai/gpt-4o",
name: "OpenAI: GPT-4o",
@ -5357,6 +5323,23 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 64000,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-2024-05-13": {
id: "openai/gpt-4o-2024-05-13",
name: "OpenAI: GPT-4o (2024-05-13)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 5,
output: 15,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3-70b-instruct": {
id: "meta-llama/llama-3-70b-instruct",
name: "Meta: Llama 3 70B Instruct",
@ -5476,23 +5459,6 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-3.5-turbo-0613": {
id: "openai/gpt-3.5-turbo-0613",
name: "OpenAI: GPT-3.5 Turbo (older v0613)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 1,
output: 2,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 4095,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4-turbo-preview": {
id: "openai/gpt-4-turbo-preview",
name: "OpenAI: GPT-4 Turbo Preview",
@ -5510,6 +5476,23 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-3.5-turbo-0613": {
id: "openai/gpt-3.5-turbo-0613",
name: "OpenAI: GPT-3.5 Turbo (older v0613)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 1,
output: 2,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 4095,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"mistralai/mistral-small": {
id: "mistralai/mistral-small",
name: "Mistral Small",

View file

@ -1,5 +1,11 @@
# Changelog
## [Unreleased]
### Fixed
- **Footer Cost Display**: Added `$` prefix to cost display in footer. Now shows `$0.078` instead of `0.078`. ([#53](https://github.com/badlogic/pi-mono/issues/53))
## [0.9.3] - 2025-11-24
### Added

View file

@ -85,7 +85,7 @@ export class FooterComponent implements Component {
if (totalOutput) statsParts.push(`${formatTokens(totalOutput)}`);
if (totalCacheRead) statsParts.push(`R${formatTokens(totalCacheRead)}`);
if (totalCacheWrite) statsParts.push(`W${formatTokens(totalCacheWrite)}`);
if (totalCost) statsParts.push(`${totalCost.toFixed(3)}`);
if (totalCost) statsParts.push(`$${totalCost.toFixed(3)}`);
// Colorize context percentage based on usage
let contextPercentStr: string;