chore: bump version to 0.5.41

This commit is contained in:
Mario Zechner 2025-09-18 11:18:02 +02:00
parent b6af05bbf6
commit 293a6e878d
9 changed files with 114 additions and 63 deletions

12
package-lock.json generated
View file

@@ -2819,10 +2819,10 @@
},
"packages/agent": {
"name": "@mariozechner/pi-agent",
"version": "0.5.40",
"version": "0.5.41",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-tui": "^0.5.39",
"@mariozechner/pi-tui": "^0.5.40",
"@types/glob": "^8.1.0",
"chalk": "^5.5.0",
"glob": "^11.0.3",
@@ -3201,7 +3201,7 @@
},
"packages/ai": {
"name": "@mariozechner/pi-ai",
"version": "0.5.40",
"version": "0.5.41",
"license": "MIT",
"dependencies": {
"@anthropic-ai/sdk": "^0.61.0",
@@ -3242,10 +3242,10 @@
},
"packages/pods": {
"name": "@mariozechner/pi",
"version": "0.5.40",
"version": "0.5.41",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-agent": "^0.5.39",
"@mariozechner/pi-agent": "^0.5.40",
"chalk": "^5.5.0"
},
"bin": {
@@ -3258,7 +3258,7 @@
},
"packages/tui": {
"name": "@mariozechner/pi-tui",
"version": "0.5.40",
"version": "0.5.41",
"license": "MIT",
"dependencies": {
"@types/mime-types": "^2.1.4",

View file

@@ -1,12 +1,12 @@
{
"name": "@mariozechner/pi-agent",
"version": "0.5.40",
"version": "0.5.41",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@mariozechner/pi-agent",
"version": "0.5.40",
"version": "0.5.41",
"license": "MIT",
"dependencies": {
"@mariozechner/tui": "^0.1.1",

View file

@@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-agent",
"version": "0.5.40",
"version": "0.5.41",
"description": "General-purpose agent with tool calling and session persistence",
"type": "module",
"bin": {
@@ -18,7 +18,7 @@
"prepublishOnly": "npm run clean && npm run build"
},
"dependencies": {
"@mariozechner/pi-tui": "^0.5.40",
"@mariozechner/pi-tui": "^0.5.41",
"@types/glob": "^8.1.0",
"chalk": "^5.5.0",
"glob": "^11.0.3",

View file

@@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-ai",
"version": "0.5.40",
"version": "0.5.41",
"description": "Unified LLM API with automatic model discovery and provider configuration",
"type": "module",
"main": "./dist/index.js",

View file

@@ -1413,6 +1413,40 @@ export const MODELS = {
} satisfies Model<"anthropic-messages">,
},
openrouter: {
"qwen/qwen3-coder-flash": {
id: "qwen/qwen3-coder-flash",
name: "Qwen: Qwen3 Coder Flash",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.3,
output: 1.5,
cacheRead: 0.08,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 65536,
} satisfies Model<"openai-completions">,
"qwen/qwen3-coder-plus": {
id: "qwen/qwen3-coder-plus",
name: "Qwen: Qwen3 Coder Plus",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 1,
output: 5,
cacheRead: 0.09999999999999999,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 65536,
} satisfies Model<"openai-completions">,
"qwen/qwen3-next-80b-a3b-thinking": {
id: "qwen/qwen3-next-80b-a3b-thinking",
name: "Qwen: Qwen3 Next 80B A3B Thinking",
@@ -1422,8 +1456,8 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.09782604,
output: 0.391304304,
input: 0.09999999999999999,
output: 0.7999999999999999,
cacheRead: 0,
cacheWrite: 0,
},
@@ -1439,8 +1473,8 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 0.09782604,
output: 0.391304304,
input: 0.09999999999999999,
output: 0.7999999999999999,
cacheRead: 0,
cacheWrite: 0,
},
@@ -1456,13 +1490,13 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 0.15,
output: 0.75,
input: 0.12,
output: 0.6,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 131072,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"qwen/qwen-plus-2025-07-28": {
id: "qwen/qwen-plus-2025-07-28",
@@ -1592,8 +1626,8 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 0.38043459999999996,
output: 1.52173896,
input: 0.38,
output: 1.52,
cacheRead: 0,
cacheWrite: 0,
},
@@ -1643,8 +1677,8 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.08967387,
output: 0.358695612,
input: 0.08,
output: 0.29,
cacheRead: 0,
cacheWrite: 0,
},
@@ -1660,8 +1694,8 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.127173852,
output: 0.5086955952000001,
input: 0.11,
output: 0.38,
cacheRead: 0,
cacheWrite: 0,
},
@@ -1699,7 +1733,7 @@ export const MODELS = {
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 32768,
contextWindow: 163840,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"deepseek/deepseek-chat-v3.1": {
@@ -1819,7 +1853,7 @@ export const MODELS = {
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 262144,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"qwen/qwen3-30b-a3b-instruct-2507": {
id: "qwen/qwen3-30b-a3b-instruct-2507",
@@ -1830,8 +1864,8 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 0.07065213999999999,
output: 0.282608664,
input: 0.07,
output: 0.28,
cacheRead: 0,
cacheWrite: 0,
},
@@ -1847,8 +1881,8 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.41249980199999997,
output: 1.6499998152000002,
input: 0.41,
output: 1.6500000000000001,
cacheRead: 0,
cacheWrite: 0,
},
@@ -1898,8 +1932,8 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.0974999532,
output: 0.38999995632,
input: 0.09999999999999999,
output: 0.39,
cacheRead: 0,
cacheWrite: 0,
},
@@ -1949,8 +1983,8 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 0.24999987999999998,
output: 0.999999888,
input: 0.22,
output: 0.95,
cacheRead: 0,
cacheWrite: 0,
},
@@ -1966,13 +2000,13 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 0.0974999532,
output: 0.38999995632,
input: 0.09999999999999999,
output: 0.09999999999999999,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 4096,
maxTokens: 262144,
} satisfies Model<"openai-completions">,
"moonshotai/kimi-k2:free": {
id: "moonshotai/kimi-k2:free",
@@ -2170,8 +2204,8 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.24999987999999998,
output: 0.999999888,
input: 0.39999999999999997,
output: 1.75,
cacheRead: 0,
cacheWrite: 0,
},
@@ -2204,8 +2238,8 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 0.035869548,
output: 0.14347824480000002,
input: 0.04,
output: 0.14,
cacheRead: 0,
cacheWrite: 0,
},
@@ -2306,8 +2340,8 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.035869548,
output: 0.14347824480000002,
input: 0.06,
output: 0.22,
cacheRead: 0,
cacheWrite: 0,
},
@@ -2340,8 +2374,8 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.0322825932,
output: 0.12913042032,
input: 0.03,
output: 0.13,
cacheRead: 0,
cacheWrite: 0,
},
@@ -2510,14 +2544,31 @@ export const MODELS = {
reasoning: false,
input: ["text", "image"],
cost: {
input: 0.03804346,
output: 0.152173896,
input: 0.04,
output: 0.15,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 96000,
} satisfies Model<"openai-completions">,
"microsoft/phi-4-multimodal-instruct": {
id: "microsoft/phi-4-multimodal-instruct",
name: "Microsoft: Phi 4 Multimodal Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 0.049999999999999996,
output: 0.09999999999999999,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"qwen/qwq-32b": {
id: "qwen/qwq-32b",
name: "Qwen: QwQ 32B",
@@ -2612,8 +2663,8 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 0.03804346,
output: 0.152173896,
input: 0.04,
output: 0.15,
cacheRead: 0,
cacheWrite: 0,
},
@@ -2629,8 +2680,8 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.03260868,
output: 0.130434768,
input: 0.03,
output: 0.13,
cacheRead: 0,
cacheWrite: 0,
},
@@ -2918,8 +2969,8 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 0.06521736,
output: 0.260869536,
input: 0.07,
output: 0.26,
cacheRead: 0,
cacheWrite: 0,
},
@@ -3071,13 +3122,13 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 0.017934774,
output: 0.07173912240000001,
input: 0.02,
output: 0.04,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 128000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"mistralai/mistral-7b-instruct:free": {
id: "mistralai/mistral-7b-instruct:free",

View file

@@ -1,12 +1,12 @@
{
"name": "@mariozechner/pi",
"version": "0.5.40",
"version": "0.5.41",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@mariozechner/pi",
"version": "0.5.40",
"version": "0.5.41",
"license": "MIT",
"dependencies": {
"@ai-sdk/openai": "^2.0.5",

View file

@@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi",
"version": "0.5.40",
"version": "0.5.41",
"description": "CLI tool for managing vLLM deployments on GPU pods",
"type": "module",
"bin": {
@@ -34,7 +34,7 @@
"node": ">=20.0.0"
},
"dependencies": {
"@mariozechner/pi-agent": "^0.5.40",
"@mariozechner/pi-agent": "^0.5.41",
"chalk": "^5.5.0"
},
"devDependencies": {}

View file

@@ -1,12 +1,12 @@
{
"name": "@mariozechner/tui",
"version": "0.5.40",
"version": "0.5.41",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@mariozechner/tui",
"version": "0.5.40",
"version": "0.5.41",
"license": "MIT",
"dependencies": {
"@types/mime-types": "^2.1.4",

View file

@@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-tui",
"version": "0.5.40",
"version": "0.5.41",
"description": "Terminal User Interface library with differential rendering for efficient text-based applications",
"type": "module",
"main": "dist/index.js",