diff --git a/AGENTS.md b/AGENTS.md index ebdfa155..59f79d36 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -21,6 +21,13 @@ read README.md, then ask which module(s) to work on. Based on the answer, read t - NEVER run: `npm run dev`, `npm run build` - NEVER commit unless user asks +## GitHub Issues + +When creating issues: +- Add `pkg:*` labels to indicate which package(s) the issue affects + - Available labels: `pkg:agent`, `pkg:ai`, `pkg:coding-agent`, `pkg:mom`, `pkg:pods`, `pkg:proxy`, `pkg:tui`, `pkg:web-ui` +- If an issue spans multiple packages, add all relevant labels + ## Tools - GitHub CLI for issues/PRs - Add package labels to issues/PRs: pkg:agent, pkg:ai, pkg:coding-agent, pkg:mom, pkg:pods, pkg:proxy, pkg:tui, pkg:web-ui diff --git a/package-lock.json b/package-lock.json index ea12a3a4..79276b3a 100644 --- a/package-lock.json +++ b/package-lock.json @@ -6074,11 +6074,11 @@ }, "packages/agent": { "name": "@mariozechner/pi-agent-core", - "version": "0.11.1", + "version": "0.11.2", "license": "MIT", "dependencies": { - "@mariozechner/pi-ai": "^0.11.0", - "@mariozechner/pi-tui": "^0.11.0" + "@mariozechner/pi-ai": "^0.11.2", + "@mariozechner/pi-tui": "^0.11.2" }, "devDependencies": { "@types/node": "^24.3.0", @@ -6108,7 +6108,7 @@ }, "packages/ai": { "name": "@mariozechner/pi-ai", - "version": "0.11.1", + "version": "0.11.2", "license": "MIT", "dependencies": { "@anthropic-ai/sdk": "^0.61.0", @@ -6149,12 +6149,12 @@ }, "packages/coding-agent": { "name": "@mariozechner/pi-coding-agent", - "version": "0.11.1", + "version": "0.11.2", "license": "MIT", "dependencies": { - "@mariozechner/pi-agent-core": "^0.11.0", - "@mariozechner/pi-ai": "^0.11.0", - "@mariozechner/pi-tui": "^0.11.0", + "@mariozechner/pi-agent-core": "^0.11.2", + "@mariozechner/pi-ai": "^0.11.2", + "@mariozechner/pi-tui": "^0.11.2", "chalk": "^5.5.0", "diff": "^8.0.2", "glob": "^11.0.3" @@ -6191,12 +6191,12 @@ }, "packages/mom": { "name": "@mariozechner/pi-mom", - "version": "0.11.1", + "version": 
"0.11.2", "license": "MIT", "dependencies": { "@anthropic-ai/sandbox-runtime": "^0.0.16", - "@mariozechner/pi-agent-core": "^0.11.0", - "@mariozechner/pi-ai": "^0.11.0", + "@mariozechner/pi-agent-core": "^0.11.2", + "@mariozechner/pi-ai": "^0.11.2", "@sinclair/typebox": "^0.34.0", "@slack/socket-mode": "^2.0.0", "@slack/web-api": "^7.0.0", @@ -6234,10 +6234,10 @@ }, "packages/pods": { "name": "@mariozechner/pi", - "version": "0.11.1", + "version": "0.11.2", "license": "MIT", "dependencies": { - "@mariozechner/pi-agent-core": "^0.11.0", + "@mariozechner/pi-agent-core": "^0.11.2", "chalk": "^5.5.0" }, "bin": { @@ -6250,7 +6250,7 @@ }, "packages/proxy": { "name": "@mariozechner/pi-proxy", - "version": "0.11.1", + "version": "0.11.2", "dependencies": { "@hono/node-server": "^1.14.0", "hono": "^4.6.16" @@ -6266,7 +6266,7 @@ }, "packages/tui": { "name": "@mariozechner/pi-tui", - "version": "0.11.1", + "version": "0.11.2", "license": "MIT", "dependencies": { "@types/mime-types": "^2.1.4", @@ -6310,12 +6310,12 @@ }, "packages/web-ui": { "name": "@mariozechner/pi-web-ui", - "version": "0.11.1", + "version": "0.11.2", "license": "MIT", "dependencies": { "@lmstudio/sdk": "^1.5.0", - "@mariozechner/pi-ai": "^0.11.0", - "@mariozechner/pi-tui": "^0.11.0", + "@mariozechner/pi-ai": "^0.11.2", + "@mariozechner/pi-tui": "^0.11.2", "docx-preview": "^0.3.7", "jszip": "^3.10.1", "lucide": "^0.544.0", diff --git a/packages/agent/package.json b/packages/agent/package.json index 9f088db1..aa3245ae 100644 --- a/packages/agent/package.json +++ b/packages/agent/package.json @@ -1,6 +1,6 @@ { "name": "@mariozechner/pi-agent-core", - "version": "0.11.1", + "version": "0.11.2", "description": "General-purpose agent with transport abstraction, state management, and attachment support", "type": "module", "main": "./dist/index.js", @@ -18,8 +18,8 @@ "prepublishOnly": "npm run clean && npm run build" }, "dependencies": { - "@mariozechner/pi-ai": "^0.11.1", - "@mariozechner/pi-tui": "^0.11.1" + 
"@mariozechner/pi-ai": "^0.11.2", + "@mariozechner/pi-tui": "^0.11.2" }, "keywords": [ "ai", diff --git a/packages/ai/package.json b/packages/ai/package.json index 56f1e24a..3995e4ac 100644 --- a/packages/ai/package.json +++ b/packages/ai/package.json @@ -1,6 +1,6 @@ { "name": "@mariozechner/pi-ai", - "version": "0.11.1", + "version": "0.11.2", "description": "Unified LLM API with automatic model discovery and provider configuration", "type": "module", "main": "./dist/index.js", diff --git a/packages/ai/src/models.generated.ts b/packages/ai/src/models.generated.ts index b5fe849f..ff5191d3 100644 --- a/packages/ai/src/models.generated.ts +++ b/packages/ai/src/models.generated.ts @@ -3158,7 +3158,7 @@ export const MODELS = { cost: { input: 0.48, output: 1.44, - cacheRead: 0.11, + cacheRead: 0.088, cacheWrite: 0, }, contextWindow: 65536, @@ -4314,7 +4314,7 @@ export const MODELS = { cost: { input: 0.19999999999999998, output: 0.88, - cacheRead: 0, + cacheRead: 0.135, cacheWrite: 0, }, contextWindow: 163840, @@ -5034,23 +5034,6 @@ export const MODELS = { contextWindow: 32768, maxTokens: 4096, } satisfies Model<"openai-completions">, - "cohere/command-r-08-2024": { - id: "cohere/command-r-08-2024", - name: "Cohere: Command R (08-2024)", - api: "openai-completions", - provider: "openrouter", - baseUrl: "https://openrouter.ai/api/v1", - reasoning: false, - input: ["text"], - cost: { - input: 0.15, - output: 0.6, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 128000, - maxTokens: 4000, - } satisfies Model<"openai-completions">, "cohere/command-r-plus-08-2024": { id: "cohere/command-r-plus-08-2024", name: "Cohere: Command R+ (08-2024)", @@ -5068,6 +5051,23 @@ export const MODELS = { contextWindow: 128000, maxTokens: 4000, } satisfies Model<"openai-completions">, + "cohere/command-r-08-2024": { + id: "cohere/command-r-08-2024", + name: "Cohere: Command R (08-2024)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + 
reasoning: false, + input: ["text"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4000, + } satisfies Model<"openai-completions">, "sao10k/l3.1-euryale-70b": { id: "sao10k/l3.1-euryale-70b", name: "Sao10K: Llama 3.1 Euryale 70B v2.2", @@ -5187,9 +5187,9 @@ export const MODELS = { contextWindow: 131072, maxTokens: 16384, } satisfies Model<"openai-completions">, - "openai/gpt-4o-mini-2024-07-18": { - id: "openai/gpt-4o-mini-2024-07-18", - name: "OpenAI: GPT-4o-mini (2024-07-18)", + "openai/gpt-4o-mini": { + id: "openai/gpt-4o-mini", + name: "OpenAI: GPT-4o-mini", api: "openai-completions", provider: "openrouter", baseUrl: "https://openrouter.ai/api/v1", @@ -5204,9 +5204,9 @@ export const MODELS = { contextWindow: 128000, maxTokens: 16384, } satisfies Model<"openai-completions">, - "openai/gpt-4o-mini": { - id: "openai/gpt-4o-mini", - name: "OpenAI: GPT-4o-mini", + "openai/gpt-4o-mini-2024-07-18": { + id: "openai/gpt-4o-mini-2024-07-18", + name: "OpenAI: GPT-4o-mini (2024-07-18)", api: "openai-completions", provider: "openrouter", baseUrl: "https://openrouter.ai/api/v1", @@ -5306,23 +5306,6 @@ export const MODELS = { contextWindow: 128000, maxTokens: 4096, } satisfies Model<"openai-completions">, - "openai/gpt-4o-2024-05-13": { - id: "openai/gpt-4o-2024-05-13", - name: "OpenAI: GPT-4o (2024-05-13)", - api: "openai-completions", - provider: "openrouter", - baseUrl: "https://openrouter.ai/api/v1", - reasoning: false, - input: ["text", "image"], - cost: { - input: 5, - output: 15, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 128000, - maxTokens: 4096, - } satisfies Model<"openai-completions">, "openai/gpt-4o": { id: "openai/gpt-4o", name: "OpenAI: GPT-4o", @@ -5357,22 +5340,22 @@ export const MODELS = { contextWindow: 128000, maxTokens: 64000, } satisfies Model<"openai-completions">, - "meta-llama/llama-3-70b-instruct": { - id: "meta-llama/llama-3-70b-instruct", - name: "Meta: Llama 3 70B 
Instruct", + "openai/gpt-4o-2024-05-13": { + id: "openai/gpt-4o-2024-05-13", + name: "OpenAI: GPT-4o (2024-05-13)", api: "openai-completions", provider: "openrouter", baseUrl: "https://openrouter.ai/api/v1", reasoning: false, - input: ["text"], + input: ["text", "image"], cost: { - input: 0.3, - output: 0.39999999999999997, + input: 5, + output: 15, cacheRead: 0, cacheWrite: 0, }, - contextWindow: 8192, - maxTokens: 16384, + contextWindow: 128000, + maxTokens: 4096, } satisfies Model<"openai-completions">, "meta-llama/llama-3-8b-instruct": { id: "meta-llama/llama-3-8b-instruct", @@ -5391,6 +5374,23 @@ export const MODELS = { contextWindow: 8192, maxTokens: 16384, } satisfies Model<"openai-completions">, + "meta-llama/llama-3-70b-instruct": { + id: "meta-llama/llama-3-70b-instruct", + name: "Meta: Llama 3 70B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.3, + output: 0.39999999999999997, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 8192, + maxTokens: 16384, + } satisfies Model<"openai-completions">, "mistralai/mixtral-8x22b-instruct": { id: "mistralai/mixtral-8x22b-instruct", name: "Mistral: Mixtral 8x22B Instruct", @@ -5476,23 +5476,6 @@ export const MODELS = { contextWindow: 128000, maxTokens: 4096, } satisfies Model<"openai-completions">, - "openai/gpt-3.5-turbo-0613": { - id: "openai/gpt-3.5-turbo-0613", - name: "OpenAI: GPT-3.5 Turbo (older v0613)", - api: "openai-completions", - provider: "openrouter", - baseUrl: "https://openrouter.ai/api/v1", - reasoning: false, - input: ["text"], - cost: { - input: 1, - output: 2, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 4095, - maxTokens: 4096, - } satisfies Model<"openai-completions">, "openai/gpt-4-turbo-preview": { id: "openai/gpt-4-turbo-preview", name: "OpenAI: GPT-4 Turbo Preview", @@ -5510,6 +5493,23 @@ export const MODELS = { contextWindow: 128000, maxTokens: 4096, } 
satisfies Model<"openai-completions">, + "openai/gpt-3.5-turbo-0613": { + id: "openai/gpt-3.5-turbo-0613", + name: "OpenAI: GPT-3.5 Turbo (older v0613)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 1, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 4095, + maxTokens: 4096, + } satisfies Model<"openai-completions">, "mistralai/mistral-small": { id: "mistralai/mistral-small", name: "Mistral Small", @@ -5612,23 +5612,6 @@ export const MODELS = { contextWindow: 8191, maxTokens: 4096, } satisfies Model<"openai-completions">, - "openai/gpt-4": { - id: "openai/gpt-4", - name: "OpenAI: GPT-4", - api: "openai-completions", - provider: "openrouter", - baseUrl: "https://openrouter.ai/api/v1", - reasoning: false, - input: ["text"], - cost: { - input: 30, - output: 60, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 8191, - maxTokens: 4096, - } satisfies Model<"openai-completions">, "openai/gpt-3.5-turbo": { id: "openai/gpt-3.5-turbo", name: "OpenAI: GPT-3.5 Turbo", @@ -5646,6 +5629,23 @@ export const MODELS = { contextWindow: 16385, maxTokens: 4096, } satisfies Model<"openai-completions">, + "openai/gpt-4": { + id: "openai/gpt-4", + name: "OpenAI: GPT-4", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 30, + output: 60, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 8191, + maxTokens: 4096, + } satisfies Model<"openai-completions">, "openrouter/auto": { id: "openrouter/auto", name: "OpenRouter: Auto Router", diff --git a/packages/coding-agent/CHANGELOG.md b/packages/coding-agent/CHANGELOG.md index 90c6205c..9ea5f59f 100644 --- a/packages/coding-agent/CHANGELOG.md +++ b/packages/coding-agent/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## [0.11.2] - 2025-12-01 + +### Fixed + +- **RPC Mode Session Management**: Fixed session files 
not being saved in RPC mode (`--mode rpc`). Since version 0.9.0, the `agent.subscribe()` call with session management logic was only present in the TUI renderer, causing RPC mode to skip saving messages to session files. RPC mode now properly saves sessions just like interactive mode. ([#83](https://github.com/badlogic/pi-mono/issues/83)) + ## [0.11.1] - 2025-11-29 ### Added diff --git a/packages/coding-agent/package.json b/packages/coding-agent/package.json index f6432a3e..6bd4334b 100644 --- a/packages/coding-agent/package.json +++ b/packages/coding-agent/package.json @@ -1,6 +1,6 @@ { "name": "@mariozechner/pi-coding-agent", - "version": "0.11.1", + "version": "0.11.2", "description": "Coding agent CLI with read, bash, edit, write tools and session management", "type": "module", "bin": { @@ -22,9 +22,9 @@ "prepublishOnly": "npm run clean && npm run build" }, "dependencies": { - "@mariozechner/pi-agent-core": "^0.11.1", - "@mariozechner/pi-ai": "^0.11.1", - "@mariozechner/pi-tui": "^0.11.1", + "@mariozechner/pi-agent-core": "^0.11.2", + "@mariozechner/pi-ai": "^0.11.2", + "@mariozechner/pi-tui": "^0.11.2", "chalk": "^5.5.0", "diff": "^8.0.2", "glob": "^11.0.3" diff --git a/packages/coding-agent/src/main.ts b/packages/coding-agent/src/main.ts index 1a9541ae..a303ca93 100644 --- a/packages/coding-agent/src/main.ts +++ b/packages/coding-agent/src/main.ts @@ -806,10 +806,24 @@ async function runSingleShotMode( } } -async function runRpcMode(agent: Agent, _sessionManager: SessionManager): Promise { - // Subscribe to all events and output as JSON - agent.subscribe((event) => { +async function runRpcMode(agent: Agent, sessionManager: SessionManager): Promise { + // Subscribe to all events and output as JSON (same pattern as tui-renderer) + agent.subscribe(async (event) => { console.log(JSON.stringify(event)); + + // Save messages to session + if (event.type === "message_end") { + sessionManager.saveMessage(event.message); + + // Yield to microtask queue to allow agent 
state to update + // (tui-renderer does this implicitly via await handleEvent) + await Promise.resolve(); + + // Check if we should initialize session now (after first user+assistant exchange) + if (sessionManager.shouldInitializeSession(agent.state.messages)) { + sessionManager.startSession(agent.state); + } + } }); // Listen for JSON input on stdin diff --git a/packages/coding-agent/test/rpc.test.ts b/packages/coding-agent/test/rpc.test.ts new file mode 100644 index 00000000..f70dd868 --- /dev/null +++ b/packages/coding-agent/test/rpc.test.ts @@ -0,0 +1,134 @@ +import { type ChildProcess, spawn } from "node:child_process"; +import { existsSync, readdirSync, readFileSync, rmSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { dirname, join } from "node:path"; +import * as readline from "node:readline"; +import { fileURLToPath } from "node:url"; +import type { AgentEvent } from "@mariozechner/pi-agent-core"; +import { afterEach, beforeEach, describe, expect, test } from "vitest"; + +const __dirname = dirname(fileURLToPath(import.meta.url)); + +/** + * RPC mode tests. 
+ * Regression test for issue #83: https://github.com/badlogic/pi-mono/issues/83 + */ +describe("RPC mode", () => { + let agent: ChildProcess; + let sessionDir: string; + + beforeEach(() => { + // Create a unique temp directory for sessions + sessionDir = join(tmpdir(), `pi-rpc-test-${Date.now()}`); + }); + + afterEach(() => { + // Kill the agent if still running + if (agent && !agent.killed) { + agent.kill("SIGKILL"); + } + // Clean up session directory + if (sessionDir && existsSync(sessionDir)) { + rmSync(sessionDir, { recursive: true }); + } + }); + + test("should save messages to session file", async () => { + // Spawn agent in RPC mode with custom session directory + agent = spawn("node", ["dist/cli.js", "--mode", "rpc"], { + cwd: join(__dirname, ".."), + env: { + ...process.env, + PI_CODING_AGENT_DIR: sessionDir, + }, + }); + + const events: AgentEvent[] = []; + + // Parse agent events + const rl = readline.createInterface({ input: agent.stdout!, terminal: false }); + + // Collect stderr for debugging + let stderr = ""; + agent.stderr?.on("data", (data) => { + stderr += data.toString(); + }); + + // Wait for agent_end which signals the full prompt/response cycle is complete + const waitForAgentEnd = new Promise((resolve, reject) => { + const timeout = setTimeout(() => reject(new Error("Timeout waiting for agent_end")), 60000); + + rl.on("line", (line: string) => { + try { + const event = JSON.parse(line) as AgentEvent; + events.push(event); + + // agent_end means the full prompt cycle completed (user msg + assistant response) + if (event.type === "agent_end") { + clearTimeout(timeout); + resolve(); + } + } catch { + // Ignore non-JSON lines + } + }); + + rl.on("close", () => { + clearTimeout(timeout); + reject(new Error("Agent stdout closed before agent_end")); + }); + }); + + // Send a simple prompt - the LLM will respond + agent.stdin!.write(JSON.stringify({ type: "prompt", message: "Reply with just the word 'hello'" }) + "\n"); + + // Wait for full 
prompt/response cycle to complete + await waitForAgentEnd; + + // Check that message_end events were emitted + const messageEndEvents = events.filter((e) => e.type === "message_end"); + expect(messageEndEvents.length).toBeGreaterThanOrEqual(2); // user + assistant + + // Wait a bit for file writes to complete + await new Promise((resolve) => setTimeout(resolve, 200)); + + // Kill the agent gracefully + agent.kill("SIGTERM"); + + // Find and verify the session file + const sessionsPath = join(sessionDir, "sessions"); + expect(existsSync(sessionsPath), `Sessions path should exist: ${sessionsPath}. Stderr: ${stderr}`).toBe(true); + + // Find the session directory (it's based on cwd) + const sessionDirs = readdirSync(sessionsPath); + expect(sessionDirs.length, `Should have at least one session dir. Stderr: ${stderr}`).toBeGreaterThan(0); + + const cwdSessionDir = join(sessionsPath, sessionDirs[0]); + const allFiles = readdirSync(cwdSessionDir); + const sessionFiles = allFiles.filter((f) => f.endsWith(".jsonl")); + expect( + sessionFiles.length, + `Should have exactly one session file. 
Dir: ${cwdSessionDir}, Files: ${JSON.stringify(allFiles)}, Stderr: ${stderr}`, + ).toBe(1); + + // Read and verify session content + const sessionContent = readFileSync(join(cwdSessionDir, sessionFiles[0]), "utf8"); + const lines = sessionContent.trim().split("\n"); + + // Should have session header and at least 2 messages (user + assistant) + expect(lines.length).toBeGreaterThanOrEqual(3); + + const entries = lines.map((line) => JSON.parse(line)); + + // First entry should be session header + expect(entries[0].type).toBe("session"); + + // Should have user and assistant messages + const messages = entries.filter((e: { type: string }) => e.type === "message"); + expect(messages.length).toBeGreaterThanOrEqual(2); + + const roles = messages.map((m: { message: { role: string } }) => m.message.role); + expect(roles).toContain("user"); + expect(roles).toContain("assistant"); + }, 90000); +}); diff --git a/packages/mom/package.json b/packages/mom/package.json index 83aff3ae..75e68f9b 100644 --- a/packages/mom/package.json +++ b/packages/mom/package.json @@ -1,6 +1,6 @@ { "name": "@mariozechner/pi-mom", - "version": "0.11.1", + "version": "0.11.2", "description": "Slack bot that delegates messages to the pi coding agent", "type": "module", "bin": { @@ -21,8 +21,8 @@ }, "dependencies": { "@anthropic-ai/sandbox-runtime": "^0.0.16", - "@mariozechner/pi-agent-core": "^0.11.1", - "@mariozechner/pi-ai": "^0.11.1", + "@mariozechner/pi-agent-core": "^0.11.2", + "@mariozechner/pi-ai": "^0.11.2", "@sinclair/typebox": "^0.34.0", "@slack/socket-mode": "^2.0.0", "@slack/web-api": "^7.0.0", diff --git a/packages/pods/package.json b/packages/pods/package.json index cd1f66bc..1bdc6a5a 100644 --- a/packages/pods/package.json +++ b/packages/pods/package.json @@ -1,6 +1,6 @@ { "name": "@mariozechner/pi", - "version": "0.11.1", + "version": "0.11.2", "description": "CLI tool for managing vLLM deployments on GPU pods", "type": "module", "bin": { @@ -34,7 +34,7 @@ "node": ">=20.0.0" }, 
"dependencies": { - "@mariozechner/pi-agent-core": "^0.11.1", + "@mariozechner/pi-agent-core": "^0.11.2", "chalk": "^5.5.0" }, "devDependencies": {} diff --git a/packages/proxy/package.json b/packages/proxy/package.json index b5bb890b..a9f81133 100644 --- a/packages/proxy/package.json +++ b/packages/proxy/package.json @@ -1,6 +1,6 @@ { "name": "@mariozechner/pi-proxy", - "version": "0.11.1", + "version": "0.11.2", "type": "module", "description": "CORS and authentication proxy for pi-ai", "main": "dist/index.js", diff --git a/packages/tui/package.json b/packages/tui/package.json index bc7eee49..22a0b016 100644 --- a/packages/tui/package.json +++ b/packages/tui/package.json @@ -1,6 +1,6 @@ { "name": "@mariozechner/pi-tui", - "version": "0.11.1", + "version": "0.11.2", "description": "Terminal User Interface library with differential rendering for efficient text-based applications", "type": "module", "main": "dist/index.js", diff --git a/packages/web-ui/package.json b/packages/web-ui/package.json index 9c543578..0106ae84 100644 --- a/packages/web-ui/package.json +++ b/packages/web-ui/package.json @@ -1,6 +1,6 @@ { "name": "@mariozechner/pi-web-ui", - "version": "0.11.1", + "version": "0.11.2", "description": "Reusable web UI components for AI chat interfaces powered by @mariozechner/pi-ai", "type": "module", "main": "dist/index.js", @@ -18,8 +18,8 @@ }, "dependencies": { "@lmstudio/sdk": "^1.5.0", - "@mariozechner/pi-ai": "^0.11.1", - "@mariozechner/pi-tui": "^0.11.1", + "@mariozechner/pi-ai": "^0.11.2", + "@mariozechner/pi-tui": "^0.11.2", "docx-preview": "^0.3.7", "jszip": "^3.10.1", "lucide": "^0.544.0",