Release v0.7.28

- Add message queuing with configurable modes (one-at-a-time/all) (#15)
- Add /queue command to select queue mode
- Add TruncatedText component for proper viewport-aware text truncation
- Queue mode setting persists in ~/.pi/agent/settings.json
- Visual feedback for queued messages with proper ANSI handling
- Press Escape to abort and restore queued messages to editor
This commit is contained in:
Mario Zechner 2025-11-20 20:39:43 +01:00
parent e694d435fd
commit d44073b140
18 changed files with 477 additions and 108 deletions

28
package-lock.json generated
View file

@ -5413,11 +5413,11 @@
},
"packages/agent": {
"name": "@mariozechner/pi-agent",
"version": "0.7.27",
"version": "0.7.28",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-ai": "^0.7.26",
"@mariozechner/pi-tui": "^0.7.26"
"@mariozechner/pi-ai": "^0.7.27",
"@mariozechner/pi-tui": "^0.7.27"
},
"devDependencies": {
"@types/node": "^24.3.0",
@ -5443,7 +5443,7 @@
},
"packages/ai": {
"name": "@mariozechner/pi-ai",
"version": "0.7.27",
"version": "0.7.28",
"license": "MIT",
"dependencies": {
"@anthropic-ai/sdk": "^0.61.0",
@ -5490,11 +5490,11 @@
},
"packages/coding-agent": {
"name": "@mariozechner/pi-coding-agent",
"version": "0.7.27",
"version": "0.7.28",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-agent": "^0.7.26",
"@mariozechner/pi-ai": "^0.7.26",
"@mariozechner/pi-agent": "^0.7.27",
"@mariozechner/pi-ai": "^0.7.27",
"chalk": "^5.5.0",
"diff": "^8.0.2",
"glob": "^11.0.3"
@ -5537,10 +5537,10 @@
},
"packages/pods": {
"name": "@mariozechner/pi",
"version": "0.7.27",
"version": "0.7.28",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-agent": "^0.7.26",
"@mariozechner/pi-agent": "^0.7.27",
"chalk": "^5.5.0"
},
"bin": {
@ -5563,7 +5563,7 @@
},
"packages/proxy": {
"name": "@mariozechner/pi-proxy",
"version": "0.7.27",
"version": "0.7.28",
"dependencies": {
"@hono/node-server": "^1.14.0",
"hono": "^4.6.16"
@ -5579,7 +5579,7 @@
},
"packages/tui": {
"name": "@mariozechner/pi-tui",
"version": "0.7.27",
"version": "0.7.28",
"license": "MIT",
"dependencies": {
"@types/mime-types": "^2.1.4",
@ -5661,12 +5661,12 @@
},
"packages/web-ui": {
"name": "@mariozechner/pi-web-ui",
"version": "0.7.27",
"version": "0.7.28",
"license": "MIT",
"dependencies": {
"@lmstudio/sdk": "^1.5.0",
"@mariozechner/pi-ai": "^0.7.26",
"@mariozechner/pi-tui": "^0.7.26",
"@mariozechner/pi-ai": "^0.7.27",
"@mariozechner/pi-tui": "^0.7.27",
"docx-preview": "^0.3.7",
"jszip": "^3.10.1",
"lucide": "^0.544.0",

View file

@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-agent",
"version": "0.7.27",
"version": "0.7.28",
"description": "General-purpose agent with transport abstraction, state management, and attachment support",
"type": "module",
"main": "./dist/index.js",
@ -18,8 +18,8 @@
"prepublishOnly": "npm run clean && npm run build"
},
"dependencies": {
"@mariozechner/pi-ai": "^0.7.27",
"@mariozechner/pi-tui": "^0.7.27"
"@mariozechner/pi-ai": "^0.7.28",
"@mariozechner/pi-tui": "^0.7.28"
},
"keywords": [
"ai",

View file

@ -55,6 +55,8 @@ export interface AgentOptions {
transport: AgentTransport;
// Transform app messages to LLM-compatible messages before sending to transport
messageTransformer?: (messages: AppMessage[]) => Message[] | Promise<Message[]>;
// Queue mode: "all" = send all queued messages at once, "one-at-a-time" = send one queued message per turn
queueMode?: "all" | "one-at-a-time";
}
export class Agent {
@ -74,11 +76,13 @@ export class Agent {
private transport: AgentTransport;
private messageTransformer: (messages: AppMessage[]) => Message[] | Promise<Message[]>;
private messageQueue: Array<QueuedMessage<AppMessage>> = [];
private queueMode: "all" | "one-at-a-time";
constructor(opts: AgentOptions) {
this._state = { ...this._state, ...opts.initialState };
this.transport = opts.transport;
this.messageTransformer = opts.messageTransformer || defaultMessageTransformer;
this.queueMode = opts.queueMode || "one-at-a-time";
}
get state(): AgentState {
@ -103,6 +107,14 @@ export class Agent {
this._state.thinkingLevel = l;
}
setQueueMode(mode: "all" | "one-at-a-time") {
this.queueMode = mode;
}
getQueueMode(): "all" | "one-at-a-time" {
return this.queueMode;
}
setTools(t: typeof this._state.tools) {
this._state.tools = t;
}
@ -124,6 +136,10 @@ export class Agent {
});
}
clearMessageQueue() {
this.messageQueue = [];
}
clearMessages() {
this._state.messages = [];
}
@ -179,10 +195,21 @@ export class Agent {
model,
reasoning,
getQueuedMessages: async <T>() => {
// Return queued messages (they'll be added to state via message_end event)
const queued = this.messageQueue.slice();
this.messageQueue = [];
return queued as QueuedMessage<T>[];
// Return queued messages based on queue mode
if (this.queueMode === "one-at-a-time") {
// Return only first message
if (this.messageQueue.length > 0) {
const first = this.messageQueue[0];
this.messageQueue = this.messageQueue.slice(1);
return [first] as QueuedMessage<T>[];
}
return [];
} else {
// Return all queued messages at once
const queued = this.messageQueue.slice();
this.messageQueue = [];
return queued as QueuedMessage<T>[];
}
},
};

View file

@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-ai",
"version": "0.7.27",
"version": "0.7.28",
"description": "Unified LLM API with automatic model discovery and provider configuration",
"type": "module",
"main": "./dist/index.js",

View file

@ -1974,6 +1974,23 @@ export const MODELS = {
contextWindow: 2000000,
maxTokens: 30000,
} satisfies Model<"openai-completions">,
"x-ai/grok-4.1-fast:free": {
id: "x-ai/grok-4.1-fast:free",
name: "xAI: Grok 4.1 Fast (free)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 2000000,
maxTokens: 30000,
} satisfies Model<"openai-completions">,
"google/gemini-3-pro-preview": {
id: "google/gemini-3-pro-preview",
name: "Google: Gemini 3 Pro Preview",
@ -5085,23 +5102,6 @@ export const MODELS = {
contextWindow: 131072,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-405b-instruct": {
id: "meta-llama/llama-3.1-405b-instruct",
name: "Meta: Llama 3.1 405B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 3.5,
output: 3.5,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 130815,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-70b-instruct": {
id: "meta-llama/llama-3.1-70b-instruct",
name: "Meta: Llama 3.1 70B Instruct",
@ -5119,6 +5119,23 @@ export const MODELS = {
contextWindow: 131072,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-405b-instruct": {
id: "meta-llama/llama-3.1-405b-instruct",
name: "Meta: Llama 3.1 405B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 3.5,
output: 3.5,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 130815,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"mistralai/mistral-nemo": {
id: "mistralai/mistral-nemo",
name: "Mistral: Mistral Nemo",
@ -5136,9 +5153,9 @@ export const MODELS = {
contextWindow: 131072,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-mini-2024-07-18": {
id: "openai/gpt-4o-mini-2024-07-18",
name: "OpenAI: GPT-4o-mini (2024-07-18)",
"openai/gpt-4o-mini": {
id: "openai/gpt-4o-mini",
name: "OpenAI: GPT-4o-mini",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
@ -5153,9 +5170,9 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-mini": {
id: "openai/gpt-4o-mini",
name: "OpenAI: GPT-4o-mini",
"openai/gpt-4o-mini-2024-07-18": {
id: "openai/gpt-4o-mini-2024-07-18",
name: "OpenAI: GPT-4o-mini (2024-07-18)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
@ -5255,23 +5272,6 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-2024-05-13": {
id: "openai/gpt-4o-2024-05-13",
name: "OpenAI: GPT-4o (2024-05-13)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 5,
output: 15,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4o": {
id: "openai/gpt-4o",
name: "OpenAI: GPT-4o",
@ -5306,6 +5306,23 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 64000,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-2024-05-13": {
id: "openai/gpt-4o-2024-05-13",
name: "OpenAI: GPT-4o (2024-05-13)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 5,
output: 15,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3-70b-instruct": {
id: "meta-llama/llama-3-70b-instruct",
name: "Meta: Llama 3 70B Instruct",
@ -5425,23 +5442,6 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-3.5-turbo-0613": {
id: "openai/gpt-3.5-turbo-0613",
name: "OpenAI: GPT-3.5 Turbo (older v0613)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 1,
output: 2,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 4095,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4-turbo-preview": {
id: "openai/gpt-4-turbo-preview",
name: "OpenAI: GPT-4 Turbo Preview",
@ -5459,6 +5459,23 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-3.5-turbo-0613": {
id: "openai/gpt-3.5-turbo-0613",
name: "OpenAI: GPT-3.5 Turbo (older v0613)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 1,
output: 2,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 4095,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"mistralai/mistral-small": {
id: "mistralai/mistral-small",
name: "Mistral Small",
@ -5561,23 +5578,6 @@ export const MODELS = {
contextWindow: 8191,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4": {
id: "openai/gpt-4",
name: "OpenAI: GPT-4",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 30,
output: 60,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 8191,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-3.5-turbo": {
id: "openai/gpt-3.5-turbo",
name: "OpenAI: GPT-3.5 Turbo",
@ -5595,6 +5595,23 @@ export const MODELS = {
contextWindow: 16385,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4": {
id: "openai/gpt-4",
name: "OpenAI: GPT-4",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 30,
output: 60,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 8191,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openrouter/auto": {
id: "openrouter/auto",
name: "OpenRouter: Auto Router",

View file

@ -2,6 +2,12 @@
## [Unreleased]
## [0.7.28] - 2025-11-20
### Added
- **Message Queuing**: You can now send multiple messages while the agent is processing without waiting for the previous response to complete. Messages submitted during streaming are queued and processed based on your queue mode setting. Queued messages are shown in a pending area below the chat. Press Escape to abort and restore all queued messages to the editor. Use `/queue` to select between "one-at-a-time" (process queued messages sequentially, recommended) or "all" (process all queued messages at once). The queue mode setting is saved and persists across sessions. ([#15](https://github.com/badlogic/pi-mono/issues/15))
## [0.7.27] - 2025-11-20
### Fixed

View file

@ -298,6 +298,14 @@ The selector only displays models for which API keys are configured in your envi
Adjust thinking/reasoning level for supported models (Claude Sonnet 4, GPT-5, Gemini 2.5). Opens an interactive selector where you can use arrow keys to navigate, Enter to select, or Escape to cancel.
### /queue
Select message queue mode. Opens an interactive selector where you can choose between:
- **one-at-a-time** (default): Process queued messages one by one. When you submit messages while the agent is processing, they're queued and sent individually after each agent response completes.
- **all**: Process all queued messages at once. All queued messages are injected into the context together before the next agent response.
The queue mode setting is saved and persists across sessions.
### /export [filename]
Export the current session to a self-contained HTML file:
@ -385,6 +393,31 @@ Drag files from your OS file explorer (Finder on macOS, Explorer on Windows) dir
Paste multiple lines of text (e.g., code snippets, logs) and they'll be automatically coalesced into a compact `[paste #123 <N> lines]` reference in the editor. The full content is still sent to the model.
### Message Queuing
You can submit multiple messages while the agent is processing without waiting for responses. Messages are queued and processed based on your queue mode setting:
**One-at-a-time mode (default):**
- Each queued message is processed sequentially with its own response
- Example: Queue "task 1", "task 2", "task 3" → agent completes task 1 → processes task 2 → completes task 2 → processes task 3
- Recommended for most use cases
**All mode:**
- All queued messages are sent to the model at once in a single context
- Example: Queue "task 1", "task 2", "task 3" → agent receives all three together → responds considering all tasks
- Useful when tasks should be considered together
**Visual feedback:**
- Queued messages appear below the chat with "Queued: <message text>"
- Messages disappear from the queue as they're processed
**Abort and restore:**
- Press **Escape** while streaming to abort the current operation
- All queued messages (plus any text in the editor) are restored to the editor
- Allows you to modify or remove queued messages before resubmitting
Change queue mode with `/queue` command. Setting is saved in `~/.pi/agent/settings.json`.
### Keyboard Shortcuts
- **Ctrl+W**: Delete word backwards (stops at whitespace or punctuation)

View file

@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-coding-agent",
"version": "0.7.27",
"version": "0.7.28",
"description": "Coding agent CLI with read, bash, edit, write tools and session management",
"type": "module",
"bin": {
@ -21,8 +21,8 @@
"prepublishOnly": "npm run clean && npm run build"
},
"dependencies": {
"@mariozechner/pi-agent": "^0.7.27",
"@mariozechner/pi-ai": "^0.7.27",
"@mariozechner/pi-agent": "^0.7.28",
"@mariozechner/pi-ai": "^0.7.28",
"chalk": "^5.5.0",
"diff": "^8.0.2",
"glob": "^11.0.3"

View file

@ -760,6 +760,7 @@ export async function main(args: string[]) {
thinkingLevel: "off",
tools: codingTools,
},
queueMode: settingsManager.getQueueMode(),
transport: new ProviderTransport({
// Dynamic API key lookup based on current model's provider
getApiKey: async () => {

View file

@ -6,6 +6,7 @@ export interface Settings {
lastChangelogVersion?: string;
defaultProvider?: string;
defaultModel?: string;
queueMode?: "all" | "one-at-a-time";
}
export class SettingsManager {
@ -78,4 +79,13 @@ export class SettingsManager {
this.settings.defaultModel = modelId;
this.save();
}
getQueueMode(): "all" | "one-at-a-time" {
return this.settings.queueMode || "one-at-a-time";
}
setQueueMode(mode: "all" | "one-at-a-time"): void {
this.settings.queueMode = mode;
this.save();
}
}

View file

@ -0,0 +1,64 @@
import { type Component, Container, type SelectItem, SelectList } from "@mariozechner/pi-tui";
import chalk from "chalk";
/**
 * Full-width horizontal rule that stretches to the current viewport width.
 */
class DynamicBorder implements Component {
	render(width: number): string[] {
		// Guard against zero/negative widths so repeat() never throws.
		const span = Math.max(1, width);
		const line = "─".repeat(span);
		return [chalk.blue(line)];
	}
}
/**
 * Bordered selector UI for choosing the message queue mode.
 *
 * Renders a SelectList between two full-width horizontal rules and forwards
 * the user's select/cancel actions to the supplied callbacks.
 */
export class QueueModeSelectorComponent extends Container {
	private selectList: SelectList;

	/**
	 * @param currentMode Mode to preselect in the list.
	 * @param onSelect Invoked with the chosen mode when the user confirms.
	 * @param onCancel Invoked when the user dismisses the selector.
	 */
	constructor(
		currentMode: "all" | "one-at-a-time",
		onSelect: (mode: "all" | "one-at-a-time") => void,
		onCancel: () => void,
	) {
		super();

		// The two available queue modes, recommended option first.
		const modeItems: SelectItem[] = [
			{
				value: "one-at-a-time",
				label: "one-at-a-time",
				description: "Process queued messages one by one (recommended)",
			},
			{ value: "all", label: "all", description: "Process all queued messages at once" },
		];

		// Layout: top rule, selector list, bottom rule.
		this.addChild(new DynamicBorder());

		this.selectList = new SelectList(modeItems, 2);

		// Highlight whichever mode is currently active.
		const activeIndex = modeItems.findIndex((item) => item.value === currentMode);
		if (activeIndex !== -1) {
			this.selectList.setSelectedIndex(activeIndex);
		}

		this.selectList.onSelect = (item) => {
			onSelect(item.value as "all" | "one-at-a-time");
		};
		this.selectList.onCancel = () => {
			onCancel();
		};

		this.addChild(this.selectList);
		this.addChild(new DynamicBorder());
	}

	/** Expose the inner SelectList so the caller can give it keyboard focus. */
	getSelectList(): SelectList {
		return this.selectList;
	}
}

View file

@ -10,6 +10,7 @@ import {
ProcessTerminal,
Spacer,
Text,
TruncatedText,
TUI,
} from "@mariozechner/pi-tui";
import chalk from "chalk";
@ -26,6 +27,7 @@ import { DynamicBorder } from "./dynamic-border.js";
import { FooterComponent } from "./footer.js";
import { ModelSelectorComponent } from "./model-selector.js";
import { OAuthSelectorComponent } from "./oauth-selector.js";
import { QueueModeSelectorComponent } from "./queue-mode-selector.js";
import { ThinkingSelectorComponent } from "./thinking-selector.js";
import { ToolExecutionComponent } from "./tool-execution.js";
import { UserMessageComponent } from "./user-message.js";
@ -37,6 +39,7 @@ import { UserMessageSelectorComponent } from "./user-message-selector.js";
export class TuiRenderer {
private ui: TUI;
private chatContainer: Container;
private pendingMessagesContainer: Container;
private statusContainer: Container;
private editor: CustomEditor;
private editorContainer: Container; // Container to swap between editor and selector
@ -53,6 +56,9 @@ export class TuiRenderer {
private changelogMarkdown: string | null = null;
private newVersion: string | null = null;
// Message queueing
private queuedMessages: string[] = [];
// Streaming message tracking
private streamingComponent: AssistantMessageComponent | null = null;
@ -62,6 +68,9 @@ export class TuiRenderer {
// Thinking level selector
private thinkingSelector: ThinkingSelectorComponent | null = null;
// Queue mode selector
private queueModeSelector: QueueModeSelectorComponent | null = null;
// Model selector
private modelSelector: ModelSelectorComponent | null = null;
@ -98,6 +107,7 @@ export class TuiRenderer {
this.scopedModels = scopedModels;
this.ui = new TUI(new ProcessTerminal());
this.chatContainer = new Container();
this.pendingMessagesContainer = new Container();
this.statusContainer = new Container();
this.editor = new CustomEditor();
this.editorContainer = new Container(); // Container to hold editor or selector
@ -145,6 +155,11 @@ export class TuiRenderer {
description: "Logout from OAuth provider",
};
const queueCommand: SlashCommand = {
name: "queue",
description: "Select message queue mode (opens selector UI)",
};
// Setup autocomplete for file paths and slash commands
const autocompleteProvider = new CombinedAutocompleteProvider(
[
@ -156,6 +171,7 @@ export class TuiRenderer {
branchCommand,
loginCommand,
logoutCommand,
queueCommand,
],
process.cwd(),
);
@ -228,6 +244,7 @@ export class TuiRenderer {
}
this.ui.addChild(this.chatContainer);
this.ui.addChild(this.pendingMessagesContainer);
this.ui.addChild(this.statusContainer);
this.ui.addChild(new Spacer(1));
this.ui.addChild(this.editorContainer); // Use container that can hold editor or selector
@ -238,6 +255,26 @@ export class TuiRenderer {
this.editor.onEscape = () => {
// Intercept Escape key when processing
if (this.loadingAnimation && this.onInterruptCallback) {
// Get all queued messages
const queuedText = this.queuedMessages.join("\n\n");
// Get current editor text
const currentText = this.editor.getText();
// Combine: queued messages + current editor text
const combinedText = [queuedText, currentText].filter((t) => t.trim()).join("\n\n");
// Put back in editor
this.editor.setText(combinedText);
// Clear queued messages
this.queuedMessages = [];
this.updatePendingMessagesDisplay();
// Clear agent's queue too
this.agent.clearMessageQueue();
// Abort
this.onInterruptCallback();
}
};
@ -321,6 +358,13 @@ export class TuiRenderer {
return;
}
// Check for /queue command
if (text === "/queue") {
this.showQueueModeSelector();
this.editor.setText("");
return;
}
// Normal message submission - validate model and API key first
const currentModel = this.agent.state.model;
if (!currentModel) {
@ -343,6 +387,27 @@ export class TuiRenderer {
return;
}
// Check if agent is currently streaming
if (this.agent.state.isStreaming) {
// Queue the message instead of submitting
this.queuedMessages.push(text);
// Queue in agent
await this.agent.queueMessage({
role: "user",
content: [{ type: "text", text }],
timestamp: Date.now(),
});
// Update pending messages display
this.updatePendingMessagesDisplay();
// Clear editor
this.editor.setText("");
this.ui.requestRender();
return;
}
// All good, proceed with submission
if (this.onInputCallback) {
this.onInputCallback(text);
@ -365,7 +430,7 @@ export class TuiRenderer {
switch (event.type) {
case "agent_start":
// Show loading animation
this.editor.disableSubmit = true;
// Note: Don't disable submit - we handle queuing in onSubmit callback
// Stop old loader before clearing
if (this.loadingAnimation) {
this.loadingAnimation.stop();
@ -378,6 +443,18 @@ export class TuiRenderer {
case "message_start":
if (event.message.role === "user") {
// Check if this is a queued message
const userMsg = event.message as any;
const textBlocks = userMsg.content.filter((c: any) => c.type === "text");
const messageText = textBlocks.map((c: any) => c.text).join("");
const queuedIndex = this.queuedMessages.indexOf(messageText);
if (queuedIndex !== -1) {
// Remove from queued messages
this.queuedMessages.splice(queuedIndex, 1);
this.updatePendingMessagesDisplay();
}
// Show user message immediately and clear editor
this.addMessageToChat(event.message);
this.editor.setText("");
@ -497,7 +574,7 @@ export class TuiRenderer {
this.streamingComponent = null;
}
this.pendingTools.clear();
this.editor.disableSubmit = false;
// Note: Don't need to re-enable submit - we never disable it
this.ui.requestRender();
break;
}
@ -810,6 +887,48 @@ export class TuiRenderer {
this.ui.setFocus(this.editor);
}
private showQueueModeSelector(): void {
// Create queue mode selector with current mode
this.queueModeSelector = new QueueModeSelectorComponent(
this.agent.getQueueMode(),
(mode) => {
// Apply the selected queue mode
this.agent.setQueueMode(mode);
// Save queue mode to settings
this.settingsManager.setQueueMode(mode);
// Show confirmation message with proper spacing
this.chatContainer.addChild(new Spacer(1));
const confirmText = new Text(chalk.dim(`Queue mode: ${mode}`), 1, 0);
this.chatContainer.addChild(confirmText);
// Hide selector and show editor again
this.hideQueueModeSelector();
this.ui.requestRender();
},
() => {
// Just hide the selector
this.hideQueueModeSelector();
this.ui.requestRender();
},
);
// Replace editor with selector
this.editorContainer.clear();
this.editorContainer.addChild(this.queueModeSelector);
this.ui.setFocus(this.queueModeSelector.getSelectList());
this.ui.requestRender();
}
private hideQueueModeSelector(): void {
// Replace selector with editor in the container
this.editorContainer.clear();
this.editorContainer.addChild(this.editor);
this.queueModeSelector = null;
this.ui.setFocus(this.editor);
}
private showModelSelector(): void {
// Create model selector with current model
this.modelSelector = new ModelSelectorComponent(
@ -1171,6 +1290,19 @@ export class TuiRenderer {
this.ui.requestRender();
}
private updatePendingMessagesDisplay(): void {
this.pendingMessagesContainer.clear();
if (this.queuedMessages.length > 0) {
this.pendingMessagesContainer.addChild(new Spacer(1));
for (const message of this.queuedMessages) {
const queuedText = chalk.dim("Queued: " + message);
this.pendingMessagesContainer.addChild(new TruncatedText(queuedText, 1, 0));
}
}
}
stop(): void {
if (this.loadingAnimation) {
this.loadingAnimation.stop();

View file

@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi",
"version": "0.7.27",
"version": "0.7.28",
"description": "CLI tool for managing vLLM deployments on GPU pods",
"type": "module",
"bin": {
@ -34,7 +34,7 @@
"node": ">=20.0.0"
},
"dependencies": {
"@mariozechner/pi-agent": "^0.7.27",
"@mariozechner/pi-agent": "^0.7.28",
"chalk": "^5.5.0"
},
"devDependencies": {}

View file

@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-proxy",
"version": "0.7.27",
"version": "0.7.28",
"type": "module",
"description": "CORS and authentication proxy for pi-ai",
"main": "dist/index.js",

View file

@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-tui",
"version": "0.7.27",
"version": "0.7.28",
"description": "Terminal User Interface library with differential rendering for efficient text-based applications",
"type": "module",
"main": "dist/index.js",

View file

@ -0,0 +1,78 @@
import type { Component } from "../tui.js";
import { visibleWidth } from "../utils.js";
/**
* Text component that truncates to fit viewport width
*/
export class TruncatedText implements Component {
private text: string;
private paddingX: number;
private paddingY: number;
constructor(text: string, paddingX: number = 0, paddingY: number = 0) {
this.text = text;
this.paddingX = paddingX;
this.paddingY = paddingY;
}
render(width: number): string[] {
const result: string[] = [];
// Add vertical padding above
for (let i = 0; i < this.paddingY; i++) {
result.push("");
}
// Calculate available width after horizontal padding
const availableWidth = Math.max(1, width - this.paddingX * 2);
// Truncate text if needed (accounting for ANSI codes)
let displayText = this.text;
const textVisibleWidth = visibleWidth(this.text);
if (textVisibleWidth > availableWidth) {
// Need to truncate - walk through the string character by character
let currentWidth = 0;
let truncateAt = 0;
let i = 0;
const ellipsisWidth = 3;
const targetWidth = availableWidth - ellipsisWidth;
while (i < this.text.length && currentWidth < targetWidth) {
// Skip ANSI escape sequences
if (this.text[i] === "\x1b" && this.text[i + 1] === "[") {
let j = i + 2;
while (j < this.text.length && !/[a-zA-Z]/.test(this.text[j])) {
j++;
}
i = j + 1;
continue;
}
const char = this.text[i];
const charWidth = visibleWidth(char);
if (currentWidth + charWidth > targetWidth) {
break;
}
currentWidth += charWidth;
truncateAt = i + 1;
i++;
}
displayText = this.text.substring(0, truncateAt) + "...";
}
// Add horizontal padding
const paddingStr = " ".repeat(this.paddingX);
result.push(paddingStr + displayText);
// Add vertical padding below
for (let i = 0; i < this.paddingY; i++) {
result.push("");
}
return result;
}
}

View file

@ -15,6 +15,7 @@ export { Markdown } from "./components/markdown.js";
export { type SelectItem, SelectList } from "./components/select-list.js";
export { Spacer } from "./components/spacer.js";
export { Text } from "./components/text.js";
export { TruncatedText } from "./components/truncated-text.js";
// Terminal interface and implementations
export { ProcessTerminal, type Terminal } from "./terminal.js";
export { type Component, Container, TUI } from "./tui.js";

View file

@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-web-ui",
"version": "0.7.27",
"version": "0.7.28",
"description": "Reusable web UI components for AI chat interfaces powered by @mariozechner/pi-ai",
"type": "module",
"main": "dist/index.js",
@ -18,8 +18,8 @@
},
"dependencies": {
"@lmstudio/sdk": "^1.5.0",
"@mariozechner/pi-ai": "^0.7.27",
"@mariozechner/pi-tui": "^0.7.27",
"@mariozechner/pi-ai": "^0.7.28",
"@mariozechner/pi-tui": "^0.7.28",
"docx-preview": "^0.3.7",
"jszip": "^3.10.1",
"lucide": "^0.544.0",