Merge remote-tracking branch 'origin/main' into feat/model-cycling-enhancements

This commit is contained in:
Mario Zechner 2025-11-21 21:16:05 +01:00
commit df3af27288
18 changed files with 264 additions and 173 deletions

30
package-lock.json generated
View file

@ -3847,11 +3847,11 @@
},
"packages/agent": {
"name": "@mariozechner/pi-agent",
"version": "0.8.4",
"version": "0.8.5",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-ai": "^0.8.3",
"@mariozechner/pi-tui": "^0.8.3"
"@mariozechner/pi-ai": "^0.8.4",
"@mariozechner/pi-tui": "^0.8.4"
},
"devDependencies": {
"@types/node": "^24.3.0",
@ -3881,7 +3881,7 @@
},
"packages/ai": {
"name": "@mariozechner/pi-ai",
"version": "0.8.4",
"version": "0.8.5",
"license": "MIT",
"dependencies": {
"@anthropic-ai/sdk": "^0.61.0",
@ -3922,12 +3922,12 @@
},
"packages/coding-agent": {
"name": "@mariozechner/pi-coding-agent",
"version": "0.8.4",
"version": "0.8.5",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-agent": "^0.8.3",
"@mariozechner/pi-ai": "^0.8.3",
"@mariozechner/pi-tui": "^0.8.3",
"@mariozechner/pi-agent": "^0.8.4",
"@mariozechner/pi-ai": "^0.8.4",
"@mariozechner/pi-tui": "^0.8.4",
"chalk": "^5.5.0",
"diff": "^8.0.2",
"glob": "^11.0.3"
@ -3964,10 +3964,10 @@
},
"packages/pods": {
"name": "@mariozechner/pi",
"version": "0.8.4",
"version": "0.8.5",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-agent": "^0.8.3",
"@mariozechner/pi-agent": "^0.8.4",
"chalk": "^5.5.0"
},
"bin": {
@ -3980,7 +3980,7 @@
},
"packages/proxy": {
"name": "@mariozechner/pi-proxy",
"version": "0.8.4",
"version": "0.8.5",
"dependencies": {
"@hono/node-server": "^1.14.0",
"hono": "^4.6.16"
@ -3996,7 +3996,7 @@
},
"packages/tui": {
"name": "@mariozechner/pi-tui",
"version": "0.8.4",
"version": "0.8.5",
"license": "MIT",
"dependencies": {
"@types/mime-types": "^2.1.4",
@ -4031,12 +4031,12 @@
},
"packages/web-ui": {
"name": "@mariozechner/pi-web-ui",
"version": "0.8.4",
"version": "0.8.5",
"license": "MIT",
"dependencies": {
"@lmstudio/sdk": "^1.5.0",
"@mariozechner/pi-ai": "^0.8.3",
"@mariozechner/pi-tui": "^0.8.3",
"@mariozechner/pi-ai": "^0.8.4",
"@mariozechner/pi-tui": "^0.8.4",
"docx-preview": "^0.3.7",
"jszip": "^3.10.1",
"lucide": "^0.544.0",

View file

@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-agent",
"version": "0.8.4",
"version": "0.8.5",
"description": "General-purpose agent with transport abstraction, state management, and attachment support",
"type": "module",
"main": "./dist/index.js",
@ -18,8 +18,8 @@
"prepublishOnly": "npm run clean && npm run build"
},
"dependencies": {
"@mariozechner/pi-ai": "^0.8.4",
"@mariozechner/pi-tui": "^0.8.4"
"@mariozechner/pi-ai": "^0.8.5",
"@mariozechner/pi-tui": "^0.8.5"
},
"keywords": [
"ai",

View file

@ -77,6 +77,8 @@ export class Agent {
private messageTransformer: (messages: AppMessage[]) => Message[] | Promise<Message[]>;
private messageQueue: Array<QueuedMessage<AppMessage>> = [];
private queueMode: "all" | "one-at-a-time";
private runningPrompt?: Promise<void>;
private resolveRunningPrompt?: () => void;
constructor(opts: AgentOptions) {
this._state = { ...this._state, ...opts.initialState };
@ -148,12 +150,37 @@ export class Agent {
this.abortController?.abort();
}
/**
 * Resolve once the in-flight prompt (if any) has finished.
 * When nothing is running, the returned promise is already settled.
 */
waitForIdle(): Promise<void> {
	const inFlight = this.runningPrompt;
	return inFlight !== undefined ? inFlight : Promise.resolve();
}
/**
 * Wipe conversation state back to a pristine session.
 * Does not cancel a running prompt — invoke abort() beforehand if one is in flight.
 */
reset() {
	// Drop queued inputs first, then reset the observable state fields.
	this.messageQueue = [];
	this._state.error = undefined;
	this._state.pendingToolCalls = new Set<string>();
	this._state.streamMessage = null;
	this._state.isStreaming = false;
	this._state.messages = [];
}
async prompt(input: string, attachments?: Attachment[]) {
const model = this._state.model;
if (!model) {
throw new Error("No model configured");
}
// Set up running prompt tracking
this.runningPrompt = new Promise<void>((resolve) => {
this.resolveRunningPrompt = resolve;
});
// Build user message with attachments
const content: Array<TextContent | ImageContent> = [{ type: "text", text: input }];
if (attachments?.length) {
@ -322,6 +349,9 @@ export class Agent {
this._state.streamMessage = null;
this._state.pendingToolCalls = new Set<string>();
this.abortController = undefined;
this.resolveRunningPrompt?.();
this.runningPrompt = undefined;
this.resolveRunningPrompt = undefined;
}
}

View file

@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-ai",
"version": "0.8.4",
"version": "0.8.5",
"description": "Unified LLM API with automatic model discovery and provider configuration",
"type": "module",
"main": "./dist/index.js",

View file

@ -5102,23 +5102,6 @@ export const MODELS = {
contextWindow: 131072,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-405b-instruct": {
id: "meta-llama/llama-3.1-405b-instruct",
name: "Meta: Llama 3.1 405B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 3.5,
output: 3.5,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 130815,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-70b-instruct": {
id: "meta-llama/llama-3.1-70b-instruct",
name: "Meta: Llama 3.1 70B Instruct",
@ -5136,6 +5119,23 @@ export const MODELS = {
contextWindow: 131072,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-405b-instruct": {
id: "meta-llama/llama-3.1-405b-instruct",
name: "Meta: Llama 3.1 405B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 3.5,
output: 3.5,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 130815,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"mistralai/mistral-nemo": {
id: "mistralai/mistral-nemo",
name: "Mistral: Mistral Nemo",
@ -5153,9 +5153,9 @@ export const MODELS = {
contextWindow: 131072,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-mini-2024-07-18": {
id: "openai/gpt-4o-mini-2024-07-18",
name: "OpenAI: GPT-4o-mini (2024-07-18)",
"openai/gpt-4o-mini": {
id: "openai/gpt-4o-mini",
name: "OpenAI: GPT-4o-mini",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
@ -5170,9 +5170,9 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-mini": {
id: "openai/gpt-4o-mini",
name: "OpenAI: GPT-4o-mini",
"openai/gpt-4o-mini-2024-07-18": {
id: "openai/gpt-4o-mini-2024-07-18",
name: "OpenAI: GPT-4o-mini (2024-07-18)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
@ -5272,23 +5272,6 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-2024-05-13": {
id: "openai/gpt-4o-2024-05-13",
name: "OpenAI: GPT-4o (2024-05-13)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 5,
output: 15,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4o": {
id: "openai/gpt-4o",
name: "OpenAI: GPT-4o",
@ -5323,6 +5306,23 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 64000,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-2024-05-13": {
id: "openai/gpt-4o-2024-05-13",
name: "OpenAI: GPT-4o (2024-05-13)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 5,
output: 15,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3-70b-instruct": {
id: "meta-llama/llama-3-70b-instruct",
name: "Meta: Llama 3 70B Instruct",
@ -5442,23 +5442,6 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-3.5-turbo-0613": {
id: "openai/gpt-3.5-turbo-0613",
name: "OpenAI: GPT-3.5 Turbo (older v0613)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 1,
output: 2,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 4095,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4-turbo-preview": {
id: "openai/gpt-4-turbo-preview",
name: "OpenAI: GPT-4 Turbo Preview",
@ -5476,6 +5459,23 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-3.5-turbo-0613": {
id: "openai/gpt-3.5-turbo-0613",
name: "OpenAI: GPT-3.5 Turbo (older v0613)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 1,
output: 2,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 4095,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"mistralai/mistral-small": {
id: "mistralai/mistral-small",
name: "Mistral Small",
@ -5578,23 +5578,6 @@ export const MODELS = {
contextWindow: 8191,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4": {
id: "openai/gpt-4",
name: "OpenAI: GPT-4",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 30,
output: 60,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 8191,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-3.5-turbo": {
id: "openai/gpt-3.5-turbo",
name: "OpenAI: GPT-3.5 Turbo",
@ -5612,6 +5595,23 @@ export const MODELS = {
contextWindow: 16385,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4": {
id: "openai/gpt-4",
name: "OpenAI: GPT-4",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 30,
output: 60,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 8191,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openrouter/auto": {
id: "openrouter/auto",
name: "OpenRouter: Auto Router",

View file

@ -2,6 +2,21 @@
## [Unreleased]
### Added
- **`/clear` Command**: New slash command to reset the conversation context and start a fresh session. Aborts any in-flight agent work, clears all messages, and creates a new session file. ([#48](https://github.com/badlogic/pi-mono/pull/48))
### Fixed
- **Markdown Link Rendering**: Fixed links with identical text and href (e.g., `https://github.com/badlogic/pi-mono/pull/48/files`) being rendered twice. Now correctly compares raw text instead of styled text (which contains ANSI codes) when determining if link text matches href.
## [0.8.5] - 2025-11-21
### Fixed
- **Path Completion Hanging**: Fixed catastrophic regex backtracking in path completion that caused the terminal to hang when text contained many `/` characters (e.g., URLs). Replaced complex regex with simple string operations. ([#18](https://github.com/badlogic/pi-mono/issues/18))
- **Autocomplete Arrow Keys**: Fixed issue where arrow keys would move both the autocomplete selection and the editor cursor simultaneously when the file selector list was shown.
## [0.8.4] - 2025-11-21
### Fixed

View file

@ -445,6 +445,16 @@ Logout from OAuth providers:
Shows a list of logged-in providers to logout from.
### /clear
Clear the conversation context and start a fresh session:
```
/clear
```
Aborts any in-flight agent work, clears all messages, and creates a new session file.
## Editor Features
The interactive input editor includes several productivity features:

View file

@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-coding-agent",
"version": "0.8.4",
"version": "0.8.5",
"description": "Coding agent CLI with read, bash, edit, write tools and session management",
"type": "module",
"bin": {
@ -22,9 +22,9 @@
"prepublishOnly": "npm run clean && npm run build"
},
"dependencies": {
"@mariozechner/pi-agent": "^0.8.4",
"@mariozechner/pi-ai": "^0.8.4",
"@mariozechner/pi-tui": "^0.8.4",
"@mariozechner/pi-agent": "^0.8.5",
"@mariozechner/pi-ai": "^0.8.5",
"@mariozechner/pi-tui": "^0.8.5",
"chalk": "^5.5.0",
"diff": "^8.0.2",
"glob": "^11.0.3"

View file

@ -21,17 +21,6 @@ const __dirname = dirname(__filename);
const packageJson = JSON.parse(readFileSync(join(__dirname, "../package.json"), "utf-8"));
const VERSION = packageJson.version;
const envApiKeyMap: Record<KnownProvider, string[]> = {
google: ["GEMINI_API_KEY"],
openai: ["OPENAI_API_KEY"],
anthropic: ["ANTHROPIC_OAUTH_TOKEN", "ANTHROPIC_API_KEY"],
xai: ["XAI_API_KEY"],
groq: ["GROQ_API_KEY"],
cerebras: ["CEREBRAS_API_KEY"],
openrouter: ["OPENROUTER_API_KEY"],
zai: ["ZAI_API_KEY"],
};
const defaultModelPerProvider: Record<KnownProvider, string> = {
anthropic: "claude-sonnet-4-5",
openai: "gpt-5.1-codex",
@ -478,14 +467,9 @@ async function runInteractiveMode(
scopedModels,
);
// Initialize TUI
// Initialize TUI (subscribes to agent events internally)
await renderer.init();
// Set interrupt callback
renderer.setInterruptCallback(() => {
agent.abort();
});
// Render any existing messages (from --continue mode)
renderer.renderInitialMessages(agent.state);
@ -494,12 +478,6 @@ async function runInteractiveMode(
renderer.showWarning(modelFallbackMessage);
}
// Subscribe to agent events
agent.subscribe(async (event) => {
// Pass all events to the renderer
await renderer.handleEvent(event, agent.state);
});
// Interactive loop
while (true) {
const userInput = await renderer.getUserInput();
@ -718,11 +696,6 @@ export async function main(args: string[]) {
// Load previous messages if continuing or resuming
// This may update initialModel if restoring from session
if (parsed.continue || parsed.resume) {
const messages = sessionManager.loadMessages();
if (messages.length > 0 && shouldPrintMessages) {
console.log(chalk.dim(`Loaded ${messages.length} messages from previous session`));
}
// Load and restore model (overrides initialModel if found and has API key)
const savedModel = sessionManager.loadModel();
if (savedModel) {
@ -871,9 +844,6 @@ export async function main(args: string[]) {
}
}
// Note: Session will be started lazily after first user+assistant message exchange
// (unless continuing/resuming, in which case it's already initialized)
// Log loaded context files (they're already in the system prompt)
if (shouldPrintMessages && !parsed.continue && !parsed.resume) {
const contextFiles = loadProjectContextFiles();
@ -885,19 +855,6 @@ export async function main(args: string[]) {
}
}
// Subscribe to agent events to save messages
agent.subscribe((event) => {
// Save messages on completion
if (event.type === "message_end") {
sessionManager.saveMessage(event.message);
// Check if we should initialize session now (after first user+assistant exchange)
if (sessionManager.shouldInitializeSession(agent.state.messages)) {
sessionManager.startSession(agent.state);
}
}
});
// Route to appropriate mode
if (mode === "rpc") {
// RPC mode - headless operation
@ -930,8 +887,6 @@ export async function main(args: string[]) {
}
} else {
// Parse current and last versions
const currentParts = VERSION.split(".").map(Number);
const current = { major: currentParts[0] || 0, minor: currentParts[1] || 0, patch: currentParts[2] || 0 };
const changelogPath = getChangelogPath();
const entries = parseChangelog(changelogPath);
const newEntries = getNewEntries(entries, lastVersion);

View file

@ -97,6 +97,13 @@ export class SessionManager {
this.sessionFile = join(this.sessionDir, `${timestamp}_${this.sessionId}.jsonl`);
}
/**
 * Discard the current session and begin a brand-new one.
 * Pending (unsaved) messages are dropped and a fresh session file is created.
 */
reset(): void {
	this.sessionInitialized = false;
	this.pendingMessages = [];
	this.initNewSession();
}
private findMostRecentlyModifiedSession(): string | null {
try {
const files = readdirSync(this.sessionDir)

View file

@ -53,7 +53,7 @@ export class TuiRenderer {
private isInitialized = false;
private onInputCallback?: (text: string) => void;
private loadingAnimation: Loader | null = null;
private onInterruptCallback?: () => void;
private lastSigintTime = 0;
private changelogMarkdown: string | null = null;
private newVersion: string | null = null;
@ -97,6 +97,9 @@ export class TuiRenderer {
// Tool output expansion state
private toolOutputExpanded = false;
// Agent subscription unsubscribe function
private unsubscribe?: () => void;
constructor(
agent: Agent,
sessionManager: SessionManager,
@ -173,6 +176,11 @@ export class TuiRenderer {
description: "Select color theme (opens selector UI)",
};
const clearCommand: SlashCommand = {
name: "clear",
description: "Clear context and start a fresh session",
};
// Setup autocomplete for file paths and slash commands
const autocompleteProvider = new CombinedAutocompleteProvider(
[
@ -186,6 +194,7 @@ export class TuiRenderer {
loginCommand,
logoutCommand,
queueCommand,
clearCommand,
],
process.cwd(),
);
@ -268,7 +277,7 @@ export class TuiRenderer {
// Set up custom key handlers on the editor
this.editor.onEscape = () => {
// Intercept Escape key when processing
if (this.loadingAnimation && this.onInterruptCallback) {
if (this.loadingAnimation) {
// Get all queued messages
const queuedText = this.queuedMessages.join("\n\n");
@ -289,7 +298,7 @@ export class TuiRenderer {
this.agent.clearMessageQueue();
// Abort
this.onInterruptCallback();
this.agent.abort();
}
};
@ -386,6 +395,13 @@ export class TuiRenderer {
return;
}
// Check for /clear command
if (text === "/clear") {
this.handleClearCommand();
this.editor.setText("");
return;
}
// Normal message submission - validate model and API key first
const currentModel = this.agent.state.model;
if (!currentModel) {
@ -439,6 +455,9 @@ export class TuiRenderer {
this.ui.start();
this.isInitialized = true;
// Subscribe to agent events for UI updates and session saving
this.subscribeToAgent();
// Set up theme file watcher for live reload
onThemeChange(() => {
this.ui.invalidate();
@ -447,7 +466,24 @@ export class TuiRenderer {
});
}
async handleEvent(event: AgentEvent, state: AgentState): Promise<void> {
/** Wire this renderer to the agent's event stream (UI updates + session persistence). */
private subscribeToAgent(): void {
	const onAgentEvent = async (event: AgentEvent) => {
		// Render the event first so the UI stays in sync with agent state.
		await this.handleEvent(event, this.agent.state);
		// Persist completed messages; lazily start the session file once the
		// first user+assistant exchange exists.
		if (event.type !== "message_end") return;
		this.sessionManager.saveMessage(event.message);
		if (this.sessionManager.shouldInitializeSession(this.agent.state.messages)) {
			this.sessionManager.startSession(this.agent.state);
		}
	};
	this.unsubscribe = this.agent.subscribe(onAgentEvent);
}
private async handleEvent(event: AgentEvent, state: AgentState): Promise<void> {
if (!this.isInitialized) {
await this.init();
}
@ -713,10 +749,6 @@ export class TuiRenderer {
});
}
setInterruptCallback(callback: () => void): void {
this.onInterruptCallback = callback;
}
private handleCtrlC(): void {
// Handle Ctrl+C double-press logic
const now = Date.now();
@ -1435,6 +1467,45 @@ export class TuiRenderer {
this.ui.requestRender();
}
/**
 * Implements the /clear slash command: tears down any in-flight agent work,
 * resets both the agent and the session manager (new session file), and
 * repaints the chat area with a confirmation message.
 *
 * Ordering matters here: we must unsubscribe BEFORE aborting so the abort's
 * trailing events never reach the UI handler, and we must wait for the agent
 * to go idle before resetting its state.
 *
 * NOTE(review): this is async but appears to be invoked without awaiting, so a
 * rejection here would be unhandled — confirm the call site or add a catch.
 */
private async handleClearCommand(): Promise<void> {
// Unsubscribe first to prevent processing abort events
this.unsubscribe?.();
// Abort and wait for completion so reset() below sees a quiescent agent
this.agent.abort();
await this.agent.waitForIdle();
// Stop loading animation
if (this.loadingAnimation) {
this.loadingAnimation.stop();
this.loadingAnimation = null;
}
this.statusContainer.clear();
// Reset agent and session (drops messages, queue, and starts a new session file)
this.agent.reset();
this.sessionManager.reset();
// Resubscribe to agent now that teardown is complete
this.subscribeToAgent();
// Clear UI state
this.chatContainer.clear();
this.pendingMessagesContainer.clear();
this.queuedMessages = [];
this.streamingComponent = null;
this.pendingTools.clear();
this.isFirstUserMessage = true;
// Show confirmation
this.chatContainer.addChild(new Spacer(1));
this.chatContainer.addChild(
new Text(theme.fg("accent", "✓ Context cleared") + "\n" + theme.fg("muted", "Started fresh session"), 1, 1),
);
this.ui.requestRender();
}
private updatePendingMessagesDisplay(): void {
this.pendingMessagesContainer.clear();

View file

@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi",
"version": "0.8.4",
"version": "0.8.5",
"description": "CLI tool for managing vLLM deployments on GPU pods",
"type": "module",
"bin": {
@ -34,7 +34,7 @@
"node": ">=20.0.0"
},
"dependencies": {
"@mariozechner/pi-agent": "^0.8.4",
"@mariozechner/pi-agent": "^0.8.5",
"chalk": "^5.5.0"
},
"devDependencies": {}

View file

@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-proxy",
"version": "0.8.4",
"version": "0.8.5",
"type": "module",
"description": "CORS and authentication proxy for pi-ai",
"main": "dist/index.js",

View file

@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-tui",
"version": "0.8.4",
"version": "0.8.5",
"description": "Terminal User Interface library with differential rendering for efficient text-based applications",
"type": "module",
"main": "dist/index.js",

View file

@ -283,21 +283,17 @@ export class CombinedAutocompleteProvider implements AutocompleteProvider {
return atMatch[0]; // Return the full @path pattern
}
// Match paths - including those ending with /, ~/, or any word at end for forced extraction
// This regex captures:
// - Paths starting from beginning of line or after space/quote/equals
// - Absolute paths starting with /
// - Relative paths with ./ or ../
// - Home directory paths with ~/
// - The path itself (can include / in the middle)
// - For forced extraction, capture any word at the end
const matches = text.match(/(?:^|[\s"'=])((?:\/|~\/|\.{1,2}\/)?(?:[^\s"'=]*\/?)*[^\s"'=]*)$/);
if (!matches) {
// If forced extraction and no matches, return empty string to trigger from current dir
return forceExtract ? "" : null;
}
// Simple approach: find the last whitespace/delimiter and extract the word after it
// This avoids catastrophic backtracking from nested quantifiers
const lastDelimiterIndex = Math.max(
text.lastIndexOf(" "),
text.lastIndexOf("\t"),
text.lastIndexOf('"'),
text.lastIndexOf("'"),
text.lastIndexOf("="),
);
const pathPrefix = matches[1] || "";
const pathPrefix = lastDelimiterIndex === -1 ? text : text.slice(lastDelimiterIndex + 1);
// For forced extraction (Tab key), always return something
if (forceExtract) {

View file

@ -192,6 +192,7 @@ export class Editor implements Component {
// Only pass arrow keys to the list, not Enter/Tab (we handle those directly)
if (data === "\x1b[A" || data === "\x1b[B") {
this.autocompleteList.handleInput(data);
return;
}
// If Tab was pressed, always apply the selection
@ -832,6 +833,11 @@ export class Editor implements Component {
this.tryTriggerAutocomplete(true);
}
/*
TODO(review): leftover pasted debug note accidentally committed — remove before release.
Original content referenced a CI job failure at
https://github.com/EsotericSoftware/spine-runtimes/actions/runs/19536643416/job/55932288317
*/
private forceFileAutocomplete(): void {
if (!this.autocompleteProvider) return;

View file

@ -321,7 +321,8 @@ export class Markdown implements Component {
case "link": {
const linkText = this.renderInlineTokens(token.tokens || []);
// If link text matches href, only show the link once
if (linkText === token.href) {
// Compare raw text (token.text) not styled text (linkText) since linkText has ANSI codes
if (token.text === token.href) {
result += this.theme.link(this.theme.underline(linkText)) + this.applyDefaultStyle("");
} else {
result +=

View file

@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-web-ui",
"version": "0.8.4",
"version": "0.8.5",
"description": "Reusable web UI components for AI chat interfaces powered by @mariozechner/pi-ai",
"type": "module",
"main": "dist/index.js",
@ -18,8 +18,8 @@
},
"dependencies": {
"@lmstudio/sdk": "^1.5.0",
"@mariozechner/pi-ai": "^0.8.4",
"@mariozechner/pi-tui": "^0.8.4",
"@mariozechner/pi-ai": "^0.8.5",
"@mariozechner/pi-tui": "^0.8.5",
"docx-preview": "^0.3.7",
"jszip": "^3.10.1",
"lucide": "^0.544.0",