mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-15 08:03:39 +00:00
fix(agent): Properly handle ESC interrupt in TUI with centralized event emission
Fixed the interrupt mechanism to show "[Interrupted by user]" message when ESC is pressed: - Removed duplicate UI cleanup from ESC key handler that interfered with event processing - Added centralized interrupted event emission in exception handler when abort signal is detected - Removed duplicate event emissions from API call methods to prevent multiple messages - Added abort signal support to preflight reasoning check for proper cancellation - Simplified abort detection to only check signal state, not error messages
This commit is contained in:
parent
1f9d10cab0
commit
1d9b77298c
5 changed files with 218 additions and 22 deletions
|
|
@ -178,7 +178,13 @@ async function checkReasoningSupport(
|
|||
model: string,
|
||||
api: "completions" | "responses",
|
||||
baseURL?: string,
|
||||
signal?: AbortSignal,
|
||||
): Promise<boolean> {
|
||||
// Check if already aborted
|
||||
if (signal?.aborted) {
|
||||
throw new Error("Interrupted");
|
||||
}
|
||||
|
||||
// Check cache first
|
||||
const cacheKey = model;
|
||||
const cached = modelReasoningSupport.get(cacheKey);
|
||||
|
|
@ -200,7 +206,7 @@ async function checkReasoningSupport(
|
|||
effort: "low", // Use low instead of minimal to ensure we get summaries
|
||||
},
|
||||
};
|
||||
await client.responses.create(testRequest);
|
||||
await client.responses.create(testRequest, { signal });
|
||||
supportsReasoning = true;
|
||||
} catch (error) {
|
||||
supportsReasoning = false;
|
||||
|
|
@ -234,7 +240,7 @@ async function checkReasoningSupport(
|
|||
testRequest.reasoning_effort = "minimal";
|
||||
}
|
||||
|
||||
await client.chat.completions.create(testRequest);
|
||||
await client.chat.completions.create(testRequest, { signal });
|
||||
supportsReasoning = true;
|
||||
} catch (error) {
|
||||
supportsReasoning = false;
|
||||
|
|
@ -263,7 +269,6 @@ export async function callModelResponsesApi(
|
|||
while (!conversationDone) {
|
||||
// Check if we've been interrupted
|
||||
if (signal?.aborted) {
|
||||
await eventReceiver?.on({ type: "interrupted" });
|
||||
throw new Error("Interrupted");
|
||||
}
|
||||
|
||||
|
|
@ -340,7 +345,6 @@ export async function callModelResponsesApi(
|
|||
|
||||
case "function_call": {
|
||||
if (signal?.aborted) {
|
||||
await eventReceiver?.on({ type: "interrupted" });
|
||||
throw new Error("Interrupted");
|
||||
}
|
||||
|
||||
|
|
@ -406,7 +410,6 @@ export async function callModelChatCompletionsApi(
|
|||
|
||||
while (!assistantResponded) {
|
||||
if (signal?.aborted) {
|
||||
await eventReceiver?.on({ type: "interrupted" });
|
||||
throw new Error("Interrupted");
|
||||
}
|
||||
|
||||
|
|
@ -456,7 +459,6 @@ export async function callModelChatCompletionsApi(
|
|||
for (const toolCall of message.tool_calls) {
|
||||
// Check if interrupted before executing tool
|
||||
if (signal?.aborted) {
|
||||
await eventReceiver?.on({ type: "interrupted" });
|
||||
throw new Error("Interrupted");
|
||||
}
|
||||
|
||||
|
|
@ -576,6 +578,7 @@ export class Agent {
|
|||
this.config.model,
|
||||
this.config.api,
|
||||
this.config.baseURL,
|
||||
this.abortController.signal,
|
||||
);
|
||||
}
|
||||
|
||||
|
|
@ -601,9 +604,10 @@ export class Agent {
|
|||
);
|
||||
}
|
||||
} catch (e) {
|
||||
// Check if this was an interruption
|
||||
const errorMessage = e instanceof Error ? e.message : String(e);
|
||||
if (errorMessage === "Interrupted" || this.abortController.signal.aborted) {
|
||||
// Check if this was an interruption by checking the abort signal
|
||||
if (this.abortController.signal.aborted) {
|
||||
// Emit interrupted event so UI can clean up properly
|
||||
await this.comboReceiver?.on({ type: "interrupted" });
|
||||
return;
|
||||
}
|
||||
throw e;
|
||||
|
|
|
|||
|
|
@ -113,19 +113,8 @@ export class TuiRenderer implements AgentEventReceiver {
|
|||
this.onInterruptCallback();
|
||||
}
|
||||
|
||||
// Stop the loading animation immediately
|
||||
if (this.currentLoadingAnimation) {
|
||||
this.currentLoadingAnimation.stop();
|
||||
this.statusContainer.clear();
|
||||
this.currentLoadingAnimation = null;
|
||||
}
|
||||
|
||||
// Don't show message here - the interrupted event will handle it
|
||||
|
||||
// Re-enable editor submission
|
||||
this.editor.disableSubmit = false;
|
||||
|
||||
this.ui.requestRender();
|
||||
// Don't do any UI cleanup here - let the interrupted event handle it
|
||||
// This avoids race conditions and ensures the interrupted message is shown
|
||||
|
||||
// Don't forward to editor
|
||||
return false;
|
||||
|
|
@ -280,6 +269,8 @@ export class TuiRenderer implements AgentEventReceiver {
|
|||
this.chatContainer.addChild(new TextComponent(chalk.red("[Interrupted by user]"), { bottom: 1 }));
|
||||
// Re-enable editor submission
|
||||
this.editor.disableSubmit = false;
|
||||
// Explicitly request render to ensure message is displayed
|
||||
this.ui.requestRender();
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,79 @@
|
|||
# Analysis: Interrupted Message Not Showing in TUI
|
||||
|
||||
## Problem Summary
|
||||
When pressing ESC to interrupt the agent while it's working, the "interrupted" message is not appearing in the TUI interface.
|
||||
|
||||
## Research Findings
|
||||
|
||||
### Interrupt Handling Flow
|
||||
|
||||
1. **ESC Key Detection** (TuiRenderer line 110)
|
||||
- ESC key is detected as `\x1b` in the `onGlobalKeyPress` handler
|
||||
- Only triggers when `this.currentLoadingAnimation` is active (agent is processing)
|
||||
|
||||
2. **Immediate UI Cleanup** (TuiRenderer lines 112-128)
|
||||
- Calls `this.onInterruptCallback()` (which calls `agent.interrupt()`)
|
||||
- Stops loading animation and clears status container
|
||||
- Re-enables text editor submission
|
||||
- Requests UI render
|
||||
|
||||
3. **Agent Interruption** (Agent.ts line 615-617)
|
||||
- `agent.interrupt()` calls `this.abortController?.abort()`
|
||||
- This triggers AbortSignal in ongoing API calls
|
||||
|
||||
4. **Interrupted Event Generation** (Agent.ts multiple locations)
|
||||
- When signal is aborted, code checks `signal?.aborted`
|
||||
- Emits `{ type: "interrupted" }` event via `eventReceiver?.on()`
|
||||
- Throws `new Error("Interrupted")` to exit processing
|
||||
|
||||
5. **Message Display** (TuiRenderer line 272-283)
|
||||
- Handles `"interrupted"` event
|
||||
- Adds red "[Interrupted by user]" message to chat container
|
||||
- Requests render
|
||||
|
||||
### Root Cause Analysis
|
||||
|
||||
The issue appears to be a **race condition with duplicate cleanup**:
|
||||
|
||||
1. When ESC is pressed, the key handler **immediately** (lines 115-120):
|
||||
- Stops the loading animation
|
||||
- Clears the status container
|
||||
- Sets `currentLoadingAnimation = null`
|
||||
|
||||
2. Later, when the "interrupted" event arrives (lines 273-277), it tries to:
|
||||
- Stop the loading animation again (but it's already null)
|
||||
- Clear the status container again (already cleared)
|
||||
|
||||
3. The comment on line 123 says "Don't show message here - the interrupted event will handle it", but the event handler at line 280 **does** add the message to the chat container.
|
||||
|
||||
### The Actual Problem
|
||||
|
||||
Looking closely at the code flow:
|
||||
|
||||
1. ESC handler clears animation and calls `agent.interrupt()` (synchronous)
|
||||
2. Agent aborts the controller (synchronous)
|
||||
3. API call code detects abort and emits "interrupted" event (asynchronous)
|
||||
4. TUI renderer receives "interrupted" event and adds message (asynchronous)
|
||||
|
||||
The issue is likely that:
|
||||
- The interrupted event IS being emitted and handled
|
||||
- The message IS being added to the chat container
|
||||
- But the UI render might not be properly triggered or the differential rendering isn't detecting the change
|
||||
|
||||
### Additional Issues Found
|
||||
|
||||
1. **Duplicate Animation Cleanup**: The loading animation is stopped twice - once in the ESC handler and once in the interrupted event handler. This is redundant but shouldn't cause the missing message.
|
||||
|
||||
2. **Render Request Timing**: The ESC handler requests a render immediately after clearing the UI, then the interrupted event handler adds the message but doesn't explicitly request another render (it relies on the Container's automatic render request).
|
||||
|
||||
3. **Container Change Detection**: Recent commit 192d8d2 fixed container change detection issues. The interrupted message addition might not be triggering proper change detection.
|
||||
|
||||
## Solution Approach
|
||||
|
||||
The fix needs to ensure the interrupted message is properly displayed. Options:
|
||||
|
||||
1. **Add explicit render request** after adding the interrupted message
|
||||
2. **Remove duplicate cleanup** in the ESC handler and let the event handler do all the work
|
||||
3. **Ensure proper change detection** when adding the message to the chat container
|
||||
|
||||
The cleanest solution is likely option 2 - let the interrupted event handler do all the UI updates to avoid race conditions and ensure proper sequencing.
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
# Fix interrupted message not showing when ESC pressed in agent TUI
|
||||
|
||||
**Status:** Done
|
||||
**Agent PID:** 47968
|
||||
|
||||
## Original Todo
|
||||
agent/tui: not seeing a red "interrupted" message anymore if i press ESC while agent works
|
||||
|
||||
## Description
|
||||
Fix the issue where the "interrupted" message is not displayed in the TUI when pressing ESC to interrupt the agent while it's processing. The root cause is duplicate UI cleanup in the ESC key handler that interferes with the asynchronous interrupted event handler.
|
||||
|
||||
*Read [analysis.md](./analysis.md) in full for detailed codebase research and context*
|
||||
|
||||
## Implementation Plan
|
||||
Remove duplicate UI cleanup from ESC key handler and ensure interrupted event handler properly displays the message:
|
||||
- [x] Remove duplicate loading animation cleanup from ESC key handler in tui-renderer.ts (lines 115-120)
|
||||
- [x] Add explicit render request after adding interrupted message (line 280)
|
||||
- [x] Fix core issue: Emit "interrupted" event when API call is aborted (agent.ts line 606-607)
|
||||
- [x] Pass abort signal to preflight reasoning check
|
||||
- [x] Test interruption during API call (e.g., "write a poem")
|
||||
- [x] Verify "[Interrupted by user]" message appears and UI is restored
|
||||
|
||||
## Notes
|
||||
[Implementation notes]
|
||||
|
|
@ -1,3 +1,101 @@
|
|||
- agent/tui: interrupting requests to model via ESC doesn't work. Interrupting bash tool works.
|
||||
|
||||
- agent/tui: "read all README.md files except in node_modules", gpt-5. wait for completion, then send a new message. Getting garbled output. this happens for both of the renderDifferential and renderDifferentialSurgical methods. We need to emulate this in a test and get to the bottom of it.
|
||||
```markdown
|
||||
>> pi interactive chat <<<
|
||||
Press Escape to interrupt while processing
|
||||
Press CTRL+C to clear the text editorM deployments and building AI agents.peScript API)
|
||||
Press CTRL+C twice quickly to exitdeterministic programs (via JSON mode in any language or the
|
||||
## PackagesAPI)
|
||||
[user]iding your own system prompts and tools
|
||||
read all README.md files except in node_modulesrminal UI library with differential rendering
|
||||
- **[@mariozechner/pi-agent](packages/agent)** - General-purpose agent with tool calling and session
|
||||
[assistant]re lines)
|
||||
[tool] glob({"pattern":"**/README.md"}) - CLI for managing vLLM deployments on GPU pods
|
||||
README.mdad({"path": "packages/pods/README.md"})
|
||||
node_modules/@biomejs/biome/README.md
|
||||
node_modules/@esbuild/darwin-arm64/README.md
|
||||
node_modules/@types/node/README.mdnt/README.md"}) vLLM configuration for agentic workloads.
|
||||
node_modules/@xterm/headless/README.md
|
||||
node_modules/@xterm/xterm/README.md
|
||||
node_modules/chalk/readme.md tool calling and session persistence, modeled after Claude Code but
|
||||
node_modules/esbuild/README.md. It comes with a built-in TUI (also modeled after Claude Code) for
|
||||
node_modules/fsevents/README.md
|
||||
node_modules/get-tsconfig/README.md
|
||||
... (59 more lines)ned to be easy:
|
||||
- Writing custom UIs on top of it (via JSON mode in any language or the TypeScript API)
|
||||
[tool] read({"path": "README.md"})deterministic programs (via JSON mode in any language or the
|
||||
# Pi MonorepoI)ath": "packages/tui/README.md"})
|
||||
- Providing your own system prompts and tools
|
||||
A collection of tools for managing LLM deployments and building AI agents.
|
||||
Terminal UI framework with surgical differential rendering for building flicker-free interactive CLI
|
||||
## Packagesre lines)
|
||||
|
||||
- **[@mariozechner/pi-tui](packages/tui)** - Terminal UI library with differential rendering
|
||||
- **[@mariozechner/pi-agent](packages/agent)** - General-purpose agent with tool calling and session
|
||||
persistencel Differential Rendering**: Three-strategy system that minimizes redraws to 1-2 lines for
|
||||
- **[@mariozechner/pi](packages/pods)** - CLI for managing vLLM deployments on GPU podsads.
|
||||
- **Scrollback Buffer Preservation**: Correctly maintains terminal history when content exceeds
|
||||
... (76 more lines)
|
||||
- **Zero Flicker**: Components like text editors remain perfectly still while other parts update
|
||||
[tool] read({"path": "packages/agent/README.md"})tocomplete, selection lists, markdown rendering
|
||||
# pi-agentl -g @mariozechner/pi
|
||||
```
|
||||
A general-purpose agent with tool calling and session persistence, modeled after Claude Code but
|
||||
extremely hackable and minimal. It comes with a built-in TUI (also modeled after Claude Code) for────╮
|
||||
interactive use. │
|
||||
[tool] read({"path": "packages/tui/README.md"})──────────────────────────────────────────────────────╯
|
||||
Everything is designed to be easy:
|
||||
- Writing custom UIs on top of it (via JSON mode in any language or the TypeScript API)
|
||||
- Using it for inference steps in deterministic programs (via JSON mode in any language or theve CLI
|
||||
TypeScript API)
|
||||
- Providing your own system prompts and tools
|
||||
- Working with various LLM providers or self-hosted LLMs
|
||||
|
||||
... (422 more lines)ntial Rendering**: Three-strategy system that minimizes redraws to 1-2 lines for
|
||||
typical updates
|
||||
[tool] read({"path": "packages/pods/README.md"})maintains terminal history when content exceeds
|
||||
# piport
|
||||
- **Zero Flicker**: Components like text editors remain perfectly still while other parts update
|
||||
Deploy and manage LLMs on GPU pods with automatic vLLM configuration for agentic workloads.ering
|
||||
... (570 more lines)
|
||||
## Installation
|
||||
[user]
|
||||
```bash
|
||||
npm install -g @mariozechner/pi
|
||||
```sistant]
|
||||
⠴ Thinking...
|
||||
... (501 more lines)
|
||||
|
||||
[tool] read({"path": "packages/tui/README.md"})──────────────────────────────────────────────────────╮
|
||||
# @mariozechner/pi-tui │
|
||||
╰────────────────────────────────────────────────────────────────────────────────────────────────────╯
|
||||
Terminal UI framework with surgical differential rendering for building flicker-free interactive CLI
|
||||
applications.
|
||||
|
||||
## Features
|
||||
|
||||
- **Surgical Differential Rendering**: Three-strategy system that minimizes redraws to 1-2 lines for
|
||||
typical updates
|
||||
- **Scrollback Buffer Preservation**: Correctly maintains terminal history when content exceeds
|
||||
viewport
|
||||
- **Zero Flicker**: Components like text editors remain perfectly still while other parts update
|
||||
- **Interactive Components**: Text editor with autocomplete, selection lists, markdown rendering
|
||||
... (570 more lines)
|
||||
|
||||
[user]
|
||||
l
|
||||
|
||||
[assistant]
|
||||
Do you want me to list the current directory contents? If yes, should I include hidden files and subdir
|
||||
ectories?
|
||||
|
||||
|
||||
╭────────────────────────────────────────────────────────────────────────────────────────────────────╮
|
||||
│ > │
|
||||
╰────────────────────────────────────────────────────────────────────────────────────────────────────╯
|
||||
↑14,783 ↓160 ⚡128 ⚒ 5
|
||||
```
|
||||
|
||||
- pods: pi start outputs all models that can be run on the pod. however, it doesn't check the vllm version. e.g. gpt-oss can only run via vllm+gpt-oss. glm4.5 can only run on vllm nightly.
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue