Rewrite RPC documentation with accurate types

This commit is contained in:
Mario Zechner 2025-12-09 14:48:37 +01:00
parent 70b84532bb
commit dc9a4b0fe4
4 changed files with 727 additions and 529 deletions

View file

@@ -786,7 +786,19 @@ Execute shell commands directly and add output to the LLM context by prefixing w
- **History**: Commands are added to editor history (navigate with Up/Down arrows)
- **Visual feedback**: Editor border turns green in bash mode; cancelled commands show yellow warning
Output is automatically added to the conversation context, allowing the LLM to see command results without manual copy-paste.
**How bash output reaches the LLM:**
When you execute a bash command, the output is stored as a `BashExecutionMessage` in the conversation state. This message is transformed and sent to the LLM as part of your **next prompt**. The LLM sees it as a user message formatted like:
```
Ran `ls -la`
\`\`\`
total 48
drwxr-xr-x ...
\`\`\`
```
This means you can run multiple bash commands before sending a prompt, and all outputs will be included in the context together.
### Keyboard Shortcuts

File diff suppressed because it is too large Load diff

View file

@@ -86,9 +86,8 @@ export async function runRpcMode(session: AgentSession): Promise<never> {
// =================================================================
case "get_state": {
const model = session.model;
const state: RpcSessionState = {
model: model ? { provider: model.provider, id: model.id, contextWindow: model.contextWindow } : null,
model: session.model,
thinkingLevel: session.thinkingLevel,
isStreaming: session.isStreaming,
queueMode: session.queueMode,
@@ -112,7 +111,7 @@ export async function runRpcMode(session: AgentSession): Promise<never> {
return error(id, "set_model", `Model not found: ${command.provider}/${command.modelId}`);
}
await session.setModel(model);
return success(id, "set_model", { provider: model.provider, id: model.id });
return success(id, "set_model", model);
}
case "cycle_model": {
@@ -120,23 +119,12 @@ export async function runRpcMode(session: AgentSession): Promise<never> {
if (!result) {
return success(id, "cycle_model", null);
}
return success(id, "cycle_model", {
model: { provider: result.model.provider, id: result.model.id },
thinkingLevel: result.thinkingLevel,
isScoped: result.isScoped,
});
return success(id, "cycle_model", result);
}
case "get_available_models": {
const models = await session.getAvailableModels();
return success(id, "get_available_models", {
models: models.map((m) => ({
provider: m.provider,
id: m.id,
contextWindow: m.contextWindow,
reasoning: !!m.reasoning,
})),
});
return success(id, "get_available_models", { models });
}
// =================================================================

View file

@@ -6,6 +6,7 @@
*/
import type { AppMessage, Attachment, ThinkingLevel } from "@mariozechner/pi-agent-core";
import type { Model } from "@mariozechner/pi-ai";
import type { CompactionResult, SessionStats } from "../../core/agent-session.js";
import type { BashResult } from "../../core/bash-executor.js";
@@ -59,7 +60,7 @@ export type RpcCommand =
// ============================================================================
export interface RpcSessionState {
model: { provider: string; id: string; contextWindow: number } | null;
model: Model<any> | null;
thinkingLevel: ThinkingLevel;
isStreaming: boolean;
queueMode: "all" | "one-at-a-time";
@@ -91,21 +92,21 @@ export type RpcResponse =
type: "response";
command: "set_model";
success: true;
data: { provider: string; id: string };
data: Model<any>;
}
| {
id?: string;
type: "response";
command: "cycle_model";
success: true;
data: { model: { provider: string; id: string }; thinkingLevel: ThinkingLevel; isScoped: boolean } | null;
data: { model: Model<any>; thinkingLevel: ThinkingLevel; isScoped: boolean } | null;
}
| {
id?: string;
type: "response";
command: "get_available_models";
success: true;
data: { models: Array<{ provider: string; id: string; contextWindow: number; reasoning: boolean }> };
data: { models: Model<any>[] };
}
// Thinking