WIP: Refactor agent package - not compiling

- Renamed AppMessage to AgentMessage throughout
- New agent-loop.ts with AgentLoopContext, AgentLoopConfig
- Removed transport abstraction, Agent now takes streamFn directly
- Extracted streamProxy to proxy.ts utility
- Removed agent-loop from pi-ai (now in agent package)
- Updated consumers (coding-agent, mom) for AgentMessage rename
- Tests updated but some consumers still need migration

Known issues:
- AgentTool, AgentToolResult not exported from pi-ai
- Attachment not exported from pi-agent-core
- ProviderTransport removed but still referenced
- messageTransformer -> convertToLlm migration incomplete
- CustomMessages declaration merging not working properly
This commit is contained in:
Mario Zechner 2025-12-28 09:23:38 +01:00
parent f7ef44dc38
commit a055fd4481
32 changed files with 1312 additions and 2009 deletions

View file

@ -13,7 +13,14 @@
* Modes use this class and add their own I/O layer on top.
*/
import type { Agent, AgentEvent, AgentState, AppMessage, Attachment, ThinkingLevel } from "@mariozechner/pi-agent-core";
import type {
Agent,
AgentEvent,
AgentMessage,
AgentState,
Attachment,
ThinkingLevel,
} from "@mariozechner/pi-agent-core";
import type { AssistantMessage, Message, Model, TextContent } from "@mariozechner/pi-ai";
import { isContextOverflow, modelsAreEqual, supportsXhigh } from "@mariozechner/pi-ai";
import { getAuthPath } from "../config.js";
@ -403,7 +410,7 @@ export class AgentSession {
}
/** All messages including custom types like BashExecutionMessage */
get messages(): AppMessage[] {
get messages(): AgentMessage[] {
return this.agent.state.messages;
}

View file

@ -5,17 +5,17 @@
* and after compaction the session is reloaded.
*/
import type { AppMessage } from "@mariozechner/pi-agent-core";
import type { AgentMessage } from "@mariozechner/pi-agent-core";
import type { AssistantMessage, Model, Usage } from "@mariozechner/pi-ai";
import { complete } from "@mariozechner/pi-ai";
import { messageTransformer } from "./messages.js";
import { type CompactionEntry, createSummaryMessage, type SessionEntry } from "./session-manager.js";
/**
* Extract AppMessage from an entry if it produces one.
* Extract AgentMessage from an entry if it produces one.
* Returns null for entries that don't contribute to LLM context.
*/
function getMessageFromEntry(entry: SessionEntry): AppMessage | null {
function getMessageFromEntry(entry: SessionEntry): AgentMessage | null {
if (entry.type === "message") {
return entry.message;
}
@ -73,7 +73,7 @@ export function calculateContextTokens(usage: Usage): number {
* Get usage from an assistant message if available.
* Skips aborted and error messages as they don't have valid usage data.
*/
function getAssistantUsage(msg: AppMessage): Usage | null {
function getAssistantUsage(msg: AgentMessage): Usage | null {
if (msg.role === "assistant" && "usage" in msg) {
const assistantMsg = msg as AssistantMessage;
if (assistantMsg.stopReason !== "aborted" && assistantMsg.stopReason !== "error" && assistantMsg.usage) {
@ -113,7 +113,7 @@ export function shouldCompact(contextTokens: number, contextWindow: number, sett
* Estimate token count for a message using chars/4 heuristic.
* This is conservative (overestimates tokens).
*/
export function estimateTokens(message: AppMessage): number {
export function estimateTokens(message: AgentMessage): number {
let chars = 0;
// Handle bashExecution messages
@ -323,7 +323,7 @@ Be concise, structured, and focused on helping the next LLM seamlessly continue
* Generate a summary of the conversation using the LLM.
*/
export async function generateSummary(
currentMessages: AppMessage[],
currentMessages: AgentMessage[],
model: Model<any>,
reserveTokens: number,
apiKey: string,
@ -371,9 +371,9 @@ export interface CompactionPreparation {
/** UUID of first entry to keep */
firstKeptEntryId: string;
/** Messages that will be summarized and discarded */
messagesToSummarize: AppMessage[];
messagesToSummarize: AgentMessage[];
/** Messages that will be kept after the summary (recent turns) */
messagesToKeep: AppMessage[];
messagesToKeep: AgentMessage[];
tokensBefore: number;
boundaryStart: number;
}
@ -408,14 +408,14 @@ export function prepareCompaction(entries: SessionEntry[], settings: CompactionS
const historyEnd = cutPoint.isSplitTurn ? cutPoint.turnStartIndex : cutPoint.firstKeptEntryIndex;
// Messages to summarize (will be discarded after summary)
const messagesToSummarize: AppMessage[] = [];
const messagesToSummarize: AgentMessage[] = [];
for (let i = boundaryStart; i < historyEnd; i++) {
const msg = getMessageFromEntry(entries[i]);
if (msg) messagesToSummarize.push(msg);
}
// Messages to keep (recent turns, kept after summary)
const messagesToKeep: AppMessage[] = [];
const messagesToKeep: AgentMessage[] = [];
for (let i = cutPoint.firstKeptEntryIndex; i < boundaryEnd; i++) {
const msg = getMessageFromEntry(entries[i]);
if (msg) messagesToKeep.push(msg);
@ -482,7 +482,7 @@ export async function compact(
// Extract messages for history summary (before the turn that contains the cut point)
const historyEnd = cutResult.isSplitTurn ? cutResult.turnStartIndex : cutResult.firstKeptEntryIndex;
const historyMessages: AppMessage[] = [];
const historyMessages: AgentMessage[] = [];
for (let i = boundaryStart; i < historyEnd; i++) {
const msg = getMessageFromEntry(entries[i]);
if (msg) historyMessages.push(msg);
@ -499,7 +499,7 @@ export async function compact(
}
// Extract messages for turn prefix summary (if splitting a turn)
const turnPrefixMessages: AppMessage[] = [];
const turnPrefixMessages: AgentMessage[] = [];
if (cutResult.isSplitTurn) {
for (let i = cutResult.turnStartIndex; i < cutResult.firstKeptEntryIndex; i++) {
const msg = getMessageFromEntry(entries[i]);
@ -550,7 +550,7 @@ export async function compact(
* Generate a summary for a turn prefix (when splitting a turn).
*/
async function generateTurnPrefixSummary(
messages: AppMessage[],
messages: AgentMessage[],
model: Model<any>,
reserveTokens: number,
apiKey: string,

View file

@ -5,7 +5,7 @@
* and interact with the user via UI primitives.
*/
import type { AppMessage } from "@mariozechner/pi-agent-core";
import type { AgentMessage } from "@mariozechner/pi-agent-core";
import type { ImageContent, Message, Model, TextContent, ToolResultMessage } from "@mariozechner/pi-ai";
import type { Component } from "@mariozechner/pi-tui";
import type { Theme } from "../../modes/interactive/theme/theme.js";
@ -151,7 +151,7 @@ export type SessionEvent =
* Event data for context event.
* Fired before each LLM call, allowing hooks to modify context non-destructively.
* Original session messages are NOT modified - only the messages sent to the LLM are affected.
* Messages are already in LLM format (Message[], not AppMessage[]).
* Messages are already in LLM format (Message[], not AgentMessage[]).
*/
export interface ContextEvent {
type: "context";
@ -172,7 +172,7 @@ export interface AgentStartEvent {
*/
export interface AgentEndEvent {
type: "agent_end";
messages: AppMessage[];
messages: AgentMessage[];
}
/**
@ -190,7 +190,7 @@ export interface TurnStartEvent {
export interface TurnEndEvent {
type: "turn_end";
turnIndex: number;
message: AppMessage;
message: AgentMessage;
toolResults: ToolResultMessage[];
}

View file

@ -1,11 +1,11 @@
/**
* Custom message types and transformers for the coding agent.
*
* Extends the base AppMessage type with coding-agent specific message types,
* Extends the base AgentMessage type with coding-agent specific message types,
* and provides a transformer to convert them to LLM-compatible messages.
*/
import type { AppMessage } from "@mariozechner/pi-agent-core";
import type { AgentMessage } from "@mariozechner/pi-agent-core";
import type { Message } from "@mariozechner/pi-ai";
// ============================================================================
@ -56,14 +56,14 @@ declare module "@mariozechner/pi-agent-core" {
/**
* Type guard for BashExecutionMessage.
*/
export function isBashExecutionMessage(msg: AppMessage | Message): msg is BashExecutionMessage {
export function isBashExecutionMessage(msg: AgentMessage | Message): msg is BashExecutionMessage {
return (msg as BashExecutionMessage).role === "bashExecution";
}
/**
* Type guard for HookAppMessage.
* Type guard for HookMessage.
*/
export function isHookMessage(msg: AppMessage | Message): msg is HookMessage {
export function isHookMessage(msg: AgentMessage | Message): msg is HookMessage {
return (msg as HookMessage).role === "hookMessage";
}
@ -97,13 +97,13 @@ export function bashExecutionToText(msg: BashExecutionMessage): string {
// ============================================================================
/**
* Transform AppMessages (including custom types) to LLM-compatible Messages.
* Transform AgentMessages (including custom types) to LLM-compatible Messages.
*
* This is used by:
* - Agent's messageTransformer option (for prompt calls)
* - Compaction's generateSummary (for summarization)
*/
export function messageTransformer(messages: AppMessage[]): Message[] {
export function messageTransformer(messages: AgentMessage[]): Message[] {
return messages
.map((m): Message | null => {
if (isBashExecutionMessage(m)) {

View file

@ -1,4 +1,4 @@
import type { AppMessage } from "@mariozechner/pi-agent-core";
import type { AgentMessage } from "@mariozechner/pi-agent-core";
import type { ImageContent, TextContent } from "@mariozechner/pi-ai";
import { randomUUID } from "crypto";
import {
@ -36,7 +36,7 @@ export interface SessionEntryBase {
export interface SessionMessageEntry extends SessionEntryBase {
type: "message";
message: AppMessage;
message: AgentMessage;
}
export interface ThinkingLevelChangeEntry extends SessionEntryBase {
@ -130,7 +130,7 @@ export interface SessionTreeNode {
}
export interface SessionContext {
messages: AppMessage[];
messages: AgentMessage[];
thinkingLevel: string;
model: { provider: string; modelId: string } | null;
}
@ -154,7 +154,7 @@ export const SUMMARY_SUFFIX = `
</summary>`;
/** Exported for compaction.test.ts */
export function createSummaryMessage(summary: string, timestamp: string): AppMessage {
export function createSummaryMessage(summary: string, timestamp: string): AgentMessage {
return {
role: "user",
content: SUMMARY_PREFIX + summary + SUMMARY_SUFFIX,
@ -162,8 +162,8 @@ export function createSummaryMessage(summary: string, timestamp: string): AppMes
};
}
/** Convert CustomMessageEntry to AppMessage format */
function createCustomMessage(entry: CustomMessageEntry): AppMessage {
/** Convert CustomMessageEntry to AgentMessage format */
function createCustomMessage(entry: CustomMessageEntry): AgentMessage {
return {
role: "user",
content: entry.content,
@ -323,7 +323,7 @@ export function buildSessionContext(
// 1. Emit summary first (entry = compaction)
// 2. Emit kept messages (from firstKeptEntryId up to compaction)
// 3. Emit messages after compaction
const messages: AppMessage[] = [];
const messages: AgentMessage[] = [];
if (compaction) {
// Emit summary first
@ -595,7 +595,7 @@ export class SessionManager {
}
/** Append a message as child of current leaf, then advance leaf. Returns entry id. */
appendMessage(message: AppMessage): string {
appendMessage(message: AgentMessage): string {
const entry: SessionMessageEntry = {
type: "message",
id: generateId(this.byId),

View file

@ -6,7 +6,7 @@
import * as fs from "node:fs";
import * as os from "node:os";
import * as path from "node:path";
import type { AgentState, AppMessage } from "@mariozechner/pi-agent-core";
import type { AgentMessage, AgentState } from "@mariozechner/pi-agent-core";
import type { AssistantMessage, Message, OAuthProvider } from "@mariozechner/pi-ai";
import type { SlashCommand } from "@mariozechner/pi-tui";
import {
@ -1051,7 +1051,7 @@ export class InteractiveMode {
this.ui.requestRender();
}
private addMessageToChat(message: AppMessage): void {
private addMessageToChat(message: AgentMessage): void {
if (isBashExecutionMessage(message)) {
const component = new BashExecutionComponent(message.command, this.ui);
if (message.output) {

View file

@ -6,7 +6,7 @@
import { type ChildProcess, spawn } from "node:child_process";
import * as readline from "node:readline";
import type { AgentEvent, AppMessage, Attachment, ThinkingLevel } from "@mariozechner/pi-agent-core";
import type { AgentEvent, AgentMessage, Attachment, ThinkingLevel } from "@mariozechner/pi-agent-core";
import type { SessionStats } from "../../core/agent-session.js";
import type { BashResult } from "../../core/bash-executor.js";
import type { CompactionResult } from "../../core/compaction.js";
@ -349,9 +349,9 @@ export class RpcClient {
/**
* Get all messages in the session.
*/
async getMessages(): Promise<AppMessage[]> {
async getMessages(): Promise<AgentMessage[]> {
const response = await this.send({ type: "get_messages" });
return this.getData<{ messages: AppMessage[] }>(response).messages;
return this.getData<{ messages: AgentMessage[] }>(response).messages;
}
// =========================================================================

View file

@ -5,7 +5,7 @@
* Responses and events are emitted as JSON lines on stdout.
*/
import type { AppMessage, Attachment, ThinkingLevel } from "@mariozechner/pi-agent-core";
import type { AgentMessage, Attachment, ThinkingLevel } from "@mariozechner/pi-agent-core";
import type { Model } from "@mariozechner/pi-ai";
import type { SessionStats } from "../../core/agent-session.js";
import type { BashResult } from "../../core/bash-executor.js";
@ -161,7 +161,7 @@ export type RpcResponse =
}
// Messages
| { id?: string; type: "response"; command: "get_messages"; success: true; data: { messages: AppMessage[] } }
| { id?: string; type: "response"; command: "get_messages"; success: true; data: { messages: AgentMessage[] } }
// Error response (any command can fail)
| { id?: string; type: "response"; command: string; success: false; error: string };