mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-17 10:02:23 +00:00
Wire context event to preprocessor for per-LLM-call execution
- Change from contextTransform (runs once at agent start) to preprocessor
- preprocessor runs before EACH LLM call inside the agent loop
- ContextEvent now uses Message[] (pi-ai format) instead of AppMessage[]
- Deep copy handled by pi-ai preprocessor, not Agent

This enables:
- Pruning rules applied on every turn (not just at agent start)
- /prune during a long agent loop takes effect immediately
- Compaction can use the same transforms (future work)
This commit is contained in:
parent
77fe3f1a13
commit
a2515cf43f
6 changed files with 22 additions and 28 deletions
|
|
@ -55,8 +55,8 @@ export interface AgentOptions {
|
||||||
transport: AgentTransport;
|
transport: AgentTransport;
|
||||||
// Transform app messages to LLM-compatible messages before sending to transport
|
// Transform app messages to LLM-compatible messages before sending to transport
|
||||||
messageTransformer?: (messages: AppMessage[]) => Message[] | Promise<Message[]>;
|
messageTransformer?: (messages: AppMessage[]) => Message[] | Promise<Message[]>;
|
||||||
// Called before messageTransformer - can modify messages before they're sent to LLM (non-destructive)
|
// Called before each LLM call inside the agent loop - can modify messages (e.g., for pruning)
|
||||||
contextTransform?: (messages: AppMessage[]) => Promise<AppMessage[] | undefined>;
|
preprocessor?: (messages: Message[]) => Promise<Message[]>;
|
||||||
// Queue mode: "all" = send all queued messages at once, "one-at-a-time" = send one queued message per turn
|
// Queue mode: "all" = send all queued messages at once, "one-at-a-time" = send one queued message per turn
|
||||||
queueMode?: "all" | "one-at-a-time";
|
queueMode?: "all" | "one-at-a-time";
|
||||||
}
|
}
|
||||||
|
|
@ -77,7 +77,7 @@ export class Agent {
|
||||||
private abortController?: AbortController;
|
private abortController?: AbortController;
|
||||||
private transport: AgentTransport;
|
private transport: AgentTransport;
|
||||||
private messageTransformer: (messages: AppMessage[]) => Message[] | Promise<Message[]>;
|
private messageTransformer: (messages: AppMessage[]) => Message[] | Promise<Message[]>;
|
||||||
private contextTransform?: (messages: AppMessage[]) => Promise<AppMessage[] | undefined>;
|
private preprocessor?: (messages: Message[]) => Promise<Message[]>;
|
||||||
private messageQueue: Array<QueuedMessage<AppMessage>> = [];
|
private messageQueue: Array<QueuedMessage<AppMessage>> = [];
|
||||||
private queueMode: "all" | "one-at-a-time";
|
private queueMode: "all" | "one-at-a-time";
|
||||||
private runningPrompt?: Promise<void>;
|
private runningPrompt?: Promise<void>;
|
||||||
|
|
@ -87,7 +87,7 @@ export class Agent {
|
||||||
this._state = { ...this._state, ...opts.initialState };
|
this._state = { ...this._state, ...opts.initialState };
|
||||||
this.transport = opts.transport;
|
this.transport = opts.transport;
|
||||||
this.messageTransformer = opts.messageTransformer || defaultMessageTransformer;
|
this.messageTransformer = opts.messageTransformer || defaultMessageTransformer;
|
||||||
this.contextTransform = opts.contextTransform;
|
this.preprocessor = opts.preprocessor;
|
||||||
this.queueMode = opts.queueMode || "one-at-a-time";
|
this.queueMode = opts.queueMode || "one-at-a-time";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -286,6 +286,7 @@ export class Agent {
|
||||||
tools: this._state.tools,
|
tools: this._state.tools,
|
||||||
model,
|
model,
|
||||||
reasoning,
|
reasoning,
|
||||||
|
preprocessor: this.preprocessor,
|
||||||
getQueuedMessages: async <T>() => {
|
getQueuedMessages: async <T>() => {
|
||||||
if (this.queueMode === "one-at-a-time") {
|
if (this.queueMode === "one-at-a-time") {
|
||||||
if (this.messageQueue.length > 0) {
|
if (this.messageQueue.length > 0) {
|
||||||
|
|
@ -302,18 +303,7 @@ export class Agent {
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
// Apply context transform (hooks can modify messages non-destructively)
|
const llmMessages = await this.messageTransformer(this._state.messages);
|
||||||
// Deep copy so modifications don't affect the original state
|
|
||||||
let messagesToSend = this._state.messages;
|
|
||||||
if (this.contextTransform) {
|
|
||||||
const messagesCopy = JSON.parse(JSON.stringify(messagesToSend)) as AppMessage[];
|
|
||||||
const transformed = await this.contextTransform(messagesCopy);
|
|
||||||
if (transformed) {
|
|
||||||
messagesToSend = transformed;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const llmMessages = await this.messageTransformer(messagesToSend);
|
|
||||||
|
|
||||||
return { llmMessages, cfg, model };
|
return { llmMessages, cfg, model };
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -60,6 +60,7 @@ export class ProviderTransport implements AgentTransport {
|
||||||
// Resolve API key per assistant response (important for expiring OAuth tokens)
|
// Resolve API key per assistant response (important for expiring OAuth tokens)
|
||||||
getApiKey: this.options.getApiKey,
|
getApiKey: this.options.getApiKey,
|
||||||
getQueuedMessages: cfg.getQueuedMessages,
|
getQueuedMessages: cfg.getQueuedMessages,
|
||||||
|
preprocessor: cfg.preprocessor,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -9,6 +9,8 @@ export interface AgentRunConfig {
|
||||||
model: Model<any>;
|
model: Model<any>;
|
||||||
reasoning?: ReasoningEffort;
|
reasoning?: ReasoningEffort;
|
||||||
getQueuedMessages?: <T>() => Promise<QueuedMessage<T>[]>;
|
getQueuedMessages?: <T>() => Promise<QueuedMessage<T>[]>;
|
||||||
|
/** Called before each LLM call - can modify messages (e.g., for pruning) */
|
||||||
|
preprocessor?: (messages: Message[]) => Promise<Message[]>;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,7 @@
|
||||||
* Hook runner - executes hooks and manages their lifecycle.
|
* Hook runner - executes hooks and manages their lifecycle.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import type { AppMessage } from "@mariozechner/pi-agent-core";
|
import type { Message } from "@mariozechner/pi-ai";
|
||||||
import type { ModelRegistry } from "../model-registry.js";
|
import type { ModelRegistry } from "../model-registry.js";
|
||||||
import type { SessionManager } from "../session-manager.js";
|
import type { SessionManager } from "../session-manager.js";
|
||||||
import type { AppendEntryHandler, LoadedHook, SendMessageHandler } from "./loader.js";
|
import type { AppendEntryHandler, LoadedHook, SendMessageHandler } from "./loader.js";
|
||||||
|
|
@ -311,12 +311,13 @@ export class HookRunner {
|
||||||
/**
|
/**
|
||||||
* Emit a context event to all hooks.
|
* Emit a context event to all hooks.
|
||||||
* Handlers are chained - each gets the previous handler's output (if any).
|
* Handlers are chained - each gets the previous handler's output (if any).
|
||||||
* Returns the final modified messages, or undefined if no modifications.
|
* Returns the final modified messages, or the original if no modifications.
|
||||||
|
*
|
||||||
|
* Note: Messages are already deep-copied by the caller (pi-ai preprocessor).
|
||||||
*/
|
*/
|
||||||
async emitContext(messages: AppMessage[]): Promise<AppMessage[] | undefined> {
|
async emitContext(messages: Message[]): Promise<Message[]> {
|
||||||
const ctx = this.createContext();
|
const ctx = this.createContext();
|
||||||
let currentMessages = messages;
|
let currentMessages = messages;
|
||||||
let modified = false;
|
|
||||||
|
|
||||||
for (const hook of this.hooks) {
|
for (const hook of this.hooks) {
|
||||||
const handlers = hook.handlers.get("context");
|
const handlers = hook.handlers.get("context");
|
||||||
|
|
@ -331,7 +332,6 @@ export class HookRunner {
|
||||||
|
|
||||||
if (handlerResult && (handlerResult as ContextEventResult).messages) {
|
if (handlerResult && (handlerResult as ContextEventResult).messages) {
|
||||||
currentMessages = (handlerResult as ContextEventResult).messages!;
|
currentMessages = (handlerResult as ContextEventResult).messages!;
|
||||||
modified = true;
|
|
||||||
}
|
}
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
const message = err instanceof Error ? err.message : String(err);
|
const message = err instanceof Error ? err.message : String(err);
|
||||||
|
|
@ -344,6 +344,6 @@ export class HookRunner {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return modified ? currentMessages : undefined;
|
return currentMessages;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -6,7 +6,7 @@
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import type { AppMessage } from "@mariozechner/pi-agent-core";
|
import type { AppMessage } from "@mariozechner/pi-agent-core";
|
||||||
import type { ImageContent, Model, TextContent, ToolResultMessage } from "@mariozechner/pi-ai";
|
import type { ImageContent, Message, Model, TextContent, ToolResultMessage } from "@mariozechner/pi-ai";
|
||||||
import type { Component } from "@mariozechner/pi-tui";
|
import type { Component } from "@mariozechner/pi-tui";
|
||||||
import type { Theme } from "../../modes/interactive/theme/theme.js";
|
import type { Theme } from "../../modes/interactive/theme/theme.js";
|
||||||
import type { CompactionPreparation, CompactionResult } from "../compaction.js";
|
import type { CompactionPreparation, CompactionResult } from "../compaction.js";
|
||||||
|
|
@ -148,13 +148,14 @@ export type SessionEvent =
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Event data for context event.
|
* Event data for context event.
|
||||||
* Fired before messages are sent to the LLM, allowing hooks to modify context non-destructively.
|
* Fired before each LLM call, allowing hooks to modify context non-destructively.
|
||||||
* Original session messages are NOT modified - only the messages sent to the LLM are affected.
|
* Original session messages are NOT modified - only the messages sent to the LLM are affected.
|
||||||
|
* Messages are already in LLM format (Message[], not AppMessage[]).
|
||||||
*/
|
*/
|
||||||
export interface ContextEvent {
|
export interface ContextEvent {
|
||||||
type: "context";
|
type: "context";
|
||||||
/** Messages about to be sent to the LLM */
|
/** Messages about to be sent to the LLM (deep copy, safe to modify) */
|
||||||
messages: AppMessage[];
|
messages: Message[];
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
@ -330,7 +331,7 @@ export type HookEvent =
|
||||||
*/
|
*/
|
||||||
export interface ContextEventResult {
|
export interface ContextEventResult {
|
||||||
/** Modified messages to send instead of the original */
|
/** Modified messages to send instead of the original */
|
||||||
messages?: AppMessage[];
|
messages?: Message[];
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
||||||
|
|
@ -589,7 +589,7 @@ export async function createAgentSession(options: CreateAgentSessionOptions = {}
|
||||||
tools: allToolsArray,
|
tools: allToolsArray,
|
||||||
},
|
},
|
||||||
messageTransformer,
|
messageTransformer,
|
||||||
contextTransform: hookRunner
|
preprocessor: hookRunner
|
||||||
? async (messages) => {
|
? async (messages) => {
|
||||||
return hookRunner.emitContext(messages);
|
return hookRunner.emitContext(messages);
|
||||||
}
|
}
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue