Use proper HookAppMessage type instead of _hookData marker

Following the same pattern as BashExecutionMessage:
- HookAppMessage has role: 'hookMessage' with customType, content, display, details
- isHookAppMessage() type guard for checking message type
- messageTransformer converts to user message for LLM context
- TUI checks isHookAppMessage() for rendering as CustomMessageComponent

This makes the API clean for anyone building on AgentSession - they can
use the type guard instead of knowing about internal marker fields.
This commit is contained in:
Mario Zechner 2025-12-27 01:42:00 +01:00
parent 357bd946c2
commit 75a9c3c714
4 changed files with 111 additions and 81 deletions

View file

@@ -6359,23 +6359,6 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-70b-instruct": {
id: "meta-llama/llama-3.1-70b-instruct",
name: "Meta: Llama 3.1 70B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.39999999999999997,
output: 0.39999999999999997,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-8b-instruct": {
id: "meta-llama/llama-3.1-8b-instruct",
name: "Meta: Llama 3.1 8B Instruct",
@@ -6410,6 +6393,23 @@ export const MODELS = {
contextWindow: 10000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-70b-instruct": {
id: "meta-llama/llama-3.1-70b-instruct",
name: "Meta: Llama 3.1 70B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.39999999999999997,
output: 0.39999999999999997,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"mistralai/mistral-nemo": {
id: "mistralai/mistral-nemo",
name: "Mistral: Mistral Nemo",
@@ -6546,6 +6546,23 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-2024-05-13": {
id: "openai/gpt-4o-2024-05-13",
name: "OpenAI: GPT-4o (2024-05-13)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 5,
output: 15,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4o": {
id: "openai/gpt-4o",
name: "OpenAI: GPT-4o",
@@ -6580,23 +6597,6 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 64000,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-2024-05-13": {
id: "openai/gpt-4o-2024-05-13",
name: "OpenAI: GPT-4o (2024-05-13)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 5,
output: 15,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3-70b-instruct": {
id: "meta-llama/llama-3-70b-instruct",
name: "Meta: Llama 3 70B Instruct",
@@ -6716,23 +6716,6 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4-turbo-preview": {
id: "openai/gpt-4-turbo-preview",
name: "OpenAI: GPT-4 Turbo Preview",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 10,
output: 30,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-3.5-turbo-0613": {
id: "openai/gpt-3.5-turbo-0613",
name: "OpenAI: GPT-3.5 Turbo (older v0613)",
@@ -6750,6 +6733,23 @@ export const MODELS = {
contextWindow: 4095,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4-turbo-preview": {
id: "openai/gpt-4-turbo-preview",
name: "OpenAI: GPT-4 Turbo Preview",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 10,
output: 30,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"mistralai/mistral-tiny": {
id: "mistralai/mistral-tiny",
name: "Mistral Tiny",

View file

@@ -37,7 +37,7 @@ import {
type TurnEndEvent,
type TurnStartEvent,
} from "./hooks/index.js";
import type { BashExecutionMessage } from "./messages.js";
import { type BashExecutionMessage, type HookAppMessage, isHookAppMessage } from "./messages.js";
import type { ModelRegistry } from "./model-registry.js";
import type { CompactionEntry, SessionManager } from "./session-manager.js";
import type { SettingsManager, SkillsSettings } from "./settings-manager.js";
@@ -111,12 +111,6 @@ export interface SessionStats {
}
/** Internal marker for hook messages queued through the agent loop */
interface HookMessageData {
customType: string;
display: boolean;
details?: unknown;
}
// ============================================================================
// Constants
// ============================================================================
@@ -228,15 +222,13 @@ export class AgentSession {
// Handle session persistence
if (event.type === "message_end") {
// Check if this is a hook message (has _hookData marker)
type HookAppMessage = AppMessage & { _hookData?: HookMessageData; content: (TextContent | ImageContent)[] };
const hookMessage = event.message as HookAppMessage;
if (hookMessage._hookData) {
if (isHookAppMessage(event.message)) {
// Persist as CustomMessageEntry
this.sessionManager.appendCustomMessageEntry(
hookMessage._hookData.customType,
hookMessage.content,
hookMessage._hookData.display,
hookMessage._hookData.details,
event.message.customType,
event.message.content,
event.message.display,
event.message.details,
);
} else {
// Regular message - persist as SessionMessageEntry
@@ -578,16 +570,14 @@ export class AgentSession {
const content: (TextContent | ImageContent)[] =
typeof message.content === "string" ? [{ type: "text", text: message.content }] : message.content;
// Create AppMessage with _hookData marker for routing in message_end handler
const appMessage: AppMessage & { _hookData: HookMessageData } = {
role: "user",
// Create HookAppMessage with proper role for type-safe handling
const appMessage: HookAppMessage = {
role: "hookMessage",
customType: message.customType,
content,
display: message.display,
details: message.details,
timestamp: Date.now(),
_hookData: {
customType: message.customType,
display: message.display,
details: message.details,
},
};
if (this.isStreaming) {
@@ -596,12 +586,6 @@ export class AgentSession {
} else if (triggerTurn) {
// Append to agent state and session, then trigger a turn
this.agent.appendMessage(appMessage);
this.sessionManager.appendCustomMessageEntry(
message.customType,
message.content,
message.display,
message.details,
);
// Start a new turn - emit message events for the hook message so TUI can render it
await this.agent.continue(true);
} else {

View file

@@ -26,10 +26,26 @@ export interface BashExecutionMessage {
timestamp: number;
}
import type { ImageContent, TextContent } from "@mariozechner/pi-ai";
/**
* Message type for hook-injected messages via sendMessage().
* These are custom messages that hooks can inject into the conversation.
*/
export interface HookAppMessage<T = unknown> {
role: "hookMessage";
customType: string;
content: (TextContent | ImageContent)[];
display: boolean;
details?: T;
timestamp: number;
}
// Extend CustomMessages via declaration merging
declare module "@mariozechner/pi-agent-core" {
interface CustomMessages {
bashExecution: BashExecutionMessage;
hookMessage: HookAppMessage;
}
}
@@ -44,6 +60,13 @@ export function isBashExecutionMessage(msg: AppMessage | Message): msg is BashEx
return (msg as BashExecutionMessage).role === "bashExecution";
}
/**
* Type guard for HookAppMessage.
*/
export function isHookAppMessage(msg: AppMessage | Message): msg is HookAppMessage {
return (msg as HookAppMessage).role === "hookMessage";
}
// ============================================================================
// Message Formatting
// ============================================================================
@@ -91,6 +114,14 @@ export function messageTransformer(messages: AppMessage[]): Message[] {
timestamp: m.timestamp,
};
}
if (isHookAppMessage(m)) {
// Convert hook message to user message
return {
role: "user",
content: m.content,
timestamp: m.timestamp,
};
}
// Pass through standard LLM roles
if (m.role === "user" || m.role === "assistant" || m.role === "toolResult") {
return m as Message;

View file

@@ -28,7 +28,7 @@ import { APP_NAME, getAuthPath, getDebugLogPath } from "../../config.js";
import type { AgentSession, AgentSessionEvent } from "../../core/agent-session.js";
import type { LoadedCustomTool, SessionEvent as ToolSessionEvent } from "../../core/custom-tools/index.js";
import type { HookUIContext } from "../../core/hooks/index.js";
import { isBashExecutionMessage } from "../../core/messages.js";
import { isBashExecutionMessage, isHookAppMessage } from "../../core/messages.js";
import {
getLatestCompactionEntry,
type SessionContext,
@@ -1016,7 +1016,22 @@ export class InteractiveMode {
return;
}
if (message.role === "user") {
if (isHookAppMessage(message)) {
// Render as custom message if display is true
if (message.display) {
const entry = {
type: "custom_message" as const,
customType: message.customType,
content: message.content,
display: true,
id: "",
parentId: null,
timestamp: new Date().toISOString(),
};
const renderer = this.session.hookRunner?.getCustomMessageRenderer(message.customType);
this.chatContainer.addChild(new CustomMessageComponent(entry, renderer));
}
} else if (message.role === "user") {
const textContent = this.getUserMessageText(message);
if (textContent) {
const userComponent = new UserMessageComponent(textContent, this.isFirstUserMessage);