WIP: Context compaction core logic (#92)

- Add CompactionEntry type with firstKeptEntryIndex
- Add loadSessionFromEntries() for compaction-aware loading
- Add compact() function that returns CompactionEntry
- Add token calculation and cut point detection
- Add tests with real session fixture and LLM integration

Still TODO: settings, /compact and /autocompact commands, auto-trigger in TUI, /branch rework
This commit is contained in:
Mario Zechner 2025-12-04 00:07:53 +01:00
parent f02194296d
commit 6c2360af28
4 changed files with 1876 additions and 66 deletions

View file

@ -0,0 +1,266 @@
/**
* Context compaction for long sessions.
*
* Pure functions for compaction logic. The session manager handles I/O,
* and after compaction the session is reloaded.
*/
import type { AppMessage } from "@mariozechner/pi-agent-core";
import type { AssistantMessage, Model, Usage } from "@mariozechner/pi-ai";
import { complete } from "@mariozechner/pi-ai";
import { type CompactionEntry, loadSessionFromEntries, type SessionEntry } from "./session-manager.js";
// ============================================================================
// Types
// ============================================================================
/** Tunable knobs for automatic context compaction. */
export interface CompactionSettings {
/** Master switch: when false, shouldCompact() never triggers. */
enabled: boolean;
/** Tokens kept free at the top of the context window; also caps the summary size (generateSummary uses 80% of it as maxTokens). */
reserveTokens: number;
/** Approximate number of recent conversation tokens to keep verbatim after a compaction (see findCutPoint). */
keepRecentTokens: number;
}
/** Defaults used when the caller supplies no settings. */
export const DEFAULT_COMPACTION_SETTINGS: CompactionSettings = {
enabled: true,
reserveTokens: 16384,
keepRecentTokens: 20000,
};
// ============================================================================
// Token calculation
// ============================================================================
/**
 * Total context tokens represented by a usage record: the sum of the
 * input, output, cache-read and cache-write token counts.
 */
export function calculateContextTokens(usage: Usage): number {
	const { input, output, cacheRead, cacheWrite } = usage;
	return input + output + cacheRead + cacheWrite;
}
/**
 * Extract usage from a message when it is a completed assistant message.
 * Aborted responses and messages without usage yield null.
 */
function getAssistantUsage(msg: AppMessage): Usage | null {
	if (msg.role !== "assistant" || !("usage" in msg)) return null;
	const assistant = msg as AssistantMessage;
	if (assistant.stopReason === "aborted" || !assistant.usage) return null;
	return assistant.usage;
}
/**
 * Scan entries newest-to-oldest and return the usage of the most recent
 * non-aborted assistant message, or null when none exists.
 */
export function getLastAssistantUsage(entries: SessionEntry[]): Usage | null {
	let i = entries.length;
	while (i-- > 0) {
		const entry = entries[i];
		if (entry.type !== "message") continue;
		const usage = getAssistantUsage(entry.message);
		if (usage) return usage;
	}
	return null;
}
/**
 * Decide whether compaction should trigger: true when the context usage has
 * grown past the window minus the reserve. Always false when disabled.
 */
export function shouldCompact(contextTokens: number, contextWindow: number, settings: CompactionSettings): boolean {
	const threshold = contextWindow - settings.reserveTokens;
	return settings.enabled && contextTokens > threshold;
}
// ============================================================================
// Cut point detection
// ============================================================================
/**
 * Indices of user-message entries inside [startIndex, endIndex); these mark
 * conversation turn boundaries and are the only legal cut points.
 */
function findTurnBoundaries(entries: SessionEntry[], startIndex: number, endIndex: number): number[] {
	const result: number[] = [];
	let i = startIndex;
	while (i < endIndex) {
		const e = entries[i];
		if (e.type === "message" && e.message.role === "user") {
			result.push(i);
		}
		i++;
	}
	return result;
}
/**
 * Find the cut point in session entries that keeps approximately `keepRecentTokens`.
 * Returns the entry index of the first message to keep (a user message for turn integrity).
 *
 * Only considers entries between `startIndex` and `endIndex` (exclusive).
 */
export function findCutPoint(
entries: SessionEntry[],
startIndex: number,
endIndex: number,
keepRecentTokens: number,
): number {
const boundaries = findTurnBoundaries(entries, startIndex, endIndex);
if (boundaries.length === 0) {
return startIndex; // No user messages, keep everything in range
}
// Collect assistant usages walking backwards from endIndex.
// assistantUsages[0] is therefore the NEWEST assistant message in range.
const assistantUsages: Array<{ index: number; tokens: number }> = [];
for (let i = endIndex - 1; i >= startIndex; i--) {
const entry = entries[i];
if (entry.type === "message") {
const usage = getAssistantUsage(entry.message);
if (usage) {
assistantUsages.push({
index: i,
tokens: calculateContextTokens(usage),
});
}
}
}
if (assistantUsages.length === 0) {
// No usage info, keep last turn only
return boundaries[boundaries.length - 1];
}
// Walk through and find where cumulative token difference exceeds keepRecentTokens.
// newestTokens - older tokens estimates the tokens contributed by the turns
// in between (assumes each usage total is the cumulative context size at that
// point in the conversation — TODO confirm against pi-ai's Usage semantics).
const newestTokens = assistantUsages[0].tokens;
let cutIndex = startIndex; // Default: keep everything in range
for (let i = 1; i < assistantUsages.length; i++) {
const tokenDiff = newestTokens - assistantUsages[i].tokens;
if (tokenDiff >= keepRecentTokens) {
// Find the turn boundary at or before the assistant we want to keep,
// so the kept suffix always starts on a user message (turn integrity).
const lastKeptAssistantIndex = assistantUsages[i - 1].index;
for (let b = boundaries.length - 1; b >= 0; b--) {
if (boundaries[b] <= lastKeptAssistantIndex) {
cutIndex = boundaries[b];
break;
}
}
break;
}
}
return cutIndex;
}
// ============================================================================
// Summarization
// ============================================================================
// Instruction appended as a final user message by generateSummary().
// NOTE: runtime prompt text sent to the LLM — changing it changes behavior.
const SUMMARIZATION_PROMPT = `You are performing a CONTEXT CHECKPOINT COMPACTION. Create a handoff summary for another LLM that will resume the task.
Include:
- Current progress and key decisions made
- Important context, constraints, or user preferences
- Absolute file paths of any relevant files that were read or modified
- What remains to be done (clear next steps)
- Any critical data, examples, or references needed to continue
Be concise, structured, and focused on helping the next LLM seamlessly continue the work.`;
/**
 * Ask the model for a compaction handoff summary of `currentMessages`.
 *
 * The summarization prompt (optionally extended with `customInstructions`)
 * is appended as a final user message; the text parts of the response are
 * joined with newlines. Output is capped at 80% of `reserveTokens`.
 */
export async function generateSummary(
	currentMessages: AppMessage[],
	model: Model<any>,
	reserveTokens: number,
	apiKey: string,
	signal?: AbortSignal,
	customInstructions?: string,
): Promise<string> {
	// Leave headroom inside the reserve for the summary itself.
	const maxTokens = Math.floor(0.8 * reserveTokens);
	let prompt = SUMMARIZATION_PROMPT;
	if (customInstructions) {
		prompt = `${SUMMARIZATION_PROMPT}\n\nAdditional focus: ${customInstructions}`;
	}
	const summarizationMessages = currentMessages.concat([
		{
			role: "user" as const,
			content: prompt,
			timestamp: Date.now(),
		},
	]);
	const response = await complete(model, { messages: summarizationMessages }, { maxTokens, signal, apiKey });
	const parts: string[] = [];
	for (const c of response.content) {
		if (c.type === "text") {
			parts.push((c as { type: "text"; text: string }).text);
		}
	}
	return parts.join("\n");
}
// ============================================================================
// Main compaction function
// ============================================================================
/**
 * Calculate compaction and generate summary.
 * Returns the CompactionEntry to append to the session file.
 *
 * @param entries - All session entries
 * @param model - Model to use for summarization
 * @param settings - Compaction settings
 * @param apiKey - API key for LLM
 * @param signal - Optional abort signal
 * @param customInstructions - Optional custom focus for the summary
 */
export async function compact(
	entries: SessionEntry[],
	model: Model<any>,
	settings: CompactionSettings,
	apiKey: string,
	signal?: AbortSignal,
	customInstructions?: string,
): Promise<CompactionEntry> {
	// Current conversation as the model sees it (prior compactions applied).
	const { messages: currentMessages } = loadSessionFromEntries(entries);

	// Only entries after the most recent compaction are candidates for cutting.
	let searchStart = 0;
	for (let i = entries.length - 1; i >= 0; i--) {
		if (entries[i].type === "compaction") {
			searchStart = i + 1;
			break;
		}
	}

	// Context size just before compaction, from the newest assistant usage.
	const lastUsage = getLastAssistantUsage(entries);
	const tokensBefore = lastUsage === null ? 0 : calculateContextTokens(lastUsage);

	// Entry index of the first message that survives the compaction.
	const firstKeptEntryIndex = findCutPoint(entries, searchStart, entries.length, settings.keepRecentTokens);

	// Summarize the full current context (not just the dropped prefix).
	const summary = await generateSummary(
		currentMessages,
		model,
		settings.reserveTokens,
		apiKey,
		signal,
		customInstructions,
	);

	return {
		type: "compaction",
		timestamp: new Date().toISOString(),
		summary,
		firstKeptEntryIndex,
		tokensBefore,
	};
}

View file

@ -12,6 +12,10 @@ function uuidv4(): string {
return `${hex.slice(0, 8)}-${hex.slice(8, 12)}-${hex.slice(12, 16)}-${hex.slice(16, 20)}-${hex.slice(20, 32)}`;
}
// ============================================================================
// Session entry types
// ============================================================================
export interface SessionHeader {
type: "session";
id: string;
@ -20,7 +24,7 @@ export interface SessionHeader {
provider: string;
modelId: string;
thinkingLevel: string;
branchedFrom?: string; // Path to the session file this was branched from
branchedFrom?: string;
}
export interface SessionMessageEntry {
@ -42,6 +46,129 @@ export interface ModelChangeEntry {
modelId: string;
}
/** Appended to the session JSONL file when the context is compacted. */
export interface CompactionEntry {
type: "compaction";
/** ISO-8601 time the compaction was performed (new Date().toISOString()). */
timestamp: string;
/** LLM-generated handoff summary that replaces the dropped prefix on load. */
summary: string;
firstKeptEntryIndex: number; // Index into session entries where we start keeping
/** Context tokens in use just before compaction (0 when no usage was found). */
tokensBefore: number;
}
/** Union of all session entry types */
export type SessionEntry =
| SessionHeader
| SessionMessageEntry
| ThinkingLevelChangeEntry
| ModelChangeEntry
| CompactionEntry;
// ============================================================================
// Session loading with compaction support
// ============================================================================
/** Result of replaying a session's entries (see loadSessionFromEntries). */
export interface LoadedSession {
/** Conversation messages; after a compaction, a summary message is prepended. */
messages: AppMessage[];
/** Most recent thinking level ("off" when never set). */
thinkingLevel: string;
/** Most recent provider/model pair, or null if no header or model_change was seen. */
model: { provider: string; modelId: string } | null;
}
// Preamble prepended to every compaction summary handed to the next model.
const SUMMARY_PREFIX = `Another language model worked on this task and produced a summary. Use this to continue the work without duplicating effort:
`;
/**
 * Build the user message that carries a compaction summary, using the
 * standard handoff preamble and the current wall-clock timestamp.
 */
export function createSummaryMessage(summary: string): AppMessage {
	const message: AppMessage = {
		role: "user",
		content: SUMMARY_PREFIX + summary,
		timestamp: Date.now(),
	};
	return message;
}
/**
 * Parse JSONL session file content into entries.
 * Blank and malformed lines are silently skipped.
 */
export function parseSessionEntries(content: string): SessionEntry[] {
	const result: SessionEntry[] = [];
	for (const line of content.trim().split("\n")) {
		if (line.trim().length === 0) continue;
		try {
			result.push(JSON.parse(line) as SessionEntry);
		} catch {
			// Skip malformed lines
		}
	}
	return result;
}
/**
 * Load session from entries, handling compaction events.
 *
 * Algorithm:
 * 1. Find latest compaction event (if any)
 * 2. Keep all entries from firstKeptEntryIndex onwards (extracting messages)
 * 3. Prepend summary as user message
 */
export function loadSessionFromEntries(entries: SessionEntry[]): LoadedSession {
// Find model and thinking level (always scan all entries)
// — these are never dropped by compaction, so the full history is scanned.
let thinkingLevel = "off";
let model: { provider: string; modelId: string } | null = null;
for (const entry of entries) {
if (entry.type === "session") {
thinkingLevel = entry.thinkingLevel;
model = { provider: entry.provider, modelId: entry.modelId };
} else if (entry.type === "thinking_level_change") {
thinkingLevel = entry.thinkingLevel;
} else if (entry.type === "model_change") {
model = { provider: entry.provider, modelId: entry.modelId };
}
}
// Find latest compaction event (scan backwards; only the newest one matters)
let latestCompactionIndex = -1;
for (let i = entries.length - 1; i >= 0; i--) {
if (entries[i].type === "compaction") {
latestCompactionIndex = i;
break;
}
}
// No compaction: return all messages
if (latestCompactionIndex === -1) {
const messages: AppMessage[] = [];
for (const entry of entries) {
if (entry.type === "message") {
messages.push(entry.message);
}
}
return { messages, thinkingLevel, model };
}
const compactionEvent = entries[latestCompactionIndex] as CompactionEntry;
// Extract messages from firstKeptEntryIndex to end (skipping compaction entries)
// Note: firstKeptEntryIndex may point before latestCompactionIndex; the
// compaction entry itself is excluded by the type === "message" check.
const keptMessages: AppMessage[] = [];
for (let i = compactionEvent.firstKeptEntryIndex; i < entries.length; i++) {
const entry = entries[i];
if (entry.type === "message") {
keptMessages.push(entry.message);
}
}
// Build final messages: summary + kept messages
const summaryMessage = createSummaryMessage(compactionEvent.summary);
const messages = [summaryMessage, ...keptMessages];
return { messages, thinkingLevel, model };
}
export class SessionManager {
private sessionId!: string;
private sessionFile!: string;
@ -208,77 +335,38 @@ export class SessionManager {
}
}
/**
 * Append a compaction entry to the session JSONL file.
 * No-op when session persistence is disabled.
 */
saveCompaction(entry: CompactionEntry): void {
	if (!this.enabled) return;
	appendFileSync(this.sessionFile, JSON.stringify(entry) + "\n");
}
/**
 * Load session data (messages, model, thinking level) with compaction support.
 * Reads all entries from disk and delegates to loadSessionFromEntries().
 */
loadSession(): LoadedSession {
const entries = this.loadEntries();
return loadSessionFromEntries(entries);
}
/**
 * @deprecated Use loadSession().messages instead
 */
loadMessages(): AppMessage[] {
return this.loadSession().messages;
}
/**
 * @deprecated Use loadSession().thinkingLevel instead
 */
loadThinkingLevel(): string {
	// Delegates to loadSession(), which resolves the most recent thinking
	// level from the session header and thinking_level_change events.
	return this.loadSession().thinkingLevel;
}
/**
 * @deprecated Use loadSession().model instead
 */
loadModel(): { provider: string; modelId: string } | null {
	// Delegates to loadSession(), which resolves the most recent
	// provider/model pair from the session header and model_change events.
	return this.loadSession().model;
}
getSessionId(): string {
@ -289,6 +377,29 @@ export class SessionManager {
return this.sessionFile;
}
/**
 * Load all entries from the session file.
 *
 * Returns an empty array when the file does not exist. Parsing is delegated
 * to parseSessionEntries() so file loading and JSONL parsing share a single
 * implementation (blank and malformed lines are skipped there).
 */
loadEntries(): SessionEntry[] {
	if (!existsSync(this.sessionFile)) return [];
	return parseSessionEntries(readFileSync(this.sessionFile, "utf8"));
}
/**
* Load all sessions for the current directory with metadata
*/