move pi-mono into companion-cloud as apps/companion-os

- Copy all pi-mono source into apps/companion-os/
- Update Dockerfile to COPY pre-built binary instead of downloading from GitHub Releases
- Update deploy-staging.yml to build pi from source (bun compile) before Docker build
- Add apps/companion-os/** to path triggers
- No more cross-repo dispatch needed

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Harivansh Rathi 2026-03-07 09:22:50 -08:00
commit 0250f72976
579 changed files with 206942 additions and 0 deletions

View file

@ -0,0 +1,4 @@
// Smoke test for the vendored @mariozechner/pi-ai package: verifies the
// exports resolve and a known model can be looked up.
import { complete, getModel } from "@mariozechner/pi-ai";
// getModel(provider, modelId) returns the model descriptor; print its id
// and the type of `complete` to prove both imports are live.
const model = getModel("google", "gemini-2.5-flash");
console.log(model.id, typeof complete);

172
scripts/build-binaries.sh Executable file
View file

@ -0,0 +1,172 @@
#!/usr/bin/env bash
#
# Build pi binaries for all platforms locally.
# Mirrors .github/workflows/build-binaries.yml
#
# Usage:
#   ./scripts/build-binaries.sh [--skip-deps] [--platform <platform>]
#
# Options:
#   --skip-deps        Skip installing cross-platform dependencies
#   --platform <name>  Build only for specified platform (darwin-arm64, darwin-x64, linux-x64, linux-arm64, windows-x64)
#
# Output:
#   packages/coding-agent/binaries/
#     pi-darwin-arm64.tar.gz
#     pi-darwin-x64.tar.gz
#     pi-linux-x64.tar.gz
#     pi-linux-arm64.tar.gz
#     pi-windows-x64.zip

# Fail fast on errors, unset variables, and failures anywhere in a pipeline.
set -euo pipefail

# Always operate from the repository root, regardless of where the script
# was invoked from.
cd "$(dirname "$0")/.."

SKIP_DEPS=false
PLATFORM=""

# Parse command-line flags; anything unrecognized is a hard error.
while [[ $# -gt 0 ]]; do
  case $1 in
    --skip-deps)
      SKIP_DEPS=true
      shift
      ;;
    --platform)
      PLATFORM="$2"
      shift 2
      ;;
    *)
      echo "Unknown option: $1"
      exit 1
      ;;
  esac
done
# Validate platform if specified
if [[ -n "$PLATFORM" ]]; then
  case "$PLATFORM" in
    darwin-arm64|darwin-x64|linux-x64|linux-arm64|windows-x64)
      ;;
    *)
      echo "Invalid platform: $PLATFORM"
      echo "Valid platforms: darwin-arm64, darwin-x64, linux-x64, linux-arm64, windows-x64"
      exit 1
      ;;
  esac
fi

echo "==> Installing dependencies..."
npm ci

if [[ "$SKIP_DEPS" == "false" ]]; then
  echo "==> Installing cross-platform native bindings..."
  # npm ci only installs optional deps for the current platform
  # We need all platform bindings for bun cross-compilation
  # Use --force to bypass platform checks (os/cpu restrictions in package.json)
  # Install all in one command to avoid npm removing packages from previous installs
  npm install --no-save --force \
    @mariozechner/clipboard-darwin-arm64@0.3.0 \
    @mariozechner/clipboard-darwin-x64@0.3.0 \
    @mariozechner/clipboard-linux-x64-gnu@0.3.0 \
    @mariozechner/clipboard-linux-arm64-gnu@0.3.0 \
    @mariozechner/clipboard-win32-x64-msvc@0.3.0 \
    @img/sharp-darwin-arm64@0.34.5 \
    @img/sharp-darwin-x64@0.34.5 \
    @img/sharp-linux-x64@0.34.5 \
    @img/sharp-linux-arm64@0.34.5 \
    @img/sharp-win32-x64@0.34.5 \
    @img/sharp-libvips-darwin-arm64@1.2.4 \
    @img/sharp-libvips-darwin-x64@1.2.4 \
    @img/sharp-libvips-linux-x64@1.2.4 \
    @img/sharp-libvips-linux-arm64@1.2.4
else
  echo "==> Skipping cross-platform native bindings (--skip-deps)"
fi

echo "==> Building all packages..."
npm run build
echo "==> Building binaries..."
cd packages/coding-agent

# Clean previous builds
rm -rf binaries
mkdir -p binaries/{darwin-arm64,darwin-x64,linux-x64,linux-arm64,windows-x64}

# Determine which platforms to build
if [[ -n "$PLATFORM" ]]; then
  PLATFORMS=("$PLATFORM")
else
  PLATFORMS=(darwin-arm64 darwin-x64 linux-x64 linux-arm64 windows-x64)
fi

# Cross-compile the CLI once per target platform with bun.
for platform in "${PLATFORMS[@]}"; do
  echo "Building for $platform..."
  # Externalize koffi to avoid embedding all 18 platform .node files (~74MB)
  # into every binary. Koffi is only used on Windows for VT input and the
  # call site has a try/catch fallback. For Windows builds, we copy the
  # appropriate .node file alongside the binary below.
  if [[ "$platform" == "windows-x64" ]]; then
    bun build --compile --external koffi --target=bun-$platform ./dist/cli.js --outfile binaries/$platform/pi.exe
  else
    bun build --compile --external koffi --target=bun-$platform ./dist/cli.js --outfile binaries/$platform/pi
  fi
done
echo "==> Creating release archives..."

# Copy shared files to each platform directory
for platform in "${PLATFORMS[@]}"; do
  cp package.json binaries/$platform/
  cp README.md binaries/$platform/
  cp CHANGELOG.md binaries/$platform/
  # Photon's WASM blob must sit next to the binary at runtime.
  cp ../../node_modules/@silvia-odwyer/photon-node/photon_rs_bg.wasm binaries/$platform/
  mkdir -p binaries/$platform/theme
  cp dist/modes/interactive/theme/*.json binaries/$platform/theme/
  cp -r dist/core/export-html binaries/$platform/
  cp -r docs binaries/$platform/
  # Copy koffi native module for Windows (needed for VT input support)
  if [[ "$platform" == "windows-x64" ]]; then
    mkdir -p binaries/$platform/node_modules/koffi/build/koffi/win32_x64
    cp ../../node_modules/koffi/index.js binaries/$platform/node_modules/koffi/
    cp ../../node_modules/koffi/package.json binaries/$platform/node_modules/koffi/
    cp ../../node_modules/koffi/build/koffi/win32_x64/koffi.node binaries/$platform/node_modules/koffi/build/koffi/win32_x64/
  fi
done
# Create archives
cd binaries
for platform in "${PLATFORMS[@]}"; do
  if [[ "$platform" == "windows-x64" ]]; then
    # Windows (zip)
    echo "Creating pi-$platform.zip..."
    (cd $platform && zip -r ../pi-$platform.zip .)
  else
    # Unix platforms (tar.gz) - use wrapper directory for mise compatibility
    echo "Creating pi-$platform.tar.gz..."
    # Temporarily rename the dir to "pi" so the tarball contains pi/...
    mv $platform pi && tar -czf pi-$platform.tar.gz pi && mv pi $platform
  fi
done

# Extract archives for easy local testing
echo "==> Extracting archives for testing..."
for platform in "${PLATFORMS[@]}"; do
  rm -rf $platform
  if [[ "$platform" == "windows-x64" ]]; then
    mkdir -p $platform && (cd $platform && unzip -q ../pi-$platform.zip)
  else
    tar -xzf pi-$platform.tar.gz && mv pi $platform
  fi
done

echo ""
echo "==> Build complete!"
echo "Archives available in packages/coding-agent/binaries/"
# Globs may not match when --platform limited the build; never fail here.
ls -lh *.tar.gz *.zip 2>/dev/null || true
echo ""
echo "Extracted directories for testing:"
for platform in "${PLATFORMS[@]}"; do
  echo " binaries/$platform/pi"
done

199
scripts/cost.ts Executable file
View file

@ -0,0 +1,199 @@
#!/usr/bin/env npx tsx
// Summarize pi session costs (per day, per provider) for one project
// directory over the last N days, by scanning ~/.pi/agent/sessions.
import * as fs from "fs";
import * as path from "path";

// Parse args
const args = process.argv.slice(2);
let directory: string | undefined;
let days: number | undefined;
for (let i = 0; i < args.length; i++) {
  if (args[i] === "--dir" || args[i] === "-d") {
    directory = args[++i];
  } else if (args[i] === "--days" || args[i] === "-n") {
    days = parseInt(args[++i], 10);
  } else if (args[i] === "--help" || args[i] === "-h") {
    console.log(`Usage: cost.ts -d <path> -n <days>
-d, --dir <path> Directory path (required)
-n, --days <num> Number of days to track (required)
-h, --help Show this help`);
    process.exit(0);
  }
}

// Both flags are mandatory; a non-numeric --days yields NaN, which is
// falsy and therefore also rejected here.
if (!directory || !days) {
  console.error("Error: both --dir and --days are required");
  console.error("Run with --help for usage");
  process.exit(1);
}
// Map an absolute project path to pi's session folder name: drop the
// leading slash, turn the remaining slashes into dashes, and wrap the
// result in "--" on both sides (e.g. /foo/bar -> --foo-bar--).
function encodeSessionDir(dir: string): string {
  const trimmed = dir.startsWith("/") ? dir.slice(1) : dir;
  const dashed = trimmed.split("/").join("-");
  return `--${dashed}--`;
}
// Sessions for a project live under ~/.pi/agent/sessions/<encoded-cwd>/.
const sessionsBase = path.join(process.env.HOME!, ".pi/agent/sessions");
const encodedDir = encodeSessionDir(directory);
const sessionsDir = path.join(sessionsBase, encodedDir);
if (!fs.existsSync(sessionsDir)) {
  console.error(`Sessions directory not found: ${sessionsDir}`);
  process.exit(1);
}

// Get cutoff date: midnight at the start of the window, `days` days ago.
const cutoff = new Date();
cutoff.setDate(cutoff.getDate() - days);
cutoff.setHours(0, 0, 0, 0);

// Aggregated cost figures for one provider on one day.
interface DayCost {
  total: number;
  input: number;
  output: number;
  cacheRead: number;
  cacheWrite: number;
  requests: number;
}

// day (YYYY-MM-DD) -> provider name -> aggregated costs.
interface Stats {
  [day: string]: {
    [provider: string]: DayCost;
  };
}

const stats: Stats = {};

// Process session files
const files = fs.readdirSync(sessionsDir).filter((f) => f.endsWith(".jsonl"));
for (const file of files) {
  // Extract timestamp from filename: <timestamp>_<uuid>.jsonl
  // Format: 2025-12-17T08-25-07-381Z (dashes instead of colons)
  const timestamp = file.split("_")[0];
  // Convert back to valid ISO: replace T08-25-07-381Z with T08:25:07.381Z
  const isoTimestamp = timestamp.replace(
    /T(\d{2})-(\d{2})-(\d{2})-(\d{3})Z/,
    "T$1:$2:$3.$4Z",
  );
  const fileDate = new Date(isoTimestamp);
  // Skip whole files whose session started before the cutoff.
  if (fileDate < cutoff) continue;
  const filepath = path.join(sessionsDir, file);
  const content = fs.readFileSync(filepath, "utf8");
  const lines = content.trim().split("\n");
  for (const line of lines) {
    if (!line) continue;
    try {
      const entry = JSON.parse(line);
      // Only assistant message entries carry usage/cost data.
      if (entry.type !== "message") continue;
      if (entry.message?.role !== "assistant") continue;
      if (!entry.message?.usage?.cost) continue;
      const { provider, usage } = entry.message;
      const { cost } = usage;
      // Bucket by the entry's own timestamp (UTC day), not the file's.
      const entryDate = new Date(entry.timestamp);
      const day = entryDate.toISOString().split("T")[0];
      if (!stats[day]) stats[day] = {};
      if (!stats[day][provider]) {
        stats[day][provider] = {
          total: 0,
          input: 0,
          output: 0,
          cacheRead: 0,
          cacheWrite: 0,
          requests: 0,
        };
      }
      // Missing cost components are treated as zero.
      stats[day][provider].total += cost.total || 0;
      stats[day][provider].input += cost.input || 0;
      stats[day][provider].output += cost.output || 0;
      stats[day][provider].cacheRead += cost.cacheRead || 0;
      stats[day][provider].cacheWrite += cost.cacheWrite || 0;
      stats[day][provider].requests += 1;
    } catch {
      // Skip malformed lines
    }
  }
}
// Sort days and output
const sortedDays = Object.keys(stats).sort();
if (sortedDays.length === 0) {
  console.log(`No sessions found in the last ${days} days for: ${directory}`);
  process.exit(0);
}

console.log(`\nCost breakdown for: ${directory}`);
console.log(
  `Period: last ${days} days (since ${cutoff.toISOString().split("T")[0]})`,
);
console.log("=".repeat(80));

let grandTotal = 0;
// Running per-provider totals across all days, for the final summary.
const providerTotals: { [p: string]: DayCost } = {};

for (const day of sortedDays) {
  console.log(`\n${day}`);
  console.log("-".repeat(40));
  let dayTotal = 0;
  const providers = Object.keys(stats[day]).sort();
  for (const provider of providers) {
    const s = stats[day][provider];
    dayTotal += s.total;
    if (!providerTotals[provider]) {
      providerTotals[provider] = {
        total: 0,
        input: 0,
        output: 0,
        cacheRead: 0,
        cacheWrite: 0,
        requests: 0,
      };
    }
    providerTotals[provider].total += s.total;
    providerTotals[provider].input += s.input;
    providerTotals[provider].output += s.output;
    providerTotals[provider].cacheRead += s.cacheRead;
    providerTotals[provider].cacheWrite += s.cacheWrite;
    providerTotals[provider].requests += s.requests;
    console.log(
      ` ${provider.padEnd(15)} $${s.total.toFixed(4).padStart(8)} (${s.requests} reqs, in: $${s.input.toFixed(4)}, out: $${s.output.toFixed(4)}, cache: $${(s.cacheRead + s.cacheWrite).toFixed(4)})`,
    );
  }
  console.log(
    ` ${"Day total:".padEnd(15)} $${dayTotal.toFixed(4).padStart(8)}`,
  );
  grandTotal += dayTotal;
}

console.log("\n" + "=".repeat(80));
console.log("TOTALS BY PROVIDER");
console.log("-".repeat(40));
for (const provider of Object.keys(providerTotals).sort()) {
  const t = providerTotals[provider];
  console.log(
    ` ${provider.padEnd(15)} $${t.total.toFixed(4).padStart(8)} (${t.requests} reqs, in: $${t.input.toFixed(4)}, out: $${t.output.toFixed(4)}, cache: $${(t.cacheRead + t.cacheWrite).toFixed(4)})`,
  );
}
console.log("-".repeat(40));
console.log(
  ` ${"GRAND TOTAL:".padEnd(15)} $${grandTotal.toFixed(4).padStart(8)}`,
);
console.log();

145
scripts/release.mjs Executable file
View file

@ -0,0 +1,145 @@
#!/usr/bin/env node
/**
 * Release script for pi
 *
 * Usage: node scripts/release.mjs <major|minor|patch>
 *
 * Steps:
 *   1. Check for uncommitted changes
 *   2. Bump version via npm run version:xxx
 *   3. Update CHANGELOG.md files: [Unreleased] -> [version] - date
 *   4. Commit and tag
 *   5. Publish to npm
 *   6. Add new [Unreleased] section to changelogs
 *   7. Commit
 */
import { execSync } from "child_process";
import { readFileSync, writeFileSync, readdirSync, existsSync } from "fs";
import { join } from "path";

// The bump type is the single required CLI argument.
const BUMP_TYPE = process.argv[2];
if (!["major", "minor", "patch"].includes(BUMP_TYPE)) {
  console.error("Usage: node scripts/release.mjs <major|minor|patch>");
  process.exit(1);
}
/**
 * Run a shell command, echoing it first.
 *
 * @param {string} cmd - Command line to execute.
 * @param {object} [options] - Extra execSync options, plus:
 *   silent: capture and return output instead of inheriting stdio;
 *   ignoreError: return null on failure instead of exiting the script.
 * @returns {string|null} Captured stdout when silent; null on ignored failure.
 */
function run(cmd, options = {}) {
  console.log(`$ ${cmd}`);
  const execOptions = {
    encoding: "utf-8",
    stdio: options.silent ? "pipe" : "inherit",
    ...options,
  };
  try {
    return execSync(cmd, execOptions);
  } catch (e) {
    if (options.ignoreError) {
      return null;
    }
    // Fatal by default: a failed step must not let the release continue.
    console.error(`Command failed: ${cmd}`);
    process.exit(1);
  }
}
/**
 * Read the current monorepo version from packages/ai/package.json.
 * All packages are versioned in lockstep, so any one manifest is
 * authoritative.
 * @returns {string} the semver version string
 */
function getVersion() {
  const manifest = readFileSync("packages/ai/package.json", "utf-8");
  return JSON.parse(manifest).version;
}
/**
 * Collect the path of every per-package CHANGELOG.md that exists.
 * @returns {string[]} paths like "packages/<name>/CHANGELOG.md"
 */
function getChangelogs() {
  const found = [];
  for (const pkg of readdirSync("packages")) {
    const candidate = join("packages", pkg, "CHANGELOG.md");
    if (existsSync(candidate)) {
      found.push(candidate);
    }
  }
  return found;
}
/**
 * Stamp the first "[Unreleased]" heading of every changelog with the
 * released version and today's date (YYYY-MM-DD). Changelogs without an
 * [Unreleased] section are reported and left untouched.
 * @param {string} version - the semver being released
 */
function updateChangelogsForRelease(version) {
  const today = new Date().toISOString().split("T")[0];
  for (const changelog of getChangelogs()) {
    const content = readFileSync(changelog, "utf-8");
    if (!content.includes("## [Unreleased]")) {
      console.log(` Skipping ${changelog}: no [Unreleased] section`);
      continue;
    }
    const stamped = content.replace(
      "## [Unreleased]",
      `## [${version}] - ${today}`,
    );
    writeFileSync(changelog, stamped);
    console.log(` Updated ${changelog}`);
  }
}
/**
 * Re-insert an empty "## [Unreleased]" section directly after the
 * "# Changelog" header of every changelog, ready for the next cycle.
 *
 * Fix: the original wrote the file back and logged "Added [Unreleased]"
 * even when the "# Changelog" header was missing and the replacement
 * matched nothing; such files are now skipped with an explicit notice.
 */
function addUnreleasedSection() {
  const changelogs = getChangelogs();
  const unreleasedSection = "## [Unreleased]\n\n";
  for (const changelog of changelogs) {
    const content = readFileSync(changelog, "utf-8");
    // Insert after "# Changelog\n\n"
    const updated = content.replace(
      /^(# Changelog\n\n)/,
      `$1${unreleasedSection}`
    );
    if (updated === content) {
      // Nothing was inserted — don't rewrite the file or claim success.
      console.log(` Skipping ${changelog}: no "# Changelog" header found`);
      continue;
    }
    writeFileSync(changelog, updated);
    console.log(` Added [Unreleased] to ${changelog}`);
  }
}
// Main flow — executed top-to-bottom; run() aborts the whole script on
// the first failing command, so each step implicitly gates the next.
console.log("\n=== Release Script ===\n");

// 1. Check for uncommitted changes
console.log("Checking for uncommitted changes...");
const status = run("git status --porcelain", { silent: true });
if (status && status.trim()) {
  console.error("Error: Uncommitted changes detected. Commit or stash first.");
  console.error(status);
  process.exit(1);
}
console.log(" Working directory clean\n");

// 2. Bump version
console.log(`Bumping version (${BUMP_TYPE})...`);
run(`npm run version:${BUMP_TYPE}`);
const version = getVersion();
console.log(` New version: ${version}\n`);

// 3. Update changelogs
console.log("Updating CHANGELOG.md files...");
updateChangelogsForRelease(version);
console.log();

// 4. Commit and tag
console.log("Committing and tagging...");
run("git add .");
run(`git commit -m "Release v${version}"`);
run(`git tag v${version}`);
console.log();

// 5. Publish
console.log("Publishing to npm...");
run("npm run publish");
console.log();

// 6. Add new [Unreleased] sections
console.log("Adding [Unreleased] sections for next cycle...");
addUnreleasedSection();
console.log();

// 7. Commit
console.log("Committing changelog updates...");
run("git add .");
run(`git commit -m "Add [Unreleased] section for next cycle"`);
console.log();

// 8. Push
console.log("Pushing to remote...");
run("git push origin main");
run(`git push origin v${version}`);
console.log();

console.log(`=== Released v${version} ===`);

View file

@ -0,0 +1,451 @@
#!/usr/bin/env npx tsx
/**
* Extracts session transcripts for a given cwd, splits into context-sized files,
* optionally spawns subagents to analyze patterns.
*
* Usage: npx tsx scripts/session-transcripts.ts [--analyze] [--output <dir>] [cwd]
* --analyze Spawn pi subagents to analyze each transcript file
* --output <dir> Output directory for transcript files (defaults to ./session-transcripts)
* cwd Working directory to extract sessions for (defaults to current)
*/
import {
readFileSync,
readdirSync,
writeFileSync,
existsSync,
mkdirSync,
} from "fs";
import { spawn } from "child_process";
import { createInterface } from "readline";
import { homedir } from "os";
import { join, resolve } from "path";
import {
parseSessionEntries,
type SessionMessageEntry,
} from "../packages/coding-agent/src/core/session-manager.js";
import chalk from "chalk";
// ~20k tokens per transcript file, leaving room for prompt + analysis + output.
const MAX_CHARS_PER_FILE = 100_000;

/**
 * Map a working directory to pi's session-folder name: the absolute path
 * with every slash turned into a dash, wrapped in "--" on both sides
 * (e.g. /tmp/foo -> --tmp-foo--).
 */
function cwdToSessionDir(cwd: string): string {
  const dashed = resolve(cwd).split("/").join("-");
  return `--${dashed.slice(1)}--`;
}
/**
 * Pull the plain-text pieces out of a message content field. Content may
 * be a bare string (returned as-is) or an array of typed parts; non-text
 * parts and parts with empty text are dropped, the rest are joined with
 * newlines. Anything else yields "".
 */
function extractTextContent(
  content: string | Array<{ type: string; text?: string }>,
): string {
  if (typeof content === "string") return content;
  if (!Array.isArray(content)) return "";
  const pieces: string[] = [];
  for (const part of content) {
    if (part.type === "text" && part.text) {
      pieces.push(part.text);
    }
  }
  return pieces.join("\n");
}
/**
 * Read one session .jsonl file and return its user/assistant turns as
 * "[ROLE]\n<text>" strings. Non-message entries, other roles, and
 * messages with no extractable text are skipped.
 */
function parseSession(filePath: string): string[] {
  const entries = parseSessionEntries(readFileSync(filePath, "utf8"));
  const transcript: string[] = [];
  for (const entry of entries) {
    if (entry.type !== "message") continue;
    const { role, content } = (entry as SessionMessageEntry).message;
    if (role !== "user" && role !== "assistant") continue;
    const text = extractTextContent(
      content as string | Array<{ type: string; text?: string }>,
    );
    if (!text.trim()) continue;
    transcript.push(`[${role.toUpperCase()}]\n${text}`);
  }
  return transcript;
}
// Maximum width of one progress line echoed to the terminal.
const MAX_DISPLAY_WIDTH = 100;

/**
 * Collapse text onto a single whitespace-normalized line, truncating
 * with a "..." suffix when it exceeds maxWidth characters.
 */
function truncateLine(text: string, maxWidth: number): string {
  const flattened = text.split("\n").join(" ").replace(/\s+/g, " ").trim();
  return flattened.length <= maxWidth
    ? flattened
    : `${flattened.slice(0, maxWidth - 3)}...`;
}
// Subset of the JSONL events emitted by `pi --mode json` that this
// script displays; all other event fields are ignored.
interface JsonEvent {
  type: string;
  // Present on "message_update" events; carries streamed text deltas.
  assistantMessageEvent?: { type: string; delta?: string };
  // Present on "tool_execution_start" events.
  toolName?: string;
  // Tool-call arguments; only the read/write tool args are rendered.
  args?: {
    path?: string;
    offset?: number;
    limit?: number;
    content?: string;
  };
}
/**
 * Spawn a `pi` subagent in JSON mode (restricted to the read/write
 * tools) with the given prompt, stream a compact progress view to the
 * console (dimmed text snippets, cyan [tool] lines, red stderr), and
 * resolve — never reject — with success=true iff the process exits 0.
 */
function runSubagent(
  prompt: string,
  cwd: string,
): Promise<{ success: boolean }> {
  return new Promise((resolve) => {
    const child = spawn(
      "pi",
      ["--mode", "json", "--tools", "read,write", "-p", prompt],
      {
        cwd,
        stdio: ["ignore", "pipe", "pipe"],
      },
    );
    // Accumulates streamed assistant text until a tool call or turn end.
    let textBuffer = "";
    // pi emits one JSON event per stdout line.
    const rl = createInterface({ input: child.stdout });
    rl.on("line", (line) => {
      try {
        const event: JsonEvent = JSON.parse(line);
        if (event.type === "message_update" && event.assistantMessageEvent) {
          const msgEvent = event.assistantMessageEvent;
          if (msgEvent.type === "text_delta" && msgEvent.delta) {
            textBuffer += msgEvent.delta;
          }
        } else if (event.type === "tool_execution_start" && event.toolName) {
          // Print accumulated text before tool starts
          if (textBuffer.trim()) {
            console.log(
              chalk.dim(" " + truncateLine(textBuffer, MAX_DISPLAY_WIDTH)),
            );
            textBuffer = "";
          }
          // Format tool call with args
          let argsStr = "";
          if (event.args) {
            if (event.toolName === "read") {
              argsStr = event.args.path || "";
              if (event.args.offset) argsStr += ` offset=${event.args.offset}`;
              if (event.args.limit) argsStr += ` limit=${event.args.limit}`;
            } else if (event.toolName === "write") {
              argsStr = event.args.path || "";
            }
          }
          console.log(chalk.cyan(` [${event.toolName}] ${argsStr}`));
        } else if (event.type === "turn_end") {
          // Print any remaining text at turn end
          if (textBuffer.trim()) {
            console.log(
              chalk.dim(" " + truncateLine(textBuffer, MAX_DISPLAY_WIDTH)),
            );
          }
          textBuffer = "";
        }
      } catch {
        // Ignore malformed JSON
      }
    });
    // Relay subagent stderr directly, in red.
    child.stderr.on("data", (data) => {
      process.stderr.write(chalk.red(data.toString()));
    });
    child.on("close", (code) => {
      resolve({ success: code === 0 });
    });
    // Spawn failure (e.g. pi not on PATH) resolves as a soft failure.
    child.on("error", (err) => {
      console.error(chalk.red(` Failed to spawn pi: ${err.message}`));
      resolve({ success: false });
    });
  });
}
/**
 * Entry point: collect session transcripts for a cwd, split them into
 * context-sized files, and (with --analyze) run pi subagents to mine the
 * transcripts for recurring patterns, then aggregate the findings.
 *
 * Fix vs. original: the chunk-writing progress messages printed the
 * literal "$(unknown)" (a shell-style substitution accidentally left in
 * a JS template literal) instead of the output filename; the duplicated
 * write-and-log logic is now a single writeChunk helper that
 * interpolates the real filename.
 */
async function main() {
  const args = process.argv.slice(2);
  const analyzeFlag = args.includes("--analyze");

  // Parse --output <dir>
  const outputIdx = args.indexOf("--output");
  let outputDir = resolve("./session-transcripts");
  if (outputIdx !== -1 && args[outputIdx + 1]) {
    outputDir = resolve(args[outputIdx + 1]);
  }

  // Find cwd (positional arg that's not a flag or flag value)
  const flagIndices = new Set<number>();
  flagIndices.add(args.indexOf("--analyze"));
  if (outputIdx !== -1) {
    flagIndices.add(outputIdx);
    flagIndices.add(outputIdx + 1);
  }
  const cwdArg = args.find(
    (a, i) => !flagIndices.has(i) && !a.startsWith("--"),
  );
  const cwd = resolve(cwdArg || process.cwd());

  mkdirSync(outputDir, { recursive: true });

  const sessionsBase = join(homedir(), ".pi/agent/sessions");
  const sessionDirName = cwdToSessionDir(cwd);
  const sessionDir = join(sessionsBase, sessionDirName);
  if (!existsSync(sessionDir)) {
    console.error(`No sessions found for ${cwd}`);
    console.error(`Expected: ${sessionDir}`);
    process.exit(1);
  }

  const sessionFiles = readdirSync(sessionDir)
    .filter((f) => f.endsWith(".jsonl"))
    .sort();
  console.log(`Found ${sessionFiles.length} session files in ${sessionDir}`);

  // Collect all transcripts, one delimited section per session file.
  const allTranscripts: string[] = [];
  for (const file of sessionFiles) {
    const filePath = join(sessionDir, file);
    const messages = parseSession(filePath);
    if (messages.length > 0) {
      allTranscripts.push(
        `=== SESSION: ${file} ===\n${messages.join("\n---\n")}\n=== END SESSION ===`,
      );
    }
  }
  if (allTranscripts.length === 0) {
    console.error("No transcripts found");
    process.exit(1);
  }

  // Split into files respecting MAX_CHARS_PER_FILE
  const outputFiles: string[] = [];
  let currentContent = "";
  let fileIndex = 0;

  // Write one chunk file, record it, and log it. (Fixed: the log line
  // now interpolates the actual filename instead of "$(unknown)".)
  const writeChunk = (content: string, oversized = false): void => {
    const filename = `session-transcripts-${String(fileIndex).padStart(3, "0")}.txt`;
    writeFileSync(join(outputDir, filename), content);
    outputFiles.push(filename);
    const message = `Wrote ${filename} (${content.length} chars)${oversized ? " - oversized" : ""}`;
    console.log(oversized ? chalk.yellow(message) : message);
    fileIndex++;
  };

  for (const transcript of allTranscripts) {
    // If adding this transcript would exceed limit, write current and start new
    if (
      currentContent.length > 0 &&
      currentContent.length + transcript.length + 2 > MAX_CHARS_PER_FILE
    ) {
      writeChunk(currentContent);
      currentContent = "";
    }
    // If this single transcript exceeds limit, write it to its own file
    if (transcript.length > MAX_CHARS_PER_FILE) {
      // Write any pending content first
      if (currentContent.length > 0) {
        writeChunk(currentContent);
        currentContent = "";
      }
      // Write the large transcript to its own file
      writeChunk(transcript, true);
      continue;
    }
    currentContent += (currentContent ? "\n\n" : "") + transcript;
  }
  // Write remaining content
  if (currentContent.length > 0) {
    writeChunk(currentContent);
  }

  console.log(
    `\nCreated ${outputFiles.length} transcript file(s) in ${outputDir}`,
  );

  if (!analyzeFlag) {
    console.log(
      "\nRun with --analyze to spawn pi subagents for pattern analysis.",
    );
    return;
  }

  // Find AGENTS.md files to compare against
  const globalAgentsMd = join(homedir(), ".pi/agent/AGENTS.md");
  const localAgentsMd = join(cwd, "AGENTS.md");
  const agentsMdFiles = [globalAgentsMd, localAgentsMd].filter(existsSync);
  const agentsMdSection =
    agentsMdFiles.length > 0
      ? `STEP 1: Read the existing AGENTS.md file(s) to see what's already encoded:\n${agentsMdFiles.join("\n")}\n\nSTEP 2: `
      : "";

  // Spawn subagents to analyze each file
  const analysisPrompt = `You are analyzing session transcripts to identify recurring user instructions that could be automated.
${agentsMdSection}READING THE TRANSCRIPT:
The transcript file is large. Read it in chunks of 1000 lines using offset/limit parameters:
1. First: read with limit=1000 (lines 1-1000)
2. Then: read with offset=1001, limit=1000 (lines 1001-2000)
3. Continue incrementing offset by 1000 until you reach the end
4. Only after reading the ENTIRE file, perform the analysis and write the summary
ANALYSIS TASK:
Look for patterns where the user repeatedly gives similar instructions. These could become:
- AGENTS.md entries: coding style rules, behavior guidelines, project conventions
- Skills: multi-step workflows with external tools (search, browser, APIs)
- Prompt templates: reusable prompts for common tasks
Compare each pattern against the existing AGENTS.md content to determine if it's NEW or EXISTING.
OUTPUT FORMAT (strict):
Write a file with exactly this structure. Use --- as separator between patterns.
PATTERN: <short descriptive name>
STATUS: NEW | EXISTING
TYPE: agents-md | skill | prompt-template
FREQUENCY: <number of times observed>
EVIDENCE:
- "<exact quote 1>"
- "<exact quote 2>"
- "<exact quote 3>"
DRAFT:
<proposed content for AGENTS.md entry, SKILL.md, or prompt template>
---
Rules:
- Only include patterns that appear 2+ times
- STATUS is NEW if not in AGENTS.md, EXISTING if already covered
- EVIDENCE must contain exact quotes from the transcripts
- DRAFT must be ready-to-use content
- If no patterns found, write "NO PATTERNS FOUND"
- Do not include any other text outside this format`;

  console.log("\nSpawning subagents for analysis...");
  for (const file of outputFiles) {
    const summaryFile = file.replace(".txt", ".summary.txt");
    const filePath = join(outputDir, file);
    const summaryPath = join(outputDir, summaryFile);
    const fileContent = readFileSync(filePath, "utf8");
    const fileSize = fileContent.length;
    console.log(`Analyzing ${file} (${fileSize} chars)...`);
    const lineCount = fileContent.split("\n").length;
    const fullPrompt = `${analysisPrompt}\n\nThe file ${filePath} has ${lineCount} lines. Read it in full using chunked reads, then write your analysis to ${summaryPath}`;
    const result = await runSubagent(fullPrompt, outputDir);
    if (result.success && existsSync(summaryPath)) {
      console.log(chalk.green(` -> ${summaryFile}`));
    } else if (result.success) {
      console.error(
        chalk.yellow(` Agent finished but did not write ${summaryFile}`),
      );
    } else {
      console.error(chalk.red(` Failed to analyze ${file}`));
    }
  }

  // Collect all created summary files
  const summaryFiles = readdirSync(outputDir)
    .filter((f) => f.endsWith(".summary.txt"))
    .sort();
  console.log(`\n=== Individual Analysis Complete ===`);
  console.log(`Created ${summaryFiles.length} summary files`);
  if (summaryFiles.length === 0) {
    console.log(
      chalk.yellow("No summary files created. Nothing to aggregate."),
    );
    return;
  }

  // Final aggregation step
  console.log("\nAggregating findings into final summary...");
  const summaryPaths = summaryFiles.map((f) => join(outputDir, f)).join("\n");
  const finalSummaryPath = join(outputDir, "FINAL-SUMMARY.txt");
  const aggregationPrompt = `You are aggregating pattern analysis results from multiple summary files.
STEP 1: Read the existing AGENTS.md file(s) to understand what patterns are already encoded:
${agentsMdFiles.length > 0 ? agentsMdFiles.join("\n") : "(no AGENTS.md files found)"}
STEP 2: Read ALL of the following summary files:
${summaryPaths}
STEP 3: Create a consolidated final summary that:
1. Merges duplicate patterns (same pattern found in multiple files)
2. Ranks patterns by total frequency across all files
3. Groups by status (NEW first, then EXISTING) and type
4. Provides the best/most complete DRAFT for each unique pattern
5. Verify STATUS against AGENTS.md content (pattern may be marked NEW in summaries but actually exists)
OUTPUT FORMAT (strict):
Write the final summary with this structure:
# NEW PATTERNS (not yet in AGENTS.md)
## AGENTS.MD: <pattern name>
Total Frequency: <sum across all files>
Evidence:
- "<best quotes>"
Draft:
<consolidated draft>
## SKILL: <pattern name>
...
## PROMPT-TEMPLATE: <pattern name>
...
---
# EXISTING PATTERNS (already in AGENTS.md, for reference)
## <pattern name>
Total Frequency: <N>
Already covered by: <quote relevant section from AGENTS.md>
---
# SUMMARY
- New patterns to add: <N>
- Already covered: <N>
- Top 3 new patterns by frequency: <list>
Write the final summary to ${finalSummaryPath}`;
  const aggregateResult = await runSubagent(aggregationPrompt, outputDir);
  if (aggregateResult.success && existsSync(finalSummaryPath)) {
    console.log(chalk.green(`\n=== Final Summary Created ===`));
    console.log(chalk.green(` ${finalSummaryPath}`));
  } else if (aggregateResult.success) {
    console.error(
      chalk.yellow(`Agent finished but did not write final summary`),
    );
  } else {
    console.error(chalk.red(`Failed to create final summary`));
  }
}

main().catch(console.error);

102
scripts/sync-versions.js Normal file
View file

@ -0,0 +1,102 @@
#!/usr/bin/env node
/**
 * Syncs ALL @mariozechner/* package dependency versions to match their current versions.
 * This ensures lockstep versioning across the monorepo.
 */
import { readFileSync, writeFileSync, readdirSync } from "fs";
import { join } from "path";

// Every immediate subdirectory of packages/ is treated as a workspace package.
const packagesDir = join(process.cwd(), "packages");
const packageDirs = readdirSync(packagesDir, { withFileTypes: true })
  .filter((dirent) => dirent.isDirectory())
  .map((dirent) => dirent.name);
// Read all package.json files and build version map
// packages: dir name -> { path, data (parsed manifest) }
// versionMap: package name -> its current version
const packages = {};
const versionMap = {};
for (const dir of packageDirs) {
  const pkgPath = join(packagesDir, dir, "package.json");
  try {
    const pkg = JSON.parse(readFileSync(pkgPath, "utf8"));
    packages[dir] = { path: pkgPath, data: pkg };
    versionMap[pkg.name] = pkg.version;
  } catch (e) {
    // A directory without a readable package.json is reported but not fatal.
    console.error(`Failed to read ${pkgPath}:`, e.message);
  }
}

console.log("Current versions:");
for (const [name, version] of Object.entries(versionMap).sort()) {
  console.log(` ${name}: ${version}`);
}

// Verify all versions are the same (lockstep)
const versions = new Set(Object.values(versionMap));
if (versions.size > 1) {
  console.error("\n❌ ERROR: Not all packages have the same version!");
  console.error("Expected lockstep versioning. Run one of:");
  console.error(" npm run version:patch");
  console.error(" npm run version:minor");
  console.error(" npm run version:major");
  process.exit(1);
}
console.log("\n✅ All packages at same version (lockstep)");
// Update all inter-package dependencies so every internal dependency is
// pinned to the current lockstep version as "^<version>".
let totalUpdates = 0;

/**
 * Rewrite the internal deps in one section of a manifest to the lockstep
 * version. External (non-monorepo) dependencies are left untouched.
 *
 * Fix vs. original: the change log line printed the old and new versions
 * with no separator between them ("^1.0.0^1.0.1"); an arrow is now
 * printed. The original also duplicated this loop verbatim for
 * dependencies and devDependencies.
 *
 * @param {object} pkgData - parsed package.json (mutated in place)
 * @param {"dependencies"|"devDependencies"} section
 * @returns {number} count of entries changed
 */
function syncSection(pkgData, section) {
  const deps = pkgData[section];
  if (!deps) return 0;
  let changed = 0;
  for (const [depName, currentVersion] of Object.entries(deps)) {
    if (!versionMap[depName]) continue; // not one of ours
    const newVersion = `^${versionMap[depName]}`;
    if (currentVersion === newVersion) continue;
    const suffix = section === "devDependencies" ? " (devDependencies)" : "";
    console.log(`\n${pkgData.name}:`);
    console.log(` ${depName}: ${currentVersion} → ${newVersion}${suffix}`);
    deps[depName] = newVersion;
    changed++;
  }
  return changed;
}

for (const pkg of Object.values(packages)) {
  const updates =
    syncSection(pkg.data, "dependencies") +
    syncSection(pkg.data, "devDependencies");
  totalUpdates += updates;
  // Only rewrite manifests that actually changed; keep tab indentation.
  if (updates > 0) {
    writeFileSync(pkg.path, JSON.stringify(pkg.data, null, "\t") + "\n");
  }
}

if (totalUpdates === 0) {
  console.log("\nAll inter-package dependencies already in sync.");
} else {
  console.log(`\n✅ Updated ${totalUpdates} dependency version(s)`);
}