mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-15 08:03:39 +00:00
Add --mode flag for CLI output control (text/json/rpc)
- text mode: only outputs final assistant message text (default)
- json mode: streams all events as JSON (same as session manager writes)
- rpc mode: JSON output + listens for JSON input on stdin for headless operation
- Suppress informational messages in json/rpc modes
This commit is contained in:
parent
c75f53f6f2
commit
68092ccf01
8 changed files with 800 additions and 286 deletions
14
package-lock.json
generated
14
package-lock.json
generated
|
|
@ -500,6 +500,13 @@
|
|||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/diff": {
|
||||
"version": "7.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@types/diff/-/diff-7.0.2.tgz",
|
||||
"integrity": "sha512-JSWRMozjFKsGlEjiiKajUjIJVKuKdE3oVy2DNtK+fUo8q82nhFZ2CPQwicAIkXrofahDXrWJ7mjelvZphMS98Q==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/estree": {
|
||||
"version": "1.0.8",
|
||||
"dev": true,
|
||||
|
|
@ -1102,7 +1109,8 @@
|
|||
},
|
||||
"node_modules/diff": {
|
||||
"version": "8.0.2",
|
||||
"dev": true,
|
||||
"resolved": "https://registry.npmjs.org/diff/-/diff-8.0.2.tgz",
|
||||
"integrity": "sha512-sSuxWU5j5SR9QQji/o2qMvqRNYRDOcBTgsJ/DeCf4iSN4gW+gNMXM7wFIP+fdXZxoNiAnHUTGjCr+TSWXdRDKg==",
|
||||
"license": "BSD-3-Clause",
|
||||
"engines": {
|
||||
"node": ">=0.3.1"
|
||||
|
|
@ -3262,12 +3270,13 @@
|
|||
},
|
||||
"packages/coding-agent": {
|
||||
"name": "@mariozechner/coding-agent",
|
||||
"version": "0.6.0",
|
||||
"version": "0.6.1",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@mariozechner/pi-agent": "^0.6.0",
|
||||
"@mariozechner/pi-ai": "^0.6.0",
|
||||
"chalk": "^5.5.0",
|
||||
"diff": "^8.0.2",
|
||||
"glob": "^11.0.3"
|
||||
},
|
||||
"bin": {
|
||||
|
|
@ -3275,6 +3284,7 @@
|
|||
"pi": "dist/cli.js"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/diff": "^7.0.2",
|
||||
"@types/node": "^24.3.0",
|
||||
"typescript": "^5.7.3",
|
||||
"vitest": "^3.2.4"
|
||||
|
|
|
|||
File diff suppressed because one or more lines are too long
119
packages/calculator.py
Normal file
119
packages/calculator.py
Normal file
|
|
@ -0,0 +1,119 @@
|
|||
#!/usr/bin/env python
|
||||
"""
|
||||
An advanced calculator module with comprehensive operations.
|
||||
Calculator module for mathematical operations.
|
||||
"""
|
||||
|
||||
import math
|
||||
|
||||
class Calculator:
    """Calculator with basic arithmetic, power/modulo, and trig helpers.

    Successful binary arithmetic operations are recorded in ``history`` as
    human-readable ``"<expression> = <result>"`` strings.
    """

    def __init__(self):
        # Kept for backward compatibility with existing callers; the
        # operations below do not currently update it.
        self.result = 0
        # Chronological log of "<expression> = <result>" entries.
        self.history = []

    def _record(self, operation, result):
        """Record operation in history and return the result unchanged."""
        self.history.append(f"{operation} = {result}")
        return result

    def add(self, a, b):
        """Add two numbers together."""
        result = a + b
        return self._record(f"{a} + {b}", result)

    def subtract(self, a, b):
        """Subtract second number from first."""
        result = a - b
        return self._record(f"{a} - {b}", result)

    def multiply(self, a, b):
        """Multiply two numbers together."""
        result = a * b
        return self._record(f"{a} * {b}", result)

    def divide(self, a, b):
        """Divide a by b.

        Raises:
            ValueError: If ``b`` is zero.
        """
        if b == 0:
            raise ValueError("Cannot divide by zero")
        # Recorded for consistency with add/subtract/multiply.
        return self._record(f"{a} / {b}", a / b)

    def power(self, base, exponent):
        """Raise base to the power of exponent."""
        # Recorded for consistency with the other binary operations.
        return self._record(f"{base} ** {exponent}", base ** exponent)

    def modulo(self, a, b):
        """Return the remainder of a divided by b.

        Raises:
            ValueError: If ``b`` is zero.
        """
        if b == 0:
            raise ValueError("Cannot modulo by zero")
        # Recorded for consistency with the other binary operations.
        return self._record(f"{a} % {b}", a % b)

    def square_root(self, n):
        """Calculate the square root of n.

        Raises:
            ValueError: If ``n`` is negative.
        """
        if n < 0:
            raise ValueError("Cannot calculate square root of negative number")
        return math.sqrt(n)

    def absolute(self, n):
        """Return the absolute value of n."""
        return abs(n)

    def sin(self, angle_degrees):
        """Calculate sine of angle in degrees."""
        radians = math.radians(angle_degrees)
        return math.sin(radians)

    def cos(self, angle_degrees):
        """Calculate cosine of angle in degrees."""
        radians = math.radians(angle_degrees)
        return math.cos(radians)

    def factorial(self, n):
        """Calculate factorial of n (truncated to int).

        Raises:
            ValueError: If ``n`` is negative.
        """
        if n < 0:
            raise ValueError("Factorial not defined for negative numbers")
        return math.factorial(int(n))

    def get_history(self):
        """Return calculation history."""
        return self.history

    def clear_history(self):
        """Clear calculation history."""
        self.history = []
|
||||
|
||||
def main():
    """Run a demo of every Calculator operation and print the results."""
    demo = Calculator()
    rule = "=" * 50

    print(rule)
    print("🧮 ADVANCED CALCULATOR DEMO 🧮".center(50))
    print(rule)

    # Basic arithmetic demonstrations
    print("\n📊 Basic Operations:")
    print(f" Addition: 5 + 3 = {demo.add(5, 3)}")
    print(f" Subtraction: 10 - 4 = {demo.subtract(10, 4)}")
    print(f" Multiplication: 6 * 7 = {demo.multiply(6, 7)}")
    print(f" Division: 20 / 4 = {demo.divide(20, 4)}")

    # Power, modulo, root, and absolute value
    print("\n🚀 Advanced Operations:")
    print(f" Power: 2 ^ 8 = {demo.power(2, 8)}")
    print(f" Modulo: 17 % 5 = {demo.modulo(17, 5)}")
    print(f" Square Root: √144 = {demo.square_root(144)}")
    print(f" Absolute: |-42| = {demo.absolute(-42)}")

    # Trigonometry and factorial
    print("\n📐 Trigonometry & Special:")
    print(f" Sin(30°): = {demo.sin(30):.4f}")
    print(f" Cos(60°): = {demo.cos(60):.4f}")
    print(f" Factorial(5): 5! = {demo.factorial(5)}")

    # Dump the recorded history, numbered from 1
    print("\n📜 Calculation History:")
    for index, record in enumerate(demo.get_history(), 1):
        print(f" {index}. {record}")

    print("\n" + rule)


if __name__ == "__main__":
    main()
|
||||
|
|
@ -1,268 +1,199 @@
|
|||
# @mariozechner/pi-coding-agent
|
||||
# @mariozechner/coding-agent
|
||||
|
||||
AI coding assistant with file system access, code execution, and precise editing tools. Built on pi-ai for tool-enabled LLM workflows.
|
||||
Interactive CLI coding assistant powered by multiple LLM providers. Chat with AI models that can read files, execute commands, and make precise edits to your codebase.
|
||||
|
||||
**Note**: Designed for local development environments. Use with caution—tools can modify your filesystem.
|
||||
**Note**: This tool can modify your filesystem. Use with caution in production environments.
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
npm install @mariozechner/pi-coding-agent
|
||||
npm install -g @mariozechner/coding-agent
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
```typescript
|
||||
import { getModel, CodingAgent, read, bash, edit, write } from '@mariozechner/pi-coding-agent';
|
||||
|
||||
// Define tools for the agent
|
||||
const tools = [
|
||||
read({ description: 'Read file contents (text or images)' }),
|
||||
bash({ description: 'Execute bash commands (ls, grep, etc.)' }),
|
||||
edit({ description: 'Edit files by replacing exact text matches' }),
|
||||
write({ description: 'Write or overwrite files, creates directories' })
|
||||
];
|
||||
|
||||
// Create coding agent with model
|
||||
const agent = new CodingAgent({
|
||||
model: getModel('openai', 'gpt-4o-mini'),
|
||||
tools,
|
||||
systemPrompt: 'You are an expert coding assistant. Use tools to read/edit files, run commands. Be precise and safe.'
|
||||
});
|
||||
|
||||
// Run agent with a task
|
||||
const task = { role: 'user', content: 'Create a simple Express server in src/server.ts' };
|
||||
|
||||
const stream = agent.run(task);
|
||||
|
||||
for await (const event of stream) {
|
||||
switch (event.type) {
|
||||
case 'agent_start':
|
||||
console.log('Agent started');
|
||||
break;
|
||||
case 'message_update':
|
||||
if (event.message.role === 'assistant') {
|
||||
console.log('Agent:', event.message.content.map(c => c.type === 'text' ? c.text : '[Tool Call]').join(''));
|
||||
}
|
||||
break;
|
||||
case 'tool_execution_start':
|
||||
console.log(`Executing: ${event.toolName}(${JSON.stringify(event.args)})`);
|
||||
break;
|
||||
case 'tool_execution_end':
|
||||
if (event.isError) {
|
||||
console.error('Tool error:', event.result);
|
||||
} else {
|
||||
console.log('Tool result:', event.result.output);
|
||||
}
|
||||
break;
|
||||
case 'agent_end':
|
||||
console.log('Task complete');
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Get final messages
|
||||
const messages = await stream.result();
|
||||
```
|
||||
|
||||
## Tools
|
||||
|
||||
The agent uses specialized tools for coding tasks. All tools are type-safe with TypeBox schemas and validated at runtime.
|
||||
|
||||
### File Reading
|
||||
|
||||
```typescript
|
||||
import { read } from '@mariozechner/pi-coding-agent';
|
||||
|
||||
const readTool = read({
|
||||
description: 'Read file contents',
|
||||
parameters: Type.Object({
|
||||
path: Type.String({ description: 'File path (relative or absolute)' })
|
||||
})
|
||||
});
|
||||
|
||||
// In agent context
|
||||
const context = {
|
||||
systemPrompt: 'You are a coding assistant.',
|
||||
messages: [{ role: 'user', content: 'What\'s in package.json?' }],
|
||||
tools: [readTool]
|
||||
};
|
||||
```
|
||||
|
||||
### Bash Execution
|
||||
|
||||
```typescript
|
||||
import { bash } from '@mariozechner/pi-coding-agent';
|
||||
|
||||
const bashTool = bash({
|
||||
description: 'Run bash commands',
|
||||
parameters: Type.Object({
|
||||
command: Type.String({ description: 'Bash command to execute' })
|
||||
}),
|
||||
timeout: 30000 // 30s default
|
||||
});
|
||||
|
||||
// Example: List files
|
||||
// Agent calls: bash({ command: 'ls -la' })
|
||||
// Returns stdout/stderr
|
||||
```
|
||||
|
||||
### Precise Editing
|
||||
|
||||
For surgical code changes without overwriting entire files:
|
||||
|
||||
```typescript
|
||||
import { edit } from '@mariozechner/pi-coding-agent';
|
||||
|
||||
const editTool = edit({
|
||||
description: 'Replace exact text in files',
|
||||
parameters: Type.Object({
|
||||
path: Type.String({ description: 'File path' }),
|
||||
oldText: Type.String({ description: 'Exact text to find (including whitespace)' }),
|
||||
newText: Type.String({ description: 'Replacement text' })
|
||||
})
|
||||
});
|
||||
|
||||
// Example: Update import in src/index.ts
|
||||
// edit({ path: 'src/index.ts', oldText: 'import { foo }', newText: 'import { foo, bar }' })
|
||||
```
|
||||
|
||||
### File Writing
|
||||
|
||||
```typescript
|
||||
import { write } from '@mariozechner/pi-coding-agent';
|
||||
|
||||
const writeTool = write({
|
||||
description: 'Write file content',
|
||||
parameters: Type.Object({
|
||||
path: Type.String({ description: 'File path' }),
|
||||
content: Type.String({ description: 'File content' })
|
||||
})
|
||||
});
|
||||
|
||||
// Creates directories if needed, overwrites existing files
|
||||
// write({ path: 'src/utils/helper.ts', content: 'export const helper = () => { ... };' })
|
||||
```
|
||||
|
||||
### Custom Tools
|
||||
|
||||
Extend with custom tools using the pi-ai AgentTool interface:
|
||||
|
||||
```typescript
|
||||
import { Type, AgentTool } from '@mariozechner/pi-ai';
|
||||
|
||||
const gitTool: AgentTool<typeof Type.Object({ command: Type.String() })> = {
|
||||
name: 'git',
|
||||
description: 'Run git commands',
|
||||
parameters: Type.Object({ command: Type.String() }),
|
||||
execute: async (toolCallId, args) => {
|
||||
const { stdout } = await exec(`git ${args.command}`);
|
||||
return { output: stdout };
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
## Agent Workflow
|
||||
|
||||
The coding agent runs in loops until completion:
|
||||
|
||||
1. **Task Input**: User provides coding task (e.g., "Implement user auth")
|
||||
2. **Planning**: Agent may think/reason (if model supports)
|
||||
3. **Tool Calls**: Agent reads files, runs commands, proposes edits
|
||||
4. **Execution**: Tools run safely; results fed back to agent
|
||||
5. **Iteration**: Agent reviews outputs, makes adjustments
|
||||
6. **Completion**: Agent signals done or asks for clarification
|
||||
|
||||
### Streaming Events
|
||||
|
||||
Monitor progress with detailed events:
|
||||
|
||||
- `agent_start` / `agent_end`: Session boundaries
|
||||
- `turn_start` / `turn_end`: LLM-tool cycles
|
||||
- `message_update`: Streaming assistant responses and tool calls
|
||||
- `tool_execution_start` / `tool_execution_end`: Tool runs with args/results
|
||||
- `error`: Validation failures or execution errors
|
||||
|
||||
### Safety Features
|
||||
|
||||
- **Read-Only Mode**: Set `readOnly: true` to disable writes/edits
|
||||
- **Path Validation**: Restrict to project directory (configurable)
|
||||
- **Timeout**: 30s default for bash commands
|
||||
- **Validation**: All tool args validated against schemas
|
||||
- **Dry Run**: Log actions without executing (for review)
|
||||
|
||||
```typescript
|
||||
const agent = new CodingAgent({
|
||||
model: getModel('openai', 'gpt-4o-mini'),
|
||||
tools,
|
||||
readOnly: process.env.NODE_ENV === 'production', // Disable writes in prod
|
||||
allowedPaths: ['./src', './test'], // Restrict file access
|
||||
dryRun: true // Log without executing
|
||||
});
|
||||
```
|
||||
|
||||
## Example Tasks
|
||||
|
||||
```typescript
|
||||
// Refactor code
|
||||
agent.run({ role: 'user', content: 'Convert src/index.ts to use async/await instead of callbacks' });
|
||||
|
||||
// Debug
|
||||
agent.run({ role: 'user', content: 'Fix the TypeScript error in test/utils.test.ts: "Cannot find name \'describe\'"' });
|
||||
|
||||
// New feature
|
||||
agent.run({ role: 'user', content: 'Add a REST API endpoint to src/server.ts for /users with GET/POST' });
|
||||
|
||||
// Analyze
|
||||
agent.run({ role: 'user', content: 'Review src/ and suggest performance improvements' });
|
||||
```
|
||||
|
||||
## Integration with pi-ai
|
||||
|
||||
Built on `@mariozechner/pi-ai`. Use existing Context/Agent APIs:
|
||||
|
||||
```typescript
|
||||
import { CodingAgent, Context } from '@mariozechner/pi-coding-agent';
|
||||
import { getModel } from '@mariozechner/pi-ai';
|
||||
|
||||
const context: Context = {
|
||||
systemPrompt: 'Expert TypeScript developer.',
|
||||
messages: [
|
||||
{ role: 'user', content: 'Optimize this loop in src/data.ts' },
|
||||
{ role: 'assistant', content: [{ type: 'text', text: 'First, let me read the file...' }] }
|
||||
],
|
||||
tools: [read({}), edit({})]
|
||||
};
|
||||
|
||||
const agent = new CodingAgent({ model: getModel('anthropic', 'claude-3-5-sonnet-20240620'), tools: context.tools });
|
||||
const continuation = await agent.continue(context); // Resume from existing context
|
||||
```
|
||||
|
||||
## Environment
|
||||
|
||||
Set up your working directory (current dir becomes project root):
|
||||
|
||||
```bash
|
||||
# Clone or navigate to your project
|
||||
cd my-project
|
||||
# Set your API key (see API Keys section)
|
||||
export ANTHROPIC_API_KEY=sk-ant-...
|
||||
|
||||
# Install and run agent
|
||||
npm install @mariozechner/pi-coding-agent
|
||||
node -e "
|
||||
const { CodingAgent } = require('@mariozechner/pi-coding-agent');
|
||||
const agent = new CodingAgent({ model: getModel('openai', 'gpt-4o-mini') });
|
||||
await agent.run({ role: 'user', content: process.argv[1] }, { cwd: process.cwd() });
|
||||
" "Implement fizzbuzz in src/index.ts"
|
||||
# Start the interactive CLI
|
||||
pi
|
||||
|
||||
# Or use the full command name
|
||||
coding-agent
|
||||
```
|
||||
|
||||
Once in the CLI, you can chat with the AI:
|
||||
|
||||
```
|
||||
You: Create a simple Express server in src/server.ts
|
||||
```
|
||||
|
||||
The agent will use its tools to read, write, and edit files as needed.
|
||||
|
||||
## API Keys
|
||||
|
||||
Uses pi-ai's key management:
|
||||
The CLI supports multiple LLM providers. Set the appropriate environment variable for your chosen provider:
|
||||
|
||||
```bash
|
||||
OPENAI_API_KEY=sk-...
|
||||
ANTHROPIC_API_KEY=sk-ant-...
|
||||
# etc.
|
||||
# Anthropic (Claude)
|
||||
export ANTHROPIC_API_KEY=sk-ant-...
|
||||
# Or use OAuth token (retrieved via: claude setup-token)
|
||||
export ANTHROPIC_OAUTH_TOKEN=...
|
||||
|
||||
# OpenAI (GPT)
|
||||
export OPENAI_API_KEY=sk-...
|
||||
|
||||
# Google (Gemini)
|
||||
export GEMINI_API_KEY=...
|
||||
|
||||
# Groq
|
||||
export GROQ_API_KEY=gsk_...
|
||||
|
||||
# Cerebras
|
||||
export CEREBRAS_API_KEY=csk-...
|
||||
|
||||
# xAI (Grok)
|
||||
export XAI_API_KEY=xai-...
|
||||
|
||||
# OpenRouter
|
||||
export OPENROUTER_API_KEY=sk-or-...
|
||||
|
||||
# ZAI
|
||||
export ZAI_API_KEY=...
|
||||
```
|
||||
|
||||
If no API key is set, the CLI will prompt you to configure one on first run.
|
||||
|
||||
## Slash Commands
|
||||
|
||||
The CLI supports several commands to control its behavior:
|
||||
|
||||
### /model
|
||||
|
||||
Switch models mid-session. Opens an interactive selector where you can type to search (by provider or model name), use arrow keys to navigate, Enter to select, or Escape to cancel.
|
||||
|
||||
### /thinking
|
||||
|
||||
Adjust thinking/reasoning level for supported models (Claude Sonnet 4, GPT-5, Gemini 2.5). Opens an interactive selector where you can use arrow keys to navigate, Enter to select, or Escape to cancel.
|
||||
|
||||
### /export [filename]
|
||||
|
||||
Export the current session to a self-contained HTML file:
|
||||
|
||||
```
|
||||
/export # Auto-generates filename
|
||||
/export my-session.html # Custom filename
|
||||
```
|
||||
|
||||
The HTML file includes the full conversation with syntax highlighting and is viewable in any browser.
|
||||
|
||||
## Image Support
|
||||
|
||||
Send images to vision-capable models by providing file paths:
|
||||
|
||||
```
|
||||
You: What is in this screenshot? /path/to/image.png
|
||||
```
|
||||
|
||||
Supported formats: `.jpg`, `.jpeg`, `.png`, `.gif`, `.webp`, `.bmp`, `.svg`
|
||||
|
||||
The image will be automatically encoded and sent with your message. Vision-capable models include:
|
||||
- GPT-4o, GPT-4o-mini (OpenAI)
|
||||
- Claude 3.5 Sonnet, Claude 3.5 Haiku (Anthropic)
|
||||
- Gemini 2.5 Flash, Gemini 2.5 Pro (Google)
|
||||
|
||||
## Available Tools
|
||||
|
||||
The agent has access to four core tools for working with your codebase:
|
||||
|
||||
### read
|
||||
|
||||
Read file contents. Supports text files and images (jpg, png, gif, webp, bmp, svg). Images are sent as attachments. For text files, defaults to first 2000 lines. Use offset/limit parameters for large files. Lines longer than 2000 characters are truncated.
|
||||
|
||||
### write
|
||||
|
||||
Write content to a file. Creates the file if it doesn't exist, overwrites if it does. Automatically creates parent directories.
|
||||
|
||||
### edit
|
||||
|
||||
Edit a file by replacing exact text. The oldText must match exactly (including whitespace). Use this for precise, surgical edits. Returns an error if the text appears multiple times or isn't found.
|
||||
|
||||
### bash
|
||||
|
||||
Execute a bash command in the current working directory. Returns stdout and stderr. Commands run with a 30 second timeout.
|
||||
|
||||
## Session Management
|
||||
|
||||
Sessions are automatically saved in `~/.pi/agent/sessions/` organized by working directory. Each session is stored as a JSONL file with a unique timestamp-based ID.
|
||||
|
||||
To continue the most recent session:
|
||||
|
||||
```bash
|
||||
pi --continue
|
||||
# or
|
||||
pi -c
|
||||
```
|
||||
|
||||
To browse and select from past sessions:
|
||||
|
||||
```bash
|
||||
pi --resume
|
||||
# or
|
||||
pi -r
|
||||
```
|
||||
|
||||
This opens an interactive session selector where you can:
|
||||
- Type to search through session messages
|
||||
- Use arrow keys to navigate the list
|
||||
- Press Enter to resume a session
|
||||
- Press Escape to cancel
|
||||
|
||||
Sessions include all conversation messages, tool calls and results, model switches, and thinking level changes.
|
||||
|
||||
## CLI Options
|
||||
|
||||
```bash
|
||||
pi [options] [messages...]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
**--provider <name>**
|
||||
Provider name. Available: `anthropic`, `openai`, `google`, `xai`, `groq`, `cerebras`, `openrouter`, `zai`. Default: `anthropic`
|
||||
|
||||
**--model <id>**
|
||||
Model ID. Default: `claude-sonnet-4-5`
|
||||
|
||||
**--api-key <key>**
|
||||
API key (overrides environment variables)
|
||||
|
||||
**--system-prompt <text>**
|
||||
Custom system prompt (overrides default coding assistant prompt)
|
||||
|
||||
**--continue, -c**
|
||||
Continue the most recent session
|
||||
|
||||
**--resume, -r**
|
||||
Select a session to resume (opens interactive selector)
|
||||
|
||||
**--help, -h**
|
||||
Show help message
|
||||
|
||||
### Examples
|
||||
|
||||
```bash
|
||||
# Start interactive mode
|
||||
pi
|
||||
|
||||
# Single message mode
|
||||
pi "List all .ts files in src/"
|
||||
|
||||
# Continue previous session
|
||||
pi -c "What did we discuss?"
|
||||
|
||||
# Use different model
|
||||
pi --provider openai --model gpt-4o "Help me refactor this code"
|
||||
```
|
||||
|
||||
## License
|
||||
|
|
@ -271,4 +202,5 @@ MIT
|
|||
|
||||
## See Also
|
||||
|
||||
- [@mariozechner/pi-ai](https://www.npmjs.com/package/@mariozechner/pi-ai): Core LLM toolkit
|
||||
- [@mariozechner/pi-ai](https://www.npmjs.com/package/@mariozechner/pi-ai): Core LLM toolkit with multi-provider support
|
||||
- [@mariozechner/pi-agent](https://www.npmjs.com/package/@mariozechner/pi-agent): Agent framework with tool execution
|
||||
|
|
|
|||
|
|
@ -4,8 +4,7 @@
|
|||
"description": "Coding agent CLI with read, bash, edit, write tools and session management",
|
||||
"type": "module",
|
||||
"bin": {
|
||||
"pi": "dist/cli.js",
|
||||
"coding-agent": "dist/cli.js"
|
||||
"pi": "dist/cli.js"
|
||||
},
|
||||
"main": "./dist/index.js",
|
||||
"types": "./dist/index.d.ts",
|
||||
|
|
@ -24,9 +23,11 @@
|
|||
"@mariozechner/pi-agent": "^0.6.0",
|
||||
"@mariozechner/pi-ai": "^0.6.0",
|
||||
"chalk": "^5.5.0",
|
||||
"diff": "^8.0.2",
|
||||
"glob": "^11.0.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/diff": "^7.0.2",
|
||||
"@types/node": "^24.3.0",
|
||||
"typescript": "^5.7.3",
|
||||
"vitest": "^3.2.4"
|
||||
|
|
|
|||
|
|
@ -27,6 +27,8 @@ const envApiKeyMap: Record<KnownProvider, string[]> = {
|
|||
zai: ["ZAI_API_KEY"],
|
||||
};
|
||||
|
||||
type Mode = "text" | "json" | "rpc";
|
||||
|
||||
interface Args {
|
||||
provider?: string;
|
||||
model?: string;
|
||||
|
|
@ -35,6 +37,7 @@ interface Args {
|
|||
continue?: boolean;
|
||||
resume?: boolean;
|
||||
help?: boolean;
|
||||
mode?: Mode;
|
||||
messages: string[];
|
||||
}
|
||||
|
||||
|
|
@ -48,6 +51,11 @@ function parseArgs(args: string[]): Args {
|
|||
|
||||
if (arg === "--help" || arg === "-h") {
|
||||
result.help = true;
|
||||
} else if (arg === "--mode" && i + 1 < args.length) {
|
||||
const mode = args[++i];
|
||||
if (mode === "text" || mode === "json" || mode === "rpc") {
|
||||
result.mode = mode;
|
||||
}
|
||||
} else if (arg === "--continue" || arg === "-c") {
|
||||
result.continue = true;
|
||||
} else if (arg === "--resume" || arg === "-r") {
|
||||
|
|
@ -79,6 +87,7 @@ ${chalk.bold("Options:")}
|
|||
--model <id> Model ID (default: gemini-2.5-flash)
|
||||
--api-key <key> API key (defaults to env vars)
|
||||
--system-prompt <text> System prompt (default: coding assistant prompt)
|
||||
--mode <mode> Output mode: text (default), json, or rpc
|
||||
--continue, -c Continue previous session
|
||||
--resume, -r Select a session to resume
|
||||
--help, -h Show this help
|
||||
|
|
@ -194,12 +203,26 @@ async function runInteractiveMode(agent: Agent, sessionManager: SessionManager,
|
|||
}
|
||||
}
|
||||
|
||||
async function runSingleShotMode(agent: Agent, sessionManager: SessionManager, messages: string[]): Promise<void> {
|
||||
for (const message of messages) {
|
||||
console.log(chalk.blue(`\n> ${message}\n`));
|
||||
await agent.prompt(message);
|
||||
async function runSingleShotMode(
|
||||
agent: Agent,
|
||||
_sessionManager: SessionManager,
|
||||
messages: string[],
|
||||
mode: "text" | "json",
|
||||
): Promise<void> {
|
||||
if (mode === "json") {
|
||||
// Subscribe to all events and output as JSON
|
||||
agent.subscribe((event) => {
|
||||
// Output event as JSON (same format as session manager)
|
||||
console.log(JSON.stringify(event));
|
||||
});
|
||||
}
|
||||
|
||||
// Print response
|
||||
for (const message of messages) {
|
||||
await agent.prompt(message);
|
||||
}
|
||||
|
||||
// In text mode, only output the final assistant message
|
||||
if (mode === "text") {
|
||||
const lastMessage = agent.state.messages[agent.state.messages.length - 1];
|
||||
if (lastMessage.role === "assistant") {
|
||||
for (const content of lastMessage.content) {
|
||||
|
|
@ -209,8 +232,40 @@ async function runSingleShotMode(agent: Agent, sessionManager: SessionManager, m
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
console.log(chalk.dim(`\nSession saved to: ${sessionManager.getSessionFile()}`));
|
||||
async function runRpcMode(agent: Agent, _sessionManager: SessionManager): Promise<void> {
|
||||
// Subscribe to all events and output as JSON
|
||||
agent.subscribe((event) => {
|
||||
console.log(JSON.stringify(event));
|
||||
});
|
||||
|
||||
// Listen for JSON input on stdin
|
||||
const readline = require("readline");
|
||||
const rl = readline.createInterface({
|
||||
input: process.stdin,
|
||||
output: process.stdout,
|
||||
terminal: false,
|
||||
});
|
||||
|
||||
rl.on("line", async (line: string) => {
|
||||
try {
|
||||
const input = JSON.parse(line);
|
||||
|
||||
// Handle different RPC commands
|
||||
if (input.type === "prompt" && input.message) {
|
||||
await agent.prompt(input.message);
|
||||
} else if (input.type === "abort") {
|
||||
agent.abort();
|
||||
}
|
||||
} catch (error: any) {
|
||||
// Output error as JSON
|
||||
console.log(JSON.stringify({ type: "error", error: error.message }));
|
||||
}
|
||||
});
|
||||
|
||||
// Keep process alive
|
||||
return new Promise(() => {});
|
||||
}
|
||||
|
||||
export async function main(args: string[]) {
|
||||
|
|
@ -295,11 +350,18 @@ export async function main(args: string[]) {
|
|||
}),
|
||||
});
|
||||
|
||||
// Determine mode early to know if we should print messages
|
||||
const isInteractive = parsed.messages.length === 0;
|
||||
const mode = parsed.mode || "text";
|
||||
const shouldPrintMessages = isInteractive || mode === "text";
|
||||
|
||||
// Load previous messages if continuing or resuming
|
||||
if (parsed.continue || parsed.resume) {
|
||||
const messages = sessionManager.loadMessages();
|
||||
if (messages.length > 0) {
|
||||
console.log(chalk.dim(`Loaded ${messages.length} messages from previous session`));
|
||||
if (shouldPrintMessages) {
|
||||
console.log(chalk.dim(`Loaded ${messages.length} messages from previous session`));
|
||||
}
|
||||
agent.replaceMessages(messages);
|
||||
}
|
||||
|
||||
|
|
@ -312,9 +374,13 @@ export async function main(args: string[]) {
|
|||
try {
|
||||
const restoredModel = getModel(savedProvider as any, savedModelId);
|
||||
agent.setModel(restoredModel);
|
||||
console.log(chalk.dim(`Restored model: ${savedModel}`));
|
||||
if (shouldPrintMessages) {
|
||||
console.log(chalk.dim(`Restored model: ${savedModel}`));
|
||||
}
|
||||
} catch (error: any) {
|
||||
console.error(chalk.yellow(`Warning: Could not restore model ${savedModel}: ${error.message}`));
|
||||
if (shouldPrintMessages) {
|
||||
console.error(chalk.yellow(`Warning: Could not restore model ${savedModel}: ${error.message}`));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -323,7 +389,9 @@ export async function main(args: string[]) {
|
|||
const thinkingLevel = sessionManager.loadThinkingLevel() as ThinkingLevel;
|
||||
if (thinkingLevel) {
|
||||
agent.setThinkingLevel(thinkingLevel);
|
||||
console.log(chalk.dim(`Restored thinking level: ${thinkingLevel}`));
|
||||
if (shouldPrintMessages) {
|
||||
console.log(chalk.dim(`Restored thinking level: ${thinkingLevel}`));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -343,12 +411,16 @@ export async function main(args: string[]) {
|
|||
}
|
||||
});
|
||||
|
||||
// Determine mode: interactive if no messages provided
|
||||
const isInteractive = parsed.messages.length === 0;
|
||||
|
||||
// Route to appropriate mode
|
||||
if (isInteractive) {
|
||||
// No mode flag in interactive - always use TUI
|
||||
await runInteractiveMode(agent, sessionManager, VERSION);
|
||||
} else {
|
||||
await runSingleShotMode(agent, sessionManager, parsed.messages);
|
||||
// CLI mode with messages
|
||||
if (mode === "rpc") {
|
||||
await runRpcMode(agent, sessionManager);
|
||||
} else {
|
||||
await runSingleShotMode(agent, sessionManager, parsed.messages, mode);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
50
packages/sample.txt
Normal file
50
packages/sample.txt
Normal file
|
|
@ -0,0 +1,50 @@
|
|||
Line 1: The beginning of our story
|
||||
Line 2: Once upon a time
|
||||
Line 3: In a land far away
|
||||
Line 4: There lived a brave knight
|
||||
Line 5: Who sought adventure daily
|
||||
Line 6: Mountains rose in the distance
|
||||
Line 7: Rivers flowed through valleys
|
||||
Line 8: Birds sang in the morning
|
||||
Line 9: The sun rose over the horizon
|
||||
Line 10: Illuminating the world with warmth
|
||||
Line 11: People gathered in the marketplace
|
||||
Line 12: Trading goods and stories
|
||||
Line 13: Children played in the streets
|
||||
Line 14: Laughter echoed through the town
|
||||
Line 15: Old wise men sat watching
|
||||
Line 16: Remembering days gone by
|
||||
Line 17: The castle stood tall and proud
|
||||
Line 18: Guarding the kingdom below
|
||||
Line 19: Flags waved in the breeze
|
||||
Line 20: Colors bright and bold
|
||||
Line 21: Halfway through our tale
|
||||
Line 22: The plot begins to thicken
|
||||
Line 23: A terrible storm approaches quickly
|
||||
Line 24: Lightning strikes and thunder roars
|
||||
Line 25: Our hero stands ready for combat
|
||||
Line 26: Armor gleaming in the light
|
||||
Line 27: Sword sharp and ready
|
||||
Line 28: Shield painted with his crest
|
||||
Line 29: He rides out to face danger
|
||||
Line 30: Determined and brave
|
||||
Line 31: The journey takes him far
|
||||
Line 32: Through forests deep and dark
|
||||
Line 33: Across bridges old and creaky
|
||||
Line 34: Past caverns filled with ancient magic
|
||||
Line 35: Along cliffs steep and dangerous
|
||||
Line 36: Through storms and wind and rain
|
||||
Line 37: He never loses hope
|
||||
Line 38: His quest drives him forward
|
||||
Line 39: Finally he reaches his goal
|
||||
Line 40: The dragon's lair appears
|
||||
Line 41: Smoke rises from within
|
||||
Line 42: The ground trembles beneath
|
||||
Line 43: A roar shakes the very air
|
||||
Line 44: The battle begins at last
|
||||
Line 45: Steel clashes against scales
|
||||
Line 46: Fire meets courage head on
|
||||
Line 47: The fight rages for hours
|
||||
Line 48: Until glory and honor are won
|
||||
Line 49: The knight returns home triumphant
|
||||
Line 50: And that's the end of our tale
|
||||
|
|
@ -1,20 +1,5 @@
|
|||
Line 1: The quick brown fox jumps over the lazy dog
|
||||
Line 2: Testing the TUI renderer with multiple lines
|
||||
Line 3: Each line contains some different text
|
||||
Line 4: This is line number four
|
||||
Line 5: Halfway through the first ten lines
|
||||
Line 6: More content for testing purposes
|
||||
Line 7: Seven is a lucky number
|
||||
Line 8: Eight lines down, twelve to go
|
||||
Line 9: Almost at line ten
|
||||
Line 10: Double digits now
|
||||
Line 11: Starting the second half
|
||||
Line 12: A dozen lines complete
|
||||
Line 13: Unlucky for some, lucky for others
|
||||
Line 14: Fortnight means fourteen nights
|
||||
Line 15: Three quarters of the way there
|
||||
Line 16: Sweet sixteen
|
||||
Line 17: Just a few more lines left
|
||||
Line 18: Penultimate line coming up
|
||||
Line 19: This is the second to last line
|
||||
Line 20: The final line of this test file
|
||||
hello
|
||||
world
|
||||
hello
|
||||
world
|
||||
hello
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue