mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-15 07:04:45 +00:00
Update READMEs: remove agent section from pi-ai, rewrite pi-agent-core
- Removed Agent API section from pi-ai README (moved to agent package) - Rewrote agent package README for new architecture: - No more transports (ProviderTransport, AppTransport removed) - Uses streamFn directly with streamProxy for proxy usage - Documents convertToLlm and transformContext - Documents low-level agentLoop/agentLoopContinue API - Updated custom message types documentation
This commit is contained in:
parent
a055fd4481
commit
fa22595f25
3 changed files with 111 additions and 435 deletions
|
|
@@ -1,6 +1,6 @@
|
|||
# @mariozechner/pi-agent-core
|
||||
|
||||
Stateful agent abstraction with transport layer for LLM interactions. Provides a reactive `Agent` class that manages conversation state, emits granular events, and supports pluggable transports for different deployment scenarios.
|
||||
Stateful agent with tool execution, event streaming, and extensible message types. Built on `@mariozechner/pi-ai`.
|
||||
|
||||
## Installation
|
||||
|
||||
|
|
@@ -11,12 +11,10 @@ npm install @mariozechner/pi-agent-core
|
|||
## Quick Start
|
||||
|
||||
```typescript
|
||||
import { Agent, ProviderTransport } from '@mariozechner/pi-agent-core';
|
||||
import { Agent } from '@mariozechner/pi-agent-core';
|
||||
import { getModel } from '@mariozechner/pi-ai';
|
||||
|
||||
// Create agent with direct provider transport
|
||||
const agent = new Agent({
|
||||
transport: new ProviderTransport(),
|
||||
initialState: {
|
||||
systemPrompt: 'You are a helpful assistant.',
|
||||
model: getModel('anthropic', 'claude-sonnet-4-20250514'),
|
||||
|
|
@@ -29,37 +27,48 @@ const agent = new Agent({
|
|||
agent.subscribe((event) => {
|
||||
switch (event.type) {
|
||||
case 'message_update':
|
||||
// Stream text to UI
|
||||
const content = event.message.content;
|
||||
for (const block of content) {
|
||||
if (block.type === 'text') console.log(block.text);
|
||||
for (const block of event.message.content) {
|
||||
if (block.type === 'text') process.stdout.write(block.text);
|
||||
}
|
||||
break;
|
||||
case 'tool_execution_start':
|
||||
console.log(`Calling ${event.toolName}...`);
|
||||
break;
|
||||
case 'tool_execution_update':
|
||||
// Stream tool output (e.g., bash stdout)
|
||||
console.log('Progress:', event.partialResult.content);
|
||||
break;
|
||||
case 'tool_execution_end':
|
||||
console.log(`Result:`, event.result.content);
|
||||
break;
|
||||
}
|
||||
});
|
||||
|
||||
// Send a prompt
|
||||
await agent.prompt('Hello, world!');
|
||||
|
||||
// Access conversation state
|
||||
console.log(agent.state.messages);
|
||||
```
|
||||
|
||||
## Core Concepts
|
||||
## Agent Options
|
||||
|
||||
### Agent State
|
||||
```typescript
|
||||
interface AgentOptions {
|
||||
initialState?: Partial<AgentState>;
|
||||
|
||||
The `Agent` maintains reactive state:
|
||||
// Converts AgentMessage[] to LLM-compatible Message[] before each call.
|
||||
// Default: filters to user/assistant/toolResult and converts attachments.
|
||||
convertToLlm?: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;
|
||||
|
||||
// Transform context before convertToLlm (for pruning, injecting context, etc.)
|
||||
transformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;
|
||||
|
||||
// Queue mode: 'all' sends all queued messages, 'one-at-a-time' sends one per turn
|
||||
queueMode?: 'all' | 'one-at-a-time';
|
||||
|
||||
// Custom stream function (for proxy backends). Default: streamSimple from pi-ai
|
||||
streamFn?: StreamFn;
|
||||
|
||||
// Dynamic API key resolution (useful for expiring OAuth tokens)
|
||||
getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;
|
||||
}
|
||||
```
|
||||
|
||||
## Agent State
|
||||
|
||||
```typescript
|
||||
interface AgentState {
|
||||
|
|
@@ -67,17 +76,17 @@ interface AgentState {
|
|||
model: Model<any>;
|
||||
thinkingLevel: ThinkingLevel; // 'off' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh'
|
||||
tools: AgentTool<any>[];
|
||||
messages: AppMessage[];
|
||||
messages: AgentMessage[];
|
||||
isStreaming: boolean;
|
||||
streamMessage: Message | null;
|
||||
streamMessage: AgentMessage | null;
|
||||
pendingToolCalls: Set<string>;
|
||||
error?: string;
|
||||
}
|
||||
```
|
||||
|
||||
### Events
|
||||
## Events
|
||||
|
||||
Events provide fine-grained lifecycle information:
|
||||
Events provide fine-grained lifecycle information for building reactive UIs:
|
||||
|
||||
| Event | Description |
|
||||
|-------|-------------|
|
||||
|
|
@@ -89,30 +98,40 @@ Events provide fine-grained lifecycle information:
|
|||
| `message_update` | Assistant message streaming update |
|
||||
| `message_end` | Message completes |
|
||||
| `tool_execution_start` | Tool begins execution |
|
||||
| `tool_execution_update` | Tool streams progress (e.g., bash output) |
|
||||
| `tool_execution_update` | Tool streams progress |
|
||||
| `tool_execution_end` | Tool completes with result |
|
||||
|
||||
### Transports
|
||||
## Custom Message Types
|
||||
|
||||
Transports abstract LLM communication:
|
||||
|
||||
- **`ProviderTransport`**: Direct API calls using `@mariozechner/pi-ai`
|
||||
- **`AppTransport`**: Proxy through a backend server (for browser apps)
|
||||
Extend `AgentMessage` for app-specific messages via declaration merging:
|
||||
|
||||
```typescript
|
||||
// Direct provider access (Node.js)
|
||||
const agent = new Agent({
|
||||
transport: new ProviderTransport({
|
||||
apiKey: process.env.ANTHROPIC_API_KEY
|
||||
})
|
||||
});
|
||||
declare module '@mariozechner/pi-agent-core' {
|
||||
interface CustomMessages {
|
||||
artifact: { role: 'artifact'; code: string; language: string; timestamp: number };
|
||||
notification: { role: 'notification'; text: string; timestamp: number };
|
||||
}
|
||||
}
|
||||
|
||||
// Via proxy (browser)
|
||||
// AgentMessage now includes your custom types
|
||||
const msg: AgentMessage = { role: 'artifact', code: '...', language: 'typescript', timestamp: Date.now() };
|
||||
```
|
||||
|
||||
Custom messages are stored in state but filtered out by the default `convertToLlm`. Provide your own converter to handle them:
|
||||
|
||||
```typescript
|
||||
const agent = new Agent({
|
||||
transport: new AppTransport({
|
||||
endpoint: '/api/agent',
|
||||
headers: { 'Authorization': 'Bearer ...' }
|
||||
})
|
||||
convertToLlm: (messages) => {
|
||||
return messages
|
||||
.filter(m => m.role === 'user' || m.role === 'assistant' || m.role === 'toolResult')
|
||||
.map(m => {
|
||||
// Convert custom types or pass through
|
||||
if (m.role === 'artifact') {
|
||||
return { role: 'user', content: `[Artifact: ${m.language}]\n${m.code}`, timestamp: m.timestamp };
|
||||
}
|
||||
return m;
|
||||
});
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
|
|
@@ -121,20 +140,21 @@ const agent = new Agent({
|
|||
Queue messages to inject at the next turn:
|
||||
|
||||
```typescript
|
||||
// Queue mode: 'all' or 'one-at-a-time'
|
||||
agent.setQueueMode('one-at-a-time');
|
||||
|
||||
// Queue a message while agent is streaming
|
||||
await agent.queueMessage({
|
||||
// Queue while agent is streaming
|
||||
agent.queueMessage({
|
||||
role: 'user',
|
||||
content: 'Additional context...',
|
||||
content: 'Stop what you are doing and focus on this instead.',
|
||||
timestamp: Date.now()
|
||||
});
|
||||
```
|
||||
|
||||
When queued messages are detected after a tool call, remaining tool calls are skipped with error results.
|
||||
|
||||
## Attachments
|
||||
|
||||
User messages can include attachments:
|
||||
User messages can include attachments (images, documents):
|
||||
|
||||
```typescript
|
||||
await agent.prompt('What is in this image?', [{
|
||||
|
|
@@ -143,23 +163,57 @@ await agent.prompt('What is in this image?', [{
|
|||
fileName: 'photo.jpg',
|
||||
mimeType: 'image/jpeg',
|
||||
size: 102400,
|
||||
content: base64ImageData
|
||||
content: base64ImageData // base64 without data URL prefix
|
||||
}]);
|
||||
```
|
||||
|
||||
## Custom Message Types
|
||||
## Proxy Usage
|
||||
|
||||
Extend `AppMessage` for app-specific messages via declaration merging:
|
||||
For browser apps that need to proxy through a backend, use `streamProxy`:
|
||||
|
||||
```typescript
|
||||
declare module '@mariozechner/pi-agent-core' {
|
||||
interface CustomMessages {
|
||||
artifact: { role: 'artifact'; code: string; language: string };
|
||||
}
|
||||
import { Agent, streamProxy } from '@mariozechner/pi-agent-core';
|
||||
|
||||
const agent = new Agent({
|
||||
streamFn: (model, context, options) => streamProxy(
|
||||
'/api/agent',
|
||||
model,
|
||||
context,
|
||||
options,
|
||||
{ 'Authorization': 'Bearer ...' }
|
||||
)
|
||||
});
|
||||
```
|
||||
|
||||
## Low-Level API
|
||||
|
||||
For more control, use `agentLoop` and `agentLoopContinue` directly:
|
||||
|
||||
```typescript
|
||||
import { agentLoop, agentLoopContinue, AgentLoopContext, AgentLoopConfig } from '@mariozechner/pi-agent-core';
|
||||
import { getModel, streamSimple } from '@mariozechner/pi-ai';
|
||||
|
||||
const context: AgentLoopContext = {
|
||||
systemPrompt: 'You are helpful.',
|
||||
messages: [],
|
||||
tools: [myTool]
|
||||
};
|
||||
|
||||
const config: AgentLoopConfig = {
|
||||
model: getModel('openai', 'gpt-4o-mini'),
|
||||
convertToLlm: (msgs) => msgs.filter(m => ['user', 'assistant', 'toolResult'].includes(m.role))
|
||||
};
|
||||
|
||||
const userMessage = { role: 'user', content: 'Hello', timestamp: Date.now() };
|
||||
|
||||
for await (const event of agentLoop(userMessage, context, config, undefined, streamSimple)) {
|
||||
console.log(event.type);
|
||||
}
|
||||
|
||||
// Now AppMessage includes your custom type
|
||||
const msg: AppMessage = { role: 'artifact', code: '...', language: 'typescript' };
|
||||
// Continue from existing context (e.g., after overflow recovery)
|
||||
for await (const event of agentLoopContinue(context, config, undefined, streamSimple)) {
|
||||
console.log(event.type);
|
||||
}
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
|
@@ -169,9 +223,10 @@ const msg: AppMessage = { role: 'artifact', code: '...', language: 'typescript'
|
|||
| Method | Description |
|
||||
|--------|-------------|
|
||||
| `prompt(text, attachments?)` | Send a user prompt |
|
||||
| `continue()` | Continue from current context (for retry after overflow) |
|
||||
| `prompt(message)` | Send an AgentMessage directly |
|
||||
| `continue()` | Continue from current context |
|
||||
| `abort()` | Abort current operation |
|
||||
| `waitForIdle()` | Returns promise that resolves when agent is idle |
|
||||
| `waitForIdle()` | Promise that resolves when agent is idle |
|
||||
| `reset()` | Clear all messages and state |
|
||||
| `subscribe(fn)` | Subscribe to events, returns unsubscribe function |
|
||||
| `queueMessage(msg)` | Queue message for next turn |
|
||||
|
|
@@ -184,7 +239,7 @@ const msg: AppMessage = { role: 'artifact', code: '...', language: 'typescript'
|
|||
| `setSystemPrompt(v)` | Update system prompt |
|
||||
| `setModel(m)` | Switch model |
|
||||
| `setThinkingLevel(l)` | Set reasoning level |
|
||||
| `setQueueMode(m)` | Set queue mode ('all' or 'one-at-a-time') |
|
||||
| `setQueueMode(m)` | Set queue mode |
|
||||
| `setTools(t)` | Update available tools |
|
||||
| `replaceMessages(ms)` | Replace all messages |
|
||||
| `appendMessage(m)` | Append a message |
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue