mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-15 23:01:30 +00:00
- Change from contextTransform (runs once at agent start) to preprocessor - preprocessor runs before EACH LLM call inside the agent loop - ContextEvent now uses Message[] (pi-ai format) instead of AppMessage[] - Deep copy handled by pi-ai preprocessor, not Agent This enables: - Pruning rules applied on every turn (not just agent start) - /prune during long agent loop takes effect immediately - Compaction can use same transforms (future work)
86 lines
2.4 KiB
TypeScript
import {
|
|
type AgentContext,
|
|
type AgentLoopConfig,
|
|
agentLoop,
|
|
agentLoopContinue,
|
|
type Message,
|
|
type UserMessage,
|
|
} from "@mariozechner/pi-ai";
|
|
import type { AgentRunConfig, AgentTransport } from "./types.js";
|
|
|
|
/**
 * Configuration for {@link ProviderTransport}. Both fields are optional;
 * an empty options object yields a transport that calls providers directly
 * with environment-supplied credentials.
 */
export interface ProviderTransportOptions {
	/**
	 * Function to retrieve API key for a given provider.
	 * May return synchronously or as a Promise (e.g. for OAuth token refresh).
	 * If not provided, transport will try to use environment variables.
	 */
	getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;

	/**
	 * Optional CORS proxy URL for browser environments.
	 * If provided, all requests will be routed through this proxy
	 * (the provider's base URL is passed as a `url` query parameter).
	 * Format: "https://proxy.example.com"
	 */
	corsProxyUrl?: string;
}
|
|
|
|
/**
|
|
* Transport that calls LLM providers directly.
|
|
* Optionally routes calls through a CORS proxy if configured.
|
|
*/
|
|
export class ProviderTransport implements AgentTransport {
|
|
private options: ProviderTransportOptions;
|
|
|
|
constructor(options: ProviderTransportOptions = {}) {
|
|
this.options = options;
|
|
}
|
|
|
|
private getModel(cfg: AgentRunConfig) {
|
|
let model = cfg.model;
|
|
if (this.options.corsProxyUrl && cfg.model.baseUrl) {
|
|
model = {
|
|
...cfg.model,
|
|
baseUrl: `${this.options.corsProxyUrl}/?url=${encodeURIComponent(cfg.model.baseUrl)}`,
|
|
};
|
|
}
|
|
return model;
|
|
}
|
|
|
|
private buildContext(messages: Message[], cfg: AgentRunConfig): AgentContext {
|
|
return {
|
|
systemPrompt: cfg.systemPrompt,
|
|
messages,
|
|
tools: cfg.tools,
|
|
};
|
|
}
|
|
|
|
private buildLoopConfig(model: AgentRunConfig["model"], cfg: AgentRunConfig): AgentLoopConfig {
|
|
return {
|
|
model,
|
|
reasoning: cfg.reasoning,
|
|
// Resolve API key per assistant response (important for expiring OAuth tokens)
|
|
getApiKey: this.options.getApiKey,
|
|
getQueuedMessages: cfg.getQueuedMessages,
|
|
preprocessor: cfg.preprocessor,
|
|
};
|
|
}
|
|
|
|
async *run(messages: Message[], userMessage: Message, cfg: AgentRunConfig, signal?: AbortSignal) {
|
|
const model = this.getModel(cfg);
|
|
const context = this.buildContext(messages, cfg);
|
|
const pc = this.buildLoopConfig(model, cfg);
|
|
|
|
for await (const ev of agentLoop(userMessage as unknown as UserMessage, context, pc, signal)) {
|
|
yield ev;
|
|
}
|
|
}
|
|
|
|
async *continue(messages: Message[], cfg: AgentRunConfig, signal?: AbortSignal) {
|
|
const model = this.getModel(cfg);
|
|
const context = this.buildContext(messages, cfg);
|
|
const pc = this.buildLoopConfig(model, cfg);
|
|
|
|
for await (const ev of agentLoopContinue(context, pc, signal)) {
|
|
yield ev;
|
|
}
|
|
}
|
|
}
|