mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-15 09:01:14 +00:00
feat(ai): Add OpenAI Completions and Responses API providers
- Implement OpenAICompletionsLLM for Chat Completions API with streaming
- Implement OpenAIResponsesLLM for Responses API with reasoning support
- Update types to use LLM/Context instead of AI/Request
- Add support for reasoning tokens, tool calls, and streaming
- Create test examples for both OpenAI providers
- Update Anthropic provider to match new interface
This commit is contained in:
parent
e5aedfed29
commit
8364ecde4a
7 changed files with 722 additions and 39 deletions
|
|
@ -5,9 +5,18 @@ import type {
|
|||
MessageParam,
|
||||
Tool,
|
||||
} from "@anthropic-ai/sdk/resources/messages.js";
|
||||
import type { AI, AssistantMessage, Event, Message, Request, StopReason, TokenUsage, ToolCall } from "../types.js";
|
||||
import type {
|
||||
AssistantMessage,
|
||||
Context,
|
||||
LLM,
|
||||
LLMOptions,
|
||||
Message,
|
||||
StopReason,
|
||||
TokenUsage,
|
||||
ToolCall,
|
||||
} from "../types.js";
|
||||
|
||||
export interface AnthropicOptions {
|
||||
export interface AnthropicLLMOptions extends LLMOptions {
|
||||
thinking?: {
|
||||
enabled: boolean;
|
||||
budgetTokens?: number;
|
||||
|
|
@ -15,7 +24,7 @@ export interface AnthropicOptions {
|
|||
toolChoice?: "auto" | "any" | "none" | { type: "tool"; name: string };
|
||||
}
|
||||
|
||||
export class AnthropicAI implements AI<AnthropicOptions> {
|
||||
export class AnthropicLLM implements LLM<AnthropicLLMOptions> {
|
||||
private client: Anthropic;
|
||||
private model: string;
|
||||
|
||||
|
|
@ -28,31 +37,56 @@ export class AnthropicAI implements AI<AnthropicOptions> {
|
|||
}
|
||||
apiKey = process.env.ANTHROPIC_API_KEY;
|
||||
}
|
||||
this.client = new Anthropic({ apiKey, baseURL: baseUrl });
|
||||
if (apiKey.includes("sk-ant-oat")) {
|
||||
const defaultHeaders = {
|
||||
accept: "application/json",
|
||||
"anthropic-beta": "oauth-2025-04-20,fine-grained-tool-streaming-2025-05-14",
|
||||
};
|
||||
|
||||
process.env.ANTHROPIC_API_KEY = undefined;
|
||||
this.client = new Anthropic({ apiKey: null, authToken: apiKey, baseURL: baseUrl, defaultHeaders });
|
||||
} else {
|
||||
this.client = new Anthropic({ apiKey, baseURL: baseUrl });
|
||||
}
|
||||
this.model = model;
|
||||
}
|
||||
|
||||
async complete(request: Request, options?: AnthropicOptions): Promise<AssistantMessage> {
|
||||
async complete(context: Context, options?: AnthropicLLMOptions): Promise<AssistantMessage> {
|
||||
try {
|
||||
const messages = this.convertMessages(request.messages);
|
||||
const messages = this.convertMessages(context.messages);
|
||||
|
||||
const params: MessageCreateParamsStreaming = {
|
||||
model: this.model,
|
||||
messages,
|
||||
max_tokens: request.maxTokens || 4096,
|
||||
max_tokens: options?.maxTokens || 4096,
|
||||
stream: true,
|
||||
};
|
||||
|
||||
if (request.systemPrompt) {
|
||||
params.system = request.systemPrompt;
|
||||
if (context.systemPrompt) {
|
||||
params.system = [
|
||||
{
|
||||
type: "text",
|
||||
text: "You are Claude Code, Anthropic's official CLI for Claude.",
|
||||
cache_control: {
|
||||
type: "ephemeral",
|
||||
},
|
||||
},
|
||||
{
|
||||
type: "text",
|
||||
text: context.systemPrompt,
|
||||
cache_control: {
|
||||
type: "ephemeral",
|
||||
},
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
if (request.temperature !== undefined) {
|
||||
params.temperature = request.temperature;
|
||||
if (options?.temperature !== undefined) {
|
||||
params.temperature = options?.temperature;
|
||||
}
|
||||
|
||||
if (request.tools) {
|
||||
params.tools = this.convertTools(request.tools);
|
||||
if (context.tools) {
|
||||
params.tools = this.convertTools(context.tools);
|
||||
}
|
||||
|
||||
if (options?.thinking?.enabled) {
|
||||
|
|
@ -76,17 +110,17 @@ export class AnthropicAI implements AI<AnthropicOptions> {
|
|||
stream: true,
|
||||
},
|
||||
{
|
||||
signal: request.signal,
|
||||
signal: options?.signal,
|
||||
},
|
||||
);
|
||||
|
||||
for await (const event of stream) {
|
||||
if (event.type === "content_block_delta") {
|
||||
if (event.delta.type === "text_delta") {
|
||||
request.onText?.(event.delta.text);
|
||||
options?.onText?.(event.delta.text);
|
||||
}
|
||||
if (event.delta.type === "thinking_delta") {
|
||||
request.onThinking?.(event.delta.thinking);
|
||||
options?.onThinking?.(event.delta.thinking);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -211,7 +245,7 @@ export class AnthropicAI implements AI<AnthropicOptions> {
|
|||
return params;
|
||||
}
|
||||
|
||||
private convertTools(tools: Request["tools"]): Tool[] {
|
||||
private convertTools(tools: Context["tools"]): Tool[] {
|
||||
if (!tools) return [];
|
||||
|
||||
return tools.map((tool) => ({
|
||||
|
|
|
|||
249
packages/ai/src/providers/openai-completions.ts
Normal file
249
packages/ai/src/providers/openai-completions.ts
Normal file
|
|
@ -0,0 +1,249 @@
|
|||
import OpenAI from "openai";
|
||||
import type { ChatCompletionChunk, ChatCompletionMessageParam } from "openai/resources/chat/completions.js";
|
||||
import type {
|
||||
AssistantMessage,
|
||||
Context,
|
||||
LLM,
|
||||
LLMOptions,
|
||||
Message,
|
||||
StopReason,
|
||||
TokenUsage,
|
||||
Tool,
|
||||
ToolCall,
|
||||
} from "../types.js";
|
||||
|
||||
export interface OpenAICompletionsLLMOptions extends LLMOptions {
|
||||
toolChoice?: "auto" | "none" | "required" | { type: "function"; function: { name: string } };
|
||||
reasoningEffort?: "low" | "medium" | "high";
|
||||
}
|
||||
|
||||
export class OpenAICompletionsLLM implements LLM<OpenAICompletionsLLMOptions> {
|
||||
private client: OpenAI;
|
||||
private model: string;
|
||||
|
||||
constructor(model: string, apiKey?: string, baseUrl?: string) {
|
||||
if (!apiKey) {
|
||||
if (!process.env.OPENAI_API_KEY) {
|
||||
throw new Error(
|
||||
"OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.",
|
||||
);
|
||||
}
|
||||
apiKey = process.env.OPENAI_API_KEY;
|
||||
}
|
||||
this.client = new OpenAI({ apiKey, baseURL: baseUrl });
|
||||
this.model = model;
|
||||
}
|
||||
|
||||
async complete(request: Context, options?: OpenAICompletionsLLMOptions): Promise<AssistantMessage> {
|
||||
try {
|
||||
const messages = this.convertMessages(request.messages, request.systemPrompt);
|
||||
|
||||
const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
|
||||
model: this.model,
|
||||
messages,
|
||||
stream: true,
|
||||
stream_options: { include_usage: true },
|
||||
store: false,
|
||||
};
|
||||
|
||||
if (options?.maxTokens) {
|
||||
params.max_completion_tokens = options?.maxTokens;
|
||||
}
|
||||
|
||||
if (options?.temperature !== undefined) {
|
||||
params.temperature = options?.temperature;
|
||||
}
|
||||
|
||||
if (request.tools) {
|
||||
params.tools = this.convertTools(request.tools);
|
||||
}
|
||||
|
||||
if (options?.toolChoice) {
|
||||
params.tool_choice = options.toolChoice;
|
||||
}
|
||||
|
||||
if (options?.reasoningEffort && this.isReasoningModel()) {
|
||||
params.reasoning_effort = options.reasoningEffort;
|
||||
}
|
||||
|
||||
const stream = await this.client.chat.completions.create(params, {
|
||||
signal: options?.signal,
|
||||
});
|
||||
|
||||
let content = "";
|
||||
const toolCallsMap = new Map<
|
||||
number,
|
||||
{
|
||||
id: string;
|
||||
name: string;
|
||||
arguments: string;
|
||||
}
|
||||
>();
|
||||
let usage: TokenUsage = {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
};
|
||||
let finishReason: ChatCompletionChunk.Choice["finish_reason"] | null = null;
|
||||
|
||||
for await (const chunk of stream) {
|
||||
const choice = chunk.choices[0];
|
||||
|
||||
// Handle text content
|
||||
if (choice?.delta?.content) {
|
||||
content += choice.delta.content;
|
||||
options?.onText?.(choice.delta.content);
|
||||
}
|
||||
|
||||
// Handle tool calls
|
||||
if (choice?.delta?.tool_calls) {
|
||||
for (const toolCall of choice.delta.tool_calls) {
|
||||
const index = toolCall.index;
|
||||
|
||||
if (!toolCallsMap.has(index)) {
|
||||
toolCallsMap.set(index, {
|
||||
id: toolCall.id || "",
|
||||
name: toolCall.function?.name || "",
|
||||
arguments: "",
|
||||
});
|
||||
}
|
||||
|
||||
const existing = toolCallsMap.get(index)!;
|
||||
if (toolCall.id) existing.id = toolCall.id;
|
||||
if (toolCall.function?.name) existing.name = toolCall.function.name;
|
||||
if (toolCall.function?.arguments) {
|
||||
existing.arguments += toolCall.function.arguments;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Capture finish reason
|
||||
if (choice?.finish_reason) {
|
||||
finishReason = choice.finish_reason;
|
||||
}
|
||||
|
||||
// Capture usage
|
||||
if (chunk.usage) {
|
||||
usage = {
|
||||
input: chunk.usage.prompt_tokens || 0,
|
||||
output: chunk.usage.completion_tokens || 0,
|
||||
cacheRead: chunk.usage.prompt_tokens_details?.cached_tokens || 0,
|
||||
cacheWrite: 0,
|
||||
};
|
||||
|
||||
// Note: reasoning tokens are in completion_tokens_details?.reasoning_tokens
|
||||
// but we don't have actual thinking content from Chat Completions API
|
||||
}
|
||||
}
|
||||
|
||||
// Convert tool calls map to array
|
||||
const toolCalls: ToolCall[] = Array.from(toolCallsMap.values()).map((tc) => ({
|
||||
id: tc.id,
|
||||
name: tc.name,
|
||||
arguments: JSON.parse(tc.arguments),
|
||||
}));
|
||||
|
||||
return {
|
||||
role: "assistant",
|
||||
content: content || undefined,
|
||||
thinking: undefined, // Chat Completions doesn't provide actual thinking content
|
||||
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
|
||||
model: this.model,
|
||||
usage,
|
||||
stopResaon: this.mapStopReason(finishReason),
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
role: "assistant",
|
||||
model: this.model,
|
||||
usage: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
},
|
||||
stopResaon: "error",
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
private convertMessages(messages: Message[], systemPrompt?: string): ChatCompletionMessageParam[] {
|
||||
const params: ChatCompletionMessageParam[] = [];
|
||||
|
||||
// Add system prompt if provided
|
||||
if (systemPrompt) {
|
||||
const role = this.isReasoningModel() ? "developer" : "system";
|
||||
params.push({ role: role, content: systemPrompt });
|
||||
}
|
||||
|
||||
// Convert messages
|
||||
for (const msg of messages) {
|
||||
if (msg.role === "user") {
|
||||
params.push({
|
||||
role: "user",
|
||||
content: msg.content,
|
||||
});
|
||||
} else if (msg.role === "assistant") {
|
||||
const assistantMsg: ChatCompletionMessageParam = {
|
||||
role: "assistant",
|
||||
content: msg.content || null,
|
||||
};
|
||||
|
||||
if (msg.toolCalls) {
|
||||
assistantMsg.tool_calls = msg.toolCalls.map((tc) => ({
|
||||
id: tc.id,
|
||||
type: "function" as const,
|
||||
function: {
|
||||
name: tc.name,
|
||||
arguments: JSON.stringify(tc.arguments),
|
||||
},
|
||||
}));
|
||||
}
|
||||
|
||||
params.push(assistantMsg);
|
||||
} else if (msg.role === "toolResult") {
|
||||
params.push({
|
||||
role: "tool",
|
||||
content: msg.content,
|
||||
tool_call_id: msg.toolCallId,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return params;
|
||||
}
|
||||
|
||||
private convertTools(tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool[] {
|
||||
return tools.map((tool) => ({
|
||||
type: "function",
|
||||
function: {
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
parameters: tool.parameters,
|
||||
},
|
||||
}));
|
||||
}
|
||||
|
||||
private mapStopReason(reason: ChatCompletionChunk.Choice["finish_reason"] | null): StopReason {
|
||||
switch (reason) {
|
||||
case "stop":
|
||||
return "stop";
|
||||
case "length":
|
||||
return "length";
|
||||
case "function_call":
|
||||
case "tool_calls":
|
||||
return "toolUse";
|
||||
case "content_filter":
|
||||
return "safety";
|
||||
default:
|
||||
return "stop";
|
||||
}
|
||||
}
|
||||
|
||||
private isReasoningModel(): boolean {
|
||||
// TODO base on models.dev data
|
||||
return this.model.includes("o1") || this.model.includes("o3");
|
||||
}
|
||||
}
|
||||
268
packages/ai/src/providers/openai-responses.ts
Normal file
268
packages/ai/src/providers/openai-responses.ts
Normal file
|
|
@ -0,0 +1,268 @@
|
|||
import OpenAI from "openai";
|
||||
import type {
|
||||
Tool as OpenAITool,
|
||||
ResponseCreateParamsStreaming,
|
||||
ResponseInput,
|
||||
ResponseReasoningItem,
|
||||
} from "openai/resources/responses/responses.js";
|
||||
import type {
|
||||
AssistantMessage,
|
||||
Context,
|
||||
LLM,
|
||||
LLMOptions,
|
||||
Message,
|
||||
StopReason,
|
||||
TokenUsage,
|
||||
Tool,
|
||||
ToolCall,
|
||||
} from "../types.js";
|
||||
|
||||
export interface OpenAIResponsesLLMOptions extends LLMOptions {
|
||||
reasoningEffort?: "minimal" | "low" | "medium" | "high";
|
||||
reasoningSummary?: "auto" | "detailed" | "concise" | null;
|
||||
}
|
||||
|
||||
export class OpenAIResponsesLLM implements LLM<OpenAIResponsesLLMOptions> {
|
||||
private client: OpenAI;
|
||||
private model: string;
|
||||
|
||||
constructor(model: string, apiKey?: string, baseUrl?: string) {
|
||||
if (!apiKey) {
|
||||
if (!process.env.OPENAI_API_KEY) {
|
||||
throw new Error(
|
||||
"OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.",
|
||||
);
|
||||
}
|
||||
apiKey = process.env.OPENAI_API_KEY;
|
||||
}
|
||||
this.client = new OpenAI({ apiKey, baseURL: baseUrl });
|
||||
this.model = model;
|
||||
}
|
||||
|
||||
async complete(request: Context, options?: OpenAIResponsesLLMOptions): Promise<AssistantMessage> {
|
||||
try {
|
||||
const input = this.convertToInput(request.messages, request.systemPrompt);
|
||||
|
||||
const params: ResponseCreateParamsStreaming = {
|
||||
model: this.model,
|
||||
input,
|
||||
stream: true,
|
||||
};
|
||||
|
||||
if (options?.maxTokens) {
|
||||
params.max_output_tokens = options?.maxTokens;
|
||||
}
|
||||
|
||||
if (options?.temperature !== undefined) {
|
||||
params.temperature = options?.temperature;
|
||||
}
|
||||
|
||||
if (request.tools) {
|
||||
params.tools = this.convertTools(request.tools);
|
||||
}
|
||||
|
||||
// Add reasoning options for models that support it
|
||||
if (this.supportsReasoning() && (options?.reasoningEffort || options?.reasoningSummary)) {
|
||||
params.reasoning = {
|
||||
effort: options?.reasoningEffort || "medium",
|
||||
summary: options?.reasoningSummary || "auto",
|
||||
};
|
||||
params.include = ["reasoning.encrypted_content"];
|
||||
}
|
||||
|
||||
const stream = await this.client.responses.create(params, {
|
||||
signal: options?.signal,
|
||||
});
|
||||
|
||||
let content = "";
|
||||
let thinking = "";
|
||||
const toolCalls: ToolCall[] = [];
|
||||
const reasoningItems: ResponseReasoningItem[] = [];
|
||||
let usage: TokenUsage = {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
};
|
||||
let stopReason: StopReason = "stop";
|
||||
|
||||
for await (const event of stream) {
|
||||
// Handle reasoning summary for models that support it
|
||||
if (event.type === "response.reasoning_summary_text.delta") {
|
||||
const delta = event.delta;
|
||||
thinking += delta;
|
||||
options?.onThinking?.(delta);
|
||||
} else if (event.type === "response.reasoning_summary_text.done") {
|
||||
if (event.text) {
|
||||
thinking = event.text;
|
||||
}
|
||||
}
|
||||
// Handle main text output
|
||||
else if (event.type === "response.output_text.delta") {
|
||||
const delta = event.delta;
|
||||
content += delta;
|
||||
options?.onText?.(delta);
|
||||
} else if (event.type === "response.output_text.done") {
|
||||
if (event.text) {
|
||||
content = event.text;
|
||||
}
|
||||
}
|
||||
// Handle function calls
|
||||
else if (event.type === "response.output_item.done") {
|
||||
const item = event.item;
|
||||
if (item?.type === "function_call") {
|
||||
toolCalls.push({
|
||||
id: item.call_id + "|" + item.id,
|
||||
name: item.name,
|
||||
arguments: JSON.parse(item.arguments),
|
||||
});
|
||||
}
|
||||
if (item.type === "reasoning") {
|
||||
reasoningItems.push(item);
|
||||
}
|
||||
}
|
||||
// Handle completion
|
||||
else if (event.type === "response.completed") {
|
||||
const response = event.response;
|
||||
if (response?.usage) {
|
||||
usage = {
|
||||
input: response.usage.input_tokens || 0,
|
||||
output: response.usage.output_tokens || 0,
|
||||
cacheRead: response.usage.input_tokens_details?.cached_tokens || 0,
|
||||
cacheWrite: 0,
|
||||
};
|
||||
}
|
||||
|
||||
// Map status to stop reason
|
||||
stopReason = this.mapStopReason(response?.status);
|
||||
}
|
||||
// Handle errors
|
||||
else if (event.type === "error") {
|
||||
return {
|
||||
role: "assistant",
|
||||
model: this.model,
|
||||
usage,
|
||||
stopResaon: "error",
|
||||
error: `Code ${event.code}: ${event.message}` || "Unknown error",
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
role: "assistant",
|
||||
content: content || undefined,
|
||||
thinking: thinking || undefined,
|
||||
thinkingSignature: JSON.stringify(reasoningItems) || undefined,
|
||||
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
|
||||
model: this.model,
|
||||
usage,
|
||||
stopResaon: stopReason,
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
role: "assistant",
|
||||
model: this.model,
|
||||
usage: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
},
|
||||
stopResaon: "error",
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
private convertToInput(messages: Message[], systemPrompt?: string): ResponseInput {
|
||||
const input: ResponseInput = [];
|
||||
|
||||
// Add system prompt if provided
|
||||
if (systemPrompt) {
|
||||
const role = this.supportsReasoning() ? "developer" : "system";
|
||||
input.push({
|
||||
role,
|
||||
content: systemPrompt,
|
||||
});
|
||||
}
|
||||
|
||||
// Convert messages
|
||||
for (const msg of messages) {
|
||||
if (msg.role === "user") {
|
||||
input.push({
|
||||
role: "user",
|
||||
content: [{ type: "input_text", text: msg.content }],
|
||||
});
|
||||
} else if (msg.role === "assistant") {
|
||||
// Assistant messages - add both content and tool calls to output
|
||||
const output: ResponseInput = [];
|
||||
if (msg.thinkingSignature) {
|
||||
output.push(...JSON.parse(msg.thinkingSignature));
|
||||
}
|
||||
if (msg.toolCalls) {
|
||||
for (const toolCall of msg.toolCalls) {
|
||||
output.push({
|
||||
type: "function_call",
|
||||
id: toolCall.id.split("|")[1], // Extract original ID
|
||||
call_id: toolCall.id.split("|")[0], // Extract call session ID
|
||||
name: toolCall.name,
|
||||
arguments: JSON.stringify(toolCall.arguments),
|
||||
});
|
||||
}
|
||||
}
|
||||
if (msg.content) {
|
||||
output.push({
|
||||
type: "message",
|
||||
role: "assistant",
|
||||
content: [{ type: "input_text", text: msg.content }],
|
||||
});
|
||||
}
|
||||
// Add all output items to input
|
||||
input.push(...output);
|
||||
} else if (msg.role === "toolResult") {
|
||||
// Tool results are sent as function_call_output
|
||||
input.push({
|
||||
type: "function_call_output",
|
||||
call_id: msg.toolCallId.split("|")[0], // Extract call session ID
|
||||
output: msg.content,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return input;
|
||||
}
|
||||
|
||||
private convertTools(tools: Tool[]): OpenAITool[] {
|
||||
return tools.map((tool) => ({
|
||||
type: "function",
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
parameters: tool.parameters,
|
||||
strict: null,
|
||||
}));
|
||||
}
|
||||
|
||||
private mapStopReason(status: string | undefined): StopReason {
|
||||
switch (status) {
|
||||
case "completed":
|
||||
return "stop";
|
||||
case "incomplete":
|
||||
return "length";
|
||||
case "failed":
|
||||
case "cancelled":
|
||||
return "error";
|
||||
default:
|
||||
return "stop";
|
||||
}
|
||||
}
|
||||
|
||||
private supportsReasoning(): boolean {
|
||||
// TODO base on models.dev
|
||||
return (
|
||||
this.model.includes("o1") ||
|
||||
this.model.includes("o3") ||
|
||||
this.model.includes("gpt-5") ||
|
||||
this.model.includes("gpt-4o")
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
@ -1,5 +1,13 @@
|
|||
export interface AI<T = any> {
|
||||
complete(request: Request, options?: T): Promise<AssistantMessage>;
|
||||
export interface LLMOptions {
|
||||
temperature?: number;
|
||||
maxTokens?: number;
|
||||
onText?: (text: string) => void;
|
||||
onThinking?: (thinking: string) => void;
|
||||
signal?: AbortSignal;
|
||||
}
|
||||
|
||||
export interface LLM<T extends LLMOptions> {
|
||||
complete(request: Context, options?: T): Promise<AssistantMessage>;
|
||||
}
|
||||
|
||||
export interface ModelInfo {
|
||||
|
|
@ -62,15 +70,10 @@ export interface Tool {
|
|||
parameters: Record<string, any>; // JSON Schema
|
||||
}
|
||||
|
||||
export interface Request {
|
||||
export interface Context {
|
||||
systemPrompt?: string;
|
||||
messages: Message[];
|
||||
tools?: Tool[];
|
||||
temperature?: number;
|
||||
maxTokens?: number;
|
||||
onText?: (text: string) => void;
|
||||
onThinking?: (thinking: string) => void;
|
||||
signal?: AbortSignal;
|
||||
}
|
||||
|
||||
export type Event =
|
||||
|
|
|
|||
|
|
@ -1,10 +1,9 @@
|
|||
import Anthropic from "@anthropic-ai/sdk";
|
||||
import { MessageCreateParamsBase } from "@anthropic-ai/sdk/resources/messages.mjs";
|
||||
import chalk from "chalk";
|
||||
import { AnthropicAI } from "../../src/providers/anthropic";
|
||||
import { Request, Message, Tool } from "../../src/types";
|
||||
|
||||
const anthropic = new Anthropic();
|
||||
import { readFileSync } from "fs";
|
||||
import { fileURLToPath } from "url";
|
||||
import { dirname, join } from "path";
|
||||
import { AnthropicLLM, AnthropicLLMOptions } from "../../src/providers/anthropic";
|
||||
import { Context, Tool } from "../../src/types";
|
||||
|
||||
// Define a simple calculator tool
|
||||
const tools: Tool[] = [
|
||||
|
|
@ -24,23 +23,27 @@ const tools: Tool[] = [
|
|||
}
|
||||
];
|
||||
|
||||
const ai = new AnthropicAI("claude-sonnet-4-0");
|
||||
const context: Request = {
|
||||
const options: AnthropicLLMOptions = {
|
||||
onText: (t) => process.stdout.write(t),
|
||||
onThinking: (t) => process.stdout.write(chalk.dim(t)),
|
||||
thinking: { enabled: true }
|
||||
};
|
||||
const ai = new AnthropicLLM("claude-sonnet-4-0", process.env.ANTHROPIC_OAUTH_TOKEN ?? process.env.ANTHROPIC_API_KEY);
|
||||
const context: Context = {
|
||||
systemPrompt: "You are a helpful assistant that can use tools to answer questions.",
|
||||
messages: [
|
||||
{
|
||||
role: "user",
|
||||
content: "Think about birds briefly. Then give me a list of 10 birds. Finally, calculate 42 * 17 + 123 and 453 + 434 in parallel using the calculator tool.",
|
||||
}
|
||||
],
|
||||
tools,
|
||||
onText: (t) => process.stdout.write(t),
|
||||
onThinking: (t) => process.stdout.write(chalk.dim(t))
|
||||
tools
|
||||
}
|
||||
|
||||
const options = {thinking: { enabled: true }};
|
||||
let msg = await ai.complete(context, options)
|
||||
context.messages.push(msg);
|
||||
console.log(JSON.stringify(msg, null, 2));
|
||||
console.log();
|
||||
console.log(chalk.yellow(JSON.stringify(msg, null, 2)));
|
||||
|
||||
for (const toolCall of msg.toolCalls || []) {
|
||||
if (toolCall.name === "calculate") {
|
||||
|
|
@ -56,7 +59,8 @@ for (const toolCall of msg.toolCalls || []) {
|
|||
}
|
||||
|
||||
msg = await ai.complete(context, options);
|
||||
console.log(JSON.stringify(msg, null, 2));
|
||||
console.log();
|
||||
console.log(chalk.yellow(JSON.stringify(msg, null, 2)));
|
||||
|
||||
|
||||
|
||||
|
|
|
|||
65
packages/ai/test/examples/openai-completions.ts
Normal file
65
packages/ai/test/examples/openai-completions.ts
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
import chalk from "chalk";
|
||||
import { Context, Tool } from "../../src/types";
|
||||
import { OpenAICompletionsLLM, OpenAICompletionsLLMOptions } from "../../src/providers/openai-completions";
|
||||
|
||||
// Define a simple calculator tool
|
||||
const tools: Tool[] = [
|
||||
{
|
||||
name: "calculate",
|
||||
description: "Perform a mathematical calculation",
|
||||
parameters: {
|
||||
type: "object" as const,
|
||||
properties: {
|
||||
expression: {
|
||||
type: "string",
|
||||
description: "The mathematical expression to evaluate"
|
||||
}
|
||||
},
|
||||
required: ["expression"]
|
||||
}
|
||||
}
|
||||
];
|
||||
|
||||
const options: OpenAICompletionsLLMOptions = {
|
||||
onText: (t) => process.stdout.write(t),
|
||||
onThinking: (t) => process.stdout.write(chalk.dim(t)),
|
||||
reasoningEffort: "medium",
|
||||
toolChoice: "auto"
|
||||
};
|
||||
const ai = new OpenAICompletionsLLM("gpt-5-mini");
|
||||
const context: Context = {
|
||||
systemPrompt: "You are a helpful assistant that can use tools to answer questions.",
|
||||
messages: [
|
||||
{
|
||||
role: "user",
|
||||
content: "Think about birds briefly. Then give me a list of 10 birds. Finally, calculate 42 * 17 + 123 and 453 + 434 in parallel using the calculator tool.",
|
||||
}
|
||||
],
|
||||
tools
|
||||
}
|
||||
|
||||
let msg = await ai.complete(context, options)
|
||||
context.messages.push(msg);
|
||||
console.log();
|
||||
console.log(chalk.yellow(JSON.stringify(msg, null, 2)));
|
||||
|
||||
for (const toolCall of msg.toolCalls || []) {
|
||||
if (toolCall.name === "calculate") {
|
||||
const expression = toolCall.arguments.expression;
|
||||
const result = eval(expression);
|
||||
context.messages.push({
|
||||
role: "toolResult",
|
||||
content: `The result of ${expression} is ${result}.`,
|
||||
toolCallId: toolCall.id,
|
||||
isError: false
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
msg = await ai.complete(context, options);
|
||||
console.log();
|
||||
console.log(chalk.yellow(JSON.stringify(msg, null, 2)));
|
||||
|
||||
|
||||
|
||||
|
||||
60
packages/ai/test/examples/openai-responses.ts
Normal file
60
packages/ai/test/examples/openai-responses.ts
Normal file
|
|
@ -0,0 +1,60 @@
|
|||
import chalk from "chalk";
|
||||
import { OpenAIResponsesLLMOptions, OpenAIResponsesLLM } from "../../src/providers/openai-responses.js";
|
||||
import type { Context, Tool } from "../../src/types.js";
|
||||
|
||||
// Define a simple calculator tool
|
||||
const tools: Tool[] = [
|
||||
{
|
||||
name: "calculate",
|
||||
description: "Perform a mathematical calculation",
|
||||
parameters: {
|
||||
type: "object" as const,
|
||||
properties: {
|
||||
expression: {
|
||||
type: "string",
|
||||
description: "The mathematical expression to evaluate"
|
||||
}
|
||||
},
|
||||
required: ["expression"]
|
||||
}
|
||||
}
|
||||
];
|
||||
|
||||
const ai = new OpenAIResponsesLLM("gpt-5");
|
||||
const context: Context = {
|
||||
messages: [
|
||||
{
|
||||
role: "user",
|
||||
content: "Think about birds briefly. Then give me a list of 10 birds. Finally, calculate 42 * 17 + 123 and 453 + 434 in parallel using the calculator tool.",
|
||||
}
|
||||
],
|
||||
tools,
|
||||
}
|
||||
|
||||
const options: OpenAIResponsesLLMOptions = {
|
||||
onText: (t) => process.stdout.write(t),
|
||||
onThinking: (t) => process.stdout.write(chalk.dim(t)),
|
||||
reasoningEffort: "low",
|
||||
reasoningSummary: "auto"
|
||||
};
|
||||
let msg = await ai.complete(context, options)
|
||||
context.messages.push(msg);
|
||||
console.log();
|
||||
console.log(chalk.yellow(JSON.stringify(msg, null, 2)));
|
||||
|
||||
for (const toolCall of msg.toolCalls || []) {
|
||||
if (toolCall.name === "calculate") {
|
||||
const expression = toolCall.arguments.expression;
|
||||
const result = eval(expression);
|
||||
context.messages.push({
|
||||
role: "toolResult",
|
||||
content: `The result of ${expression} is ${result}.`,
|
||||
toolCallId: toolCall.id,
|
||||
isError: false
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
msg = await ai.complete(context, options);
|
||||
console.log();
|
||||
console.log(chalk.yellow(JSON.stringify(msg, null, 2)));
|
||||
Loading…
Add table
Add a link
Reference in a new issue