feat(ai): Implement Zod-based tool validation and improve Agent API

- Replace JSON Schema with Zod schemas for tool parameter definitions
- Add runtime validation for all tool calls at provider level
- Create shared validation module with detailed error formatting
- Update Agent API with comprehensive event system
- Add agent tests with calculator tool for multi-turn execution
- Add abort test to verify proper handling of aborted requests
- Update documentation with detailed event flow examples
- Rename generate.ts to stream.ts for clarity
This commit is contained in:
Mario Zechner 2025-09-09 14:58:54 +02:00
parent 594b0dac6c
commit 35fe8f21e9
24 changed files with 1069 additions and 221 deletions

View file

@ -1,97 +1,68 @@
import { EventStream } from "../event-stream";
import { streamSimple } from "../generate.js";
import type {
AssistantMessage,
Context,
Message,
Model,
SimpleGenerateOptions,
ToolResultMessage,
UserMessage,
} from "../types.js";
import type { AgentContext, AgentTool, AgentToolResult } from "./types";
// Event types
export type AgentEvent =
| { type: "message_start"; message: Message }
| { type: "message_update"; message: AssistantMessage }
| { type: "message_complete"; message: Message }
| { type: "tool_execution_start"; toolCallId: string; toolName: string; args: any }
| {
type: "tool_execution_complete";
toolCallId: string;
toolName: string;
result: AgentToolResult<any> | string;
isError: boolean;
}
| { type: "turn_complete"; messages: AgentContext["messages"] };
// Configuration for prompt execution
export interface PromptConfig {
model: Model<any>;
apiKey: string;
enableThinking?: boolean;
preprocessor?: (messages: AgentContext["messages"], abortSignal?: AbortSignal) => Promise<AgentContext["messages"]>;
}
import { streamSimple } from "../stream.js";
import type { AssistantMessage, Context, Message, ToolResultMessage, UserMessage } from "../types.js";
import { validateToolArguments } from "../validation.js";
import type { AgentContext, AgentEvent, AgentTool, AgentToolResult, PromptConfig } from "./types";
// Main prompt function - returns a stream of events
export function prompt(
prompt: UserMessage,
context: AgentContext,
config: PromptConfig,
prompt: UserMessage,
signal?: AbortSignal,
): EventStream<AgentEvent, AgentContext["messages"]> {
const stream = new EventStream<AgentEvent, AgentContext["messages"]>(
(event) => event.type === "turn_complete",
(event) => (event.type === "turn_complete" ? event.messages : []),
(event) => event.type === "agent_end",
(event) => (event.type === "agent_end" ? event.messages : []),
);
// Run the prompt async
(async () => {
try {
// Track new messages generated during this prompt
const newMessages: AgentContext["messages"] = [];
// Track new messages generated during this prompt
const newMessages: AgentContext["messages"] = [];
// Create user message for the prompt
const messages = [...context.messages, prompt];
newMessages.push(prompt);
// Create user message
const messages = [...context.messages, prompt];
newMessages.push(prompt);
stream.push({ type: "agent_start" });
stream.push({ type: "turn_start" });
stream.push({ type: "message_start", message: prompt });
stream.push({ type: "message_end", message: prompt });
stream.push({ type: "message_start", message: prompt });
stream.push({ type: "message_complete", message: prompt });
// Update context with new messages
const currentContext: AgentContext = {
...context,
messages,
};
// Update context with new messages
const currentContext: AgentContext = {
...context,
messages,
};
// Keep looping while we have tool calls
let hasMoreToolCalls = true;
while (hasMoreToolCalls) {
// Stream assistant response
const assistantMessage = await streamAssistantResponse(currentContext, config, signal, stream);
newMessages.push(assistantMessage);
// Check for tool calls
const toolCalls = assistantMessage.content.filter((c) => c.type === "toolCall");
hasMoreToolCalls = toolCalls.length > 0;
if (hasMoreToolCalls) {
// Execute tool calls
const toolResults = await executeToolCalls(currentContext.tools, assistantMessage, signal, stream);
newMessages.push(...toolResults);
// Add tool results to context
currentContext.messages = [...currentContext.messages, ...toolResults];
}
// Keep looping while we have tool calls
let hasMoreToolCalls = true;
let firstTurn = true;
while (hasMoreToolCalls) {
if (!firstTurn) {
stream.push({ type: "turn_start" });
} else {
firstTurn = false;
}
// Stream assistant response
const assistantMessage = await streamAssistantResponse(currentContext, config, signal, stream);
newMessages.push(assistantMessage);
stream.push({ type: "turn_complete", messages: newMessages });
} catch (error) {
// End stream on error
stream.end([]);
throw error;
// Check for tool calls
const toolCalls = assistantMessage.content.filter((c) => c.type === "toolCall");
hasMoreToolCalls = toolCalls.length > 0;
const toolResults: ToolResultMessage[] = [];
if (hasMoreToolCalls) {
// Execute tool calls
toolResults.push(...(await executeToolCalls(currentContext.tools, assistantMessage, signal, stream)));
currentContext.messages.push(...toolResults);
newMessages.push(...toolResults);
}
stream.push({ type: "turn_end", assistantMessage, toolResults: toolResults });
}
stream.push({ type: "agent_end", messages: newMessages });
stream.end(newMessages);
})();
return stream;
@ -122,16 +93,7 @@ async function streamAssistantResponse(
tools: context.tools, // AgentTool extends Tool, so this works
};
const options: SimpleGenerateOptions = {
apiKey: config.apiKey,
signal,
};
if (config.model.reasoning && config.enableThinking) {
options.reasoning = "medium";
}
const response = await streamSimple(config.model, processedContext, options);
const response = await streamSimple(config.model, processedContext, { ...config, signal });
let partialMessage: AssistantMessage | null = null;
let addedPartial = false;
@ -147,14 +109,17 @@ async function streamAssistantResponse(
case "text_start":
case "text_delta":
case "text_end":
case "thinking_start":
case "thinking_delta":
case "thinking_end":
case "toolcall_start":
case "toolcall_delta":
case "toolcall_end":
if (partialMessage) {
partialMessage = event.partial;
context.messages[context.messages.length - 1] = partialMessage;
stream.push({ type: "message_update", message: { ...partialMessage } });
stream.push({ type: "message_update", assistantMessageEvent: event, message: { ...partialMessage } });
}
break;
@ -166,7 +131,7 @@ async function streamAssistantResponse(
} else {
context.messages.push(finalMessage);
}
stream.push({ type: "message_complete", message: finalMessage });
stream.push({ type: "message_end", message: finalMessage });
return finalMessage;
}
}
@ -176,7 +141,7 @@ async function streamAssistantResponse(
}
async function executeToolCalls<T>(
tools: AgentTool<T>[] | undefined,
tools: AgentTool<any, T>[] | undefined,
assistantMessage: AssistantMessage,
signal: AbortSignal | undefined,
stream: EventStream<AgentEvent, Message[]>,
@ -199,14 +164,19 @@ async function executeToolCalls<T>(
try {
if (!tool) throw new Error(`Tool ${toolCall.name} not found`);
resultOrError = await tool.execute(toolCall.arguments, toolCall.id, signal);
// Validate arguments using shared validation function
const validatedArgs = validateToolArguments(tool, toolCall);
// Execute with validated, typed arguments
resultOrError = await tool.execute(toolCall.id, validatedArgs, signal);
} catch (e) {
resultOrError = `Error: ${e instanceof Error ? e.message : String(e)}`;
resultOrError = e instanceof Error ? e.message : String(e);
isError = true;
}
stream.push({
type: "tool_execution_complete",
type: "tool_execution_end",
toolCallId: toolCall.id,
toolName: toolCall.name,
result: resultOrError,
@ -224,7 +194,7 @@ async function executeToolCalls<T>(
results.push(toolResultMessage);
stream.push({ type: "message_start", message: toolResultMessage });
stream.push({ type: "message_complete", message: toolResultMessage });
stream.push({ type: "message_end", message: toolResultMessage });
}
return results;

View file

@ -1,3 +1,3 @@
export { type AgentEvent, type PromptConfig, prompt } from "./agent";
export { prompt } from "./agent";
export * from "./tools";
export type { AgentContext, AgentTool } from "./types";
export type { AgentContext, AgentEvent, AgentTool, PromptConfig } from "./types";

View file

@ -1,3 +1,4 @@
import { z } from "zod";
import type { AgentTool } from "../../agent";
export interface CalculateResult {
@ -14,21 +15,16 @@ export function calculate(expression: string): CalculateResult {
}
}
export const calculateTool: AgentTool<undefined> = {
const calculateSchema = z.object({
expression: z.string().describe("The mathematical expression to evaluate"),
});
export const calculateTool: AgentTool<typeof calculateSchema, undefined> = {
label: "Calculator",
name: "calculate",
description: "Evaluate mathematical expressions",
parameters: {
type: "object",
properties: {
expression: {
type: "string",
description: "The mathematical expression to evaluate",
},
},
required: ["expression"],
},
execute: async (args: { expression: string }) => {
parameters: calculateSchema,
execute: async (_toolCallId, args) => {
return calculate(args.expression);
},
};

View file

@ -1,3 +1,4 @@
import { z } from "zod";
import type { AgentTool } from "../../agent";
import type { AgentToolResult } from "../types";
@ -25,20 +26,16 @@ export async function getCurrentTime(timezone?: string): Promise<GetCurrentTimeR
};
}
export const getCurrentTimeTool: AgentTool<{ utcTimestamp: number }> = {
const getCurrentTimeSchema = z.object({
timezone: z.string().optional().describe("Optional timezone (e.g., 'America/New_York', 'Europe/London')"),
});
export const getCurrentTimeTool: AgentTool<typeof getCurrentTimeSchema, { utcTimestamp: number }> = {
label: "Current Time",
name: "get_current_time",
description: "Get the current date and time",
parameters: {
type: "object",
properties: {
timezone: {
type: "string",
description: "Optional timezone (e.g., 'America/New_York', 'Europe/London')",
},
},
},
execute: async (args: { timezone?: string }) => {
parameters: getCurrentTimeSchema,
execute: async (_toolCallId, args) => {
return getCurrentTime(args.timezone);
},
};

View file

@ -1,4 +1,13 @@
import type { Message, Tool } from "../types.js";
import type { ZodSchema, z } from "zod";
import type {
AssistantMessage,
AssistantMessageEvent,
Message,
Model,
SimpleStreamOptions,
Tool,
ToolResultMessage,
} from "../types.js";
export interface AgentToolResult<T> {
// Output of the tool to be given to the LLM in ToolResultMessage.content
@ -8,10 +17,14 @@ export interface AgentToolResult<T> {
}
// AgentTool extends Tool but adds the execute function
export interface AgentTool<TDetails> extends Tool {
export interface AgentTool<TParameters extends ZodSchema = ZodSchema, TDetails = any> extends Tool<TParameters> {
// A human-readable label for the tool to be displayed in UI
label: string;
execute: (params: any, toolCallId: string, signal?: AbortSignal) => Promise<AgentToolResult<TDetails>>;
execute: (
toolCallId: string,
params: z.infer<TParameters>,
signal?: AbortSignal,
) => Promise<AgentToolResult<TDetails>>;
}
// AgentContext is like Context but uses AgentTool
@ -20,3 +33,37 @@ export interface AgentContext {
messages: Message[];
tools?: AgentTool<any>[];
}
// Event types
export type AgentEvent =
	// Emitted when the agent starts. An agent can emit multiple turns
	| { type: "agent_start" }
	// Emitted when a turn starts. A turn can emit an optional user message (initial prompt), an assistant message (response) and multiple tool result messages
	| { type: "turn_start" }
	// Emitted when a user, assistant or tool result message starts
	| { type: "message_start"; message: Message }
	// Emitted when an assistant message is updated due to streaming
	| { type: "message_update"; assistantMessageEvent: AssistantMessageEvent; message: AssistantMessage }
	// Emitted when a user, assistant or tool result message is complete
	| { type: "message_end"; message: Message }
	// Emitted when a tool execution starts
	| { type: "tool_execution_start"; toolCallId: string; toolName: string; args: any }
	// Emitted when a tool execution completes
	| {
			type: "tool_execution_end";
			toolCallId: string;
			toolName: string;
			result: AgentToolResult<any> | string;
			isError: boolean;
	  }
	// Emitted when a full turn completes
	| { type: "turn_end"; assistantMessage: AssistantMessage; toolResults: ToolResultMessage[] }
	// Emitted when the agent has completed all its turns. All messages from every turn are
	// contained in messages, which can be appended to the context
	| { type: "agent_end"; messages: AgentContext["messages"] };
// Configuration for prompt execution. Inherits apiKey, signal, temperature,
// maxTokens and reasoning from SimpleStreamOptions.
export interface PromptConfig extends SimpleStreamOptions {
	// The model used for every assistant turn of this prompt
	model: Model<any>;
	// Optional async transform of the message history (receives the abort signal);
	// presumably applied before each model call (e.g. for compaction) — TODO confirm against streamAssistantResponse
	preprocessor?: (messages: AgentContext["messages"], abortSignal?: AbortSignal) => Promise<AgentContext["messages"]>;
}

View file

@ -1,8 +1,9 @@
export { z } from "zod";
export * from "./agent/index.js";
export * from "./generate.js";
export * from "./models.js";
export * from "./providers/anthropic.js";
export * from "./providers/google.js";
export * from "./providers/openai-completions.js";
export * from "./providers/openai-responses.js";
export * from "./stream.js";
export * from "./types.js";

View file

@ -1413,6 +1413,23 @@ export const MODELS = {
} satisfies Model<"anthropic-messages">,
},
openrouter: {
"nvidia/nemotron-nano-9b-v2": {
id: "nvidia/nemotron-nano-9b-v2",
name: "NVIDIA: Nemotron Nano 9B V2",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openrouter/sonoma-dusk-alpha": {
id: "openrouter/sonoma-dusk-alpha",
name: "Sonoma Dusk Alpha",

View file

@ -4,32 +4,34 @@ import type {
MessageCreateParamsStreaming,
MessageParam,
} from "@anthropic-ai/sdk/resources/messages.js";
import { zodToJsonSchema } from "zod-to-json-schema";
import { AssistantMessageEventStream } from "../event-stream.js";
import { calculateCost } from "../models.js";
import type {
Api,
AssistantMessage,
Context,
GenerateFunction,
GenerateOptions,
Message,
Model,
StopReason,
StreamFunction,
StreamOptions,
TextContent,
ThinkingContent,
Tool,
ToolCall,
ToolResultMessage,
} from "../types.js";
import { validateToolArguments } from "../validation.js";
import { transformMessages } from "./transorm-messages.js";
export interface AnthropicOptions extends GenerateOptions {
export interface AnthropicOptions extends StreamOptions {
thinkingEnabled?: boolean;
thinkingBudgetTokens?: number;
toolChoice?: "auto" | "any" | "none" | { type: "tool"; name: string };
}
export const streamAnthropic: GenerateFunction<"anthropic-messages"> = (
export const streamAnthropic: StreamFunction<"anthropic-messages"> = (
model: Model<"anthropic-messages">,
context: Context,
options?: AnthropicOptions,
@ -159,6 +161,15 @@ export const streamAnthropic: GenerateFunction<"anthropic-messages"> = (
});
} else if (block.type === "toolCall") {
block.arguments = JSON.parse(block.partialJson);
// Validate tool arguments if tool definition is available
if (context.tools) {
const tool = context.tools.find((t) => t.name === block.name);
if (tool) {
block.arguments = validateToolArguments(tool, block);
}
}
delete (block as any).partialJson;
stream.push({
type: "toolcall_end",
@ -390,7 +401,7 @@ function convertMessages(messages: Message[], model: Model<"anthropic-messages">
content: blocks,
});
} else if (msg.role === "toolResult") {
// Collect all consecutive toolResult messages
// Collect all consecutive toolResult messages, needed for z.ai Anthropic endpoint
const toolResults: ContentBlockParam[] = [];
// Add the current tool result
@ -430,15 +441,19 @@ function convertMessages(messages: Message[], model: Model<"anthropic-messages">
function convertTools(tools: Tool[]): Anthropic.Messages.Tool[] {
if (!tools) return [];
return tools.map((tool) => ({
name: tool.name,
description: tool.description,
input_schema: {
type: "object" as const,
properties: tool.parameters.properties || {},
required: tool.parameters.required || [],
},
}));
return tools.map((tool) => {
const jsonSchema = zodToJsonSchema(tool.parameters, { $refStrategy: "none" }) as any;
return {
name: tool.name,
description: tool.description,
input_schema: {
type: "object" as const,
properties: jsonSchema.properties || {},
required: jsonSchema.required || [],
},
};
});
}
function mapStopReason(reason: Anthropic.Messages.StopReason): StopReason {

View file

@ -7,24 +7,26 @@ import {
GoogleGenAI,
type Part,
} from "@google/genai";
import { zodToJsonSchema } from "zod-to-json-schema";
import { AssistantMessageEventStream } from "../event-stream.js";
import { calculateCost } from "../models.js";
import type {
Api,
AssistantMessage,
Context,
GenerateFunction,
GenerateOptions,
Model,
StopReason,
StreamFunction,
StreamOptions,
TextContent,
ThinkingContent,
Tool,
ToolCall,
} from "../types.js";
import { validateToolArguments } from "../validation.js";
import { transformMessages } from "./transorm-messages.js";
export interface GoogleOptions extends GenerateOptions {
export interface GoogleOptions extends StreamOptions {
toolChoice?: "auto" | "none" | "any";
thinking?: {
enabled: boolean;
@ -35,7 +37,7 @@ export interface GoogleOptions extends GenerateOptions {
// Counter for generating unique tool call IDs
let toolCallCounter = 0;
export const streamGoogle: GenerateFunction<"google-generative-ai"> = (
export const streamGoogle: StreamFunction<"google-generative-ai"> = (
model: Model<"google-generative-ai">,
context: Context,
options?: GoogleOptions,
@ -159,6 +161,15 @@ export const streamGoogle: GenerateFunction<"google-generative-ai"> = (
name: part.functionCall.name || "",
arguments: part.functionCall.args as Record<string, any>,
};
// Validate tool arguments if tool definition is available
if (context.tools) {
const tool = context.tools.find((t) => t.name === toolCall.name);
if (tool) {
toolCall.arguments = validateToolArguments(tool, toolCall);
}
}
output.content.push(toolCall);
stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output });
stream.push({
@ -380,7 +391,7 @@ function convertTools(tools: Tool[]): any[] {
functionDeclarations: tools.map((tool) => ({
name: tool.name,
description: tool.description,
parameters: tool.parameters,
parameters: zodToJsonSchema(tool.parameters, { $refStrategy: "none" }),
})),
},
];

View file

@ -7,28 +7,30 @@ import type {
ChatCompletionContentPartText,
ChatCompletionMessageParam,
} from "openai/resources/chat/completions.js";
import { zodToJsonSchema } from "zod-to-json-schema";
import { AssistantMessageEventStream } from "../event-stream.js";
import { calculateCost } from "../models.js";
import type {
AssistantMessage,
Context,
GenerateFunction,
GenerateOptions,
Model,
StopReason,
StreamFunction,
StreamOptions,
TextContent,
ThinkingContent,
Tool,
ToolCall,
} from "../types.js";
import { validateToolArguments } from "../validation.js";
import { transformMessages } from "./transorm-messages.js";
export interface OpenAICompletionsOptions extends GenerateOptions {
export interface OpenAICompletionsOptions extends StreamOptions {
toolChoice?: "auto" | "none" | "required" | { type: "function"; function: { name: string } };
reasoningEffort?: "minimal" | "low" | "medium" | "high";
}
export const streamOpenAICompletions: GenerateFunction<"openai-completions"> = (
export const streamOpenAICompletions: StreamFunction<"openai-completions"> = (
model: Model<"openai-completions">,
context: Context,
options?: OpenAICompletionsOptions,
@ -79,6 +81,15 @@ export const streamOpenAICompletions: GenerateFunction<"openai-completions"> = (
});
} else if (block.type === "toolCall") {
block.arguments = JSON.parse(block.partialArgs || "{}");
// Validate tool arguments if tool definition is available
if (context.tools) {
const tool = context.tools.find((t) => t.name === block.name);
if (tool) {
block.arguments = validateToolArguments(tool, block);
}
}
delete block.partialArgs;
stream.push({
type: "toolcall_end",
@ -381,7 +392,7 @@ function convertTools(tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool
function: {
name: tool.name,
description: tool.description,
parameters: tool.parameters,
parameters: zodToJsonSchema(tool.parameters, { $refStrategy: "none" }),
},
}));
}

View file

@ -10,25 +10,27 @@ import type {
ResponseOutputMessage,
ResponseReasoningItem,
} from "openai/resources/responses/responses.js";
import { zodToJsonSchema } from "zod-to-json-schema";
import { AssistantMessageEventStream } from "../event-stream.js";
import { calculateCost } from "../models.js";
import type {
Api,
AssistantMessage,
Context,
GenerateFunction,
GenerateOptions,
Model,
StopReason,
StreamFunction,
StreamOptions,
TextContent,
ThinkingContent,
Tool,
ToolCall,
} from "../types.js";
import { validateToolArguments } from "../validation.js";
import { transformMessages } from "./transorm-messages.js";
// OpenAI Responses-specific options
export interface OpenAIResponsesOptions extends GenerateOptions {
export interface OpenAIResponsesOptions extends StreamOptions {
reasoningEffort?: "minimal" | "low" | "medium" | "high";
reasoningSummary?: "auto" | "detailed" | "concise" | null;
}
@ -36,7 +38,7 @@ export interface OpenAIResponsesOptions extends GenerateOptions {
/**
* Generate function for OpenAI Responses API
*/
export const streamOpenAIResponses: GenerateFunction<"openai-responses"> = (
export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
model: Model<"openai-responses">,
context: Context,
options?: OpenAIResponsesOptions,
@ -238,6 +240,15 @@ export const streamOpenAIResponses: GenerateFunction<"openai-responses"> = (
name: item.name,
arguments: JSON.parse(item.arguments),
};
// Validate tool arguments if tool definition is available
if (context.tools) {
const tool = context.tools.find((t) => t.name === toolCall.name);
if (tool) {
toolCall.arguments = validateToolArguments(tool, toolCall);
}
}
stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
}
}
@ -451,7 +462,7 @@ function convertTools(tools: Tool[]): OpenAITool[] {
type: "function",
name: tool.name,
description: tool.description,
parameters: tool.parameters,
parameters: zodToJsonSchema(tool.parameters, { $refStrategy: "none" }),
strict: null,
}));
}

View file

@ -11,7 +11,7 @@ import type {
Model,
OptionsForApi,
ReasoningEffort,
SimpleGenerateOptions,
SimpleStreamOptions,
} from "./types.js";
const apiKeys: Map<string, string> = new Map();
@ -90,7 +90,7 @@ export async function complete<TApi extends Api>(
export function streamSimple<TApi extends Api>(
model: Model<TApi>,
context: Context,
options?: SimpleGenerateOptions,
options?: SimpleStreamOptions,
): AssistantMessageEventStream {
const apiKey = options?.apiKey || getApiKey(model.provider);
if (!apiKey) {
@ -104,7 +104,7 @@ export function streamSimple<TApi extends Api>(
export async function completeSimple<TApi extends Api>(
model: Model<TApi>,
context: Context,
options?: SimpleGenerateOptions,
options?: SimpleStreamOptions,
): Promise<AssistantMessage> {
const s = streamSimple(model, context, options);
return s.result();
@ -112,7 +112,7 @@ export async function completeSimple<TApi extends Api>(
function mapOptionsForApi<TApi extends Api>(
model: Model<TApi>,
options?: SimpleGenerateOptions,
options?: SimpleStreamOptions,
apiKey?: string,
): OptionsForApi<TApi> {
const base = {

View file

@ -16,11 +16,11 @@ export interface ApiOptionsMap {
}
// Compile-time exhaustiveness check - this will fail if ApiOptionsMap doesn't have all KnownApi keys
type _CheckExhaustive = ApiOptionsMap extends Record<Api, GenerateOptions>
? Record<Api, GenerateOptions> extends ApiOptionsMap
type _CheckExhaustive = ApiOptionsMap extends Record<Api, StreamOptions>
? Record<Api, StreamOptions> extends ApiOptionsMap
? true
: ["ApiOptionsMap is missing some KnownApi values", Exclude<Api, keyof ApiOptionsMap>]
: ["ApiOptionsMap doesn't extend Record<KnownApi, GenerateOptions>"];
: ["ApiOptionsMap doesn't extend Record<KnownApi, StreamOptions>"];
const _exhaustive: _CheckExhaustive = true;
// Helper type to get options for a specific API
@ -32,20 +32,20 @@ export type Provider = KnownProvider | string;
export type ReasoningEffort = "minimal" | "low" | "medium" | "high";
// Base options all providers share
export interface GenerateOptions {
export interface StreamOptions {
temperature?: number;
maxTokens?: number;
signal?: AbortSignal;
apiKey?: string;
}
// Unified options with reasoning (what public generate() accepts)
export interface SimpleGenerateOptions extends GenerateOptions {
// Unified options with reasoning passed to streamSimple() and completeSimple()
export interface SimpleStreamOptions extends StreamOptions {
reasoning?: ReasoningEffort;
}
// Generic GenerateFunction with typed options
export type GenerateFunction<TApi extends Api> = (
// Generic StreamFunction with typed options
export type StreamFunction<TApi extends Api> = (
model: Model<TApi>,
context: Context,
options: OptionsForApi<TApi>,
@ -119,10 +119,12 @@ export interface ToolResultMessage<TDetails = any> {
export type Message = UserMessage | AssistantMessage | ToolResultMessage;
export interface Tool {
import type { ZodSchema } from "zod";
export interface Tool<TParameters extends ZodSchema = ZodSchema> {
name: string;
description: string;
parameters: Record<string, any>; // JSON Schema
parameters: TParameters;
}
export interface Context {

View file

@ -0,0 +1,32 @@
import { z } from "zod";
import type { Tool, ToolCall } from "./types.js";
/**
* Validates tool call arguments against the tool's Zod schema
* @param tool The tool definition with Zod schema
* @param toolCall The tool call from the LLM
* @returns The validated arguments
* @throws ZodError with formatted message if validation fails
*/
export function validateToolArguments(tool: Tool, toolCall: ToolCall): any {
try {
// Validate arguments with Zod schema
return tool.parameters.parse(toolCall.arguments);
} catch (e) {
if (e instanceof z.ZodError) {
// Format validation errors nicely
const errors = e.issues
.map((err) => {
const path = err.path.length > 0 ? err.path.join(".") : "root";
return ` - ${path}: ${err.message}`;
})
.join("\n");
const errorMessage = `Validation failed for tool "${toolCall.name}":\n${errors}\n\nReceived arguments:\n${JSON.stringify(toolCall.arguments, null, 2)}`;
// Throw a new error with the formatted message
throw new Error(errorMessage);
}
throw e;
}
}