feat(ai): Implement Zod-based tool validation and improve Agent API

- Replace JSON Schema with Zod schemas for tool parameter definitions
- Add runtime validation for all tool calls at provider level
- Create shared validation module with detailed error formatting
- Update Agent API with comprehensive event system
- Add agent tests with calculator tool for multi-turn execution
- Add abort test to verify proper handling of aborted requests
- Update documentation with detailed event flow examples
- Rename generate.ts to stream.ts for clarity
This commit is contained in:
Mario Zechner 2025-09-09 14:58:54 +02:00
parent 594b0dac6c
commit 35fe8f21e9
24 changed files with 1069 additions and 221 deletions

View file

@ -4,32 +4,34 @@ import type {
MessageCreateParamsStreaming,
MessageParam,
} from "@anthropic-ai/sdk/resources/messages.js";
import { zodToJsonSchema } from "zod-to-json-schema";
import { AssistantMessageEventStream } from "../event-stream.js";
import { calculateCost } from "../models.js";
import type {
Api,
AssistantMessage,
Context,
GenerateFunction,
GenerateOptions,
Message,
Model,
StopReason,
StreamFunction,
StreamOptions,
TextContent,
ThinkingContent,
Tool,
ToolCall,
ToolResultMessage,
} from "../types.js";
import { validateToolArguments } from "../validation.js";
import { transformMessages } from "./transorm-messages.js";
export interface AnthropicOptions extends GenerateOptions {
export interface AnthropicOptions extends StreamOptions {
thinkingEnabled?: boolean;
thinkingBudgetTokens?: number;
toolChoice?: "auto" | "any" | "none" | { type: "tool"; name: string };
}
export const streamAnthropic: GenerateFunction<"anthropic-messages"> = (
export const streamAnthropic: StreamFunction<"anthropic-messages"> = (
model: Model<"anthropic-messages">,
context: Context,
options?: AnthropicOptions,
@ -159,6 +161,15 @@ export const streamAnthropic: GenerateFunction<"anthropic-messages"> = (
});
} else if (block.type === "toolCall") {
block.arguments = JSON.parse(block.partialJson);
// Validate tool arguments if tool definition is available
if (context.tools) {
const tool = context.tools.find((t) => t.name === block.name);
if (tool) {
block.arguments = validateToolArguments(tool, block);
}
}
delete (block as any).partialJson;
stream.push({
type: "toolcall_end",
@ -390,7 +401,7 @@ function convertMessages(messages: Message[], model: Model<"anthropic-messages">
content: blocks,
});
} else if (msg.role === "toolResult") {
// Collect all consecutive toolResult messages
// Collect all consecutive toolResult messages, needed for z.ai Anthropic endpoint
const toolResults: ContentBlockParam[] = [];
// Add the current tool result
@ -430,15 +441,19 @@ function convertMessages(messages: Message[], model: Model<"anthropic-messages">
/**
 * Converts internal Tool definitions (with Zod parameter schemas) into the
 * Anthropic Messages API tool format.
 *
 * NOTE(review): the diff rendering concatenated the pre-change implementation
 * (which read `tool.parameters` as a plain JSON schema) ahead of the updated
 * one, leaving the zodToJsonSchema version unreachable. Only the updated
 * implementation is kept here.
 *
 * @param tools - Tool definitions from the request context; may be null/undefined.
 * @returns Anthropic-formatted tool declarations, or [] when no tools are given.
 */
function convertTools(tools: Tool[]): Anthropic.Messages.Tool[] {
  if (!tools) return [];
  return tools.map((tool) => {
    // $refStrategy: "none" inlines all referenced schemas, since the
    // Anthropic endpoint does not resolve JSON-schema $ref pointers.
    const jsonSchema = zodToJsonSchema(tool.parameters, { $refStrategy: "none" }) as any;
    return {
      name: tool.name,
      description: tool.description,
      input_schema: {
        type: "object" as const,
        // Fall back to empty schema parts for parameterless tools.
        properties: jsonSchema.properties || {},
        required: jsonSchema.required || [],
      },
    };
  });
}
function mapStopReason(reason: Anthropic.Messages.StopReason): StopReason {

View file

@ -7,24 +7,26 @@ import {
GoogleGenAI,
type Part,
} from "@google/genai";
import { zodToJsonSchema } from "zod-to-json-schema";
import { AssistantMessageEventStream } from "../event-stream.js";
import { calculateCost } from "../models.js";
import type {
Api,
AssistantMessage,
Context,
GenerateFunction,
GenerateOptions,
Model,
StopReason,
StreamFunction,
StreamOptions,
TextContent,
ThinkingContent,
Tool,
ToolCall,
} from "../types.js";
import { validateToolArguments } from "../validation.js";
import { transformMessages } from "./transorm-messages.js";
export interface GoogleOptions extends GenerateOptions {
export interface GoogleOptions extends StreamOptions {
toolChoice?: "auto" | "none" | "any";
thinking?: {
enabled: boolean;
@ -35,7 +37,7 @@ export interface GoogleOptions extends GenerateOptions {
// Counter for generating unique tool call IDs
let toolCallCounter = 0;
export const streamGoogle: GenerateFunction<"google-generative-ai"> = (
export const streamGoogle: StreamFunction<"google-generative-ai"> = (
model: Model<"google-generative-ai">,
context: Context,
options?: GoogleOptions,
@ -159,6 +161,15 @@ export const streamGoogle: GenerateFunction<"google-generative-ai"> = (
name: part.functionCall.name || "",
arguments: part.functionCall.args as Record<string, any>,
};
// Validate tool arguments if tool definition is available
if (context.tools) {
const tool = context.tools.find((t) => t.name === toolCall.name);
if (tool) {
toolCall.arguments = validateToolArguments(tool, toolCall);
}
}
output.content.push(toolCall);
stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output });
stream.push({
@ -380,7 +391,7 @@ function convertTools(tools: Tool[]): any[] {
functionDeclarations: tools.map((tool) => ({
name: tool.name,
description: tool.description,
parameters: tool.parameters,
parameters: zodToJsonSchema(tool.parameters, { $refStrategy: "none" }),
})),
},
];

View file

@ -7,28 +7,30 @@ import type {
ChatCompletionContentPartText,
ChatCompletionMessageParam,
} from "openai/resources/chat/completions.js";
import { zodToJsonSchema } from "zod-to-json-schema";
import { AssistantMessageEventStream } from "../event-stream.js";
import { calculateCost } from "../models.js";
import type {
AssistantMessage,
Context,
GenerateFunction,
GenerateOptions,
Model,
StopReason,
StreamFunction,
StreamOptions,
TextContent,
ThinkingContent,
Tool,
ToolCall,
} from "../types.js";
import { validateToolArguments } from "../validation.js";
import { transformMessages } from "./transorm-messages.js";
export interface OpenAICompletionsOptions extends GenerateOptions {
export interface OpenAICompletionsOptions extends StreamOptions {
toolChoice?: "auto" | "none" | "required" | { type: "function"; function: { name: string } };
reasoningEffort?: "minimal" | "low" | "medium" | "high";
}
export const streamOpenAICompletions: GenerateFunction<"openai-completions"> = (
export const streamOpenAICompletions: StreamFunction<"openai-completions"> = (
model: Model<"openai-completions">,
context: Context,
options?: OpenAICompletionsOptions,
@ -79,6 +81,15 @@ export const streamOpenAICompletions: GenerateFunction<"openai-completions"> = (
});
} else if (block.type === "toolCall") {
block.arguments = JSON.parse(block.partialArgs || "{}");
// Validate tool arguments if tool definition is available
if (context.tools) {
const tool = context.tools.find((t) => t.name === block.name);
if (tool) {
block.arguments = validateToolArguments(tool, block);
}
}
delete block.partialArgs;
stream.push({
type: "toolcall_end",
@ -381,7 +392,7 @@ function convertTools(tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool
function: {
name: tool.name,
description: tool.description,
parameters: tool.parameters,
parameters: zodToJsonSchema(tool.parameters, { $refStrategy: "none" }),
},
}));
}

View file

@ -10,25 +10,27 @@ import type {
ResponseOutputMessage,
ResponseReasoningItem,
} from "openai/resources/responses/responses.js";
import { zodToJsonSchema } from "zod-to-json-schema";
import { AssistantMessageEventStream } from "../event-stream.js";
import { calculateCost } from "../models.js";
import type {
Api,
AssistantMessage,
Context,
GenerateFunction,
GenerateOptions,
Model,
StopReason,
StreamFunction,
StreamOptions,
TextContent,
ThinkingContent,
Tool,
ToolCall,
} from "../types.js";
import { validateToolArguments } from "../validation.js";
import { transformMessages } from "./transorm-messages.js";
// OpenAI Responses-specific options
export interface OpenAIResponsesOptions extends GenerateOptions {
export interface OpenAIResponsesOptions extends StreamOptions {
reasoningEffort?: "minimal" | "low" | "medium" | "high";
reasoningSummary?: "auto" | "detailed" | "concise" | null;
}
@ -36,7 +38,7 @@ export interface OpenAIResponsesOptions extends GenerateOptions {
/**
* Generate function for OpenAI Responses API
*/
export const streamOpenAIResponses: GenerateFunction<"openai-responses"> = (
export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
model: Model<"openai-responses">,
context: Context,
options?: OpenAIResponsesOptions,
@ -238,6 +240,15 @@ export const streamOpenAIResponses: GenerateFunction<"openai-responses"> = (
name: item.name,
arguments: JSON.parse(item.arguments),
};
// Validate tool arguments if tool definition is available
if (context.tools) {
const tool = context.tools.find((t) => t.name === toolCall.name);
if (tool) {
toolCall.arguments = validateToolArguments(tool, toolCall);
}
}
stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
}
}
@ -451,7 +462,7 @@ function convertTools(tools: Tool[]): OpenAITool[] {
type: "function",
name: tool.name,
description: tool.description,
parameters: tool.parameters,
parameters: zodToJsonSchema(tool.parameters, { $refStrategy: "none" }),
strict: null,
}));
}