feat(ai): Add start event emission to all providers

- Emit start event with model and provider info after creating stream
- Add abort signal tests for all providers
- Update README abort signal section to reflect non-throwing API
- Fix model references in README examples
This commit is contained in:
Mario Zechner 2025-08-31 23:09:14 +02:00
parent 8d4edf6458
commit a132b8140c
6 changed files with 167 additions and 20 deletions

View file

@@ -129,6 +129,8 @@ export class AnthropicLLM implements LLM<AnthropicLLMOptions> {
},
);
options?.onEvent?.({ type: "start", model: this.modelInfo.id, provider: this.modelInfo.provider });
let blockType: "text" | "thinking" | "toolUse" | "other" = "other";
let blockContent = "";
let toolCall: (ToolCall & { partialJson: string }) | null = null;

View file

@@ -89,6 +89,14 @@ export class GoogleLLM implements LLM<GoogleLLMOptions> {
};
}
// Abort signal
if (options?.signal) {
if (options.signal.aborted) {
throw new Error("Request aborted");
}
config.abortSignal = options.signal;
}
// Build the request parameters
const params: GenerateContentParameters = {
model: this.model.id,
@@ -98,6 +106,8 @@ export class GoogleLLM implements LLM<GoogleLLMOptions> {
const stream = await this.client.models.generateContentStream(params);
options?.onEvent?.({ type: "start", model: this.model.id, provider: this.model.provider });
const blocks: AssistantMessage["content"] = [];
let currentBlock: TextContent | ThinkingContent | null = null;
let usage: Usage = {

View file

@@ -92,6 +92,8 @@ export class OpenAICompletionsLLM implements LLM<OpenAICompletionsLLMOptions> {
signal: options?.signal,
});
options?.onEvent?.({ type: "start", model: this.modelInfo.id, provider: this.modelInfo.provider });
const blocks: AssistantMessage["content"] = [];
let currentBlock: TextContent | ThinkingContent | (ToolCall & { partialArgs?: string }) | null = null;
let usage: Usage = {

View file

@@ -85,6 +85,8 @@ export class OpenAIResponsesLLM implements LLM<OpenAIResponsesLLMOptions> {
signal: options?.signal,
});
options?.onEvent?.({ type: "start", model: this.modelInfo.id, provider: this.modelInfo.provider });
const outputItems: (ResponseReasoningItem | ResponseOutputMessage | ResponseFunctionToolCall)[] = []; // any for function_call items
let currentTextAccum = ""; // For delta accumulation
let currentThinkingAccum = ""; // For delta accumulation
@ -184,9 +186,25 @@ export class OpenAIResponsesLLM implements LLM<OpenAIResponsesLLMOptions> {
} satisfies AssistantMessage;
options?.onEvent?.({ type: "error", error: errorOutput.error || "Unknown error" });
return errorOutput;
} else if (event.type === "response.failed") {
const errorOutput = {
role: "assistant",
content: [],
provider: this.modelInfo.provider,
model: this.modelInfo.id,
usage,
stopReason: "error",
error: "Unknown error",
} satisfies AssistantMessage;
options?.onEvent?.({ type: "error", error: errorOutput.error || "Unknown error" });
return errorOutput;
}
}
if (options?.signal?.aborted) {
throw new Error("Request was aborted");
}
// Convert output items to blocks
const blocks: AssistantMessage["content"] = [];