Mirror of https://github.com/getcompanion-ai/co-mono.git (synced 2026-04-17 02:04:05 +00:00)
feat(ai): Add start event emission to all providers
- Emit start event with model and provider info after creating stream
- Add abort signal tests for all providers
- Update README abort signal section to reflect non-throwing API
- Fix model references in README examples
commit a132b8140c
parent 8d4edf6458

6 changed files with 167 additions and 20 deletions
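For context, a caller could observe the new event through the `onEvent` callback that appears in each hunk below. The sketch is hypothetical: the import path, the `complete()` method name, and the message shape are assumptions, while the `onEvent` option and the `{ type: "start", model, provider }` payload come straight from the diff.

```ts
import { AnthropicLLM } from "co-mono"; // hypothetical import path

// Hypothetical constructor options; the diff does not show them.
const llm = new AnthropicLLM({ apiKey: process.env.ANTHROPIC_API_KEY! });

const reply = await llm.complete(
  [{ role: "user", content: "Hello" }], // assumed message shape
  {
    onEvent: (event) => {
      // Per the diff: emitted once the provider stream has been created.
      if (event.type === "start") {
        console.log(`stream started: ${event.provider}/${event.model}`);
      }
    },
  },
);
```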
```diff
@@ -129,6 +129,8 @@ export class AnthropicLLM implements LLM<AnthropicLLMOptions> {
       },
     );

+    options?.onEvent?.({ type: "start", model: this.modelInfo.id, provider: this.modelInfo.provider });
+
     let blockType: "text" | "thinking" | "toolUse" | "other" = "other";
     let blockContent = "";
     let toolCall: (ToolCall & { partialJson: string }) | null = null;
```
```diff
@@ -89,6 +89,14 @@ export class GoogleLLM implements LLM<GoogleLLMOptions> {
      };
    }

+   // Abort signal
+   if (options?.signal) {
+     if (options.signal.aborted) {
+       throw new Error("Request aborted");
+     }
+     config.abortSignal = options.signal;
+   }
+
    // Build the request parameters
    const params: GenerateContentParameters = {
      model: this.model.id,
```
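On the caller side, cancellation might look like the sketch below, reusing the hypothetical `llm` and `complete()` from the earlier example; only the `signal` option is grounded in the diff. Note the tension the commit message acknowledges: the README now describes a non-throwing abort API, while the provider internals above still `throw`, so the sketch defensively handles both outcomes.

```ts
const controller = new AbortController();
const timer = setTimeout(() => controller.abort(), 5_000); // cancel after 5s

try {
  const reply = await llm.complete(
    [{ role: "user", content: "Summarize this repo" }], // assumed message shape
    { signal: controller.signal },
  );
  console.log("finished:", reply.stopReason);
} catch (err) {
  // Reached only if aborts surface as rejections at this layer.
  console.warn("request aborted or failed:", err);
} finally {
  clearTimeout(timer);
}
```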
```diff
@@ -98,6 +106,8 @@ export class GoogleLLM implements LLM<GoogleLLMOptions> {

    const stream = await this.client.models.generateContentStream(params);

+   options?.onEvent?.({ type: "start", model: this.model.id, provider: this.model.provider });
+
    const blocks: AssistantMessage["content"] = [];
    let currentBlock: TextContent | ThinkingContent | null = null;
    let usage: Usage = {
```
```diff
@@ -92,6 +92,8 @@ export class OpenAICompletionsLLM implements LLM<OpenAICompletionsLLMOptions> {
      signal: options?.signal,
    });

+   options?.onEvent?.({ type: "start", model: this.modelInfo.id, provider: this.modelInfo.provider });
+
    const blocks: AssistantMessage["content"] = [];
    let currentBlock: TextContent | ThinkingContent | (ToolCall & { partialArgs?: string }) | null = null;
    let usage: Usage = {
```
```diff
@@ -85,6 +85,8 @@ export class OpenAIResponsesLLM implements LLM<OpenAIResponsesLLMOptions> {
      signal: options?.signal,
    });

+   options?.onEvent?.({ type: "start", model: this.modelInfo.id, provider: this.modelInfo.provider });
+
    const outputItems: (ResponseReasoningItem | ResponseOutputMessage | ResponseFunctionToolCall)[] = []; // any for function_call items
    let currentTextAccum = ""; // For delta accumulation
    let currentThinkingAccum = ""; // For delta accumulation
```
```diff
@@ -184,9 +186,25 @@ export class OpenAIResponsesLLM implements LLM<OpenAIResponsesLLMOptions> {
        } satisfies AssistantMessage;
+       options?.onEvent?.({ type: "error", error: errorOutput.error || "Unknown error" });
        return errorOutput;
+     } else if (event.type === "response.failed") {
+       const errorOutput = {
+         role: "assistant",
+         content: [],
+         provider: this.modelInfo.provider,
+         model: this.modelInfo.id,
+         usage,
+         stopReason: "error",
+         error: "Unknown error",
+       } satisfies AssistantMessage;
+       options?.onEvent?.({ type: "error", error: errorOutput.error || "Unknown error" });
+       return errorOutput;
      }
    }

+   if (options?.signal?.aborted) {
+     throw new Error("Request was aborted");
+   }

    // Convert output items to blocks
    const blocks: AssistantMessage["content"] = [];

```
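Per this hunk, a failed Responses stream is surfaced two ways at once: an `error` event through `onEvent`, and an ordinary `AssistantMessage` return value with `stopReason: "error"` and an `error` field, rather than a rejected promise. A caller might branch on it like this, again reusing the hypothetical `llm` and `complete()` from the first sketch:

```ts
const reply = await llm.complete(
  [{ role: "user", content: "Hello" }], // assumed message shape
  {
    onEvent: (event) => {
      if (event.type === "error") {
        // Per the diff: fired when the stream reports a failure.
        console.error("provider failure:", event.error);
      }
    },
  },
);

if (reply.stopReason === "error") {
  // Non-throwing failure path: inspect the returned message.
  console.error(reply.error ?? "Unknown error");
}
```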