refactor(ai): register api providers

Mario Zechner 2026-01-24 22:42:04 +01:00
parent 3256d3c083
commit c725135a76
24 changed files with 897 additions and 629 deletions


@@ -0,0 +1,98 @@
import type {
Api,
AssistantMessageEventStream,
Context,
Model,
SimpleStreamOptions,
StreamFunction,
StreamOptions,
} from "./types.js";
export type ApiStreamFunction = (
model: Model<Api>,
context: Context,
options?: StreamOptions,
) => AssistantMessageEventStream;
export type ApiStreamSimpleFunction = (
model: Model<Api>,
context: Context,
options?: SimpleStreamOptions,
) => AssistantMessageEventStream;
export interface ApiProvider<TApi extends Api = Api, TOptions extends StreamOptions = StreamOptions> {
api: TApi;
stream: StreamFunction<TApi, TOptions>;
streamSimple: StreamFunction<TApi, SimpleStreamOptions>;
}
interface ApiProviderInternal {
api: Api;
stream: ApiStreamFunction;
streamSimple: ApiStreamSimpleFunction;
}
type RegisteredApiProvider = {
provider: ApiProviderInternal;
sourceId?: string;
};
const apiProviderRegistry = new Map<string, RegisteredApiProvider>();
function wrapStream<TApi extends Api, TOptions extends StreamOptions>(
api: TApi,
stream: StreamFunction<TApi, TOptions>,
): ApiStreamFunction {
return (model, context, options) => {
if (model.api !== api) {
throw new Error(`Mismatched api: ${model.api}, expected ${api}`);
}
return stream(model as Model<TApi>, context, options as TOptions);
};
}
function wrapStreamSimple<TApi extends Api>(
api: TApi,
streamSimple: StreamFunction<TApi, SimpleStreamOptions>,
): ApiStreamSimpleFunction {
return (model, context, options) => {
if (model.api !== api) {
throw new Error(`Mismatched api: ${model.api}, expected ${api}`);
}
return streamSimple(model as Model<TApi>, context, options);
};
}
export function registerApiProvider<TApi extends Api, TOptions extends StreamOptions>(
provider: ApiProvider<TApi, TOptions>,
sourceId?: string,
): void {
apiProviderRegistry.set(provider.api, {
provider: {
api: provider.api,
stream: wrapStream(provider.api, provider.stream),
streamSimple: wrapStreamSimple(provider.api, provider.streamSimple),
},
sourceId,
});
}
export function getApiProvider(api: Api): ApiProviderInternal | undefined {
return apiProviderRegistry.get(api)?.provider;
}
export function getApiProviders(): ApiProviderInternal[] {
return Array.from(apiProviderRegistry.values(), (entry) => entry.provider);
}
export function unregisterApiProviders(sourceId: string): void {
for (const [api, entry] of apiProviderRegistry.entries()) {
if (entry.sourceId === sourceId) {
apiProviderRegistry.delete(api);
}
}
}
export function clearApiProviders(): void {
apiProviderRegistry.clear();
}
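The registry is keyed by the api string, so registering the same api twice replaces the earlier entry, and the optional sourceId gives hosts a handle for bulk teardown. A minimal usage sketch; the "my-proxy" api id, the "my-plugin" sourceId, and both stream functions are hypothetical:
import { registerApiProvider, unregisterApiProviders } from "./api-registry.js";
registerApiProvider(
{
api: "my-proxy",
stream: myProxyStream, // hypothetical StreamFunction<"my-proxy", MyProxyOptions>
streamSimple: myProxyStreamSimple, // hypothetical StreamFunction<"my-proxy", SimpleStreamOptions>
},
"my-plugin", // sourceId, used for teardown below
);
unregisterApiProviders("my-plugin"); // removes every provider registered under "my-plugin"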


@@ -0,0 +1,113 @@
// NEVER convert to top-level imports - breaks browser/Vite builds (web-ui)
let _existsSync: typeof import("node:fs").existsSync | null = null;
let _homedir: typeof import("node:os").homedir | null = null;
let _join: typeof import("node:path").join | null = null;
// Eagerly load in Node.js/Bun environment only
if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) {
import("node:fs").then((m) => {
_existsSync = m.existsSync;
});
import("node:os").then((m) => {
_homedir = m.homedir;
});
import("node:path").then((m) => {
_join = m.join;
});
}
import type { KnownProvider } from "./types.js";
let cachedVertexAdcCredentialsExists: boolean | null = null;
function hasVertexAdcCredentials(): boolean {
if (cachedVertexAdcCredentialsExists === null) {
// In browser or if node modules not loaded yet, return false
if (!_existsSync || !_homedir || !_join) {
cachedVertexAdcCredentialsExists = false;
return false;
}
// Check GOOGLE_APPLICATION_CREDENTIALS env var first (standard way)
const gacPath = process.env.GOOGLE_APPLICATION_CREDENTIALS;
if (gacPath) {
cachedVertexAdcCredentialsExists = _existsSync(gacPath);
} else {
// Fall back to default ADC path (lazy evaluation)
cachedVertexAdcCredentialsExists = _existsSync(
_join(_homedir(), ".config", "gcloud", "application_default_credentials.json"),
);
}
}
return cachedVertexAdcCredentialsExists;
}
/**
* Get API key for provider from known environment variables, e.g. OPENAI_API_KEY.
*
* Will not return API keys for providers that require OAuth tokens.
*/
export function getEnvApiKey(provider: KnownProvider): string | undefined;
export function getEnvApiKey(provider: string): string | undefined;
export function getEnvApiKey(provider: any): string | undefined {
// Fall back to environment variables
if (provider === "github-copilot") {
return process.env.COPILOT_GITHUB_TOKEN || process.env.GH_TOKEN || process.env.GITHUB_TOKEN;
}
// ANTHROPIC_OAUTH_TOKEN takes precedence over ANTHROPIC_API_KEY
if (provider === "anthropic") {
return process.env.ANTHROPIC_OAUTH_TOKEN || process.env.ANTHROPIC_API_KEY;
}
// Vertex AI uses Application Default Credentials, not API keys.
// Auth is configured via `gcloud auth application-default login`.
if (provider === "google-vertex") {
const hasCredentials = hasVertexAdcCredentials();
const hasProject = !!(process.env.GOOGLE_CLOUD_PROJECT || process.env.GCLOUD_PROJECT);
const hasLocation = !!process.env.GOOGLE_CLOUD_LOCATION;
if (hasCredentials && hasProject && hasLocation) {
return "<authenticated>";
}
}
if (provider === "amazon-bedrock") {
// Amazon Bedrock supports multiple credential sources:
// 1. AWS_PROFILE - named profile from ~/.aws/credentials
// 2. AWS_ACCESS_KEY_ID + AWS_SECRET_ACCESS_KEY - standard IAM keys
// 3. AWS_BEARER_TOKEN_BEDROCK - Bedrock API keys (bearer token)
// 4. AWS_CONTAINER_CREDENTIALS_RELATIVE_URI - ECS task roles
// 5. AWS_CONTAINER_CREDENTIALS_FULL_URI - ECS task roles (full URI)
// 6. AWS_WEB_IDENTITY_TOKEN_FILE - IRSA (IAM Roles for Service Accounts)
if (
process.env.AWS_PROFILE ||
(process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY) ||
process.env.AWS_BEARER_TOKEN_BEDROCK ||
process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI ||
process.env.AWS_CONTAINER_CREDENTIALS_FULL_URI ||
process.env.AWS_WEB_IDENTITY_TOKEN_FILE
) {
return "<authenticated>";
}
}
const envMap: Record<string, string> = {
openai: "OPENAI_API_KEY",
"azure-openai-responses": "AZURE_OPENAI_API_KEY",
google: "GEMINI_API_KEY",
groq: "GROQ_API_KEY",
cerebras: "CEREBRAS_API_KEY",
xai: "XAI_API_KEY",
openrouter: "OPENROUTER_API_KEY",
"vercel-ai-gateway": "AI_GATEWAY_API_KEY",
zai: "ZAI_API_KEY",
mistral: "MISTRAL_API_KEY",
minimax: "MINIMAX_API_KEY",
"minimax-cn": "MINIMAX_CN_API_KEY",
opencode: "OPENCODE_API_KEY",
};
const envVar = envMap[provider];
return envVar ? process.env[envVar] : undefined;
}
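A few representative lookups, assuming the corresponding variables are set in the caller's environment (the "unknown-provider" id is illustrative):
getEnvApiKey("openai"); // => process.env.OPENAI_API_KEY
getEnvApiKey("anthropic"); // prefers ANTHROPIC_OAUTH_TOKEN, falls back to ANTHROPIC_API_KEY
getEnvApiKey("google-vertex"); // => "<authenticated>" only when ADC credentials, project, and location all resolve
getEnvApiKey("unknown-provider"); // => undefined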


@@ -1,3 +1,5 @@
export * from "./api-registry.js";
export * from "./env-api-keys.js";
export * from "./models.js";
export * from "./providers/anthropic.js";
export * from "./providers/azure-openai-responses.js";


@@ -24,6 +24,7 @@ import type {
AssistantMessage,
Context,
Model,
SimpleStreamOptions,
StopReason,
StreamFunction,
StreamOptions,
@@ -38,6 +39,7 @@ import type {
import { AssistantMessageEventStream } from "../utils/event-stream.js";
import { parseStreamingJson } from "../utils/json-parse.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { adjustMaxTokensForThinking, buildBaseOptions, clampReasoning } from "./simple-options.js";
import { transformMessages } from "./transform-messages.js";
export interface BedrockOptions extends StreamOptions {
@@ -54,10 +56,10 @@ export interface BedrockOptions extends StreamOptions {
type Block = (TextContent | ThinkingContent | ToolCall) & { index?: number; partialJson?: string };
export const streamBedrock: StreamFunction<"bedrock-converse-stream"> = (
export const streamBedrock: StreamFunction<"bedrock-converse-stream", BedrockOptions> = (
model: Model<"bedrock-converse-stream">,
context: Context,
options: BedrockOptions,
options: BedrockOptions = {},
): AssistantMessageEventStream => {
const stream = new AssistantMessageEventStream();
@ -155,6 +157,42 @@ export const streamBedrock: StreamFunction<"bedrock-converse-stream"> = (
return stream;
};
export const streamSimpleBedrock: StreamFunction<"bedrock-converse-stream", SimpleStreamOptions> = (
model: Model<"bedrock-converse-stream">,
context: Context,
options?: SimpleStreamOptions,
): AssistantMessageEventStream => {
const base = buildBaseOptions(model, options, undefined);
if (!options?.reasoning) {
return streamBedrock(model, context, { ...base, reasoning: undefined } satisfies BedrockOptions);
}
if (model.id.includes("anthropic.claude") || model.id.includes("anthropic/claude")) {
const adjusted = adjustMaxTokensForThinking(
base.maxTokens || 0,
model.maxTokens,
options.reasoning,
options.thinkingBudgets,
);
return streamBedrock(model, context, {
...base,
maxTokens: adjusted.maxTokens,
reasoning: options.reasoning,
thinkingBudgets: {
...(options.thinkingBudgets || {}),
[clampReasoning(options.reasoning)!]: adjusted.thinkingBudget,
},
} satisfies BedrockOptions);
}
return streamBedrock(model, context, {
...base,
reasoning: options.reasoning,
thinkingBudgets: options.thinkingBudgets,
} satisfies BedrockOptions);
};
function handleContentBlockStart(
event: ContentBlockStartEvent,
blocks: Block[],


@@ -4,8 +4,8 @@ import type {
MessageCreateParamsStreaming,
MessageParam,
} from "@anthropic-ai/sdk/resources/messages.js";
import { getEnvApiKey } from "../env-api-keys.js";
import { calculateCost } from "../models.js";
import { getEnvApiKey } from "../stream.js";
import type {
Api,
AssistantMessage,
@@ -13,6 +13,7 @@ import type {
ImageContent,
Message,
Model,
SimpleStreamOptions,
StopReason,
StreamFunction,
StreamOptions,
@@ -26,6 +27,7 @@ import { AssistantMessageEventStream } from "../utils/event-stream.js";
import { parseStreamingJson } from "../utils/json-parse.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { adjustMaxTokensForThinking, buildBaseOptions } from "./simple-options.js";
import { transformMessages } from "./transform-messages.js";
// Stealth mode: Mimic Claude Code's tool naming exactly
@@ -136,7 +138,7 @@ function mergeHeaders(...headerSources: (Record<string, string> | undefined)[]):
return merged;
}
export const streamAnthropic: StreamFunction<"anthropic-messages"> = (
export const streamAnthropic: StreamFunction<"anthropic-messages", AnthropicOptions> = (
model: Model<"anthropic-messages">,
context: Context,
options?: AnthropicOptions,
@@ -335,6 +337,36 @@ export const streamAnthropic: StreamFunction<"anthropic-messages"> = (
return stream;
};
export const streamSimpleAnthropic: StreamFunction<"anthropic-messages", SimpleStreamOptions> = (
model: Model<"anthropic-messages">,
context: Context,
options?: SimpleStreamOptions,
): AssistantMessageEventStream => {
const apiKey = options?.apiKey || getEnvApiKey(model.provider);
if (!apiKey) {
throw new Error(`No API key for provider: ${model.provider}`);
}
const base = buildBaseOptions(model, options, apiKey);
if (!options?.reasoning) {
return streamAnthropic(model, context, { ...base, thinkingEnabled: false } satisfies AnthropicOptions);
}
const adjusted = adjustMaxTokensForThinking(
base.maxTokens || 0,
model.maxTokens,
options.reasoning,
options.thinkingBudgets,
);
return streamAnthropic(model, context, {
...base,
maxTokens: adjusted.maxTokens,
thinkingEnabled: true,
thinkingBudgetTokens: adjusted.thinkingBudget,
} satisfies AnthropicOptions);
};
function isOAuthToken(apiKey: string): boolean {
return apiKey.includes("sk-ant-oat");
}


@@ -1,9 +1,19 @@
import { AzureOpenAI } from "openai";
import type { ResponseCreateParamsStreaming } from "openai/resources/responses/responses.js";
import { getEnvApiKey } from "../stream.js";
import type { Api, AssistantMessage, Context, Model, StreamFunction, StreamOptions } from "../types.js";
import { getEnvApiKey } from "../env-api-keys.js";
import { supportsXhigh } from "../models.js";
import type {
Api,
AssistantMessage,
Context,
Model,
SimpleStreamOptions,
StreamFunction,
StreamOptions,
} from "../types.js";
import { AssistantMessageEventStream } from "../utils/event-stream.js";
import { convertResponsesMessages, convertResponsesTools, processResponsesStream } from "./openai-responses-shared.js";
import { buildBaseOptions, clampReasoning } from "./simple-options.js";
const DEFAULT_AZURE_API_VERSION = "v1";
const AZURE_TOOL_CALL_PROVIDERS = new Set(["openai", "openai-codex", "opencode", "azure-openai-responses"]);
@@ -42,7 +52,7 @@ export interface AzureOpenAIResponsesOptions extends StreamOptions {
/**
* Generate function for Azure OpenAI Responses API
*/
export const streamAzureOpenAIResponses: StreamFunction<"azure-openai-responses"> = (
export const streamAzureOpenAIResponses: StreamFunction<"azure-openai-responses", AzureOpenAIResponsesOptions> = (
model: Model<"azure-openai-responses">,
context: Context,
options?: AzureOpenAIResponsesOptions,
@@ -107,6 +117,25 @@ export const streamAzureOpenAIResponses: StreamFunction<"azure-openai-responses"
return stream;
};
export const streamSimpleAzureOpenAIResponses: StreamFunction<"azure-openai-responses", SimpleStreamOptions> = (
model: Model<"azure-openai-responses">,
context: Context,
options?: SimpleStreamOptions,
): AssistantMessageEventStream => {
const apiKey = options?.apiKey || getEnvApiKey(model.provider);
if (!apiKey) {
throw new Error(`No API key for provider: ${model.provider}`);
}
const base = buildBaseOptions(model, options, apiKey);
const reasoningEffort = supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning);
return streamAzureOpenAIResponses(model, context, {
...base,
reasoningEffort,
} satisfies AzureOpenAIResponsesOptions);
};
function normalizeAzureBaseUrl(baseUrl: string): string {
return baseUrl.replace(/\/+$/, "");
}


@@ -11,10 +11,13 @@ import type {
AssistantMessage,
Context,
Model,
SimpleStreamOptions,
StreamFunction,
StreamOptions,
TextContent,
ThinkingBudgets,
ThinkingContent,
ThinkingLevel,
ToolCall,
} from "../types.js";
import { AssistantMessageEventStream } from "../utils/event-stream.js";
@@ -27,6 +30,7 @@ import {
mapToolChoice,
retainThoughtSignature,
} from "./google-shared.js";
import { buildBaseOptions, clampReasoning } from "./simple-options.js";
/**
* Thinking level for Gemini 3 models.
@@ -372,7 +376,7 @@ interface CloudCodeAssistResponseChunk {
traceId?: string;
}
export const streamGoogleGeminiCli: StreamFunction<"google-gemini-cli"> = (
export const streamGoogleGeminiCli: StreamFunction<"google-gemini-cli", GoogleGeminiCliOptions> = (
model: Model<"google-gemini-cli">,
context: Context,
options?: GoogleGeminiCliOptions,
@@ -830,6 +834,61 @@ export const streamGoogleGeminiCli: StreamFunction<"google-gemini-cli"> = (
return stream;
};
export const streamSimpleGoogleGeminiCli: StreamFunction<"google-gemini-cli", SimpleStreamOptions> = (
model: Model<"google-gemini-cli">,
context: Context,
options?: SimpleStreamOptions,
): AssistantMessageEventStream => {
const apiKey = options?.apiKey;
if (!apiKey) {
throw new Error("Google Cloud Code Assist requires OAuth authentication. Use /login to authenticate.");
}
const base = buildBaseOptions(model, options, apiKey);
if (!options?.reasoning) {
return streamGoogleGeminiCli(model, context, {
...base,
thinking: { enabled: false },
} satisfies GoogleGeminiCliOptions);
}
const effort = clampReasoning(options.reasoning)!;
if (model.id.includes("3-pro") || model.id.includes("3-flash")) {
return streamGoogleGeminiCli(model, context, {
...base,
thinking: {
enabled: true,
level: getGeminiCliThinkingLevel(effort, model.id),
},
} satisfies GoogleGeminiCliOptions);
}
const defaultBudgets: ThinkingBudgets = {
minimal: 1024,
low: 2048,
medium: 8192,
high: 16384,
};
const budgets = { ...defaultBudgets, ...options.thinkingBudgets };
const minOutputTokens = 1024;
let thinkingBudget = budgets[effort]!;
const maxTokens = Math.min((base.maxTokens || 0) + thinkingBudget, model.maxTokens);
if (maxTokens <= thinkingBudget) {
thinkingBudget = Math.max(0, maxTokens - minOutputTokens);
}
return streamGoogleGeminiCli(model, context, {
...base,
maxTokens,
thinking: {
enabled: true,
budgetTokens: thinkingBudget,
},
} satisfies GoogleGeminiCliOptions);
};
export function buildRequest(
model: Model<"google-gemini-cli">,
context: Context,
@@ -921,3 +980,28 @@
requestId: `${isAntigravity ? "agent" : "pi"}-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`,
};
}
type ClampedThinkingLevel = Exclude<ThinkingLevel, "xhigh">;
function getGeminiCliThinkingLevel(effort: ClampedThinkingLevel, modelId: string): GoogleThinkingLevel {
if (modelId.includes("3-pro")) {
switch (effort) {
case "minimal":
case "low":
return "LOW";
case "medium":
case "high":
return "HIGH";
}
}
switch (effort) {
case "minimal":
return "MINIMAL";
case "low":
return "LOW";
case "medium":
return "MEDIUM";
case "high":
return "HIGH";
}
}


@@ -11,9 +11,12 @@ import type {
AssistantMessage,
Context,
Model,
ThinkingLevel as PiThinkingLevel,
SimpleStreamOptions,
StreamFunction,
StreamOptions,
TextContent,
ThinkingBudgets,
ThinkingContent,
ToolCall,
} from "../types.js";
@@ -28,6 +31,7 @@ import {
mapToolChoice,
retainThoughtSignature,
} from "./google-shared.js";
import { buildBaseOptions, clampReasoning } from "./simple-options.js";
export interface GoogleVertexOptions extends StreamOptions {
toolChoice?: "auto" | "none" | "any";
@@ -53,7 +57,7 @@ const THINKING_LEVEL_MAP: Record<GoogleThinkingLevel, ThinkingLevel> = {
// Counter for generating unique tool call IDs
let toolCallCounter = 0;
export const streamGoogleVertex: StreamFunction<"google-vertex"> = (
export const streamGoogleVertex: StreamFunction<"google-vertex", GoogleVertexOptions> = (
model: Model<"google-vertex">,
context: Context,
options?: GoogleVertexOptions,
@@ -276,6 +280,41 @@ export const streamGoogleVertex: StreamFunction<"google-vertex"> = (
return stream;
};
export const streamSimpleGoogleVertex: StreamFunction<"google-vertex", SimpleStreamOptions> = (
model: Model<"google-vertex">,
context: Context,
options?: SimpleStreamOptions,
): AssistantMessageEventStream => {
const base = buildBaseOptions(model, options, undefined);
if (!options?.reasoning) {
return streamGoogleVertex(model, context, {
...base,
thinking: { enabled: false },
} satisfies GoogleVertexOptions);
}
const effort = clampReasoning(options.reasoning)!;
const geminiModel = model as unknown as Model<"google-generative-ai">;
if (isGemini3ProModel(geminiModel) || isGemini3FlashModel(geminiModel)) {
return streamGoogleVertex(model, context, {
...base,
thinking: {
enabled: true,
level: getGemini3ThinkingLevel(effort, geminiModel),
},
} satisfies GoogleVertexOptions);
}
return streamGoogleVertex(model, context, {
...base,
thinking: {
enabled: true,
budgetTokens: getGoogleBudget(geminiModel, effort, options.thinkingBudgets),
},
} satisfies GoogleVertexOptions);
};
function createClient(
model: Model<"google-vertex">,
project: string,
@@ -373,3 +412,71 @@ function buildParams(
return params;
}
type ClampedThinkingLevel = Exclude<PiThinkingLevel, "xhigh">;
function isGemini3ProModel(model: Model<"google-generative-ai">): boolean {
return model.id.includes("3-pro");
}
function isGemini3FlashModel(model: Model<"google-generative-ai">): boolean {
return model.id.includes("3-flash");
}
function getGemini3ThinkingLevel(
effort: ClampedThinkingLevel,
model: Model<"google-generative-ai">,
): GoogleThinkingLevel {
if (isGemini3ProModel(model)) {
switch (effort) {
case "minimal":
case "low":
return "LOW";
case "medium":
case "high":
return "HIGH";
}
}
switch (effort) {
case "minimal":
return "MINIMAL";
case "low":
return "LOW";
case "medium":
return "MEDIUM";
case "high":
return "HIGH";
}
}
function getGoogleBudget(
model: Model<"google-generative-ai">,
effort: ClampedThinkingLevel,
customBudgets?: ThinkingBudgets,
): number {
if (customBudgets?.[effort] !== undefined) {
return customBudgets[effort]!;
}
if (model.id.includes("2.5-pro")) {
const budgets: Record<ClampedThinkingLevel, number> = {
minimal: 128,
low: 2048,
medium: 8192,
high: 32768,
};
return budgets[effort];
}
if (model.id.includes("2.5-flash")) {
const budgets: Record<ClampedThinkingLevel, number> = {
minimal: 128,
low: 2048,
medium: 8192,
high: 24576,
};
return budgets[effort];
}
return -1;
}


@@ -4,17 +4,20 @@ import {
GoogleGenAI,
type ThinkingConfig,
} from "@google/genai";
import { getEnvApiKey } from "../env-api-keys.js";
import { calculateCost } from "../models.js";
import { getEnvApiKey } from "../stream.js";
import type {
Api,
AssistantMessage,
Context,
Model,
SimpleStreamOptions,
StreamFunction,
StreamOptions,
TextContent,
ThinkingBudgets,
ThinkingContent,
ThinkingLevel,
ToolCall,
} from "../types.js";
import { AssistantMessageEventStream } from "../utils/event-stream.js";
@@ -28,6 +31,7 @@ import {
mapToolChoice,
retainThoughtSignature,
} from "./google-shared.js";
import { buildBaseOptions, clampReasoning } from "./simple-options.js";
export interface GoogleOptions extends StreamOptions {
toolChoice?: "auto" | "none" | "any";
@@ -41,7 +45,7 @@ export interface GoogleOptions extends StreamOptions {
// Counter for generating unique tool call IDs
let toolCallCounter = 0;
export const streamGoogle: StreamFunction<"google-generative-ai"> = (
export const streamGoogle: StreamFunction<"google-generative-ai", GoogleOptions> = (
model: Model<"google-generative-ai">,
context: Context,
options?: GoogleOptions,
@@ -264,6 +268,43 @@ export const streamGoogle: StreamFunction<"google-generative-ai"> = (
return stream;
};
export const streamSimpleGoogle: StreamFunction<"google-generative-ai", SimpleStreamOptions> = (
model: Model<"google-generative-ai">,
context: Context,
options?: SimpleStreamOptions,
): AssistantMessageEventStream => {
const apiKey = options?.apiKey || getEnvApiKey(model.provider);
if (!apiKey) {
throw new Error(`No API key for provider: ${model.provider}`);
}
const base = buildBaseOptions(model, options, apiKey);
if (!options?.reasoning) {
return streamGoogle(model, context, { ...base, thinking: { enabled: false } } satisfies GoogleOptions);
}
const effort = clampReasoning(options.reasoning)!;
const googleModel = model as Model<"google-generative-ai">;
if (isGemini3ProModel(googleModel) || isGemini3FlashModel(googleModel)) {
return streamGoogle(model, context, {
...base,
thinking: {
enabled: true,
level: getGemini3ThinkingLevel(effort, googleModel),
},
} satisfies GoogleOptions);
}
return streamGoogle(model, context, {
...base,
thinking: {
enabled: true,
budgetTokens: getGoogleBudget(googleModel, effort, options.thinkingBudgets),
},
} satisfies GoogleOptions);
};
function createClient(
model: Model<"google-generative-ai">,
apiKey?: string,
@@ -341,3 +382,71 @@ function buildParams(
return params;
}
type ClampedThinkingLevel = Exclude<ThinkingLevel, "xhigh">;
function isGemini3ProModel(model: Model<"google-generative-ai">): boolean {
return model.id.includes("3-pro");
}
function isGemini3FlashModel(model: Model<"google-generative-ai">): boolean {
return model.id.includes("3-flash");
}
function getGemini3ThinkingLevel(
effort: ClampedThinkingLevel,
model: Model<"google-generative-ai">,
): GoogleThinkingLevel {
if (isGemini3ProModel(model)) {
switch (effort) {
case "minimal":
case "low":
return "LOW";
case "medium":
case "high":
return "HIGH";
}
}
switch (effort) {
case "minimal":
return "MINIMAL";
case "low":
return "LOW";
case "medium":
return "MEDIUM";
case "high":
return "HIGH";
}
}
function getGoogleBudget(
model: Model<"google-generative-ai">,
effort: ClampedThinkingLevel,
customBudgets?: ThinkingBudgets,
): number {
if (customBudgets?.[effort] !== undefined) {
return customBudgets[effort]!;
}
if (model.id.includes("2.5-pro")) {
const budgets: Record<ClampedThinkingLevel, number> = {
minimal: 128,
low: 2048,
medium: 8192,
high: 32768,
};
return budgets[effort];
}
if (model.id.includes("2.5-flash")) {
const budgets: Record<ClampedThinkingLevel, number> = {
minimal: 128,
low: 2048,
medium: 8192,
high: 24576,
};
return budgets[effort];
}
return -1;
}


@@ -7,10 +7,20 @@ if (typeof process !== "undefined" && (process.versions?.node || process.version
}
import type { Tool as OpenAITool, ResponseInput, ResponseStreamEvent } from "openai/resources/responses/responses.js";
import { getEnvApiKey } from "../stream.js";
import type { Api, AssistantMessage, Context, Model, StreamFunction, StreamOptions } from "../types.js";
import { getEnvApiKey } from "../env-api-keys.js";
import { supportsXhigh } from "../models.js";
import type {
Api,
AssistantMessage,
Context,
Model,
SimpleStreamOptions,
StreamFunction,
StreamOptions,
} from "../types.js";
import { AssistantMessageEventStream } from "../utils/event-stream.js";
import { convertResponsesMessages, convertResponsesTools, processResponsesStream } from "./openai-responses-shared.js";
import { buildBaseOptions, clampReasoning } from "./simple-options.js";
// ============================================================================
// Configuration
@@ -89,7 +99,7 @@ function sleep(ms: number, signal?: AbortSignal): Promise<void> {
// Main Stream Function
// ============================================================================
export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"> = (
export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses", OpenAICodexResponsesOptions> = (
model: Model<"openai-codex-responses">,
context: Context,
options?: OpenAICodexResponsesOptions,
@@ -207,6 +217,25 @@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"
return stream;
};
export const streamSimpleOpenAICodexResponses: StreamFunction<"openai-codex-responses", SimpleStreamOptions> = (
model: Model<"openai-codex-responses">,
context: Context,
options?: SimpleStreamOptions,
): AssistantMessageEventStream => {
const apiKey = options?.apiKey || getEnvApiKey(model.provider);
if (!apiKey) {
throw new Error(`No API key for provider: ${model.provider}`);
}
const base = buildBaseOptions(model, options, apiKey);
const reasoningEffort = supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning);
return streamOpenAICodexResponses(model, context, {
...base,
reasoningEffort,
} satisfies OpenAICodexResponsesOptions);
};
// ============================================================================
// Request Building
// ============================================================================


@@ -8,14 +8,15 @@ import type {
ChatCompletionMessageParam,
ChatCompletionToolMessageParam,
} from "openai/resources/chat/completions.js";
import { calculateCost } from "../models.js";
import { getEnvApiKey } from "../stream.js";
import { getEnvApiKey } from "../env-api-keys.js";
import { calculateCost, supportsXhigh } from "../models.js";
import type {
AssistantMessage,
Context,
Message,
Model,
OpenAICompletionsCompat,
SimpleStreamOptions,
StopReason,
StreamFunction,
StreamOptions,
@@ -28,6 +29,7 @@ import type {
import { AssistantMessageEventStream } from "../utils/event-stream.js";
import { parseStreamingJson } from "../utils/json-parse.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { buildBaseOptions, clampReasoning } from "./simple-options.js";
import { transformMessages } from "./transform-messages.js";
/**
@@ -72,7 +74,7 @@ export interface OpenAICompletionsOptions extends StreamOptions {
reasoningEffort?: "minimal" | "low" | "medium" | "high" | "xhigh";
}
export const streamOpenAICompletions: StreamFunction<"openai-completions"> = (
export const streamOpenAICompletions: StreamFunction<"openai-completions", OpenAICompletionsOptions> = (
model: Model<"openai-completions">,
context: Context,
options?: OpenAICompletionsOptions,
@@ -319,6 +321,25 @@ export const streamOpenAICompletions: StreamFunction<"openai-completions"> = (
return stream;
};
export const streamSimpleOpenAICompletions: StreamFunction<"openai-completions", SimpleStreamOptions> = (
model: Model<"openai-completions">,
context: Context,
options?: SimpleStreamOptions,
): AssistantMessageEventStream => {
const apiKey = options?.apiKey || getEnvApiKey(model.provider);
if (!apiKey) {
throw new Error(`No API key for provider: ${model.provider}`);
}
const base = buildBaseOptions(model, options, apiKey);
const reasoningEffort = supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning);
return streamOpenAICompletions(model, context, {
...base,
reasoningEffort,
} satisfies OpenAICompletionsOptions);
};
function createClient(
model: Model<"openai-completions">,
context: Context,


@@ -1,9 +1,20 @@
import OpenAI from "openai";
import type { ResponseCreateParamsStreaming } from "openai/resources/responses/responses.js";
import { getEnvApiKey } from "../stream.js";
import type { Api, AssistantMessage, Context, Model, StreamFunction, StreamOptions, Usage } from "../types.js";
import { getEnvApiKey } from "../env-api-keys.js";
import { supportsXhigh } from "../models.js";
import type {
Api,
AssistantMessage,
Context,
Model,
SimpleStreamOptions,
StreamFunction,
StreamOptions,
Usage,
} from "../types.js";
import { AssistantMessageEventStream } from "../utils/event-stream.js";
import { convertResponsesMessages, convertResponsesTools, processResponsesStream } from "./openai-responses-shared.js";
import { buildBaseOptions, clampReasoning } from "./simple-options.js";
const OPENAI_TOOL_CALL_PROVIDERS = new Set(["openai", "openai-codex", "opencode"]);
@@ -17,7 +28,7 @@ export interface OpenAIResponsesOptions extends StreamOptions {
/**
* Generate function for OpenAI Responses API
*/
export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
export const streamOpenAIResponses: StreamFunction<"openai-responses", OpenAIResponsesOptions> = (
model: Model<"openai-responses">,
context: Context,
options?: OpenAIResponsesOptions,
@@ -83,6 +94,25 @@ export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
return stream;
};
export const streamSimpleOpenAIResponses: StreamFunction<"openai-responses", SimpleStreamOptions> = (
model: Model<"openai-responses">,
context: Context,
options?: SimpleStreamOptions,
): AssistantMessageEventStream => {
const apiKey = options?.apiKey || getEnvApiKey(model.provider);
if (!apiKey) {
throw new Error(`No API key for provider: ${model.provider}`);
}
const base = buildBaseOptions(model, options, apiKey);
const reasoningEffort = supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning);
return streamOpenAIResponses(model, context, {
...base,
reasoningEffort,
} satisfies OpenAIResponsesOptions);
};
function createClient(
model: Model<"openai-responses">,
context: Context,


@@ -0,0 +1,64 @@
import { registerApiProvider } from "../api-registry.js";
import { streamBedrock, streamSimpleBedrock } from "./amazon-bedrock.js";
import { streamAnthropic, streamSimpleAnthropic } from "./anthropic.js";
import { streamAzureOpenAIResponses, streamSimpleAzureOpenAIResponses } from "./azure-openai-responses.js";
import { streamGoogle, streamSimpleGoogle } from "./google.js";
import { streamGoogleGeminiCli, streamSimpleGoogleGeminiCli } from "./google-gemini-cli.js";
import { streamGoogleVertex, streamSimpleGoogleVertex } from "./google-vertex.js";
import { streamOpenAICodexResponses, streamSimpleOpenAICodexResponses } from "./openai-codex-responses.js";
import { streamOpenAICompletions, streamSimpleOpenAICompletions } from "./openai-completions.js";
import { streamOpenAIResponses, streamSimpleOpenAIResponses } from "./openai-responses.js";
registerApiProvider({
api: "anthropic-messages",
stream: streamAnthropic,
streamSimple: streamSimpleAnthropic,
});
registerApiProvider({
api: "openai-completions",
stream: streamOpenAICompletions,
streamSimple: streamSimpleOpenAICompletions,
});
registerApiProvider({
api: "openai-responses",
stream: streamOpenAIResponses,
streamSimple: streamSimpleOpenAIResponses,
});
registerApiProvider({
api: "azure-openai-responses",
stream: streamAzureOpenAIResponses,
streamSimple: streamSimpleAzureOpenAIResponses,
});
registerApiProvider({
api: "openai-codex-responses",
stream: streamOpenAICodexResponses,
streamSimple: streamSimpleOpenAICodexResponses,
});
registerApiProvider({
api: "google-generative-ai",
stream: streamGoogle,
streamSimple: streamSimpleGoogle,
});
registerApiProvider({
api: "google-gemini-cli",
stream: streamGoogleGeminiCli,
streamSimple: streamSimpleGoogleGeminiCli,
});
registerApiProvider({
api: "google-vertex",
stream: streamGoogleVertex,
streamSimple: streamSimpleGoogleVertex,
});
registerApiProvider({
api: "bedrock-converse-stream",
stream: streamBedrock,
streamSimple: streamSimpleBedrock,
});
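Registration here happens as a module side effect, so a single import is enough to make all nine built-in apis resolvable; the reworked stream.ts below relies on exactly that:
import "./providers/register-builtins.js"; // side-effect import: populates the registry
// after this import, getApiProviders() returns the nine providers registered above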


@@ -0,0 +1,43 @@
import type { Api, Model, SimpleStreamOptions, StreamOptions, ThinkingBudgets, ThinkingLevel } from "../types.js";
export function buildBaseOptions(model: Model<Api>, options?: SimpleStreamOptions, apiKey?: string): StreamOptions {
return {
temperature: options?.temperature,
maxTokens: options?.maxTokens || Math.min(model.maxTokens, 32000),
signal: options?.signal,
apiKey: apiKey || options?.apiKey,
sessionId: options?.sessionId,
headers: options?.headers,
onPayload: options?.onPayload,
};
}
export function clampReasoning(effort: ThinkingLevel | undefined): Exclude<ThinkingLevel, "xhigh"> | undefined {
return effort === "xhigh" ? "high" : effort;
}
export function adjustMaxTokensForThinking(
baseMaxTokens: number,
modelMaxTokens: number,
reasoningLevel: ThinkingLevel,
customBudgets?: ThinkingBudgets,
): { maxTokens: number; thinkingBudget: number } {
const defaultBudgets: ThinkingBudgets = {
minimal: 1024,
low: 2048,
medium: 8192,
high: 16384,
};
const budgets = { ...defaultBudgets, ...customBudgets };
const minOutputTokens = 1024;
const level = clampReasoning(reasoningLevel)!;
let thinkingBudget = budgets[level]!;
const maxTokens = Math.min(baseMaxTokens + thinkingBudget, modelMaxTokens);
if (maxTokens <= thinkingBudget) {
thinkingBudget = Math.max(0, maxTokens - minOutputTokens);
}
return { maxTokens, thinkingBudget };
}
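A worked example of how the two knobs interact, with illustrative numbers (desired output 32000 tokens, model cap 8192, reasoning "high"):
adjustMaxTokensForThinking(32000, 8192, "high");
// budget for "high" defaults to 16384; maxTokens = min(32000 + 16384, 8192) = 8192
// 8192 <= 16384, so the budget shrinks: thinkingBudget = max(0, 8192 - 1024) = 7168
// => { maxTokens: 8192, thinkingBudget: 7168 }
clampReasoning("xhigh"); // => "high"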


@@ -1,200 +1,40 @@
// NEVER convert to top-level imports - breaks browser/Vite builds (web-ui)
let _existsSync: typeof import("node:fs").existsSync | null = null;
let _homedir: typeof import("node:os").homedir | null = null;
let _join: typeof import("node:path").join | null = null;
import "./providers/register-builtins.js";
// Eagerly load in Node.js/Bun environment only
if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) {
import("node:fs").then((m) => {
_existsSync = m.existsSync;
});
import("node:os").then((m) => {
_homedir = m.homedir;
});
import("node:path").then((m) => {
_join = m.join;
});
}
import { supportsXhigh } from "./models.js";
import { type BedrockOptions, streamBedrock } from "./providers/amazon-bedrock.js";
import { type AnthropicOptions, streamAnthropic } from "./providers/anthropic.js";
import { type AzureOpenAIResponsesOptions, streamAzureOpenAIResponses } from "./providers/azure-openai-responses.js";
import { type GoogleOptions, streamGoogle } from "./providers/google.js";
import {
type GoogleGeminiCliOptions,
type GoogleThinkingLevel,
streamGoogleGeminiCli,
} from "./providers/google-gemini-cli.js";
import { type GoogleVertexOptions, streamGoogleVertex } from "./providers/google-vertex.js";
import { type OpenAICodexResponsesOptions, streamOpenAICodexResponses } from "./providers/openai-codex-responses.js";
import { type OpenAICompletionsOptions, streamOpenAICompletions } from "./providers/openai-completions.js";
import { type OpenAIResponsesOptions, streamOpenAIResponses } from "./providers/openai-responses.js";
import { getApiProvider } from "./api-registry.js";
import type {
Api,
AssistantMessage,
AssistantMessageEventStream,
Context,
KnownProvider,
Model,
OptionsForApi,
ProviderStreamOptions,
SimpleStreamOptions,
ThinkingBudgets,
ThinkingLevel,
StreamOptions,
} from "./types.js";
let cachedVertexAdcCredentialsExists: boolean | null = null;
export { getEnvApiKey } from "./env-api-keys.js";
function hasVertexAdcCredentials(): boolean {
if (cachedVertexAdcCredentialsExists === null) {
// In browser or if node modules not loaded yet, return false
if (!_existsSync || !_homedir || !_join) {
cachedVertexAdcCredentialsExists = false;
return false;
}
// Check GOOGLE_APPLICATION_CREDENTIALS env var first (standard way)
const gacPath = process.env.GOOGLE_APPLICATION_CREDENTIALS;
if (gacPath) {
cachedVertexAdcCredentialsExists = _existsSync(gacPath);
} else {
// Fall back to default ADC path (lazy evaluation)
cachedVertexAdcCredentialsExists = _existsSync(
_join(_homedir(), ".config", "gcloud", "application_default_credentials.json"),
);
}
function resolveApiProvider(api: Api) {
const provider = getApiProvider(api);
if (!provider) {
throw new Error(`No API provider registered for api: ${api}`);
}
return cachedVertexAdcCredentialsExists;
}
/**
* Get API key for provider from known environment variables, e.g. OPENAI_API_KEY.
*
* Will not return API keys for providers that require OAuth tokens.
*/
export function getEnvApiKey(provider: KnownProvider): string | undefined;
export function getEnvApiKey(provider: string): string | undefined;
export function getEnvApiKey(provider: any): string | undefined {
// Fall back to environment variables
if (provider === "github-copilot") {
return process.env.COPILOT_GITHUB_TOKEN || process.env.GH_TOKEN || process.env.GITHUB_TOKEN;
}
// ANTHROPIC_OAUTH_TOKEN takes precedence over ANTHROPIC_API_KEY
if (provider === "anthropic") {
return process.env.ANTHROPIC_OAUTH_TOKEN || process.env.ANTHROPIC_API_KEY;
}
// Vertex AI uses Application Default Credentials, not API keys.
// Auth is configured via `gcloud auth application-default login`.
if (provider === "google-vertex") {
const hasCredentials = hasVertexAdcCredentials();
const hasProject = !!(process.env.GOOGLE_CLOUD_PROJECT || process.env.GCLOUD_PROJECT);
const hasLocation = !!process.env.GOOGLE_CLOUD_LOCATION;
if (hasCredentials && hasProject && hasLocation) {
return "<authenticated>";
}
}
if (provider === "amazon-bedrock") {
// Amazon Bedrock supports multiple credential sources:
// 1. AWS_PROFILE - named profile from ~/.aws/credentials
// 2. AWS_ACCESS_KEY_ID + AWS_SECRET_ACCESS_KEY - standard IAM keys
// 3. AWS_BEARER_TOKEN_BEDROCK - Bedrock API keys (bearer token)
// 4. AWS_CONTAINER_CREDENTIALS_RELATIVE_URI - ECS task roles
// 5. AWS_CONTAINER_CREDENTIALS_FULL_URI - ECS task roles (full URI)
// 6. AWS_WEB_IDENTITY_TOKEN_FILE - IRSA (IAM Roles for Service Accounts)
if (
process.env.AWS_PROFILE ||
(process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY) ||
process.env.AWS_BEARER_TOKEN_BEDROCK ||
process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI ||
process.env.AWS_CONTAINER_CREDENTIALS_FULL_URI ||
process.env.AWS_WEB_IDENTITY_TOKEN_FILE
) {
return "<authenticated>";
}
}
const envMap: Record<string, string> = {
openai: "OPENAI_API_KEY",
"azure-openai-responses": "AZURE_OPENAI_API_KEY",
google: "GEMINI_API_KEY",
groq: "GROQ_API_KEY",
cerebras: "CEREBRAS_API_KEY",
xai: "XAI_API_KEY",
openrouter: "OPENROUTER_API_KEY",
"vercel-ai-gateway": "AI_GATEWAY_API_KEY",
zai: "ZAI_API_KEY",
mistral: "MISTRAL_API_KEY",
minimax: "MINIMAX_API_KEY",
"minimax-cn": "MINIMAX_CN_API_KEY",
opencode: "OPENCODE_API_KEY",
};
const envVar = envMap[provider];
return envVar ? process.env[envVar] : undefined;
return provider;
}
export function stream<TApi extends Api>(
model: Model<TApi>,
context: Context,
options?: OptionsForApi<TApi>,
options?: ProviderStreamOptions,
): AssistantMessageEventStream {
// Vertex AI uses Application Default Credentials, not API keys
if (model.api === "google-vertex") {
return streamGoogleVertex(model as Model<"google-vertex">, context, options as GoogleVertexOptions);
} else if (model.api === "bedrock-converse-stream") {
// Bedrock doesn't use API keys; instead it sources credentials from standard AWS env variables or from a given AWS profile.
return streamBedrock(model as Model<"bedrock-converse-stream">, context, (options || {}) as BedrockOptions);
}
const apiKey = options?.apiKey || getEnvApiKey(model.provider);
if (!apiKey) {
throw new Error(`No API key for provider: ${model.provider}`);
}
const providerOptions = { ...options, apiKey };
const api: Api = model.api;
switch (api) {
case "anthropic-messages":
return streamAnthropic(model as Model<"anthropic-messages">, context, providerOptions);
case "openai-completions":
return streamOpenAICompletions(model as Model<"openai-completions">, context, providerOptions as any);
case "openai-responses":
return streamOpenAIResponses(model as Model<"openai-responses">, context, providerOptions as any);
case "azure-openai-responses":
return streamAzureOpenAIResponses(model as Model<"azure-openai-responses">, context, providerOptions as any);
case "openai-codex-responses":
return streamOpenAICodexResponses(model as Model<"openai-codex-responses">, context, providerOptions as any);
case "google-generative-ai":
return streamGoogle(model as Model<"google-generative-ai">, context, providerOptions);
case "google-gemini-cli":
return streamGoogleGeminiCli(
model as Model<"google-gemini-cli">,
context,
providerOptions as GoogleGeminiCliOptions,
);
default: {
// This should never be reached if all Api cases are handled
const _exhaustive: never = api;
throw new Error(`Unhandled API: ${_exhaustive}`);
}
}
const provider = resolveApiProvider(model.api);
return provider.stream(model, context, options as StreamOptions);
}
export async function complete<TApi extends Api>(
model: Model<TApi>,
context: Context,
options?: OptionsForApi<TApi>,
options?: ProviderStreamOptions,
): Promise<AssistantMessage> {
const s = stream(model, context, options);
return s.result();
@@ -205,23 +45,8 @@ export function streamSimple<TApi extends Api>(
context: Context,
options?: SimpleStreamOptions,
): AssistantMessageEventStream {
// Vertex AI uses Application Default Credentials, not API keys
if (model.api === "google-vertex") {
const providerOptions = mapOptionsForApi(model, options, undefined);
return stream(model, context, providerOptions);
} else if (model.api === "bedrock-converse-stream") {
// Bedrock doesn't use API keys; instead it sources credentials from standard AWS env variables or from a given AWS profile.
const providerOptions = mapOptionsForApi(model, options, undefined);
return stream(model, context, providerOptions);
}
const apiKey = options?.apiKey || getEnvApiKey(model.provider);
if (!apiKey) {
throw new Error(`No API key for provider: ${model.provider}`);
}
const providerOptions = mapOptionsForApi(model, options, apiKey);
return stream(model, context, providerOptions);
const provider = resolveApiProvider(model.api);
return provider.streamSimple(model, context, options);
}
export async function completeSimple<TApi extends Api>(
@@ -232,355 +57,3 @@ export async function completeSimple<TApi extends Api>(
const s = streamSimple(model, context, options);
return s.result();
}
function mapOptionsForApi<TApi extends Api>(
model: Model<TApi>,
options?: SimpleStreamOptions,
apiKey?: string,
): OptionsForApi<TApi> {
const base = {
temperature: options?.temperature,
maxTokens: options?.maxTokens || Math.min(model.maxTokens, 32000),
signal: options?.signal,
apiKey: apiKey || options?.apiKey,
sessionId: options?.sessionId,
headers: options?.headers,
onPayload: options?.onPayload,
};
// Helper to clamp xhigh to high for providers that don't support it
const clampReasoning = (effort: ThinkingLevel | undefined) => (effort === "xhigh" ? "high" : effort);
/**
* Adjust maxTokens to account for thinking budget.
* APIs like Anthropic and Bedrock require max_tokens > thinking.budget_tokens.
* Returns { maxTokens, thinkingBudget }
*/
const adjustMaxTokensForThinking = (
baseMaxTokens: number,
modelMaxTokens: number,
reasoningLevel: ThinkingLevel,
customBudgets?: ThinkingBudgets,
): { maxTokens: number; thinkingBudget: number } => {
const defaultBudgets: ThinkingBudgets = {
minimal: 1024,
low: 2048,
medium: 8192,
high: 16384,
};
const budgets = { ...defaultBudgets, ...customBudgets };
const minOutputTokens = 1024;
const level = clampReasoning(reasoningLevel)!;
let thinkingBudget = budgets[level]!;
// Caller's maxTokens is the desired output; add thinking budget on top, capped at model limit
const maxTokens = Math.min(baseMaxTokens + thinkingBudget, modelMaxTokens);
// If not enough room for thinking + output, reduce thinking budget
if (maxTokens <= thinkingBudget) {
thinkingBudget = Math.max(0, maxTokens - minOutputTokens);
}
return { maxTokens, thinkingBudget };
};
switch (model.api) {
case "anthropic-messages": {
// Explicitly disable thinking when reasoning is not specified
if (!options?.reasoning) {
return { ...base, thinkingEnabled: false } satisfies AnthropicOptions;
}
// Claude requires max_tokens > thinking.budget_tokens
// So we need to ensure maxTokens accounts for both thinking and output
const adjusted = adjustMaxTokensForThinking(
base.maxTokens || 0,
model.maxTokens,
options.reasoning,
options?.thinkingBudgets,
);
return {
...base,
maxTokens: adjusted.maxTokens,
thinkingEnabled: true,
thinkingBudgetTokens: adjusted.thinkingBudget,
} satisfies AnthropicOptions;
}
case "bedrock-converse-stream": {
// Explicitly disable thinking when reasoning is not specified
if (!options?.reasoning) {
return { ...base, reasoning: undefined } satisfies BedrockOptions;
}
// Claude requires max_tokens > thinking.budget_tokens (same as Anthropic direct API)
// So we need to ensure maxTokens accounts for both thinking and output
if (model.id.includes("anthropic.claude") || model.id.includes("anthropic/claude")) {
const adjusted = adjustMaxTokensForThinking(
base.maxTokens || 0,
model.maxTokens,
options.reasoning,
options?.thinkingBudgets,
);
return {
...base,
maxTokens: adjusted.maxTokens,
reasoning: options.reasoning,
thinkingBudgets: {
...(options?.thinkingBudgets || {}),
[clampReasoning(options.reasoning)!]: adjusted.thinkingBudget,
},
} satisfies BedrockOptions;
}
// Non-Claude models - pass through
return {
...base,
reasoning: options?.reasoning,
thinkingBudgets: options?.thinkingBudgets,
} satisfies BedrockOptions;
}
case "openai-completions":
return {
...base,
reasoningEffort: supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning),
} satisfies OpenAICompletionsOptions;
case "openai-responses":
return {
...base,
reasoningEffort: supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning),
} satisfies OpenAIResponsesOptions;
case "azure-openai-responses":
return {
...base,
reasoningEffort: supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning),
} satisfies AzureOpenAIResponsesOptions;
case "openai-codex-responses":
return {
...base,
reasoningEffort: supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning),
} satisfies OpenAICodexResponsesOptions;
case "google-generative-ai": {
// Explicitly disable thinking when reasoning is not specified
// This is needed because Gemini has "dynamic thinking" enabled by default
if (!options?.reasoning) {
return { ...base, thinking: { enabled: false } } satisfies GoogleOptions;
}
const googleModel = model as Model<"google-generative-ai">;
const effort = clampReasoning(options.reasoning)!;
// Gemini 3 models use thinkingLevel exclusively instead of thinkingBudget.
// https://ai.google.dev/gemini-api/docs/thinking#set-budget
if (isGemini3ProModel(googleModel) || isGemini3FlashModel(googleModel)) {
return {
...base,
thinking: {
enabled: true,
level: getGemini3ThinkingLevel(effort, googleModel),
},
} satisfies GoogleOptions;
}
return {
...base,
thinking: {
enabled: true,
budgetTokens: getGoogleBudget(googleModel, effort, options?.thinkingBudgets),
},
} satisfies GoogleOptions;
}
case "google-gemini-cli": {
if (!options?.reasoning) {
return { ...base, thinking: { enabled: false } } satisfies GoogleGeminiCliOptions;
}
const effort = clampReasoning(options.reasoning)!;
// Gemini 3 models use thinkingLevel instead of thinkingBudget
if (model.id.includes("3-pro") || model.id.includes("3-flash")) {
return {
...base,
thinking: {
enabled: true,
level: getGeminiCliThinkingLevel(effort, model.id),
},
} satisfies GoogleGeminiCliOptions;
}
// Models using thinkingBudget (Gemini 2.x, Claude via Antigravity)
// Claude requires max_tokens > thinking.budget_tokens
// So we need to ensure maxTokens accounts for both thinking and output
const defaultBudgets: ThinkingBudgets = {
minimal: 1024,
low: 2048,
medium: 8192,
high: 16384,
};
const budgets = { ...defaultBudgets, ...options?.thinkingBudgets };
const minOutputTokens = 1024;
let thinkingBudget = budgets[effort]!;
// Caller's maxTokens is the desired output; add thinking budget on top, capped at model limit
const maxTokens = Math.min((base.maxTokens || 0) + thinkingBudget, model.maxTokens);
// If not enough room for thinking + output, reduce thinking budget
if (maxTokens <= thinkingBudget) {
thinkingBudget = Math.max(0, maxTokens - minOutputTokens);
}
return {
...base,
maxTokens,
thinking: {
enabled: true,
budgetTokens: thinkingBudget,
},
} satisfies GoogleGeminiCliOptions;
}
case "google-vertex": {
// Explicitly disable thinking when reasoning is not specified
if (!options?.reasoning) {
return { ...base, thinking: { enabled: false } } satisfies GoogleVertexOptions;
}
const vertexModel = model as Model<"google-vertex">;
const effort = clampReasoning(options.reasoning)!;
const geminiModel = vertexModel as unknown as Model<"google-generative-ai">;
if (isGemini3ProModel(geminiModel) || isGemini3FlashModel(geminiModel)) {
return {
...base,
thinking: {
enabled: true,
level: getGemini3ThinkingLevel(effort, geminiModel),
},
} satisfies GoogleVertexOptions;
}
return {
...base,
thinking: {
enabled: true,
budgetTokens: getGoogleBudget(geminiModel, effort, options?.thinkingBudgets),
},
} satisfies GoogleVertexOptions;
}
default: {
// Exhaustiveness check
const _exhaustive: never = model.api;
throw new Error(`Unhandled API in mapOptionsForApi: ${_exhaustive}`);
}
}
}
type ClampedThinkingLevel = Exclude<ThinkingLevel, "xhigh">;
function isGemini3ProModel(model: Model<"google-generative-ai">): boolean {
// Covers gemini-3-pro, gemini-3-pro-preview, and possibly other prefixed ids in the future
return model.id.includes("3-pro");
}
function isGemini3FlashModel(model: Model<"google-generative-ai">): boolean {
// Covers gemini-3-flash, gemini-3-flash-preview, and possibly other prefixed ids in the future
return model.id.includes("3-flash");
}
function getGemini3ThinkingLevel(
effort: ClampedThinkingLevel,
model: Model<"google-generative-ai">,
): GoogleThinkingLevel {
if (isGemini3ProModel(model)) {
// Gemini 3 Pro only supports LOW/HIGH (for now)
switch (effort) {
case "minimal":
case "low":
return "LOW";
case "medium":
case "high":
return "HIGH";
}
}
// Gemini 3 Flash supports all four levels
switch (effort) {
case "minimal":
return "MINIMAL";
case "low":
return "LOW";
case "medium":
return "MEDIUM";
case "high":
return "HIGH";
}
}
function getGeminiCliThinkingLevel(effort: ClampedThinkingLevel, modelId: string): GoogleThinkingLevel {
if (modelId.includes("3-pro")) {
// Gemini 3 Pro only supports LOW/HIGH (for now)
switch (effort) {
case "minimal":
case "low":
return "LOW";
case "medium":
case "high":
return "HIGH";
}
}
// Gemini 3 Flash supports all four levels
switch (effort) {
case "minimal":
return "MINIMAL";
case "low":
return "LOW";
case "medium":
return "MEDIUM";
case "high":
return "HIGH";
}
}
function getGoogleBudget(
model: Model<"google-generative-ai">,
effort: ClampedThinkingLevel,
customBudgets?: ThinkingBudgets,
): number {
// Custom budgets take precedence if provided for this level
if (customBudgets?.[effort] !== undefined) {
return customBudgets[effort]!;
}
// See https://ai.google.dev/gemini-api/docs/thinking#set-budget
if (model.id.includes("2.5-pro")) {
const budgets: Record<ClampedThinkingLevel, number> = {
minimal: 128,
low: 2048,
medium: 8192,
high: 32768,
};
return budgets[effort];
}
if (model.id.includes("2.5-flash")) {
// Covers 2.5-flash-lite as well
const budgets: Record<ClampedThinkingLevel, number> = {
minimal: 128,
low: 2048,
medium: 8192,
high: 24576,
};
return budgets[effort];
}
// Unknown model - use dynamic
return -1;
}
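The payoff of the registry: dispatch in stream() and streamSimple() is now a single lookup, and adding an api never touches this file again. A caller-side sketch, with model and context assumed to be in scope:
const reply = await complete(model, context); // resolves the provider via getApiProvider(model.api)
// an unregistered api fails fast: "No API provider registered for api: ..."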


@@ -1,17 +1,8 @@
import type { BedrockOptions } from "./providers/amazon-bedrock.js";
import type { AnthropicOptions } from "./providers/anthropic.js";
import type { AzureOpenAIResponsesOptions } from "./providers/azure-openai-responses.js";
import type { GoogleOptions } from "./providers/google.js";
import type { GoogleGeminiCliOptions } from "./providers/google-gemini-cli.js";
import type { GoogleVertexOptions } from "./providers/google-vertex.js";
import type { OpenAICodexResponsesOptions } from "./providers/openai-codex-responses.js";
import type { OpenAICompletionsOptions } from "./providers/openai-completions.js";
import type { OpenAIResponsesOptions } from "./providers/openai-responses.js";
import type { AssistantMessageEventStream } from "./utils/event-stream.js";
export type { AssistantMessageEventStream } from "./utils/event-stream.js";
export type Api =
export type KnownApi =
| "openai-completions"
| "openai-responses"
| "azure-openai-responses"
@@ -22,28 +13,7 @@ export type Api =
| "google-gemini-cli"
| "google-vertex";
export interface ApiOptionsMap {
"anthropic-messages": AnthropicOptions;
"bedrock-converse-stream": BedrockOptions;
"openai-completions": OpenAICompletionsOptions;
"openai-responses": OpenAIResponsesOptions;
"azure-openai-responses": AzureOpenAIResponsesOptions;
"openai-codex-responses": OpenAICodexResponsesOptions;
"google-generative-ai": GoogleOptions;
"google-gemini-cli": GoogleGeminiCliOptions;
"google-vertex": GoogleVertexOptions;
}
// Compile-time exhaustiveness check - this will fail if ApiOptionsMap doesn't have all KnownApi keys
type _CheckExhaustive = ApiOptionsMap extends Record<Api, StreamOptions>
? Record<Api, StreamOptions> extends ApiOptionsMap
? true
: ["ApiOptionsMap is missing some KnownApi values", Exclude<Api, keyof ApiOptionsMap>]
: ["ApiOptionsMap doesn't extend Record<KnownApi, StreamOptions>"];
const _exhaustive: _CheckExhaustive = true;
// Helper type to get options for a specific API
export type OptionsForApi<TApi extends Api> = ApiOptionsMap[TApi];
export type Api = KnownApi | (string & {});
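The (string & {}) intersection keeps editor autocompletion for the KnownApi literals while letting externally registered api ids through. For instance (the custom id is hypothetical):
const known: Api = "openai-responses"; // still suggested by the editor via KnownApi
const custom: Api = "my-proxy"; // also accepted now that Api is open
// const closed: KnownApi = "my-proxy"; // error: KnownApi remains a closed union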
export type KnownProvider =
| "amazon-bedrock"
@@ -102,6 +72,8 @@ export interface StreamOptions {
headers?: Record<string, string>;
}
export type ProviderStreamOptions = StreamOptions & Record<string, unknown>;
// Unified options with reasoning passed to streamSimple() and completeSimple()
export interface SimpleStreamOptions extends StreamOptions {
reasoning?: ThinkingLevel;
@@ -110,10 +82,10 @@ export interface SimpleStreamOptions extends StreamOptions {
}
// Generic StreamFunction with typed options
export type StreamFunction<TApi extends Api> = (
export type StreamFunction<TApi extends Api = Api, TOptions extends StreamOptions = StreamOptions> = (
model: Model<TApi>,
context: Context,
options: OptionsForApi<TApi>,
options?: TOptions,
) => AssistantMessageEventStream;
export interface TextContent {


@@ -1,7 +1,10 @@
import { describe, expect, it } from "vitest";
import { getModel } from "../src/models.js";
import { complete, stream } from "../src/stream.js";
import type { Api, Context, Model, OptionsForApi } from "../src/types.js";
import type { Api, Context, Model, StreamOptions } from "../src/types.js";
type StreamOptionsWithExtras = StreamOptions & Record<string, unknown>;
import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js";
import { hasBedrockCredentials } from "./bedrock-utils.js";
import { resolveApiKey } from "./oauth.js";
@@ -12,7 +15,7 @@ const [geminiCliToken, openaiCodexToken] = await Promise.all([
resolveApiKey("openai-codex"),
]);
async function testAbortSignal<TApi extends Api>(llm: Model<TApi>, options: OptionsForApi<TApi> = {}) {
async function testAbortSignal<TApi extends Api>(llm: Model<TApi>, options: StreamOptionsWithExtras = {}) {
const context: Context = {
messages: [
{
@@ -56,7 +59,7 @@ async function testAbortSignal<TApi extends Api>(llm: Model<TApi>, options: Opti
expect(followUp.content.length).toBeGreaterThan(0);
}
async function testImmediateAbort<TApi extends Api>(llm: Model<TApi>, options: OptionsForApi<TApi> = {}) {
async function testImmediateAbort<TApi extends Api>(llm: Model<TApi>, options: StreamOptionsWithExtras = {}) {
const controller = new AbortController();
controller.abort();
@@ -69,7 +72,7 @@ async function testImmediateAbort<TApi extends Api>(llm: Model<TApi>, options: O
expect(response.stopReason).toBe("aborted");
}
async function testAbortThenNewMessage<TApi extends Api>(llm: Model<TApi>, options: OptionsForApi<TApi> = {}) {
async function testAbortThenNewMessage<TApi extends Api>(llm: Model<TApi>, options: StreamOptionsWithExtras = {}) {
// First request: abort immediately before any response content arrives
const controller = new AbortController();
controller.abort();


@ -1,7 +1,10 @@
import { describe, expect, it } from "vitest";
import { getModel } from "../src/models.js";
import { complete } from "../src/stream.js";
import type { Api, AssistantMessage, Context, Model, OptionsForApi, UserMessage } from "../src/types.js";
import type { Api, AssistantMessage, Context, Model, StreamOptions, UserMessage } from "../src/types.js";
type StreamOptionsWithExtras = StreamOptions & Record<string, unknown>;
import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js";
import { hasBedrockCredentials } from "./bedrock-utils.js";
import { resolveApiKey } from "./oauth.js";
@ -16,7 +19,7 @@ const oauthTokens = await Promise.all([
]);
const [anthropicOAuthToken, githubCopilotToken, geminiCliToken, antigravityToken, openaiCodexToken] = oauthTokens;
async function testEmptyMessage<TApi extends Api>(llm: Model<TApi>, options: OptionsForApi<TApi> = {}) {
async function testEmptyMessage<TApi extends Api>(llm: Model<TApi>, options: StreamOptionsWithExtras = {}) {
// Test with completely empty content array
const emptyMessage: UserMessage = {
role: "user",
@ -41,7 +44,7 @@ async function testEmptyMessage<TApi extends Api>(llm: Model<TApi>, options: Opt
}
}
async function testEmptyStringMessage<TApi extends Api>(llm: Model<TApi>, options: OptionsForApi<TApi> = {}) {
async function testEmptyStringMessage<TApi extends Api>(llm: Model<TApi>, options: StreamOptionsWithExtras = {}) {
// Test with empty string content
const context: Context = {
messages: [
@ -66,7 +69,7 @@ async function testEmptyStringMessage<TApi extends Api>(llm: Model<TApi>, option
}
}
async function testWhitespaceOnlyMessage<TApi extends Api>(llm: Model<TApi>, options: OptionsForApi<TApi> = {}) {
async function testWhitespaceOnlyMessage<TApi extends Api>(llm: Model<TApi>, options: StreamOptionsWithExtras = {}) {
// Test with whitespace-only content
const context: Context = {
messages: [
@ -91,7 +94,7 @@ async function testWhitespaceOnlyMessage<TApi extends Api>(llm: Model<TApi>, opt
}
}
async function testEmptyAssistantMessage<TApi extends Api>(llm: Model<TApi>, options: OptionsForApi<TApi> = {}) {
async function testEmptyAssistantMessage<TApi extends Api>(llm: Model<TApi>, options: StreamOptionsWithExtras = {}) {
// Test with empty assistant message in conversation flow
// User -> Empty Assistant -> User
const emptyAssistant: AssistantMessage = {


@ -4,7 +4,10 @@ import { Type } from "@sinclair/typebox";
import { describe, expect, it } from "vitest";
import type { Api, Context, Model, Tool, ToolResultMessage } from "../src/index.js";
import { complete, getModel } from "../src/index.js";
import type { OptionsForApi } from "../src/types.js";
import type { StreamOptions } from "../src/types.js";
type StreamOptionsWithExtras = StreamOptions & Record<string, unknown>;
import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js";
import { hasBedrockCredentials } from "./bedrock-utils.js";
import { resolveApiKey } from "./oauth.js";
@ -26,7 +29,7 @@ const [anthropicOAuthToken, githubCopilotToken, geminiCliToken, antigravityToken
* 2. Providers correctly pass images from tool results to the LLM
* 3. The LLM can see and describe images returned by tools
*/
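// Rough shape being exercised (field names here are illustrative assumptions,
// not taken from this diff): a ToolResultMessage whose content array carries
// an image block, e.g. [{ type: "image", ... }], alongside any text blocks.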
async function handleToolWithImageResult<TApi extends Api>(model: Model<TApi>, options?: OptionsForApi<TApi>) {
async function handleToolWithImageResult<TApi extends Api>(model: Model<TApi>, options?: StreamOptionsWithExtras) {
// Check if the model supports images
if (!model.input.includes("image")) {
console.log(`Skipping tool image result test - model ${model.id} doesn't support images`);
@ -114,7 +117,10 @@ async function handleToolWithImageResult<TApi extends Api>(model: Model<TApi>, o
* 2. Providers correctly pass both text and images from tool results to the LLM
* 3. The LLM can see both the text and images in tool results
*/
async function handleToolWithTextAndImageResult<TApi extends Api>(model: Model<TApi>, options?: OptionsForApi<TApi>) {
async function handleToolWithTextAndImageResult<TApi extends Api>(
model: Model<TApi>,
options?: StreamOptionsWithExtras,
) {
// Check if the model supports images
if (!model.input.includes("image")) {
console.log(`Skipping tool text+image result test - model ${model.id} doesn't support images`);


@ -6,7 +6,10 @@ import { fileURLToPath } from "url";
import { afterAll, beforeAll, describe, expect, it } from "vitest";
import { getModel } from "../src/models.js";
import { complete, stream } from "../src/stream.js";
import type { Api, Context, ImageContent, Model, OptionsForApi, Tool, ToolResultMessage } from "../src/types.js";
import type { Api, Context, ImageContent, Model, StreamOptions, Tool, ToolResultMessage } from "../src/types.js";
type StreamOptionsWithExtras = StreamOptions & Record<string, unknown>;
import { StringEnum } from "../src/utils/typebox-helpers.js";
import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js";
import { hasBedrockCredentials } from "./bedrock-utils.js";
@ -42,7 +45,7 @@ const calculatorTool: Tool<typeof calculatorSchema> = {
parameters: calculatorSchema,
};
async function basicTextGeneration<TApi extends Api>(model: Model<TApi>, options?: OptionsForApi<TApi>) {
async function basicTextGeneration<TApi extends Api>(model: Model<TApi>, options?: StreamOptionsWithExtras) {
const context: Context = {
systemPrompt: "You are a helpful assistant. Be concise.",
messages: [{ role: "user", content: "Reply with exactly: 'Hello test successful'", timestamp: Date.now() }],
@ -71,7 +74,7 @@ async function basicTextGeneration<TApi extends Api>(model: Model<TApi>, options
);
}
async function handleToolCall<TApi extends Api>(model: Model<TApi>, options?: OptionsForApi<TApi>) {
async function handleToolCall<TApi extends Api>(model: Model<TApi>, options?: StreamOptionsWithExtras) {
const context: Context = {
systemPrompt: "You are a helpful assistant that uses tools when asked.",
messages: [
@ -149,7 +152,7 @@ async function handleToolCall<TApi extends Api>(model: Model<TApi>, options?: Op
}
}
async function handleStreaming<TApi extends Api>(model: Model<TApi>, options?: OptionsForApi<TApi>) {
async function handleStreaming<TApi extends Api>(model: Model<TApi>, options?: StreamOptionsWithExtras) {
let textStarted = false;
let textChunks = "";
let textCompleted = false;
@ -179,7 +182,7 @@ async function handleStreaming<TApi extends Api>(model: Model<TApi>, options?: O
expect(response.content.some((b) => b.type === "text")).toBeTruthy();
}
async function handleThinking<TApi extends Api>(model: Model<TApi>, options?: OptionsForApi<TApi>) {
async function handleThinking<TApi extends Api>(model: Model<TApi>, options?: StreamOptionsWithExtras) {
let thinkingStarted = false;
let thinkingChunks = "";
let thinkingCompleted = false;
@ -216,7 +219,7 @@ async function handleThinking<TApi extends Api>(model: Model<TApi>, options?: Op
expect(response.content.some((b) => b.type === "thinking")).toBeTruthy();
}
async function handleImage<TApi extends Api>(model: Model<TApi>, options?: OptionsForApi<TApi>) {
async function handleImage<TApi extends Api>(model: Model<TApi>, options?: StreamOptionsWithExtras) {
// Check if the model supports images
if (!model.input.includes("image")) {
console.log(`Skipping image test - model ${model.id} doesn't support images`);
@ -263,7 +266,7 @@ async function handleImage<TApi extends Api>(model: Model<TApi>, options?: Optio
}
}
async function multiTurn<TApi extends Api>(model: Model<TApi>, options?: OptionsForApi<TApi>) {
async function multiTurn<TApi extends Api>(model: Model<TApi>, options?: StreamOptionsWithExtras) {
const context: Context = {
systemPrompt: "You are a helpful assistant that can use tools to answer questions.",
messages: [


@ -1,7 +1,10 @@
import { describe, expect, it } from "vitest";
import { getModel } from "../src/models.js";
import { stream } from "../src/stream.js";
import type { Api, Context, Model, OptionsForApi } from "../src/types.js";
import type { Api, Context, Model, StreamOptions } from "../src/types.js";
type StreamOptionsWithExtras = StreamOptions & Record<string, unknown>;
import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js";
import { hasBedrockCredentials } from "./bedrock-utils.js";
import { resolveApiKey } from "./oauth.js";
@ -16,7 +19,7 @@ const oauthTokens = await Promise.all([
]);
const [anthropicOAuthToken, githubCopilotToken, geminiCliToken, antigravityToken, openaiCodexToken] = oauthTokens;
async function testTokensOnAbort<TApi extends Api>(llm: Model<TApi>, options: OptionsForApi<TApi> = {}) {
async function testTokensOnAbort<TApi extends Api>(llm: Model<TApi>, options: StreamOptionsWithExtras = {}) {
const context: Context = {
messages: [
{


@ -2,7 +2,10 @@ import { Type } from "@sinclair/typebox";
import { describe, expect, it } from "vitest";
import { getModel } from "../src/models.js";
import { complete } from "../src/stream.js";
import type { Api, Context, Model, OptionsForApi, Tool } from "../src/types.js";
import type { Api, Context, Model, StreamOptions, Tool } from "../src/types.js";
type StreamOptionsWithExtras = StreamOptions & Record<string, unknown>;
import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js";
import { hasBedrockCredentials } from "./bedrock-utils.js";
import { resolveApiKey } from "./oauth.js";
@ -28,10 +31,7 @@ const calculateTool: Tool = {
parameters: calculateSchema,
};
async function testToolCallWithoutResult<TApi extends Api>(
model: Model<TApi>,
options: OptionsForApi<TApi> = {} as OptionsForApi<TApi>,
) {
async function testToolCallWithoutResult<TApi extends Api>(model: Model<TApi>, options: StreamOptionsWithExtras = {}) {
// Step 1: Create context with the calculate tool
const context: Context = {
systemPrompt: "You are a helpful assistant. Use the calculate tool when asked to perform calculations.",


@ -15,7 +15,10 @@
import { describe, expect, it } from "vitest";
import { getModel } from "../src/models.js";
import { complete } from "../src/stream.js";
import type { Api, Context, Model, OptionsForApi, Usage } from "../src/types.js";
import type { Api, Context, Model, StreamOptions, Usage } from "../src/types.js";
type StreamOptionsWithExtras = StreamOptions & Record<string, unknown>;
import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js";
import { hasBedrockCredentials } from "./bedrock-utils.js";
import { resolveApiKey } from "./oauth.js";
@ -45,7 +48,7 @@ Remember: Always be helpful and concise.`;
async function testTotalTokensWithCache<TApi extends Api>(
llm: Model<TApi>,
options: OptionsForApi<TApi> = {} as OptionsForApi<TApi>,
options: StreamOptionsWithExtras = {},
): Promise<{ first: Usage; second: Usage }> {
// First request - no cache
const context1: Context = {


@ -2,7 +2,10 @@ import { Type } from "@sinclair/typebox";
import { describe, expect, it } from "vitest";
import { getModel } from "../src/models.js";
import { complete } from "../src/stream.js";
import type { Api, Context, Model, OptionsForApi, ToolResultMessage } from "../src/types.js";
import type { Api, Context, Model, StreamOptions, ToolResultMessage } from "../src/types.js";
type StreamOptionsWithExtras = StreamOptions & Record<string, unknown>;
import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js";
import { hasBedrockCredentials } from "./bedrock-utils.js";
import { resolveApiKey } from "./oauth.js";
@ -31,7 +34,7 @@ const [anthropicOAuthToken, githubCopilotToken, geminiCliToken, antigravityToken
* "The request body is not valid JSON: no low surrogate in string: line 1 column 197667"
*/
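// Illustration (not from this diff): "\uD83D" is the high half of an emoji's
// UTF-16 surrogate pair; sent on its own it remains unpaired after JSON
// serialization, and some provider backends reject the request body as
// invalid JSON - exactly the failure quoted above.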
async function testEmojiInToolResults<TApi extends Api>(llm: Model<TApi>, options: OptionsForApi<TApi> = {}) {
async function testEmojiInToolResults<TApi extends Api>(llm: Model<TApi>, options: StreamOptionsWithExtras = {}) {
const toolCallId = llm.provider === "mistral" ? "testtool1" : "test_1";
// Simulate a tool that returns emoji
const context: Context = {
@ -118,7 +121,7 @@ async function testEmojiInToolResults<TApi extends Api>(llm: Model<TApi>, option
expect(response.content.length).toBeGreaterThan(0);
}
async function testRealWorldLinkedInData<TApi extends Api>(llm: Model<TApi>, options: OptionsForApi<TApi> = {}) {
async function testRealWorldLinkedInData<TApi extends Api>(llm: Model<TApi>, options: StreamOptionsWithExtras = {}) {
const toolCallId = llm.provider === "mistral" ? "linkedin1" : "linkedin_1";
const context: Context = {
systemPrompt: "You are a helpful assistant.",
@ -207,7 +210,7 @@ Unanswered Comments: 2
expect(response.content.some((b) => b.type === "text")).toBe(true);
}
async function testUnpairedHighSurrogate<TApi extends Api>(llm: Model<TApi>, options: OptionsForApi<TApi> = {}) {
async function testUnpairedHighSurrogate<TApi extends Api>(llm: Model<TApi>, options: StreamOptionsWithExtras = {}) {
const toolCallId = llm.provider === "mistral" ? "testtool2" : "test_2";
const context: Context = {
systemPrompt: "You are a helpful assistant.",