Fix tsgo type issues: update tsgo, fix ReasoningEffort import, remove broken enum-test

Mario Zechner 2025-12-08 22:59:13 +01:00
parent 0bc8d79216
commit 238c5d34e4
7 changed files with 289 additions and 835 deletions

package-lock.json (generated): 834 changed lines — diff suppressed because it is too large.

@@ -25,7 +25,7 @@
 	"devDependencies": {
 		"@biomejs/biome": "2.3.5",
 		"@types/node": "^22.10.5",
-		"@typescript/native-preview": "^7.0.0-dev.20251111.1",
+		"@typescript/native-preview": "^7.0.0-dev.20251208.1",
 		"concurrently": "^9.2.1",
 		"husky": "^9.1.7",
 		"tsx": "^4.20.3",


@@ -1,4 +1,4 @@
-import type { ImageContent, Message, QueuedMessage, TextContent } from "@mariozechner/pi-ai";
+import type { ImageContent, Message, QueuedMessage, ReasoningEffort, TextContent } from "@mariozechner/pi-ai";
 import { getModel } from "@mariozechner/pi-ai";
 import type { AgentTransport } from "./transports/types.js";
 import type { AgentEvent, AgentState, AppMessage, Attachment, ThinkingLevel } from "./types.js";
@@ -209,7 +209,7 @@ export class Agent {
 		this._state.streamMessage = null;
 		this._state.error = undefined;
-		const reasoning =
+		const reasoning: ReasoningEffort | undefined =
 			this._state.thinkingLevel === "off"
 				? undefined
 				: this._state.thinkingLevel === "minimal"
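
The explicit ReasoningEffort | undefined annotation is the tsgo fix here: evidently the union inferred for the nested ternary no longer lined up with what the downstream pi-ai call expects. A minimal self-contained sketch of the mapping being annotated — the exact members of ThinkingLevel and ReasoningEffort are assumptions, since the hunk only shows "off" and "minimal":

type ThinkingLevel = "off" | "minimal" | "low" | "medium" | "high"; // assumed members
type ReasoningEffort = "minimal" | "low" | "medium" | "high" | "xhigh"; // assumed members

function toReasoningEffort(level: ThinkingLevel): ReasoningEffort | undefined {
	// "off" disables reasoning entirely; the remaining levels pass through by name.
	return level === "off" ? undefined : level;
}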


@@ -1,4 +1,4 @@
-import type { AgentEvent, AgentTool, Message, Model, QueuedMessage } from "@mariozechner/pi-ai";
+import type { AgentEvent, AgentTool, Message, Model, QueuedMessage, ReasoningEffort } from "@mariozechner/pi-ai";
 
 /**
  * The minimal configuration needed to run an agent turn.
@@ -7,7 +7,7 @@ export interface AgentRunConfig {
 	systemPrompt: string;
 	tools: AgentTool<any>[];
 	model: Model<any>;
-	reasoning?: "low" | "medium" | "high" | "xhigh";
+	reasoning?: ReasoningEffort;
 	getQueuedMessages?: <T>() => Promise<QueuedMessage<T>[]>;
 }
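
Swapping the inline union for the exported alias keeps AgentRunConfig from drifting when pi-ai gains or renames effort levels. A hedged usage sketch — the model lookup and its arguments are placeholders, not a documented getModel signature:

const config: AgentRunConfig = {
	systemPrompt: "You are a helpful assistant.",
	tools: [],
	model: getModel("openrouter", "openai/gpt-4o-mini"), // hypothetical lookup
	reasoning: "medium", // now checked against ReasoningEffort, not a local union
};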


@@ -1991,6 +1991,23 @@ export const MODELS = {
 		} satisfies Model<"anthropic-messages">,
 	},
 	openrouter: {
+		"relace/relace-search": {
+			id: "relace/relace-search",
+			name: "Relace: Relace Search",
+			api: "openai-completions",
+			provider: "openrouter",
+			baseUrl: "https://openrouter.ai/api/v1",
+			reasoning: false,
+			input: ["text"],
+			cost: {
+				input: 1,
+				output: 3,
+				cacheRead: 0,
+				cacheWrite: 0,
+			},
+			contextWindow: 256000,
+			maxTokens: 128000,
+		} satisfies Model<"openai-completions">,
 		"openai/gpt-5.1-codex-max": {
 			id: "openai/gpt-5.1-codex-max",
 			name: "OpenAI: GPT-5.1-Codex-Max",
@@ -2153,13 +2170,13 @@ export const MODELS = {
 			reasoning: true,
 			input: ["text"],
 			cost: {
-				input: 0.27,
-				output: 0.39999999999999997,
-				cacheRead: 0.216,
+				input: 0.26,
+				output: 0.39,
+				cacheRead: 0.19999999999999998,
 				cacheWrite: 0,
 			},
 			contextWindow: 163840,
-			maxTokens: 4096,
+			maxTokens: 65536,
 		} satisfies Model<"openai-completions">,
 		"prime-intellect/intellect-3": {
 			id: "prime-intellect/intellect-3",
@@ -2646,13 +2663,13 @@ export const MODELS = {
 			reasoning: false,
 			input: ["text", "image"],
 			cost: {
-				input: 0.15,
-				output: 0.6,
+				input: 0.14,
+				output: 1,
 				cacheRead: 0,
 				cacheWrite: 0,
 			},
-			contextWindow: 262144,
-			maxTokens: 4096,
+			contextWindow: 131072,
+			maxTokens: 131072,
 		} satisfies Model<"openai-completions">,
 		"openai/gpt-5-pro": {
 			id: "openai/gpt-5-pro",
@@ -3241,13 +3258,13 @@ export const MODELS = {
 			reasoning: true,
 			input: ["text"],
 			cost: {
-				input: 0,
-				output: 0,
+				input: 0.19999999999999998,
+				output: 0.7999999999999999,
 				cacheRead: 0,
 				cacheWrite: 0,
 			},
-			contextWindow: 131072,
-			maxTokens: 131072,
+			contextWindow: 163840,
+			maxTokens: 163840,
 		} satisfies Model<"openai-completions">,
 		"openai/gpt-4o-audio-preview": {
 			id: "openai/gpt-4o-audio-preview",
@@ -3485,7 +3502,7 @@ export const MODELS = {
 				cacheWrite: 0,
 			},
 			contextWindow: 131072,
-			maxTokens: 131072,
+			maxTokens: 128000,
 		} satisfies Model<"openai-completions">,
 		"openai/gpt-oss-20b": {
 			id: "openai/gpt-oss-20b",
@@ -3963,23 +3980,6 @@ export const MODELS = {
 			contextWindow: 131072,
 			maxTokens: 4096,
 		} satisfies Model<"openai-completions">,
-		"mistralai/magistral-medium-2506:thinking": {
-			id: "mistralai/magistral-medium-2506:thinking",
-			name: "Mistral: Magistral Medium 2506 (thinking)",
-			api: "openai-completions",
-			provider: "openrouter",
-			baseUrl: "https://openrouter.ai/api/v1",
-			reasoning: true,
-			input: ["text"],
-			cost: {
-				input: 2,
-				output: 5,
-				cacheRead: 0,
-				cacheWrite: 0,
-			},
-			contextWindow: 40960,
-			maxTokens: 40000,
-		} satisfies Model<"openai-completions">,
 		"google/gemini-2.5-pro-preview": {
 			id: "google/gemini-2.5-pro-preview",
 			name: "Google: Gemini 2.5 Pro Preview 06-05",
@@ -4006,8 +4006,8 @@ export const MODELS = {
 			reasoning: true,
 			input: ["text"],
 			cost: {
-				input: 0.19999999999999998,
-				output: 4.5,
+				input: 0.39999999999999997,
+				output: 1.75,
 				cacheRead: 0,
 				cacheWrite: 0,
 			},
@@ -4499,13 +4499,13 @@ export const MODELS = {
 			reasoning: false,
 			input: ["text", "image"],
 			cost: {
-				input: 0.07,
-				output: 0.5,
+				input: 0.049999999999999996,
+				output: 0.22,
 				cacheRead: 0,
 				cacheWrite: 0,
 			},
-			contextWindow: 131072,
-			maxTokens: 131072,
+			contextWindow: 96000,
+			maxTokens: 96000,
 		} satisfies Model<"openai-completions">,
 		"qwen/qwq-32b": {
 			id: "qwen/qwq-32b",
@@ -4983,9 +4983,9 @@ export const MODELS = {
 			contextWindow: 32768,
 			maxTokens: 4096,
 		} satisfies Model<"openai-completions">,
-		"anthropic/claude-3.5-haiku-20241022": {
-			id: "anthropic/claude-3.5-haiku-20241022",
-			name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
+		"anthropic/claude-3.5-haiku": {
+			id: "anthropic/claude-3.5-haiku",
+			name: "Anthropic: Claude 3.5 Haiku",
 			api: "openai-completions",
 			provider: "openrouter",
 			baseUrl: "https://openrouter.ai/api/v1",
@@ -5000,9 +5000,9 @@ export const MODELS = {
 			contextWindow: 200000,
 			maxTokens: 8192,
 		} satisfies Model<"openai-completions">,
-		"anthropic/claude-3.5-haiku": {
-			id: "anthropic/claude-3.5-haiku",
-			name: "Anthropic: Claude 3.5 Haiku",
+		"anthropic/claude-3.5-haiku-20241022": {
+			id: "anthropic/claude-3.5-haiku-20241022",
+			name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
 			api: "openai-completions",
 			provider: "openrouter",
 			baseUrl: "https://openrouter.ai/api/v1",
@@ -5034,23 +5034,6 @@ export const MODELS = {
 			contextWindow: 200000,
 			maxTokens: 8192,
 		} satisfies Model<"openai-completions">,
-		"mistralai/ministral-8b": {
-			id: "mistralai/ministral-8b",
-			name: "Mistral: Ministral 8B",
-			api: "openai-completions",
-			provider: "openrouter",
-			baseUrl: "https://openrouter.ai/api/v1",
-			reasoning: false,
-			input: ["text"],
-			cost: {
-				input: 0.09999999999999999,
-				output: 0.09999999999999999,
-				cacheRead: 0,
-				cacheWrite: 0,
-			},
-			contextWindow: 131072,
-			maxTokens: 4096,
-		} satisfies Model<"openai-completions">,
 		"mistralai/ministral-3b": {
 			id: "mistralai/ministral-3b",
 			name: "Mistral: Ministral 3B",
@@ -5068,6 +5051,23 @@ export const MODELS = {
 			contextWindow: 131072,
 			maxTokens: 4096,
 		} satisfies Model<"openai-completions">,
+		"mistralai/ministral-8b": {
+			id: "mistralai/ministral-8b",
+			name: "Mistral: Ministral 8B",
+			api: "openai-completions",
+			provider: "openrouter",
+			baseUrl: "https://openrouter.ai/api/v1",
+			reasoning: false,
+			input: ["text"],
+			cost: {
+				input: 0.09999999999999999,
+				output: 0.09999999999999999,
+				cacheRead: 0,
+				cacheWrite: 0,
+			},
+			contextWindow: 131072,
+			maxTokens: 4096,
+		} satisfies Model<"openai-completions">,
 		"nvidia/llama-3.1-nemotron-70b-instruct": {
 			id: "nvidia/llama-3.1-nemotron-70b-instruct",
 			name: "NVIDIA: Llama 3.1 Nemotron 70B Instruct",
@@ -5238,22 +5238,22 @@ export const MODELS = {
 			contextWindow: 128000,
 			maxTokens: 16384,
 		} satisfies Model<"openai-completions">,
-		"meta-llama/llama-3.1-8b-instruct": {
-			id: "meta-llama/llama-3.1-8b-instruct",
-			name: "Meta: Llama 3.1 8B Instruct",
+		"meta-llama/llama-3.1-405b-instruct": {
+			id: "meta-llama/llama-3.1-405b-instruct",
+			name: "Meta: Llama 3.1 405B Instruct",
 			api: "openai-completions",
 			provider: "openrouter",
 			baseUrl: "https://openrouter.ai/api/v1",
 			reasoning: false,
 			input: ["text"],
 			cost: {
-				input: 0.02,
-				output: 0.03,
+				input: 3.5,
+				output: 3.5,
 				cacheRead: 0,
 				cacheWrite: 0,
 			},
-			contextWindow: 131072,
-			maxTokens: 16384,
+			contextWindow: 130815,
+			maxTokens: 4096,
 		} satisfies Model<"openai-completions">,
 		"meta-llama/llama-3.1-70b-instruct": {
 			id: "meta-llama/llama-3.1-70b-instruct",
@@ -5272,22 +5272,22 @@ export const MODELS = {
 			contextWindow: 131072,
 			maxTokens: 4096,
 		} satisfies Model<"openai-completions">,
-		"meta-llama/llama-3.1-405b-instruct": {
-			id: "meta-llama/llama-3.1-405b-instruct",
-			name: "Meta: Llama 3.1 405B Instruct",
+		"meta-llama/llama-3.1-8b-instruct": {
+			id: "meta-llama/llama-3.1-8b-instruct",
+			name: "Meta: Llama 3.1 8B Instruct",
 			api: "openai-completions",
 			provider: "openrouter",
 			baseUrl: "https://openrouter.ai/api/v1",
 			reasoning: false,
 			input: ["text"],
 			cost: {
-				input: 3.5,
-				output: 3.5,
+				input: 0.02,
+				output: 0.03,
 				cacheRead: 0,
 				cacheWrite: 0,
 			},
-			contextWindow: 130815,
-			maxTokens: 4096,
+			contextWindow: 131072,
+			maxTokens: 16384,
 		} satisfies Model<"openai-completions">,
 		"mistralai/mistral-nemo": {
 			id: "mistralai/mistral-nemo",
@@ -5306,9 +5306,9 @@ export const MODELS = {
 			contextWindow: 131072,
 			maxTokens: 16384,
 		} satisfies Model<"openai-completions">,
-		"openai/gpt-4o-mini": {
-			id: "openai/gpt-4o-mini",
-			name: "OpenAI: GPT-4o-mini",
+		"openai/gpt-4o-mini-2024-07-18": {
+			id: "openai/gpt-4o-mini-2024-07-18",
+			name: "OpenAI: GPT-4o-mini (2024-07-18)",
 			api: "openai-completions",
 			provider: "openrouter",
 			baseUrl: "https://openrouter.ai/api/v1",
@@ -5323,9 +5323,9 @@ export const MODELS = {
 			contextWindow: 128000,
 			maxTokens: 16384,
 		} satisfies Model<"openai-completions">,
-		"openai/gpt-4o-mini-2024-07-18": {
-			id: "openai/gpt-4o-mini-2024-07-18",
-			name: "OpenAI: GPT-4o-mini (2024-07-18)",
+		"openai/gpt-4o-mini": {
+			id: "openai/gpt-4o-mini",
+			name: "OpenAI: GPT-4o-mini",
 			api: "openai-completions",
 			provider: "openrouter",
 			baseUrl: "https://openrouter.ai/api/v1",
@@ -5425,6 +5425,23 @@ export const MODELS = {
 			contextWindow: 128000,
 			maxTokens: 4096,
 		} satisfies Model<"openai-completions">,
+		"openai/gpt-4o-2024-05-13": {
+			id: "openai/gpt-4o-2024-05-13",
+			name: "OpenAI: GPT-4o (2024-05-13)",
+			api: "openai-completions",
+			provider: "openrouter",
+			baseUrl: "https://openrouter.ai/api/v1",
+			reasoning: false,
+			input: ["text", "image"],
+			cost: {
+				input: 5,
+				output: 15,
+				cacheRead: 0,
+				cacheWrite: 0,
+			},
+			contextWindow: 128000,
+			maxTokens: 4096,
+		} satisfies Model<"openai-completions">,
 		"openai/gpt-4o": {
 			id: "openai/gpt-4o",
 			name: "OpenAI: GPT-4o",
@@ -5459,23 +5476,6 @@ export const MODELS = {
 			contextWindow: 128000,
 			maxTokens: 64000,
 		} satisfies Model<"openai-completions">,
-		"openai/gpt-4o-2024-05-13": {
-			id: "openai/gpt-4o-2024-05-13",
-			name: "OpenAI: GPT-4o (2024-05-13)",
-			api: "openai-completions",
-			provider: "openrouter",
-			baseUrl: "https://openrouter.ai/api/v1",
-			reasoning: false,
-			input: ["text", "image"],
-			cost: {
-				input: 5,
-				output: 15,
-				cacheRead: 0,
-				cacheWrite: 0,
-			},
-			contextWindow: 128000,
-			maxTokens: 4096,
-		} satisfies Model<"openai-completions">,
 		"meta-llama/llama-3-8b-instruct": {
 			id: "meta-llama/llama-3-8b-instruct",
 			name: "Meta: Llama 3 8B Instruct",
@@ -5595,23 +5595,6 @@ export const MODELS = {
 			contextWindow: 128000,
 			maxTokens: 4096,
 		} satisfies Model<"openai-completions">,
-		"openai/gpt-4-turbo-preview": {
-			id: "openai/gpt-4-turbo-preview",
-			name: "OpenAI: GPT-4 Turbo Preview",
-			api: "openai-completions",
-			provider: "openrouter",
-			baseUrl: "https://openrouter.ai/api/v1",
-			reasoning: false,
-			input: ["text"],
-			cost: {
-				input: 10,
-				output: 30,
-				cacheRead: 0,
-				cacheWrite: 0,
-			},
-			contextWindow: 128000,
-			maxTokens: 4096,
-		} satisfies Model<"openai-completions">,
 		"openai/gpt-3.5-turbo-0613": {
 			id: "openai/gpt-3.5-turbo-0613",
 			name: "OpenAI: GPT-3.5 Turbo (older v0613)",
@@ -5629,6 +5612,23 @@ export const MODELS = {
 			contextWindow: 4095,
 			maxTokens: 4096,
 		} satisfies Model<"openai-completions">,
+		"openai/gpt-4-turbo-preview": {
+			id: "openai/gpt-4-turbo-preview",
+			name: "OpenAI: GPT-4 Turbo Preview",
+			api: "openai-completions",
+			provider: "openrouter",
+			baseUrl: "https://openrouter.ai/api/v1",
+			reasoning: false,
+			input: ["text"],
+			cost: {
+				input: 10,
+				output: 30,
+				cacheRead: 0,
+				cacheWrite: 0,
+			},
+			contextWindow: 128000,
+			maxTokens: 4096,
+		} satisfies Model<"openai-completions">,
 		"mistralai/mistral-tiny": {
 			id: "mistralai/mistral-tiny",
 			name: "Mistral Tiny",
@@ -5697,6 +5697,23 @@ export const MODELS = {
 			contextWindow: 16385,
 			maxTokens: 4096,
 		} satisfies Model<"openai-completions">,
+		"openai/gpt-3.5-turbo": {
+			id: "openai/gpt-3.5-turbo",
+			name: "OpenAI: GPT-3.5 Turbo",
+			api: "openai-completions",
+			provider: "openrouter",
+			baseUrl: "https://openrouter.ai/api/v1",
+			reasoning: false,
+			input: ["text"],
+			cost: {
+				input: 0.5,
+				output: 1.5,
+				cacheRead: 0,
+				cacheWrite: 0,
+			},
+			contextWindow: 16385,
+			maxTokens: 4096,
+		} satisfies Model<"openai-completions">,
 		"openai/gpt-4-0314": {
 			id: "openai/gpt-4-0314",
 			name: "OpenAI: GPT-4 (older v0314)",
@@ -5731,23 +5748,6 @@ export const MODELS = {
 			contextWindow: 8191,
 			maxTokens: 4096,
 		} satisfies Model<"openai-completions">,
-		"openai/gpt-3.5-turbo": {
-			id: "openai/gpt-3.5-turbo",
-			name: "OpenAI: GPT-3.5 Turbo",
-			api: "openai-completions",
-			provider: "openrouter",
-			baseUrl: "https://openrouter.ai/api/v1",
-			reasoning: false,
-			input: ["text"],
-			cost: {
-				input: 0.5,
-				output: 1.5,
-				cacheRead: 0,
-				cacheWrite: 0,
-			},
-			contextWindow: 16385,
-			maxTokens: 4096,
-		} satisfies Model<"openai-completions">,
 		"openrouter/auto": {
 			id: "openrouter/auto",
 			name: "OpenRouter: Auto Router",


@@ -11,6 +11,7 @@ import { calculateCost } from "../models.js";
 import type {
 	AssistantMessage,
 	Context,
+	Message,
 	Model,
 	OpenAICompat,
 	StopReason,
@@ -24,9 +25,7 @@ import type {
 import { AssistantMessageEventStream } from "../utils/event-stream.js";
 import { parseStreamingJson } from "../utils/json-parse.js";
 import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
 import { transformMessages } from "./transorm-messages.js";
-import type { Message } from "../types.js";
-
 /**
  * Check if conversation messages contain tool calls or tool results.
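
The truncated doc comment belongs to a helper that scans the conversation for tool activity. A hedged sketch of what such a check plausibly looks like — the Message and content-block shapes below are stand-ins, not pi-ai's actual types:

type ContentBlock = { type: "text"; text: string } | { type: "toolCall"; id: string };
type Message =
	| { role: "user" | "assistant"; content: ContentBlock[] }
	| { role: "toolResult"; toolCallId: string };

function hasToolInteraction(messages: Message[]): boolean {
	// A tool-result message, or any content block that is a tool call, counts.
	return messages.some((m) => m.role === "toolResult" || m.content.some((c) => c.type === "toolCall"));
}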


@@ -1,17 +0,0 @@
-import { Type } from "@sinclair/typebox";
-import { z } from "zod";
-import { zodToJsonSchema } from "zod-to-json-schema";
-import { StringEnum } from "../src/utils/typebox-helpers.js";
-
-// Zod version
-const zodSchema = z.object({
-	operation: z.enum(["add", "subtract", "multiply", "divide"]),
-});
-
-// TypeBox with our StringEnum helper
-const typeboxHelper = Type.Object({
-	operation: StringEnum(["add", "subtract", "multiply", "divide"]),
-});
-
-console.log("Zod:", JSON.stringify(zodToJsonSchema(zodSchema), null, 2));
-console.log("\nTypeBox.StringEnum:", JSON.stringify(typeboxHelper, null, 2));