Update models/package-lock.json after adding deps

This commit is contained in:
Mario Zechner 2026-01-25 19:29:02 +01:00
parent 5555864c57
commit 676de103e1
2 changed files with 44 additions and 70 deletions

10
package-lock.json generated
View file

@@ -6522,6 +6522,7 @@
"resolved": "https://registry.npmjs.org/lit/-/lit-3.3.2.tgz",
"integrity": "sha512-NF9zbsP79l4ao2SNrH3NkfmFgN/hBYSQo90saIVI1o5GpjAdCPVstVzO1MrLOakHoEhYkrtRjPK6Ob521aoYWQ==",
"license": "BSD-3-Clause",
"peer": true,
"dependencies": {
"@lit/reactive-element": "^2.1.0",
"lit-element": "^4.2.0",
@@ -8001,6 +8002,7 @@
"resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-3.4.0.tgz",
"integrity": "sha512-uSaO4gnW+b3Y2aWoWfFpX62vn2sR3skfhbjsEnaBI81WD1wBLlHZe5sWf0AqjksNdYTbGBEd0UasQMT3SNV15g==",
"license": "MIT",
"peer": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/dcastil"
@@ -8029,7 +8031,8 @@
"version": "4.1.18",
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.18.tgz",
"integrity": "sha512-4+Z+0yiYyEtUVCScyfHCxOYP06L5Ne+JiHhY2IjR2KWMIWhJOYZKLSGZaP5HkZ8+bY0cxfzwDE5uOmzFXyIwxw==",
"license": "MIT"
"license": "MIT",
"peer": true
},
"node_modules/tapable": {
"version": "2.3.0",
@@ -8147,6 +8150,7 @@
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},
@@ -8243,6 +8247,7 @@
"integrity": "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==",
"devOptional": true,
"license": "MIT",
"peer": true,
"dependencies": {
"esbuild": "~0.27.0",
"get-tsconfig": "^4.7.5"
@@ -8331,6 +8336,7 @@
"resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz",
"integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==",
"license": "MIT",
"peer": true,
"dependencies": {
"esbuild": "^0.27.0",
"fdir": "^6.5.0",
@@ -8445,6 +8451,7 @@
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},
@@ -8825,6 +8832,7 @@
"resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz",
"integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==",
"license": "MIT",
"peer": true,
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}

View file

@@ -2083,7 +2083,7 @@ export const MODELS = {
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
contextWindow: 64000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"gpt-4o": {
@@ -4449,40 +4449,6 @@ export const MODELS = {
} satisfies Model<"openai-codex-responses">,
},
"opencode": {
"alpha-gd4": {
id: "alpha-gd4",
name: "Alpha GD4",
api: "anthropic-messages",
provider: "opencode",
baseUrl: "https://opencode.ai/zen",
reasoning: true,
input: ["text"],
cost: {
input: 0.5,
output: 2,
cacheRead: 0.15,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 32768,
} satisfies Model<"anthropic-messages">,
"alpha-glm-4.7": {
id: "alpha-glm-4.7",
name: "Alpha GLM-4.7",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0.6,
output: 2.2,
cacheRead: 0.6,
cacheWrite: 0,
},
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"big-pickle": {
id: "big-pickle",
name: "Big Pickle",
@@ -4653,6 +4619,23 @@ export const MODELS = {
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"glm-4.7": {
id: "glm-4.7",
name: "GLM-4.7",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0.6,
output: 2.2,
cacheRead: 0.1,
cacheWrite: 0,
},
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"glm-4.7-free": {
id: "glm-4.7-free",
name: "GLM-4.7",
@@ -5636,7 +5619,7 @@ export const MODELS = {
input: 0.09999999999999999,
output: 0.39999999999999997,
cacheRead: 0.024999999999999998,
cacheWrite: 0.0833,
cacheWrite: 0.08333333333333334,
},
contextWindow: 1048576,
maxTokens: 8192,
@@ -5704,7 +5687,7 @@ export const MODELS = {
input: 0.09999999999999999,
output: 0.39999999999999997,
cacheRead: 0.01,
cacheWrite: 0.0833,
cacheWrite: 0.08333333333333334,
},
contextWindow: 1048576,
maxTokens: 65535,
@@ -5721,7 +5704,7 @@ export const MODELS = {
input: 0.09999999999999999,
output: 0.39999999999999997,
cacheRead: 0.01,
cacheWrite: 0.0833,
cacheWrite: 0.08333333333333334,
},
contextWindow: 1048576,
maxTokens: 65535,
@@ -5738,7 +5721,7 @@ export const MODELS = {
input: 0.3,
output: 2.5,
cacheRead: 0.03,
cacheWrite: 0.0833,
cacheWrite: 0.08333333333333334,
},
contextWindow: 1048576,
maxTokens: 65535,
@@ -5771,7 +5754,7 @@ export const MODELS = {
cost: {
input: 1.25,
output: 10,
cacheRead: 0.31,
cacheRead: 0.125,
cacheWrite: 0.375,
},
contextWindow: 1048576,
@@ -5788,7 +5771,7 @@ export const MODELS = {
cost: {
input: 1.25,
output: 10,
cacheRead: 0.31,
cacheRead: 0.125,
cacheWrite: 0.375,
},
contextWindow: 1048576,
@@ -5806,7 +5789,7 @@ export const MODELS = {
input: 0.5,
output: 3,
cacheRead: 0.049999999999999996,
cacheWrite: 0,
cacheWrite: 0.08333333333333334,
},
contextWindow: 1048576,
maxTokens: 65535,
@@ -6093,12 +6076,12 @@ export const MODELS = {
input: ["text"],
cost: {
input: 0.27,
output: 1.12,
output: 1.1,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 196608,
maxTokens: 65536,
maxTokens: 196608,
} satisfies Model<"openai-completions">,
"mistralai/codestral-2508": {
id: "mistralai/codestral-2508",
@@ -7851,23 +7834,6 @@ export const MODELS = {
contextWindow: 131072,
maxTokens: 8192,
} satisfies Model<"openai-completions">,
"qwen/qwen2.5-vl-72b-instruct": {
id: "qwen/qwen2.5-vl-72b-instruct",
name: "Qwen: Qwen2.5 VL 72B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 0.15,
output: 0.6,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 32768,
maxTokens: 32768,
} satisfies Model<"openai-completions">,
"qwen/qwen3-14b": {
id: "qwen/qwen3-14b",
name: "Qwen: Qwen3 14B",
@@ -8117,7 +8083,7 @@ export const MODELS = {
cost: {
input: 0.22,
output: 1.7999999999999998,
cacheRead: 0,
cacheRead: 0.022,
cacheWrite: 0,
},
contextWindow: 262144,
@@ -8952,7 +8918,7 @@ export const MODELS = {
cost: {
input: 1,
output: 5,
cacheRead: 0,
cacheRead: 0.19999999999999998,
cacheWrite: 0,
},
contextWindow: 1000000,
@@ -9305,15 +9271,15 @@ export const MODELS = {
provider: "vercel-ai-gateway",
baseUrl: "https://ai-gateway.vercel.sh",
reasoning: true,
input: ["text", "image"],
input: ["text"],
cost: {
input: 0.3,
output: 2.5,
cacheRead: 0.03,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 1000000,
maxTokens: 64000,
maxTokens: 65536,
} satisfies Model<"anthropic-messages">,
"google/gemini-2.5-flash-lite": {
id: "google/gemini-2.5-flash-lite",
@@ -9373,11 +9339,11 @@ export const MODELS = {
provider: "vercel-ai-gateway",
baseUrl: "https://ai-gateway.vercel.sh",
reasoning: true,
input: ["text", "image"],
input: ["text"],
cost: {
input: 1.25,
output: 10,
cacheRead: 0.125,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 1048576,
@@ -10788,7 +10754,7 @@ export const MODELS = {
cost: {
input: 0.19999999999999998,
output: 1.1,
cacheRead: 0,
cacheRead: 0.03,
cacheWrite: 0,
},
contextWindow: 128000,