Update models/package-lock.json after adding deps

This commit is contained in:
Mario Zechner 2026-01-25 19:29:02 +01:00
parent 5555864c57
commit 676de103e1
2 changed files with 44 additions and 70 deletions

View file

@@ -2083,7 +2083,7 @@ export const MODELS = {
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
contextWindow: 64000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"gpt-4o": {
@@ -4449,40 +4449,6 @@ export const MODELS = {
} satisfies Model<"openai-codex-responses">,
},
"opencode": {
"alpha-gd4": {
id: "alpha-gd4",
name: "Alpha GD4",
api: "anthropic-messages",
provider: "opencode",
baseUrl: "https://opencode.ai/zen",
reasoning: true,
input: ["text"],
cost: {
input: 0.5,
output: 2,
cacheRead: 0.15,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 32768,
} satisfies Model<"anthropic-messages">,
"alpha-glm-4.7": {
id: "alpha-glm-4.7",
name: "Alpha GLM-4.7",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0.6,
output: 2.2,
cacheRead: 0.6,
cacheWrite: 0,
},
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"big-pickle": {
id: "big-pickle",
name: "Big Pickle",
@@ -4653,6 +4619,23 @@ export const MODELS = {
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"glm-4.7": {
id: "glm-4.7",
name: "GLM-4.7",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0.6,
output: 2.2,
cacheRead: 0.1,
cacheWrite: 0,
},
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"glm-4.7-free": {
id: "glm-4.7-free",
name: "GLM-4.7",
@@ -5636,7 +5619,7 @@ export const MODELS = {
input: 0.09999999999999999,
output: 0.39999999999999997,
cacheRead: 0.024999999999999998,
cacheWrite: 0.0833,
cacheWrite: 0.08333333333333334,
},
contextWindow: 1048576,
maxTokens: 8192,
@@ -5704,7 +5687,7 @@ export const MODELS = {
input: 0.09999999999999999,
output: 0.39999999999999997,
cacheRead: 0.01,
cacheWrite: 0.0833,
cacheWrite: 0.08333333333333334,
},
contextWindow: 1048576,
maxTokens: 65535,
@@ -5721,7 +5704,7 @@ export const MODELS = {
input: 0.09999999999999999,
output: 0.39999999999999997,
cacheRead: 0.01,
cacheWrite: 0.0833,
cacheWrite: 0.08333333333333334,
},
contextWindow: 1048576,
maxTokens: 65535,
@@ -5738,7 +5721,7 @@ export const MODELS = {
input: 0.3,
output: 2.5,
cacheRead: 0.03,
cacheWrite: 0.0833,
cacheWrite: 0.08333333333333334,
},
contextWindow: 1048576,
maxTokens: 65535,
@@ -5771,7 +5754,7 @@ export const MODELS = {
cost: {
input: 1.25,
output: 10,
cacheRead: 0.31,
cacheRead: 0.125,
cacheWrite: 0.375,
},
contextWindow: 1048576,
@@ -5788,7 +5771,7 @@ export const MODELS = {
cost: {
input: 1.25,
output: 10,
cacheRead: 0.31,
cacheRead: 0.125,
cacheWrite: 0.375,
},
contextWindow: 1048576,
@@ -5806,7 +5789,7 @@ export const MODELS = {
input: 0.5,
output: 3,
cacheRead: 0.049999999999999996,
cacheWrite: 0,
cacheWrite: 0.08333333333333334,
},
contextWindow: 1048576,
maxTokens: 65535,
@@ -6093,12 +6076,12 @@ export const MODELS = {
input: ["text"],
cost: {
input: 0.27,
output: 1.12,
output: 1.1,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 196608,
maxTokens: 65536,
maxTokens: 196608,
} satisfies Model<"openai-completions">,
"mistralai/codestral-2508": {
id: "mistralai/codestral-2508",
@@ -7851,23 +7834,6 @@ export const MODELS = {
contextWindow: 131072,
maxTokens: 8192,
} satisfies Model<"openai-completions">,
"qwen/qwen2.5-vl-72b-instruct": {
id: "qwen/qwen2.5-vl-72b-instruct",
name: "Qwen: Qwen2.5 VL 72B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 0.15,
output: 0.6,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 32768,
maxTokens: 32768,
} satisfies Model<"openai-completions">,
"qwen/qwen3-14b": {
id: "qwen/qwen3-14b",
name: "Qwen: Qwen3 14B",
@@ -8117,7 +8083,7 @@ export const MODELS = {
cost: {
input: 0.22,
output: 1.7999999999999998,
cacheRead: 0,
cacheRead: 0.022,
cacheWrite: 0,
},
contextWindow: 262144,
@@ -8952,7 +8918,7 @@ export const MODELS = {
cost: {
input: 1,
output: 5,
cacheRead: 0,
cacheRead: 0.19999999999999998,
cacheWrite: 0,
},
contextWindow: 1000000,
@@ -9305,15 +9271,15 @@ export const MODELS = {
provider: "vercel-ai-gateway",
baseUrl: "https://ai-gateway.vercel.sh",
reasoning: true,
input: ["text", "image"],
input: ["text"],
cost: {
input: 0.3,
output: 2.5,
cacheRead: 0.03,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 1000000,
maxTokens: 64000,
maxTokens: 65536,
} satisfies Model<"anthropic-messages">,
"google/gemini-2.5-flash-lite": {
id: "google/gemini-2.5-flash-lite",
@@ -9373,11 +9339,11 @@ export const MODELS = {
provider: "vercel-ai-gateway",
baseUrl: "https://ai-gateway.vercel.sh",
reasoning: true,
input: ["text", "image"],
input: ["text"],
cost: {
input: 1.25,
output: 10,
cacheRead: 0.125,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 1048576,
@@ -10788,7 +10754,7 @@ export const MODELS = {
cost: {
input: 0.19999999999999998,
output: 1.1,
cacheRead: 0,
cacheRead: 0.03,
cacheWrite: 0,
},
contextWindow: 128000,