Add [Unreleased] section for next cycle

This commit is contained in:
Mario Zechner 2026-01-30 03:27:09 +01:00
parent 3f5fe42d37
commit 2cee7e17de
7 changed files with 79 additions and 67 deletions

View file

@@ -1,5 +1,7 @@
# Changelog
## [Unreleased]
## [0.50.4] - 2026-01-30
### Added

View file

@@ -3671,8 +3671,8 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 0,
output: 0,
input: 0.4,
output: 2,
cacheRead: 0,
cacheWrite: 0,
},
@@ -4926,6 +4926,23 @@ export const MODELS = {
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"glm-4.7-free": {
id: "glm-4.7-free",
name: "GLM-4.7 Free",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"gpt-5": {
id: "gpt-5",
name: "GPT-5",
@@ -5124,7 +5141,24 @@ export const MODELS = {
cost: {
input: 0.6,
output: 3,
cacheRead: 0.1,
cacheRead: 0.08,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 262144,
} satisfies Model<"openai-completions">,
"kimi-k2.5-free": {
id: "kimi-k2.5-free",
name: "Kimi K2.5 Free",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
@@ -5147,6 +5181,23 @@ export const MODELS = {
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"minimax-m2.1-free": {
id: "minimax-m2.1-free",
name: "MiniMax M2.1 Free",
api: "anthropic-messages",
provider: "opencode",
baseUrl: "https://opencode.ai/zen",
reasoning: true,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"anthropic-messages">,
"qwen3-coder": {
id: "qwen3-coder",
name: "Qwen3 Coder",
@@ -5914,23 +5965,6 @@ export const MODELS = {
contextWindow: 1048576,
maxTokens: 8192,
} satisfies Model<"openai-completions">,
"google/gemini-2.0-flash-exp:free": {
id: "google/gemini-2.0-flash-exp:free",
name: "Google: Gemini 2.0 Flash Experimental (free)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 1048576,
maxTokens: 8192,
} satisfies Model<"openai-completions">,
"google/gemini-2.0-flash-lite-001": {
id: "google/gemini-2.0-flash-lite-001",
name: "Google: Gemini 2.0 Flash Lite",
@@ -6926,13 +6960,13 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.5700000000000001,
output: 2.8499999999999996,
input: 0.5,
output: 2.8,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 262144,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"nex-agi/deepseek-v3.1-nex-n1": {
id: "nex-agi/deepseek-v3.1-nex-n1",
@@ -9263,7 +9297,7 @@ export const MODELS = {
cacheWrite: 0,
},
contextWindow: 256000,
maxTokens: 256000,
maxTokens: 65536,
} satisfies Model<"anthropic-messages">,
"anthropic/claude-3-haiku": {
id: "anthropic/claude-3-haiku",
@@ -10175,9 +10209,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 1.2,
output: 1.2,
cacheRead: 0.6,
input: 0.6,
output: 3,
cacheRead: 0.09999999999999999,
cacheWrite: 0,
},
contextWindow: 256000,
@@ -10296,7 +10330,7 @@ export const MODELS = {
cost: {
input: 0.09999999999999999,
output: 0.39999999999999997,
cacheRead: 0.024999999999999998,
cacheRead: 0.03,
cacheWrite: 0,
},
contextWindow: 1047576,
@@ -10500,7 +10534,7 @@ export const MODELS = {
cost: {
input: 1.25,
output: 10,
cacheRead: 0.125,
cacheRead: 0.13,
cacheWrite: 0,
},
contextWindow: 128000,
@@ -10517,7 +10551,7 @@ export const MODELS = {
cost: {
input: 1.25,
output: 10,
cacheRead: 0.125,
cacheRead: 0.13,
cacheWrite: 0,
},
contextWindow: 400000,
@@ -10525,7 +10559,7 @@ export const MODELS = {
} satisfies Model<"anthropic-messages">,
"openai/gpt-5.2": {
id: "openai/gpt-5.2",
name: "GPT-5.2",
name: "GPT 5.2",
api: "anthropic-messages",
provider: "vercel-ai-gateway",
baseUrl: "https://ai-gateway.vercel.sh",
@@ -10534,7 +10568,7 @@ export const MODELS = {
cost: {
input: 1.75,
output: 14,
cacheRead: 0.175,
cacheRead: 0.18,
cacheWrite: 0,
},
contextWindow: 400000,
@@ -10795,40 +10829,6 @@ export const MODELS = {
contextWindow: 131072,
maxTokens: 131072,
} satisfies Model<"anthropic-messages">,
"stealth/sonoma-dusk-alpha": {
id: "stealth/sonoma-dusk-alpha",
name: "Sonoma Dusk Alpha",
api: "anthropic-messages",
provider: "vercel-ai-gateway",
baseUrl: "https://ai-gateway.vercel.sh",
reasoning: false,
input: ["text", "image"],
cost: {
input: 0.19999999999999998,
output: 0.5,
cacheRead: 0.049999999999999996,
cacheWrite: 0,
},
contextWindow: 2000000,
maxTokens: 131072,
} satisfies Model<"anthropic-messages">,
"stealth/sonoma-sky-alpha": {
id: "stealth/sonoma-sky-alpha",
name: "Sonoma Sky Alpha",
api: "anthropic-messages",
provider: "vercel-ai-gateway",
baseUrl: "https://ai-gateway.vercel.sh",
reasoning: false,
input: ["text", "image"],
cost: {
input: 0.19999999999999998,
output: 0.5,
cacheRead: 0.049999999999999996,
cacheWrite: 0,
},
contextWindow: 2000000,
maxTokens: 131072,
} satisfies Model<"anthropic-messages">,
"vercel/v0-1.0-md": {
id: "vercel/v0-1.0-md",
name: "v0-1.0-md",
@@ -11116,7 +11116,7 @@ export const MODELS = {
cacheWrite: 0,
},
contextWindow: 65536,
maxTokens: 66000,
maxTokens: 16384,
} satisfies Model<"anthropic-messages">,
"zai/glm-4.6": {
id: "zai/glm-4.6",