Add [Unreleased] section for next cycle

This commit is contained in:
Mario Zechner 2026-02-26 01:10:55 +01:00
parent a753e24135
commit 9a0a8d7ccb
7 changed files with 119 additions and 5 deletions

View file

@@ -1,5 +1,7 @@
# Changelog
## [Unreleased]
## [0.55.1] - 2026-02-26
## [0.55.0] - 2026-02-24

View file

@@ -1,5 +1,7 @@
# Changelog
## [Unreleased]
## [0.55.1] - 2026-02-26
### Added

View file

@@ -7074,6 +7074,23 @@ export const MODELS = {
contextWindow: 1048576,
maxTokens: 65536,
} satisfies Model<"openai-completions">,
// OpenRouter catalog entry: Gemini 3.1 Pro Preview (custom-tools variant).
// Cost figures are presumably USD per 1M tokens — TODO confirm against the generator.
"google/gemini-3.1-pro-preview-customtools": {
	id: "google/gemini-3.1-pro-preview-customtools",
	name: "Google: Gemini 3.1 Pro Preview Custom Tools",
	api: "openai-completions",
	provider: "openrouter",
	baseUrl: "https://openrouter.ai/api/v1",
	reasoning: true,
	input: ["text", "image"],
	cost: {
		input: 2,
		output: 12,
		// 0.19999999999999998 was float-arithmetic noise (per-token price × 1e6);
		// the intended value is 0.2 — round at generation time.
		cacheRead: 0.2,
		cacheWrite: 0.375,
	},
	contextWindow: 1048576,
	maxTokens: 65536,
} satisfies Model<"openai-completions">,
"google/gemma-3-27b-it": {
id: "google/gemma-3-27b-it",
name: "Google: Gemma 3 27B",
@@ -7159,6 +7176,23 @@ export const MODELS = {
contextWindow: 256000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
// OpenRouter catalog entry: Meituan LongCat Flash Chat (non-reasoning, text-only).
// Cost figures are presumably USD per 1M tokens — TODO confirm against the generator.
"meituan/longcat-flash-chat": {
	id: "meituan/longcat-flash-chat",
	name: "Meituan: LongCat Flash Chat",
	api: "openai-completions",
	provider: "openrouter",
	baseUrl: "https://openrouter.ai/api/v1",
	reasoning: false,
	input: ["text"],
	cost: {
		// 0.19999999999999998 / 0.7999999999999999 were float-arithmetic noise
		// (per-token price × 1e6); intended values are 0.2 / 0.8.
		input: 0.2,
		output: 0.8,
		cacheRead: 0.2,
		cacheWrite: 0,
	},
	contextWindow: 131072,
	maxTokens: 32768,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3-8b-instruct": {
id: "meta-llama/llama-3-8b-instruct",
name: "Meta: Llama 3 8B Instruct",
@@ -9522,6 +9556,57 @@ export const MODELS = {
contextWindow: 131072,
maxTokens: 32768,
} satisfies Model<"openai-completions">,
// OpenRouter catalog entry: Qwen3.5-122B-A10B (reasoning, text+image input).
// Cost figures are presumably USD per 1M tokens — TODO confirm against the generator.
"qwen/qwen3.5-122b-a10b": {
	id: "qwen/qwen3.5-122b-a10b",
	name: "Qwen: Qwen3.5-122B-A10B",
	api: "openai-completions",
	provider: "openrouter",
	baseUrl: "https://openrouter.ai/api/v1",
	reasoning: true,
	input: ["text", "image"],
	cost: {
		// 0.39999999999999997 / 3.1999999999999997 were float-arithmetic noise
		// (per-token price × 1e6); intended values are 0.4 / 3.2.
		input: 0.4,
		output: 3.2,
		cacheRead: 0,
		cacheWrite: 0,
	},
	contextWindow: 262144,
	maxTokens: 65536,
} satisfies Model<"openai-completions">,
// OpenRouter catalog entry: Qwen3.5-27B (reasoning, text+image input).
// NOTE(review): cost figures are presumably USD per 1M tokens — confirm against the generator.
"qwen/qwen3.5-27b": {
id: "qwen/qwen3.5-27b",
name: "Qwen: Qwen3.5-27B",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.3,
output: 2.4,
// zero cache pricing: provider reports no prompt-cache rates for this model
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 65536,
} satisfies Model<"openai-completions">,
// OpenRouter catalog entry: Qwen3.5-35B-A3B (reasoning, text+image input).
// NOTE(review): cost figures are presumably USD per 1M tokens — confirm against the generator.
"qwen/qwen3.5-35b-a3b": {
id: "qwen/qwen3.5-35b-a3b",
name: "Qwen: Qwen3.5-35B-A3B",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.25,
output: 2,
// zero cache pricing: provider reports no prompt-cache rates for this model
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 65536,
} satisfies Model<"openai-completions">,
"qwen/qwen3.5-397b-a17b": {
id: "qwen/qwen3.5-397b-a17b",
name: "Qwen: Qwen3.5 397B A17B",
@@ -9539,6 +9624,23 @@ export const MODELS = {
contextWindow: 262144,
maxTokens: 65536,
} satisfies Model<"openai-completions">,
// OpenRouter catalog entry: Qwen3.5-Flash (2026-02-23 snapshot; reasoning, text+image).
// Cost figures are presumably USD per 1M tokens — TODO confirm against the generator.
"qwen/qwen3.5-flash-02-23": {
	id: "qwen/qwen3.5-flash-02-23",
	name: "Qwen: Qwen3.5-Flash",
	api: "openai-completions",
	provider: "openrouter",
	baseUrl: "https://openrouter.ai/api/v1",
	reasoning: true,
	input: ["text", "image"],
	cost: {
		// 0.09999999999999999 / 0.39999999999999997 were float-arithmetic noise
		// (per-token price × 1e6); intended values are 0.1 / 0.4.
		input: 0.1,
		output: 0.4,
		cacheRead: 0,
		cacheWrite: 0,
	},
	contextWindow: 1000000,
	maxTokens: 65536,
} satisfies Model<"openai-completions">,
"qwen/qwen3.5-plus-02-15": {
id: "qwen/qwen3.5-plus-02-15",
name: "Qwen: Qwen3.5 Plus 2026-02-15",
@@ -10027,7 +10129,7 @@ export const MODELS = {
input: 0.3,
output: 1.4,
cacheRead: 0.15,
cacheWrite: 0.6,
cacheWrite: 0,
},
contextWindow: 202752,
maxTokens: 4096,
@@ -11464,7 +11566,7 @@ export const MODELS = {
} satisfies Model<"anthropic-messages">,
"openai/gpt-5-chat": {
id: "openai/gpt-5-chat",
name: "GPT-5 Chat",
name: "GPT 5 Chat",
api: "anthropic-messages",
provider: "vercel-ai-gateway",
baseUrl: "https://ai-gateway.vercel.sh",
@@ -11583,7 +11685,7 @@ export const MODELS = {
} satisfies Model<"anthropic-messages">,
"openai/gpt-5.1-codex-mini": {
id: "openai/gpt-5.1-codex-mini",
name: "GPT-5.1 Codex mini",
name: "GPT 5.1 Codex Mini",
api: "anthropic-messages",
provider: "vercel-ai-gateway",
baseUrl: "https://ai-gateway.vercel.sh",
@@ -11651,7 +11753,7 @@ export const MODELS = {
} satisfies Model<"anthropic-messages">,
"openai/gpt-5.2-chat": {
id: "openai/gpt-5.2-chat",
name: "GPT-5.2 Chat",
name: "GPT 5.2 Chat",
api: "anthropic-messages",
provider: "vercel-ai-gateway",
baseUrl: "https://ai-gateway.vercel.sh",
@@ -11668,7 +11770,7 @@ export const MODELS = {
} satisfies Model<"anthropic-messages">,
"openai/gpt-5.2-codex": {
id: "openai/gpt-5.2-codex",
name: "GPT-5.2-Codex",
name: "GPT 5.2 Codex",
api: "anthropic-messages",
provider: "vercel-ai-gateway",
baseUrl: "https://ai-gateway.vercel.sh",

View file

@@ -1,5 +1,7 @@
# Changelog
## [Unreleased]
## [0.55.1] - 2026-02-26
### New Features

View file

@@ -1,5 +1,7 @@
# Changelog
## [Unreleased]
## [0.55.1] - 2026-02-26
## [0.55.0] - 2026-02-24

View file

@@ -1,5 +1,7 @@
# Changelog
## [Unreleased]
## [0.55.1] - 2026-02-26
### Fixed

View file

@@ -1,5 +1,7 @@
# Changelog
## [Unreleased]
## [0.55.1] - 2026-02-26
## [0.55.0] - 2026-02-24