Update AGENTS.md

This commit is contained in:
Mario Zechner 2026-03-04 18:06:19 +01:00
parent f710c2705d
commit 8e157412a6
2 changed files with 129 additions and 36 deletions

View file

@@ -6583,6 +6583,23 @@ export const MODELS = {
contextWindow: 131000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
// OpenRouter-hosted listing for Arcee AI's Trinity Mini (paid tier; a
// separate ":free" variant is declared immediately after this entry).
"arcee-ai/trinity-mini": {
id: "arcee-ai/trinity-mini",
name: "Arcee AI: Trinity Mini",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: true,
input: ["text"], // text-only; no image input
cost: {
// Prices presumably USD per 1M tokens — TODO confirm against provider docs.
input: 0.045,
output: 0.15,
cacheRead: 0, // no prompt-cache pricing listed for this model
cacheWrite: 0,
},
contextWindow: 131072,
// NOTE(review): maxTokens equals the full context window — most entries in
// this catalog cap output well below the window; verify against OpenRouter.
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"arcee-ai/trinity-mini:free": {
id: "arcee-ai/trinity-mini:free",
name: "Arcee AI: Trinity Mini (free)",
@@ -7093,6 +7110,23 @@ export const MODELS = {
contextWindow: 1048576,
maxTokens: 65536,
} satisfies Model<"openai-completions">,
// OpenRouter-hosted listing for Google's Gemini 3.1 Flash Lite Preview.
"google/gemini-3.1-flash-lite-preview": {
id: "google/gemini-3.1-flash-lite-preview",
name: "Google: Gemini 3.1 Flash Lite Preview",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: true,
input: ["text", "image"],
cost: {
// Prices presumably USD per 1M tokens — TODO confirm against provider docs.
input: 0.25,
output: 1.5,
// Cleaned binary floating-point noise left by the upstream generator:
// 0.024999999999999998 is 10% of the input price, i.e. 0.025, and
// 0.08333333333333334 is one ulp off 1/12 (0.08333333333333333).
cacheRead: 0.025,
cacheWrite: 0.08333333333333333,
},
contextWindow: 1048576,
maxTokens: 65536,
} satisfies Model<"openai-completions">,
"google/gemini-3.1-pro-preview": {
id: "google/gemini-3.1-pro-preview",
name: "Google: Gemini 3.1 Pro Preview",
@@ -7171,8 +7205,8 @@ export const MODELS = {
input: ["text"],
cost: {
input: 0.25,
output: 1,
cacheRead: 0,
output: 0.75,
cacheRead: 0.024999999999999998,
cacheWrite: 0,
},
contextWindow: 128000,
@@ -7188,8 +7222,8 @@ export const MODELS = {
input: ["text"],
cost: {
input: 0.25,
output: 1,
cacheRead: 0,
output: 0.75,
cacheRead: 0.024999999999999998,
cacheWrite: 0,
},
contextWindow: 128000,
@@ -8640,6 +8674,23 @@ export const MODELS = {
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
// OpenRouter-hosted listing for OpenAI's GPT-5.3 Chat.
"openai/gpt-5.3-chat": {
id: "openai/gpt-5.3-chat",
name: "OpenAI: GPT-5.3 Chat",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
// NOTE(review): the vercel-ai-gateway entry for this same model declares
// reasoning: true — one of the two is likely stale; confirm which is right.
reasoning: false,
input: ["text", "image"],
cost: {
// Prices presumably USD per 1M tokens — TODO confirm against provider docs.
input: 1.75,
output: 14,
cacheRead: 0.175, // 10% of the input price
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"openai/gpt-5.3-codex": {
id: "openai/gpt-5.3-codex",
name: "OpenAI: GPT-5.3-Codex",
@@ -9006,9 +9057,9 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 1.5999999999999999,
output: 6.3999999999999995,
cacheRead: 0.32,
input: 1.04,
output: 4.16,
cacheRead: 0.20800000000000002,
cacheWrite: 0,
},
contextWindow: 32768,
@@ -9040,8 +9091,8 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 0.39999999999999997,
output: 1.2,
input: 0.26,
output: 0.78,
cacheRead: 0,
cacheWrite: 0,
},
@@ -9057,8 +9108,8 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.39999999999999997,
output: 1.2,
input: 0.26,
output: 0.78,
cacheRead: 0,
cacheWrite: 0,
},
@@ -9074,9 +9125,9 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 0.049999999999999996,
output: 0.19999999999999998,
cacheRead: 0.01,
input: 0.0325,
output: 0.13,
cacheRead: 0.006500000000000001,
cacheWrite: 0,
},
contextWindow: 131072,
@@ -9312,9 +9363,9 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 0.3,
output: 1.5,
cacheRead: 0.06,
input: 0.195,
output: 0.975,
cacheRead: 0.039,
cacheWrite: 0,
},
contextWindow: 1000000,
@@ -9346,9 +9397,9 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 1,
output: 5,
cacheRead: 0.19999999999999998,
input: 0.65,
output: 3.25,
cacheRead: 0.13,
cacheWrite: 0,
},
contextWindow: 1000000,
@@ -9414,8 +9465,8 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 1.2,
output: 6,
input: 0.78,
output: 3.9,
cacheRead: 0,
cacheWrite: 0,
},
@@ -9601,9 +9652,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.3,
output: 2.4,
cacheRead: 0.3,
input: 0.26,
output: 2.08,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
@@ -9618,9 +9669,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.27,
output: 2.16,
cacheRead: 0.27,
input: 0.195,
output: 1.56,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
@@ -9635,9 +9686,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.22499999999999998,
output: 1.7999999999999998,
cacheRead: 0.22499999999999998,
input: 0.1625,
output: 1.3,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
@@ -9652,9 +9703,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.55,
output: 3.5,
cacheRead: 0.55,
input: 0.39,
output: 2.34,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
@@ -9686,8 +9737,8 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.39999999999999997,
output: 2.4,
input: 0.26,
output: 1.56,
cacheRead: 0,
cacheWrite: 0,
},
@@ -10920,6 +10971,23 @@ export const MODELS = {
contextWindow: 1000000,
maxTokens: 64000,
} satisfies Model<"anthropic-messages">,
// Vercel AI Gateway listing for Gemini 3.1 Flash Lite Preview, exposed via the
// Anthropic Messages API surface.
"google/gemini-3.1-flash-lite-preview": {
id: "google/gemini-3.1-flash-lite-preview",
name: "Gemini 3.1 Flash Lite Preview",
api: "anthropic-messages",
provider: "vercel-ai-gateway",
baseUrl: "https://ai-gateway.vercel.sh",
reasoning: true,
input: ["text", "image"],
cost: {
// Prices presumably USD per 1M tokens — TODO confirm against provider docs.
input: 0.25,
output: 1.5,
// NOTE(review): the OpenRouter entry for this model lists non-zero
// cacheRead/cacheWrite prices; the zeros here may be stale — confirm.
cacheRead: 0,
cacheWrite: 0,
},
// NOTE(review): OpenRouter lists 1048576 / 65536 for the same model; the
// rounded 1000000 / 65000 here is presumably gateway-reported — verify.
contextWindow: 1000000,
maxTokens: 65000,
} satisfies Model<"anthropic-messages">,
"google/gemini-3.1-pro-preview": {
id: "google/gemini-3.1-pro-preview",
name: "Gemini 3.1 Pro Preview",
@@ -11838,6 +11906,23 @@ export const MODELS = {
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"anthropic-messages">,
// Vercel AI Gateway listing for OpenAI's GPT-5.3 Chat, exposed via the
// Anthropic Messages API surface.
"openai/gpt-5.3-chat": {
id: "openai/gpt-5.3-chat",
name: "GPT-5.3 Chat",
api: "anthropic-messages",
provider: "vercel-ai-gateway",
baseUrl: "https://ai-gateway.vercel.sh",
// NOTE(review): the OpenRouter entry for this same model declares
// reasoning: false — one of the two is likely stale; confirm which is right.
reasoning: true,
input: ["text", "image"],
cost: {
// Prices presumably USD per 1M tokens — TODO confirm against provider docs.
input: 1.75,
output: 14,
cacheRead: 0.175, // 10% of the input price
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"anthropic-messages">,
"openai/gpt-5.3-codex": {
id: "openai/gpt-5.3-codex",
name: "GPT 5.3 Codex",