fix: clean up Codex thinking level handling

- Remove per-thinking-level model variants (gpt-5.2-codex-high, etc.)
- Remove thinkingLevels from Model type
- Provider clamps reasoning effort internally
- Omit reasoning field when thinking is off
- Raise maxTokens from 32768 to 128000 for one openai-completions model
- Add OpenRouter model "tngtech/tng-r1t-chimera:free"

Fixes #472
This commit is contained in:
Mario Zechner 2026-01-05 21:58:26 +01:00
parent 02b72b49d5
commit 0b9e3ada0c
11 changed files with 45 additions and 148 deletions

View file

@ -2781,7 +2781,6 @@ export const MODELS = {
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
thinkingLevels: ["medium","high"],
input: ["text", "image"],
cost: {
input: 0,
@ -2816,7 +2815,6 @@ export const MODELS = {
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
thinkingLevels: ["low","medium","high"],
input: ["text", "image"],
cost: {
input: 0,
@ -2834,7 +2832,6 @@ export const MODELS = {
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
thinkingLevels: ["medium","high"],
input: ["text", "image"],
cost: {
input: 0,
@ -2920,7 +2917,6 @@ export const MODELS = {
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
thinkingLevels: ["low","medium","high"],
input: ["text", "image"],
cost: {
input: 0,
@ -2938,7 +2934,6 @@ export const MODELS = {
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
thinkingLevels: ["low","medium","high","xhigh"],
input: ["text", "image"],
cost: {
input: 0,
@ -2956,7 +2951,6 @@ export const MODELS = {
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
thinkingLevels: ["medium","high"],
input: ["text", "image"],
cost: {
input: 0,
@ -2991,7 +2985,6 @@ export const MODELS = {
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
thinkingLevels: ["low","medium","high","xhigh"],
input: ["text", "image"],
cost: {
input: 0,
@ -4056,7 +4049,7 @@ export const MODELS = {
cacheWrite: 0,
},
contextWindow: 256000,
maxTokens: 32768,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3-70b-instruct": {
id: "meta-llama/llama-3-70b-instruct",
@ -6676,6 +6669,23 @@ export const MODELS = {
contextWindow: 163840,
maxTokens: 65536,
} satisfies Model<"openai-completions">,
"tngtech/tng-r1t-chimera:free": {
id: "tngtech/tng-r1t-chimera:free",
name: "TNG: R1T Chimera (free)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 163840,
maxTokens: 65536,
} satisfies Model<"openai-completions">,
"x-ai/grok-3": {
id: "x-ai/grok-3",
name: "xAI: Grok 3",