fix(ai): clean up openai-codex models and token limits

- Remove model aliases (gpt-5, gpt-5-mini, gpt-5-nano, codex-mini-latest, gpt-5-codex, gpt-5.1-codex, gpt-5.1-chat-latest)
- Fix context window from 400k to 272k tokens to match Codex CLI defaults
- Keep maxTokens at 128k (original value)
- Simplify reasoning effort clamping

closes #536
This commit is contained in:
Mario Zechner 2026-01-07 20:39:46 +01:00
parent d893ba7f20
commit 39fa25eb67
3 changed files with 11 additions and 8 deletions

View file

@@ -447,8 +447,7 @@ async function generateModels() {
// Context window is based on observed server limits (HTTP 400 responses above ~272k tokens), not marketing numbers.
const CODEX_BASE_URL = "https://chatgpt.com/backend-api";
const CODEX_CONTEXT = 272000;
// Use the same max output token budget as Codex CLI.
-const CODEX_MAX_TOKENS = 10000;
+const CODEX_MAX_TOKENS = 128000;
const codexModels: Model<"openai-codex-responses">[] = [
{
id: "gpt-5.1",