mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-20 20:01:06 +00:00
fix(ai): clean up openai-codex models and token limits
- Remove model aliases (`gpt-5`, `gpt-5-mini`, `gpt-5-nano`, `codex-mini-latest`, `gpt-5-codex`, `gpt-5.1-codex`, `gpt-5.1-chat-latest`)
- Fix context window from 400k to 272k tokens to match Codex CLI defaults
- Keep maxTokens at 128k (original value)
- Simplify reasoning effort clamping

Closes #536
This commit is contained in:
parent
d893ba7f20
commit
39fa25eb67
3 changed files with 11 additions and 8 deletions
|
|
@@ -2,9 +2,13 @@
|
||||||
|
|
||||||
## [Unreleased]
|
## [Unreleased]
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
- Removed OpenAI Codex model aliases (`gpt-5`, `gpt-5-mini`, `gpt-5-nano`, `codex-mini-latest`, `gpt-5-codex`, `gpt-5.1-codex`, `gpt-5.1-chat-latest`). Use canonical model IDs: `gpt-5.1`, `gpt-5.1-codex-max`, `gpt-5.1-codex-mini`, `gpt-5.2`, `gpt-5.2-codex`. ([#536](https://github.com/badlogic/pi-mono/pull/536) by [@ghoulr](https://github.com/ghoulr))
|
||||||
|
|
||||||
### Fixed
|
### Fixed
|
||||||
|
|
||||||
- Fixed OpenAI Codex OAuth model list (removed aliases), aligned context window/maxTokens with observed backend limits, and refined reasoning effort clamping.
|
- Fixed OpenAI Codex context window from 400,000 to 272,000 tokens to match Codex CLI defaults and prevent 400 errors. ([#536](https://github.com/badlogic/pi-mono/pull/536) by [@ghoulr](https://github.com/ghoulr))
|
||||||
|
|
||||||
## [0.37.8] - 2026-01-07
|
## [0.37.8] - 2026-01-07
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@@ -447,8 +447,7 @@ async function generateModels() {
|
||||||
// Context window is based on observed server limits (400s above ~272k), not marketing numbers.
|
// Context window is based on observed server limits (400s above ~272k), not marketing numbers.
|
||||||
const CODEX_BASE_URL = "https://chatgpt.com/backend-api";
|
const CODEX_BASE_URL = "https://chatgpt.com/backend-api";
|
||||||
const CODEX_CONTEXT = 272000;
|
const CODEX_CONTEXT = 272000;
|
||||||
// Use the same max output token budget as Codex CLI.
|
const CODEX_MAX_TOKENS = 128000;
|
||||||
const CODEX_MAX_TOKENS = 10000;
|
|
||||||
const codexModels: Model<"openai-codex-responses">[] = [
|
const codexModels: Model<"openai-codex-responses">[] = [
|
||||||
{
|
{
|
||||||
id: "gpt-5.1",
|
id: "gpt-5.1",
|
||||||
|
|
|
||||||
|
|
@@ -2806,7 +2806,7 @@ export const MODELS = {
|
||||||
cacheWrite: 0,
|
cacheWrite: 0,
|
||||||
},
|
},
|
||||||
contextWindow: 272000,
|
contextWindow: 272000,
|
||||||
maxTokens: 10000,
|
maxTokens: 128000,
|
||||||
} satisfies Model<"openai-codex-responses">,
|
} satisfies Model<"openai-codex-responses">,
|
||||||
"gpt-5.1-codex-max": {
|
"gpt-5.1-codex-max": {
|
||||||
id: "gpt-5.1-codex-max",
|
id: "gpt-5.1-codex-max",
|
||||||
|
|
@@ -2823,7 +2823,7 @@ export const MODELS = {
|
||||||
cacheWrite: 0,
|
cacheWrite: 0,
|
||||||
},
|
},
|
||||||
contextWindow: 272000,
|
contextWindow: 272000,
|
||||||
maxTokens: 10000,
|
maxTokens: 128000,
|
||||||
} satisfies Model<"openai-codex-responses">,
|
} satisfies Model<"openai-codex-responses">,
|
||||||
"gpt-5.1-codex-mini": {
|
"gpt-5.1-codex-mini": {
|
||||||
id: "gpt-5.1-codex-mini",
|
id: "gpt-5.1-codex-mini",
|
||||||
|
|
@@ -2840,7 +2840,7 @@ export const MODELS = {
|
||||||
cacheWrite: 0,
|
cacheWrite: 0,
|
||||||
},
|
},
|
||||||
contextWindow: 272000,
|
contextWindow: 272000,
|
||||||
maxTokens: 10000,
|
maxTokens: 128000,
|
||||||
} satisfies Model<"openai-codex-responses">,
|
} satisfies Model<"openai-codex-responses">,
|
||||||
"gpt-5.2": {
|
"gpt-5.2": {
|
||||||
id: "gpt-5.2",
|
id: "gpt-5.2",
|
||||||
|
|
@@ -2857,7 +2857,7 @@ export const MODELS = {
|
||||||
cacheWrite: 0,
|
cacheWrite: 0,
|
||||||
},
|
},
|
||||||
contextWindow: 272000,
|
contextWindow: 272000,
|
||||||
maxTokens: 10000,
|
maxTokens: 128000,
|
||||||
} satisfies Model<"openai-codex-responses">,
|
} satisfies Model<"openai-codex-responses">,
|
||||||
"gpt-5.2-codex": {
|
"gpt-5.2-codex": {
|
||||||
id: "gpt-5.2-codex",
|
id: "gpt-5.2-codex",
|
||||||
|
|
@@ -2874,7 +2874,7 @@ export const MODELS = {
|
||||||
cacheWrite: 0,
|
cacheWrite: 0,
|
||||||
},
|
},
|
||||||
contextWindow: 272000,
|
contextWindow: 272000,
|
||||||
maxTokens: 10000,
|
maxTokens: 128000,
|
||||||
} satisfies Model<"openai-codex-responses">,
|
} satisfies Model<"openai-codex-responses">,
|
||||||
},
|
},
|
||||||
"openrouter": {
|
"openrouter": {
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue