fix: add accurate pricing for openai-codex OAuth models (#501)

Previously all openai-codex models had pricing set to 0, causing the
TUI to always show $0.00 for cost tracking.

Updated pricing based on OpenAI Standard tier rates:
- gpt-5.2/gpt-5.2-codex: $1.75/$14.00 per 1M tokens
- gpt-5.1/gpt-5.1-codex/gpt-5.1-codex-max: $1.25/$10.00 per 1M tokens
- gpt-5/gpt-5-codex: $1.25/$10.00 per 1M tokens
- codex-mini-latest: $1.50/$6.00 per 1M tokens
- gpt-5-mini/gpt-5.1-codex-mini/gpt-5-codex-mini: $0.25/$2.00 per 1M tokens
- gpt-5-nano: $0.05/$0.40 per 1M tokens

Source: https://platform.openai.com/docs/pricing
This commit is contained in:
Ben Vargas 2026-01-06 09:45:09 -07:00 committed by GitHub
parent a236e62025
commit e80a924292
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 54 additions and 105 deletions

View file

@@ -2783,9 +2783,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
input: 1.5,
output: 6,
cacheRead: 0.375,
cacheWrite: 0,
},
contextWindow: 400000,
@@ -2800,9 +2800,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
input: 1.25,
output: 10,
cacheRead: 0.125,
cacheWrite: 0,
},
contextWindow: 400000,
@@ -2817,9 +2817,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
input: 1.25,
output: 10,
cacheRead: 0.125,
cacheWrite: 0,
},
contextWindow: 400000,
@@ -2834,9 +2834,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
input: 0.25,
output: 2,
cacheRead: 0.025,
cacheWrite: 0,
},
contextWindow: 400000,
@@ -2851,9 +2851,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
input: 0.25,
output: 2,
cacheRead: 0.025,
cacheWrite: 0,
},
contextWindow: 400000,
@@ -2868,9 +2868,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
input: 0.05,
output: 0.4,
cacheRead: 0.005,
cacheWrite: 0,
},
contextWindow: 400000,
@@ -2885,9 +2885,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
input: 1.25,
output: 10,
cacheRead: 0.125,
cacheWrite: 0,
},
contextWindow: 400000,
@@ -2902,9 +2902,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
input: 1.25,
output: 10,
cacheRead: 0.125,
cacheWrite: 0,
},
contextWindow: 400000,
@@ -2919,9 +2919,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
input: 1.25,
output: 10,
cacheRead: 0.125,
cacheWrite: 0,
},
contextWindow: 400000,
@@ -2936,9 +2936,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
input: 1.25,
output: 10,
cacheRead: 0.125,
cacheWrite: 0,
},
contextWindow: 400000,
@@ -2953,9 +2953,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
input: 0.25,
output: 2,
cacheRead: 0.025,
cacheWrite: 0,
},
contextWindow: 400000,
@@ -2970,9 +2970,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
input: 1.75,
output: 14,
cacheRead: 0.175,
cacheWrite: 0,
},
contextWindow: 400000,
@@ -2987,9 +2987,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
input: 1.75,
output: 14,
cacheRead: 0.175,
cacheWrite: 0,
},
contextWindow: 400000,
@@ -4204,57 +4204,6 @@ export const MODELS = {
contextWindow: 327680,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"microsoft/phi-3-medium-128k-instruct": {
id: "microsoft/phi-3-medium-128k-instruct",
name: "Microsoft: Phi-3 Medium 128K Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 1,
output: 1,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"microsoft/phi-3-mini-128k-instruct": {
id: "microsoft/phi-3-mini-128k-instruct",
name: "Microsoft: Phi-3 Mini 128K Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.09999999999999999,
output: 0.09999999999999999,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"microsoft/phi-3.5-mini-128k-instruct": {
id: "microsoft/phi-3.5-mini-128k-instruct",
name: "Microsoft: Phi-3.5 Mini 128K Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.09999999999999999,
output: 0.09999999999999999,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"minimax/minimax-m1": {
id: "minimax/minimax-m1",
name: "MiniMax: MiniMax M1",
@@ -6952,11 +6901,11 @@ export const MODELS = {
cost: {
input: 0.3,
output: 0.8999999999999999,
cacheRead: 0.049999999999999996,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 24000,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"z-ai/glm-4.7": {
id: "z-ai/glm-4.7",