mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-17 08:00:59 +00:00
fix: clean up Codex thinking level handling
- Remove per-thinking-level model variants (gpt-5.2-codex-high, etc.) - Remove thinkingLevels from Model type - Provider clamps reasoning effort internally - Omit reasoning field when thinking is off fixes #472
This commit is contained in:
parent
02b72b49d5
commit
0b9e3ada0c
11 changed files with 45 additions and 148 deletions
|
|
@ -1,50 +1,13 @@
|
|||
import { MODELS } from "./models.generated.js";
|
||||
import type { Api, KnownProvider, Model, ReasoningEffort, Usage } from "./types.js";
|
||||
import type { Api, KnownProvider, Model, Usage } from "./types.js";
|
||||
|
||||
// Registry of all known models, keyed by provider name, then by model id.
// Populated once at module load from the generated MODELS table.
const modelRegistry: Map<string, Map<string, Model<Api>>> = new Map();

// Model-id suffixes that encode a Codex "thinking level" variant
// (e.g. "gpt-5.2-codex-high"). Used to detect variants and strip the
// suffix back to the base model id. Matching is case-insensitive.
const CODEX_THINKING_SUFFIXES = ["-none", "-minimal", "-low", "-medium", "-high", "-xhigh"];

// Reasoning-effort levels each base Codex model supports.
// NOTE(review): assumes these keys match the base ids emitted into
// models.generated.js — confirm against the generator.
const CODEX_THINKING_LEVELS: Record<string, ReasoningEffort[]> = {
  "gpt-5.2-codex": ["low", "medium", "high", "xhigh"],
  "gpt-5.1-codex-max": ["low", "medium", "high", "xhigh"],
  "gpt-5.1-codex": ["low", "medium", "high"],
  "gpt-5.1-codex-mini": ["medium", "high"],
  "codex-mini-latest": ["medium", "high"],
  "gpt-5-codex-mini": ["medium", "high"],
  "gpt-5-codex": ["low", "medium", "high"],
};
|
||||
|
||||
function isCodexThinkingVariant(modelId: string): boolean {
|
||||
const normalized = modelId.toLowerCase();
|
||||
return CODEX_THINKING_SUFFIXES.some((suffix) => normalized.endsWith(suffix));
|
||||
}
|
||||
|
||||
function normalizeCodexModelId(modelId: string): string {
|
||||
const normalized = modelId.toLowerCase();
|
||||
for (const suffix of CODEX_THINKING_SUFFIXES) {
|
||||
if (normalized.endsWith(suffix)) {
|
||||
return modelId.slice(0, modelId.length - suffix.length);
|
||||
}
|
||||
}
|
||||
return modelId;
|
||||
}
|
||||
|
||||
function applyCodexThinkingLevels<TApi extends Api>(model: Model<TApi>): Model<TApi> {
|
||||
if (model.provider !== "openai-codex") return model;
|
||||
const thinkingLevels = CODEX_THINKING_LEVELS[model.id];
|
||||
if (!thinkingLevels) return model;
|
||||
return { ...model, thinkingLevels };
|
||||
}
|
||||
|
||||
// Initialize registry from MODELS on module load.
// Codex thinking-level variants (e.g. "gpt-5.2-codex-high") are not
// registered individually: only the base model is stored, annotated with
// its supported levels via applyCodexThinkingLevels.
for (const [provider, models] of Object.entries(MODELS)) {
  const providerModels = new Map<string, Model<Api>>();
  for (const [id, model] of Object.entries(models)) {
    const typedModel = model as Model<Api>;
    // Skip generated per-level variant entries for the Codex provider.
    if (provider === "openai-codex" && isCodexThinkingVariant(typedModel.id)) {
      continue;
    }
    providerModels.set(id, applyCodexThinkingLevels(typedModel));
  }
  modelRegistry.set(provider, providerModels);
}
|
||||
|
|
@ -59,16 +22,7 @@ export function getModel<TProvider extends KnownProvider, TModelId extends keyof
|
|||
modelId: TModelId,
|
||||
): Model<ModelApi<TProvider, TModelId>> {
|
||||
const providerModels = modelRegistry.get(provider);
|
||||
const direct = providerModels?.get(modelId as string);
|
||||
if (direct) return direct as Model<ModelApi<TProvider, TModelId>>;
|
||||
if (provider === "openai-codex") {
|
||||
const normalized = normalizeCodexModelId(modelId as string);
|
||||
const normalizedModel = providerModels?.get(normalized);
|
||||
if (normalizedModel) {
|
||||
return normalizedModel as Model<ModelApi<TProvider, TModelId>>;
|
||||
}
|
||||
}
|
||||
return direct as unknown as Model<ModelApi<TProvider, TModelId>>;
|
||||
return providerModels?.get(modelId as string) as Model<ModelApi<TProvider, TModelId>>;
|
||||
}
|
||||
|
||||
export function getProviders(): KnownProvider[] {
|
||||
|
|
@ -96,12 +50,9 @@ const XHIGH_MODELS = new Set(["gpt-5.1-codex-max", "gpt-5.2", "gpt-5.2-codex"]);
|
|||
|
||||
/**
|
||||
* Check if a model supports xhigh thinking level.
|
||||
* Currently only certain OpenAI models support this.
|
||||
* Currently only certain OpenAI Codex models support this.
|
||||
*/
|
||||
export function supportsXhigh<TApi extends Api>(model: Model<TApi>): boolean {
|
||||
if (model.thinkingLevels) {
|
||||
return model.thinkingLevels.includes("xhigh");
|
||||
}
|
||||
return XHIGH_MODELS.has(model.id);
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue