feat: implement thinking for some more copilot models (#234)

Signed-off-by: StarLight842 <mail@aadishv.dev>
This commit is contained in:
Aadish Verma 2025-12-18 19:42:23 -08:00 committed by GitHub
parent bab5cddd4d
commit 314ef34ebc
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 49 additions and 16 deletions

View file

@ -318,7 +318,7 @@ async function loadModelsDevData(): Promise<Model<any>[]> {
if (m.status === "deprecated") continue;
// gpt-5 models require responses API, others use completions
const needsResponsesApi = modelId.startsWith("gpt-5");
const needsResponsesApi = modelId.startsWith("gpt-5") || modelId.startsWith("oswe");
const copilotModel: Model<any> = {
id: modelId,
@ -561,4 +561,4 @@ export const MODELS = {
}
// Run the generator
generateModels().catch(console.error);
generateModels().catch(console.error);

View file

@ -2178,6 +2178,23 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"devstral-2512": {
id: "devstral-2512",
name: "Devstral 2",
api: "openai-completions",
provider: "mistral",
baseUrl: "https://api.mistral.ai/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 262144,
} satisfies Model<"openai-completions">,
"ministral-3b-latest": {
id: "ministral-3b-latest",
name: "Ministral 3B",
@ -2238,8 +2255,8 @@ export const MODELS = {
reasoning: false,
input: ["text", "image"],
cost: {
input: 0.1,
output: 0.3,
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
@ -2583,11 +2600,10 @@ export const MODELS = {
"oswe-vscode-prime": {
id: "oswe-vscode-prime",
name: "Raptor Mini (Preview)",
api: "openai-completions",
api: "openai-responses",
provider: "github-copilot",
baseUrl: "https://api.individual.githubcopilot.com",
headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"},
compat: {"supportsStore":false,"supportsDeveloperRole":false,"supportsReasoningEffort":false},
reasoning: true,
input: ["text", "image"],
cost: {
@ -2598,7 +2614,7 @@ export const MODELS = {
},
contextWindow: 200000,
maxTokens: 64000,
} satisfies Model<"openai-completions">,
} satisfies Model<"openai-responses">,
"gpt-5.1-codex-mini": {
id: "gpt-5.1-codex-mini",
name: "GPT-5.1-Codex-mini",
@ -2909,6 +2925,23 @@ export const MODELS = {
contextWindow: 256000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"nvidia/nemotron-3-nano-30b-a3b": {
id: "nvidia/nemotron-3-nano-30b-a3b",
name: "NVIDIA: Nemotron 3 Nano 30B A3B",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0.06,
output: 0.24,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-5.2-chat": {
id: "openai/gpt-5.2-chat",
name: "OpenAI: GPT-5.2 Chat",
@ -3190,13 +3223,13 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.26,
input: 0.24,
output: 0.38,
cacheRead: 0,
cacheRead: 0.19,
cacheWrite: 0,
},
contextWindow: 163840,
maxTokens: 65536,
maxTokens: 163840,
} satisfies Model<"openai-completions">,
"prime-intellect/intellect-3": {
id: "prime-intellect/intellect-3",
@ -5451,13 +5484,13 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.15,
output: 0.75,
cacheRead: 0,
input: 0.2,
output: 0.88,
cacheRead: 0.106,
cacheWrite: 0,
},
contextWindow: 8192,
maxTokens: 7168,
contextWindow: 163840,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"mistralai/mistral-small-3.1-24b-instruct:free": {
id: "mistralai/mistral-small-3.1-24b-instruct:free",

View file

@ -194,7 +194,7 @@ export const streamOpenAICompletions: StreamFunction<"openai-completions"> = (
// Some endpoints return reasoning in reasoning_content (llama.cpp),
// or reasoning (other openai compatible endpoints)
const reasoningFields = ["reasoning_content", "reasoning"];
const reasoningFields = ["reasoning_content", "reasoning", "reasoning_text"];
for (const field of reasoningFields) {
if (
(choice.delta as any)[field] !== null &&