diff --git a/packages/ai/CHANGELOG.md b/packages/ai/CHANGELOG.md index 19bbc883..352208fd 100644 --- a/packages/ai/CHANGELOG.md +++ b/packages/ai/CHANGELOG.md @@ -4,6 +4,8 @@ ### Added +- **GitHub Copilot provider**: Added `github-copilot` as a known provider with models sourced from models.dev. Includes Claude, GPT, Gemini, Grok, and other models available through GitHub Copilot. ([#191](https://github.com/badlogic/pi-mono/pull/191) by [@cau1k](https://github.com/cau1k)) + - **Gemini 3 Pro thinking levels**: Thinking level configuration now works correctly for Gemini 3 Pro models. Previously all levels mapped to -1 (minimal thinking). Now LOW/MEDIUM/HIGH properly control test-time computation. ([#176](https://github.com/badlogic/pi-mono/pull/176) by [@markusylisiurunen](https://github.com/markusylisiurunen)) ## [0.18.2] - 2025-12-11 diff --git a/packages/ai/scripts/generate-models.ts b/packages/ai/scripts/generate-models.ts index 76cd2d5c..4b8e508c 100644 --- a/packages/ai/scripts/generate-models.ts +++ b/packages/ai/scripts/generate-models.ts @@ -31,219 +31,11 @@ interface ModelsDevModel { const COPILOT_STATIC_HEADERS = { "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", + "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": "copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", + "Copilot-Integration-Id": "vscode-chat", } as const; -function getCopilotTokenFromEnv(): string | null { - return process.env.COPILOT_GITHUB_TOKEN || process.env.GH_TOKEN || process.env.GITHUB_TOKEN || null; -} - -function isCopilotModelDeprecated(model: Record): boolean { - const deprecated = model.deprecated; - if (deprecated === true) return true; - if (model.is_deprecated === true) return true; - if (model.status === "deprecated") return true; - if (model.lifecycle === "deprecated") return true; - return false; -} - -/** - * Models to exclude from Copilot - 
dated snapshots, legacy models, and unsupported versions. - * Users should use the main model ID (e.g., "gpt-4o") instead of dated versions. - */ -const COPILOT_EXCLUDED_MODELS = new Set([ - // Dated GPT-4o snapshots - use "gpt-4o" instead - "gpt-4o-2024-05-13", - "gpt-4o-2024-08-06", - "gpt-4o-2024-11-20", - // Legacy GPT-3.5 and GPT-4 models - "gpt-3.5-turbo", - "gpt-3.5-turbo-0613", - "gpt-4", - "gpt-4-0613", -]); - -function isCopilotModelExcluded(modelId: string): boolean { - return COPILOT_EXCLUDED_MODELS.has(modelId); -} - -function getCopilotApi(modelId: string, supportedEndpoints: string[] | null): Api { - if (supportedEndpoints?.includes("/responses")) return "openai-responses"; - if (supportedEndpoints?.includes("/chat/completions")) return "openai-completions"; - - const id = modelId.toLowerCase(); - if (id.includes("codex") || id.startsWith("o1") || id.startsWith("o3")) { - return "openai-responses"; - } - return "openai-completions"; -} - -async function fetchCopilotModels(githubToken: string): Promise[]> { - try { - console.log("Fetching models from GitHub Copilot API..."); - const response = await fetch("https://api.githubcopilot.com/models", { - headers: { - Accept: "application/json", - Authorization: `Bearer ${githubToken}`, - ...COPILOT_STATIC_HEADERS, - }, - }); - - if (!response.ok) { - const text = await response.text(); - console.warn(`Failed to fetch GitHub Copilot models: ${response.status} ${text}`); - return []; - } - - const data = (await response.json()) as unknown; - const list = - Array.isArray(data) - ? data - : Array.isArray((data as any)?.data) - ? (data as any).data - : Array.isArray((data as any)?.models) - ? 
(data as any).models - : null; - - if (!Array.isArray(list)) { - console.warn("Failed to parse GitHub Copilot models response"); - return []; - } - - const models: Model[] = []; - - for (const item of list) { - if (!item || typeof item !== "object") continue; - const model = item as Record; - - const id = typeof model.id === "string" ? model.id : null; - if (!id) continue; - if (isCopilotModelDeprecated(model)) continue; - if (isCopilotModelExcluded(id)) continue; - - const caps = model.capabilities; - if (!caps || typeof caps !== "object") continue; - const supports = (caps as Record).supports; - if (!supports || typeof supports !== "object") continue; - - const supportsToolCalls = (supports as Record).tool_calls === true; - if (!supportsToolCalls) continue; - - const supportsVision = (supports as Record).vision === true; - const input: ("text" | "image")[] = supportsVision ? ["text", "image"] : ["text"]; - - const limits = (caps as Record).limits; - - // Copilot exposes both: - // - max_context_window_tokens: the model's full context window capability - // - max_prompt_tokens: the maximum prompt tokens Copilot will accept - // For pi's purposes (compaction, prompt sizing), the prompt limit is the effective context window. - const contextWindow = - limits && typeof limits === "object" && typeof (limits as any).max_prompt_tokens === "number" - ? (limits as any).max_prompt_tokens - : limits && typeof limits === "object" && typeof (limits as any).max_context_window_tokens === "number" - ? (limits as any).max_context_window_tokens - : 128000; - const maxTokens = - limits && typeof limits === "object" && typeof (limits as any).max_output_tokens === "number" - ? (limits as any).max_output_tokens - : 8192; - - const supportedEndpoints = Array.isArray(model.supported_endpoints) - ? 
(model.supported_endpoints as unknown[]).filter((e): e is string => typeof e === "string") - : null; - - const api = getCopilotApi(id, supportedEndpoints); - - const base: Model = { - id, - name: id, - api, - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - reasoning: false, - input, - cost: { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow, - maxTokens, - headers: { ...COPILOT_STATIC_HEADERS }, - }; - - if (api === "openai-completions") { - base.compat = { - supportsStore: false, - supportsDeveloperRole: false, - supportsReasoningEffort: false, - }; - } - - if (supportedEndpoints && !supportedEndpoints.includes("/chat/completions") && !supportedEndpoints.includes("/responses")) { - continue; - } - - models.push(base); - } - - console.log(`Fetched ${models.length} tool-capable models from GitHub Copilot`); - return models; - } catch (error) { - console.warn("Failed to fetch GitHub Copilot models:", error); - return []; - } -} - -function getFallbackCopilotModels(): Model[] { - const fallback: Array<{ id: string; api: Api; input: ("text" | "image")[] }> = [ - { id: "claude-opus-4.5", api: "openai-completions", input: ["text", "image"] }, - { id: "claude-sonnet-4.5", api: "openai-completions", input: ["text", "image"] }, - { id: "claude-haiku-4.5", api: "openai-completions", input: ["text", "image"] }, - { id: "gemini-3-pro-preview", api: "openai-completions", input: ["text", "image"] }, - { id: "grok-code-fast-1", api: "openai-completions", input: ["text"] }, - { id: "gpt-5.2", api: "openai-responses", input: ["text", "image"] }, - { id: "gpt-5.1-codex-max", api: "openai-responses", input: ["text", "image"] }, - ]; - - return fallback.map(({ id, api, input }) => { - const model: Model = { - id, - name: id, - api, - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - reasoning: false, - input, - cost: { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 128000, 
- maxTokens: 8192, - headers: { ...COPILOT_STATIC_HEADERS }, - }; - - if (api === "openai-completions") { - model.compat = { - supportsStore: false, - supportsDeveloperRole: false, - supportsReasoningEffort: false, - }; - } - - return model; - }); -} - async function fetchOpenRouterModels(): Promise[]> { try { console.log("Fetching models from OpenRouter API..."); @@ -518,6 +310,40 @@ async function loadModelsDevData(): Promise[]> { } } + // Process GitHub Copilot models + if (data["github-copilot"]?.models) { + for (const [modelId, model] of Object.entries(data["github-copilot"].models)) { + const m = model as ModelsDevModel & { status?: string }; + if (m.tool_call !== true) continue; + if (m.status === "deprecated") continue; + + const copilotModel: Model = { + id: modelId, + name: m.name || modelId, + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + reasoning: m.reasoning === true, + input: m.modalities?.input?.includes("image") ? ["text", "image"] : ["text"], + cost: { + input: m.cost?.input || 0, + output: m.cost?.output || 0, + cacheRead: m.cost?.cache_read || 0, + cacheWrite: m.cost?.cache_write || 0, + }, + contextWindow: m.limit?.context || 128000, + maxTokens: m.limit?.output || 8192, + headers: { ...COPILOT_STATIC_HEADERS }, + compat: { + supportsStore: false, + supportsDeveloperRole: false, + supportsReasoningEffort: false, + }, + }; + + models.push(copilotModel); + } + } console.log(`Loaded ${models.length} tool-capable models from models.dev`); return models; @@ -537,20 +363,6 @@ async function generateModels() { // Combine models (models.dev has priority) const allModels = [...modelsDevModels, ...openRouterModels]; - const copilotToken = getCopilotTokenFromEnv(); - let copilotModels: Model[] = []; - if (copilotToken) { - copilotModels = await fetchCopilotModels(copilotToken); - if (copilotModels.length === 0) { - console.warn("GitHub Copilot model fetch returned no models. 
Using fallback list."); - copilotModels = getFallbackCopilotModels(); - } - } else { - console.warn("No Copilot token found (set COPILOT_GITHUB_TOKEN, GH_TOKEN, or GITHUB_TOKEN). Using fallback list."); - copilotModels = getFallbackCopilotModels(); - } - allModels.push(...copilotModels); - // Fix incorrect cache pricing for Claude Opus 4.5 from models.dev // models.dev has 3x the correct pricing (1.5/18.75 instead of 0.5/6.25) const opus45 = allModels.find(m => m.provider === "anthropic" && m.id === "claude-opus-4-5"); diff --git a/packages/ai/src/models.generated.ts b/packages/ai/src/models.generated.ts index 84202af6..d285ace5 100644 --- a/packages/ai/src/models.generated.ts +++ b/packages/ai/src/models.generated.ts @@ -2468,7 +2468,458 @@ export const MODELS = { maxTokens: 16384, } satisfies Model<"openai-completions">, }, + "github-copilot": { + "grok-code-fast-1": { + id: "grok-code-fast-1", + name: "Grok Code Fast 1", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "gpt-5.1-codex": { + id: "gpt-5.1-codex", + name: "GPT-5.1-Codex", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, 
supportsReasoningEffort: false }, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "claude-haiku-4.5": { + id: "claude-haiku-4.5", + name: "Claude Haiku 4.5", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16000, + } satisfies Model<"openai-completions">, + "gemini-3-pro-preview": { + id: "gemini-3-pro-preview", + name: "Gemini 3 Pro Preview", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "oswe-vscode-prime": { + id: "oswe-vscode-prime", + name: "Raptor Mini (Preview)", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + 
"Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "gpt-5.1-codex-mini": { + id: "gpt-5.1-codex-mini", + name: "GPT-5.1-Codex-mini", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 100000, + } satisfies Model<"openai-completions">, + "gpt-5.1": { + id: "gpt-5.1", + name: "GPT-5.1", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "gpt-5-codex": { + id: "gpt-5-codex", + name: "GPT-5-Codex", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + 
"Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "gpt-4o": { + id: "gpt-4o", + name: "GPT-4o", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: false, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 64000, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "gpt-4.1": { + id: "gpt-4.1", + name: "GPT-4.1", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: false, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "gpt-5-mini": { + id: "gpt-5-mini", + name: "GPT-5-mini", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + 
"Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "gemini-2.5-pro": { + id: "gemini-2.5-pro", + name: "Gemini 2.5 Pro", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: false, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "gpt-5.1-codex-max": { + id: "gpt-5.1-codex-max", + name: "GPT-5.1-Codex-max", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "claude-sonnet-4": { + id: "claude-sonnet-4", + name: "Claude Sonnet 4", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + 
"User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16000, + } satisfies Model<"openai-completions">, + "gpt-5": { + id: "gpt-5", + name: "GPT-5", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "claude-opus-4.5": { + id: "claude-opus-4.5", + name: "Claude Opus 4.5", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16000, + } satisfies Model<"openai-completions">, + "gpt-5.2": { + id: "gpt-5.2", + name: "GPT-5.2", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + 
headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "claude-sonnet-4.5": { + id: "claude-sonnet-4.5", + name: "Claude Sonnet 4.5", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", + }, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16000, + } satisfies Model<"openai-completions">, + }, openrouter: { + "nvidia/nemotron-3-nano-30b-a3b:free": { + id: "nvidia/nemotron-3-nano-30b-a3b:free", + name: "NVIDIA: Nemotron 3 Nano 30B A3B (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, "openai/gpt-5.2-chat": { id: "openai/gpt-5.2-chat", name: "OpenAI: GPT-5.2 Chat", @@ -2767,7 +3218,7 @@ export const MODELS = { reasoning: true, input: ["text"], cost: { - input: 0.25, + input: 0.24, output: 0.38, cacheRead: 0.19, cacheWrite: 0, @@ -3073,13 +3524,13 @@ export const MODELS = { reasoning: true, input: ["text"], cost: { - input: 0.254, - 
output: 1.02, - cacheRead: 0.127, + input: 0.19999999999999998, + output: 1, + cacheRead: 0, cacheWrite: 0, }, - contextWindow: 262144, - maxTokens: 4096, + contextWindow: 196608, + maxTokens: 131072, } satisfies Model<"openai-completions">, "deepcogito/cogito-v2-preview-llama-405b": { id: "deepcogito/cogito-v2-preview-llama-405b", @@ -3294,9 +3745,9 @@ export const MODELS = { reasoning: true, input: ["text"], cost: { - input: 0.39999999999999997, - output: 1.75, - cacheRead: 0, + input: 0.38, + output: 1.69, + cacheRead: 0.06, cacheWrite: 0, }, contextWindow: 202752, @@ -3347,11 +3798,11 @@ export const MODELS = { cost: { input: 0.21, output: 0.32, - cacheRead: 0.16799999999999998, + cacheRead: 0, cacheWrite: 0, }, contextWindow: 163840, - maxTokens: 4096, + maxTokens: 65536, } satisfies Model<"openai-completions">, "google/gemini-2.5-flash-preview-09-2025": { id: "google/gemini-2.5-flash-preview-09-2025", @@ -5070,6 +5521,23 @@ export const MODELS = { contextWindow: 131072, maxTokens: 131072, } satisfies Model<"openai-completions">, + "google/gemma-3-27b-it:free": { + id: "google/gemma-3-27b-it:free", + name: "Google: Gemma 3 27B (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, "google/gemma-3-27b-it": { id: "google/gemma-3-27b-it", name: "Google: Gemma 3 27B", @@ -5563,9 +6031,9 @@ export const MODELS = { contextWindow: 32768, maxTokens: 4096, } satisfies Model<"openai-completions">, - "anthropic/claude-3.5-haiku-20241022": { - id: "anthropic/claude-3.5-haiku-20241022", - name: "Anthropic: Claude 3.5 Haiku (2024-10-22)", + "anthropic/claude-3.5-haiku": { + id: "anthropic/claude-3.5-haiku", + name: "Anthropic: Claude 3.5 Haiku", api: "openai-completions", provider: "openrouter", baseUrl: 
"https://openrouter.ai/api/v1", @@ -5580,9 +6048,9 @@ export const MODELS = { contextWindow: 200000, maxTokens: 8192, } satisfies Model<"openai-completions">, - "anthropic/claude-3.5-haiku": { - id: "anthropic/claude-3.5-haiku", - name: "Anthropic: Claude 3.5 Haiku", + "anthropic/claude-3.5-haiku-20241022": { + id: "anthropic/claude-3.5-haiku-20241022", + name: "Anthropic: Claude 3.5 Haiku (2024-10-22)", api: "openai-completions", provider: "openrouter", baseUrl: "https://openrouter.ai/api/v1", @@ -5614,23 +6082,6 @@ export const MODELS = { contextWindow: 200000, maxTokens: 8192, } satisfies Model<"openai-completions">, - "mistralai/ministral-8b": { - id: "mistralai/ministral-8b", - name: "Mistral: Ministral 8B", - api: "openai-completions", - provider: "openrouter", - baseUrl: "https://openrouter.ai/api/v1", - reasoning: false, - input: ["text"], - cost: { - input: 0.09999999999999999, - output: 0.09999999999999999, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 131072, - maxTokens: 4096, - } satisfies Model<"openai-completions">, "mistralai/ministral-3b": { id: "mistralai/ministral-3b", name: "Mistral: Ministral 3B", @@ -5648,6 +6099,23 @@ export const MODELS = { contextWindow: 131072, maxTokens: 4096, } satisfies Model<"openai-completions">, + "mistralai/ministral-8b": { + id: "mistralai/ministral-8b", + name: "Mistral: Ministral 8B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.09999999999999999, + output: 0.09999999999999999, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, "nvidia/llama-3.1-nemotron-70b-instruct": { id: "nvidia/llama-3.1-nemotron-70b-instruct", name: "NVIDIA: Llama 3.1 Nemotron 70B Instruct", @@ -5835,23 +6303,6 @@ export const MODELS = { contextWindow: 131072, maxTokens: 16384, } satisfies Model<"openai-completions">, - 
"meta-llama/llama-3.1-405b-instruct": { - id: "meta-llama/llama-3.1-405b-instruct", - name: "Meta: Llama 3.1 405B Instruct", - api: "openai-completions", - provider: "openrouter", - baseUrl: "https://openrouter.ai/api/v1", - reasoning: false, - input: ["text"], - cost: { - input: 3.5, - output: 3.5, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 130815, - maxTokens: 4096, - } satisfies Model<"openai-completions">, "meta-llama/llama-3.1-70b-instruct": { id: "meta-llama/llama-3.1-70b-instruct", name: "Meta: Llama 3.1 70B Instruct", @@ -5869,6 +6320,23 @@ export const MODELS = { contextWindow: 131072, maxTokens: 4096, } satisfies Model<"openai-completions">, + "meta-llama/llama-3.1-405b-instruct": { + id: "meta-llama/llama-3.1-405b-instruct", + name: "Meta: Llama 3.1 405B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 3.5, + output: 3.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 130815, + maxTokens: 4096, + } satisfies Model<"openai-completions">, "mistralai/mistral-nemo": { id: "mistralai/mistral-nemo", name: "Mistral: Mistral Nemo", @@ -6005,23 +6473,6 @@ export const MODELS = { contextWindow: 128000, maxTokens: 4096, } satisfies Model<"openai-completions">, - "openai/gpt-4o-2024-05-13": { - id: "openai/gpt-4o-2024-05-13", - name: "OpenAI: GPT-4o (2024-05-13)", - api: "openai-completions", - provider: "openrouter", - baseUrl: "https://openrouter.ai/api/v1", - reasoning: false, - input: ["text", "image"], - cost: { - input: 5, - output: 15, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 128000, - maxTokens: 4096, - } satisfies Model<"openai-completions">, "openai/gpt-4o": { id: "openai/gpt-4o", name: "OpenAI: GPT-4o", @@ -6056,22 +6507,22 @@ export const MODELS = { contextWindow: 128000, maxTokens: 64000, } satisfies Model<"openai-completions">, - "meta-llama/llama-3-70b-instruct": { - id: 
"meta-llama/llama-3-70b-instruct", - name: "Meta: Llama 3 70B Instruct", + "openai/gpt-4o-2024-05-13": { + id: "openai/gpt-4o-2024-05-13", + name: "OpenAI: GPT-4o (2024-05-13)", api: "openai-completions", provider: "openrouter", baseUrl: "https://openrouter.ai/api/v1", reasoning: false, - input: ["text"], + input: ["text", "image"], cost: { - input: 0.3, - output: 0.39999999999999997, + input: 5, + output: 15, cacheRead: 0, cacheWrite: 0, }, - contextWindow: 8192, - maxTokens: 16384, + contextWindow: 128000, + maxTokens: 4096, } satisfies Model<"openai-completions">, "meta-llama/llama-3-8b-instruct": { id: "meta-llama/llama-3-8b-instruct", @@ -6090,6 +6541,23 @@ export const MODELS = { contextWindow: 8192, maxTokens: 16384, } satisfies Model<"openai-completions">, + "meta-llama/llama-3-70b-instruct": { + id: "meta-llama/llama-3-70b-instruct", + name: "Meta: Llama 3 70B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.3, + output: 0.39999999999999997, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 8192, + maxTokens: 16384, + } satisfies Model<"openai-completions">, "mistralai/mixtral-8x22b-instruct": { id: "mistralai/mixtral-8x22b-instruct", name: "Mistral: Mixtral 8x22B Instruct", @@ -6294,23 +6762,6 @@ export const MODELS = { contextWindow: 8191, maxTokens: 4096, } satisfies Model<"openai-completions">, - "openai/gpt-4": { - id: "openai/gpt-4", - name: "OpenAI: GPT-4", - api: "openai-completions", - provider: "openrouter", - baseUrl: "https://openrouter.ai/api/v1", - reasoning: false, - input: ["text"], - cost: { - input: 30, - output: 60, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 8191, - maxTokens: 4096, - } satisfies Model<"openai-completions">, "openai/gpt-3.5-turbo": { id: "openai/gpt-3.5-turbo", name: "OpenAI: GPT-3.5 Turbo", @@ -6328,6 +6779,23 @@ export const MODELS = { contextWindow: 16385, maxTokens: 4096, } 
satisfies Model<"openai-completions">, + "openai/gpt-4": { + id: "openai/gpt-4", + name: "OpenAI: GPT-4", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 30, + output: 60, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 8191, + maxTokens: 4096, + } satisfies Model<"openai-completions">, "openrouter/auto": { id: "openrouter/auto", name: "OpenRouter: Auto Router", @@ -6346,392 +6814,4 @@ export const MODELS = { maxTokens: 30000, } satisfies Model<"openai-completions">, }, - "github-copilot": { - "gpt-4o-mini-2024-07-18": { - id: "gpt-4o-mini-2024-07-18", - name: "gpt-4o-mini-2024-07-18", - api: "openai-completions", - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - headers: { - "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", - "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": "copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", - }, - compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, - reasoning: false, - input: ["text"], - cost: { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 64000, - maxTokens: 4096, - } satisfies Model<"openai-completions">, - "grok-code-fast-1": { - id: "grok-code-fast-1", - name: "grok-code-fast-1", - api: "openai-completions", - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - headers: { - "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", - "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": "copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", - }, - compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, - reasoning: false, - input: ["text"], - cost: { - input: 0, - output: 0, 
- cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 128000, - maxTokens: 64000, - } satisfies Model<"openai-completions">, - "gpt-5.1-codex": { - id: "gpt-5.1-codex", - name: "gpt-5.1-codex", - api: "openai-responses", - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - headers: { - "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", - "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": "copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", - }, - reasoning: false, - input: ["text", "image"], - cost: { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 128000, - maxTokens: 128000, - } satisfies Model<"openai-responses">, - "gpt-5.1-codex-mini": { - id: "gpt-5.1-codex-mini", - name: "gpt-5.1-codex-mini", - api: "openai-responses", - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - headers: { - "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", - "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": "copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", - }, - reasoning: false, - input: ["text", "image"], - cost: { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 128000, - maxTokens: 128000, - } satisfies Model<"openai-responses">, - "gpt-5.1-codex-max": { - id: "gpt-5.1-codex-max", - name: "gpt-5.1-codex-max", - api: "openai-responses", - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - headers: { - "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", - "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": "copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", - }, - reasoning: false, - input: ["text", "image"], - cost: { - input: 0, - output: 0, - cacheRead: 0, - 
cacheWrite: 0, - }, - contextWindow: 128000, - maxTokens: 128000, - } satisfies Model<"openai-responses">, - "claude-sonnet-4.5": { - id: "claude-sonnet-4.5", - name: "claude-sonnet-4.5", - api: "openai-completions", - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - headers: { - "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", - "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": "copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", - }, - compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, - reasoning: false, - input: ["text", "image"], - cost: { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 128000, - maxTokens: 16000, - } satisfies Model<"openai-completions">, - "claude-opus-4.5": { - id: "claude-opus-4.5", - name: "claude-opus-4.5", - api: "openai-completions", - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - headers: { - "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", - "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": "copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", - }, - compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, - reasoning: false, - input: ["text", "image"], - cost: { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 128000, - maxTokens: 16000, - } satisfies Model<"openai-completions">, - "claude-haiku-4.5": { - id: "claude-haiku-4.5", - name: "claude-haiku-4.5", - api: "openai-completions", - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - headers: { - "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", - "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": 
"copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", - }, - compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, - reasoning: false, - input: ["text", "image"], - cost: { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 128000, - maxTokens: 16000, - } satisfies Model<"openai-completions">, - "gemini-3-pro-preview": { - id: "gemini-3-pro-preview", - name: "gemini-3-pro-preview", - api: "openai-completions", - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - headers: { - "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", - "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": "copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", - }, - compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, - reasoning: false, - input: ["text", "image"], - cost: { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 128000, - maxTokens: 64000, - } satisfies Model<"openai-completions">, - "gpt-4.1-2025-04-14": { - id: "gpt-4.1-2025-04-14", - name: "gpt-4.1-2025-04-14", - api: "openai-completions", - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - headers: { - "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", - "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": "copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", - }, - compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, - reasoning: false, - input: ["text", "image"], - cost: { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 64000, - maxTokens: 16384, - } satisfies Model<"openai-completions">, - "gpt-5.2": { - id: "gpt-5.2", - name: "gpt-5.2", - api: 
"openai-responses", - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - headers: { - "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", - "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": "copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", - }, - reasoning: false, - input: ["text", "image"], - cost: { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 128000, - maxTokens: 64000, - } satisfies Model<"openai-responses">, - "gpt-4-o-preview": { - id: "gpt-4-o-preview", - name: "gpt-4-o-preview", - api: "openai-completions", - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - headers: { - "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", - "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": "copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", - }, - compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, - reasoning: false, - input: ["text"], - cost: { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 64000, - maxTokens: 4096, - } satisfies Model<"openai-completions">, - "gpt-4.1": { - id: "gpt-4.1", - name: "gpt-4.1", - api: "openai-completions", - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - headers: { - "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", - "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": "copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", - }, - compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, - reasoning: false, - input: ["text", "image"], - cost: { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 64000, - maxTokens: 
16384, - } satisfies Model<"openai-completions">, - "gpt-4o-mini": { - id: "gpt-4o-mini", - name: "gpt-4o-mini", - api: "openai-completions", - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - headers: { - "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", - "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": "copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", - }, - compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, - reasoning: false, - input: ["text"], - cost: { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 64000, - maxTokens: 4096, - } satisfies Model<"openai-completions">, - "gpt-4o": { - id: "gpt-4o", - name: "gpt-4o", - api: "openai-completions", - provider: "github-copilot", - baseUrl: "https://api.githubcopilot.com", - headers: { - "User-Agent": "GitHubCopilotChat/0.35.0", - "Editor-Version": "vscode/1.105.1", - "Editor-Plugin-Version": "copilot-chat/0.35.0", - "Copilot-Integration-Id": "copilot-developer-cli", - "Openai-Intent": "conversation-edits", - "X-Initiator": "agent", - }, - compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false }, - reasoning: false, - input: ["text", "image"], - cost: { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, - }, - contextWindow: 64000, - maxTokens: 4096, - } satisfies Model<"openai-completions">, - }, } as const; diff --git a/packages/coding-agent/CHANGELOG.md b/packages/coding-agent/CHANGELOG.md index bed2bad5..e96d6178 100644 --- a/packages/coding-agent/CHANGELOG.md +++ b/packages/coding-agent/CHANGELOG.md @@ -2,6 +2,10 @@ ## [Unreleased] +### Added + +- **GitHub Copilot support**: Use GitHub Copilot models via OAuth login (`/login` -> "GitHub Copilot"). Supports both github.com and GitHub Enterprise. 
Models are sourced from models.dev and include Claude, GPT, Gemini, Grok, and more. Some models require enablement at https://github.com/settings/copilot/features before use. ([#191](https://github.com/badlogic/pi-mono/pull/191) by [@cau1k](https://github.com/cau1k)) + ### Fixed - Model selector fuzzy search now matches against provider name (not just model ID) and supports space-separated tokens where all tokens must match diff --git a/packages/coding-agent/README.md b/packages/coding-agent/README.md index 9fd51d00..76921d1b 100644 --- a/packages/coding-agent/README.md +++ b/packages/coding-agent/README.md @@ -125,6 +125,23 @@ pi /login # Select "Anthropic (Claude Pro/Max)", authorize in browser ``` +**GitHub Copilot:** + +```bash +pi +/login # Select "GitHub Copilot", authorize in browser +``` + +During login, you'll be prompted for an enterprise domain. Press Enter to use github.com, or enter your GitHub Enterprise Server domain (e.g., `github.mycompany.com`). + +Some models require explicit enablement before use. If you get a "The requested model is not supported" error, enable the model at: + +**https://github.com/settings/copilot/features** + +For enterprise users, check with your organization's Copilot administrator for model availability and policies. + +Note: Enabling some models (e.g., Grok from xAI) may involve sharing usage data with the provider. Review the terms before enabling. + Tokens stored in `~/.pi/agent/oauth.json` (mode 0600). Use `/logout` to clear. 
### Quick Start diff --git a/packages/coding-agent/src/core/model-config.ts b/packages/coding-agent/src/core/model-config.ts index d971f2d4..35790586 100644 --- a/packages/coding-agent/src/core/model-config.ts +++ b/packages/coding-agent/src/core/model-config.ts @@ -242,18 +242,15 @@ export function loadAndMergeModels(): { models: Model[]; error: string | nu const combined = [...builtInModels, ...customModels]; + // Update github-copilot base URL based on OAuth token or enterprise domain const copilotCreds = loadOAuthCredentials("github-copilot"); - if (copilotCreds?.enterpriseUrl) { - const domain = normalizeDomain(copilotCreds.enterpriseUrl); - if (domain) { - const baseUrl = getGitHubCopilotBaseUrl(domain); - return { - models: combined.map((m) => - m.provider === "github-copilot" && m.baseUrl === "https://api.githubcopilot.com" ? { ...m, baseUrl } : m, - ), - error: null, - }; - } + if (copilotCreds) { + const domain = copilotCreds.enterpriseUrl ? normalizeDomain(copilotCreds.enterpriseUrl) : undefined; + const baseUrl = getGitHubCopilotBaseUrl(copilotCreds.access, domain ?? undefined); + return { + models: combined.map((m) => (m.provider === "github-copilot" ? { ...m, baseUrl } : m)), + error: null, + }; } return { models: combined, error: null }; @@ -288,23 +285,31 @@ export async function getApiKeyForModel(model: Model): Promise {