mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-16 02:01:29 +00:00
Add GitHub Copilot support (#191)
- OAuth login for GitHub Copilot via /login command - Support for github.com and GitHub Enterprise - Models sourced from models.dev (Claude, GPT, Gemini, Grok, etc.) - Dynamic base URL from token's proxy-ep field - Use vscode-chat integration ID for API compatibility - Documentation for model enablement at github.com/settings/copilot/features Co-authored-by: cau1k <cau1k@users.noreply.github.com>
This commit is contained in:
parent
ce4ba70d33
commit
b66157c649
7 changed files with 664 additions and 726 deletions
|
|
@ -4,6 +4,8 @@
|
|||
|
||||
### Added
|
||||
|
||||
- **GitHub Copilot provider**: Added `github-copilot` as a known provider with models sourced from models.dev. Includes Claude, GPT, Gemini, Grok, and other models available through GitHub Copilot. ([#191](https://github.com/badlogic/pi-mono/pull/191) by [@cau1k](https://github.com/cau1k))
|
||||
|
||||
- **Gemini 3 Pro thinking levels**: Thinking level configuration now works correctly for Gemini 3 Pro models. Previously all levels mapped to -1 (minimal thinking). Now LOW/MEDIUM/HIGH properly control test-time computation. ([#176](https://github.com/badlogic/pi-mono/pull/176) by [@markusylisiurunen](https://github.com/markusylisiurunen))
|
||||
|
||||
## [0.18.2] - 2025-12-11
|
||||
|
|
|
|||
|
|
@ -31,219 +31,11 @@ interface ModelsDevModel {
|
|||
|
||||
const COPILOT_STATIC_HEADERS = {
|
||||
"User-Agent": "GitHubCopilotChat/0.35.0",
|
||||
"Editor-Version": "vscode/1.105.1",
|
||||
"Editor-Version": "vscode/1.107.0",
|
||||
"Editor-Plugin-Version": "copilot-chat/0.35.0",
|
||||
"Copilot-Integration-Id": "copilot-developer-cli",
|
||||
"Openai-Intent": "conversation-edits",
|
||||
"X-Initiator": "agent",
|
||||
"Copilot-Integration-Id": "vscode-chat",
|
||||
} as const;
|
||||
|
||||
function getCopilotTokenFromEnv(): string | null {
|
||||
return process.env.COPILOT_GITHUB_TOKEN || process.env.GH_TOKEN || process.env.GITHUB_TOKEN || null;
|
||||
}
|
||||
|
||||
function isCopilotModelDeprecated(model: Record<string, unknown>): boolean {
|
||||
const deprecated = model.deprecated;
|
||||
if (deprecated === true) return true;
|
||||
if (model.is_deprecated === true) return true;
|
||||
if (model.status === "deprecated") return true;
|
||||
if (model.lifecycle === "deprecated") return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Models to exclude from Copilot - dated snapshots, legacy models, and unsupported versions.
|
||||
* Users should use the main model ID (e.g., "gpt-4o") instead of dated versions.
|
||||
*/
|
||||
const COPILOT_EXCLUDED_MODELS = new Set([
|
||||
// Dated GPT-4o snapshots - use "gpt-4o" instead
|
||||
"gpt-4o-2024-05-13",
|
||||
"gpt-4o-2024-08-06",
|
||||
"gpt-4o-2024-11-20",
|
||||
// Legacy GPT-3.5 and GPT-4 models
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-0613",
|
||||
"gpt-4",
|
||||
"gpt-4-0613",
|
||||
]);
|
||||
|
||||
function isCopilotModelExcluded(modelId: string): boolean {
|
||||
return COPILOT_EXCLUDED_MODELS.has(modelId);
|
||||
}
|
||||
|
||||
function getCopilotApi(modelId: string, supportedEndpoints: string[] | null): Api {
|
||||
if (supportedEndpoints?.includes("/responses")) return "openai-responses";
|
||||
if (supportedEndpoints?.includes("/chat/completions")) return "openai-completions";
|
||||
|
||||
const id = modelId.toLowerCase();
|
||||
if (id.includes("codex") || id.startsWith("o1") || id.startsWith("o3")) {
|
||||
return "openai-responses";
|
||||
}
|
||||
return "openai-completions";
|
||||
}
|
||||
|
||||
async function fetchCopilotModels(githubToken: string): Promise<Model<any>[]> {
|
||||
try {
|
||||
console.log("Fetching models from GitHub Copilot API...");
|
||||
const response = await fetch("https://api.githubcopilot.com/models", {
|
||||
headers: {
|
||||
Accept: "application/json",
|
||||
Authorization: `Bearer ${githubToken}`,
|
||||
...COPILOT_STATIC_HEADERS,
|
||||
},
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const text = await response.text();
|
||||
console.warn(`Failed to fetch GitHub Copilot models: ${response.status} ${text}`);
|
||||
return [];
|
||||
}
|
||||
|
||||
const data = (await response.json()) as unknown;
|
||||
const list =
|
||||
Array.isArray(data)
|
||||
? data
|
||||
: Array.isArray((data as any)?.data)
|
||||
? (data as any).data
|
||||
: Array.isArray((data as any)?.models)
|
||||
? (data as any).models
|
||||
: null;
|
||||
|
||||
if (!Array.isArray(list)) {
|
||||
console.warn("Failed to parse GitHub Copilot models response");
|
||||
return [];
|
||||
}
|
||||
|
||||
const models: Model<any>[] = [];
|
||||
|
||||
for (const item of list) {
|
||||
if (!item || typeof item !== "object") continue;
|
||||
const model = item as Record<string, unknown>;
|
||||
|
||||
const id = typeof model.id === "string" ? model.id : null;
|
||||
if (!id) continue;
|
||||
if (isCopilotModelDeprecated(model)) continue;
|
||||
if (isCopilotModelExcluded(id)) continue;
|
||||
|
||||
const caps = model.capabilities;
|
||||
if (!caps || typeof caps !== "object") continue;
|
||||
const supports = (caps as Record<string, unknown>).supports;
|
||||
if (!supports || typeof supports !== "object") continue;
|
||||
|
||||
const supportsToolCalls = (supports as Record<string, unknown>).tool_calls === true;
|
||||
if (!supportsToolCalls) continue;
|
||||
|
||||
const supportsVision = (supports as Record<string, unknown>).vision === true;
|
||||
const input: ("text" | "image")[] = supportsVision ? ["text", "image"] : ["text"];
|
||||
|
||||
const limits = (caps as Record<string, unknown>).limits;
|
||||
|
||||
// Copilot exposes both:
|
||||
// - max_context_window_tokens: the model's full context window capability
|
||||
// - max_prompt_tokens: the maximum prompt tokens Copilot will accept
|
||||
// For pi's purposes (compaction, prompt sizing), the prompt limit is the effective context window.
|
||||
const contextWindow =
|
||||
limits && typeof limits === "object" && typeof (limits as any).max_prompt_tokens === "number"
|
||||
? (limits as any).max_prompt_tokens
|
||||
: limits && typeof limits === "object" && typeof (limits as any).max_context_window_tokens === "number"
|
||||
? (limits as any).max_context_window_tokens
|
||||
: 128000;
|
||||
const maxTokens =
|
||||
limits && typeof limits === "object" && typeof (limits as any).max_output_tokens === "number"
|
||||
? (limits as any).max_output_tokens
|
||||
: 8192;
|
||||
|
||||
const supportedEndpoints = Array.isArray(model.supported_endpoints)
|
||||
? (model.supported_endpoints as unknown[]).filter((e): e is string => typeof e === "string")
|
||||
: null;
|
||||
|
||||
const api = getCopilotApi(id, supportedEndpoints);
|
||||
|
||||
const base: Model<any> = {
|
||||
id,
|
||||
name: id,
|
||||
api,
|
||||
provider: "github-copilot",
|
||||
baseUrl: "https://api.githubcopilot.com",
|
||||
reasoning: false,
|
||||
input,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
},
|
||||
contextWindow,
|
||||
maxTokens,
|
||||
headers: { ...COPILOT_STATIC_HEADERS },
|
||||
};
|
||||
|
||||
if (api === "openai-completions") {
|
||||
base.compat = {
|
||||
supportsStore: false,
|
||||
supportsDeveloperRole: false,
|
||||
supportsReasoningEffort: false,
|
||||
};
|
||||
}
|
||||
|
||||
if (supportedEndpoints && !supportedEndpoints.includes("/chat/completions") && !supportedEndpoints.includes("/responses")) {
|
||||
continue;
|
||||
}
|
||||
|
||||
models.push(base);
|
||||
}
|
||||
|
||||
console.log(`Fetched ${models.length} tool-capable models from GitHub Copilot`);
|
||||
return models;
|
||||
} catch (error) {
|
||||
console.warn("Failed to fetch GitHub Copilot models:", error);
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
function getFallbackCopilotModels(): Model<any>[] {
|
||||
const fallback: Array<{ id: string; api: Api; input: ("text" | "image")[] }> = [
|
||||
{ id: "claude-opus-4.5", api: "openai-completions", input: ["text", "image"] },
|
||||
{ id: "claude-sonnet-4.5", api: "openai-completions", input: ["text", "image"] },
|
||||
{ id: "claude-haiku-4.5", api: "openai-completions", input: ["text", "image"] },
|
||||
{ id: "gemini-3-pro-preview", api: "openai-completions", input: ["text", "image"] },
|
||||
{ id: "grok-code-fast-1", api: "openai-completions", input: ["text"] },
|
||||
{ id: "gpt-5.2", api: "openai-responses", input: ["text", "image"] },
|
||||
{ id: "gpt-5.1-codex-max", api: "openai-responses", input: ["text", "image"] },
|
||||
];
|
||||
|
||||
return fallback.map(({ id, api, input }) => {
|
||||
const model: Model<any> = {
|
||||
id,
|
||||
name: id,
|
||||
api,
|
||||
provider: "github-copilot",
|
||||
baseUrl: "https://api.githubcopilot.com",
|
||||
reasoning: false,
|
||||
input,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
},
|
||||
contextWindow: 128000,
|
||||
maxTokens: 8192,
|
||||
headers: { ...COPILOT_STATIC_HEADERS },
|
||||
};
|
||||
|
||||
if (api === "openai-completions") {
|
||||
model.compat = {
|
||||
supportsStore: false,
|
||||
supportsDeveloperRole: false,
|
||||
supportsReasoningEffort: false,
|
||||
};
|
||||
}
|
||||
|
||||
return model;
|
||||
});
|
||||
}
|
||||
|
||||
async function fetchOpenRouterModels(): Promise<Model<any>[]> {
|
||||
try {
|
||||
console.log("Fetching models from OpenRouter API...");
|
||||
|
|
@ -518,6 +310,40 @@ async function loadModelsDevData(): Promise<Model<any>[]> {
|
|||
}
|
||||
}
|
||||
|
||||
// Process GitHub Copilot models
|
||||
if (data["github-copilot"]?.models) {
|
||||
for (const [modelId, model] of Object.entries(data["github-copilot"].models)) {
|
||||
const m = model as ModelsDevModel & { status?: string };
|
||||
if (m.tool_call !== true) continue;
|
||||
if (m.status === "deprecated") continue;
|
||||
|
||||
const copilotModel: Model<any> = {
|
||||
id: modelId,
|
||||
name: m.name || modelId,
|
||||
api: "openai-completions",
|
||||
provider: "github-copilot",
|
||||
baseUrl: "https://api.individual.githubcopilot.com",
|
||||
reasoning: m.reasoning === true,
|
||||
input: m.modalities?.input?.includes("image") ? ["text", "image"] : ["text"],
|
||||
cost: {
|
||||
input: m.cost?.input || 0,
|
||||
output: m.cost?.output || 0,
|
||||
cacheRead: m.cost?.cache_read || 0,
|
||||
cacheWrite: m.cost?.cache_write || 0,
|
||||
},
|
||||
contextWindow: m.limit?.context || 128000,
|
||||
maxTokens: m.limit?.output || 8192,
|
||||
headers: { ...COPILOT_STATIC_HEADERS },
|
||||
compat: {
|
||||
supportsStore: false,
|
||||
supportsDeveloperRole: false,
|
||||
supportsReasoningEffort: false,
|
||||
},
|
||||
};
|
||||
|
||||
models.push(copilotModel);
|
||||
}
|
||||
}
|
||||
|
||||
console.log(`Loaded ${models.length} tool-capable models from models.dev`);
|
||||
return models;
|
||||
|
|
@ -537,20 +363,6 @@ async function generateModels() {
|
|||
// Combine models (models.dev has priority)
|
||||
const allModels = [...modelsDevModels, ...openRouterModels];
|
||||
|
||||
const copilotToken = getCopilotTokenFromEnv();
|
||||
let copilotModels: Model<any>[] = [];
|
||||
if (copilotToken) {
|
||||
copilotModels = await fetchCopilotModels(copilotToken);
|
||||
if (copilotModels.length === 0) {
|
||||
console.warn("GitHub Copilot model fetch returned no models. Using fallback list.");
|
||||
copilotModels = getFallbackCopilotModels();
|
||||
}
|
||||
} else {
|
||||
console.warn("No Copilot token found (set COPILOT_GITHUB_TOKEN, GH_TOKEN, or GITHUB_TOKEN). Using fallback list.");
|
||||
copilotModels = getFallbackCopilotModels();
|
||||
}
|
||||
allModels.push(...copilotModels);
|
||||
|
||||
// Fix incorrect cache pricing for Claude Opus 4.5 from models.dev
|
||||
// models.dev has 3x the correct pricing (1.5/18.75 instead of 0.5/6.25)
|
||||
const opus45 = allModels.find(m => m.provider === "anthropic" && m.id === "claude-opus-4-5");
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load diff
|
|
@ -2,6 +2,10 @@
|
|||
|
||||
## [Unreleased]
|
||||
|
||||
### Added
|
||||
|
||||
- **GitHub Copilot support**: Use GitHub Copilot models via OAuth login (`/login` -> "GitHub Copilot"). Supports both github.com and GitHub Enterprise. Models are sourced from models.dev and include Claude, GPT, Gemini, Grok, and more. Some models require enablement at https://github.com/settings/copilot/features before use. ([#191](https://github.com/badlogic/pi-mono/pull/191) by [@cau1k](https://github.com/cau1k))
|
||||
|
||||
### Fixed
|
||||
|
||||
- Model selector fuzzy search now matches against provider name (not just model ID) and supports space-separated tokens where all tokens must match
|
||||
|
|
|
|||
|
|
@ -125,6 +125,23 @@ pi
|
|||
/login # Select "Anthropic (Claude Pro/Max)", authorize in browser
|
||||
```
|
||||
|
||||
**GitHub Copilot:**
|
||||
|
||||
```bash
|
||||
pi
|
||||
/login # Select "GitHub Copilot", authorize in browser
|
||||
```
|
||||
|
||||
During login, you'll be prompted for an enterprise domain. Press Enter to use github.com, or enter your GitHub Enterprise Server domain (e.g., `github.mycompany.com`).
|
||||
|
||||
Some models require explicit enablement before use. If you get a "The requested model is not supported" error, enable the model at:
|
||||
|
||||
**https://github.com/settings/copilot/features**
|
||||
|
||||
For enterprise users, check with your organization's Copilot administrator for model availability and policies.
|
||||
|
||||
Note: Enabling some models (e.g., Grok from xAI) may involve sharing usage data with the provider. Review the terms before enabling.
|
||||
|
||||
Tokens stored in `~/.pi/agent/oauth.json` (mode 0600). Use `/logout` to clear.
|
||||
|
||||
### Quick Start
|
||||
|
|
|
|||
|
|
@ -242,18 +242,15 @@ export function loadAndMergeModels(): { models: Model<Api>[]; error: string | nu
|
|||
|
||||
const combined = [...builtInModels, ...customModels];
|
||||
|
||||
// Update github-copilot base URL based on OAuth token or enterprise domain
|
||||
const copilotCreds = loadOAuthCredentials("github-copilot");
|
||||
if (copilotCreds?.enterpriseUrl) {
|
||||
const domain = normalizeDomain(copilotCreds.enterpriseUrl);
|
||||
if (domain) {
|
||||
const baseUrl = getGitHubCopilotBaseUrl(domain);
|
||||
return {
|
||||
models: combined.map((m) =>
|
||||
m.provider === "github-copilot" && m.baseUrl === "https://api.githubcopilot.com" ? { ...m, baseUrl } : m,
|
||||
),
|
||||
error: null,
|
||||
};
|
||||
}
|
||||
if (copilotCreds) {
|
||||
const domain = copilotCreds.enterpriseUrl ? normalizeDomain(copilotCreds.enterpriseUrl) : undefined;
|
||||
const baseUrl = getGitHubCopilotBaseUrl(copilotCreds.access, domain ?? undefined);
|
||||
return {
|
||||
models: combined.map((m) => (m.provider === "github-copilot" ? { ...m, baseUrl } : m)),
|
||||
error: null,
|
||||
};
|
||||
}
|
||||
|
||||
return { models: combined, error: null };
|
||||
|
|
@ -288,23 +285,31 @@ export async function getApiKeyForModel(model: Model<Api>): Promise<string | und
|
|||
}
|
||||
|
||||
if (model.provider === "github-copilot") {
|
||||
// 1. Check OAuth storage (from device flow login)
|
||||
const oauthToken = await getOAuthToken("github-copilot");
|
||||
if (oauthToken) {
|
||||
return oauthToken;
|
||||
}
|
||||
|
||||
// 2. Use GitHub token directly (works with copilot scope on github.com)
|
||||
const githubToken = process.env.COPILOT_GITHUB_TOKEN || process.env.GH_TOKEN || process.env.GITHUB_TOKEN;
|
||||
if (!githubToken) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
// 3. For enterprise, exchange token for short-lived Copilot token
|
||||
const enterpriseDomain = process.env.COPILOT_ENTERPRISE_URL
|
||||
? normalizeDomain(process.env.COPILOT_ENTERPRISE_URL)
|
||||
: undefined;
|
||||
|
||||
const creds = await refreshGitHubCopilotToken(githubToken, enterpriseDomain || undefined);
|
||||
saveOAuthCredentials("github-copilot", creds);
|
||||
return creds.access;
|
||||
if (enterpriseDomain) {
|
||||
const creds = await refreshGitHubCopilotToken(githubToken, enterpriseDomain);
|
||||
saveOAuthCredentials("github-copilot", creds);
|
||||
return creds.access;
|
||||
}
|
||||
|
||||
// 4. For github.com, use token directly
|
||||
return githubToken;
|
||||
}
|
||||
|
||||
// For built-in providers, use getApiKey from @mariozechner/pi-ai
|
||||
|
|
|
|||
|
|
@ -4,11 +4,9 @@ const CLIENT_ID = "Iv1.b507a08c87ecfe98";
|
|||
|
||||
const COPILOT_HEADERS = {
|
||||
"User-Agent": "GitHubCopilotChat/0.35.0",
|
||||
"Editor-Version": "vscode/1.105.1",
|
||||
"Editor-Version": "vscode/1.107.0",
|
||||
"Editor-Plugin-Version": "copilot-chat/0.35.0",
|
||||
"Copilot-Integration-Id": "copilot-developer-cli",
|
||||
"Openai-Intent": "conversation-edits",
|
||||
"X-Initiator": "agent",
|
||||
"Copilot-Integration-Id": "vscode-chat",
|
||||
} as const;
|
||||
|
||||
type DeviceCodeResponse = {
|
||||
|
|
@ -54,9 +52,29 @@ function getUrls(domain: string): {
|
|||
};
|
||||
}
|
||||
|
||||
export function getGitHubCopilotBaseUrl(enterpriseDomain?: string): string {
|
||||
if (!enterpriseDomain) return "https://api.githubcopilot.com";
|
||||
return `https://copilot-api.${enterpriseDomain}`;
|
||||
/**
|
||||
* Parse the proxy-ep from a Copilot token and convert to API base URL.
|
||||
* Token format: tid=...;exp=...;proxy-ep=proxy.individual.githubcopilot.com;...
|
||||
* Returns API URL like https://api.individual.githubcopilot.com
|
||||
*/
|
||||
export function getBaseUrlFromToken(token: string): string | null {
|
||||
const match = token.match(/proxy-ep=([^;]+)/);
|
||||
if (!match) return null;
|
||||
const proxyHost = match[1];
|
||||
// Convert proxy.xxx to api.xxx
|
||||
const apiHost = proxyHost.replace(/^proxy\./, "api.");
|
||||
return `https://${apiHost}`;
|
||||
}
|
||||
|
||||
export function getGitHubCopilotBaseUrl(token?: string, enterpriseDomain?: string): string {
|
||||
// If we have a token, extract the base URL from proxy-ep
|
||||
if (token) {
|
||||
const urlFromToken = getBaseUrlFromToken(token);
|
||||
if (urlFromToken) return urlFromToken;
|
||||
}
|
||||
// Fallback for enterprise or if token parsing fails
|
||||
if (enterpriseDomain) return `https://copilot-api.${enterpriseDomain}`;
|
||||
return "https://api.individual.githubcopilot.com";
|
||||
}
|
||||
|
||||
async function fetchJson(url: string, init: RequestInit): Promise<unknown> {
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue