GitHub Copilot: auto-enable models, fix gpt-5 API, normalize tool call IDs

- Auto-enable all models after /login via POST /models/{model}/policy
- Use openai-responses API for gpt-5/o3/o4 models (not accessible via completions)
- Normalize tool call IDs when switching between github-copilot models with different APIs
  (fixes #198: openai-responses generates 450+ char IDs with special chars that break other models)
- Update README with streamlined GitHub Copilot docs
This commit is contained in:
Mario Zechner 2025-12-15 20:03:51 +01:00
parent 16c8861842
commit c5543f7586
7 changed files with 216 additions and 127 deletions

View file

@ -317,10 +317,13 @@ async function loadModelsDevData(): Promise<Model<any>[]> {
if (m.tool_call !== true) continue;
if (m.status === "deprecated") continue;
// gpt-5 models require responses API, others use completions
const needsResponsesApi = modelId.startsWith("gpt-5");
const copilotModel: Model<any> = {
id: modelId,
name: m.name || modelId,
api: "openai-completions",
api: needsResponsesApi ? "openai-responses" : "openai-completions",
provider: "github-copilot",
baseUrl: "https://api.individual.githubcopilot.com",
reasoning: m.reasoning === true,
@ -334,11 +337,14 @@ async function loadModelsDevData(): Promise<Model<any>[]> {
contextWindow: m.limit?.context || 128000,
maxTokens: m.limit?.output || 8192,
headers: { ...COPILOT_STATIC_HEADERS },
compat: {
supportsStore: false,
supportsDeveloperRole: false,
supportsReasoningEffort: false,
},
// compat only applies to openai-completions
...(needsResponsesApi ? {} : {
compat: {
supportsStore: false,
supportsDeveloperRole: false,
supportsReasoningEffort: false,
},
}),
};
models.push(copilotModel);

View file

@ -2496,7 +2496,7 @@ export const MODELS = {
"gpt-5.1-codex": {
id: "gpt-5.1-codex",
name: "GPT-5.1-Codex",
api: "openai-completions",
api: "openai-responses",
provider: "github-copilot",
baseUrl: "https://api.individual.githubcopilot.com",
headers: {
@ -2505,7 +2505,6 @@ export const MODELS = {
"Editor-Plugin-Version": "copilot-chat/0.35.0",
"Copilot-Integration-Id": "vscode-chat",
},
compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false },
reasoning: true,
input: ["text", "image"],
cost: {
@ -2516,7 +2515,7 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
} satisfies Model<"openai-responses">,
"claude-haiku-4.5": {
id: "claude-haiku-4.5",
name: "Claude Haiku 4.5",
@ -2592,7 +2591,7 @@ export const MODELS = {
"gpt-5.1-codex-mini": {
id: "gpt-5.1-codex-mini",
name: "GPT-5.1-Codex-mini",
api: "openai-completions",
api: "openai-responses",
provider: "github-copilot",
baseUrl: "https://api.individual.githubcopilot.com",
headers: {
@ -2601,7 +2600,6 @@ export const MODELS = {
"Editor-Plugin-Version": "copilot-chat/0.35.0",
"Copilot-Integration-Id": "vscode-chat",
},
compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false },
reasoning: true,
input: ["text", "image"],
cost: {
@ -2612,11 +2610,11 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 100000,
} satisfies Model<"openai-completions">,
} satisfies Model<"openai-responses">,
"gpt-5.1": {
id: "gpt-5.1",
name: "GPT-5.1",
api: "openai-completions",
api: "openai-responses",
provider: "github-copilot",
baseUrl: "https://api.individual.githubcopilot.com",
headers: {
@ -2625,7 +2623,6 @@ export const MODELS = {
"Editor-Plugin-Version": "copilot-chat/0.35.0",
"Copilot-Integration-Id": "vscode-chat",
},
compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false },
reasoning: true,
input: ["text", "image"],
cost: {
@ -2636,11 +2633,11 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
} satisfies Model<"openai-responses">,
"gpt-5-codex": {
id: "gpt-5-codex",
name: "GPT-5-Codex",
api: "openai-completions",
api: "openai-responses",
provider: "github-copilot",
baseUrl: "https://api.individual.githubcopilot.com",
headers: {
@ -2649,7 +2646,6 @@ export const MODELS = {
"Editor-Plugin-Version": "copilot-chat/0.35.0",
"Copilot-Integration-Id": "vscode-chat",
},
compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false },
reasoning: true,
input: ["text", "image"],
cost: {
@ -2660,7 +2656,7 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
} satisfies Model<"openai-responses">,
"gpt-4o": {
id: "gpt-4o",
name: "GPT-4o",
@ -2712,7 +2708,7 @@ export const MODELS = {
"gpt-5-mini": {
id: "gpt-5-mini",
name: "GPT-5-mini",
api: "openai-completions",
api: "openai-responses",
provider: "github-copilot",
baseUrl: "https://api.individual.githubcopilot.com",
headers: {
@ -2721,7 +2717,6 @@ export const MODELS = {
"Editor-Plugin-Version": "copilot-chat/0.35.0",
"Copilot-Integration-Id": "vscode-chat",
},
compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false },
reasoning: true,
input: ["text", "image"],
cost: {
@ -2732,7 +2727,7 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 64000,
} satisfies Model<"openai-completions">,
} satisfies Model<"openai-responses">,
"gemini-2.5-pro": {
id: "gemini-2.5-pro",
name: "Gemini 2.5 Pro",
@ -2760,7 +2755,7 @@ export const MODELS = {
"gpt-5.1-codex-max": {
id: "gpt-5.1-codex-max",
name: "GPT-5.1-Codex-max",
api: "openai-completions",
api: "openai-responses",
provider: "github-copilot",
baseUrl: "https://api.individual.githubcopilot.com",
headers: {
@ -2769,7 +2764,6 @@ export const MODELS = {
"Editor-Plugin-Version": "copilot-chat/0.35.0",
"Copilot-Integration-Id": "vscode-chat",
},
compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false },
reasoning: true,
input: ["text", "image"],
cost: {
@ -2780,7 +2774,7 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
} satisfies Model<"openai-responses">,
"claude-sonnet-4": {
id: "claude-sonnet-4",
name: "Claude Sonnet 4",
@ -2808,7 +2802,7 @@ export const MODELS = {
"gpt-5": {
id: "gpt-5",
name: "GPT-5",
api: "openai-completions",
api: "openai-responses",
provider: "github-copilot",
baseUrl: "https://api.individual.githubcopilot.com",
headers: {
@ -2817,7 +2811,6 @@ export const MODELS = {
"Editor-Plugin-Version": "copilot-chat/0.35.0",
"Copilot-Integration-Id": "vscode-chat",
},
compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false },
reasoning: true,
input: ["text", "image"],
cost: {
@ -2828,7 +2821,7 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
} satisfies Model<"openai-responses">,
"claude-opus-4.5": {
id: "claude-opus-4.5",
name: "Claude Opus 4.5",
@ -2856,7 +2849,7 @@ export const MODELS = {
"gpt-5.2": {
id: "gpt-5.2",
name: "GPT-5.2",
api: "openai-completions",
api: "openai-responses",
provider: "github-copilot",
baseUrl: "https://api.individual.githubcopilot.com",
headers: {
@ -2865,7 +2858,6 @@ export const MODELS = {
"Editor-Plugin-Version": "copilot-chat/0.35.0",
"Copilot-Integration-Id": "vscode-chat",
},
compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: false },
reasoning: true,
input: ["text", "image"],
cost: {
@ -2876,7 +2868,7 @@ export const MODELS = {
},
contextWindow: 128000,
maxTokens: 64000,
} satisfies Model<"openai-completions">,
} satisfies Model<"openai-responses">,
"claude-sonnet-4.5": {
id: "claude-sonnet-4.5",
name: "Claude Sonnet 4.5",
@ -6031,9 +6023,9 @@ export const MODELS = {
contextWindow: 32768,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"anthropic/claude-3.5-haiku": {
id: "anthropic/claude-3.5-haiku",
name: "Anthropic: Claude 3.5 Haiku",
"anthropic/claude-3.5-haiku-20241022": {
id: "anthropic/claude-3.5-haiku-20241022",
name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
@ -6048,9 +6040,9 @@ export const MODELS = {
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"openai-completions">,
"anthropic/claude-3.5-haiku-20241022": {
id: "anthropic/claude-3.5-haiku-20241022",
name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
"anthropic/claude-3.5-haiku": {
id: "anthropic/claude-3.5-haiku",
name: "Anthropic: Claude 3.5 Haiku",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
@ -6082,23 +6074,6 @@ export const MODELS = {
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"openai-completions">,
"mistralai/ministral-3b": {
id: "mistralai/ministral-3b",
name: "Mistral: Ministral 3B",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.04,
output: 0.04,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"mistralai/ministral-8b": {
id: "mistralai/ministral-8b",
name: "Mistral: Ministral 8B",
@ -6116,6 +6091,23 @@ export const MODELS = {
contextWindow: 131072,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"mistralai/ministral-3b": {
id: "mistralai/ministral-3b",
name: "Mistral: Ministral 3B",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.04,
output: 0.04,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"nvidia/llama-3.1-nemotron-70b-instruct": {
id: "nvidia/llama-3.1-nemotron-70b-instruct",
name: "NVIDIA: Llama 3.1 Nemotron 70B Instruct",
@ -6303,23 +6295,6 @@ export const MODELS = {
contextWindow: 131072,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-70b-instruct": {
id: "meta-llama/llama-3.1-70b-instruct",
name: "Meta: Llama 3.1 70B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.39999999999999997,
output: 0.39999999999999997,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-405b-instruct": {
id: "meta-llama/llama-3.1-405b-instruct",
name: "Meta: Llama 3.1 405B Instruct",
@ -6337,6 +6312,23 @@ export const MODELS = {
contextWindow: 130815,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-70b-instruct": {
id: "meta-llama/llama-3.1-70b-instruct",
name: "Meta: Llama 3.1 70B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.39999999999999997,
output: 0.39999999999999997,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"mistralai/mistral-nemo": {
id: "mistralai/mistral-nemo",
name: "Mistral: Mistral Nemo",
@ -6473,6 +6465,23 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-2024-05-13": {
id: "openai/gpt-4o-2024-05-13",
name: "OpenAI: GPT-4o (2024-05-13)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 5,
output: 15,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4o": {
id: "openai/gpt-4o",
name: "OpenAI: GPT-4o",
@ -6507,22 +6516,22 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 64000,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-2024-05-13": {
id: "openai/gpt-4o-2024-05-13",
name: "OpenAI: GPT-4o (2024-05-13)",
"meta-llama/llama-3-70b-instruct": {
id: "meta-llama/llama-3-70b-instruct",
name: "Meta: Llama 3 70B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
input: ["text"],
cost: {
input: 5,
output: 15,
input: 0.3,
output: 0.39999999999999997,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
contextWindow: 8192,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3-8b-instruct": {
id: "meta-llama/llama-3-8b-instruct",
@ -6541,23 +6550,6 @@ export const MODELS = {
contextWindow: 8192,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3-70b-instruct": {
id: "meta-llama/llama-3-70b-instruct",
name: "Meta: Llama 3 70B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.3,
output: 0.39999999999999997,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 8192,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"mistralai/mixtral-8x22b-instruct": {
id: "mistralai/mixtral-8x22b-instruct",
name: "Mistral: Mixtral 8x22B Instruct",
@ -6762,23 +6754,6 @@ export const MODELS = {
contextWindow: 8191,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-3.5-turbo": {
id: "openai/gpt-3.5-turbo",
name: "OpenAI: GPT-3.5 Turbo",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.5,
output: 1.5,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 16385,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4": {
id: "openai/gpt-4",
name: "OpenAI: GPT-4",
@ -6796,6 +6771,23 @@ export const MODELS = {
contextWindow: 8191,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-3.5-turbo": {
id: "openai/gpt-3.5-turbo",
name: "OpenAI: GPT-3.5 Turbo",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.5,
output: 1.5,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 16385,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openrouter/auto": {
id: "openrouter/auto",
name: "OpenRouter: Auto Router",

View file

@ -1,10 +1,31 @@
import type { Api, AssistantMessage, Message, Model } from "../types.js";
import type { Api, AssistantMessage, Message, Model, ToolCall } from "../types.js";
/**
 * Normalize a tool call ID for GitHub Copilot cross-API compatibility.
 * OpenAI Responses API generates IDs that are 450+ chars with special characters like `|`.
 * Other APIs (Claude, etc.) require max 40 chars and only alphanumeric + underscore + hyphen.
 *
 * The result is a deterministic pure function of the input. IDs that are already
 * valid (<= 40 chars, allowed charset) pass through unchanged. Otherwise we keep a
 * sanitized prefix and append an 8-hex-digit FNV-1a hash of the FULL original ID:
 * the hash suffix prevents collisions between distinct long IDs that share a
 * 40-char prefix, and guarantees a non-empty result even when every character of
 * the original would be stripped.
 */
function normalizeCopilotToolCallId(id: string): string {
	const sanitized = id.replace(/[^a-zA-Z0-9_-]/g, "");
	// Already short and clean: keep verbatim so callers see no remapping needed.
	if (sanitized === id && id.length <= 40 && id.length > 0) return id;
	// FNV-1a 32-bit over the ORIGINAL id (pre-sanitization) so IDs differing only
	// in stripped characters still map to different normalized IDs.
	let hash = 0x811c9dc5;
	for (let i = 0; i < id.length; i++) {
		hash ^= id.charCodeAt(i);
		hash = Math.imul(hash, 0x01000193) >>> 0;
	}
	const suffix = hash.toString(16).padStart(8, "0");
	// 31-char prefix + "_" + 8-char hash = at most 40 chars.
	return `${sanitized.slice(0, 31)}_${suffix}`;
}
export function transformMessages<TApi extends Api>(messages: Message[], model: Model<TApi>): Message[] {
// Build a map of original tool call IDs to normalized IDs for github-copilot cross-API switches
const toolCallIdMap = new Map<string, string>();
return messages
.map((msg) => {
// User and toolResult messages pass through unchanged
if (msg.role === "user" || msg.role === "toolResult") {
// User messages pass through unchanged
if (msg.role === "user") {
return msg;
}
// Handle toolResult messages - normalize toolCallId if we have a mapping
if (msg.role === "toolResult") {
const normalizedId = toolCallIdMap.get(msg.toolCallId);
if (normalizedId && normalizedId !== msg.toolCallId) {
return { ...msg, toolCallId: normalizedId };
}
return msg;
}
@ -17,6 +38,12 @@ export function transformMessages<TApi extends Api>(messages: Message[], model:
return msg;
}
// Check if we need to normalize tool call IDs (github-copilot cross-API)
const needsToolCallIdNormalization =
assistantMsg.provider === "github-copilot" &&
model.provider === "github-copilot" &&
assistantMsg.api !== model.api;
// Transform message from different provider/model
const transformedContent = assistantMsg.content.map((block) => {
if (block.type === "thinking") {
@ -26,7 +53,16 @@ export function transformMessages<TApi extends Api>(messages: Message[], model:
text: `<thinking>\n${block.thinking}\n</thinking>`,
};
}
// All other blocks (text, toolCall) pass through unchanged
// Normalize tool call IDs for github-copilot cross-API switches
if (block.type === "toolCall" && needsToolCallIdNormalization) {
const toolCall = block as ToolCall;
const normalizedId = normalizeCopilotToolCallId(toolCall.id);
if (normalizedId !== toolCall.id) {
toolCallIdMap.set(toolCall.id, normalizedId);
return { ...toolCall, id: normalizedId };
}
}
// All other blocks pass through unchanged
return block;
});

View file

@ -132,18 +132,11 @@ pi
/login # Select "GitHub Copilot", authorize in browser
```
During login, you'll be prompted for an enterprise domain. Press Enter to use github.com, or enter your GitHub Enterprise Server domain (e.g., `github.mycompany.com`).
During login, you'll be prompted for an enterprise domain. Press Enter to use github.com, or enter your GitHub Enterprise Server domain (e.g., `github.mycompany.com`). All models are automatically enabled after login.
Some models require explicit enablement before use. If you get "The requested model is not supported" error:
If you get a "The requested model is not supported" error, enable the model manually in VS Code: open Copilot Chat, click the model selector, select the model (marked with a warning icon), and click "Enable".
1. Open VS Code with GitHub Copilot Chat extension
2. Open the Copilot Chat panel and click the model selector
3. Select the model (marked with a warning icon)
4. Click "Enable" to accept the terms
For enterprise users, check with your organization's Copilot administrator for model availability and policies.
Note: Enabling some models (e.g., Grok from xAI) may involve sharing usage data with the provider. Review the terms before enabling.
For enterprise users, check with your organization's Copilot administrator for model availability.
Tokens stored in `~/.pi/agent/oauth.json` (mode 0600). Use `/logout` to clear.

View file

@ -1,3 +1,4 @@
import { getModels } from "@mariozechner/pi-ai";
import type { OAuthCredentials } from "./storage.js";
const CLIENT_ID = "Iv1.b507a08c87ecfe98";
@ -216,9 +217,58 @@ export async function refreshGitHubCopilotToken(
} satisfies OAuthCredentials;
}
/**
 * Enable a model for the user's GitHub Copilot account by POSTing an
 * "enabled" policy state to the per-model policy endpoint. Some models
 * (like Claude, Grok) require this before they can be used.
 *
 * @param token GitHub Copilot bearer token.
 * @param modelId Model identifier to enable.
 * @param enterpriseDomain Optional GitHub Enterprise Server domain.
 * @returns true when the endpoint responded with a 2xx status; false on a
 *          non-OK response or any network/fetch error (best-effort, never throws).
 */
export async function enableGitHubCopilotModel(
	token: string,
	modelId: string,
	enterpriseDomain?: string,
): Promise<boolean> {
	const policyUrl = `${getGitHubCopilotBaseUrl(token, enterpriseDomain)}/models/${modelId}/policy`;
	const requestHeaders = {
		"Content-Type": "application/json",
		Authorization: `Bearer ${token}`,
		...COPILOT_HEADERS,
		"openai-intent": "chat-policy",
		"x-interaction-type": "chat-policy",
	};
	try {
		const response = await fetch(policyUrl, {
			method: "POST",
			headers: requestHeaders,
			body: JSON.stringify({ state: "enabled" }),
		});
		return response.ok;
	} catch {
		// Network failures are treated as "not enabled" rather than propagated.
		return false;
	}
}
/**
* Enable all known GitHub Copilot models that may require policy acceptance.
* Called after successful login to ensure all models are available.
*/
export async function enableAllGitHubCopilotModels(
token: string,
enterpriseDomain?: string,
onProgress?: (model: string, success: boolean) => void,
): Promise<void> {
const models = getModels("github-copilot");
await Promise.all(
models.map(async (model) => {
const success = await enableGitHubCopilotModel(token, model.id, enterpriseDomain);
onProgress?.(model.id, success);
}),
);
}
export async function loginGitHubCopilot(options: {
onAuth: (url: string, instructions?: string) => void;
onPrompt: (prompt: { message: string; placeholder?: string; allowEmpty?: boolean }) => Promise<string>;
onProgress?: (message: string) => void;
}): Promise<OAuthCredentials> {
const input = await options.onPrompt({
message: "GitHub Enterprise URL/domain (blank for github.com)",
@ -242,5 +292,11 @@ export async function loginGitHubCopilot(options: {
device.interval,
device.expires_in,
);
return await refreshGitHubCopilotToken(githubAccessToken, enterpriseDomain ?? undefined);
const credentials = await refreshGitHubCopilotToken(githubAccessToken, enterpriseDomain ?? undefined);
// Enable all models after successful login
options.onProgress?.("Enabling models...");
await enableAllGitHubCopilotModels(credentials.access, enterpriseDomain ?? undefined);
return credentials;
}

View file

@ -55,6 +55,7 @@ export async function login(
provider: SupportedOAuthProvider,
onAuth: (info: OAuthAuthInfo) => void,
onPrompt: (prompt: OAuthPrompt) => Promise<string>,
onProgress?: (message: string) => void,
): Promise<void> {
switch (provider) {
case "anthropic":
@ -67,6 +68,7 @@ export async function login(
const creds = await loginGitHubCopilot({
onAuth: (url, instructions) => onAuth({ url, instructions }),
onPrompt,
onProgress,
});
saveOAuthCredentials("github-copilot", creds);
break;

View file

@ -1419,6 +1419,10 @@ export class InteractiveMode {
this.ui.requestRender();
});
},
(message) => {
this.chatContainer.addChild(new Text(theme.fg("dim", message), 1, 0));
this.ui.requestRender();
},
);
invalidateOAuthCache();