Add Google Gemini CLI and Antigravity OAuth providers

- Add google-gemini-cli provider: free Gemini 2.0/2.5 via Cloud Code Assist
- Add google-antigravity provider: free Gemini 3, Claude, GPT-OSS via sandbox
- Move OAuth infrastructure from coding-agent to ai package
- Fix thinking signature handling for cross-model handoff
- Fix OpenAI message ID length limit (max 64 chars)
- Add GitHub Copilot overflow pattern detection
- Add OAuth provider tests for context overflow and streaming
This commit is contained in:
Mario Zechner 2025-12-20 18:21:32 +01:00
parent 3266cac0f1
commit c359023c3f
25 changed files with 1392 additions and 413 deletions

View file

@ -1,5 +1,6 @@
/**
* Google Cloud Code Assist provider for Gemini CLI / Antigravity authentication.
* Google Gemini CLI / Antigravity provider.
* Shared implementation for both google-gemini-cli and google-antigravity providers.
* Uses the Cloud Code Assist API endpoint to access Gemini and Claude models.
*/
@ -20,7 +21,7 @@ import { AssistantMessageEventStream } from "../utils/event-stream.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { convertMessages, convertTools, mapStopReasonString, mapToolChoice } from "./google-shared.js";
export interface GoogleCloudCodeAssistOptions extends StreamOptions {
export interface GoogleGeminiCliOptions extends StreamOptions {
toolChoice?: "auto" | "none" | "any";
thinking?: {
enabled: boolean;
@ -29,11 +30,27 @@ export interface GoogleCloudCodeAssistOptions extends StreamOptions {
projectId?: string;
}
const ENDPOINT = "https://cloudcode-pa.googleapis.com";
const HEADERS = {
"User-Agent": "google-api-nodejs-client/9.15.1",
const DEFAULT_ENDPOINT = "https://cloudcode-pa.googleapis.com";
// Headers for Gemini CLI (prod endpoint)
const GEMINI_CLI_HEADERS = {
"User-Agent": "google-cloud-sdk vscode_cloudshelleditor/0.1",
"X-Goog-Api-Client": "gl-node/22.17.0",
"Client-Metadata": "ideType=IDE_UNSPECIFIED,platform=PLATFORM_UNSPECIFIED,pluginType=GEMINI",
"Client-Metadata": JSON.stringify({
ideType: "IDE_UNSPECIFIED",
platform: "PLATFORM_UNSPECIFIED",
pluginType: "GEMINI",
}),
};
// Headers for Antigravity (sandbox endpoint) - requires specific User-Agent
const ANTIGRAVITY_HEADERS = {
// NOTE(review): the sandbox endpoint appears to be gated on this exact UA string — confirm before bumping the version/platform.
"User-Agent": "antigravity/1.11.5 darwin/arm64",
"X-Goog-Api-Client": "google-cloud-sdk vscode_cloudshelleditor/0.1",
// Client metadata is sent as a JSON string; same payload shape as the Gemini CLI headers.
"Client-Metadata": JSON.stringify({
ideType: "IDE_UNSPECIFIED",
platform: "PLATFORM_UNSPECIFIED",
pluginType: "GEMINI",
}),
};
// Counter for generating unique tool call IDs
@ -92,10 +109,10 @@ interface CloudCodeAssistResponseChunk {
traceId?: string;
}
export const streamGoogleCloudCodeAssist: StreamFunction<"google-cloud-code-assist"> = (
model: Model<"google-cloud-code-assist">,
export const streamGoogleGeminiCli: StreamFunction<"google-gemini-cli"> = (
model: Model<"google-gemini-cli">,
context: Context,
options?: GoogleCloudCodeAssistOptions,
options?: GoogleGeminiCliOptions,
): AssistantMessageEventStream => {
const stream = new AssistantMessageEventStream();
@ -103,7 +120,7 @@ export const streamGoogleCloudCodeAssist: StreamFunction<"google-cloud-code-assi
const output: AssistantMessage = {
role: "assistant",
content: [],
api: "google-cloud-code-assist" as Api,
api: "google-gemini-cli" as Api,
provider: model.provider,
model: model.id,
usage: {
@ -141,7 +158,12 @@ export const streamGoogleCloudCodeAssist: StreamFunction<"google-cloud-code-assi
}
const requestBody = buildRequest(model, context, projectId, options);
const url = `${ENDPOINT}/v1internal:streamGenerateContent?alt=sse`;
const endpoint = model.baseUrl || DEFAULT_ENDPOINT;
const url = `${endpoint}/v1internal:streamGenerateContent?alt=sse`;
// Use Antigravity headers for sandbox endpoint, otherwise Gemini CLI headers
const isAntigravity = endpoint.includes("sandbox.googleapis.com");
const headers = isAntigravity ? ANTIGRAVITY_HEADERS : GEMINI_CLI_HEADERS;
const response = await fetch(url, {
method: "POST",
@ -149,7 +171,7 @@ export const streamGoogleCloudCodeAssist: StreamFunction<"google-cloud-code-assi
Authorization: `Bearer ${accessToken}`,
"Content-Type": "application/json",
Accept: "text/event-stream",
...HEADERS,
...headers,
},
body: JSON.stringify(requestBody),
signal: options?.signal,
@ -379,10 +401,10 @@ export const streamGoogleCloudCodeAssist: StreamFunction<"google-cloud-code-assi
};
function buildRequest(
model: Model<"google-cloud-code-assist">,
model: Model<"google-gemini-cli">,
context: Context,
projectId: string,
options: GoogleCloudCodeAssistOptions = {},
options: GoogleGeminiCliOptions = {},
): CloudCodeAssistRequest {
const contents = convertMessages(model, context);

View file

@ -7,7 +7,7 @@ import type { Context, ImageContent, Model, StopReason, TextContent, Tool } from
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { transformMessages } from "./transorm-messages.js";
type GoogleApiType = "google-generative-ai" | "google-cloud-code-assist";
type GoogleApiType = "google-generative-ai" | "google-gemini-cli";
/**
* Convert internal messages to Gemini Content[] format.
@ -48,14 +48,23 @@ export function convertMessages<T extends GoogleApiType>(model: Model<T>, contex
for (const block of msg.content) {
if (block.type === "text") {
// Skip empty text blocks - they can cause issues with some models (e.g. Claude via Antigravity)
if (!block.text || block.text.trim() === "") continue;
parts.push({ text: sanitizeSurrogates(block.text) });
} else if (block.type === "thinking") {
const thinkingPart: Part = {
thought: true,
thoughtSignature: block.thinkingSignature,
text: sanitizeSurrogates(block.thinking),
};
parts.push(thinkingPart);
// Thinking blocks require signatures for Claude via Antigravity.
// If signature is missing (e.g. from GPT-OSS), convert to regular text with delimiters.
if (block.thinkingSignature) {
parts.push({
thought: true,
text: sanitizeSurrogates(block.thinking),
thoughtSignature: block.thinkingSignature,
});
} else {
parts.push({
text: `<thinking>\n${sanitizeSurrogates(block.thinking)}\n</thinking>`,
});
}
} else if (block.type === "toolCall") {
const part: Part = {
functionCall: {
@ -112,10 +121,17 @@ export function convertMessages<T extends GoogleApiType>(model: Model<T>, contex
});
}
contents.push({
role: "user",
parts,
});
// Cloud Code Assist API requires all function responses to be in a single user turn.
// Check if the last content is already a user turn with function responses and merge.
const lastContent = contents[contents.length - 1];
if (lastContent?.role === "user" && lastContent.parts?.some((p) => p.functionResponse)) {
lastContent.parts.push(...parts);
} else {
contents.push({
role: "user",
parts,
});
}
}
}

View file

@ -30,6 +30,20 @@ import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { transformMessages } from "./transorm-messages.js";
/**
 * Fast deterministic hash to shorten long strings.
 *
 * cyrb53-style mixing: two 32-bit accumulators are updated per UTF-16 code
 * unit with `Math.imul`, avalanched in a final step, then concatenated as
 * base-36. Output is stable across runs (no randomness), so the same input
 * always maps to the same short id.
 */
function shortHash(str: string): string {
let lo = 0xdeadbeef;
let hi = 0x41c6ce57;
let i = 0;
while (i < str.length) {
const code = str.charCodeAt(i++);
lo = Math.imul(lo ^ code, 2654435761);
hi = Math.imul(hi ^ code, 1597334677);
}
// Final avalanche; note the second line deliberately mixes in the
// already-finalized first accumulator.
const mixedLo = Math.imul(lo ^ (lo >>> 16), 2246822507) ^ Math.imul(hi ^ (hi >>> 13), 3266489909);
const mixedHi = Math.imul(hi ^ (hi >>> 16), 2246822507) ^ Math.imul(mixedLo ^ (mixedLo >>> 13), 3266489909);
return (mixedHi >>> 0).toString(36) + (mixedLo >>> 0).toString(36);
}
// OpenAI Responses-specific options
export interface OpenAIResponsesOptions extends StreamOptions {
reasoningEffort?: "minimal" | "low" | "medium" | "high" | "xhigh";
@ -401,6 +415,7 @@ function convertMessages(model: Model<"openai-responses">, context: Context): Re
});
}
let msgIndex = 0;
for (const msg of transformedMessages) {
if (msg.role === "user") {
if (typeof msg.content === "string") {
@ -444,12 +459,19 @@ function convertMessages(model: Model<"openai-responses">, context: Context): Re
}
} else if (block.type === "text") {
const textBlock = block as TextContent;
// OpenAI requires id to be max 64 characters
let msgId = textBlock.textSignature;
if (!msgId) {
msgId = "msg_" + msgIndex;
} else if (msgId.length > 64) {
msgId = "msg_" + shortHash(msgId);
}
output.push({
type: "message",
role: "assistant",
content: [{ type: "output_text", text: sanitizeSurrogates(textBlock.text), annotations: [] }],
status: "completed",
id: textBlock.textSignature || "msg_" + Math.random().toString(36).substring(2, 15),
id: msgId,
} satisfies ResponseOutputMessage);
// Do not submit toolcall blocks if the completion had an error (i.e. abort)
} else if (block.type === "toolCall" && msg.stopReason !== "error") {
@ -508,6 +530,7 @@ function convertMessages(model: Model<"openai-responses">, context: Context): Re
});
}
}
msgIndex++;
}
return messages;