feat(ai): add Google Cloud Code Assist provider

- Add new API type 'google-cloud-code-assist' for Gemini CLI / Antigravity auth
- Extract shared Google utilities to google-shared.ts
- Implement streaming provider for Cloud Code Assist endpoint
- Add 7 models: gemini-3-pro-high/low, gemini-3-flash, claude-sonnet/opus, gpt-oss

Models use OAuth authentication and have $0 cost (they draw on the Google account's quota).
OAuth flow will be implemented in coding-agent in a follow-up.
This commit is contained in:
Mario Zechner 2025-12-20 10:20:30 +01:00
parent 04dcdebbc6
commit 36e17933d5
10 changed files with 1208 additions and 178 deletions

View file

@@ -0,0 +1,429 @@
/**
* Google Cloud Code Assist provider for Gemini CLI / Antigravity authentication.
* Uses the Cloud Code Assist API endpoint to access Gemini and Claude models.
*/
import type { Content, ThinkingConfig } from "@google/genai";
import { calculateCost } from "../models.js";
import type {
Api,
AssistantMessage,
Context,
Model,
StreamFunction,
StreamOptions,
TextContent,
ThinkingContent,
ToolCall,
} from "../types.js";
import { AssistantMessageEventStream } from "../utils/event-stream.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { convertMessages, convertTools, mapStopReasonString, mapToolChoice } from "./google-shared.js";
/**
 * Options for streaming against the Cloud Code Assist endpoint.
 * Extends the generic StreamOptions with Google-specific settings.
 */
export interface GoogleCloudCodeAssistOptions extends StreamOptions {
	/** Gemini function-calling mode; translated via mapToolChoice(). */
	toolChoice?: "auto" | "none" | "any";
	/** Controls the model's "thinking" (reasoning) output. */
	thinking?: {
		/** Request thoughts (only honored when the model supports reasoning). */
		enabled: boolean;
		/** Optional token budget forwarded as thinkingConfig.thinkingBudget. */
		budgetTokens?: number;
	};
	/** Google Cloud project ID; required by the Cloud Code Assist API. */
	projectId?: string;
}
// Base URL of Google's (internal) Cloud Code Assist API.
const ENDPOINT = "https://cloudcode-pa.googleapis.com";
// Static headers identifying this client like the official Google Node.js
// client. NOTE(review): values appear to mirror Gemini CLI defaults —
// confirm they remain accepted if the endpoint tightens client checks.
const HEADERS = {
	"User-Agent": "google-api-nodejs-client/9.15.1",
	"X-Goog-Api-Client": "gl-node/22.17.0",
	"Client-Metadata": "ideType=IDE_UNSPECIFIED,platform=PLATFORM_UNSPECIFIED,pluginType=GEMINI",
};
// Counter for generating unique tool call IDs (used when the API omits an
// ID or repeats one within the same message).
let toolCallCounter = 0;
/**
 * Wire format of a Cloud Code Assist streamGenerateContent request.
 * Wraps a standard Gemini-style request under `request`, plus the project
 * routing fields the v1internal endpoint expects.
 */
interface CloudCodeAssistRequest {
	/** Google Cloud project ID the request is attributed to. */
	project: string;
	/** Model ID, e.g. "gemini-3-flash". */
	model: string;
	/** The embedded Gemini-style generation request. */
	request: {
		contents: Content[];
		/** System prompt; must be an object with parts, not a plain string. */
		systemInstruction?: { parts: { text: string }[] };
		generationConfig?: {
			maxOutputTokens?: number;
			temperature?: number;
			thinkingConfig?: ThinkingConfig;
		};
		tools?: ReturnType<typeof convertTools>;
		toolConfig?: {
			functionCallingConfig: {
				mode: ReturnType<typeof mapToolChoice>;
			};
		};
	};
	/** Client identifier sent alongside the request. */
	userAgent?: string;
	/** Client-generated ID used for request tracing. */
	requestId?: string;
}
/**
 * Shape of a single SSE "data:" chunk from the streaming endpoint.
 * Everything is optional because a chunk may carry content, usage metadata,
 * or neither.
 */
interface CloudCodeAssistResponseChunk {
	/** The wrapped Gemini-style response; chunks without it are skipped. */
	response?: {
		candidates?: Array<{
			content?: {
				role: string;
				parts?: Array<{
					/** Text delta (regular output, or reasoning when `thought` is true). */
					text?: string;
					/** Marks this part as model "thinking" output. */
					thought?: boolean;
					/** Opaque signature attached to thoughts / tool calls. */
					thoughtSignature?: string;
					/** A tool invocation; the stream handler treats each one as complete. */
					functionCall?: {
						name: string;
						args: Record<string, unknown>;
						id?: string;
					};
				}>;
			};
			finishReason?: string;
		}>;
		usageMetadata?: {
			promptTokenCount?: number;
			candidatesTokenCount?: number;
			/** Reasoning tokens; the stream handler adds these to output usage. */
			thoughtsTokenCount?: number;
			totalTokenCount?: number;
			cachedContentTokenCount?: number;
		};
		modelVersion?: string;
		responseId?: string;
	};
	traceId?: string;
}
/**
 * Stream a completion from the Google Cloud Code Assist endpoint and
 * translate its SSE chunks into AssistantMessage events.
 *
 * Requirements (passed via options):
 *  - apiKey: an OAuth access token (NOT a plain API key);
 *  - projectId: the Google Cloud project to route the request to.
 *
 * The returned stream emits: start → (text|thinking|toolcall events)* →
 * done, or a single error event on failure.
 */
export const streamGoogleCloudCodeAssist: StreamFunction<"google-cloud-code-assist"> = (
	model: Model<"google-cloud-code-assist">,
	context: Context,
	options?: GoogleCloudCodeAssistOptions,
): AssistantMessageEventStream => {
	const stream = new AssistantMessageEventStream();
	// Detached async worker; every outcome (success or error) flows through `stream`.
	(async () => {
		// Message under construction; mutated in place as SSE chunks arrive.
		const output: AssistantMessage = {
			role: "assistant",
			content: [],
			api: "google-cloud-code-assist" as Api,
			provider: model.provider,
			model: model.id,
			usage: {
				input: 0,
				output: 0,
				cacheRead: 0,
				cacheWrite: 0,
				totalTokens: 0,
				cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
			},
			stopReason: "stop",
			timestamp: Date.now(),
		};
		try {
			// For this provider the "apiKey" slot carries an OAuth bearer token.
			const apiKey = options?.apiKey;
			if (!apiKey) {
				throw new Error("Google Cloud Code Assist requires an OAuth access token");
			}
			const projectId = options?.projectId;
			if (!projectId) {
				throw new Error("Google Cloud Code Assist requires a project ID");
			}
			const requestBody = buildRequest(model, context, projectId, options);
			// alt=sse asks the endpoint for a Server-Sent-Events response body.
			const url = `${ENDPOINT}/v1internal:streamGenerateContent?alt=sse`;
			const response = await fetch(url, {
				method: "POST",
				headers: {
					Authorization: `Bearer ${apiKey}`,
					"Content-Type": "application/json",
					Accept: "text/event-stream",
					...HEADERS,
				},
				body: JSON.stringify(requestBody),
				signal: options?.signal,
			});
			if (!response.ok) {
				const errorText = await response.text();
				throw new Error(`Cloud Code Assist API error (${response.status}): ${errorText}`);
			}
			if (!response.body) {
				throw new Error("No response body");
			}
			stream.push({ type: "start", partial: output });
			// Currently-open text/thinking block, if any. Tool calls are emitted
			// whole and never stay open.
			let currentBlock: TextContent | ThinkingContent | null = null;
			const blocks = output.content;
			// Index of the most recently pushed content block.
			const blockIndex = () => blocks.length - 1;
			// Read SSE stream
			const reader = response.body.getReader();
			const decoder = new TextDecoder();
			// Holds the trailing partial line between reads.
			// NOTE(review): decoder.decode() is never flushed and a final line
			// lacking a trailing "\n" would remain in `buffer` — fine for
			// well-formed SSE (events end with newlines), but worth confirming.
			let buffer = "";
			while (true) {
				const { done, value } = await reader.read();
				if (done) break;
				buffer += decoder.decode(value, { stream: true });
				const lines = buffer.split("\n");
				buffer = lines.pop() || "";
				for (const line of lines) {
					// Only SSE data lines carry payloads; skip comments/blank lines.
					if (!line.startsWith("data:")) continue;
					const jsonStr = line.slice(5).trim();
					if (!jsonStr) continue;
					let chunk: CloudCodeAssistResponseChunk;
					try {
						chunk = JSON.parse(jsonStr);
					} catch {
						// Skip malformed JSON lines rather than aborting the stream.
						continue;
					}
					// Unwrap the response
					const responseData = chunk.response;
					if (!responseData) continue;
					// Only the first candidate is consumed.
					const candidate = responseData.candidates?.[0];
					if (candidate?.content?.parts) {
						for (const part of candidate.content.parts) {
							if (part.text !== undefined) {
								const isThinking = part.thought === true;
								// Close the open block when the part kind changes
								// (text ↔ thinking), then open a matching new block.
								if (
									!currentBlock ||
									(isThinking && currentBlock.type !== "thinking") ||
									(!isThinking && currentBlock.type !== "text")
								) {
									if (currentBlock) {
										if (currentBlock.type === "text") {
											stream.push({
												type: "text_end",
												contentIndex: blocks.length - 1,
												content: currentBlock.text,
												partial: output,
											});
										} else {
											stream.push({
												type: "thinking_end",
												contentIndex: blockIndex(),
												content: currentBlock.thinking,
												partial: output,
											});
										}
									}
									if (isThinking) {
										currentBlock = { type: "thinking", thinking: "", thinkingSignature: undefined };
										output.content.push(currentBlock);
										stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output });
									} else {
										currentBlock = { type: "text", text: "" };
										output.content.push(currentBlock);
										stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
									}
								}
								// Append the delta to the open block and emit it.
								if (currentBlock.type === "thinking") {
									currentBlock.thinking += part.text;
									// The latest signature wins; earlier ones are overwritten.
									currentBlock.thinkingSignature = part.thoughtSignature;
									stream.push({
										type: "thinking_delta",
										contentIndex: blockIndex(),
										delta: part.text,
										partial: output,
									});
								} else {
									currentBlock.text += part.text;
									stream.push({
										type: "text_delta",
										contentIndex: blockIndex(),
										delta: part.text,
										partial: output,
									});
								}
							}
							if (part.functionCall) {
								// A tool call terminates any open text/thinking block.
								if (currentBlock) {
									if (currentBlock.type === "text") {
										stream.push({
											type: "text_end",
											contentIndex: blockIndex(),
											content: currentBlock.text,
											partial: output,
										});
									} else {
										stream.push({
											type: "thinking_end",
											contentIndex: blockIndex(),
											content: currentBlock.thinking,
											partial: output,
										});
									}
									currentBlock = null;
								}
								// The API may omit IDs or repeat one already used in this
								// message; synthesize a unique ID in either case.
								const providedId = part.functionCall.id;
								const needsNewId =
									!providedId || output.content.some((b) => b.type === "toolCall" && b.id === providedId);
								const toolCallId = needsNewId
									? `${part.functionCall.name}_${Date.now()}_${++toolCallCounter}`
									: providedId;
								const toolCall: ToolCall = {
									type: "toolCall",
									id: toolCallId,
									name: part.functionCall.name || "",
									arguments: part.functionCall.args as Record<string, unknown>,
									...(part.thoughtSignature && { thoughtSignature: part.thoughtSignature }),
								};
								output.content.push(toolCall);
								// Tool calls arrive whole, so start/delta/end go out back-to-back.
								stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output });
								stream.push({
									type: "toolcall_delta",
									contentIndex: blockIndex(),
									delta: JSON.stringify(toolCall.arguments),
									partial: output,
								});
								stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
							}
						}
					}
					if (candidate?.finishReason) {
						output.stopReason = mapStopReasonString(candidate.finishReason);
						// The presence of any tool call overrides the reported reason.
						if (output.content.some((b) => b.type === "toolCall")) {
							output.stopReason = "toolUse";
						}
					}
					if (responseData.usageMetadata) {
						// Usage chunks replace (not accumulate) prior values; thought
						// tokens are counted as output tokens.
						output.usage = {
							input: responseData.usageMetadata.promptTokenCount || 0,
							output:
								(responseData.usageMetadata.candidatesTokenCount || 0) +
								(responseData.usageMetadata.thoughtsTokenCount || 0),
							cacheRead: responseData.usageMetadata.cachedContentTokenCount || 0,
							cacheWrite: 0,
							totalTokens: responseData.usageMetadata.totalTokenCount || 0,
							cost: {
								input: 0,
								output: 0,
								cacheRead: 0,
								cacheWrite: 0,
								total: 0,
							},
						};
						// Fills in output.usage.cost from the model's pricing.
						calculateCost(model, output.usage);
					}
				}
			}
			// Close whatever block was still open when the stream ended.
			if (currentBlock) {
				if (currentBlock.type === "text") {
					stream.push({
						type: "text_end",
						contentIndex: blockIndex(),
						content: currentBlock.text,
						partial: output,
					});
				} else {
					stream.push({
						type: "thinking_end",
						contentIndex: blockIndex(),
						content: currentBlock.thinking,
						partial: output,
					});
				}
			}
			if (options?.signal?.aborted) {
				throw new Error("Request was aborted");
			}
			if (output.stopReason === "aborted" || output.stopReason === "error") {
				throw new Error("An unknown error occurred");
			}
			stream.push({ type: "done", reason: output.stopReason, message: output });
			stream.end();
		} catch (error) {
			// Strip transient `index` bookkeeping from partial blocks.
			// NOTE(review): nothing above sets `index` — possibly defensive
			// copy-paste from a sibling provider; confirm it is needed.
			for (const block of output.content) {
				if ("index" in block) {
					delete (block as { index?: number }).index;
				}
			}
			output.stopReason = options?.signal?.aborted ? "aborted" : "error";
			output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
			stream.push({ type: "error", reason: output.stopReason, error: output });
			stream.end();
		}
	})();
	return stream;
};
function buildRequest(
model: Model<"google-cloud-code-assist">,
context: Context,
projectId: string,
options: GoogleCloudCodeAssistOptions = {},
): CloudCodeAssistRequest {
const contents = convertMessages(model, context);
const generationConfig: CloudCodeAssistRequest["request"]["generationConfig"] = {};
if (options.temperature !== undefined) {
generationConfig.temperature = options.temperature;
}
if (options.maxTokens !== undefined) {
generationConfig.maxOutputTokens = options.maxTokens;
}
// Thinking config
if (options.thinking?.enabled && model.reasoning) {
generationConfig.thinkingConfig = {
includeThoughts: true,
};
if (options.thinking.budgetTokens !== undefined) {
generationConfig.thinkingConfig.thinkingBudget = options.thinking.budgetTokens;
}
}
const request: CloudCodeAssistRequest["request"] = {
contents,
};
// System instruction must be object with parts, not plain string
if (context.systemPrompt) {
request.systemInstruction = {
parts: [{ text: sanitizeSurrogates(context.systemPrompt) }],
};
}
if (Object.keys(generationConfig).length > 0) {
request.generationConfig = generationConfig;
}
if (context.tools && context.tools.length > 0) {
request.tools = convertTools(context.tools);
if (options.toolChoice) {
request.toolConfig = {
functionCallingConfig: {
mode: mapToolChoice(options.toolChoice),
},
};
}
}
return {
project: projectId,
model: model.id,
request,
userAgent: "pi-coding-agent",
requestId: `pi-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`,
};
}

View file

@@ -0,0 +1,203 @@
/**
* Shared utilities for Google Generative AI and Google Cloud Code Assist providers.
*/
import { type Content, FinishReason, FunctionCallingConfigMode, type Part, type Schema } from "@google/genai";
import type { Context, ImageContent, Model, StopReason, TextContent, Tool } from "../types.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { transformMessages } from "./transorm-messages.js";
type GoogleApiType = "google-generative-ai" | "google-cloud-code-assist";
/**
 * Convert internal messages to Gemini Content[] format.
 *
 * Handles the three internal roles:
 *  - "user": plain string, or text/image blocks (images are dropped for
 *    models without image input);
 *  - "assistant": text, thinking and toolCall blocks become Gemini parts
 *    under role "model";
 *  - "toolResult": emitted as a user-role functionResponse part, with any
 *    image blocks appended as inlineData parts.
 * Messages that end up with no parts are skipped entirely.
 */
export function convertMessages<T extends GoogleApiType>(model: Model<T>, context: Context): Content[] {
	const contents: Content[] = [];
	// Provider-agnostic message transforms run first.
	// NOTE(review): exact transform semantics live in transorm-messages.ts —
	// not visible from this file.
	const transformedMessages = transformMessages(context.messages, model);
	for (const msg of transformedMessages) {
		if (msg.role === "user") {
			if (typeof msg.content === "string") {
				contents.push({
					role: "user",
					parts: [{ text: sanitizeSurrogates(msg.content) }],
				});
			} else {
				const parts: Part[] = msg.content.map((item) => {
					if (item.type === "text") {
						return { text: sanitizeSurrogates(item.text) };
					} else {
						// Non-text blocks are images: passed through as inline data.
						return {
							inlineData: {
								mimeType: item.mimeType,
								data: item.data,
							},
						};
					}
				});
				// Text-only models receive only the text parts.
				const filteredParts = !model.input.includes("image") ? parts.filter((p) => p.text !== undefined) : parts;
				if (filteredParts.length === 0) continue;
				contents.push({
					role: "user",
					parts: filteredParts,
				});
			}
		} else if (msg.role === "assistant") {
			const parts: Part[] = [];
			for (const block of msg.content) {
				if (block.type === "text") {
					parts.push({ text: sanitizeSurrogates(block.text) });
				} else if (block.type === "thinking") {
					// Thinking blocks round-trip with their signature attached.
					const thinkingPart: Part = {
						thought: true,
						thoughtSignature: block.thinkingSignature,
						text: sanitizeSurrogates(block.thinking),
					};
					parts.push(thinkingPart);
				} else if (block.type === "toolCall") {
					const part: Part = {
						functionCall: {
							id: block.id,
							name: block.name,
							args: block.arguments,
						},
					};
					// Only attach the signature when present.
					if (block.thoughtSignature) {
						part.thoughtSignature = block.thoughtSignature;
					}
					parts.push(part);
				}
			}
			if (parts.length === 0) continue;
			contents.push({
				role: "model",
				parts,
			});
		} else if (msg.role === "toolResult") {
			// Build parts array with functionResponse and/or images
			const parts: Part[] = [];
			// Extract text and image content
			const textContent = msg.content.filter((c): c is TextContent => c.type === "text");
			const textResult = textContent.map((c) => c.text).join("\n");
			// Image results are only forwarded to image-capable models.
			const imageContent = model.input.includes("image")
				? msg.content.filter((c): c is ImageContent => c.type === "image")
				: [];
			// Always add functionResponse with text result (or placeholder if only images)
			const hasText = textResult.length > 0;
			const hasImages = imageContent.length > 0;
			// Use "output" key for success, "error" key for errors as per SDK documentation
			const responseValue = hasText ? sanitizeSurrogates(textResult) : hasImages ? "(see attached image)" : "";
			parts.push({
				functionResponse: {
					id: msg.toolCallId,
					name: msg.toolName,
					response: msg.isError ? { error: responseValue } : { output: responseValue },
				},
			});
			// Add any images as inlineData parts
			for (const imageBlock of imageContent) {
				parts.push({
					inlineData: {
						mimeType: imageBlock.mimeType,
						data: imageBlock.data,
					},
				});
			}
			contents.push({
				role: "user",
				parts,
			});
		}
	}
	return contents;
}
/**
 * Convert internal Tool definitions into Gemini's functionDeclarations
 * wrapper format.
 *
 * @param tools Tools from the conversation context.
 * @returns A single-element wrapper array, or undefined when no tools exist.
 */
export function convertTools(
	tools: Tool[],
): { functionDeclarations: { name: string; description?: string; parameters: Schema }[] }[] | undefined {
	if (tools.length === 0) {
		return undefined;
	}
	// The tool's JSON-Schema parameters are structurally compatible with the
	// SDK's Schema type, hence the cast.
	const functionDeclarations = tools.map(({ name, description, parameters }) => ({
		name,
		description,
		parameters: parameters as Schema,
	}));
	return [{ functionDeclarations }];
}
/**
 * Map a tool-choice string to the Gemini FunctionCallingConfigMode enum.
 * Unknown values fall back to AUTO, matching the permissive string input.
 */
export function mapToolChoice(choice: string): FunctionCallingConfigMode {
	if (choice === "none") {
		return FunctionCallingConfigMode.NONE;
	}
	if (choice === "any") {
		return FunctionCallingConfigMode.ANY;
	}
	// "auto" and anything unrecognized both mean: let the model decide.
	return FunctionCallingConfigMode.AUTO;
}
/**
 * Map the SDK's FinishReason enum to the internal StopReason.
 *
 * Only STOP and MAX_TOKENS are normal endings; every other member (safety
 * blocks, recitation, malformed/unexpected tool calls, …) maps to "error".
 * The default arm assigns to `never` so the compiler flags any FinishReason
 * member added by a future SDK version.
 */
export function mapStopReason(reason: FinishReason): StopReason {
	switch (reason) {
		case FinishReason.STOP:
			return "stop";
		case FinishReason.MAX_TOKENS:
			return "length";
		// All remaining reasons indicate an abnormal termination.
		case FinishReason.BLOCKLIST:
		case FinishReason.PROHIBITED_CONTENT:
		case FinishReason.SPII:
		case FinishReason.SAFETY:
		case FinishReason.IMAGE_SAFETY:
		case FinishReason.IMAGE_PROHIBITED_CONTENT:
		case FinishReason.IMAGE_RECITATION:
		case FinishReason.IMAGE_OTHER:
		case FinishReason.RECITATION:
		case FinishReason.FINISH_REASON_UNSPECIFIED:
		case FinishReason.OTHER:
		case FinishReason.LANGUAGE:
		case FinishReason.MALFORMED_FUNCTION_CALL:
		case FinishReason.UNEXPECTED_TOOL_CALL:
		case FinishReason.NO_IMAGE:
			return "error";
		default: {
			// Exhaustiveness check: fails to compile if a case is missing.
			const _exhaustive: never = reason;
			throw new Error(`Unhandled stop reason: ${_exhaustive}`);
		}
	}
}
/**
 * Map a raw string finish reason (from non-SDK API responses) to the
 * internal StopReason. Anything other than STOP / MAX_TOKENS is treated as
 * an error.
 */
export function mapStopReasonString(reason: string): StopReason {
	const known: Record<string, StopReason> = {
		STOP: "stop",
		MAX_TOKENS: "length",
	};
	return known[reason] ?? "error";
}

View file

@@ -1,12 +1,7 @@
import {
type Content,
FinishReason,
FunctionCallingConfigMode,
type GenerateContentConfig,
type GenerateContentParameters,
GoogleGenAI,
type Part,
type Schema,
type ThinkingConfig,
type ThinkingLevel,
} from "@google/genai";
@@ -15,20 +10,16 @@ import type {
Api,
AssistantMessage,
Context,
ImageContent,
Model,
StopReason,
StreamFunction,
StreamOptions,
TextContent,
ThinkingContent,
Tool,
ToolCall,
} from "../types.js";
import { AssistantMessageEventStream } from "../utils/event-stream.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { transformMessages } from "./transorm-messages.js";
import { convertMessages, convertTools, mapStopReason, mapToolChoice } from "./google-shared.js";
export interface GoogleOptions extends StreamOptions {
toolChoice?: "auto" | "none" | "any";
@@ -337,169 +328,3 @@ function buildParams(
return params;
}
/**
 * Convert internal messages to Gemini Content[] format.
 * NOTE(review): per the commit message, this helper is extracted to
 * google-shared.ts in this commit; this is the removed google.ts copy.
 *
 * Handles the three internal roles:
 *  - "user": plain string, or text/image blocks (images dropped for
 *    text-only models);
 *  - "assistant": text, thinking and toolCall blocks become Gemini parts
 *    under role "model";
 *  - "toolResult": emitted as a user-role functionResponse part, with any
 *    image blocks appended as inlineData parts.
 * Messages that end up with no parts are skipped entirely.
 */
function convertMessages(model: Model<"google-generative-ai">, context: Context): Content[] {
	const contents: Content[] = [];
	// Provider-agnostic message transforms run first (see transorm-messages.ts).
	const transformedMessages = transformMessages(context.messages, model);
	for (const msg of transformedMessages) {
		if (msg.role === "user") {
			if (typeof msg.content === "string") {
				contents.push({
					role: "user",
					parts: [{ text: sanitizeSurrogates(msg.content) }],
				});
			} else {
				const parts: Part[] = msg.content.map((item) => {
					if (item.type === "text") {
						return { text: sanitizeSurrogates(item.text) };
					} else {
						// Non-text blocks are images: passed through as inline data.
						return {
							inlineData: {
								mimeType: item.mimeType,
								data: item.data,
							},
						};
					}
				});
				// Text-only models receive only the text parts.
				const filteredParts = !model.input.includes("image") ? parts.filter((p) => p.text !== undefined) : parts;
				if (filteredParts.length === 0) continue;
				contents.push({
					role: "user",
					parts: filteredParts,
				});
			}
		} else if (msg.role === "assistant") {
			const parts: Part[] = [];
			for (const block of msg.content) {
				if (block.type === "text") {
					parts.push({ text: sanitizeSurrogates(block.text) });
				} else if (block.type === "thinking") {
					// Thinking blocks round-trip with their signature attached.
					const thinkingPart: Part = {
						thought: true,
						thoughtSignature: block.thinkingSignature,
						text: sanitizeSurrogates(block.thinking),
					};
					parts.push(thinkingPart);
				} else if (block.type === "toolCall") {
					const part: Part = {
						functionCall: {
							id: block.id,
							name: block.name,
							args: block.arguments,
						},
					};
					// Only attach the signature when present.
					if (block.thoughtSignature) {
						part.thoughtSignature = block.thoughtSignature;
					}
					parts.push(part);
				}
			}
			if (parts.length === 0) continue;
			contents.push({
				role: "model",
				parts,
			});
		} else if (msg.role === "toolResult") {
			// Build parts array with functionResponse and/or images
			const parts: Part[] = [];
			// Extract text and image content
			const textContent = msg.content.filter((c): c is TextContent => c.type === "text");
			const textResult = textContent.map((c) => c.text).join("\n");
			// Image results are only forwarded to image-capable models.
			const imageContent = model.input.includes("image")
				? msg.content.filter((c): c is ImageContent => c.type === "image")
				: [];
			// Always add functionResponse with text result (or placeholder if only images)
			const hasText = textResult.length > 0;
			const hasImages = imageContent.length > 0;
			// Use "output" key for success, "error" key for errors as per SDK documentation
			const responseValue = hasText ? sanitizeSurrogates(textResult) : hasImages ? "(see attached image)" : "";
			parts.push({
				functionResponse: {
					id: msg.toolCallId,
					name: msg.toolName,
					response: msg.isError ? { error: responseValue } : { output: responseValue },
				},
			});
			// Add any images as inlineData parts
			for (const imageBlock of imageContent) {
				parts.push({
					inlineData: {
						mimeType: imageBlock.mimeType,
						data: imageBlock.data,
					},
				});
			}
			contents.push({
				role: "user",
				parts,
			});
		}
	}
	return contents;
}
/**
 * Convert internal tool definitions to Gemini's functionDeclarations
 * wrapper format. Returns undefined when no tools are given.
 */
function convertTools(
	tools: Tool[],
): { functionDeclarations: { name: string; description?: string; parameters: Schema }[] }[] | undefined {
	if (!tools.length) {
		return undefined;
	}
	// TypeBox-generated JSON Schema is structurally compatible with the SDK's
	// Schema type, hence the cast.
	const declarations = tools.map((t) => ({
		name: t.name,
		description: t.description,
		parameters: t.parameters as Schema,
	}));
	return [{ functionDeclarations: declarations }];
}
/**
 * Map a tool-choice string to the Gemini FunctionCallingConfigMode enum;
 * "auto" and unrecognized values both resolve to AUTO.
 */
function mapToolChoice(choice: string): FunctionCallingConfigMode {
	return choice === "none"
		? FunctionCallingConfigMode.NONE
		: choice === "any"
			? FunctionCallingConfigMode.ANY
			: FunctionCallingConfigMode.AUTO;
}
/**
 * Map the SDK's FinishReason enum to the internal StopReason.
 *
 * Only STOP and MAX_TOKENS are normal endings; every other member (safety
 * blocks, recitation, malformed/unexpected tool calls, …) maps to "error".
 * The default arm assigns to `never` so the compiler flags any FinishReason
 * member added by a future SDK version.
 */
function mapStopReason(reason: FinishReason): StopReason {
	switch (reason) {
		case FinishReason.STOP:
			return "stop";
		case FinishReason.MAX_TOKENS:
			return "length";
		// All remaining reasons indicate an abnormal termination.
		case FinishReason.BLOCKLIST:
		case FinishReason.PROHIBITED_CONTENT:
		case FinishReason.SPII:
		case FinishReason.SAFETY:
		case FinishReason.IMAGE_SAFETY:
		case FinishReason.IMAGE_PROHIBITED_CONTENT:
		case FinishReason.IMAGE_RECITATION:
		case FinishReason.IMAGE_OTHER:
		case FinishReason.RECITATION:
		case FinishReason.FINISH_REASON_UNSPECIFIED:
		case FinishReason.OTHER:
		case FinishReason.LANGUAGE:
		case FinishReason.MALFORMED_FUNCTION_CALL:
		case FinishReason.UNEXPECTED_TOOL_CALL:
		case FinishReason.NO_IMAGE:
			return "error";
		default: {
			// Exhaustiveness check: fails to compile if a case is missing.
			const _exhaustive: never = reason;
			throw new Error(`Unhandled stop reason: ${_exhaustive}`);
		}
	}
}