mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-18 20:03:33 +00:00
feat(ai): add Google Cloud Code Assist provider
- Add new API type 'google-cloud-code-assist' for Gemini CLI / Antigravity auth
- Extract shared Google utilities to google-shared.ts
- Implement streaming provider for Cloud Code Assist endpoint
- Add 7 models: gemini-3-pro-high/low, gemini-3-flash, claude-sonnet/opus, gpt-oss

Models use OAuth authentication; their cost comes out of the Google account quota. The OAuth flow will be implemented in coding-agent in a follow-up.
This commit is contained in:
parent
04dcdebbc6
commit
36e17933d5
10 changed files with 1208 additions and 178 deletions
packages/ai/src/providers/google-shared.ts (new file, +203 lines)
@@ -0,0 +1,203 @@
/**
 * Shared utilities for Google Generative AI and Google Cloud Code Assist providers.
 */

import { type Content, FinishReason, FunctionCallingConfigMode, type Part, type Schema } from "@google/genai";
import type { Context, ImageContent, Model, StopReason, TextContent, Tool } from "../types.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { transformMessages } from "./transorm-messages.js";

type GoogleApiType = "google-generative-ai" | "google-cloud-code-assist";

/**
 * Convert internal messages to Gemini Content[] format.
 */
export function convertMessages<T extends GoogleApiType>(model: Model<T>, context: Context): Content[] {
	const contents: Content[] = [];
	const transformedMessages = transformMessages(context.messages, model);

	for (const msg of transformedMessages) {
		if (msg.role === "user") {
			if (typeof msg.content === "string") {
				contents.push({
					role: "user",
					parts: [{ text: sanitizeSurrogates(msg.content) }],
				});
			} else {
				const parts: Part[] = msg.content.map((item) => {
					if (item.type === "text") {
						return { text: sanitizeSurrogates(item.text) };
					} else {
						return {
							inlineData: {
								mimeType: item.mimeType,
								data: item.data,
							},
						};
					}
				});
				const filteredParts = !model.input.includes("image") ? parts.filter((p) => p.text !== undefined) : parts;
				if (filteredParts.length === 0) continue;
				contents.push({
					role: "user",
					parts: filteredParts,
				});
			}
		} else if (msg.role === "assistant") {
			const parts: Part[] = [];

			for (const block of msg.content) {
				if (block.type === "text") {
					parts.push({ text: sanitizeSurrogates(block.text) });
				} else if (block.type === "thinking") {
					const thinkingPart: Part = {
						thought: true,
						thoughtSignature: block.thinkingSignature,
						text: sanitizeSurrogates(block.thinking),
					};
					parts.push(thinkingPart);
				} else if (block.type === "toolCall") {
					const part: Part = {
						functionCall: {
							id: block.id,
							name: block.name,
							args: block.arguments,
						},
					};
					if (block.thoughtSignature) {
						part.thoughtSignature = block.thoughtSignature;
					}
					parts.push(part);
				}
			}

			if (parts.length === 0) continue;
			contents.push({
				role: "model",
				parts,
			});
		} else if (msg.role === "toolResult") {
			// Build parts array with functionResponse and/or images
			const parts: Part[] = [];

			// Extract text and image content
			const textContent = msg.content.filter((c): c is TextContent => c.type === "text");
			const textResult = textContent.map((c) => c.text).join("\n");
			const imageContent = model.input.includes("image")
				? msg.content.filter((c): c is ImageContent => c.type === "image")
				: [];

			// Always add functionResponse with text result (or placeholder if only images)
			const hasText = textResult.length > 0;
			const hasImages = imageContent.length > 0;

			// Use "output" key for success, "error" key for errors as per SDK documentation
			const responseValue = hasText ? sanitizeSurrogates(textResult) : hasImages ? "(see attached image)" : "";

			parts.push({
				functionResponse: {
					id: msg.toolCallId,
					name: msg.toolName,
					response: msg.isError ? { error: responseValue } : { output: responseValue },
				},
			});

			// Add any images as inlineData parts
			for (const imageBlock of imageContent) {
				parts.push({
					inlineData: {
						mimeType: imageBlock.mimeType,
						data: imageBlock.data,
					},
				});
			}

			contents.push({
				role: "user",
				parts,
			});
		}
	}

	return contents;
}

/**
 * Convert tools to Gemini function declarations format.
 */
export function convertTools(
	tools: Tool[],
): { functionDeclarations: { name: string; description?: string; parameters: Schema }[] }[] | undefined {
	if (tools.length === 0) return undefined;
	return [
		{
			functionDeclarations: tools.map((tool) => ({
				name: tool.name,
				description: tool.description,
				parameters: tool.parameters as Schema,
			})),
		},
	];
}

/**
 * Map tool choice string to Gemini FunctionCallingConfigMode.
 */
export function mapToolChoice(choice: string): FunctionCallingConfigMode {
	switch (choice) {
		case "auto":
			return FunctionCallingConfigMode.AUTO;
		case "none":
			return FunctionCallingConfigMode.NONE;
		case "any":
			return FunctionCallingConfigMode.ANY;
		default:
			return FunctionCallingConfigMode.AUTO;
	}
}

/**
 * Map Gemini FinishReason to our StopReason.
 */
export function mapStopReason(reason: FinishReason): StopReason {
	switch (reason) {
		case FinishReason.STOP:
			return "stop";
		case FinishReason.MAX_TOKENS:
			return "length";
		case FinishReason.BLOCKLIST:
		case FinishReason.PROHIBITED_CONTENT:
		case FinishReason.SPII:
		case FinishReason.SAFETY:
		case FinishReason.IMAGE_SAFETY:
		case FinishReason.IMAGE_PROHIBITED_CONTENT:
		case FinishReason.IMAGE_RECITATION:
		case FinishReason.IMAGE_OTHER:
		case FinishReason.RECITATION:
		case FinishReason.FINISH_REASON_UNSPECIFIED:
		case FinishReason.OTHER:
		case FinishReason.LANGUAGE:
		case FinishReason.MALFORMED_FUNCTION_CALL:
		case FinishReason.UNEXPECTED_TOOL_CALL:
		case FinishReason.NO_IMAGE:
			return "error";
		default: {
			const _exhaustive: never = reason;
			throw new Error(`Unhandled stop reason: ${_exhaustive}`);
		}
	}
}

/**
 * Map string finish reason to our StopReason (for raw API responses).
 */
export function mapStopReasonString(reason: string): StopReason {
	switch (reason) {
		case "STOP":
			return "stop";
		case "MAX_TOKENS":
			return "length";
		default:
			return "error";
	}
}
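
For context, a provider built on these shared helpers could wire them into the @google/genai client roughly as in the sketch below. This is not part of the commit: the generateOnce helper, the model.id and context.tools accesses, and the API-key plumbing are assumptions about the surrounding types, and the actual Cloud Code Assist provider streams against its own endpoint rather than calling ai.models.generateContent directly.

// Hypothetical glue code (not in this commit): feed the shared helpers into the
// plain Generative AI API type via the @google/genai SDK.
import { FinishReason, GoogleGenAI } from "@google/genai";
import type { Context, Model, StopReason } from "../types.js";
import { convertMessages, convertTools, mapStopReason, mapToolChoice } from "./google-shared.js";

async function generateOnce(
	model: Model<"google-generative-ai">,
	context: Context,
	apiKey: string,
): Promise<{ text: string; stopReason: StopReason }> {
	const ai = new GoogleGenAI({ apiKey });

	const response = await ai.models.generateContent({
		model: model.id, // assumption: the internal Model type exposes the provider model id as `id`
		contents: convertMessages(model, context),
		config: {
			tools: convertTools(context.tools ?? []), // assumption: tools are carried on the Context
			toolConfig: {
				functionCallingConfig: { mode: mapToolChoice("auto") },
			},
		},
	});

	// Collapse the first candidate into plain text plus a normalized stop reason.
	const finish = response.candidates?.[0]?.finishReason ?? FinishReason.OTHER;
	return { text: response.text ?? "", stopReason: mapStopReason(finish) };
}

mapStopReasonString covers code paths that receive the finish reason as a raw string (for example, hand-rolled requests against the Cloud Code Assist endpoint) rather than as the SDK's FinishReason enum.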