fix(ai): Use API type instead of model for message compatibility checks

- Add getApi() method to all providers to identify the API type
- Add api field to AssistantMessage to track which API generated it
- Update transformMessages to check API compatibility instead of model
- Fixes issue where OpenAI Responses API failed when switching models
- Preserves thinking blocks and signatures when staying within same API
This commit is contained in:
Mario Zechner 2025-09-02 00:20:06 +02:00
parent 3007b7a5ac
commit 2cfd8ff3c3
6 changed files with 46 additions and 11 deletions

View file

@@ -6,6 +6,7 @@ import {
type GenerateContentParameters,
GoogleGenAI,
type Part,
setDefaultBaseUrls,
} from "@google/genai";
import { calculateCost } from "../models.js";
import type {
@@ -52,10 +53,15 @@ export class GoogleLLM implements LLM<GoogleLLMOptions> {
return this.modelInfo;
}
/**
 * Returns the identifier of the underlying API this provider speaks.
 * Used by transformMessages to decide whether assistant messages
 * (e.g. thinking blocks and signatures) are compatible across model switches.
 */
getApi(): string {
	// Stable identifier for the Google Generative AI API.
	const apiIdentifier = "google-generative-ai";
	return apiIdentifier;
}
async generate(context: Context, options?: GoogleLLMOptions): Promise<AssistantMessage> {
const output: AssistantMessage = {
role: "assistant",
content: [],
api: this.getApi(),
provider: this.modelInfo.provider,
model: this.modelInfo.id,
usage: {
@@ -247,7 +253,7 @@ export class GoogleLLM implements LLM<GoogleLLMOptions> {
const contents: Content[] = [];
// Transform messages for cross-provider compatibility
const transformedMessages = transformMessages(messages, this.modelInfo);
const transformedMessages = transformMessages(messages, this.modelInfo, this.getApi());
for (const msg of transformedMessages) {
if (msg.role === "user") {
@@ -272,9 +278,12 @@ export class GoogleLLM implements LLM<GoogleLLMOptions> {
};
}
});
const filteredParts = !this.modelInfo?.input.includes("image")
? parts.filter((p) => p.text !== undefined)
: parts;
contents.push({
role: "user",
parts,
parts: filteredParts,
});
}
} else if (msg.role === "assistant") {