fix(ai): Use API type instead of model for message compatibility checks

- Add getApi() method to all providers to identify the API type
- Add api field to AssistantMessage to track which API generated it
- Update transformMessages to check API compatibility instead of model
- Fixes issue where OpenAI Responses API failed when switching models
- Preserves thinking blocks and signatures when staying within same API
This commit is contained in:
Mario Zechner 2025-09-02 00:20:06 +02:00
parent 3007b7a5ac
commit 2cfd8ff3c3
6 changed files with 46 additions and 11 deletions

View file

@@ -51,10 +51,15 @@ export class OpenAIResponsesLLM implements LLM<OpenAIResponsesLLMOptions> {
return this.modelInfo;
}
/**
 * Returns the stable identifier of the wire API this provider speaks.
 *
 * Passed to `transformMessages` (see the call below) so message
 * compatibility is decided by API type rather than by model id —
 * per this commit, thinking blocks and signatures are preserved when
 * switching models within the same API. Also stamped onto each
 * generated `AssistantMessage` via the `api` field.
 */
getApi(): string {
return "openai-responses";
}
async generate(request: Context, options?: OpenAIResponsesLLMOptions): Promise<AssistantMessage> {
const output: AssistantMessage = {
role: "assistant",
content: [],
api: this.getApi(),
provider: this.modelInfo.provider,
model: this.modelInfo.id,
usage: {
@@ -287,7 +292,7 @@ export class OpenAIResponsesLLM implements LLM<OpenAIResponsesLLMOptions> {
const input: ResponseInput = [];
// Transform messages for cross-provider compatibility
const transformedMessages = transformMessages(messages, this.modelInfo);
const transformedMessages = transformMessages(messages, this.modelInfo, this.getApi());
// Add system prompt if provided
if (systemPrompt) {
@@ -324,9 +329,12 @@ export class OpenAIResponsesLLM implements LLM<OpenAIResponsesLLMOptions> {
} satisfies ResponseInputImage;
}
});
const filteredContent = !this.modelInfo?.input.includes("image")
? content.filter((c) => c.type !== "input_image")
: content;
input.push({
role: "user",
content,
content: filteredContent,
});
}
} else if (msg.role === "assistant") {