mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-21 15:01:26 +00:00
feat(ai): add strictResponsesPairing for Azure OpenAI Responses API
Split OpenAICompat into OpenAICompletionsCompat and OpenAIResponsesCompat for type-safe API-specific compat settings. Added strictResponsesPairing option to suppress orphaned reasoning/tool calls on incomplete turns, fixing 400 errors on Azure's Responses API which requires strict pairing. Closes #768
This commit is contained in:
parent
def9e4e9a9
commit
d43930c818
17 changed files with 112 additions and 23 deletions
|
|
@@ -110,8 +110,10 @@ describe("AI Providers Abort Tests", () => {
|
|||
});
|
||||
|
||||
describe.skipIf(!process.env.OPENAI_API_KEY)("OpenAI Completions Provider Abort", () => {
|
||||
const { compat: _compat, ...baseModel } = getModel("openai", "gpt-4o-mini")!;
|
||||
void _compat;
|
||||
const llm: Model<"openai-completions"> = {
|
||||
...getModel("openai", "gpt-4o-mini")!,
|
||||
...baseModel,
|
||||
api: "openai-completions",
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@@ -466,7 +466,12 @@ describe("Cross-Provider Handoff Tests", () => {
|
|||
});
|
||||
|
||||
describe.skipIf(!process.env.OPENAI_API_KEY)("OpenAI Completions Provider Handoff", () => {
|
||||
const model: Model<"openai-completions"> = { ...getModel("openai", "gpt-4o-mini"), api: "openai-completions" };
|
||||
const { compat: _compat, ...baseModel } = getModel("openai", "gpt-4o-mini");
|
||||
void _compat;
|
||||
const model: Model<"openai-completions"> = {
|
||||
...baseModel,
|
||||
api: "openai-completions",
|
||||
};
|
||||
|
||||
it("should handle contexts from all providers", async () => {
|
||||
console.log("\nTesting OpenAI Completions with pre-built contexts:\n");
|
||||
|
|
|
|||
|
|
@@ -356,7 +356,12 @@ describe("Image Limits E2E Tests", () => {
|
|||
// Limits: 500 images, ~20MB per image (documented)
|
||||
// -------------------------------------------------------------------------
|
||||
describe.skipIf(!process.env.OPENAI_API_KEY)("OpenAI (gpt-4o-mini)", () => {
|
||||
const model: Model<"openai-completions"> = { ...getModel("openai", "gpt-4o-mini"), api: "openai-completions" };
|
||||
const { compat: _compat, ...baseModel } = getModel("openai", "gpt-4o-mini");
|
||||
void _compat;
|
||||
const model: Model<"openai-completions"> = {
|
||||
...baseModel,
|
||||
api: "openai-completions",
|
||||
};
|
||||
|
||||
it("should accept a small number of images (5)", async () => {
|
||||
const result = await testImageCount(model, 5, smallImage);
|
||||
|
|
|
|||
|
|
@@ -215,7 +215,12 @@ describe("Tool Results with Images", () => {
|
|||
});
|
||||
|
||||
describe.skipIf(!process.env.OPENAI_API_KEY)("OpenAI Completions Provider (gpt-4o-mini)", () => {
|
||||
const llm: Model<"openai-completions"> = { ...getModel("openai", "gpt-4o-mini"), api: "openai-completions" };
|
||||
const { compat: _compat, ...baseModel } = getModel("openai", "gpt-4o-mini");
|
||||
void _compat;
|
||||
const llm: Model<"openai-completions"> = {
|
||||
...baseModel,
|
||||
api: "openai-completions",
|
||||
};
|
||||
|
||||
it("should handle tool result with only image", { retry: 3, timeout: 30000 }, async () => {
|
||||
await handleToolWithImageResult(llm);
|
||||
|
|
|
|||
|
|
@@ -411,7 +411,12 @@ describe("Generate E2E Tests", () => {
|
|||
});
|
||||
|
||||
describe.skipIf(!process.env.OPENAI_API_KEY)("OpenAI Completions Provider (gpt-4o-mini)", () => {
|
||||
const llm: Model<"openai-completions"> = { ...getModel("openai", "gpt-4o-mini"), api: "openai-completions" };
|
||||
const { compat: _compat, ...baseModel } = getModel("openai", "gpt-4o-mini");
|
||||
void _compat;
|
||||
const llm: Model<"openai-completions"> = {
|
||||
...baseModel,
|
||||
api: "openai-completions",
|
||||
};
|
||||
|
||||
it("should complete basic text generation", { retry: 3 }, async () => {
|
||||
await basicTextGeneration(llm);
|
||||
|
|
|
|||
|
|
@@ -86,8 +86,10 @@ describe("Token Statistics on Abort", () => {
|
|||
});
|
||||
|
||||
describe.skipIf(!process.env.OPENAI_API_KEY)("OpenAI Completions Provider", () => {
|
||||
const { compat: _compat, ...baseModel } = getModel("openai", "gpt-4o-mini")!;
|
||||
void _compat;
|
||||
const llm: Model<"openai-completions"> = {
|
||||
...getModel("openai", "gpt-4o-mini")!,
|
||||
...baseModel,
|
||||
api: "openai-completions",
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@@ -105,8 +105,10 @@ describe("Tool Call Without Result Tests", () => {
|
|||
});
|
||||
|
||||
describe.skipIf(!process.env.OPENAI_API_KEY)("OpenAI Completions Provider", () => {
|
||||
const { compat: _compat, ...baseModel } = getModel("openai", "gpt-4o-mini")!;
|
||||
void _compat;
|
||||
const model: Model<"openai-completions"> = {
|
||||
...getModel("openai", "gpt-4o-mini")!,
|
||||
...baseModel,
|
||||
api: "openai-completions",
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@@ -155,8 +155,10 @@ describe("totalTokens field", () => {
|
|||
"gpt-4o-mini - should return totalTokens equal to sum of components",
|
||||
{ retry: 3, timeout: 60000 },
|
||||
async () => {
|
||||
const { compat: _compat, ...baseModel } = getModel("openai", "gpt-4o-mini")!;
|
||||
void _compat;
|
||||
const llm: Model<"openai-completions"> = {
|
||||
...getModel("openai", "gpt-4o-mini")!,
|
||||
...baseModel,
|
||||
api: "openai-completions",
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@@ -51,8 +51,10 @@ describe.skipIf(!process.env.OPENAI_API_KEY)("xhigh reasoning", () => {
|
|||
});
|
||||
|
||||
it("should error with openai-completions when using xhigh", async () => {
|
||||
const { compat: _compat, ...baseModel } = getModel("openai", "gpt-5-mini");
|
||||
void _compat;
|
||||
const model: Model<"openai-completions"> = {
|
||||
...getModel("openai", "gpt-5-mini"),
|
||||
...baseModel,
|
||||
api: "openai-completions",
|
||||
};
|
||||
const s = stream(model, makeContext(), { reasoningEffort: "xhigh" });
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue