From 085c378d34679daf36b1165bc26686ed3f9185c3 Mon Sep 17 00:00:00 2001 From: Markus Ylisiurunen Date: Wed, 21 Jan 2026 22:28:34 +0200 Subject: [PATCH] add Azure deployment name map and refresh generated models --- packages/ai/README.md | 4 +- .../src/providers/azure-openai-responses.ts | 23 ++++++++- packages/ai/test/abort.test.ts | 4 +- packages/ai/test/azure-utils.ts | 19 ++++++++ packages/ai/test/empty.test.ts | 4 +- packages/ai/test/image-tool-result.test.ts | 4 +- packages/ai/test/stream.test.ts | 4 +- packages/ai/test/tokens.test.ts | 4 +- .../ai/test/tool-call-without-result.test.ts | 4 +- packages/ai/test/total-tokens.test.ts | 4 +- packages/ai/test/unicode-surrogate.test.ts | 4 +- packages/coding-agent/README.md | 2 +- packages/coding-agent/src/cli/args.ts | 48 +++++++++---------- 13 files changed, 84 insertions(+), 44 deletions(-) diff --git a/packages/ai/README.md b/packages/ai/README.md index e936d55f..1329dca6 100644 --- a/packages/ai/README.md +++ b/packages/ai/README.md @@ -875,7 +875,7 @@ In Node.js environments, you can set environment variables to avoid passing API | Provider | Environment Variable(s) | |----------|------------------------| | OpenAI | `OPENAI_API_KEY` | -| Azure OpenAI | `AZURE_OPENAI_API_KEY` + `AZURE_OPENAI_BASE_URL` or `AZURE_OPENAI_RESOURCE_NAME` (optional `AZURE_OPENAI_API_VERSION`, `AZURE_OPENAI_DEPLOYMENT_NAME`) | +| Azure OpenAI | `AZURE_OPENAI_API_KEY` + `AZURE_OPENAI_BASE_URL` or `AZURE_OPENAI_RESOURCE_NAME` (optional `AZURE_OPENAI_API_VERSION`, `AZURE_OPENAI_DEPLOYMENT_NAME_MAP` like `model=deployment,model2=deployment2`) | | Anthropic | `ANTHROPIC_API_KEY` or `ANTHROPIC_OAUTH_TOKEN` | | Google | `GEMINI_API_KEY` | | Vertex AI | `GOOGLE_CLOUD_PROJECT` (or `GCLOUD_PROJECT`) + `GOOGLE_CLOUD_LOCATION` + ADC | @@ -1048,7 +1048,7 @@ const response = await complete(model, { **OpenAI Codex**: Requires a ChatGPT Plus or Pro subscription. 
Provides access to GPT-5.x Codex models with extended context windows and reasoning capabilities. The library automatically handles session-based prompt caching when `sessionId` is provided in stream options. -**Azure OpenAI (Responses)**: Uses the Responses API only. Set `AZURE_OPENAI_API_KEY` and either `AZURE_OPENAI_BASE_URL` or `AZURE_OPENAI_RESOURCE_NAME`. Use `AZURE_OPENAI_API_VERSION` (defaults to `v1`) to override the API version if needed. Deployment names are treated as model IDs by default, override with `azureDeploymentName` or `AZURE_OPENAI_DEPLOYMENT_NAME`. Legacy deployment-based URLs are intentionally unsupported. +**Azure OpenAI (Responses)**: Uses the Responses API only. Set `AZURE_OPENAI_API_KEY` and either `AZURE_OPENAI_BASE_URL` or `AZURE_OPENAI_RESOURCE_NAME`. Use `AZURE_OPENAI_API_VERSION` (defaults to `v1`) to override the API version if needed. Deployment names are treated as model IDs by default, override with `azureDeploymentName` or `AZURE_OPENAI_DEPLOYMENT_NAME_MAP` using comma-separated `model-id=deployment` pairs (for example `gpt-4o-mini=my-deployment,gpt-4o=prod`). Legacy deployment-based URLs are intentionally unsupported. **GitHub Copilot**: If you get "The requested model is not supported" error, enable the model manually in VS Code: open Copilot Chat, click the model selector, select the model (warning icon), and click "Enable". 
diff --git a/packages/ai/src/providers/azure-openai-responses.ts b/packages/ai/src/providers/azure-openai-responses.ts index cd566422..185e19e8 100644 --- a/packages/ai/src/providers/azure-openai-responses.ts +++ b/packages/ai/src/providers/azure-openai-responses.ts @@ -47,6 +47,27 @@ function shortHash(str: string): string { const DEFAULT_AZURE_API_VERSION = "v1"; +function parseDeploymentNameMap(value: string | undefined): Map<string, string> { + const map = new Map<string, string>(); + if (!value) return map; + for (const entry of value.split(",")) { + const trimmed = entry.trim(); + if (!trimmed) continue; + const [modelId, deploymentName] = trimmed.split("=", 2); + if (!modelId || !deploymentName) continue; + map.set(modelId.trim(), deploymentName.trim()); + } + return map; +} + +function resolveDeploymentName(model: Model<"azure-openai-responses">, options?: AzureOpenAIResponsesOptions): string { + if (options?.azureDeploymentName) { + return options.azureDeploymentName; + } + const mappedDeployment = parseDeploymentNameMap(process.env.AZURE_OPENAI_DEPLOYMENT_NAME_MAP).get(model.id); + return mappedDeployment || model.id; +} + // Azure OpenAI Responses-specific options export interface AzureOpenAIResponsesOptions extends StreamOptions { reasoningEffort?: "minimal" | "low" | "medium" | "high" | "xhigh"; @@ -69,7 +90,7 @@ export const streamAzureOpenAIResponses: StreamFunction<"azure-openai-responses" // Start async processing (async () => { - const deploymentName = options?.azureDeploymentName || process.env.AZURE_OPENAI_DEPLOYMENT_NAME || model.id; + const deploymentName = resolveDeploymentName(model, options); const output: AssistantMessage = { role: "assistant", diff --git a/packages/ai/test/abort.test.ts b/packages/ai/test/abort.test.ts index a0bf06b9..baa38cc5 100644 --- a/packages/ai/test/abort.test.ts +++ b/packages/ai/test/abort.test.ts @@ -2,7 +2,7 @@ import { describe, expect, it } from "vitest"; import { getModel } from "../src/models.js"; import { complete, stream } from 
"../src/stream.js"; import type { Api, Context, Model, OptionsForApi } from "../src/types.js"; -import { hasAzureOpenAICredentials } from "./azure-utils.js"; +import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js"; import { hasBedrockCredentials } from "./bedrock-utils.js"; import { resolveApiKey } from "./oauth.js"; @@ -142,7 +142,7 @@ describe("AI Providers Abort Tests", () => { describe.skipIf(!hasAzureOpenAICredentials())("Azure OpenAI Responses Provider Abort", () => { const llm = getModel("azure-openai-responses", "gpt-4o-mini"); - const azureDeploymentName = process.env.AZURE_OPENAI_DEPLOYMENT_NAME; + const azureDeploymentName = resolveAzureDeploymentName(llm.id); const azureOptions = azureDeploymentName ? { azureDeploymentName } : {}; it("should abort mid-stream", { retry: 3 }, async () => { diff --git a/packages/ai/test/azure-utils.ts b/packages/ai/test/azure-utils.ts index 86157fda..d83b198a 100644 --- a/packages/ai/test/azure-utils.ts +++ b/packages/ai/test/azure-utils.ts @@ -2,8 +2,27 @@ * Utility functions for Azure OpenAI tests */ +function parseDeploymentNameMap(value: string | undefined): Map<string, string> { + const map = new Map<string, string>(); + if (!value) return map; + for (const entry of value.split(",")) { + const trimmed = entry.trim(); + if (!trimmed) continue; + const [modelId, deploymentName] = trimmed.split("=", 2); + if (!modelId || !deploymentName) continue; + map.set(modelId.trim(), deploymentName.trim()); + } + return map; +} + export function hasAzureOpenAICredentials(): boolean { const hasKey = !!process.env.AZURE_OPENAI_API_KEY; const hasBaseUrl = !!(process.env.AZURE_OPENAI_BASE_URL || process.env.AZURE_OPENAI_RESOURCE_NAME); return hasKey && hasBaseUrl; } + +export function resolveAzureDeploymentName(modelId: string): string | undefined { + const mapValue = process.env.AZURE_OPENAI_DEPLOYMENT_NAME_MAP; + if (!mapValue) return undefined; + return parseDeploymentNameMap(mapValue).get(modelId); +} diff --git 
a/packages/ai/test/empty.test.ts b/packages/ai/test/empty.test.ts index fb7d0e98..3d624c44 100644 --- a/packages/ai/test/empty.test.ts +++ b/packages/ai/test/empty.test.ts @@ -2,7 +2,7 @@ import { describe, expect, it } from "vitest"; import { getModel } from "../src/models.js"; import { complete } from "../src/stream.js"; import type { Api, AssistantMessage, Context, Model, OptionsForApi, UserMessage } from "../src/types.js"; -import { hasAzureOpenAICredentials } from "./azure-utils.js"; +import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js"; import { hasBedrockCredentials } from "./bedrock-utils.js"; import { resolveApiKey } from "./oauth.js"; @@ -205,7 +205,7 @@ describe("AI Providers Empty Message Tests", () => { describe.skipIf(!hasAzureOpenAICredentials())("Azure OpenAI Responses Provider Empty Messages", () => { const llm = getModel("azure-openai-responses", "gpt-4o-mini"); - const azureDeploymentName = process.env.AZURE_OPENAI_DEPLOYMENT_NAME; + const azureDeploymentName = resolveAzureDeploymentName(llm.id); const azureOptions = azureDeploymentName ? 
{ azureDeploymentName } : {}; it("should handle empty content array", { retry: 3, timeout: 30000 }, async () => { diff --git a/packages/ai/test/image-tool-result.test.ts b/packages/ai/test/image-tool-result.test.ts index 7fb8b93c..aedd86f1 100644 --- a/packages/ai/test/image-tool-result.test.ts +++ b/packages/ai/test/image-tool-result.test.ts @@ -5,7 +5,7 @@ import { describe, expect, it } from "vitest"; import type { Api, Context, Model, Tool, ToolResultMessage } from "../src/index.js"; import { complete, getModel } from "../src/index.js"; import type { OptionsForApi } from "../src/types.js"; -import { hasAzureOpenAICredentials } from "./azure-utils.js"; +import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js"; import { hasBedrockCredentials } from "./bedrock-utils.js"; import { resolveApiKey } from "./oauth.js"; @@ -246,7 +246,7 @@ describe("Tool Results with Images", () => { describe.skipIf(!hasAzureOpenAICredentials())("Azure OpenAI Responses Provider (gpt-4o-mini)", () => { const llm = getModel("azure-openai-responses", "gpt-4o-mini"); - const azureDeploymentName = process.env.AZURE_OPENAI_DEPLOYMENT_NAME; + const azureDeploymentName = resolveAzureDeploymentName(llm.id); const azureOptions = azureDeploymentName ? 
{ azureDeploymentName } : {}; it("should handle tool result with only image", { retry: 3, timeout: 30000 }, async () => { diff --git a/packages/ai/test/stream.test.ts b/packages/ai/test/stream.test.ts index 2ad803c2..9e9b66a9 100644 --- a/packages/ai/test/stream.test.ts +++ b/packages/ai/test/stream.test.ts @@ -8,7 +8,7 @@ import { getModel } from "../src/models.js"; import { complete, stream } from "../src/stream.js"; import type { Api, Context, ImageContent, Model, OptionsForApi, Tool, ToolResultMessage } from "../src/types.js"; import { StringEnum } from "../src/utils/typebox-helpers.js"; -import { hasAzureOpenAICredentials } from "./azure-utils.js"; +import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js"; import { hasBedrockCredentials } from "./bedrock-utils.js"; import { resolveApiKey } from "./oauth.js"; @@ -509,7 +509,7 @@ describe("Generate E2E Tests", () => { describe.skipIf(!hasAzureOpenAICredentials())("Azure OpenAI Responses Provider (gpt-4o-mini)", () => { const llm = getModel("azure-openai-responses", "gpt-4o-mini"); - const azureDeploymentName = process.env.AZURE_OPENAI_DEPLOYMENT_NAME; + const azureDeploymentName = resolveAzureDeploymentName(llm.id); const azureOptions = azureDeploymentName ? 
{ azureDeploymentName } : {}; it("should complete basic text generation", { retry: 3 }, async () => { diff --git a/packages/ai/test/tokens.test.ts b/packages/ai/test/tokens.test.ts index dd5584a9..df00aa23 100644 --- a/packages/ai/test/tokens.test.ts +++ b/packages/ai/test/tokens.test.ts @@ -2,7 +2,7 @@ import { describe, expect, it } from "vitest"; import { getModel } from "../src/models.js"; import { stream } from "../src/stream.js"; import type { Api, Context, Model, OptionsForApi } from "../src/types.js"; -import { hasAzureOpenAICredentials } from "./azure-utils.js"; +import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js"; import { hasBedrockCredentials } from "./bedrock-utils.js"; import { resolveApiKey } from "./oauth.js"; @@ -111,7 +111,7 @@ describe("Token Statistics on Abort", () => { describe.skipIf(!hasAzureOpenAICredentials())("Azure OpenAI Responses Provider", () => { const llm = getModel("azure-openai-responses", "gpt-4o-mini"); - const azureDeploymentName = process.env.AZURE_OPENAI_DEPLOYMENT_NAME; + const azureDeploymentName = resolveAzureDeploymentName(llm.id); const azureOptions = azureDeploymentName ? 
{ azureDeploymentName } : {}; it("should include token stats when aborted mid-stream", { retry: 3, timeout: 30000 }, async () => { diff --git a/packages/ai/test/tool-call-without-result.test.ts b/packages/ai/test/tool-call-without-result.test.ts index 500a7588..df503638 100644 --- a/packages/ai/test/tool-call-without-result.test.ts +++ b/packages/ai/test/tool-call-without-result.test.ts @@ -3,7 +3,7 @@ import { describe, expect, it } from "vitest"; import { getModel } from "../src/models.js"; import { complete } from "../src/stream.js"; import type { Api, Context, Model, OptionsForApi, Tool } from "../src/types.js"; -import { hasAzureOpenAICredentials } from "./azure-utils.js"; +import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js"; import { hasBedrockCredentials } from "./bedrock-utils.js"; import { resolveApiKey } from "./oauth.js"; @@ -128,7 +128,7 @@ describe("Tool Call Without Result Tests", () => { describe.skipIf(!hasAzureOpenAICredentials())("Azure OpenAI Responses Provider", () => { const model = getModel("azure-openai-responses", "gpt-4o-mini"); - const azureDeploymentName = process.env.AZURE_OPENAI_DEPLOYMENT_NAME; + const azureDeploymentName = resolveAzureDeploymentName(model.id); const azureOptions = azureDeploymentName ? 
{ azureDeploymentName } : {}; it("should filter out tool calls without corresponding tool results", { retry: 3, timeout: 30000 }, async () => { diff --git a/packages/ai/test/total-tokens.test.ts b/packages/ai/test/total-tokens.test.ts index 6559b450..99ac9994 100644 --- a/packages/ai/test/total-tokens.test.ts +++ b/packages/ai/test/total-tokens.test.ts @@ -16,7 +16,7 @@ import { describe, expect, it } from "vitest"; import { getModel } from "../src/models.js"; import { complete } from "../src/stream.js"; import type { Api, Context, Model, OptionsForApi, Usage } from "../src/types.js"; -import { hasAzureOpenAICredentials } from "./azure-utils.js"; +import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js"; import { hasBedrockCredentials } from "./bedrock-utils.js"; import { resolveApiKey } from "./oauth.js"; @@ -196,7 +196,7 @@ describe("totalTokens field", () => { { retry: 3, timeout: 60000 }, async () => { const llm = getModel("azure-openai-responses", "gpt-4o-mini"); - const azureDeploymentName = process.env.AZURE_OPENAI_DEPLOYMENT_NAME; + const azureDeploymentName = resolveAzureDeploymentName(llm.id); const azureOptions = azureDeploymentName ? 
{ azureDeploymentName } : {}; console.log(`\nAzure OpenAI Responses / ${llm.id}:`); diff --git a/packages/ai/test/unicode-surrogate.test.ts b/packages/ai/test/unicode-surrogate.test.ts index 0ae02fbe..fc858f73 100644 --- a/packages/ai/test/unicode-surrogate.test.ts +++ b/packages/ai/test/unicode-surrogate.test.ts @@ -3,7 +3,7 @@ import { describe, expect, it } from "vitest"; import { getModel } from "../src/models.js"; import { complete } from "../src/stream.js"; import type { Api, Context, Model, OptionsForApi, ToolResultMessage } from "../src/types.js"; -import { hasAzureOpenAICredentials } from "./azure-utils.js"; +import { hasAzureOpenAICredentials, resolveAzureDeploymentName } from "./azure-utils.js"; import { hasBedrockCredentials } from "./bedrock-utils.js"; import { resolveApiKey } from "./oauth.js"; @@ -332,7 +332,7 @@ describe("AI Providers Unicode Surrogate Pair Tests", () => { describe.skipIf(!hasAzureOpenAICredentials())("Azure OpenAI Responses Provider Unicode Handling", () => { const llm = getModel("azure-openai-responses", "gpt-4o-mini"); - const azureDeploymentName = process.env.AZURE_OPENAI_DEPLOYMENT_NAME; + const azureDeploymentName = resolveAzureDeploymentName(llm.id); const azureOptions = azureDeploymentName ? { azureDeploymentName } : {}; it("should handle emoji in tool results", { retry: 3, timeout: 30000 }, async () => { diff --git a/packages/coding-agent/README.md b/packages/coding-agent/README.md index ce332ed2..1d30e24c 100644 --- a/packages/coding-agent/README.md +++ b/packages/coding-agent/README.md @@ -210,7 +210,7 @@ Add API keys to `~/.pi/agent/auth.json`: | MiniMax | `minimax` | `MINIMAX_API_KEY` | | MiniMax (China) | `minimax-cn` | `MINIMAX_CN_API_KEY` | -Azure OpenAI also requires `AZURE_OPENAI_BASE_URL` or `AZURE_OPENAI_RESOURCE_NAME`. Optional: `AZURE_OPENAI_API_VERSION` (defaults to `v1`) and `AZURE_OPENAI_DEPLOYMENT_NAME` to override the deployment name. 
+Azure OpenAI also requires `AZURE_OPENAI_BASE_URL` or `AZURE_OPENAI_RESOURCE_NAME`. Optional: `AZURE_OPENAI_API_VERSION` (defaults to `v1`) and `AZURE_OPENAI_DEPLOYMENT_NAME_MAP` using comma-separated `model=deployment` pairs for overrides. Auth file keys take priority over environment variables. diff --git a/packages/coding-agent/src/cli/args.ts b/packages/coding-agent/src/cli/args.ts index cfba07c1..f65fd2ab 100644 --- a/packages/coding-agent/src/cli/args.ts +++ b/packages/coding-agent/src/cli/args.ts @@ -262,30 +262,30 @@ ${chalk.bold("Examples:")} ${APP_NAME} --export session.jsonl output.html ${chalk.bold("Environment Variables:")} - ANTHROPIC_API_KEY - Anthropic Claude API key - ANTHROPIC_OAUTH_TOKEN - Anthropic OAuth token (alternative to API key) - OPENAI_API_KEY - OpenAI GPT API key - AZURE_OPENAI_API_KEY - Azure OpenAI API key - AZURE_OPENAI_BASE_URL - Azure OpenAI base URL (https://{resource}.openai.azure.com/openai/v1) - AZURE_OPENAI_RESOURCE_NAME - Azure OpenAI resource name (alternative to base URL) - AZURE_OPENAI_API_VERSION - Azure OpenAI API version (default: v1) - AZURE_OPENAI_DEPLOYMENT_NAME - Azure OpenAI deployment name override - GEMINI_API_KEY - Google Gemini API key - GROQ_API_KEY - Groq API key - CEREBRAS_API_KEY - Cerebras API key - XAI_API_KEY - xAI Grok API key - OPENROUTER_API_KEY - OpenRouter API key - AI_GATEWAY_API_KEY - Vercel AI Gateway API key - ZAI_API_KEY - ZAI API key - MISTRAL_API_KEY - Mistral API key - MINIMAX_API_KEY - MiniMax API key - AWS_PROFILE - AWS profile for Amazon Bedrock - AWS_ACCESS_KEY_ID - AWS access key for Amazon Bedrock - AWS_SECRET_ACCESS_KEY - AWS secret key for Amazon Bedrock - AWS_BEARER_TOKEN_BEDROCK - Bedrock API key (bearer token) - AWS_REGION - AWS region for Amazon Bedrock (e.g., us-east-1) - ${ENV_AGENT_DIR.padEnd(27)} - Session storage directory (default: ~/${CONFIG_DIR_NAME}/agent) - PI_SHARE_VIEWER_URL - Base URL for /share command (default: https://buildwithpi.ai/session/) + ANTHROPIC_API_KEY 
- Anthropic Claude API key + ANTHROPIC_OAUTH_TOKEN - Anthropic OAuth token (alternative to API key) + OPENAI_API_KEY - OpenAI GPT API key + AZURE_OPENAI_API_KEY - Azure OpenAI API key + AZURE_OPENAI_BASE_URL - Azure OpenAI base URL (https://{resource}.openai.azure.com/openai/v1) + AZURE_OPENAI_RESOURCE_NAME - Azure OpenAI resource name (alternative to base URL) + AZURE_OPENAI_API_VERSION - Azure OpenAI API version (default: v1) + AZURE_OPENAI_DEPLOYMENT_NAME_MAP - Azure OpenAI model=deployment map (comma-separated) + GEMINI_API_KEY - Google Gemini API key + GROQ_API_KEY - Groq API key + CEREBRAS_API_KEY - Cerebras API key + XAI_API_KEY - xAI Grok API key + OPENROUTER_API_KEY - OpenRouter API key + AI_GATEWAY_API_KEY - Vercel AI Gateway API key + ZAI_API_KEY - ZAI API key + MISTRAL_API_KEY - Mistral API key + MINIMAX_API_KEY - MiniMax API key + AWS_PROFILE - AWS profile for Amazon Bedrock + AWS_ACCESS_KEY_ID - AWS access key for Amazon Bedrock + AWS_SECRET_ACCESS_KEY - AWS secret key for Amazon Bedrock + AWS_BEARER_TOKEN_BEDROCK - Bedrock API key (bearer token) + AWS_REGION - AWS region for Amazon Bedrock (e.g., us-east-1) + ${ENV_AGENT_DIR.padEnd(32)} - Session storage directory (default: ~/${CONFIG_DIR_NAME}/agent) + PI_SHARE_VIEWER_URL - Base URL for /share command (default: https://buildwithpi.ai/session/) ${chalk.bold("Available Tools (default: read, bash, edit, write):")} read - Read file contents