Fix 422 error with gatewayz.ai provider by adding `supportsStreamOptions` compat flag

This commit is contained in:
xes garcia 2026-01-09 10:58:05 +01:00
parent 5eb53cdb9e
commit 732d46123b
3 changed files with 13 additions and 2 deletions

View file

@ -2,6 +2,9 @@
## [Unreleased]
### Fixed
- Fixed 422 error with non-standard providers (gatewayz.ai, chutes.ai, etc.) by adding `supportsStreamOptions` compatibility flag and conditionally sending `stream_options` parameter.
## [0.42.0] - 2026-01-09
### Added

View file

@ -367,9 +367,12 @@ function buildParams(model: Model<"openai-completions">, context: Context, optio
model: model.id,
messages,
stream: true,
stream_options: { include_usage: true },
};
if (compat.supportsStreamOptions) {
(params as any).stream_options = { include_usage: true };
}
if (compat.supportsStore) {
params.store = false;
}
@ -641,7 +644,8 @@ function detectCompatFromUrl(baseUrl: string): Required<OpenAICompat> {
baseUrl.includes("cerebras.ai") ||
baseUrl.includes("api.x.ai") ||
baseUrl.includes("mistral.ai") ||
baseUrl.includes("chutes.ai");
baseUrl.includes("chutes.ai") ||
baseUrl.includes("gatewayz.ai");
const useMaxTokens = baseUrl.includes("mistral.ai") || baseUrl.includes("chutes.ai");
@ -653,6 +657,7 @@ function detectCompatFromUrl(baseUrl: string): Required<OpenAICompat> {
supportsStore: !isNonStandard,
supportsDeveloperRole: !isNonStandard,
supportsReasoningEffort: !isGrok,
supportsStreamOptions: !isNonStandard,
maxTokensField: useMaxTokens ? "max_tokens" : "max_completion_tokens",
requiresToolResultName: isMistral,
requiresAssistantAfterToolResult: false, // Mistral no longer requires this as of Dec 2024
@ -673,6 +678,7 @@ function getCompat(model: Model<"openai-completions">): Required<OpenAICompat> {
supportsStore: model.compat.supportsStore ?? detected.supportsStore,
supportsDeveloperRole: model.compat.supportsDeveloperRole ?? detected.supportsDeveloperRole,
supportsReasoningEffort: model.compat.supportsReasoningEffort ?? detected.supportsReasoningEffort,
supportsStreamOptions: model.compat.supportsStreamOptions ?? detected.supportsStreamOptions,
maxTokensField: model.compat.maxTokensField ?? detected.maxTokensField,
requiresToolResultName: model.compat.requiresToolResultName ?? detected.requiresToolResultName,
requiresAssistantAfterToolResult:

View file

@ -207,6 +207,8 @@ export interface OpenAICompat {
supportsDeveloperRole?: boolean;
/** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */
supportsReasoningEffort?: boolean;
/** Whether the provider supports `stream_options`. Default: auto-detected from URL. */
supportsStreamOptions?: boolean;
/** Which field to use for max tokens. Default: auto-detected from URL. */
maxTokensField?: "max_completion_tokens" | "max_tokens";
/** Whether tool results require the `name` field. Default: auto-detected from URL. */