mirror of
https://github.com/getcompanion-ai/co-mono.git
synced 2026-04-21 01:01:42 +00:00
Add supportsUsageInStreaming compat flag for OpenAI-compatible providers
Renamed from `supportsStreamOptions` to clarify that this flag controls `stream_options: { include_usage: true }`.
Defaults to true (no behavioral change for existing providers).
Providers like gatewayz.ai that reject this parameter can set supportsUsageInStreaming: false in model config.
Based on #596 by @XesGaDeus
This commit is contained in:
parent
732d46123b
commit
52ce113754
14 changed files with 8 additions and 9 deletions
|
|
@@ -369,7 +369,7 @@ function buildParams(model: Model<"openai-completions">, context: Context, optio
|
|||
stream: true,
|
||||
};
|
||||
|
||||
if (compat.supportsStreamOptions) {
|
||||
if (compat.supportsUsageInStreaming !== false) {
|
||||
(params as any).stream_options = { include_usage: true };
|
||||
}
|
||||
|
||||
|
|
@@ -644,8 +644,7 @@ function detectCompatFromUrl(baseUrl: string): Required<OpenAICompat> {
|
|||
baseUrl.includes("cerebras.ai") ||
|
||||
baseUrl.includes("api.x.ai") ||
|
||||
baseUrl.includes("mistral.ai") ||
|
||||
baseUrl.includes("chutes.ai") ||
|
||||
baseUrl.includes("gatewayz.ai");
|
||||
baseUrl.includes("chutes.ai");
|
||||
|
||||
const useMaxTokens = baseUrl.includes("mistral.ai") || baseUrl.includes("chutes.ai");
|
||||
|
||||
|
|
@@ -657,7 +656,7 @@ function detectCompatFromUrl(baseUrl: string): Required<OpenAICompat> {
|
|||
supportsStore: !isNonStandard,
|
||||
supportsDeveloperRole: !isNonStandard,
|
||||
supportsReasoningEffort: !isGrok,
|
||||
supportsStreamOptions: !isNonStandard,
|
||||
supportsUsageInStreaming: true,
|
||||
maxTokensField: useMaxTokens ? "max_tokens" : "max_completion_tokens",
|
||||
requiresToolResultName: isMistral,
|
||||
requiresAssistantAfterToolResult: false, // Mistral no longer requires this as of Dec 2024
|
||||
|
|
@@ -678,7 +677,7 @@ function getCompat(model: Model<"openai-completions">): Required<OpenAICompat> {
|
|||
supportsStore: model.compat.supportsStore ?? detected.supportsStore,
|
||||
supportsDeveloperRole: model.compat.supportsDeveloperRole ?? detected.supportsDeveloperRole,
|
||||
supportsReasoningEffort: model.compat.supportsReasoningEffort ?? detected.supportsReasoningEffort,
|
||||
supportsStreamOptions: model.compat.supportsStreamOptions ?? detected.supportsStreamOptions,
|
||||
supportsUsageInStreaming: model.compat.supportsUsageInStreaming ?? detected.supportsUsageInStreaming,
|
||||
maxTokensField: model.compat.maxTokensField ?? detected.maxTokensField,
|
||||
requiresToolResultName: model.compat.requiresToolResultName ?? detected.requiresToolResultName,
|
||||
requiresAssistantAfterToolResult:
|
||||
|
|
|
|||
|
|
@@ -207,8 +207,8 @@ export interface OpenAICompat {
|
|||
supportsDeveloperRole?: boolean;
|
||||
/** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */
|
||||
supportsReasoningEffort?: boolean;
|
||||
/** Whether the provider supports `stream_options`. Default: auto-detected from URL. */
|
||||
supportsStreamOptions?: boolean;
|
||||
/** Whether the provider supports `stream_options: { include_usage: true }` for token usage in streaming responses. Default: true. */
|
||||
supportsUsageInStreaming?: boolean;
|
||||
/** Which field to use for max tokens. Default: auto-detected from URL. */
|
||||
maxTokensField?: "max_completion_tokens" | "max_tokens";
|
||||
/** Whether tool results require the `name` field. Default: auto-detected from URL. */
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue