Mirror of https://github.com/getcompanion-ai/co-mono.git
Fix crash when bash mode outputs binary data
Sanitize shell output by removing Unicode Format characters and lone surrogates, which crash string-width. This fixes crashes when running commands like curl that download binary files.
This commit is contained in:
parent a054fecd11
commit ad42ebf5f5
5 changed files with 141 additions and 111 deletions
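The hunks shown below cover the regenerated model catalog. As a rough sketch of what the description implies, the sanitization could look something like the following TypeScript; sanitizeShellOutput and the exact filtering rules are assumptions drawn from the commit message, not the actual patch.

// A minimal sketch, assuming the fix drops lone UTF-16 surrogates and then
// Unicode Format (Cf) characters so the result is safe to measure with
// string-width. Hypothetical name and shape, for illustration only.
function sanitizeShellOutput(text: string): string {
  let out = "";
  for (let i = 0; i < text.length; i++) {
    const code = text.charCodeAt(i);
    if (code >= 0xd800 && code <= 0xdbff) {
      // High surrogate: keep it only when a low surrogate follows (a valid pair).
      const next = text.charCodeAt(i + 1); // NaN at end of string, so the check fails safely
      if (next >= 0xdc00 && next <= 0xdfff) {
        out += text[i] + text[i + 1];
        i++;
      }
      continue; // lone high surrogate: drop
    }
    if (code >= 0xdc00 && code <= 0xdfff) continue; // lone low surrogate: drop
    out += text[i];
  }
  // Strip Unicode Format characters (general category Cf): zero-width space,
  // soft hyphen, directional marks, and similar zero-width code points.
  return out.replace(/\p{Cf}/gu, "");
}

// Binary output decoded as text can contain exactly these troublemakers:
sanitizeShellOutput("ok\u200b" + String.fromCharCode(0xd800) + "done"); // => "okdone"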
@@ -4499,8 +4499,8 @@ export const MODELS = {
     reasoning: false,
     input: ["text", "image"],
     cost: {
-      input: 0.049999999999999996,
-      output: 0.22,
+      input: 0.03,
+      output: 0.11,
       cacheRead: 0,
       cacheWrite: 0,
     },
@@ -4983,9 +4983,9 @@ export const MODELS = {
     contextWindow: 32768,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
-  "anthropic/claude-3.5-haiku": {
-    id: "anthropic/claude-3.5-haiku",
-    name: "Anthropic: Claude 3.5 Haiku",
+  "anthropic/claude-3.5-haiku-20241022": {
+    id: "anthropic/claude-3.5-haiku-20241022",
+    name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
@@ -5000,9 +5000,9 @@ export const MODELS = {
     contextWindow: 200000,
     maxTokens: 8192,
   } satisfies Model<"openai-completions">,
-  "anthropic/claude-3.5-haiku-20241022": {
-    id: "anthropic/claude-3.5-haiku-20241022",
-    name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
+  "anthropic/claude-3.5-haiku": {
+    id: "anthropic/claude-3.5-haiku",
+    name: "Anthropic: Claude 3.5 Haiku",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
@@ -5034,23 +5034,6 @@ export const MODELS = {
     contextWindow: 200000,
     maxTokens: 8192,
   } satisfies Model<"openai-completions">,
-  "mistralai/ministral-3b": {
-    id: "mistralai/ministral-3b",
-    name: "Mistral: Ministral 3B",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 0.04,
-      output: 0.04,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 131072,
-    maxTokens: 4096,
-  } satisfies Model<"openai-completions">,
   "mistralai/ministral-8b": {
     id: "mistralai/ministral-8b",
     name: "Mistral: Ministral 8B",
@@ -5068,6 +5051,23 @@ export const MODELS = {
     contextWindow: 131072,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
+  "mistralai/ministral-3b": {
+    id: "mistralai/ministral-3b",
+    name: "Mistral: Ministral 3B",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 0.04,
+      output: 0.04,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 131072,
+    maxTokens: 4096,
+  } satisfies Model<"openai-completions">,
   "nvidia/llama-3.1-nemotron-70b-instruct": {
     id: "nvidia/llama-3.1-nemotron-70b-instruct",
     name: "NVIDIA: Llama 3.1 Nemotron 70B Instruct",
@@ -5153,23 +5153,6 @@ export const MODELS = {
     contextWindow: 32768,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
-  "cohere/command-r-plus-08-2024": {
-    id: "cohere/command-r-plus-08-2024",
-    name: "Cohere: Command R+ (08-2024)",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 2.5,
-      output: 10,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 128000,
-    maxTokens: 4000,
-  } satisfies Model<"openai-completions">,
   "cohere/command-r-08-2024": {
     id: "cohere/command-r-08-2024",
     name: "Cohere: Command R (08-2024)",
@@ -5187,6 +5170,23 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 4000,
   } satisfies Model<"openai-completions">,
+  "cohere/command-r-plus-08-2024": {
+    id: "cohere/command-r-plus-08-2024",
+    name: "Cohere: Command R+ (08-2024)",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 2.5,
+      output: 10,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 128000,
+    maxTokens: 4000,
+  } satisfies Model<"openai-completions">,
   "sao10k/l3.1-euryale-70b": {
     id: "sao10k/l3.1-euryale-70b",
     name: "Sao10K: Llama 3.1 Euryale 70B v2.2",
@@ -5238,6 +5238,23 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 16384,
   } satisfies Model<"openai-completions">,
+  "meta-llama/llama-3.1-8b-instruct": {
+    id: "meta-llama/llama-3.1-8b-instruct",
+    name: "Meta: Llama 3.1 8B Instruct",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 0.02,
+      output: 0.03,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 131072,
+    maxTokens: 16384,
+  } satisfies Model<"openai-completions">,
   "meta-llama/llama-3.1-405b-instruct": {
     id: "meta-llama/llama-3.1-405b-instruct",
     name: "Meta: Llama 3.1 405B Instruct",
@@ -5272,23 +5289,6 @@ export const MODELS = {
     contextWindow: 131072,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
-  "meta-llama/llama-3.1-8b-instruct": {
-    id: "meta-llama/llama-3.1-8b-instruct",
-    name: "Meta: Llama 3.1 8B Instruct",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 0.02,
-      output: 0.03,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 131072,
-    maxTokens: 16384,
-  } satisfies Model<"openai-completions">,
   "mistralai/mistral-nemo": {
     id: "mistralai/mistral-nemo",
     name: "Mistral: Mistral Nemo",
@@ -5476,23 +5476,6 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 64000,
   } satisfies Model<"openai-completions">,
-  "meta-llama/llama-3-8b-instruct": {
-    id: "meta-llama/llama-3-8b-instruct",
-    name: "Meta: Llama 3 8B Instruct",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 0.03,
-      output: 0.06,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 8192,
-    maxTokens: 16384,
-  } satisfies Model<"openai-completions">,
   "meta-llama/llama-3-70b-instruct": {
     id: "meta-llama/llama-3-70b-instruct",
     name: "Meta: Llama 3 70B Instruct",
@@ -5510,6 +5493,23 @@ export const MODELS = {
     contextWindow: 8192,
     maxTokens: 16384,
   } satisfies Model<"openai-completions">,
+  "meta-llama/llama-3-8b-instruct": {
+    id: "meta-llama/llama-3-8b-instruct",
+    name: "Meta: Llama 3 8B Instruct",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 0.03,
+      output: 0.06,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 8192,
+    maxTokens: 16384,
+  } satisfies Model<"openai-completions">,
   "mistralai/mixtral-8x22b-instruct": {
     id: "mistralai/mixtral-8x22b-instruct",
     name: "Mistral: Mixtral 8x22B Instruct",
@@ -5697,23 +5697,6 @@ export const MODELS = {
     contextWindow: 16385,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
-  "openai/gpt-3.5-turbo": {
-    id: "openai/gpt-3.5-turbo",
-    name: "OpenAI: GPT-3.5 Turbo",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 0.5,
-      output: 1.5,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 16385,
-    maxTokens: 4096,
-  } satisfies Model<"openai-completions">,
   "openai/gpt-4-0314": {
     id: "openai/gpt-4-0314",
     name: "OpenAI: GPT-4 (older v0314)",
@@ -5748,6 +5731,23 @@ export const MODELS = {
     contextWindow: 8191,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
+  "openai/gpt-3.5-turbo": {
+    id: "openai/gpt-3.5-turbo",
+    name: "OpenAI: GPT-3.5 Turbo",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 0.5,
+      output: 1.5,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 16385,
+    maxTokens: 4096,
+  } satisfies Model<"openai-completions">,
   "openrouter/auto": {
     id: "openrouter/auto",
     name: "OpenRouter: Auto Router",