Make mini-lit a peer dependency in pi-web-ui

- Move mini-lit from dependencies to peerDependencies
- Keep in devDependencies for development
- Prevents bundlers from including mini-lit when consuming pi-web-ui
- Consumer (sitegeist) provides mini-lit, esbuild bundles it once
- Fixes duplicate mini-lit bundling issue permanently
This commit is contained in:
Mario Zechner 2025-10-06 18:59:08 +02:00
parent 6126380879
commit 33145c5f24
4 changed files with 32 additions and 25 deletions

View file

@@ -1711,13 +1711,13 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.3,
output: 1.5,
input: 0.19,
output: 0.78,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 32768,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"qwen/qwen3-max": {
id: "qwen/qwen3-max",
@@ -1830,13 +1830,13 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.09999999999999999,
output: 0.7999999999999999,
input: 0.14,
output: 1.2,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 262144,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"qwen/qwen3-next-80b-a3b-instruct": {
id: "qwen/qwen3-next-80b-a3b-instruct",
@@ -3368,23 +3368,6 @@ export const MODELS = {
contextWindow: 65000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-405b-instruct": {
id: "meta-llama/llama-3.1-405b-instruct",
name: "Meta: Llama 3.1 405B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.7999999999999999,
output: 0.7999999999999999,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 32768,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-8b-instruct": {
id: "meta-llama/llama-3.1-8b-instruct",
name: "Meta: Llama 3.1 8B Instruct",
@@ -3402,6 +3385,23 @@ export const MODELS = {
contextWindow: 16384,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-405b-instruct": {
id: "meta-llama/llama-3.1-405b-instruct",
name: "Meta: Llama 3.1 405B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.7999999999999999,
output: 0.7999999999999999,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 32768,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-70b-instruct": {
id: "meta-llama/llama-3.1-70b-instruct",
name: "Meta: Llama 3.1 70B Instruct",