Make mini-lit a peer dependency in pi-web-ui

- Move mini-lit from dependencies to peerDependencies
- Keep in devDependencies for development
- Prevents bundlers from including mini-lit when consuming pi-web-ui
- Consumer (sitegeist) provides mini-lit, esbuild bundles it once
- Fixes duplicate mini-lit bundling issue permanently
This commit is contained in:
Mario Zechner 2025-10-06 18:59:08 +02:00
parent 6126380879
commit 33145c5f24
4 changed files with 32 additions and 25 deletions

5
package-lock.json generated
View file

@@ -5446,7 +5446,6 @@
"version": "0.5.44",
"license": "MIT",
"dependencies": {
"@mariozechner/mini-lit": "^0.1.8",
"@mariozechner/pi-ai": "^0.5.43",
"docx-preview": "^0.3.7",
"jszip": "^3.10.1",
@@ -5456,9 +5455,13 @@
"xlsx": "^0.18.5"
},
"devDependencies": {
"@mariozechner/mini-lit": "^0.1.8",
"@tailwindcss/cli": "^4.0.0-beta.14",
"concurrently": "^9.2.1",
"typescript": "^5.7.3"
},
"peerDependencies": {
"@mariozechner/mini-lit": "^0.1.8"
}
}
}

View file

@@ -1711,13 +1711,13 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.3,
output: 1.5,
input: 0.19,
output: 0.78,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 32768,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"qwen/qwen3-max": {
id: "qwen/qwen3-max",
@@ -1830,13 +1830,13 @@ export const MODELS = {
reasoning: true,
input: ["text"],
cost: {
input: 0.09999999999999999,
output: 0.7999999999999999,
input: 0.14,
output: 1.2,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 262144,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"qwen/qwen3-next-80b-a3b-instruct": {
id: "qwen/qwen3-next-80b-a3b-instruct",
@@ -3368,23 +3368,6 @@ export const MODELS = {
contextWindow: 65000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-405b-instruct": {
id: "meta-llama/llama-3.1-405b-instruct",
name: "Meta: Llama 3.1 405B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.7999999999999999,
output: 0.7999999999999999,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 32768,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-8b-instruct": {
id: "meta-llama/llama-3.1-8b-instruct",
name: "Meta: Llama 3.1 8B Instruct",
@@ -3402,6 +3385,23 @@ export const MODELS = {
contextWindow: 16384,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-405b-instruct": {
id: "meta-llama/llama-3.1-405b-instruct",
name: "Meta: Llama 3.1 405B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.7999999999999999,
output: 0.7999999999999999,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 32768,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-70b-instruct": {
id: "meta-llama/llama-3.1-70b-instruct",
name: "Meta: Llama 3.1 70B Instruct",

View file

@@ -17,7 +17,6 @@
"check": "npm run typecheck"
},
"dependencies": {
"@mariozechner/mini-lit": "^0.1.8",
"@mariozechner/pi-ai": "^0.5.43",
"docx-preview": "^0.3.7",
"jszip": "^3.10.1",
@@ -26,7 +25,11 @@
"pdfjs-dist": "^5.4.149",
"xlsx": "^0.18.5"
},
"peerDependencies": {
"@mariozechner/mini-lit": "^0.1.8"
},
"devDependencies": {
"@mariozechner/mini-lit": "^0.1.8",
"@tailwindcss/cli": "^4.0.0-beta.14",
"concurrently": "^9.2.1",
"typescript": "^5.7.3"

View file

@@ -141,6 +141,7 @@ export class ChatPanel extends LitElement {
this.artifactCount = this.artifactsPanel.artifacts.size;
this.requestUpdate();
console.log("ChatPanel: Agent set with tools:", tools);
}
render() {