docs(ai): Fix model names in README examples

- Changed gpt-4o-mini to gpt-5-mini in examples to match actual model names
- Minor formatting fixes
Mario Zechner 2025-08-30 21:48:17 +02:00
parent dae40167a3
commit 889208dccd
10 changed files with 116 additions and 116 deletions

package-lock.json (generated, 12 lines changed)

@@ -2716,10 +2716,10 @@
},
"packages/agent": {
"name": "@mariozechner/pi-agent",
"version": "0.5.10",
"version": "0.5.11",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-tui": "^0.5.9",
"@mariozechner/pi-tui": "^0.5.10",
"@types/glob": "^8.1.0",
"chalk": "^5.5.0",
"glob": "^11.0.3",
@@ -3098,7 +3098,7 @@
},
"packages/ai": {
"name": "@mariozechner/pi-ai",
"version": "0.5.10",
"version": "0.5.11",
"license": "MIT",
"dependencies": {
"@anthropic-ai/sdk": "^0.60.0",
@@ -3134,10 +3134,10 @@
},
"packages/pods": {
"name": "@mariozechner/pi",
"version": "0.5.10",
"version": "0.5.11",
"license": "MIT",
"dependencies": {
"@mariozechner/pi-agent": "^0.5.9",
"@mariozechner/pi-agent": "^0.5.10",
"chalk": "^5.5.0"
},
"bin": {
@@ -3150,7 +3150,7 @@
},
"packages/tui": {
"name": "@mariozechner/pi-tui",
"version": "0.5.10",
"version": "0.5.11",
"license": "MIT",
"dependencies": {
"@types/mime-types": "^2.1.4",


@@ -1,12 +1,12 @@
{
"name": "@mariozechner/pi-agent",
"version": "0.5.10",
"version": "0.5.11",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@mariozechner/pi-agent",
"version": "0.5.10",
"version": "0.5.11",
"license": "MIT",
"dependencies": {
"@mariozechner/tui": "^0.1.1",


@@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-agent",
"version": "0.5.10",
"version": "0.5.11",
"description": "General-purpose agent with tool calling and session persistence",
"type": "module",
"bin": {
@@ -18,7 +18,7 @@
"prepublishOnly": "npm run clean && npm run build"
},
"dependencies": {
"@mariozechner/pi-tui": "^0.5.10",
"@mariozechner/pi-tui": "^0.5.11",
"@types/glob": "^8.1.0",
"chalk": "^5.5.0",
"glob": "^11.0.3",


@@ -26,7 +26,7 @@ npm install @mariozechner/pi-ai
```typescript
import { createLLM } from '@mariozechner/pi-ai';
- const llm = createLLM('openai', 'gpt-4o-mini');
+ const llm = createLLM('openai', 'gpt-5-mini');
const response = await llm.complete({
messages: [{ role: 'user', content: 'Hello!' }]
@@ -79,16 +79,16 @@ if (response.toolCalls) {
for (const call of response.toolCalls) {
// Call your actual function
const result = await getWeather(call.arguments.location);
// Add tool result to context
- messages.push({
- role: 'toolResult',
- content: JSON.stringify(result),
- toolCallId: call.id,
- isError: false
+ messages.push({
+ role: 'toolResult',
+ content: JSON.stringify(result),
+ toolCallId: call.id,
+ isError: false
});
}
// Continue conversation with tool results
const followUp = await llm.complete({ messages, tools });
messages.push(followUp);
@@ -139,7 +139,7 @@ try {
### OpenAI Reasoning (o1, o3)
```typescript
- const llm = createLLM('openai', 'o1-mini');
+ const llm = createLLM('openai', 'gpt-5-mini');
await llm.complete(context, {
reasoningEffort: 'medium' // 'minimal' | 'low' | 'medium' | 'high'
@@ -148,7 +148,7 @@ await llm.complete(context, {
### Anthropic Thinking
```typescript
- const llm = createLLM('anthropic', 'claude-3-7-sonnet-latest');
+ const llm = createLLM('anthropic', 'claude-sonnet-4-0');
await llm.complete(context, {
thinking: {
@@ -160,7 +160,7 @@ await llm.complete(context, {
### Google Gemini Thinking
```typescript
- const llm = createLLM('google', 'gemini-2.0-flash-thinking-exp');
+ const llm = createLLM('google', 'gemini-2.5-flash');
await llm.complete(context, {
thinking: { enabled: true }
@@ -279,10 +279,10 @@ OPENROUTER_API_KEY=sk-or-...
When set, you can omit the API key parameter:
```typescript
// Uses OPENAI_API_KEY from environment
- const llm = createLLM('openai', 'gpt-4o-mini');
+ const llm = createLLM('openai', 'gpt-5-mini');
// Or pass explicitly
- const llm = createLLM('openai', 'gpt-4o-mini', 'sk-...');
+ const llm = createLLM('openai', 'gpt-5-mini', 'sk-...');
```
## License

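Read together, the README hunks above leave the quick-start and the tool-result loop as follows. This is a sketch assembled from the diff, not authoritative docs: the `tools` array and the `getWeather` helper are assumed to be defined in README sections this commit does not touch.

```typescript
import { createLLM } from '@mariozechner/pi-ai';

// Picks up OPENAI_API_KEY from the environment when no key is passed.
const llm = createLLM('openai', 'gpt-5-mini');

const messages = [{ role: 'user', content: 'Hello!' }];
// `tools` is assumed to be declared earlier in the README (not part of this diff).
const response = await llm.complete({ messages, tools });

if (response.toolCalls) {
  for (const call of response.toolCalls) {
    // `getWeather` stands in for your actual function, as in the README example.
    const result = await getWeather(call.arguments.location);
    messages.push({
      role: 'toolResult',
      content: JSON.stringify(result),
      toolCallId: call.id,
      isError: false
    });
  }
  // Continue the conversation with the tool results attached.
  const followUp = await llm.complete({ messages, tools });
  messages.push(followUp);
}
```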

@@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-ai",
"version": "0.5.10",
"version": "0.5.11",
"description": "Unified LLM API with automatic model discovery and provider configuration",
"type": "module",
"main": "./dist/index.js",


@@ -1598,22 +1598,6 @@ export const PROVIDERS = {
contextWindow: 131072,
maxTokens: 16384,
} satisfies Model,
"meta-llama/llama-3.1-70b-instruct": {
id: "meta-llama/llama-3.1-70b-instruct",
name: "Meta: Llama 3.1 70B Instruct",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.09999999999999999,
output: 0.28,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 16384,
} satisfies Model,
"meta-llama/llama-3.1-405b-instruct": {
id: "meta-llama/llama-3.1-405b-instruct",
name: "Meta: Llama 3.1 405B Instruct",
@@ -1630,6 +1614,22 @@ export const PROVIDERS = {
contextWindow: 32768,
maxTokens: 16384,
} satisfies Model,
"meta-llama/llama-3.1-70b-instruct": {
id: "meta-llama/llama-3.1-70b-instruct",
name: "Meta: Llama 3.1 70B Instruct",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.09999999999999999,
output: 0.28,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 16384,
} satisfies Model,
"mistralai/mistral-nemo": {
id: "mistralai/mistral-nemo",
name: "Mistral: Mistral Nemo",
@@ -1646,22 +1646,6 @@ export const PROVIDERS = {
contextWindow: 32000,
maxTokens: 4096,
} satisfies Model,
"mistralai/mistral-7b-instruct-v0.3": {
id: "mistralai/mistral-7b-instruct-v0.3",
name: "Mistral: Mistral 7B Instruct v0.3",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.028,
output: 0.054,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 32768,
maxTokens: 16384,
} satisfies Model,
"mistralai/mistral-7b-instruct:free": {
id: "mistralai/mistral-7b-instruct:free",
name: "Mistral: Mistral 7B Instruct (free)",
@@ -1694,6 +1678,22 @@ export const PROVIDERS = {
contextWindow: 32768,
maxTokens: 16384,
} satisfies Model,
"mistralai/mistral-7b-instruct-v0.3": {
id: "mistralai/mistral-7b-instruct-v0.3",
name: "Mistral: Mistral 7B Instruct v0.3",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.028,
output: 0.054,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 32768,
maxTokens: 16384,
} satisfies Model,
"microsoft/phi-3-mini-128k-instruct": {
id: "microsoft/phi-3-mini-128k-instruct",
name: "Microsoft: Phi-3 Mini 128K Instruct",
@@ -1726,22 +1726,6 @@ export const PROVIDERS = {
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model,
"meta-llama/llama-3-70b-instruct": {
id: "meta-llama/llama-3-70b-instruct",
name: "Meta: Llama 3 70B Instruct",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.3,
output: 0.39999999999999997,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 8192,
maxTokens: 16384,
} satisfies Model,
"meta-llama/llama-3-8b-instruct": {
id: "meta-llama/llama-3-8b-instruct",
name: "Meta: Llama 3 8B Instruct",
@@ -1758,6 +1742,22 @@ export const PROVIDERS = {
contextWindow: 8192,
maxTokens: 16384,
} satisfies Model,
"meta-llama/llama-3-70b-instruct": {
id: "meta-llama/llama-3-70b-instruct",
name: "Meta: Llama 3 70B Instruct",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.3,
output: 0.39999999999999997,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 8192,
maxTokens: 16384,
} satisfies Model,
"mistralai/mixtral-8x22b-instruct": {
id: "mistralai/mixtral-8x22b-instruct",
name: "Mistral: Mixtral 8x22B Instruct",
@@ -1854,22 +1854,6 @@ export const PROVIDERS = {
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model,
"mistralai/mistral-tiny": {
id: "mistralai/mistral-tiny",
name: "Mistral Tiny",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.25,
output: 0.25,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 32768,
maxTokens: 4096,
} satisfies Model,
"mistralai/mistral-small": {
id: "mistralai/mistral-small",
name: "Mistral Small",
@@ -1886,6 +1870,22 @@ export const PROVIDERS = {
contextWindow: 32768,
maxTokens: 4096,
} satisfies Model,
"mistralai/mistral-tiny": {
id: "mistralai/mistral-tiny",
name: "Mistral Tiny",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.25,
output: 0.25,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 32768,
maxTokens: 4096,
} satisfies Model,
"mistralai/mixtral-8x7b-instruct": {
id: "mistralai/mixtral-8x7b-instruct",
name: "Mistral: Mixtral 8x7B Instruct",
@@ -2473,6 +2473,21 @@ export const PROVIDERS = {
contextWindow: 16385,
maxTokens: 4096,
} satisfies Model,
"gpt-3.5-turbo": {
id: "gpt-3.5-turbo",
name: "OpenAI: GPT-3.5 Turbo",
provider: "openai",
reasoning: false,
input: ["text"],
cost: {
input: 0.5,
output: 1.5,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 16385,
maxTokens: 4096,
} satisfies Model,
"gpt-4": {
id: "gpt-4",
name: "OpenAI: GPT-4",
@@ -2503,21 +2518,6 @@ export const PROVIDERS = {
contextWindow: 8191,
maxTokens: 4096,
} satisfies Model,
"gpt-3.5-turbo": {
id: "gpt-3.5-turbo",
name: "OpenAI: GPT-3.5 Turbo",
provider: "openai",
reasoning: false,
input: ["text"],
cost: {
input: 0.5,
output: 1.5,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 16385,
maxTokens: 4096,
} satisfies Model,
},
},
anthropic: {
@@ -2597,9 +2597,9 @@ export const PROVIDERS = {
contextWindow: 200000,
maxTokens: 64000,
} satisfies Model,
"claude-3-5-haiku-20241022": {
id: "claude-3-5-haiku-20241022",
name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
"claude-3-5-haiku-latest": {
id: "claude-3-5-haiku-latest",
name: "Anthropic: Claude 3.5 Haiku",
provider: "anthropic",
reasoning: false,
input: ["text", "image"],
@@ -2612,9 +2612,9 @@ export const PROVIDERS = {
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model,
"claude-3-5-haiku-latest": {
id: "claude-3-5-haiku-latest",
name: "Anthropic: Claude 3.5 Haiku",
"claude-3-5-haiku-20241022": {
id: "claude-3-5-haiku-20241022",
name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
provider: "anthropic",
reasoning: false,
input: ["text", "image"],

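The moved blocks in this generated catalog only reorder keys inside the `PROVIDERS` object literal; every entry is keyed by its model id, so lookups behave identically before and after. A minimal sketch of such a lookup, assuming `PROVIDERS` nests each provider's entries under a `models` map (the exact nesting is not visible in this diff):

```typescript
import { PROVIDERS } from '@mariozechner/pi-ai'; // assumed export location

// Object key order does not affect property lookup, so the re-sorted
// entries above are behavior-neutral.
const llama = PROVIDERS.openrouter.models['meta-llama/llama-3.1-70b-instruct'];
console.log(llama.contextWindow); // 131072, per the catalog entry above
```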

@@ -1,12 +1,12 @@
{
"name": "@mariozechner/pi",
"version": "0.5.10",
"version": "0.5.11",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@mariozechner/pi",
"version": "0.5.10",
"version": "0.5.11",
"license": "MIT",
"dependencies": {
"@ai-sdk/openai": "^2.0.5",


@@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi",
"version": "0.5.10",
"version": "0.5.11",
"description": "CLI tool for managing vLLM deployments on GPU pods",
"type": "module",
"bin": {
@@ -34,7 +34,7 @@
"node": ">=20.0.0"
},
"dependencies": {
"@mariozechner/pi-agent": "^0.5.10",
"@mariozechner/pi-agent": "^0.5.11",
"chalk": "^5.5.0"
},
"devDependencies": {}


@@ -1,12 +1,12 @@
{
"name": "@mariozechner/tui",
"version": "0.5.10",
"version": "0.5.11",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@mariozechner/tui",
"version": "0.5.10",
"version": "0.5.11",
"license": "MIT",
"dependencies": {
"@types/mime-types": "^2.1.4",


@@ -1,6 +1,6 @@
{
"name": "@mariozechner/pi-tui",
"version": "0.5.10",
"version": "0.5.11",
"description": "Terminal User Interface library with differential rendering for efficient text-based applications",
"type": "module",
"main": "dist/index.js",