Mirror of https://github.com/getcompanion-ai/co-mono.git
Synced 2026-04-20 20:01:06 +00:00

docs(ai): Update README for new content blocks API

- Add API changes section explaining v0.5.15+ changes
- Update Quick Start example to show content array usage
- Update Tool Calling example to filter tool calls from content blocks
- Update Streaming example to use new onEvent callback
- Fix model IDs in provider-specific examples

This commit is contained in:
parent 0f2eaa70e9
commit 80da49cd40

2 changed files with 80 additions and 28 deletions
@@ -4,6 +4,22 @@ Unified LLM API with automatic model discovery, provider configuration, token an

 **Note**: This library only includes models that support tool calling (function calling), as this is essential for agentic workflows.

+## API Changes in v0.5.15+
+
+The `AssistantMessage` response structure has been updated to support multiple content blocks of different types. Instead of separate fields for `text`, `thinking`, and `toolCalls`, responses now have a unified `content` array that can contain multiple blocks of each type in any order.
+
+```typescript
+// Old API (pre-0.5.15)
+response.text       // single text string
+response.thinking   // single thinking string
+response.toolCalls  // array of tool calls
+
+// New API (0.5.15+)
+response.content    // array of TextContent | ThinkingContent | ToolCall blocks
+```
+
+This change allows models to return multiple thinking and text blocks, which is especially useful for complex reasoning tasks.
+
 ## Supported Providers

 - **OpenAI**
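
As an aside (not part of the commit): a minimal sketch of consuming the new `content` array end to end. The block fields (`type`, `text`) are taken from the README excerpt above, and the model ID is simply the one used in the Quick Start:

```typescript
import { createLLM } from '@mariozechner/pi-ai';

const llm = createLLM('openai', 'gpt-4o-mini');

const response = await llm.complete({
  messages: [{ role: 'user', content: 'Hello!' }]
});

// Concatenate every text block in order; a response may contain
// several text blocks interleaved with thinking and tool-call blocks.
const fullText = response.content
  .filter((block) => block.type === 'text')
  .map((block) => block.text)
  .join('');

console.log(fullText);
```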
@@ -26,13 +42,18 @@ npm install @mariozechner/pi-ai

 ```typescript
 import { createLLM } from '@mariozechner/pi-ai';

-const llm = createLLM('openai', 'gpt-5-mini');
+const llm = createLLM('openai', 'gpt-4o-mini');

 const response = await llm.complete({
   messages: [{ role: 'user', content: 'Hello!' }]
 });

-console.log(response.content);
+// response.content is now an array of content blocks
+for (const block of response.content) {
+  if (block.type === 'text') {
+    console.log(block.text);
+  }
+}
 ```

 ## Image Input
@@ -75,8 +96,10 @@ messages.push({ role: 'user', content: 'What is the weather in Paris?' });

 const response = await llm.complete({ messages, tools });
 messages.push(response);

-if (response.toolCalls) {
-  for (const call of response.toolCalls) {
+// Check for tool calls in the content blocks
+const toolCalls = response.content.filter(block => block.type === 'toolCall');
+
+for (const call of toolCalls) {
   // Call your actual function
   const result = await getWeather(call.arguments.location);
@@ -85,14 +108,22 @@ if (response.toolCalls) {

     role: 'toolResult',
     content: JSON.stringify(result),
     toolCallId: call.id,
+    toolName: call.name,
     isError: false
   });
 }

+if (toolCalls.length > 0) {
   // Continue conversation with tool results
   const followUp = await llm.complete({ messages, tools });
   messages.push(followUp);
-  console.log(followUp.content);
+
+  // Print text blocks from the response
+  for (const block of followUp.content) {
+    if (block.type === 'text') {
+      console.log(block.text);
+    }
+  }
 }
 ```
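
As an aside (not from the commit): the `filter` in the updated example can be written as a TypeScript type guard so the tool-call fields stay typed after filtering. The block shapes below are assumptions inferred from the diff, not the library's published types:

```typescript
// Hypothetical block shapes, inferred from the README diff above.
type ToolCallBlock = {
  type: 'toolCall';
  id: string;
  name: string;
  arguments: Record<string, unknown>;
};
type TextBlock = { type: 'text'; text: string };
type ContentBlock = ToolCallBlock | TextBlock;

// The `b is ToolCallBlock` predicate narrows the union, so callers get
// `call.id`, `call.name`, and `call.arguments` without casting.
function toolCallsOf(content: ContentBlock[]): ToolCallBlock[] {
  return content.filter((b): b is ToolCallBlock => b.type === 'toolCall');
}
```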
@@ -102,13 +133,30 @@ if (response.toolCalls) {

 const response = await llm.complete({
   messages: [{ role: 'user', content: 'Write a story' }]
 }, {
-  onText: (chunk, complete) => {
-    process.stdout.write(chunk);
-    if (complete) console.log('\n[Text streaming complete]');
-  },
-  onThinking: (chunk, complete) => {
-    process.stderr.write(chunk);
-    if (complete) console.error('\n[Thinking complete]');
+  onEvent: (event) => {
+    switch (event.type) {
+      case 'text_start':
+        console.log('[Starting text block]');
+        break;
+      case 'text_delta':
+        process.stdout.write(event.delta);
+        break;
+      case 'text_end':
+        console.log('\n[Text block complete]');
+        break;
+      case 'thinking_start':
+        console.error('[Starting thinking]');
+        break;
+      case 'thinking_delta':
+        process.stderr.write(event.delta);
+        break;
+      case 'thinking_end':
+        console.error('\n[Thinking complete]');
+        break;
+      case 'toolCall':
+        console.log('Tool called:', event.toolCall.name);
+        break;
+    }
   }
 });
 ```
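
A small companion sketch (not from the commit): buffering `text_delta` events into one string while still streaming to the console. The event names and `event.delta` field are taken from the diff above; `llm` is assumed to be created as in the Quick Start:

```typescript
let buffered = '';

await llm.complete({
  messages: [{ role: 'user', content: 'Write a story' }]
}, {
  onEvent: (event) => {
    if (event.type === 'text_delta') {
      buffered += event.delta;            // keep a copy of the full text
      process.stdout.write(event.delta);  // and stream it live
    }
  }
});

console.log(`\nReceived ${buffered.length} characters of text.`);
```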
@@ -126,7 +174,11 @@ try {

     messages: [{ role: 'user', content: 'Write a long story' }]
   }, {
     signal: controller.signal,
-    onText: (chunk) => process.stdout.write(chunk)
+    onEvent: (event) => {
+      if (event.type === 'text_delta') {
+        process.stdout.write(event.delta);
+      }
+    }
   });
 } catch (error) {
   if (error.name === 'AbortError') {
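
The hunk above is truncated, so the AbortController setup isn't visible. For orientation only, a typical timeout pattern using the standard Node/Web AbortController API (independent of this library):

```typescript
// Cancel the request if it takes longer than 5 seconds.
const controller = new AbortController();
const timer = setTimeout(() => controller.abort(), 5_000);

try {
  // ...pass controller.signal to llm.complete() as shown above...
} finally {
  clearTimeout(timer); // avoid a dangling timer once the request settles
}
```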
@@ -139,7 +191,7 @@ try {

 ### OpenAI Reasoning (o1, o3)
 ```typescript
-const llm = createLLM('openai', 'gpt-5-mini');
+const llm = createLLM('openai', 'o1-mini');

 await llm.complete(context, {
   reasoningEffort: 'medium' // 'minimal' | 'low' | 'medium' | 'high'
@@ -148,7 +200,7 @@ await llm.complete(context, {

 ### Anthropic Thinking
 ```typescript
-const llm = createLLM('anthropic', 'claude-sonnet-4-0');
+const llm = createLLM('anthropic', 'claude-3-5-sonnet-20241022');

 await llm.complete(context, {
   thinking: {
@@ -160,7 +212,7 @@ await llm.complete(context, {

 ### Google Gemini Thinking
 ```typescript
-const llm = createLLM('google', 'gemini-2.5-flash');
+const llm = createLLM('google', 'gemini-2.0-flash-thinking-exp');

 await llm.complete(context, {
   thinking: { enabled: true }
@@ -486,8 +486,8 @@ export const PROVIDERS = {

       reasoning: true,
       input: ["text"],
       cost: {
-        input: 0.1999188,
-        output: 0.800064,
+        input: 0.32986602,
+        output: 1.3201056,
         cacheRead: 0,
         cacheWrite: 0,
       },
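
For orientation, a back-of-the-envelope sketch of how per-request cost could be derived from an entry like the one above. The units (USD per million tokens) and the token counts are assumptions for illustration; they are not stated in this excerpt:

```typescript
// Hypothetical cost calculation; assumes prices are USD per million tokens.
const cost = { input: 0.32986602, output: 1.3201056 };

function requestCostUsd(inputTokens: number, outputTokens: number): number {
  return (inputTokens * cost.input + outputTokens * cost.output) / 1_000_000;
}

// e.g. a request with 12,000 prompt tokens and 800 completion tokens:
console.log(requestCostUsd(12_000, 800).toFixed(6)); // ≈ 0.005014
```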