Mirror of https://github.com/getcompanion-ai/co-mono.git, synced 2026-04-20 17:02:11 +00:00
Fix AgentInterface to requestUpdate on all message lifecycle events
Previously only message_update was handled; now message_start, message_end, turn_start, turn_end, agent_start, and agent_end also trigger a re-render.
parent 977e4ea6ef
commit 347d4cf729
2 changed files with 74 additions and 64 deletions
@@ -6104,9 +6104,9 @@ export const MODELS = {
     contextWindow: 32768,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
-  "anthropic/claude-3.5-haiku-20241022": {
-    id: "anthropic/claude-3.5-haiku-20241022",
-    name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
+  "anthropic/claude-3.5-haiku": {
+    id: "anthropic/claude-3.5-haiku",
+    name: "Anthropic: Claude 3.5 Haiku",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
@@ -6121,9 +6121,9 @@ export const MODELS = {
     contextWindow: 200000,
     maxTokens: 8192,
   } satisfies Model<"openai-completions">,
-  "anthropic/claude-3.5-haiku": {
-    id: "anthropic/claude-3.5-haiku",
-    name: "Anthropic: Claude 3.5 Haiku",
+  "anthropic/claude-3.5-haiku-20241022": {
+    id: "anthropic/claude-3.5-haiku-20241022",
+    name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
@@ -6359,23 +6359,6 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 16384,
   } satisfies Model<"openai-completions">,
-  "meta-llama/llama-3.1-8b-instruct": {
-    id: "meta-llama/llama-3.1-8b-instruct",
-    name: "Meta: Llama 3.1 8B Instruct",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 0.02,
-      output: 0.03,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 131072,
-    maxTokens: 16384,
-  } satisfies Model<"openai-completions">,
   "meta-llama/llama-3.1-405b-instruct": {
     id: "meta-llama/llama-3.1-405b-instruct",
     name: "Meta: Llama 3.1 405B Instruct",
@@ -6410,6 +6393,23 @@ export const MODELS = {
     contextWindow: 131072,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
+  "meta-llama/llama-3.1-8b-instruct": {
+    id: "meta-llama/llama-3.1-8b-instruct",
+    name: "Meta: Llama 3.1 8B Instruct",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 0.02,
+      output: 0.03,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 131072,
+    maxTokens: 16384,
+  } satisfies Model<"openai-completions">,
   "mistralai/mistral-nemo": {
     id: "mistralai/mistral-nemo",
     name: "Mistral: Mistral Nemo",
@@ -6546,23 +6546,6 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
-  "openai/gpt-4o-2024-05-13": {
-    id: "openai/gpt-4o-2024-05-13",
-    name: "OpenAI: GPT-4o (2024-05-13)",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text", "image"],
-    cost: {
-      input: 5,
-      output: 15,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 128000,
-    maxTokens: 4096,
-  } satisfies Model<"openai-completions">,
   "openai/gpt-4o": {
     id: "openai/gpt-4o",
     name: "OpenAI: GPT-4o",
@@ -6597,6 +6580,23 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 64000,
   } satisfies Model<"openai-completions">,
+  "openai/gpt-4o-2024-05-13": {
+    id: "openai/gpt-4o-2024-05-13",
+    name: "OpenAI: GPT-4o (2024-05-13)",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text", "image"],
+    cost: {
+      input: 5,
+      output: 15,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 128000,
+    maxTokens: 4096,
+  } satisfies Model<"openai-completions">,
   "meta-llama/llama-3-70b-instruct": {
     id: "meta-llama/llama-3-70b-instruct",
     name: "Meta: Llama 3 70B Instruct",
@@ -6835,23 +6835,6 @@ export const MODELS = {
     contextWindow: 8191,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
-  "openai/gpt-4": {
-    id: "openai/gpt-4",
-    name: "OpenAI: GPT-4",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 30,
-      output: 60,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 8191,
-    maxTokens: 4096,
-  } satisfies Model<"openai-completions">,
   "openai/gpt-3.5-turbo": {
     id: "openai/gpt-3.5-turbo",
     name: "OpenAI: GPT-3.5 Turbo",
@@ -6869,6 +6852,23 @@ export const MODELS = {
     contextWindow: 16385,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
+  "openai/gpt-4": {
+    id: "openai/gpt-4",
+    name: "OpenAI: GPT-4",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 30,
+      output: 60,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 8191,
+    maxTokens: 4096,
+  } satisfies Model<"openai-completions">,
   "openrouter/auto": {
     id: "openrouter/auto",
     name: "OpenRouter: Auto Router",
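All of the hunks above are renames and reorderings of entries in the same MODELS table; every entry keeps the shape visible in the diff. As a point of reference, a single entry type-checks roughly as follows. This is a minimal sketch: the Model interface below is an assumption inferred from the fields shown in the diff, not the repo's actual definition, and the sample values are copied from the "openai/gpt-4o-2024-05-13" entry above.

// Sketch only: this Model interface is inferred from the fields visible in
// the diff above; the repo's real definition may differ.
interface Model<Api extends string> {
  id: string;
  name: string;
  api: Api;
  provider: string;
  baseUrl: string;
  reasoning: boolean;
  input: ("text" | "image")[];
  cost: { input: number; output: number; cacheRead: number; cacheWrite: number };
  contextWindow: number;
  maxTokens: number;
}

export const MODELS = {
  // Values copied from the "openai/gpt-4o-2024-05-13" entry in the diff.
  "openai/gpt-4o-2024-05-13": {
    id: "openai/gpt-4o-2024-05-13",
    name: "OpenAI: GPT-4o (2024-05-13)",
    api: "openai-completions",
    provider: "openrouter",
    baseUrl: "https://openrouter.ai/api/v1",
    reasoning: false,
    input: ["text", "image"],
    cost: { input: 5, output: 15, cacheRead: 0, cacheWrite: 0 },
    contextWindow: 128000,
    maxTokens: 4096,
    // `satisfies` checks the literal against Model<"openai-completions">
    // without widening it, so keys and field values keep their literal types.
  } satisfies Model<"openai-completions">,
};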
@@ -131,13 +131,23 @@ export class AgentInterface extends LitElement {
     }
     if (!this.session) return;
     this._unsubscribeSession = this.session.subscribe(async (ev: AgentEvent) => {
-      if (ev.type === "message_update") {
-        if (this._streamingContainer) {
-          const isStreaming = this.session?.state.isStreaming || false;
-          this._streamingContainer.isStreaming = isStreaming;
-          this._streamingContainer.setMessage(ev.message, !isStreaming);
-        }
-        this.requestUpdate();
+      switch (ev.type) {
+        case "message_start":
+        case "message_end":
+        case "turn_start":
+        case "turn_end":
+        case "agent_start":
+        case "agent_end":
+          this.requestUpdate();
+          break;
+        case "message_update":
+          if (this._streamingContainer) {
+            const isStreaming = this.session?.state.isStreaming || false;
+            this._streamingContainer.isStreaming = isStreaming;
+            this._streamingContainer.setMessage(ev.message, !isStreaming);
+          }
+          this.requestUpdate();
+          break;
       }
     });
   }
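The new switch treats AgentEvent as a discriminated union over these event types. A sketch of what that union might look like, inferred only from the cases handled above (assumption: only message_update is shown carrying a message payload; the repo's actual event types likely carry more fields):

// Sketch inferred from the switch cases above; not the repo's actual definition.
type AgentEvent =
  | { type: "message_start" }
  | { type: "message_end" }
  | { type: "turn_start" }
  | { type: "turn_end" }
  | { type: "agent_start" }
  | { type: "agent_end" }
  | { type: "message_update"; message: unknown }; // placeholder payload type

The six lifecycle cases fall through to a single this.requestUpdate() and break, while message_update first pushes the updated message into the streaming container; that shared re-render path is what this commit adds over the old message_update-only handler.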