Fix AgentInterface to requestUpdate on all message lifecycle events

Was only handling message_update; now also handles message_start,
message_end, turn_start, turn_end, agent_start, and agent_end.
Mario Zechner 2025-12-28 11:22:56 +01:00
parent 977e4ea6ef
commit 347d4cf729
2 changed files with 74 additions and 64 deletions
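
For orientation, a rough sketch of the event union the updated handler switches over. The real AgentEvent type is not part of this commit; the type tags below come from the message above, Message is left opaque, and only message_update is confirmed to carry a message payload (ev.message is read in the diff), so the rest of the shape is an assumption.

    // Sketch only: the session library defines the real AgentEvent type.
    type Message = unknown; // opaque here

    type AgentEvent =
        | { type: "agent_start" }
        | { type: "agent_end" }
        | { type: "turn_start" }
        | { type: "turn_end" }
        | { type: "message_start" }
        | { type: "message_update"; message: Message } // payload confirmed by the diff
        | { type: "message_end" };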

@@ -6104,9 +6104,9 @@ export const MODELS = {
contextWindow: 32768,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"anthropic/claude-3.5-haiku-20241022": {
id: "anthropic/claude-3.5-haiku-20241022",
name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
"anthropic/claude-3.5-haiku": {
id: "anthropic/claude-3.5-haiku",
name: "Anthropic: Claude 3.5 Haiku",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
@@ -6121,9 +6121,9 @@ export const MODELS = {
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"openai-completions">,
"anthropic/claude-3.5-haiku": {
id: "anthropic/claude-3.5-haiku",
name: "Anthropic: Claude 3.5 Haiku",
"anthropic/claude-3.5-haiku-20241022": {
id: "anthropic/claude-3.5-haiku-20241022",
name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
@@ -6359,23 +6359,6 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-8b-instruct": {
id: "meta-llama/llama-3.1-8b-instruct",
name: "Meta: Llama 3.1 8B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.02,
output: 0.03,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-405b-instruct": {
id: "meta-llama/llama-3.1-405b-instruct",
name: "Meta: Llama 3.1 405B Instruct",
@@ -6410,6 +6393,23 @@ export const MODELS = {
contextWindow: 131072,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.1-8b-instruct": {
id: "meta-llama/llama-3.1-8b-instruct",
name: "Meta: Llama 3.1 8B Instruct",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0.02,
output: 0.03,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"mistralai/mistral-nemo": {
id: "mistralai/mistral-nemo",
name: "Mistral: Mistral Nemo",
@@ -6546,23 +6546,6 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-2024-05-13": {
id: "openai/gpt-4o-2024-05-13",
name: "OpenAI: GPT-4o (2024-05-13)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 5,
output: 15,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4o": {
id: "openai/gpt-4o",
name: "OpenAI: GPT-4o",
@@ -6597,6 +6580,23 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 64000,
} satisfies Model<"openai-completions">,
"openai/gpt-4o-2024-05-13": {
id: "openai/gpt-4o-2024-05-13",
name: "OpenAI: GPT-4o (2024-05-13)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text", "image"],
cost: {
input: 5,
output: 15,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3-70b-instruct": {
id: "meta-llama/llama-3-70b-instruct",
name: "Meta: Llama 3 70B Instruct",
@@ -6835,23 +6835,6 @@ export const MODELS = {
contextWindow: 8191,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4": {
id: "openai/gpt-4",
name: "OpenAI: GPT-4",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 30,
output: 60,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 8191,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-3.5-turbo": {
id: "openai/gpt-3.5-turbo",
name: "OpenAI: GPT-3.5 Turbo",
@@ -6869,6 +6852,23 @@ export const MODELS = {
contextWindow: 16385,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openai/gpt-4": {
id: "openai/gpt-4",
name: "OpenAI: GPT-4",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 30,
output: 60,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 8191,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openrouter/auto": {
id: "openrouter/auto",
name: "OpenRouter: Auto Router",

@@ -131,13 +131,23 @@ export class AgentInterface extends LitElement {
}
if (!this.session) return;
this._unsubscribeSession = this.session.subscribe(async (ev: AgentEvent) => {
- if (ev.type === "message_update") {
- if (this._streamingContainer) {
- const isStreaming = this.session?.state.isStreaming || false;
- this._streamingContainer.isStreaming = isStreaming;
- this._streamingContainer.setMessage(ev.message, !isStreaming);
- }
- this.requestUpdate();
+ switch (ev.type) {
+ case "message_start":
+ case "message_end":
+ case "turn_start":
+ case "turn_end":
+ case "agent_start":
+ case "agent_end":
+ this.requestUpdate();
+ break;
+ case "message_update":
+ if (this._streamingContainer) {
+ const isStreaming = this.session?.state.isStreaming || false;
+ this._streamingContainer.isStreaming = isStreaming;
+ this._streamingContainer.setMessage(ev.message, !isStreaming);
+ }
+ this.requestUpdate();
+ break;
}
});
}
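
One note on lifecycle: the handler is registered via this.session.subscribe(...) and its return value is stored in _unsubscribeSession, so a matching teardown is expected elsewhere in the element. A minimal sketch of that pattern, assuming subscribe() returns an unsubscribe function (the field name suggests it, but this commit does not show that code):

    override disconnectedCallback(): void {
        super.disconnectedCallback();
        this._unsubscribeSession?.(); // assumed: subscribe() returned this cleanup function
        this._unsubscribeSession = undefined;
    }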