Mirror of https://github.com/getcompanion-ai/co-mono.git, synced 2026-04-21 10:00:39 +00:00
mom: fix duplicate bot response logging, remove debug logs
- Remove bot response logging from agent.ts (already done in ctx.respond)
- Remove all debug console.log statements
- Clean up unused store parameter
Parent: cc71c0a49e
Commit: e513127b3b
3 changed files with 98 additions and 107 deletions
@@ -3481,13 +3481,13 @@ export const MODELS = {
     reasoning: false,
     input: ["text"],
     cost: {
-      input: 0.09999999999999999,
-      output: 0.7999999999999999,
+      input: 0.09,
+      output: 1.1,
       cacheRead: 0,
       cacheWrite: 0,
     },
     contextWindow: 262144,
-    maxTokens: 262144,
+    maxTokens: 4096,
   } satisfies Model<"openai-completions">,
   "meituan/longcat-flash-chat:free": {
     id: "meituan/longcat-flash-chat:free",
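An aside on the first hunk: old prices like 0.09999999999999999 and 0.7999999999999999 are IEEE 754 round-trip artifacts, the kind that appear when a generator scales per-token prices into per-million-token prices in floating point. A minimal sketch of the effect and one way to round it away; the perMillion helper is hypothetical, not from this repo:

// Binary doubles cannot represent most decimal fractions exactly.
console.log(0.1 + 0.2); // 0.30000000000000004

// Hypothetical generator helper: scale a per-token price to per-million
// tokens, rounding to cents so noise like 0.09999999999999999 never
// reaches the catalog.
const perMillion = (perToken: number): number =>
  Math.round(perToken * 1e6 * 100) / 100;

console.log(perMillion(0.0000004)); // 0.4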
@@ -4226,7 +4226,7 @@ export const MODELS = {
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: true,
+    reasoning: false,
     input: ["text"],
     cost: {
       input: 0.071,
@@ -5317,13 +5317,13 @@ export const MODELS = {
     reasoning: false,
     input: ["text"],
     cost: {
-      input: 0.108,
+      input: 0.09999999999999999,
       output: 0.32,
       cacheRead: 0,
       cacheWrite: 0,
     },
     contextWindow: 131072,
-    maxTokens: 120000,
+    maxTokens: 16384,
   } satisfies Model<"openai-completions">,
   "amazon/nova-lite-v1": {
     id: "amazon/nova-lite-v1",
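Every entry in this catalog ends with TypeScript's satisfies operator (TS 4.9+), which checks the literal against Model<"openai-completions"> without widening its inferred type, so misspelled or excess keys fail to compile. A self-contained illustration with a simplified stand-in for the real Model type (the actual interface is certainly richer):

type Api = "openai-completions";
interface Model<A extends Api> {
  id: string;
  api: A;
  cost: { input: number; output: number };
  contextWindow: number;
  maxTokens: number;
}

const example = {
  id: "example/model",
  api: "openai-completions",
  cost: { input: 0.1, output: 0.4 },
  contextWindow: 8192,
  maxTokens: 4096,
} satisfies Model<"openai-completions">;

// Adding a typo'd key such as `contextWindw: 8192` above would be a
// compile error, while `example` keeps its precise inferred type instead
// of being widened to Model<"openai-completions">.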
@@ -5512,23 +5512,6 @@ export const MODELS = {
     contextWindow: 200000,
     maxTokens: 8192,
   } satisfies Model<"openai-completions">,
-  "mistralai/ministral-8b": {
-    id: "mistralai/ministral-8b",
-    name: "Mistral: Ministral 8B",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 0.09999999999999999,
-      output: 0.09999999999999999,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 131072,
-    maxTokens: 4096,
-  } satisfies Model<"openai-completions">,
   "mistralai/ministral-3b": {
     id: "mistralai/ministral-3b",
     name: "Mistral: Ministral 3B",
@@ -5546,6 +5529,23 @@ export const MODELS = {
     contextWindow: 131072,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
+  "mistralai/ministral-8b": {
+    id: "mistralai/ministral-8b",
+    name: "Mistral: Ministral 8B",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 0.09999999999999999,
+      output: 0.09999999999999999,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 131072,
+    maxTokens: 4096,
+  } satisfies Model<"openai-completions">,
   "nvidia/llama-3.1-nemotron-70b-instruct": {
     id: "nvidia/llama-3.1-nemotron-70b-instruct",
     name: "NVIDIA: Llama 3.1 Nemotron 70B Instruct",
@@ -5716,22 +5716,22 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 16384,
   } satisfies Model<"openai-completions">,
-  "meta-llama/llama-3.1-8b-instruct": {
-    id: "meta-llama/llama-3.1-8b-instruct",
-    name: "Meta: Llama 3.1 8B Instruct",
+  "meta-llama/llama-3.1-70b-instruct": {
+    id: "meta-llama/llama-3.1-70b-instruct",
+    name: "Meta: Llama 3.1 70B Instruct",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
     input: ["text"],
     cost: {
-      input: 0.02,
-      output: 0.03,
+      input: 0.39999999999999997,
+      output: 0.39999999999999997,
       cacheRead: 0,
       cacheWrite: 0,
     },
     contextWindow: 131072,
-    maxTokens: 16384,
+    maxTokens: 4096,
   } satisfies Model<"openai-completions">,
   "meta-llama/llama-3.1-405b-instruct": {
     id: "meta-llama/llama-3.1-405b-instruct",
@@ -5750,22 +5750,22 @@ export const MODELS = {
     contextWindow: 130815,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
-  "meta-llama/llama-3.1-70b-instruct": {
-    id: "meta-llama/llama-3.1-70b-instruct",
-    name: "Meta: Llama 3.1 70B Instruct",
+  "meta-llama/llama-3.1-8b-instruct": {
+    id: "meta-llama/llama-3.1-8b-instruct",
+    name: "Meta: Llama 3.1 8B Instruct",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
     input: ["text"],
     cost: {
-      input: 0.39999999999999997,
-      output: 0.39999999999999997,
+      input: 0.02,
+      output: 0.03,
       cacheRead: 0,
       cacheWrite: 0,
     },
     contextWindow: 131072,
-    maxTokens: 4096,
+    maxTokens: 16384,
   } satisfies Model<"openai-completions">,
   "mistralai/mistral-nemo": {
     id: "mistralai/mistral-nemo",
@@ -5903,23 +5903,6 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
-  "openai/gpt-4o-2024-05-13": {
-    id: "openai/gpt-4o-2024-05-13",
-    name: "OpenAI: GPT-4o (2024-05-13)",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text", "image"],
-    cost: {
-      input: 5,
-      output: 15,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 128000,
-    maxTokens: 4096,
-  } satisfies Model<"openai-completions">,
   "openai/gpt-4o": {
     id: "openai/gpt-4o",
     name: "OpenAI: GPT-4o",
@@ -5954,22 +5937,22 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 64000,
   } satisfies Model<"openai-completions">,
-  "meta-llama/llama-3-70b-instruct": {
-    id: "meta-llama/llama-3-70b-instruct",
-    name: "Meta: Llama 3 70B Instruct",
+  "openai/gpt-4o-2024-05-13": {
+    id: "openai/gpt-4o-2024-05-13",
+    name: "OpenAI: GPT-4o (2024-05-13)",
     api: "openai-completions",
     provider: "openrouter",
     baseUrl: "https://openrouter.ai/api/v1",
     reasoning: false,
-    input: ["text"],
+    input: ["text", "image"],
     cost: {
-      input: 0.3,
-      output: 0.39999999999999997,
+      input: 5,
+      output: 15,
       cacheRead: 0,
       cacheWrite: 0,
     },
-    contextWindow: 8192,
-    maxTokens: 16384,
+    contextWindow: 128000,
+    maxTokens: 4096,
   } satisfies Model<"openai-completions">,
   "meta-llama/llama-3-8b-instruct": {
     id: "meta-llama/llama-3-8b-instruct",
@@ -5988,6 +5971,23 @@ export const MODELS = {
     contextWindow: 8192,
     maxTokens: 16384,
   } satisfies Model<"openai-completions">,
+  "meta-llama/llama-3-70b-instruct": {
+    id: "meta-llama/llama-3-70b-instruct",
+    name: "Meta: Llama 3 70B Instruct",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 0.3,
+      output: 0.39999999999999997,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 8192,
+    maxTokens: 16384,
+  } satisfies Model<"openai-completions">,
   "mistralai/mixtral-8x22b-instruct": {
     id: "mistralai/mixtral-8x22b-instruct",
     name: "Mistral: Mixtral 8x22B Instruct",
@@ -6073,23 +6073,6 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
-  "openai/gpt-3.5-turbo-0613": {
-    id: "openai/gpt-3.5-turbo-0613",
-    name: "OpenAI: GPT-3.5 Turbo (older v0613)",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 1,
-      output: 2,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 4095,
-    maxTokens: 4096,
-  } satisfies Model<"openai-completions">,
   "openai/gpt-4-turbo-preview": {
     id: "openai/gpt-4-turbo-preview",
     name: "OpenAI: GPT-4 Turbo Preview",
@@ -6107,6 +6090,23 @@ export const MODELS = {
     contextWindow: 128000,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
+  "openai/gpt-3.5-turbo-0613": {
+    id: "openai/gpt-3.5-turbo-0613",
+    name: "OpenAI: GPT-3.5 Turbo (older v0613)",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 1,
+      output: 2,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 4095,
+    maxTokens: 4096,
+  } satisfies Model<"openai-completions">,
   "mistralai/mistral-tiny": {
     id: "mistralai/mistral-tiny",
     name: "Mistral Tiny",
@@ -6175,6 +6175,23 @@ export const MODELS = {
     contextWindow: 16385,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
+  "openai/gpt-3.5-turbo": {
+    id: "openai/gpt-3.5-turbo",
+    name: "OpenAI: GPT-3.5 Turbo",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: false,
+    input: ["text"],
+    cost: {
+      input: 0.5,
+      output: 1.5,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 16385,
+    maxTokens: 4096,
+  } satisfies Model<"openai-completions">,
   "openai/gpt-4-0314": {
     id: "openai/gpt-4-0314",
     name: "OpenAI: GPT-4 (older v0314)",
@@ -6209,23 +6226,6 @@ export const MODELS = {
     contextWindow: 8191,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
-  "openai/gpt-3.5-turbo": {
-    id: "openai/gpt-3.5-turbo",
-    name: "OpenAI: GPT-3.5 Turbo",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text"],
-    cost: {
-      input: 0.5,
-      output: 1.5,
-      cacheRead: 0,
-      cacheWrite: 0,
-    },
-    contextWindow: 16385,
-    maxTokens: 4096,
-  } satisfies Model<"openai-completions">,
   "openrouter/auto": {
     id: "openrouter/auto",
     name: "OpenRouter: Auto Router",
@@ -262,7 +262,7 @@ export function createAgentRunner(sandboxConfig: SandboxConfig): AgentRunner {
   const executor = createExecutor(sandboxConfig);

   return {
-    async run(ctx: SlackContext, channelDir: string, store: ChannelStore): Promise<{ stopReason: string }> {
+    async run(ctx: SlackContext, channelDir: string, _store: ChannelStore): Promise<{ stopReason: string }> {
       // Ensure channel directory exists
       await mkdir(channelDir, { recursive: true });

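The store -> _store rename in this hunk is the standard TypeScript convention for a parameter that has to stay in the signature (run must still match the AgentRunner interface) but is no longer read: with noUnusedParameters enabled, a leading underscore exempts it from the unused-parameter error. A minimal illustration with stand-in types, not the real SlackContext/ChannelStore:

// tsconfig.json: { "compilerOptions": { "noUnusedParameters": true } }
type Store = { log(text: string): void }; // stand-in for ChannelStore

// error TS6133: 'store' is declared but its value is never read.
// function run(input: string, store: Store): string { return input.trim(); }

// OK: the underscore marks the parameter as intentionally unused, and
// callers that still pass a store keep compiling unchanged.
function run(input: string, _store: Store): string {
  return input.trim();
}

run("  hello  ", { log: console.log });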
@@ -424,7 +424,7 @@ export function createAgentRunner(sandboxConfig: SandboxConfig): AgentRunner {

       // Subscribe to session events
       const unsubscribe = session.subscribe(async (event) => {
-        // Handle core agent events
+        // Handle agent events
         if (event.type === "tool_execution_start") {
           const agentEvent = event as AgentEvent & { type: "tool_execution_start" };
           const args = agentEvent.args as { label?: string };
@@ -571,16 +571,7 @@ export function createAgentRunner(sandboxConfig: SandboxConfig): AgentRunner {
           .join("\n") || "";

       if (finalText.trim()) {
-        // Log final response to log.jsonl (human-readable history)
-        await store.logMessage(ctx.message.channel, {
-          date: new Date().toISOString(),
-          ts: toSlackTs(),
-          user: "bot",
-          text: finalText,
-          attachments: [],
-          isBot: true,
-        });
-
+        // Note: Bot response is logged via ctx.respond() in the event handler
         try {
           const mainText =
             finalText.length > SLACK_MAX_LENGTH
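This hunk is the duplicate-logging fix itself: ctx.respond() already records the bot message when it posts it, so the store.logMessage() call here wrote every response a second time. A sketch of the single-write pattern the commit converges on; the wiring and names below (post, makeRespond) are assumptions, and only the LogEntry fields come from the removed code:

interface LogEntry {
  date: string;
  ts: string;
  user: string;
  text: string;
  attachments: string[];
  isBot: boolean;
}

// Hypothetical factory: the posting path is the only logging path, so a
// response can never be recorded twice no matter how many call sites post.
function makeRespond(
  post: (text: string) => Promise<void>,
  logMessage: (entry: LogEntry) => Promise<void>,
): (text: string) => Promise<void> {
  return async (text) => {
    await post(text);
    await logMessage({
      date: new Date().toISOString(),
      ts: String(Date.now() / 1000), // stand-in for toSlackTs()
      user: "bot",
      text,
      attachments: [],
      isBot: true,
    });
  };
}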
@@ -161,7 +161,7 @@ export class MomSessionManager {
       appendFileSync(this.contextFile, JSON.stringify(entry) + "\n");
     }

-    console.log(`[mom] Synced ${newMessages.length} messages from log.jsonl to context.jsonl`);
+    // Sync complete - newMessages.length messages added
   }

   private extractSessionId(): string | null {