initial: webhook telegram adapter for pi with streaming replies

- webhook server with secret validation, rate limiting, body guards
- streaming replies via sendMessage + editMessageText throttled loop
- RPC session management for persistent conversations
- 15/15 tests passing
This commit is contained in:
Harivansh Rathi 2026-04-03 05:30:05 +00:00
parent 809e9b1df5
commit ce9abc2a8e
18 changed files with 6991 additions and 1 deletions

View file

@@ -0,0 +1,107 @@
/**
* Tests for streaming reply logic.
*/
import { describe, it, expect, vi } from "vitest";
import { StreamingReply } from "../src/streaming-reply.js";
import type { TelegramAPI } from "../src/telegram-api.js";
/**
 * Builds a fake TelegramAPI that records every outbound call.
 * Each sendMessage/editMessageText invocation is appended to `calls`
 * so tests can assert on ordering, kind, and payload text.
 */
function makeRecordingApi() {
  const calls: Array<{ type: "send" | "edit"; text: string }> = [];
  const api = {
    sendMessage: vi.fn(async (_chatId: string, text: string) => {
      calls.push({ type: "send", text });
      return { message_id: 1 };
    }),
    editMessageText: vi.fn(async (_chatId: string, _msgId: number, text: string) => {
      calls.push({ type: "edit", text });
    }),
  } as unknown as TelegramAPI;
  return { api, calls };
}

describe("streaming-reply", () => {
  it("should throttle edits", async () => {
    const { api, calls } = makeRecordingApi();
    const reply = new StreamingReply(api, "123", 1, {
      throttleMs: 100,
      minInitialChars: 10,
    });

    reply.update("Hello");
    reply.update("Hello world");
    reply.update("Hello world!!!");

    // Give the throttle window (and the minInitialChars gate) time to fire.
    await new Promise((resolve) => setTimeout(resolve, 150));

    expect(calls.length).toBeGreaterThan(0);
    // The very first outbound call must be a fresh send, not an edit.
    expect(calls[0]?.type).toBe("send");
    expect(calls[0]?.text).toContain("Hello");

    await reply.stop();
  });

  it("should track generation to prevent clobbering", () => {
    const { api } = makeRecordingApi();
    const first = new StreamingReply(api, "123", 1, { throttleMs: 1000 });
    const second = new StreamingReply(api, "123", 2, { throttleMs: 1000 });

    // Each reply keeps the generation it was constructed with.
    expect(first.getGeneration()).toBe(1);
    expect(second.getGeneration()).toBe(2);
    expect(first.getGeneration()).not.toBe(second.getGeneration());
  });

  it("should truncate text exceeding 4096 chars", async () => {
    const { api, calls } = makeRecordingApi();
    const reply = new StreamingReply(api, "123", 1, {
      throttleMs: 50,
      minInitialChars: 0,
    });

    reply.update("x".repeat(5000));
    await reply.flush();

    const sends = calls.filter((c) => c.type === "send");
    expect(sends.length).toBe(1);
    // Telegram caps a single message at 4096 characters.
    expect(sends[0]!.text.length).toBe(4096);

    await reply.stop();
  });

  it("should debounce first send based on minInitialChars", async () => {
    const { api, calls } = makeRecordingApi();
    const reply = new StreamingReply(api, "123", 1, {
      throttleMs: 50,
      minInitialChars: 20,
    });

    reply.update("Short");
    await reply.flush();
    // Below the 20-char threshold: nothing goes out yet.
    expect(calls.filter((c) => c.type === "send").length).toBe(0);

    reply.update("This is now long enough to send");
    await reply.flush();
    expect(calls.filter((c) => c.type === "send").length).toBe(1);

    await reply.stop();
  });
});