mirror of
https://github.com/harivansh-afk/sandbox-agent.git
synced 2026-04-15 19:05:18 +00:00
* Fix Foundry auth: migrate to Better Auth adapter, fix access token retrieval - Remove @ts-nocheck from better-auth.ts, auth-user/index.ts, app-shell.ts and fix all type errors - Fix getAccessTokenForSession: read GitHub token directly from account record instead of calling Better Auth's internal /get-access-token endpoint which returns 403 on server-side calls - Re-implement workspaceAuth helper functions (workspaceAuthColumn, normalizeAuthValue, workspaceAuthClause, workspaceAuthWhere) that were accidentally deleted - Remove all retry logic (withRetries, isRetryableAppActorError) - Implement CORS origin allowlist from configured environment - Document cachedAppWorkspace singleton pattern - Add inline org sync fallback in buildAppSnapshot for post-OAuth flow - Add no-retry rule to CLAUDE.md Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * Add Foundry dev panel from fix-git-data branch Port the dev panel component that was left out when PR #243 was replaced by PR #247. Adapted to remove runtime/mock-debug references that don't exist on the current branch. 
- Toggle with Shift+D, persists visibility to localStorage - Shows context, session, GitHub sync status sections - Dev-only (import.meta.env.DEV) Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * Add full Docker image defaults, fix actor deadlocks, and improve dev experience - Add Dockerfile.full and --all flag to install-agent CLI for pre-built images - Centralize Docker image constant (FULL_IMAGE) pinned to 0.3.1-full - Remove examples/shared/Dockerfile{,.dev} and daytona snapshot example - Expand Docker docs with full runnable Dockerfile - Fix self-deadlock in createWorkbenchSession (fire-and-forget provisioning) - Audit and convert 12 task actions from wait:true to wait:false - Add bun --hot for dev backend hot reload - Remove --force from pnpm install in dev Dockerfile for faster startup - Add env_file support to compose.dev.yaml for automatic credential loading - Add mock frontend compose config and dev panel - Update CLAUDE.md with wait:true policy and dev environment setup Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * WIP: async action fixes and interest manager Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * Fix Foundry UI bugs: org names, hanging sessions, and wrong repo creation - Fix org display name using GitHub description instead of name field - Fix createWorkbenchSession hanging when sandbox is provisioning - Fix auto-session creation retry storm on errors - Fix task creation using wrong repo due to React state race conditions - Remove Bun hot-reload from backend Dockerfile (causes port drift) - Add GitHub sync/install status to dev panel Co-Authored-By: Claude Haiku 4.5 <noreply@anthropic.com> --------- Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
171 lines
5.1 KiB
TypeScript
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
|
import type { WorkspaceEvent, WorkspaceSummarySnapshot } from "@sandbox-agent/foundry-shared";
|
|
import type { ActorConn, BackendClient } from "../src/backend-client.js";
|
|
import { RemoteInterestManager } from "../src/interest/remote-manager.js";
|
|
|
|
class FakeActorConn implements ActorConn {
|
|
private readonly listeners = new Map<string, Set<(payload: any) => void>>();
|
|
private readonly errorListeners = new Set<(error: unknown) => void>();
|
|
disposeCount = 0;
|
|
|
|
on(event: string, listener: (payload: any) => void): () => void {
|
|
let current = this.listeners.get(event);
|
|
if (!current) {
|
|
current = new Set();
|
|
this.listeners.set(event, current);
|
|
}
|
|
current.add(listener);
|
|
return () => {
|
|
current?.delete(listener);
|
|
if (current?.size === 0) {
|
|
this.listeners.delete(event);
|
|
}
|
|
};
|
|
}
|
|
|
|
onError(listener: (error: unknown) => void): () => void {
|
|
this.errorListeners.add(listener);
|
|
return () => {
|
|
this.errorListeners.delete(listener);
|
|
};
|
|
}
|
|
|
|
emit(event: string, payload: unknown): void {
|
|
for (const listener of this.listeners.get(event) ?? []) {
|
|
listener(payload);
|
|
}
|
|
}
|
|
|
|
emitError(error: unknown): void {
|
|
for (const listener of this.errorListeners) {
|
|
listener(error);
|
|
}
|
|
}
|
|
|
|
async dispose(): Promise<void> {
|
|
this.disposeCount += 1;
|
|
}
|
|
}
|
|
|
|
function workspaceSnapshot(): WorkspaceSummarySnapshot {
|
|
return {
|
|
workspaceId: "ws-1",
|
|
repos: [{ id: "repo-1", label: "repo-1", taskCount: 1, latestActivityMs: 10 }],
|
|
taskSummaries: [
|
|
{
|
|
id: "task-1",
|
|
repoId: "repo-1",
|
|
title: "Initial task",
|
|
status: "idle",
|
|
repoName: "repo-1",
|
|
updatedAtMs: 10,
|
|
branch: "main",
|
|
pullRequest: null,
|
|
sessionsSummary: [],
|
|
},
|
|
],
|
|
};
|
|
}
|
|
|
|
function createBackend(conn: FakeActorConn, snapshot: WorkspaceSummarySnapshot): BackendClient {
|
|
return {
|
|
connectWorkspace: vi.fn(async () => conn),
|
|
getWorkspaceSummary: vi.fn(async () => snapshot),
|
|
} as unknown as BackendClient;
|
|
}
|
|
|
|
async function flushAsyncWork(): Promise<void> {
|
|
await Promise.resolve();
|
|
await Promise.resolve();
|
|
}
|
|
|
|
describe("RemoteInterestManager", () => {
  // Fake timers let the tests drive the topic grace-period countdown
  // deterministically with advanceTimersByTime.
  beforeEach(() => {
    vi.useFakeTimers();
  });

  afterEach(() => {
    vi.useRealTimers();
  });

  it("shares one connection per topic key and applies incoming events", async () => {
    const conn = new FakeActorConn();
    const backend = createBackend(conn, workspaceSnapshot());
    const manager = new RemoteInterestManager(backend);
    const params = { workspaceId: "ws-1" } as const;
    const listenerA = vi.fn();
    const listenerB = vi.fn();

    // Two subscribers on the same topic key ("workspace" + same params)...
    const unsubscribeA = manager.subscribe("workspace", params, listenerA);
    const unsubscribeB = manager.subscribe("workspace", params, listenerB);
    await flushAsyncWork();

    // ...must share a single backend connection and a single snapshot fetch.
    expect(backend.connectWorkspace).toHaveBeenCalledTimes(1);
    expect(backend.getWorkspaceSummary).toHaveBeenCalledTimes(1);
    expect(manager.getStatus("workspace", params)).toBe("connected");
    expect(manager.getSnapshot("workspace", params)?.taskSummaries[0]?.title).toBe("Initial task");

    // Push a live event through the fake connection; the manager should fold
    // it into the cached snapshot and notify every subscriber.
    conn.emit("workspaceUpdated", {
      type: "taskSummaryUpdated",
      taskSummary: {
        id: "task-1",
        repoId: "repo-1",
        title: "Updated task",
        status: "running",
        repoName: "repo-1",
        updatedAtMs: 20,
        branch: "feature/live",
        pullRequest: null,
        sessionsSummary: [],
      },
    } satisfies WorkspaceEvent);

    expect(manager.getSnapshot("workspace", params)?.taskSummaries[0]?.title).toBe("Updated task");
    expect(listenerA).toHaveBeenCalled();
    expect(listenerB).toHaveBeenCalled();

    unsubscribeA();
    unsubscribeB();
    manager.dispose();
  });

  it("keeps a topic warm during the grace period and tears it down afterwards", async () => {
    const conn = new FakeActorConn();
    const backend = createBackend(conn, workspaceSnapshot());
    const manager = new RemoteInterestManager(backend);
    const params = { workspaceId: "ws-1" } as const;

    const unsubscribeA = manager.subscribe("workspace", params, () => {});
    await flushAsyncWork();
    unsubscribeA();

    // Just under the (presumably 30s — confirm against RemoteInterestManager)
    // grace period: the topic should still be warm.
    vi.advanceTimersByTime(29_000);

    // Re-subscribing inside the grace period must reuse the live connection.
    const unsubscribeB = manager.subscribe("workspace", params, () => {});
    await flushAsyncWork();

    expect(backend.connectWorkspace).toHaveBeenCalledTimes(1);
    expect(conn.disposeCount).toBe(0);

    // Once the last subscriber leaves and the full grace period elapses,
    // the connection is disposed and the cached snapshot is dropped.
    unsubscribeB();
    vi.advanceTimersByTime(30_000);

    expect(conn.disposeCount).toBe(1);
    expect(manager.getSnapshot("workspace", params)).toBeUndefined();
  });

  it("surfaces connection errors to subscribers", async () => {
    const conn = new FakeActorConn();
    const backend = createBackend(conn, workspaceSnapshot());
    const manager = new RemoteInterestManager(backend);
    const params = { workspaceId: "ws-1" } as const;

    manager.subscribe("workspace", params, () => {});
    await flushAsyncWork();

    // An error emitted by the connection flips the topic to the error state
    // and exposes the original Error to callers.
    conn.emitError(new Error("socket dropped"));

    expect(manager.getStatus("workspace", params)).toBe("error");
    expect(manager.getError("workspace", params)?.message).toBe("socket dropped");
  });
});
|