SDK: Add ensureServer() for automatic server recovery (#260)

* SDK sandbox provisioning: built-in providers, docs restructure, and quickstart overhaul

- Add built-in sandbox providers (local, docker, e2b, daytona, vercel, cloudflare) to the TypeScript SDK so users import directly instead of passing client instances
- Restructure docs: rename architecture to orchestration-architecture, add new architecture page for server overview, improve getting started flow
- Rewrite quickstart to be TypeScript-first with provider CodeGroup and custom provider accordion
- Update all examples to use new provider APIs
- Update persist drivers and foundry for new SDK surface

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* Fix SDK typecheck errors and update persist drivers for insertEvent signature

- Fix insertEvent call in client.ts to pass sessionId as first argument
- Update Daytona provider create options to use Partial type (image has default)
- Update StrictUniqueSessionPersistDriver in tests to match new insertEvent signature
- Sync persist packages, openapi spec, and docs with upstream changes

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* Add Modal and ComputeSDK built-in providers, update examples and docs

- Add `sandbox-agent/modal` provider using Modal SDK with node:22-slim image
- Add `sandbox-agent/computesdk` provider using ComputeSDK's unified sandbox API
- Update Modal and ComputeSDK examples to use new SDK providers
- Update Modal and ComputeSDK deploy docs with provider-based examples
- Add Modal to quickstart CodeGroup and docs.json navigation
- Add provider test entries for Modal and ComputeSDK
- Remove old standalone example files (modal.ts, computesdk.ts)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* Fix Modal provider: pre-install agents in image, fire-and-forget exec for server

- Pre-install agents in Dockerfile commands so they are cached across creates
- Use fire-and-forget exec (no wait) to keep server alive in Modal sandbox
- Add memoryMiB option (default 2GB) to avoid OOM during agent install

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* Sync upstream changes: multiplayer docs, logos, openapi spec, foundry config

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* SDK: Add ensureServer() for automatic server recovery

Add ensureServer() to SandboxProvider interface to handle cases where the
sandbox-agent server stops or goes to sleep. The SDK now calls this method
after 3 consecutive health-check failures, allowing providers to restart the
server if needed. Most built-in providers (E2B, Daytona, Vercel, Modal,
ComputeSDK) implement this. Docker and Cloudflare manage server lifecycle
differently, and Local uses managed child processes.

Also update docs for quickstart, architecture, multiplayer, and session
persistence; mark persist-* packages as deprecated; and add ensureServer
implementations to all applicable providers.

Co-Authored-By: Claude Haiku 4.5 <noreply@anthropic.com>

* wip

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Nathan Flurry 2026-03-15 20:29:28 -07:00 committed by GitHub
parent 3426cbc6ec
commit cf7e2a92c6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
112 changed files with 3739 additions and 3537 deletions

View file

@@ -0,0 +1,5 @@
# @sandbox-agent/persist-postgres
> **Deprecated:** This package has been deprecated and removed.
Install `pg` directly and copy the driver source into your project. See the [full example](https://github.com/rivet-dev/sandbox-agent/tree/main/examples/persist-postgres) and the [session persistence docs](https://sandboxagent.dev/session-persistence) for guidance.

View file

@@ -1,7 +1,7 @@
{
"name": "@sandbox-agent/persist-postgres",
"version": "0.3.2",
"description": "PostgreSQL persistence driver for the Sandbox Agent TypeScript SDK",
"description": "PostgreSQL persistence driver for the Sandbox Agent TypeScript SDK (DEPRECATED)",
"license": "Apache-2.0",
"repository": {
"type": "git",
@@ -16,24 +16,16 @@
"import": "./dist/index.js"
}
},
"dependencies": {
"pg": "^8.16.3",
"sandbox-agent": "workspace:*"
},
"files": [
"dist"
],
"scripts": {
"build": "tsup",
"typecheck": "tsc --noEmit",
"test": "vitest run",
"test:watch": "vitest"
"typecheck": "tsc --noEmit"
},
"devDependencies": {
"@types/node": "^22.0.0",
"@types/pg": "^8.15.6",
"tsup": "^8.0.0",
"typescript": "^5.7.0",
"vitest": "^3.0.0"
"typescript": "^5.7.0"
}
}

View file

@@ -1,306 +1,5 @@
import { Pool, type PoolConfig } from "pg";
import type { ListEventsRequest, ListPage, ListPageRequest, SessionEvent, SessionPersistDriver, SessionRecord } from "sandbox-agent";
// Default page size used when a list request omits `limit` or passes an invalid one.
const DEFAULT_LIST_LIMIT = 100;
// Construction options for PostgresSessionPersistDriver.
export interface PostgresSessionPersistDriverOptions {
// Connection string used when the driver builds its own pool.
connectionString?: string;
// Pre-built pg Pool to reuse; the driver will NOT end() it in close().
pool?: Pool;
// Extra pool configuration, spread over `connectionString` when the driver owns the pool.
poolConfig?: PoolConfig;
// Postgres schema holding the driver's tables; defaults to "public".
// Must match normalizeSchema's identifier rules (letters/digits/underscores).
schema?: string;
}
// PostgreSQL-backed implementation of the SDK's SessionPersistDriver.
// Sessions and events live in two tables inside a configurable schema; writes
// are idempotent upserts keyed by id, and list operations page with
// offset-style string cursors. Table creation and legacy migrations run
// lazily via initialize(), awaited by every public method.
export class PostgresSessionPersistDriver implements SessionPersistDriver {
private readonly pool: Pool;
// True when the driver created the pool itself (and must end() it on close()).
private readonly ownsPool: boolean;
// Validated schema name; safe to interpolate into SQL (see normalizeSchema).
private readonly schema: string;
// Resolves once DDL/migrations have completed; see ready().
private readonly initialized: Promise<void>;
constructor(options: PostgresSessionPersistDriverOptions = {}) {
this.schema = normalizeSchema(options.schema ?? "public");
if (options.pool) {
// Caller-supplied pool: reuse it and leave its lifecycle to the caller.
this.pool = options.pool;
this.ownsPool = false;
} else {
this.pool = new Pool({
connectionString: options.connectionString,
...options.poolConfig,
});
this.ownsPool = true;
}
// Kick off schema setup immediately so the first query doesn't pay extra latency.
this.initialized = this.initialize();
}
// Fetches one session by id; resolves null when it does not exist.
async getSession(id: string): Promise<SessionRecord | null> {
await this.ready();
const result = await this.pool.query<SessionRow>(
`SELECT id, agent, agent_session_id, last_connection_id, created_at, destroyed_at, session_init_json
FROM ${this.table("sessions")}
WHERE id = $1`,
[id],
);
if (result.rows.length === 0) {
return null;
}
return decodeSessionRow(result.rows[0]);
}
// Lists sessions ordered by creation time (id as tiebreaker).
// `request.cursor` is a stringified row offset produced by a previous page.
async listSessions(request: ListPageRequest = {}): Promise<ListPage<SessionRecord>> {
await this.ready();
const offset = parseCursor(request.cursor);
const limit = normalizeLimit(request.limit);
const rowsResult = await this.pool.query<SessionRow>(
`SELECT id, agent, agent_session_id, last_connection_id, created_at, destroyed_at, session_init_json
FROM ${this.table("sessions")}
ORDER BY created_at ASC, id ASC
LIMIT $1 OFFSET $2`,
[limit, offset],
);
// Total count decides whether a nextCursor should be emitted.
const countResult = await this.pool.query<{ count: string }>(`SELECT COUNT(*) AS count FROM ${this.table("sessions")}`);
const total = parseInteger(countResult.rows[0]?.count ?? "0");
const nextOffset = offset + rowsResult.rows.length;
return {
items: rowsResult.rows.map(decodeSessionRow),
nextCursor: nextOffset < total ? String(nextOffset) : undefined,
};
}
// Upserts a session record by id, overwriting every column on conflict.
async updateSession(session: SessionRecord): Promise<void> {
await this.ready();
await this.pool.query(
`INSERT INTO ${this.table("sessions")} (
id, agent, agent_session_id, last_connection_id, created_at, destroyed_at, session_init_json
) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT(id) DO UPDATE SET
agent = EXCLUDED.agent,
agent_session_id = EXCLUDED.agent_session_id,
last_connection_id = EXCLUDED.last_connection_id,
created_at = EXCLUDED.created_at,
destroyed_at = EXCLUDED.destroyed_at,
session_init_json = EXCLUDED.session_init_json`,
[
session.id,
session.agent,
session.agentSessionId,
session.lastConnectionId,
session.createdAt,
session.destroyedAt ?? null,
session.sessionInit ?? null,
],
);
}
// Lists one session's events ordered by event_index (id as tiebreaker),
// using the same offset-cursor pagination scheme as listSessions.
async listEvents(request: ListEventsRequest): Promise<ListPage<SessionEvent>> {
await this.ready();
const offset = parseCursor(request.cursor);
const limit = normalizeLimit(request.limit);
const rowsResult = await this.pool.query<EventRow>(
`SELECT id, event_index, session_id, created_at, connection_id, sender, payload_json
FROM ${this.table("events")}
WHERE session_id = $1
ORDER BY event_index ASC, id ASC
LIMIT $2 OFFSET $3`,
[request.sessionId, limit, offset],
);
const countResult = await this.pool.query<{ count: string }>(`SELECT COUNT(*) AS count FROM ${this.table("events")} WHERE session_id = $1`, [
request.sessionId,
]);
const total = parseInteger(countResult.rows[0]?.count ?? "0");
const nextOffset = offset + rowsResult.rows.length;
return {
items: rowsResult.rows.map(decodeEventRow),
nextCursor: nextOffset < total ? String(nextOffset) : undefined,
};
}
// Upserts one event by id, so re-delivery of the same event is harmless.
async insertEvent(event: SessionEvent): Promise<void> {
await this.ready();
await this.pool.query(
`INSERT INTO ${this.table("events")} (
id, event_index, session_id, created_at, connection_id, sender, payload_json
) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT(id) DO UPDATE SET
event_index = EXCLUDED.event_index,
session_id = EXCLUDED.session_id,
created_at = EXCLUDED.created_at,
connection_id = EXCLUDED.connection_id,
sender = EXCLUDED.sender,
payload_json = EXCLUDED.payload_json`,
[event.id, event.eventIndex, event.sessionId, event.createdAt, event.connectionId, event.sender, event.payload],
);
}
// Ends the pool only when this driver created it; caller-owned pools are left open.
async close(): Promise<void> {
if (!this.ownsPool) {
return;
}
await this.pool.end();
}
// Gate used by every public method to ensure DDL/migrations have finished.
private async ready(): Promise<void> {
await this.initialized;
}
// Builds a fully-qualified, double-quoted table reference. The schema was
// validated in the constructor, so interpolation here cannot inject SQL.
private table(name: "sessions" | "events"): string {
return `"${this.schema}"."${name}"`;
}
// Creates the schema/tables when absent and migrates legacy layouts:
// casts event id to TEXT, adds event_index, backfills it per session via
// ROW_NUMBER ordered by (created_at, id), enforces NOT NULL, and adds the
// ordering index used by listEvents.
private async initialize(): Promise<void> {
await this.pool.query(`CREATE SCHEMA IF NOT EXISTS "${this.schema}"`);
await this.pool.query(`
CREATE TABLE IF NOT EXISTS ${this.table("sessions")} (
id TEXT PRIMARY KEY,
agent TEXT NOT NULL,
agent_session_id TEXT NOT NULL,
last_connection_id TEXT NOT NULL,
created_at BIGINT NOT NULL,
destroyed_at BIGINT,
session_init_json JSONB
)
`);
await this.pool.query(`
CREATE TABLE IF NOT EXISTS ${this.table("events")} (
id TEXT PRIMARY KEY,
event_index BIGINT NOT NULL,
session_id TEXT NOT NULL,
created_at BIGINT NOT NULL,
connection_id TEXT NOT NULL,
sender TEXT NOT NULL,
payload_json JSONB NOT NULL
)
`);
// Legacy databases may have a non-TEXT id column; normalize it.
await this.pool.query(`
ALTER TABLE ${this.table("events")}
ALTER COLUMN id TYPE TEXT USING id::TEXT
`);
await this.pool.query(`
ALTER TABLE ${this.table("events")}
ADD COLUMN IF NOT EXISTS event_index BIGINT
`);
// Backfill event_index only where it is NULL, preserving historical order.
await this.pool.query(`
WITH ranked AS (
SELECT id, ROW_NUMBER() OVER (PARTITION BY session_id ORDER BY created_at ASC, id ASC) AS ranked_index
FROM ${this.table("events")}
)
UPDATE ${this.table("events")} AS current_events
SET event_index = ranked.ranked_index
FROM ranked
WHERE current_events.id = ranked.id
AND current_events.event_index IS NULL
`);
await this.pool.query(`
ALTER TABLE ${this.table("events")}
ALTER COLUMN event_index SET NOT NULL
`);
await this.pool.query(`
CREATE INDEX IF NOT EXISTS idx_events_session_order
ON ${this.table("events")}(session_id, event_index, id)
`);
}
}
// Raw sessions-table row as returned by pg. BIGINT columns may arrive as
// decimal strings (node-postgres default), hence `string | number`.
type SessionRow = {
id: string;
agent: string;
agent_session_id: string;
last_connection_id: string;
created_at: string | number;
destroyed_at: string | number | null;
session_init_json: unknown | null;
};
// Raw events-table row as returned by pg. `id` may be numeric in legacy
// databases (pre-TEXT migration); BIGINTs may arrive as strings.
type EventRow = {
id: string | number;
event_index: string | number;
session_id: string;
created_at: string | number;
connection_id: string;
sender: string;
payload_json: unknown;
};
// Maps a raw sessions-table row onto the SDK's SessionRecord shape,
// converting BIGINT columns (which pg may return as strings) to numbers.
// A NULL destroyed_at / session_init_json becomes `undefined`.
function decodeSessionRow(row: SessionRow): SessionRecord {
  const destroyedAt = row.destroyed_at === null ? undefined : parseInteger(row.destroyed_at);
  const sessionInit = row.session_init_json ? (row.session_init_json as SessionRecord["sessionInit"]) : undefined;
  return {
    id: row.id,
    agent: row.agent,
    agentSessionId: row.agent_session_id,
    lastConnectionId: row.last_connection_id,
    createdAt: parseInteger(row.created_at),
    destroyedAt,
    sessionInit,
  };
}
// Maps a raw events-table row onto the SDK's SessionEvent shape. Legacy
// numeric ids are stringified; BIGINT columns are parsed to numbers and the
// sender column is validated against the known literal values.
function decodeEventRow(row: EventRow): SessionEvent {
  const eventIndex = parseInteger(row.event_index);
  const createdAt = parseInteger(row.created_at);
  return {
    id: String(row.id),
    eventIndex,
    sessionId: row.session_id,
    createdAt,
    connectionId: row.connection_id,
    sender: parseSender(row.sender),
    payload: row.payload_json as SessionEvent["payload"],
  };
}
// Clamps a caller-supplied page limit: undefined, non-finite, or sub-1
// values fall back to DEFAULT_LIST_LIMIT; fractional limits are floored.
function normalizeLimit(limit: number | undefined): number {
  // Checking `undefined` explicitly narrows `limit` to `number`, removing the
  // original's `as number` assertion and `(limit ?? 0)` workaround.
  if (limit === undefined || !Number.isFinite(limit) || limit < 1) {
    return DEFAULT_LIST_LIMIT;
  }
  return Math.floor(limit);
}
// Decodes an offset-style pagination cursor. Missing, empty, non-numeric,
// or negative cursors all resolve to offset 0.
function parseCursor(cursor: string | undefined): number {
  if (!cursor) {
    return 0;
  }
  const offset = Number.parseInt(cursor, 10);
  return Number.isFinite(offset) && offset >= 0 ? offset : 0;
}
// Converts a pg-returned BIGINT (plain number or decimal string) to a JS
// number, throwing when the value is not finite after conversion.
function parseInteger(value: string | number): number {
  const result = typeof value === "string" ? Number.parseInt(value, 10) : value;
  if (Number.isFinite(result)) {
    return result;
  }
  throw new Error(`Invalid integer value returned by postgres: ${String(value)}`);
}
// Validates the sender column against the two values the SDK emits,
// rejecting anything else loudly rather than propagating bad data.
function parseSender(value: string): SessionEvent["sender"] {
  switch (value) {
    case "agent":
    case "client":
      return value;
    default:
      throw new Error(`Invalid sender value returned by postgres: ${value}`);
  }
}
// Validates a Postgres schema identifier so it can be interpolated into DDL
// without quoting surprises or injection: letters, digits, and underscores
// only, not starting with a digit.
function normalizeSchema(schema: string): string {
  const identifier = /^[A-Za-z_][A-Za-z0-9_]*$/;
  if (identifier.test(schema)) {
    return schema;
  }
  throw new Error(`Invalid schema name '${schema}'. Use letters, numbers, and underscores only.`);
}
// Deprecation tombstone: this package no longer ships a driver. Importing it
// fails fast with migration guidance instead of silently exporting nothing.
throw new Error(
"@sandbox-agent/persist-postgres has been deprecated and removed. " +
"Copy the reference implementation from examples/persist-postgres into your project instead. " +
"See https://github.com/rivet-dev/sandbox-agent/tree/main/examples/persist-postgres",
);

View file

@@ -1,245 +0,0 @@
import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from "vitest";
import { execFileSync } from "node:child_process";
import { existsSync, mkdtempSync, rmSync } from "node:fs";
import { dirname, join, resolve } from "node:path";
import { fileURLToPath } from "node:url";
import { tmpdir } from "node:os";
import { randomUUID } from "node:crypto";
import { Client } from "pg";
import { SandboxAgent } from "sandbox-agent";
import { spawnSandboxAgent, type SandboxAgentSpawnHandle } from "../../typescript/src/spawn.ts";
import { prepareMockAgentDataHome } from "../../typescript/tests/helpers/mock-agent.ts";
import { PostgresSessionPersistDriver } from "../src/index.ts";
// ESM has no __dirname; reconstruct it from the module URL.
const __dirname = dirname(fileURLToPath(import.meta.url));
// Locates the sandbox-agent binary: an explicit SANDBOX_AGENT_BIN env var
// wins, otherwise the local cargo debug/release output dirs are probed.
function findBinary(): string | null {
if (process.env.SANDBOX_AGENT_BIN) {
return process.env.SANDBOX_AGENT_BIN;
}
const cargoPaths = [resolve(__dirname, "../../../target/debug/sandbox-agent"), resolve(__dirname, "../../../target/release/sandbox-agent")];
for (const p of cargoPaths) {
if (existsSync(p)) {
return p;
}
}
return null;
}
const BINARY_PATH = findBinary();
// Fail fast at module load: the whole suite is useless without the binary.
if (!BINARY_PATH) {
throw new Error("sandbox-agent binary not found. Build it (cargo build -p sandbox-agent) or set SANDBOX_AGENT_BIN.");
}
// Export the discovered path via the environment so spawnSandboxAgent (and
// any child processes it starts) resolve the same binary.
if (!process.env.SANDBOX_AGENT_BIN) {
process.env.SANDBOX_AGENT_BIN = BINARY_PATH;
}
// Handle to a throwaway dockerized Postgres instance used by one test run.
interface PostgresContainer {
// Docker container id, used for teardown via `docker rm -f`.
containerId: string;
// postgres:// URL reaching the container's mapped port on 127.0.0.1.
connectionString: string;
}
// End-to-end coverage for the Postgres driver: one sandbox-agent server for
// the whole suite, a fresh dockerized Postgres per test, verifying that
// session/event history written by one SDK instance is readable by another.
describe("Postgres persistence driver", () => {
let handle: SandboxAgentSpawnHandle;
let baseUrl: string;
let token: string;
let dataHome: string;
let postgres: PostgresContainer | null = null;
beforeAll(async () => {
// Isolated HOME/XDG dirs so the spawned agent cannot touch real user state.
dataHome = mkdtempSync(join(tmpdir(), "postgres-integration-"));
prepareMockAgentDataHome(dataHome);
handle = await spawnSandboxAgent({
enabled: true,
log: "silent",
timeoutMs: 30000,
env: {
XDG_DATA_HOME: dataHome,
HOME: dataHome,
USERPROFILE: dataHome,
APPDATA: join(dataHome, "AppData", "Roaming"),
LOCALAPPDATA: join(dataHome, "AppData", "Local"),
},
});
baseUrl = handle.baseUrl;
token = handle.token;
});
// A fresh database per test keeps cases fully independent.
beforeEach(async () => {
postgres = await startPostgresContainer();
});
afterEach(() => {
if (postgres) {
stopPostgresContainer(postgres.containerId);
postgres = null;
}
});
afterAll(async () => {
await handle.dispose();
rmSync(dataHome, { recursive: true, force: true });
});
it("persists session/event history across SDK instances and supports replay restore", async () => {
const connectionString = requirePostgres(postgres).connectionString;
// First SDK instance: create a session and record one prompt.
const persist1 = new PostgresSessionPersistDriver({
connectionString,
});
const sdk1 = await SandboxAgent.connect({
baseUrl,
token,
persist: persist1,
replayMaxEvents: 40,
replayMaxChars: 16000,
});
const created = await sdk1.createSession({ agent: "mock" });
await created.prompt([{ type: "text", text: "postgres-first" }]);
const firstConnectionId = created.lastConnectionId;
await sdk1.dispose();
await persist1.close();
// Second SDK instance against the same database: resume and verify.
const persist2 = new PostgresSessionPersistDriver({
connectionString,
});
const sdk2 = await SandboxAgent.connect({
baseUrl,
token,
persist: persist2,
replayMaxEvents: 40,
replayMaxChars: 16000,
});
const restored = await sdk2.resumeSession(created.id);
// Resuming must establish a new connection id.
expect(restored.lastConnectionId).not.toBe(firstConnectionId);
await restored.prompt([{ type: "text", text: "postgres-second" }]);
const sessions = await sdk2.listSessions({ limit: 20 });
expect(sessions.items.some((entry) => entry.id === created.id)).toBe(true);
const events = await sdk2.getEvents({ sessionId: created.id, limit: 1000 });
expect(events.items.length).toBeGreaterThan(0);
expect(events.items.every((event) => typeof event.id === "string")).toBe(true);
expect(events.items.every((event) => Number.isInteger(event.eventIndex))).toBe(true);
// Event indices must be non-decreasing in the returned order.
for (let i = 1; i < events.items.length; i += 1) {
expect(events.items[i]!.eventIndex).toBeGreaterThanOrEqual(events.items[i - 1]!.eventIndex);
}
// The resume path should have injected a replay prompt as a client event.
const replayInjected = events.items.find((event) => {
if (event.sender !== "client") {
return false;
}
const payload = event.payload as Record<string, unknown>;
const method = payload.method;
const params = payload.params as Record<string, unknown> | undefined;
const prompt = Array.isArray(params?.prompt) ? params?.prompt : [];
const firstBlock = prompt[0] as Record<string, unknown> | undefined;
return method === "session/prompt" && typeof firstBlock?.text === "string" && firstBlock.text.includes("Previous session history is replayed below");
});
expect(replayInjected).toBeTruthy();
await sdk2.dispose();
await persist2.close();
});
});
// Boots a disposable postgres:16-alpine container bound to a random
// 127.0.0.1 port and waits until it accepts connections. The caller owns
// teardown via stopPostgresContainer().
async function startPostgresContainer(): Promise<PostgresContainer> {
  const containerName = `sandbox-agent-postgres-${randomUUID()}`;
  const runArgs = [
    "run",
    "-d",
    "--rm",
    "--name",
    containerName,
    "-e",
    "POSTGRES_USER=postgres",
    "-e",
    "POSTGRES_PASSWORD=postgres",
    "-e",
    "POSTGRES_DB=sandboxagent",
    "-p",
    "127.0.0.1::5432",
    "postgres:16-alpine",
  ];
  const containerId = runDockerCommand(runArgs);
  // Ask docker which host port it picked for the container's 5432.
  const mappedPort = parsePort(runDockerCommand(["port", containerId, "5432/tcp"]));
  const connectionString = `postgres://postgres:postgres@127.0.0.1:${mappedPort}/sandboxagent`;
  await waitForPostgres(connectionString);
  return { containerId, connectionString };
}
// Best-effort teardown of a test Postgres container; failures are swallowed
// because `--rm` auto-cleanup may have already removed it.
function stopPostgresContainer(containerId: string): void {
try {
runDockerCommand(["rm", "-f", containerId]);
} catch {
// Container may already be gone when test teardown runs.
}
}
// Runs `docker <args>` synchronously and returns trimmed stdout. A non-zero
// exit throws via execFileSync; stderr is piped (and included in the error).
function runDockerCommand(args: string[]): string {
  const stdout = execFileSync("docker", args, {
    encoding: "utf8",
    stdio: ["ignore", "pipe", "pipe"],
  });
  return stdout.trim();
}
// Extracts the host port from `docker port` output, e.g.
// "0.0.0.0:49153" -> "49153". Only the first line is considered (later lines
// typically repeat the mapping for IPv6).
function parsePort(output: string): string {
  const firstLine = output.split("\n")[0]?.trim() ?? "";
  // Check the capture group itself, not just the match: match[1] is
  // `string | undefined` under noUncheckedIndexedAccess, and the original
  // returned it unchecked as `string`.
  const port = firstLine.match(/:(\d+)$/)?.[1];
  if (port === undefined) {
    throw new Error(`Failed to parse docker port output: '${output}'`);
  }
  return port;
}
// Polls the database until `SELECT 1` succeeds or ~30s elapse. A fresh
// Client is created per attempt because a failed connect leaves the client
// unusable; the last error is surfaced if the deadline passes.
async function waitForPostgres(connectionString: string): Promise<void> {
const timeoutMs = 30000;
const deadline = Date.now() + timeoutMs;
let lastError: unknown;
while (Date.now() < deadline) {
const client = new Client({ connectionString });
try {
await client.connect();
await client.query("SELECT 1");
await client.end();
return;
} catch (error) {
lastError = error;
// Best-effort cleanup of the failed client before backing off.
try {
await client.end();
} catch {
// Ignore cleanup failures while retrying.
}
await delay(250);
}
}
throw new Error(`Postgres container did not become ready: ${String(lastError)}`);
}
// Promise-based sleep used by the readiness polling loop.
function delay(ms: number): Promise<void> {
  return new Promise<void>((done) => {
    setTimeout(done, ms);
  });
}
// Narrows the nullable container handle, failing loudly when a test body
// runs before beforeEach provisioned Postgres.
function requirePostgres(container: PostgresContainer | null): PostgresContainer {
  if (container) {
    return container;
  }
  throw new Error("Postgres container was not initialized for this test.");
}