SDK: Add ensureServer() for automatic server recovery (#260)

* SDK sandbox provisioning: built-in providers, docs restructure, and quickstart overhaul

- Add built-in sandbox providers (local, docker, e2b, daytona, vercel, cloudflare) to the TypeScript SDK so users can import providers directly instead of passing client instances (see the import sketch after this list)
- Restructure docs: rename architecture to orchestration-architecture, add new architecture page for server overview, improve getting started flow
- Rewrite quickstart to be TypeScript-first with provider CodeGroup and custom provider accordion
- Update all examples to use new provider APIs
- Update persist drivers and foundry for new SDK surface
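
A quick sketch of the consumer-side pattern this enables. The factory name, its options, and the `SandboxAgent` wiring below are assumptions for illustration; only the `sandbox-agent/<provider>` subpath-export pattern comes from this change.

```ts
// Illustrative sketch only: the subpath-export pattern is from this change,
// but the factory name, options, and SandboxAgent wiring are assumptions.
import { SandboxAgent } from "sandbox-agent";
import { createE2BProvider } from "sandbox-agent/e2b"; // hypothetical factory name

const provider = createE2BProvider({ apiKey: process.env.E2B_API_KEY }); // hypothetical options
const agent = new SandboxAgent({ provider }); // how the provider is passed is assumed
```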

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* Fix SDK typecheck errors and update persist drivers for insertEvent signature

- Fix insertEvent call in client.ts to pass sessionId as the first argument (see the signature sketch after this list)
- Update Daytona provider create options to use Partial type (image has default)
- Update StrictUniqueSessionPersistDriver in tests to match new insertEvent signature
- Sync persist packages, openapi spec, and docs with upstream changes
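
The updated driver-side shape, as implemented by the Postgres driver further down: the session id is now the explicit first argument, followed by the event. The in-memory store below is just a minimal sketch of that signature.

```ts
import type { SessionEvent } from "sandbox-agent";

// Minimal sketch of the new insertEvent signature: sessionId first, then the
// event. Other SessionPersistDriver methods are omitted here.
class MemoryEventStore {
  private readonly events = new Map<string, SessionEvent[]>();

  async insertEvent(sessionId: string, event: SessionEvent): Promise<void> {
    const list = this.events.get(sessionId) ?? [];
    list.push(event);
    this.events.set(sessionId, list);
  }
}
```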

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* Add Modal and ComputeSDK built-in providers, update examples and docs

- Add `sandbox-agent/modal` provider using the Modal SDK with a node:22-slim image (import sketch after this list)
- Add `sandbox-agent/computesdk` provider using ComputeSDK's unified sandbox API
- Update Modal and ComputeSDK examples to use new SDK providers
- Update Modal and ComputeSDK deploy docs with provider-based examples
- Add Modal to quickstart CodeGroup and docs.json navigation
- Add provider test entries for Modal and ComputeSDK
- Remove old standalone example files (modal.ts, computesdk.ts)
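
Both providers follow the same import pattern as the earlier sketch; the factory names and option shapes below are assumptions, while the subpath exports and the node:22-slim image come from this change.

```ts
// Illustrative only: factory names and option shapes are placeholders.
import { createModalProvider } from "sandbox-agent/modal";
import { createComputeSDKProvider } from "sandbox-agent/computesdk";

const modal = createModalProvider({ image: "node:22-slim" }); // node:22-slim per this change
const compute = createComputeSDKProvider();
```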

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* Fix Modal provider: pre-install agents in image, fire-and-forget exec for server

- Pre-install agents in Dockerfile commands so they are cached across creates
- Use fire-and-forget exec (no wait) to keep the server alive in the Modal sandbox (see the sketch after this list)
- Add memoryMiB option (default 2GB) to avoid OOM during agent install
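
The fire-and-forget pattern, sketched generically; the `exec` helper and its signature are placeholders rather than the Modal SDK's real API, since the point is only that the server start command is not awaited.

```ts
// Conceptual sketch: deliberately do not await the server process, so the
// call returns immediately and the server keeps running inside the sandbox.
// `exec` and `serverCommand` are placeholders, not Modal SDK API.
function startServerDetached(
  sandbox: { exec(cmd: string[]): Promise<unknown> },
  serverCommand: string[],
): void {
  void sandbox
    .exec(serverCommand)
    .catch((err) => console.error("sandbox-agent server exec failed", err));
}
```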

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* Sync upstream changes: multiplayer docs, logos, openapi spec, foundry config

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* SDK: Add ensureServer() for automatic server recovery

Add ensureServer() to SandboxProvider interface to handle cases where the
sandbox-agent server stops or goes to sleep. The SDK now calls this method
after 3 consecutive health-check failures, allowing providers to restart the
server if needed. Most built-in providers (E2B, Daytona, Vercel, Modal,
ComputeSDK) implement this. Docker and Cloudflare manage server lifecycle
differently, and Local uses managed child processes.

Also update docs for quickstart, architecture, multiplayer, and session
persistence; mark persist-* packages as deprecated; and add ensureServer
implementations to all applicable providers.
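
A sketch of what a provider-side implementation could look like; `ensureServer()` and the 3-failure trigger come from this change, while the health probe and restart helper below are placeholders.

```ts
// Sketch only: SandboxProvider.ensureServer() is the new hook; everything
// else here (URL, helper names) is a placeholder.
class ExampleProvider {
  constructor(private readonly serverUrl: string) {}

  // Called by the SDK after 3 consecutive health-check failures.
  async ensureServer(): Promise<void> {
    try {
      const res = await fetch(this.serverUrl); // placeholder health probe
      if (res.ok) return; // server came back on its own
    } catch {
      // unreachable server: fall through to a restart attempt
    }
    await this.restartServer();
  }

  private async restartServer(): Promise<void> {
    // provider-specific: e.g. re-run the fire-and-forget server exec
  }
}
```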

Co-Authored-By: Claude Haiku 4.5 <noreply@anthropic.com>

* wip

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Nathan Flurry, 2026-03-15 20:29:28 -07:00 (committed by GitHub)
parent 3426cbc6ec
commit cf7e2a92c6
112 changed files with 3739 additions and 3537 deletions


@@ -8,7 +8,6 @@
   },
   "dependencies": {
     "@sandbox-agent/example-shared": "workspace:*",
-    "@sandbox-agent/persist-postgres": "workspace:*",
     "pg": "latest",
     "sandbox-agent": "workspace:*"
   },


@@ -3,7 +3,7 @@ import { randomUUID } from "node:crypto";
 import { Client } from "pg";
 import { setTimeout as delay } from "node:timers/promises";
 import { SandboxAgent } from "sandbox-agent";
-import { PostgresSessionPersistDriver } from "@sandbox-agent/persist-postgres";
+import { PostgresSessionPersistDriver } from "./persist.ts";
 import { startDockerSandbox } from "@sandbox-agent/example-shared/docker";
 import { detectAgent } from "@sandbox-agent/example-shared";


@@ -0,0 +1,336 @@
import { Pool, type PoolConfig } from "pg";
import type { ListEventsRequest, ListPage, ListPageRequest, SessionEvent, SessionPersistDriver, SessionRecord } from "sandbox-agent";
const DEFAULT_LIST_LIMIT = 100;
export interface PostgresSessionPersistDriverOptions {
connectionString?: string;
pool?: Pool;
poolConfig?: PoolConfig;
schema?: string;
}
export class PostgresSessionPersistDriver implements SessionPersistDriver {
private readonly pool: Pool;
private readonly ownsPool: boolean;
private readonly schema: string;
private readonly initialized: Promise<void>;
constructor(options: PostgresSessionPersistDriverOptions = {}) {
this.schema = normalizeSchema(options.schema ?? "public");
if (options.pool) {
this.pool = options.pool;
this.ownsPool = false;
} else {
this.pool = new Pool({
connectionString: options.connectionString,
...options.poolConfig,
});
this.ownsPool = true;
}
this.initialized = this.initialize();
}
async getSession(id: string): Promise<SessionRecord | undefined> {
await this.ready();
const result = await this.pool.query<SessionRow>(
`SELECT id, agent, agent_session_id, last_connection_id, created_at, destroyed_at, sandbox_id, session_init_json, config_options_json, modes_json
FROM ${this.table("sessions")}
WHERE id = $1`,
[id],
);
if (result.rows.length === 0) {
return undefined;
}
return decodeSessionRow(result.rows[0]);
}
async listSessions(request: ListPageRequest = {}): Promise<ListPage<SessionRecord>> {
await this.ready();
const offset = parseCursor(request.cursor);
const limit = normalizeLimit(request.limit);
const rowsResult = await this.pool.query<SessionRow>(
`SELECT id, agent, agent_session_id, last_connection_id, created_at, destroyed_at, sandbox_id, session_init_json, config_options_json, modes_json
FROM ${this.table("sessions")}
ORDER BY created_at ASC, id ASC
LIMIT $1 OFFSET $2`,
[limit, offset],
);
const countResult = await this.pool.query<{ count: string }>(`SELECT COUNT(*) AS count FROM ${this.table("sessions")}`);
const total = parseInteger(countResult.rows[0]?.count ?? "0");
const nextOffset = offset + rowsResult.rows.length;
return {
items: rowsResult.rows.map(decodeSessionRow),
nextCursor: nextOffset < total ? String(nextOffset) : undefined,
};
}
async updateSession(session: SessionRecord): Promise<void> {
await this.ready();
await this.pool.query(
`INSERT INTO ${this.table("sessions")} (
id, agent, agent_session_id, last_connection_id, created_at, destroyed_at, sandbox_id, session_init_json, config_options_json, modes_json
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
ON CONFLICT(id) DO UPDATE SET
agent = EXCLUDED.agent,
agent_session_id = EXCLUDED.agent_session_id,
last_connection_id = EXCLUDED.last_connection_id,
created_at = EXCLUDED.created_at,
destroyed_at = EXCLUDED.destroyed_at,
sandbox_id = EXCLUDED.sandbox_id,
session_init_json = EXCLUDED.session_init_json,
config_options_json = EXCLUDED.config_options_json,
modes_json = EXCLUDED.modes_json`,
[
session.id,
session.agent,
session.agentSessionId,
session.lastConnectionId,
session.createdAt,
session.destroyedAt ?? null,
session.sandboxId ?? null,
session.sessionInit ? JSON.stringify(session.sessionInit) : null,
session.configOptions ? JSON.stringify(session.configOptions) : null,
session.modes !== undefined ? JSON.stringify(session.modes) : null,
],
);
}
async listEvents(request: ListEventsRequest): Promise<ListPage<SessionEvent>> {
await this.ready();
const offset = parseCursor(request.cursor);
const limit = normalizeLimit(request.limit);
const rowsResult = await this.pool.query<EventRow>(
`SELECT id, event_index, session_id, created_at, connection_id, sender, payload_json
FROM ${this.table("events")}
WHERE session_id = $1
ORDER BY event_index ASC, id ASC
LIMIT $2 OFFSET $3`,
[request.sessionId, limit, offset],
);
const countResult = await this.pool.query<{ count: string }>(`SELECT COUNT(*) AS count FROM ${this.table("events")} WHERE session_id = $1`, [
request.sessionId,
]);
const total = parseInteger(countResult.rows[0]?.count ?? "0");
const nextOffset = offset + rowsResult.rows.length;
return {
items: rowsResult.rows.map(decodeEventRow),
nextCursor: nextOffset < total ? String(nextOffset) : undefined,
};
}
async insertEvent(_sessionId: string, event: SessionEvent): Promise<void> {
await this.ready();
await this.pool.query(
`INSERT INTO ${this.table("events")} (
id, event_index, session_id, created_at, connection_id, sender, payload_json
) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT(id) DO UPDATE SET
event_index = EXCLUDED.event_index,
session_id = EXCLUDED.session_id,
created_at = EXCLUDED.created_at,
connection_id = EXCLUDED.connection_id,
sender = EXCLUDED.sender,
payload_json = EXCLUDED.payload_json`,
[event.id, event.eventIndex, event.sessionId, event.createdAt, event.connectionId, event.sender, event.payload],
);
}
async close(): Promise<void> {
if (!this.ownsPool) {
return;
}
await this.pool.end();
}
private async ready(): Promise<void> {
await this.initialized;
}
private table(name: "sessions" | "events"): string {
return `"${this.schema}"."${name}"`;
}
private async initialize(): Promise<void> {
await this.pool.query(`CREATE SCHEMA IF NOT EXISTS "${this.schema}"`);
await this.pool.query(`
CREATE TABLE IF NOT EXISTS ${this.table("sessions")} (
id TEXT PRIMARY KEY,
agent TEXT NOT NULL,
agent_session_id TEXT NOT NULL,
last_connection_id TEXT NOT NULL,
created_at BIGINT NOT NULL,
destroyed_at BIGINT,
sandbox_id TEXT,
session_init_json JSONB,
config_options_json JSONB,
modes_json JSONB
)
`);
await this.pool.query(`
ALTER TABLE ${this.table("sessions")}
ADD COLUMN IF NOT EXISTS sandbox_id TEXT
`);
await this.pool.query(`
ALTER TABLE ${this.table("sessions")}
ADD COLUMN IF NOT EXISTS config_options_json JSONB
`);
await this.pool.query(`
ALTER TABLE ${this.table("sessions")}
ADD COLUMN IF NOT EXISTS modes_json JSONB
`);
await this.pool.query(`
CREATE TABLE IF NOT EXISTS ${this.table("events")} (
id TEXT PRIMARY KEY,
event_index BIGINT NOT NULL,
session_id TEXT NOT NULL,
created_at BIGINT NOT NULL,
connection_id TEXT NOT NULL,
sender TEXT NOT NULL,
payload_json JSONB NOT NULL
)
`);
await this.pool.query(`
ALTER TABLE ${this.table("events")}
ALTER COLUMN id TYPE TEXT USING id::TEXT
`);
await this.pool.query(`
ALTER TABLE ${this.table("events")}
ADD COLUMN IF NOT EXISTS event_index BIGINT
`);
await this.pool.query(`
WITH ranked AS (
SELECT id, ROW_NUMBER() OVER (PARTITION BY session_id ORDER BY created_at ASC, id ASC) AS ranked_index
FROM ${this.table("events")}
)
UPDATE ${this.table("events")} AS current_events
SET event_index = ranked.ranked_index
FROM ranked
WHERE current_events.id = ranked.id
AND current_events.event_index IS NULL
`);
await this.pool.query(`
ALTER TABLE ${this.table("events")}
ALTER COLUMN event_index SET NOT NULL
`);
await this.pool.query(`
CREATE INDEX IF NOT EXISTS idx_events_session_order
ON ${this.table("events")}(session_id, event_index, id)
`);
}
}
type SessionRow = {
id: string;
agent: string;
agent_session_id: string;
last_connection_id: string;
created_at: string | number;
destroyed_at: string | number | null;
sandbox_id: string | null;
session_init_json: unknown | null;
config_options_json: unknown | null;
modes_json: unknown | null;
};
type EventRow = {
id: string | number;
event_index: string | number;
session_id: string;
created_at: string | number;
connection_id: string;
sender: string;
payload_json: unknown;
};
function decodeSessionRow(row: SessionRow): SessionRecord {
return {
id: row.id,
agent: row.agent,
agentSessionId: row.agent_session_id,
lastConnectionId: row.last_connection_id,
createdAt: parseInteger(row.created_at),
destroyedAt: row.destroyed_at === null ? undefined : parseInteger(row.destroyed_at),
sandboxId: row.sandbox_id ?? undefined,
sessionInit: row.session_init_json ? (row.session_init_json as SessionRecord["sessionInit"]) : undefined,
configOptions: row.config_options_json ? (row.config_options_json as SessionRecord["configOptions"]) : undefined,
modes: row.modes_json ? (row.modes_json as SessionRecord["modes"]) : undefined,
};
}
function decodeEventRow(row: EventRow): SessionEvent {
return {
id: String(row.id),
eventIndex: parseInteger(row.event_index),
sessionId: row.session_id,
createdAt: parseInteger(row.created_at),
connectionId: row.connection_id,
sender: parseSender(row.sender),
payload: row.payload_json as SessionEvent["payload"],
};
}
function normalizeLimit(limit: number | undefined): number {
if (!Number.isFinite(limit) || (limit ?? 0) < 1) {
return DEFAULT_LIST_LIMIT;
}
return Math.floor(limit as number);
}
function parseCursor(cursor: string | undefined): number {
if (!cursor) {
return 0;
}
const parsed = Number.parseInt(cursor, 10);
if (!Number.isFinite(parsed) || parsed < 0) {
return 0;
}
return parsed;
}
function parseInteger(value: string | number): number {
const parsed = typeof value === "number" ? value : Number.parseInt(value, 10);
if (!Number.isFinite(parsed)) {
throw new Error(`Invalid integer value returned by postgres: ${String(value)}`);
}
return parsed;
}
function parseSender(value: string): SessionEvent["sender"] {
if (value === "agent" || value === "client") {
return value;
}
throw new Error(`Invalid sender value returned by postgres: ${value}`);
}
function normalizeSchema(schema: string): string {
if (!/^[A-Za-z_][A-Za-z0-9_]*$/.test(schema)) {
throw new Error(`Invalid schema name '${schema}'. Use letters, numbers, and underscores only.`);
}
return schema;
}
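
A small usage sketch for the driver above; the constructor options and method calls match the code, while how the SDK consumes the driver (e.g. a persistence option on `SandboxAgent`) is left out because it isn't shown in this diff.

```ts
import { PostgresSessionPersistDriver } from "./persist.ts";

// Options per PostgresSessionPersistDriverOptions above; the schema name must
// satisfy the normalizeSchema() rule (letters, digits, underscores only).
const driver = new PostgresSessionPersistDriver({
  connectionString: process.env.DATABASE_URL,
  schema: "sandbox_agent",
});

// listEvents pages by an offset cursor, scoped to one session.
const page = await driver.listEvents({ sessionId: "session-123", limit: 50 });
console.log(page.items.length, page.nextCursor);

await driver.close(); // ends the pool only if the driver created it
```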