mirror of https://github.com/harivansh-afk/sandbox-agent.git
synced 2026-04-15 04:03:31 +00:00

feat: download batch

This commit is contained in:
parent 3545139cd3
commit e1a09564e4

14 changed files with 702 additions and 91 deletions
@@ -122,22 +122,13 @@ Batch upload accepts `application/x-tar` and extracts into the destination directory
 <CodeGroup>
 ```ts TypeScript
 import { SandboxAgent } from "sandbox-agent";
-import fs from "node:fs";
-import path from "node:path";
-import tar from "tar";
 
 const sdk = await SandboxAgent.connect({
   baseUrl: "http://127.0.0.1:2468",
 });
 
-const archivePath = path.join(process.cwd(), "skills.tar");
-await tar.c({
-  cwd: "./skills",
-  file: archivePath,
-}, ["."]);
-
-const tarBuffer = await fs.promises.readFile(archivePath);
-const result = await sdk.uploadFsBatch(tarBuffer, {
+// Requires `tar` to be installed (it's an optional peer dependency).
+const result = await sdk.uploadFsBatch({ sourcePath: "./skills" }, {
   path: "./skills",
 });
@@ -152,3 +143,27 @@ curl -X POST "http://127.0.0.1:2468/v1/fs/upload-batch?path=./skills" \
   --data-binary @skills.tar
 ```
 </CodeGroup>
+
+## Batch download (tar)
+
+Batch download returns `application/x-tar` bytes for a file or directory. If the path is a directory,
+the archive contains the directory contents (similar to `tar -C <dir> .`).
+
+<CodeGroup>
+```ts TypeScript
+import { SandboxAgent } from "sandbox-agent";
+
+const sdk = await SandboxAgent.connect({
+  baseUrl: "http://127.0.0.1:2468",
+});
+
+// Requires `tar` to be installed if you want to extract (it's an optional peer dependency).
+await sdk.downloadFsBatch({ path: "./skills" }, { outPath: "./skills.tar" });
+await sdk.downloadFsBatch({ path: "./skills" }, { extractTo: "./skills-extracted" });
+```
+
+```bash cURL
+curl -X GET "http://127.0.0.1:2468/v1/fs/download-batch?path=./skills" \
+  --output ./skills.tar
+```
+</CodeGroup>
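An editorial aside, not part of this commit: `downloadFsBatch` always resolves with the raw tar bytes, so the archive can also be inspected in memory. A sketch assuming a Node.js runtime and node-tar's streaming list API (`tar.t` with `onReadEntry`, per tar v7):

```ts
import { SandboxAgent } from "sandbox-agent";
import * as tar from "tar";
import { PassThrough } from "node:stream";

const sdk = await SandboxAgent.connect({ baseUrl: "http://127.0.0.1:2468" });

// The tar bytes are returned even when outPath/extractTo are also given.
const bytes = await sdk.downloadFsBatch({ path: "./skills" });

// Feed the bytes into a tar parser to list entry names without touching disk.
const parser = tar.t({ onReadEntry: (entry) => console.log(entry.path) });
const input = new PassThrough();
input.end(Buffer.from(bytes));
input.pipe(parser);
```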
@@ -654,6 +654,33 @@
         }
       }
     },
+    "/v1/fs/download-batch": {
+      "get": {
+        "tags": [
+          "v1"
+        ],
+        "summary": "Download a tar archive of a file or directory.",
+        "description": "Returns `application/x-tar` bytes containing the requested path. If the path is a directory,\nthe archive contains its contents (similar to `tar -C <dir> .`).",
+        "operationId": "get_v1_fs_download_batch",
+        "parameters": [
+          {
+            "name": "path",
+            "in": "query",
+            "description": "Source path (file or directory)",
+            "required": false,
+            "schema": {
+              "type": "string",
+              "nullable": true
+            }
+          }
+        ],
+        "responses": {
+          "200": {
+            "description": "tar archive bytes"
+          }
+        }
+      }
+    },
     "/v1/fs/entries": {
      "get": {
        "tags": [
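Per this spec the endpoint is a plain GET with one optional `path` query parameter, so it can be exercised without the SDK. A minimal sketch using the global Fetch API (base URL taken from the docs above; the server-side default of `"."` comes from the Rust handler later in this diff):

```ts
// Fetch the archive directly; `path` is optional and defaults to "." on the server.
const res = await fetch("http://127.0.0.1:2468/v1/fs/download-batch?path=./skills", {
  headers: { accept: "application/x-tar" },
});
if (!res.ok) throw new Error(`download-batch failed: ${res.status}`);
const tarBytes = new Uint8Array(await res.arrayBuffer());
```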
@@ -1267,6 +1294,15 @@
         }
       }
     },
+    "FsDownloadBatchQuery": {
+      "type": "object",
+      "properties": {
+        "path": {
+          "type": "string",
+          "nullable": true
+        }
+      }
+    },
     "FsEntriesQuery": {
       "type": "object",
       "properties": {
@@ -1,7 +1,6 @@
 import { SandboxAgent } from "sandbox-agent";
 import { detectAgent, buildInspectorUrl } from "@sandbox-agent/example-shared";
 import { startDockerSandbox } from "@sandbox-agent/example-shared/docker";
-import * as tar from "tar";
 import fs from "node:fs";
 import path from "node:path";
 import { fileURLToPath } from "node:url";
@@ -23,14 +22,9 @@ console.log(" Created 3 files in my-project/");
 console.log("Uploading files via batch tar...");
 const client = await SandboxAgent.connect({ baseUrl });
 
-const tarPath = path.join(tmpDir, "upload.tar");
-await tar.create(
-  { file: tarPath, cwd: tmpDir },
-  ["my-project"],
-);
-const tarBuffer = await fs.promises.readFile(tarPath);
-const uploadResult = await client.uploadFsBatch(tarBuffer, { path: "/opt" });
-console.log(` Uploaded ${uploadResult.paths.length} files: ${uploadResult.paths.join(", ")}`);
+// Requires `tar` to be installed (optional peer dependency of `sandbox-agent`).
+const uploadResult = await client.uploadFsBatch({ sourcePath: projectDir }, { path: "/opt/my-project" });
+console.log(` Uploaded ${uploadResult.paths.length} entries: ${uploadResult.paths.join(", ")}`);
 
 // Cleanup temp files
 fs.rmSync(tmpDir, { recursive: true, force: true });
@@ -46,6 +40,22 @@ const readmeBytes = await client.readFsFile({ path: "/opt/my-project/README.md"
 const readmeText = new TextDecoder().decode(readmeBytes);
 console.log(` README.md content: ${readmeText.trim()}`);
 
+console.log("Downloading the uploaded project via batch tar...");
+const downloadTmp = path.resolve(__dirname, "../.tmp-download");
+fs.rmSync(downloadTmp, { recursive: true, force: true });
+fs.mkdirSync(downloadTmp, { recursive: true });
+await client.downloadFsBatch(
+  { path: "/opt/my-project" },
+  { outPath: path.join(downloadTmp, "my-project.tar"), extractTo: downloadTmp },
+);
+console.log(` Extracted to: ${downloadTmp}`);
+for (const entry of fs.readdirSync(downloadTmp)) {
+  if (entry.endsWith(".tar")) {
+    continue;
+  }
+  console.log(` ${entry}`);
+}
+
 console.log("Creating session...");
 const session = await client.createSession({ agent: detectAgent(), sessionInit: { cwd: "/opt/my-project", mcpServers: [] } });
 const sessionId = session.id;
pnpm-lock.yaml (generated, 125 changes)
@@ -36,7 +36,7 @@ importers:
     devDependencies:
       '@cloudflare/workers-types':
         specifier: latest
-        version: 4.20260210.0
+        version: 4.20260213.0
       '@types/node':
         specifier: latest
         version: 25.2.3
@@ -60,7 +60,7 @@ importers:
         version: 3.2.4(@types/debug@4.1.12)(@types/node@25.2.3)(jiti@1.21.7)(tsx@4.21.0)(yaml@2.8.2)
       wrangler:
         specifier: latest
-        version: 4.64.0(@cloudflare/workers-types@4.20260210.0)
+        version: 4.65.0(@cloudflare/workers-types@4.20260213.0)
 
   examples/computesdk:
     dependencies:
@@ -91,7 +91,7 @@ importers:
     dependencies:
       '@daytonaio/sdk':
         specifier: latest
-        version: 0.141.0(ws@8.19.0)
+        version: 0.142.0(ws@8.19.0)
       '@sandbox-agent/example-shared':
         specifier: workspace:*
         version: link:../shared
@@ -531,7 +531,7 @@ importers:
     dependencies:
       '@daytonaio/sdk':
         specifier: latest
-        version: 0.141.0(ws@8.19.0)
+        version: 0.142.0(ws@8.19.0)
       '@e2b/code-interpreter':
         specifier: latest
         version: 2.3.3
@@ -762,6 +762,9 @@ importers:
       openapi-typescript:
         specifier: ^6.7.0
         version: 6.7.6
+      tar:
+        specifier: ^7.0.0
+        version: 7.5.7
       tsup:
         specifier: ^8.0.0
         version: 8.5.1(jiti@1.21.7)(postcss@8.5.6)(tsx@4.21.0)(typescript@5.9.3)(yaml@2.8.2)
@@ -1165,38 +1168,38 @@ packages:
       workerd:
         optional: true
 
-  '@cloudflare/workerd-darwin-64@1.20260210.0':
-    resolution: {integrity: sha512-e3vMgzr8ZM6VjpJVFrnMBhjvFhlMIkhT+BLpBk3pKaWsrXao+azDlmzzxB3Zf4CZ8LmCEtaP7n5d2mNGL6Dqww==}
+  '@cloudflare/workerd-darwin-64@1.20260212.0':
+    resolution: {integrity: sha512-kLxuYutk88Wlo7edp8mlkN68TgZZ9237SUnuX9kNaD5jcOdblUqiBctMRZeRcPsuoX/3g2t0vS4ga02NBEVRNg==}
     engines: {node: '>=16'}
     cpu: [x64]
     os: [darwin]
 
-  '@cloudflare/workerd-darwin-arm64@1.20260210.0':
-    resolution: {integrity: sha512-ng2uLJVMrI5VrcAS26gDGM+qxCuWD4ZA8VR4i88RdyM8TLn+AqPFisrvn7AMA+QSv0+ck+ZdFtXek7qNp2gNuA==}
+  '@cloudflare/workerd-darwin-arm64@1.20260212.0':
+    resolution: {integrity: sha512-fqoqQWMA1D0ZzDOD8sp0allREM2M8GHdpxMXQ8EdZpZ70z5bJbJ9Vr4qe35++FNIZJspsDHfTw3Xm/M4ELm/dQ==}
     engines: {node: '>=16'}
     cpu: [arm64]
     os: [darwin]
 
-  '@cloudflare/workerd-linux-64@1.20260210.0':
-    resolution: {integrity: sha512-frn2/+6DV59h13JbGSk9ATvJw3uORWssFIKZ/G/to+WRrIDQgCpSrjLtGbFSSn5eBEhYOvwxPKc7IrppkmIj/w==}
+  '@cloudflare/workerd-linux-64@1.20260212.0':
+    resolution: {integrity: sha512-bCSQoZzDzV5MSh4ueWo1DgmOn4Hf3QBu4Yo3eQFXA2llYFIu/sZgRtkEehw1X2/SY5Sn6O0EMCqxJYRf82Wdeg==}
     engines: {node: '>=16'}
     cpu: [x64]
     os: [linux]
 
-  '@cloudflare/workerd-linux-arm64@1.20260210.0':
-    resolution: {integrity: sha512-0fmxEHaDcAF+7gcqnBcQdBCOzNvGz3mTMwqxEYJc5xZgFwQf65/dYK5fnV8z56GVNqu88NEnLMG3DD2G7Ey1vw==}
+  '@cloudflare/workerd-linux-arm64@1.20260212.0':
+    resolution: {integrity: sha512-GPvp1iiKQodtbUDi6OmR5I0vD75lawB54tdYGtmypuHC7ZOI2WhBmhb3wCxgnQNOG1z7mhCQrzRCoqrKwYbVWQ==}
     engines: {node: '>=16'}
     cpu: [arm64]
     os: [linux]
 
-  '@cloudflare/workerd-windows-64@1.20260210.0':
-    resolution: {integrity: sha512-G/Apjk/QLNnwbu8B0JO9FuAJKHNr+gl8X3G/7qaUrpwIkPx5JFQElVE6LKk4teSrycvAy5AzLFAL0lOB1xsUIQ==}
+  '@cloudflare/workerd-windows-64@1.20260212.0':
+    resolution: {integrity: sha512-wHRI218Xn4ndgWJCUHH4Zx0YlU5q/o6OmcxXkcw95tJOsQn4lDrhppioPh4eScxJZALf2X+ODeZcyQTCq5exGw==}
     engines: {node: '>=16'}
     cpu: [x64]
     os: [win32]
 
-  '@cloudflare/workers-types@4.20260210.0':
-    resolution: {integrity: sha512-zHaF0RZVYUQwNCJCECnNAJdMur72Lk3FMiD6wU78Dx3Bv7DQRcuXNmPNuJmsGnosVZCcWintHlPTQ/4BEiDG5w==}
+  '@cloudflare/workers-types@4.20260213.0':
+    resolution: {integrity: sha512-dr905ft/1R0mnfdT9aun4vanLgIBN27ZyPxTCENKmhctSz6zNmBOvHbzDWAhGE0RBAKFf3X7ifMRcd0MkmBvgA==}
 
   '@computesdk/cmd@0.4.1':
     resolution: {integrity: sha512-hhcYrwMnOpRSwWma3gkUeAVsDFG56nURwSaQx8vCepv0IuUv39bK4mMkgszolnUQrVjBDdW7b3lV+l5B2S8fRA==}
@@ -1216,14 +1219,14 @@ packages:
     resolution: {integrity: sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==}
     engines: {node: '>=12'}
 
-  '@daytonaio/api-client@0.141.0':
-    resolution: {integrity: sha512-DSPCurIEjfFyXCd07jkDgfsoFppVhTLyIJdvfb0LgG1EgV75BPqqzk2WM4ragBFJUuK2URF5CK7qkaHW0AXKMA==}
+  '@daytonaio/api-client@0.142.0':
+    resolution: {integrity: sha512-WCKaVAN4aM1VqfrIR8soze1KbF5b6F8saJ/fVtSto90F+kW5vpYMHgiW8PaARPz1D/UhJFzWmkqa3HPAPeZ44g==}
 
-  '@daytonaio/sdk@0.141.0':
-    resolution: {integrity: sha512-JUopkS9SkO7h4WN8CjparOrP9k954euOF5KG//PeCEFOxUWTPFOME70GrmHXQKa1qkdZiF/4tz9jtZ744B1I2w==}
+  '@daytonaio/sdk@0.142.0':
+    resolution: {integrity: sha512-Wp3wuJFVcWUt0+ExWaDHSE444HE9NC6B+kI6f9JdC6nfrSoSBfRNrLT8Ewl5czRaWnU1kbqO3ZZTNbSrt68BOA==}
 
-  '@daytonaio/toolbox-api-client@0.141.0':
-    resolution: {integrity: sha512-KGkCLDLAltd9FCic3PhSJGrTp3RwGsUwWEGp5vyWZFQGWpJV8CVp08CH5SBdo4YhuqFUVlyQcwha1HpzpVH++A==}
+  '@daytonaio/toolbox-api-client@0.142.0':
+    resolution: {integrity: sha512-HtQWxY9EdecJ7ZEXJlQszkdOCQFilPrc5BjSc1GRkYOm7dRj24NydH58va+x0yBCoU3JcDyrhUKn0bp99O0xeg==}
 
   '@e2b/code-interpreter@2.3.3':
     resolution: {integrity: sha512-WOpSwc1WpvxyOijf6WMbR76BUuvd2O9ddXgCHHi65lkuy6YgQGq7oyd8PNsT331O9Tqbccjy6uF4xanSdLX1UA==}
@@ -4137,8 +4140,8 @@ packages:
     resolution: {integrity: sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==}
     engines: {node: '>=10'}
 
-  miniflare@4.20260210.0:
-    resolution: {integrity: sha512-HXR6m53IOqEzq52DuGF1x7I1K6lSIqzhbCbQXv/cTmPnPJmNkr7EBtLDm4nfSkOvlDtnwDCLUjWII5fyGJI5Tw==}
+  miniflare@4.20260212.0:
+    resolution: {integrity: sha512-Lgxq83EuR2q/0/DAVOSGXhXS1V7GDB04HVggoPsenQng8sqEDR3hO4FigIw5ZI2Sv2X7kIc30NCzGHJlCFIYWg==}
     engines: {node: '>=18.0.0'}
     hasBin: true
@@ -5426,17 +5429,17 @@ packages:
     resolution: {integrity: sha512-c9bZp7b5YtRj2wOe6dlj32MK+Bx/M/d+9VB2SHM1OtsUHR0aV0tdP6DWh/iMt0kWi1t5g1Iudu6hQRNd1A4PVA==}
     engines: {node: '>=18'}
 
-  workerd@1.20260210.0:
-    resolution: {integrity: sha512-Sb0WXhrvf+XHQigP2trAxQnXo7wxZFC4PWnn6I7LhFxiTvzxvOAqMEiLkIz58wggRCb54T/KAA8hdjkTniR5FA==}
+  workerd@1.20260212.0:
+    resolution: {integrity: sha512-4B9BoZUzKSRv3pVZGEPh7OX+Q817hpUqAUtz5O0TxJVqo4OsYJAUA/sY177Q5ha/twjT9KaJt2DtQzE+oyCOzw==}
     engines: {node: '>=16'}
     hasBin: true
 
-  wrangler@4.64.0:
-    resolution: {integrity: sha512-0PBiVEbshQT4Av/KLHbOAks4ioIKp/eAO7Xr2BgAX5v7cFYYgeOvudBrbtZa/hDDIA6858QuJnTQ8mI+cm8Vqw==}
+  wrangler@4.65.0:
+    resolution: {integrity: sha512-R+n3o3tlGzLK9I4fGocPReOuvcnjhtOL2aCVKkHMeuEwt9pPbOO4FxJtx/ec5cIUG/otRyJnfQGCAr9DplBVng==}
     engines: {node: '>=20.0.0'}
     hasBin: true
     peerDependencies:
-      '@cloudflare/workers-types': ^4.20260210.0
+      '@cloudflare/workers-types': ^4.20260212.0
     peerDependenciesMeta:
       '@cloudflare/workers-types':
         optional: true
@@ -6348,28 +6351,28 @@ snapshots:
     dependencies:
       '@cloudflare/containers': 0.0.30
 
-  '@cloudflare/unenv-preset@2.12.1(unenv@2.0.0-rc.24)(workerd@1.20260210.0)':
+  '@cloudflare/unenv-preset@2.12.1(unenv@2.0.0-rc.24)(workerd@1.20260212.0)':
     dependencies:
       unenv: 2.0.0-rc.24
     optionalDependencies:
-      workerd: 1.20260210.0
+      workerd: 1.20260212.0
 
-  '@cloudflare/workerd-darwin-64@1.20260210.0':
+  '@cloudflare/workerd-darwin-64@1.20260212.0':
     optional: true
 
-  '@cloudflare/workerd-darwin-arm64@1.20260210.0':
+  '@cloudflare/workerd-darwin-arm64@1.20260212.0':
     optional: true
 
-  '@cloudflare/workerd-linux-64@1.20260210.0':
+  '@cloudflare/workerd-linux-64@1.20260212.0':
     optional: true
 
-  '@cloudflare/workerd-linux-arm64@1.20260210.0':
+  '@cloudflare/workerd-linux-arm64@1.20260212.0':
     optional: true
 
-  '@cloudflare/workerd-windows-64@1.20260210.0':
+  '@cloudflare/workerd-windows-64@1.20260212.0':
    optional: true
 
-  '@cloudflare/workers-types@4.20260210.0': {}
+  '@cloudflare/workers-types@4.20260213.0': {}
 
   '@computesdk/cmd@0.4.1': {}
@@ -6386,18 +6389,18 @@ snapshots:
     dependencies:
       '@jridgewell/trace-mapping': 0.3.9
 
-  '@daytonaio/api-client@0.141.0':
+  '@daytonaio/api-client@0.142.0':
     dependencies:
       axios: 1.13.5
     transitivePeerDependencies:
       - debug
 
-  '@daytonaio/sdk@0.141.0(ws@8.19.0)':
+  '@daytonaio/sdk@0.142.0(ws@8.19.0)':
     dependencies:
       '@aws-sdk/client-s3': 3.975.0
       '@aws-sdk/lib-storage': 3.975.0(@aws-sdk/client-s3@3.975.0)
-      '@daytonaio/api-client': 0.141.0
-      '@daytonaio/toolbox-api-client': 0.141.0
+      '@daytonaio/api-client': 0.142.0
+      '@daytonaio/toolbox-api-client': 0.142.0
       '@iarna/toml': 2.2.5
       '@opentelemetry/api': 1.9.0
       '@opentelemetry/exporter-trace-otlp-http': 0.207.0(@opentelemetry/api@1.9.0)
@@ -6423,7 +6426,7 @@ snapshots:
       - supports-color
       - ws
 
-  '@daytonaio/toolbox-api-client@0.141.0':
+  '@daytonaio/toolbox-api-client@0.142.0':
     dependencies:
       axios: 1.13.5
     transitivePeerDependencies:
@@ -7908,14 +7911,6 @@ snapshots:
       chai: 5.3.3
       tinyrainbow: 2.0.0
 
-  '@vitest/mocker@3.2.4(vite@5.4.21(@types/node@22.19.7))':
-    dependencies:
-      '@vitest/spy': 3.2.4
-      estree-walker: 3.0.3
-      magic-string: 0.30.21
-    optionalDependencies:
-      vite: 5.4.21(@types/node@22.19.7)
-
   '@vitest/mocker@3.2.4(vite@5.4.21(@types/node@25.2.3))':
     dependencies:
       '@vitest/spy': 3.2.4
@@ -8540,7 +8535,7 @@ snapshots:
       glob: 11.1.0
       openapi-fetch: 0.14.1
       platform: 1.3.6
-      tar: 7.5.6
+      tar: 7.5.7
 
   eastasianwidth@0.2.0: {}
 
@@ -9582,12 +9577,12 @@ snapshots:
 
   mimic-response@3.1.0: {}
 
-  miniflare@4.20260210.0:
+  miniflare@4.20260212.0:
     dependencies:
       '@cspotcode/source-map-support': 0.8.1
       sharp: 0.34.5
       undici: 7.18.2
-      workerd: 1.20260210.0
+      workerd: 1.20260212.0
       ws: 8.18.0
       youch: 4.1.0-beta.10
     transitivePeerDependencies:
@@ -10947,7 +10942,7 @@ snapshots:
     dependencies:
       '@types/chai': 5.2.3
       '@vitest/expect': 3.2.4
-      '@vitest/mocker': 3.2.4(vite@5.4.21(@types/node@22.19.7))
+      '@vitest/mocker': 3.2.4(vite@5.4.21(@types/node@25.2.3))
       '@vitest/pretty-format': 3.2.4
       '@vitest/runner': 3.2.4
       '@vitest/snapshot': 3.2.4
@@ -11048,26 +11043,26 @@ snapshots:
     dependencies:
       string-width: 7.2.0
 
-  workerd@1.20260210.0:
+  workerd@1.20260212.0:
     optionalDependencies:
-      '@cloudflare/workerd-darwin-64': 1.20260210.0
-      '@cloudflare/workerd-darwin-arm64': 1.20260210.0
-      '@cloudflare/workerd-linux-64': 1.20260210.0
-      '@cloudflare/workerd-linux-arm64': 1.20260210.0
-      '@cloudflare/workerd-windows-64': 1.20260210.0
+      '@cloudflare/workerd-darwin-64': 1.20260212.0
+      '@cloudflare/workerd-darwin-arm64': 1.20260212.0
+      '@cloudflare/workerd-linux-64': 1.20260212.0
+      '@cloudflare/workerd-linux-arm64': 1.20260212.0
+      '@cloudflare/workerd-windows-64': 1.20260212.0
 
-  wrangler@4.64.0(@cloudflare/workers-types@4.20260210.0):
+  wrangler@4.65.0(@cloudflare/workers-types@4.20260213.0):
     dependencies:
       '@cloudflare/kv-asset-handler': 0.4.2
-      '@cloudflare/unenv-preset': 2.12.1(unenv@2.0.0-rc.24)(workerd@1.20260210.0)
+      '@cloudflare/unenv-preset': 2.12.1(unenv@2.0.0-rc.24)(workerd@1.20260212.0)
       blake3-wasm: 2.1.5
       esbuild: 0.27.3
-      miniflare: 4.20260210.0
+      miniflare: 4.20260212.0
       path-to-regexp: 6.3.0
       unenv: 2.0.0-rc.24
-      workerd: 1.20260210.0
+      workerd: 1.20260212.0
     optionalDependencies:
-      '@cloudflare/workers-types': 4.20260210.0
+      '@cloudflare/workers-types': 4.20260213.0
       fsevents: 2.3.3
     transitivePeerDependencies:
       - bufferutil
@@ -53,6 +53,13 @@ export type QueryValue = string | number | boolean | null | undefined;
 export interface AcpHttpTransportOptions {
   path?: string;
   bootstrapQuery?: Record<string, QueryValue>;
+  /**
+   * Disable the background SSE GET loop. When true, the client operates in
+   * POST-only mode where all responses are read from the POST response body.
+   * Useful for environments where streaming GET requests are not supported
+   * (e.g. Cloudflare Workers `containerFetch`).
+   */
+  disableSse?: boolean;
 }
 
 export interface AcpHttpClientOptions {
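For readers skimming the transport change: the new flag rides on `AcpHttpTransportOptions`. A shape-only sketch (the import path, server id, and agent name are assumptions, not part of this diff):

```ts
import type { AcpHttpTransportOptions } from "sandbox-agent"; // import path assumed

// POST-only mode: no background SSE GET loop; every response is read from
// the POST response body (see the ensureSseLoop change below).
const transport: AcpHttpTransportOptions = {
  path: "/v1/acp/my-server", // hypothetical server id
  bootstrapQuery: { agent: "mock" }, // hypothetical agent name
  disableSse: true,
};
```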
@@ -271,6 +278,7 @@ class StreamableHttpAcpTransport {
   private closed = false;
   private closingPromise: Promise<void> | null = null;
   private postedOnce = false;
+  private readonly sseDisabled: boolean;
 
   constructor(options: StreamableHttpAcpTransportOptions) {
     this.baseUrl = options.baseUrl.replace(/\/$/, "");
@@ -279,6 +287,7 @@ class StreamableHttpAcpTransport {
     this.token = options.token;
     this.defaultHeaders = options.defaultHeaders;
     this.onEnvelope = options.onEnvelope;
+    this.sseDisabled = options.transport?.disableSse ?? false;
     this.bootstrapQuery = options.transport?.bootstrapQuery
       ? buildQueryParams(options.transport.bootstrapQuery)
       : null;
@@ -405,7 +414,7 @@ class StreamableHttpAcpTransport {
   }
 
   private ensureSseLoop(): void {
-    if (this.sseLoop || this.closed || !this.postedOnce) {
+    if (this.sseDisabled || this.sseLoop || this.closed || !this.postedOnce) {
       return;
     }
 
@@ -35,10 +35,19 @@
   "devDependencies": {
     "@types/node": "^22.0.0",
     "openapi-typescript": "^6.7.0",
+    "tar": "^7.0.0",
     "tsup": "^8.0.0",
     "typescript": "^5.7.0",
     "vitest": "^3.0.0"
   },
+  "peerDependencies": {
+    "tar": "^7.0.0"
+  },
+  "peerDependenciesMeta": {
+    "tar": {
+      "optional": true
+    }
+  },
   "optionalDependencies": {
     "@sandbox-agent/cli": "workspace:*"
   }
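Because `tar` is only an optional peer dependency, consumers without it fail only when they call a path-based helper. A sketch of the failure mode (`sdk` as connected in the docs above; the error text is quoted from `importTarOrThrow` later in this diff):

```ts
declare const sdk: import("sandbox-agent").SandboxAgent;

try {
  await sdk.uploadFsBatch({ sourcePath: "./skills" }, { path: "./skills" });
} catch (err) {
  // "`tar` is required for this operation. Install it (e.g. `npm i tar`)
  // or use the raw byte APIs instead."
  console.error((err as Error).message);
}
```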
@@ -28,6 +28,7 @@ import {
   type FsMoveResponse,
   type FsPathQuery,
   type FsStat,
+  type FsDownloadBatchQuery,
   type FsUploadBatchQuery,
   type FsUploadBatchResponse,
   type FsWriteResponse,
@@ -53,6 +54,101 @@ const DEFAULT_REPLAY_MAX_EVENTS = 50;
 const DEFAULT_REPLAY_MAX_CHARS = 12_000;
 const EVENT_INDEX_SCAN_EVENTS_LIMIT = 500;
 
+function isNodeRuntime(): boolean {
+  return typeof process !== "undefined" && !!process.versions?.node;
+}
+
+type TarModule = {
+  create: (options: Record<string, unknown>, files: string[]) => Promise<unknown> | unknown;
+  extract: (options: Record<string, unknown>) => unknown;
+};
+
+async function importTarOrThrow(): Promise<TarModule> {
+  try {
+    return (await import("tar")) as unknown as TarModule;
+  } catch {
+    throw new Error(
+      "`tar` is required for this operation. Install it (e.g. `npm i tar`) or use the raw byte APIs instead.",
+    );
+  }
+}
+
+async function createTarBytesFromSourcePath(sourcePath: string): Promise<ArrayBuffer> {
+  if (!isNodeRuntime()) {
+    throw new Error("Path-based batch upload requires a Node.js runtime.");
+  }
+
+  const tar = await importTarOrThrow();
+  const fs = await import("node:fs/promises");
+  const os = await import("node:os");
+  const path = await import("node:path");
+
+  const stat = await fs.stat(sourcePath);
+  const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "sandbox-agent-upload-"));
+  const tarPath = path.join(tmpDir, "upload.tar");
+
+  try {
+    if (stat.isDirectory()) {
+      // Pack directory contents (equivalent to: tar -cf upload.tar -C <dir> .)
+      await tar.create(
+        {
+          file: tarPath,
+          cwd: sourcePath,
+        },
+        ["."],
+      );
+    } else if (stat.isFile()) {
+      // Pack a single file as ./<basename>
+      await tar.create(
+        {
+          file: tarPath,
+          cwd: path.dirname(sourcePath),
+        },
+        [path.basename(sourcePath)],
+      );
+    } else {
+      throw new Error(`Unsupported path type for batch upload: ${sourcePath}`);
+    }
+
+    const bytes = await fs.readFile(tarPath);
+    // Slice to avoid sharing a larger underlying buffer.
+    return bytes.buffer.slice(bytes.byteOffset, bytes.byteOffset + bytes.byteLength);
+  } finally {
+    await fs.rm(tmpDir, { recursive: true, force: true });
+  }
+}
+
+async function writeBytesToPath(outPath: string, bytes: Uint8Array): Promise<void> {
+  if (!isNodeRuntime()) {
+    throw new Error("Path-based batch download requires a Node.js runtime.");
+  }
+  const fs = await import("node:fs/promises");
+  const path = await import("node:path");
+  await fs.mkdir(path.dirname(outPath), { recursive: true });
+  await fs.writeFile(outPath, bytes);
+}
+
+async function extractTarBytesToDir(destDir: string, tarBytes: Uint8Array): Promise<void> {
+  if (!isNodeRuntime()) {
+    throw new Error("Extracting batch downloads requires a Node.js runtime.");
+  }
+  const tar = await importTarOrThrow();
+  const fs = await import("node:fs/promises");
+  const stream = await import("node:stream");
+  const streamPromises = await import("node:stream/promises");
+  const buffer = await import("node:buffer");
+
+  await fs.mkdir(destDir, { recursive: true });
+  const readable = new stream.PassThrough();
+  readable.end(buffer.Buffer.from(tarBytes));
+  await streamPromises.pipeline(
+    readable as any,
+    tar.extract({
+      cwd: destDir,
+    }) as any,
+  );
+}
+
 export interface SandboxAgentConnectOptions {
   baseUrl: string;
   token?: string;
@@ -61,6 +157,13 @@ export interface SandboxAgentConnectOptions {
   persist?: SessionPersistDriver;
   replayMaxEvents?: number;
   replayMaxChars?: number;
+  /**
+   * Disable the background SSE GET loop for ACP connections. When true,
+   * all responses are read from POST response bodies. Useful for environments
+   * where streaming GET requests are not supported (e.g. Cloudflare Workers
+   * `containerFetch`).
+   */
+  disableSse?: boolean;
 }
 
 export interface SandboxAgentStartOptions extends Omit<SandboxAgentConnectOptions, "baseUrl" | "token"> {
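At the top-level API this surfaces as a connect option. A minimal sketch using the base URL from the docs above:

```ts
import { SandboxAgent } from "sandbox-agent";

// POST-only ACP transport, e.g. for Cloudflare Workers `containerFetch`.
const sdk = await SandboxAgent.connect({
  baseUrl: "http://127.0.0.1:2468",
  disableSse: true,
});
```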
@@ -207,6 +310,7 @@ export class LiveAcpConnection {
     headers?: HeadersInit;
     agent: string;
     serverId: string;
+    disableSse?: boolean;
     onObservedEnvelope: (
       connection: LiveAcpConnection,
       envelope: AnyMessage,
@@ -225,6 +329,7 @@ export class LiveAcpConnection {
     transport: {
       path: `${API_PREFIX}/acp/${encodeURIComponent(options.serverId)}`,
       bootstrapQuery: { agent: options.agent },
+      disableSse: options.disableSse,
     },
     client: {
       sessionUpdate: async (_notification: SessionNotification) => {
@@ -409,6 +514,7 @@ export class SandboxAgent {
   private readonly persist: SessionPersistDriver;
   private readonly replayMaxEvents: number;
   private readonly replayMaxChars: number;
+  private readonly disableSse: boolean;
 
   private spawnHandle?: SandboxAgentSpawnHandle;
 
@@ -427,6 +533,7 @@ export class SandboxAgent {
 
     this.replayMaxEvents = normalizePositiveInt(options.replayMaxEvents, DEFAULT_REPLAY_MAX_EVENTS);
     this.replayMaxChars = normalizePositiveInt(options.replayMaxChars, DEFAULT_REPLAY_MAX_CHARS);
+    this.disableSse = options.disableSse ?? false;
 
     if (!this.fetcher) {
       throw new Error("Fetch API is not available; provide a fetch implementation.");
@@ -454,6 +561,7 @@ export class SandboxAgent {
       persist: options.persist,
       replayMaxEvents: options.replayMaxEvents,
       replayMaxChars: options.replayMaxChars,
+      disableSse: options.disableSse,
     });
 
     client.spawnHandle = handle;
@@ -685,16 +793,44 @@ export class SandboxAgent {
     return this.requestJson("GET", `${FS_PATH}/stat`, { query });
   }
 
-  async uploadFsBatch(body: BodyInit, query?: FsUploadBatchQuery): Promise<FsUploadBatchResponse> {
+  async uploadFsBatch(
+    body: BodyInit | { sourcePath: string },
+    query?: FsUploadBatchQuery,
+  ): Promise<FsUploadBatchResponse> {
+    const resolvedBody =
+      typeof body === "object" && body !== null && "sourcePath" in body
+        ? await createTarBytesFromSourcePath((body as { sourcePath: string }).sourcePath)
+        : body;
     const response = await this.requestRaw("POST", `${FS_PATH}/upload-batch`, {
       query,
-      rawBody: body,
+      rawBody: resolvedBody,
       contentType: "application/x-tar",
       accept: "application/json",
     });
     return (await response.json()) as FsUploadBatchResponse;
   }
 
+  async downloadFsBatch(
+    query: FsDownloadBatchQuery = {},
+    options?: { outPath?: string; extractTo?: string },
+  ): Promise<Uint8Array> {
+    const response = await this.requestRaw("GET", `${FS_PATH}/download-batch`, {
+      query,
+      accept: "application/x-tar",
+    });
+    const buffer = await response.arrayBuffer();
+    const bytes = new Uint8Array(buffer);
+
+    if (options?.outPath) {
+      await writeBytesToPath(options.outPath, bytes);
+    }
+    if (options?.extractTo) {
+      await extractTarBytesToDir(options.extractTo, bytes);
+    }
+
+    return bytes;
+  }
+
   async getMcpConfig(query: McpConfigQuery): Promise<McpServerConfig> {
     return this.requestJson("GET", `${API_PREFIX}/config/mcp`, { query });
   }
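Taken together, the two methods now cover both byte-level and path-level workflows. A usage sketch (`sdk` as above; paths and the tar buffer are illustrative):

```ts
declare const sdk: import("sandbox-agent").SandboxAgent;
declare const tarBytes: Uint8Array; // a tar archive you produced yourself

// Portable form: caller supplies tar bytes (works outside Node.js).
await sdk.uploadFsBatch(tarBytes, { path: "/opt/skills" });

// Node-only convenience: the SDK packs a local file or directory
// (requires the optional `tar` peer dependency).
await sdk.uploadFsBatch({ sourcePath: "./skills" }, { path: "/opt/skills" });

// Download: always returns the bytes; optionally persist and/or extract.
const bytes = await sdk.downloadFsBatch(
  { path: "/opt/skills" },
  { outPath: "./skills.tar", extractTo: "./skills" },
);
```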
@@ -32,6 +32,14 @@ export interface paths {
     put: operations["put_v1_config_skills"];
     delete: operations["delete_v1_config_skills"];
   };
+  "/v1/fs/download-batch": {
+    /**
+     * Download a tar archive of a file or directory.
+     * @description Returns `application/x-tar` bytes containing the requested path. If the path is a directory,
+     * the archive contains its contents (similar to `tar -C <dir> .`).
+     */
+    get: operations["get_v1_fs_download_batch"];
+  };
   "/v1/fs/entries": {
     get: operations["get_v1_fs_entries"];
   };
@@ -141,6 +149,9 @@ export interface components {
       path: string;
       recursive?: boolean | null;
     };
+    FsDownloadBatchQuery: {
+      path?: string | null;
+    };
     FsEntriesQuery: {
       path?: string | null;
     };
@@ -599,6 +610,25 @@ export interface operations {
       };
     };
   };
+  /**
+   * Download a tar archive of a file or directory.
+   * @description Returns `application/x-tar` bytes containing the requested path. If the path is a directory,
+   * the archive contains its contents (similar to `tar -C <dir> .`).
+   */
+  get_v1_fs_download_batch: {
+    parameters: {
+      query?: {
+        /** @description Source path (file or directory) */
+        path?: string | null;
+      };
+    };
+    responses: {
+      /** @description tar archive bytes */
+      200: {
+        content: never;
+      };
+    };
+  };
   get_v1_fs_entries: {
     parameters: {
       query?: {
@@ -38,6 +38,7 @@ export type {
   FsEntry,
   FsMoveRequest,
   FsMoveResponse,
+  FsDownloadBatchQuery,
   FsPathQuery,
   FsStat,
   FsUploadBatchQuery,
@@ -18,6 +18,7 @@ export type FsEntry = components["schemas"]["FsEntry"];
 export type FsPathQuery = QueryParams<operations["get_v1_fs_file"]>;
 export type FsDeleteQuery = QueryParams<operations["delete_v1_fs_entry"]>;
 export type FsUploadBatchQuery = QueryParams<operations["post_v1_fs_upload_batch"]>;
+export type FsDownloadBatchQuery = QueryParams<operations["get_v1_fs_download_batch"]>;
 export type FsWriteResponse = JsonResponse<operations["put_v1_fs_file"], 200>;
 export type FsActionResponse = JsonResponse<operations["delete_v1_fs_entry"], 200>;
 export type FsMoveRequest = JsonRequestBody<operations["post_v1_fs_move"]>;
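With the export in place (it is re-exported from the package index per the change above), callers can type their queries against the generated operation. A small sketch:

```ts
import type { FsDownloadBatchQuery } from "sandbox-agent";
declare const sdk: import("sandbox-agent").SandboxAgent;

const query: FsDownloadBatchQuery = { path: "./skills" };
const bytes = await sdk.downloadFsBatch(query);
```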
@@ -1,6 +1,12 @@
 import { describe, it, expect, beforeAll, afterAll } from "vitest";
-import { existsSync } from "node:fs";
-import { mkdtempSync, rmSync } from "node:fs";
+import {
+  existsSync,
+  mkdtempSync,
+  rmSync,
+  readFileSync,
+  writeFileSync,
+  mkdirSync,
+} from "node:fs";
 import { dirname, resolve } from "node:path";
 import { join } from "node:path";
 import { fileURLToPath } from "node:url";
@@ -15,6 +21,77 @@ import { prepareMockAgentDataHome } from "./helpers/mock-agent.ts";
 
 const __dirname = dirname(fileURLToPath(import.meta.url));
 
+function isZeroBlock(block: Uint8Array): boolean {
+  for (const b of block) {
+    if (b !== 0) {
+      return false;
+    }
+  }
+  return true;
+}
+
+function readTarString(block: Uint8Array, offset: number, length: number): string {
+  const slice = block.subarray(offset, offset + length);
+  let end = 0;
+  while (end < slice.length && slice[end] !== 0) {
+    end += 1;
+  }
+  return new TextDecoder().decode(slice.subarray(0, end));
+}
+
+function readTarOctal(block: Uint8Array, offset: number, length: number): number {
+  const raw = readTarString(block, offset, length).trim();
+  if (!raw) {
+    return 0;
+  }
+  return Number.parseInt(raw, 8);
+}
+
+function normalizeTarPath(p: string): string {
+  let out = p.replaceAll("\\", "/");
+  while (out.startsWith("./")) {
+    out = out.slice(2);
+  }
+  while (out.startsWith("/")) {
+    out = out.slice(1);
+  }
+  return out;
+}
+
+function untarFiles(tarBytes: Uint8Array): Map<string, Uint8Array> {
+  // Minimal ustar tar reader for tests. Supports regular files and directories.
+  const files = new Map<string, Uint8Array>();
+  let offset = 0;
+  while (offset + 512 <= tarBytes.length) {
+    const header = tarBytes.subarray(offset, offset + 512);
+    if (isZeroBlock(header)) {
+      const next = tarBytes.subarray(offset + 512, offset + 1024);
+      if (next.length === 512 && isZeroBlock(next)) {
+        break;
+      }
+      offset += 512;
+      continue;
+    }
+
+    const name = readTarString(header, 0, 100);
+    const prefix = readTarString(header, 345, 155);
+    const fullName = normalizeTarPath(prefix ? `${prefix}/${name}` : name);
+    const size = readTarOctal(header, 124, 12);
+    const typeflag = readTarString(header, 156, 1);
+
+    offset += 512;
+    const content = tarBytes.subarray(offset, offset + size);
+
+    // Regular file type is "0" (or NUL). Directories are "5".
+    if ((typeflag === "" || typeflag === "0") && fullName) {
+      files.set(fullName, content);
+    }
+
+    offset += Math.ceil(size / 512) * 512;
+  }
+  return files;
+}
+
 function findBinary(): string | null {
   if (process.env.SANDBOX_AGENT_BIN) {
     return process.env.SANDBOX_AGENT_BIN;
@@ -281,4 +358,94 @@ describe("Integration: TypeScript SDK flat session API", () => {
     await sdk.dispose();
     rmSync(directory, { recursive: true, force: true });
   });
+
+  it("supports filesystem download batch (tar)", async () => {
+    const sdk = await SandboxAgent.connect({
+      baseUrl,
+      token,
+    });
+
+    const root = mkdtempSync(join(tmpdir(), "sdk-fs-download-batch-"));
+    const dir = join(root, "docs");
+    const nested = join(dir, "nested");
+    await sdk.mkdirFs({ path: nested });
+    await sdk.writeFsFile({ path: join(dir, "a.txt") }, new TextEncoder().encode("aaa"));
+    await sdk.writeFsFile({ path: join(nested, "b.txt") }, new TextEncoder().encode("bbb"));
+
+    const tarBytes = await sdk.downloadFsBatch({ path: dir });
+    expect(tarBytes.length).toBeGreaterThan(0);
+
+    const files = untarFiles(tarBytes);
+    const a = files.get("a.txt");
+    const b = files.get("nested/b.txt");
+    expect(a).toBeTruthy();
+    expect(b).toBeTruthy();
+    expect(new TextDecoder().decode(a!)).toBe("aaa");
+    expect(new TextDecoder().decode(b!)).toBe("bbb");
+
+    await sdk.dispose();
+    rmSync(root, { recursive: true, force: true });
+  });
+
+  it("supports filesystem upload batch from sourcePath (requires tar)", async () => {
+    const sdk = await SandboxAgent.connect({
+      baseUrl,
+      token,
+    });
+
+    const sourceRoot = mkdtempSync(join(tmpdir(), "sdk-upload-source-"));
+    const sourceDir = join(sourceRoot, "project");
+    mkdirSync(join(sourceDir, "nested"), { recursive: true });
+    writeFileSync(join(sourceDir, "a.txt"), "aaa");
+    writeFileSync(join(sourceDir, "nested", "b.txt"), "bbb");
+
+    const destRoot = mkdtempSync(join(tmpdir(), "sdk-upload-dest-"));
+    const destDir = join(destRoot, "uploaded");
+
+    await sdk.uploadFsBatch({ sourcePath: sourceDir }, { path: destDir });
+
+    const a = await sdk.readFsFile({ path: join(destDir, "a.txt") });
+    const b = await sdk.readFsFile({ path: join(destDir, "nested", "b.txt") });
+    expect(new TextDecoder().decode(a)).toBe("aaa");
+    expect(new TextDecoder().decode(b)).toBe("bbb");
+
+    await sdk.dispose();
+    rmSync(sourceRoot, { recursive: true, force: true });
+    rmSync(destRoot, { recursive: true, force: true });
+  });
+
+  it("supports filesystem download batch to outPath and extractTo (requires tar for extract)", async () => {
+    const sdk = await SandboxAgent.connect({
+      baseUrl,
+      token,
+    });
+
+    const serverRoot = mkdtempSync(join(tmpdir(), "sdk-download-server-"));
+    const serverDir = join(serverRoot, "docs");
+    await sdk.mkdirFs({ path: join(serverDir, "nested") });
+    await sdk.writeFsFile({ path: join(serverDir, "a.txt") }, new TextEncoder().encode("aaa"));
+    await sdk.writeFsFile(
+      { path: join(serverDir, "nested", "b.txt") },
+      new TextEncoder().encode("bbb"),
+    );
+
+    const localRoot = mkdtempSync(join(tmpdir(), "sdk-download-local-"));
+    const outTar = join(localRoot, "docs.tar");
+    const extractTo = join(localRoot, "extracted");
+
+    const bytes = await sdk.downloadFsBatch(
+      { path: serverDir },
+      { outPath: outTar, extractTo },
+    );
+    expect(bytes.length).toBeGreaterThan(0);
+
+    const extractedA = readFileSync(join(extractTo, "a.txt"), "utf8");
+    const extractedB = readFileSync(join(extractTo, "nested", "b.txt"), "utf8");
+    expect(extractedA).toBe("aaa");
+    expect(extractedB).toBe("bbb");
+
+    await sdk.dispose();
+    rmSync(serverRoot, { recursive: true, force: true });
+    rmSync(localRoot, { recursive: true, force: true });
+  });
 });
@@ -26,7 +26,7 @@ use schemars::JsonSchema;
 use serde::de::DeserializeOwned;
 use serde::{Deserialize, Serialize};
 use serde_json::{json, Value};
-use tar::Archive;
+use tar::{Archive, Builder};
 use tower_http::trace::TraceLayer;
 use tracing::Span;
 use utoipa::{Modify, OpenApi, ToSchema};
@@ -166,6 +166,7 @@ pub fn build_router_with_state(shared: Arc<AppState>) -> (Router, Arc<AppState>)
         .route("/fs/move", post(post_v1_fs_move))
         .route("/fs/stat", get(get_v1_fs_stat))
         .route("/fs/upload-batch", post(post_v1_fs_upload_batch))
+        .route("/fs/download-batch", get(get_v1_fs_download_batch))
         .route(
             "/config/mcp",
             get(get_v1_config_mcp)
@@ -295,6 +296,7 @@ pub async fn shutdown_servers(state: &Arc<AppState>) {
         post_v1_fs_move,
         get_v1_fs_stat,
         post_v1_fs_upload_batch,
+        get_v1_fs_download_batch,
         get_v1_config_mcp,
         put_v1_config_mcp,
         delete_v1_config_mcp,
@@ -321,6 +323,7 @@ pub async fn shutdown_servers(state: &Arc<AppState>) {
         FsEntriesQuery,
         FsDeleteQuery,
         FsUploadBatchQuery,
+        FsDownloadBatchQuery,
         FsEntryType,
         FsEntry,
         FsStat,
@@ -1075,6 +1078,129 @@ async fn post_v1_fs_upload_batch(
     }))
 }
 
+fn tar_add_path(
+    builder: &mut Builder<&mut Vec<u8>>,
+    base: &StdPath,
+    path: &StdPath,
+) -> Result<(), SandboxError> {
+    let metadata = fs::symlink_metadata(path).map_err(|err| map_fs_error(path, err))?;
+    if metadata.file_type().is_symlink() {
+        return Err(SandboxError::InvalidRequest {
+            message: format!(
+                "symlinks are not supported in download-batch: {}",
+                path.display()
+            ),
+        });
+    }
+
+    let rel = path
+        .strip_prefix(base)
+        .map_err(|_| SandboxError::InvalidRequest {
+            message: format!("path is not under base: {}", path.display()),
+        })?;
+    let name = StdPath::new(".").join(rel);
+
+    if metadata.is_dir() {
+        builder
+            .append_dir(&name, path)
+            .map_err(|err| SandboxError::StreamError {
+                message: err.to_string(),
+            })?;
+        for entry in fs::read_dir(path).map_err(|err| map_fs_error(path, err))? {
+            let entry = entry.map_err(|err| SandboxError::StreamError {
+                message: err.to_string(),
+            })?;
+            tar_add_path(builder, base, &entry.path())?;
+        }
+        return Ok(());
+    }
+
+    if metadata.is_file() {
+        builder
+            .append_path_with_name(path, &name)
+            .map_err(|err| SandboxError::StreamError {
+                message: err.to_string(),
+            })?;
+        return Ok(());
+    }
+
+    Err(SandboxError::InvalidRequest {
+        message: format!("unsupported filesystem entry type: {}", path.display()),
+    })
+}
+
+/// Download a tar archive of a file or directory.
+///
+/// Returns `application/x-tar` bytes containing the requested path. If the path is a directory,
+/// the archive contains its contents (similar to `tar -C <dir> .`).
+#[utoipa::path(
+    get,
+    path = "/v1/fs/download-batch",
+    tag = "v1",
+    params(
+        ("path" = Option<String>, Query, description = "Source path (file or directory)")
+    ),
+    responses(
+        (status = 200, description = "tar archive bytes")
+    )
+)]
+async fn get_v1_fs_download_batch(
+    Query(query): Query<FsDownloadBatchQuery>,
+) -> Result<Response, ApiError> {
+    let raw = query.path.unwrap_or_else(|| ".".to_string());
+    let target = resolve_fs_path(&raw)?;
+    let metadata = fs::symlink_metadata(&target).map_err(|err| map_fs_error(&target, err))?;
+    if metadata.file_type().is_symlink() {
+        return Err(SandboxError::InvalidRequest {
+            message: format!(
+                "symlinks are not supported in download-batch: {}",
+                target.display()
+            ),
+        }
+        .into());
+    }
+
+    let mut out = Vec::<u8>::new();
+    {
+        let mut builder = Builder::new(&mut out);
+        if metadata.is_dir() {
+            // Pack directory contents, not an extra top-level folder wrapper.
+            for entry in fs::read_dir(&target).map_err(|err| map_fs_error(&target, err))? {
+                let entry = entry.map_err(|err| SandboxError::StreamError {
+                    message: err.to_string(),
+                })?;
+                tar_add_path(&mut builder, &target, &entry.path())?;
+            }
+        } else if metadata.is_file() {
+            let name = StdPath::new(".").join(target.file_name().ok_or_else(|| {
+                SandboxError::InvalidRequest {
+                    message: format!("invalid file path: {}", target.display()),
+                }
+            })?);
+            builder
+                .append_path_with_name(&target, name)
+                .map_err(|err| SandboxError::StreamError {
+                    message: err.to_string(),
+                })?;
+        } else {
+            return Err(SandboxError::InvalidRequest {
+                message: format!("unsupported filesystem entry type: {}", target.display()),
+            }
+            .into());
+        }
+
+        builder.finish().map_err(|err| SandboxError::StreamError {
+            message: err.to_string(),
+        })?;
+    }
+
+    Ok((
+        [(header::CONTENT_TYPE, "application/x-tar")],
+        Bytes::from(out),
+    )
+        .into_response())
+}
+
 #[utoipa::path(
     get,
     path = "/v1/config/mcp",
@@ -128,6 +128,13 @@ pub struct FsUploadBatchQuery {
     pub path: Option<String>,
 }
 
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct FsDownloadBatchQuery {
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub path: Option<String>,
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, ToSchema)]
 #[serde(rename_all = "lowercase")]
 pub enum FsEntryType {
@@ -1,4 +1,5 @@
 use super::*;
+use std::io::Cursor;
 
 #[tokio::test]
 async fn v1_health_removed_legacy_and_opencode_unmounted() {
@@ -134,6 +135,73 @@ async fn v1_filesystem_endpoints_round_trip() {
     assert_eq!(status, StatusCode::OK);
 }
 
+#[tokio::test]
+async fn v1_filesystem_download_batch_returns_tar() {
+    let test_app = TestApp::new(AuthConfig::disabled());
+
+    let (status, _, _) = send_request_raw(
+        &test_app.app,
+        Method::PUT,
+        "/v1/fs/file?path=docs/a.txt",
+        Some(b"aaa".to_vec()),
+        &[],
+        Some("application/octet-stream"),
+    )
+    .await;
+    assert_eq!(status, StatusCode::OK);
+
+    let (status, _, _) = send_request_raw(
+        &test_app.app,
+        Method::PUT,
+        "/v1/fs/file?path=docs/nested/b.txt",
+        Some(b"bbb".to_vec()),
+        &[],
+        Some("application/octet-stream"),
+    )
+    .await;
+    assert_eq!(status, StatusCode::OK);
+
+    let (status, headers, body) = send_request_raw(
+        &test_app.app,
+        Method::GET,
+        "/v1/fs/download-batch?path=docs",
+        None,
+        &[],
+        None,
+    )
+    .await;
+    assert_eq!(status, StatusCode::OK);
+    assert_eq!(
+        headers
+            .get(header::CONTENT_TYPE)
+            .and_then(|value| value.to_str().ok())
+            .unwrap_or(""),
+        "application/x-tar"
+    );
+
+    let mut archive = tar::Archive::new(Cursor::new(body));
+    let mut paths: Vec<String> = archive
+        .entries()
+        .expect("tar entries")
+        .map(|entry| {
+            entry
+                .expect("tar entry")
+                .path()
+                .expect("tar path")
+                .to_string_lossy()
+                .to_string()
+        })
+        .collect();
+    paths.sort();
+
+    let has_a = paths.iter().any(|p| p == "a.txt" || p == "./a.txt");
+    let has_b = paths
+        .iter()
+        .any(|p| p == "nested/b.txt" || p == "./nested/b.txt");
+    assert!(has_a, "expected a.txt in tar, got: {paths:?}");
+    assert!(has_b, "expected nested/b.txt in tar, got: {paths:?}");
+}
+
 #[tokio::test]
 #[serial]
 async fn require_preinstall_blocks_missing_agent() {