diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d4df1c8..904259d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -20,8 +20,22 @@ jobs:
with:
go-version-file: go.mod
+ - name: Setup Helm
+ uses: azure/setup-helm@v4
+
- name: Build
run: go build ./...
- name: Test
run: go test ./...
+
+ - name: Helm lint
+ run: helm lint chart/agentikube/
+
+ - name: Helm template
+ run: |
+ helm template agentikube chart/agentikube/ \
+ --namespace sandboxes \
+ --set storage.filesystemId=fs-test \
+ --set sandbox.image=test:latest \
+ --set compute.clusterName=test-cluster
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index bbf975f..b754e7b 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -9,6 +9,7 @@ on:
permissions:
contents: write
+ packages: write
concurrency:
group: release-${{ github.ref }}
@@ -29,6 +30,9 @@ jobs:
with:
go-version-file: go.mod
+ - name: Setup Helm
+ uses: azure/setup-helm@v4
+
- name: Compute next version
id: version
shell: bash
@@ -93,3 +97,17 @@ jobs:
name: ${{ steps.version.outputs.next_tag }}
generate_release_notes: true
files: dist/*
+
+ - name: Set chart version
+ run: |
+ sed -i "s/^version:.*/version: ${{ steps.version.outputs.version }}/" chart/agentikube/Chart.yaml
+ sed -i "s/^appVersion:.*/appVersion: \"${{ steps.version.outputs.version }}\"/" chart/agentikube/Chart.yaml
+
+ - name: Package Helm chart
+ run: helm package chart/agentikube/ --destination .helm-pkg
+
+ - name: Log in to GHCR
+ run: echo "${{ secrets.GITHUB_TOKEN }}" | helm registry login ghcr.io -u ${{ github.actor }} --password-stdin
+
+ - name: Push Helm chart to GHCR
+ run: helm push .helm-pkg/agentikube-${{ steps.version.outputs.version }}.tgz oci://ghcr.io/${{ github.repository_owner }}
diff --git a/.gitignore b/.gitignore
index 6198882..16d3c4d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1 @@
.cache
-agentikube
diff --git a/Makefile b/Makefile
index 160de4a..f68ed93 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: build install clean fmt vet lint
+.PHONY: build install clean fmt vet lint crds helm-lint helm-template
build:
go build -o agentikube ./cmd/agentikube
@@ -16,3 +16,16 @@ vet:
go vet ./...
lint: fmt vet
+
+crds:
+ ./scripts/download-crds.sh
+
+helm-lint:
+ helm lint chart/agentikube/
+
+helm-template:
+ helm template agentikube chart/agentikube/ \
+ --namespace sandboxes \
+ --set storage.filesystemId=fs-test \
+ --set sandbox.image=test:latest \
+ --set compute.clusterName=test-cluster
diff --git a/README.md b/README.md
index cef9d1d..d566e76 100644
--- a/README.md
+++ b/README.md
@@ -1,172 +1,85 @@
# agentikube
-[](https://github.com/harivansh-afk/agentikube/releases)
-[](https://github.com/harivansh-afk/agentikube/releases)
-[](https://github.com/harivansh-afk/agentikube)
[](https://github.com/harivansh-afk/agentikube/blob/main/go.mod)
-[](https://github.com/harivansh-afk/agentikube)
-[](https://github.com/harivansh-afk/agentikube/actions/workflows/ci.yml)
-[](https://github.com/harivansh-afk/agentikube/actions/workflows/release.yml)
+[Helm chart](https://github.com/harivansh-afk/agentikube/tree/main/chart/agentikube)
+[Latest release](https://github.com/harivansh-afk/agentikube/releases/latest)
-This repo is a small Go CLI for running isolated agent sandboxes on Kubernetes.
+Isolated stateful agent sandboxes on Kubernetes
-The main job of `agentikube` is:
-- set up shared sandbox infra (`init`, `up`)
-- create one sandbox per user/handle (`create`)
-- let you inspect and access sandboxes (`list`, `status`, `ssh`)
-- clean up sandboxes or shared infra (`destroy`, `down`)
+
-It is built for AWS-style setups (EFS + optional Karpenter).
-
-## What This Stands Up
-
-When you run `up`, it renders and applies Kubernetes manifests from templates.
-
-Core resources:
-- `Namespace`
-- `StorageClass` (`efs-sandbox`, provisioner `efs.csi.aws.com`)
-- `SandboxTemplate` (`sandbox-template`)
-
-Optional resources:
-- `SandboxWarmPool` (if `sandbox.warmPool.enabled: true`)
-- `NodePool` + `EC2NodeClass` (if `compute.type: karpenter`)
-
-When you run `create `, it creates:
-- `Secret` (`sandbox-`) with provider credentials
-- `SandboxClaim` (`sandbox-`) that points to `sandbox-template`
-- PVC from template (`workspace` volume claim template)
-
-## Filesystem
-
-```text
-cmd/agentikube/main.go # CLI entrypoint + subcommand wiring
-internal/config/ # config structs + validation/defaults
-internal/manifest/ # template rendering
-internal/manifest/templates/ # Kubernetes YAML templates
-internal/kube/ # kube client, apply, wait, exec helpers
-internal/commands/ # command implementations
-agentikube.example.yaml # example config you copy to agentikube.yaml
-Makefile # build/install/fmt/vet targets
-```
-
-## How It Works (Simple Flow)
-
-```mermaid
-flowchart TD
- A[agentikube command] --> B[Load agentikube.yaml]
- B --> C[Validate config + apply defaults]
- C --> D{Command}
- D -->|init| E[Install CRDs + check prereqs + ensure namespace]
- D -->|up| F[Render templates -> server-side apply]
- D -->|create| G[Create Secret + SandboxClaim]
- G --> H[Watch SandboxClaim until Ready]
- D -->|list/status| I[Read SandboxClaim/WarmPool state]
- D -->|ssh| J[Resolve pod name -> kubectl exec -it]
- D -->|destroy| K[Delete SandboxClaim + Secret + best-effort PVC]
- D -->|down| L[Delete warm pool + template, keep user sandboxes]
-```
-
-## Resource Diagram (Abilities + Resources)
-
-```mermaid
-flowchart LR
- CLI[agentikube CLI] --> K8S[Kubernetes API]
- CLI --> KUBECTL[kubectl binary]
-
- K8S --> NS[Namespace]
- K8S --> SC[StorageClass efs-sandbox]
- K8S --> ST[SandboxTemplate]
- K8S --> WP[SandboxWarmPool]
- K8S --> NP[NodePool]
- K8S --> ENC[EC2NodeClass]
-
- K8S --> CLAIM[SandboxClaim per user]
- K8S --> SECRET[Secret per user]
- CLAIM --> POD[Sandbox Pod]
- POD --> PVC[Workspace PVC]
- PVC --> SC
- SC --> EFS[(AWS EFS)]
-
- NP --> EC2[(EC2 nodes via Karpenter)]
- ENC --> EC2
-```
-
-## Commands
-
-- `agentikube init`
- Installs agent-sandbox CRDs, checks for EFS CSI/Karpenter, and ensures namespace exists.
-- `agentikube up [--dry-run]`
- Renders manifests and applies them with server-side apply. `--dry-run` prints YAML only.
-- `agentikube create --provider --api-key `
- Creates per-user Secret + SandboxClaim and waits (up to 3 minutes) for Ready.
-- `agentikube list`
- Shows handle, status, age, and pod name for all sandbox claims.
-- `agentikube ssh `
- Finds the sandbox pod and opens `/bin/sh` using `kubectl exec -it`.
-- `agentikube destroy [--yes]`
- Deletes SandboxClaim + Secret + best-effort PVC for that handle.
-- `agentikube down`
- Deletes shared warm pool/template infra but preserves existing user sandboxes.
-- `agentikube status`
- Prints warm pool numbers, sandbox count, and Karpenter node count (if enabled).
-
-## Quick Start
-
-1. Copy config:
+## Install
```bash
-cp agentikube.example.yaml agentikube.yaml
+helm install agentikube oci://ghcr.io/harivansh-afk/agentikube \
+ -n sandboxes --create-namespace \
+ -f my-values.yaml
```
-2. Fill your values in `agentikube.yaml`:
-- namespace
-- EFS filesystem ID / base path
-- sandbox image
-- compute settings
+Create a `my-values.yaml` with your cluster details:
-3. Run:
+```yaml
+compute:
+ clusterName: my-eks-cluster
+storage:
+ filesystemId: fs-0123456789abcdef0
+sandbox:
+ image: my-registry/sandbox:latest
+```
+
+See [`values.yaml`](chart/agentikube/values.yaml) for all options.
+
+## CLI
+
+The Go CLI handles runtime operations that are inherently imperative:
```bash
-agentikube init
-agentikube up
agentikube create demo --provider openai --api-key
agentikube list
agentikube ssh demo
+agentikube status
+agentikube destroy demo
```
-## Test CLI Locally
+Build it with `go build ./cmd/agentikube` or `make build`.
-Use this exact flow to verify the CLI on your machine:
+## What gets created
+
+The Helm chart installs:
+
+- StorageClass (`efs-sandbox`) backed by your EFS filesystem
+- SandboxTemplate defining the pod spec
+- NetworkPolicy for ingress/egress rules
+- SandboxWarmPool (optional, enabled by default)
+- Karpenter NodePool + EC2NodeClass (optional, when `compute.type: karpenter`)
+
+Each `agentikube create <handle>` then adds a Secret, SandboxClaim, and workspace PVC for that user.
+
+## Project layout
+
+```
+cmd/agentikube/ CLI entrypoint
+internal/ config, manifest rendering, kube helpers
+chart/agentikube/ Helm chart
+scripts/ CRD download helper
+```
+
+## Development
```bash
-# 1) Build + tests
-mkdir -p .cache/go-build .cache/go-mod
-GOCACHE=$(pwd)/.cache/go-build GOMODCACHE=$(pwd)/.cache/go-mod go build ./...
-GOCACHE=$(pwd)/.cache/go-build GOMODCACHE=$(pwd)/.cache/go-mod go test ./...
-
-# 2) Root help + command help
-GOCACHE=$(pwd)/.cache/go-build GOMODCACHE=$(pwd)/.cache/go-mod go run ./cmd/agentikube --help
-for c in init up create list ssh down destroy status; do
- GOCACHE=$(pwd)/.cache/go-build GOMODCACHE=$(pwd)/.cache/go-mod go run ./cmd/agentikube "$c" --help >/dev/null
-done
-
-# 3) Manifest generation smoke test
-./agentikube up --dry-run --config agentikube.example.yaml
+make build # compile CLI
+make helm-lint # lint the chart
+make helm-template # dry-run render
+go test ./... # run tests
```
-If those pass, the CLI wiring + config + templating path is working locally.
+## Good to know
-## CI And Auto Release
+- Storage is EFS-only for now
+- `kubectl` must be installed (used by `init` and `ssh`)
+- Fargate is validated in config but templates only cover Karpenter so far
+- [k9s](https://k9scli.io/) is great for browsing sandbox resources
-This repo now has two GitHub Actions workflows:
-- `.github/workflows/ci.yml`
- Runs `go build ./...` and `go test ./...` on PRs and non-main branch pushes.
-- `.github/workflows/release.yml`
- Runs on push to `main`, auto-bumps patch version (`vX.Y.Z`), writes `VERSION`, creates/pushes tag, builds multi-platform binaries, and creates a GitHub Release with artifacts.
+## Context
-## Notes / Current Limits
-
-- `storage.type` currently must be `efs`.
-- `kubectl` must be installed (used by `init` and `ssh`).
-- `compute.type: fargate` is validated, but this repo currently renders templates for the Karpenter path.
-- No Go tests are present yet (`go test ./...` reports no test files).
+[Isolated long-running agents with Kubernetes](https://harivan.sh/thoughts/isolated-long-running-agents-with-kubernetes)
diff --git a/agentikube b/agentikube
new file mode 100755
index 0000000..83f4562
Binary files /dev/null and b/agentikube differ
diff --git a/agentikube.example.yaml b/agentikube.yaml
similarity index 100%
rename from agentikube.example.yaml
rename to agentikube.yaml
diff --git a/chart/agentikube/Chart.yaml b/chart/agentikube/Chart.yaml
new file mode 100644
index 0000000..293a85d
--- /dev/null
+++ b/chart/agentikube/Chart.yaml
@@ -0,0 +1,12 @@
+apiVersion: v2
+name: agentikube
+description: Isolated agent sandboxes on Kubernetes
+type: application
+version: 0.1.0
+appVersion: "0.1.0"
+keywords:
+ - sandbox
+ - agents
+ - kubernetes
+ - karpenter
+ - efs
diff --git a/chart/agentikube/crds/.gitkeep b/chart/agentikube/crds/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/chart/agentikube/templates/NOTES.txt b/chart/agentikube/templates/NOTES.txt
new file mode 100644
index 0000000..1fcd08a
--- /dev/null
+++ b/chart/agentikube/templates/NOTES.txt
@@ -0,0 +1,25 @@
+agentikube has been installed in namespace {{ .Release.Namespace }}.
+
+Resources created:
+ - StorageClass: efs-sandbox (EFS filesystem: {{ .Values.storage.filesystemId }})
+ - SandboxTemplate: sandbox-template
+{{- if .Values.sandbox.warmPool.enabled }}
+ - SandboxWarmPool: sandbox-warm-pool ({{ .Values.sandbox.warmPool.size }} replicas)
+{{- end }}
+{{- if eq .Values.compute.type "karpenter" }}
+ - NodePool: sandbox-pool
+ - EC2NodeClass: sandbox-nodes
+{{- end }}
+ - NetworkPolicy: sandbox-network-policy
+
+To create a sandbox:
+  agentikube create <handle> --provider <provider> --api-key <key>
+
+To list sandboxes:
+ agentikube list
+
+To SSH into a sandbox:
+  agentikube ssh <handle>
+
+To destroy a sandbox:
+  agentikube destroy <handle>
diff --git a/chart/agentikube/templates/_helpers.tpl b/chart/agentikube/templates/_helpers.tpl
new file mode 100644
index 0000000..c5210b9
--- /dev/null
+++ b/chart/agentikube/templates/_helpers.tpl
@@ -0,0 +1,42 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "agentikube.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+*/}}
+{{- define "agentikube.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "agentikube.labels" -}}
+helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{ include "agentikube.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "agentikube.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "agentikube.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
diff --git a/chart/agentikube/templates/karpenter-ec2nodeclass.yaml b/chart/agentikube/templates/karpenter-ec2nodeclass.yaml
new file mode 100644
index 0000000..b2f4959
--- /dev/null
+++ b/chart/agentikube/templates/karpenter-ec2nodeclass.yaml
@@ -0,0 +1,18 @@
+{{- if eq .Values.compute.type "karpenter" }}
+apiVersion: karpenter.k8s.aws/v1
+kind: EC2NodeClass
+metadata:
+ name: sandbox-nodes
+ labels:
+ {{- include "agentikube.labels" . | nindent 4 }}
+spec:
+ amiSelectorTerms:
+ - alias: "al2023@latest"
+ subnetSelectorTerms:
+ - tags:
+ karpenter.sh/discovery: {{ required "compute.clusterName is required for Karpenter" .Values.compute.clusterName | quote }}
+ securityGroupSelectorTerms:
+ - tags:
+ karpenter.sh/discovery: {{ .Values.compute.clusterName | quote }}
+ role: {{ printf "KarpenterNodeRole-%s" .Values.compute.clusterName | quote }}
+{{- end }}
diff --git a/chart/agentikube/templates/karpenter-nodepool.yaml b/chart/agentikube/templates/karpenter-nodepool.yaml
new file mode 100644
index 0000000..4d55cb4
--- /dev/null
+++ b/chart/agentikube/templates/karpenter-nodepool.yaml
@@ -0,0 +1,37 @@
+{{- if eq .Values.compute.type "karpenter" }}
+apiVersion: karpenter.sh/v1
+kind: NodePool
+metadata:
+ name: sandbox-pool
+ labels:
+ {{- include "agentikube.labels" . | nindent 4 }}
+spec:
+ template:
+ spec:
+ requirements:
+ - key: node.kubernetes.io/instance-type
+ operator: In
+ values:
+ {{- range .Values.compute.instanceTypes }}
+ - {{ . }}
+ {{- end }}
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ {{- range .Values.compute.capacityTypes }}
+ - {{ . }}
+ {{- end }}
+ - key: kubernetes.io/arch
+ operator: In
+ values:
+ - amd64
+ nodeClassRef:
+ name: sandbox-nodes
+ group: karpenter.k8s.aws
+ kind: EC2NodeClass
+ limits:
+ cpu: {{ .Values.compute.maxCpu }}
+ memory: {{ .Values.compute.maxMemory }}
+ disruption:
+ consolidationPolicy: {{ if .Values.compute.consolidation }}WhenEmptyOrUnderutilized{{ else }}WhenEmpty{{ end }}
+{{- end }}
diff --git a/chart/agentikube/templates/networkpolicy.yaml b/chart/agentikube/templates/networkpolicy.yaml
new file mode 100644
index 0000000..fbad38e
--- /dev/null
+++ b/chart/agentikube/templates/networkpolicy.yaml
@@ -0,0 +1,28 @@
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: sandbox-network-policy
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "agentikube.labels" . | nindent 4 }}
+spec:
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: sandbox
+ policyTypes:
+ - Ingress
+ {{- if .Values.sandbox.networkPolicy.egressAllowAll }}
+ - Egress
+ {{- end }}
+ {{- if .Values.sandbox.networkPolicy.egressAllowAll }}
+ egress:
+ - to:
+ - ipBlock:
+ cidr: 0.0.0.0/0
+ {{- end }}
+ ingress:
+ {{- range .Values.sandbox.networkPolicy.ingressPorts }}
+ - ports:
+ - port: {{ . }}
+ protocol: TCP
+ {{- end }}
diff --git a/chart/agentikube/templates/sandbox-template.yaml b/chart/agentikube/templates/sandbox-template.yaml
new file mode 100644
index 0000000..2c61361
--- /dev/null
+++ b/chart/agentikube/templates/sandbox-template.yaml
@@ -0,0 +1,57 @@
+apiVersion: extensions.agents.x-k8s.io/v1alpha1
+kind: SandboxTemplate
+metadata:
+ name: sandbox-template
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "agentikube.labels" . | nindent 4 }}
+spec:
+ template:
+ spec:
+ containers:
+ - name: sandbox
+ image: {{ required "sandbox.image is required" .Values.sandbox.image }}
+ ports:
+ {{- range .Values.sandbox.ports }}
+ - containerPort: {{ . }}
+ {{- end }}
+ resources:
+ requests:
+ cpu: {{ .Values.sandbox.resources.requests.cpu }}
+ memory: {{ .Values.sandbox.resources.requests.memory }}
+ limits:
+ cpu: {{ .Values.sandbox.resources.limits.cpu | quote }}
+ memory: {{ .Values.sandbox.resources.limits.memory }}
+ securityContext:
+ runAsUser: {{ .Values.sandbox.securityContext.runAsUser }}
+ runAsGroup: {{ .Values.sandbox.securityContext.runAsGroup }}
+ runAsNonRoot: {{ .Values.sandbox.securityContext.runAsNonRoot }}
+ {{- if .Values.sandbox.env }}
+ env:
+ {{- range $key, $value := .Values.sandbox.env }}
+ - name: {{ $key }}
+ value: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+ startupProbe:
+ tcpSocket:
+ port: {{ .Values.sandbox.probes.port }}
+ failureThreshold: {{ .Values.sandbox.probes.startupFailureThreshold }}
+ periodSeconds: 10
+ readinessProbe:
+ tcpSocket:
+ port: {{ .Values.sandbox.probes.port }}
+ periodSeconds: 10
+ volumeMounts:
+ - name: workspace
+ mountPath: {{ .Values.sandbox.mountPath }}
+ volumeClaimTemplates:
+ - metadata:
+ name: workspace
+ spec:
+ accessModes:
+ - ReadWriteMany
+ storageClassName: efs-sandbox
+ resources:
+ requests:
+ storage: "10Gi"
diff --git a/chart/agentikube/templates/storageclass-efs.yaml b/chart/agentikube/templates/storageclass-efs.yaml
new file mode 100644
index 0000000..8a9c2ff
--- /dev/null
+++ b/chart/agentikube/templates/storageclass-efs.yaml
@@ -0,0 +1,16 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: efs-sandbox
+ labels:
+ {{- include "agentikube.labels" . | nindent 4 }}
+provisioner: efs.csi.aws.com
+parameters:
+ provisioningMode: efs-ap
+ fileSystemId: {{ required "storage.filesystemId is required" .Values.storage.filesystemId }}
+ directoryPerms: "755"
+ uid: {{ .Values.storage.uid | quote }}
+ gid: {{ .Values.storage.gid | quote }}
+ basePath: {{ .Values.storage.basePath }}
+reclaimPolicy: {{ .Values.storage.reclaimPolicy }}
+volumeBindingMode: Immediate
diff --git a/chart/agentikube/templates/warm-pool.yaml b/chart/agentikube/templates/warm-pool.yaml
new file mode 100644
index 0000000..52f726f
--- /dev/null
+++ b/chart/agentikube/templates/warm-pool.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.sandbox.warmPool.enabled }}
+apiVersion: extensions.agents.x-k8s.io/v1alpha1
+kind: SandboxWarmPool
+metadata:
+ name: sandbox-warm-pool
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "agentikube.labels" . | nindent 4 }}
+spec:
+ templateRef:
+ name: sandbox-template
+ replicas: {{ .Values.sandbox.warmPool.size }}
+ ttlMinutes: {{ .Values.sandbox.warmPool.ttlMinutes }}
+{{- end }}
diff --git a/chart/agentikube/values.yaml b/chart/agentikube/values.yaml
new file mode 100644
index 0000000..f40ece8
--- /dev/null
+++ b/chart/agentikube/values.yaml
@@ -0,0 +1,66 @@
+# Compute configuration for sandbox nodes
+compute:
+ # karpenter or fargate
+ type: karpenter
+ instanceTypes:
+ - m6i.xlarge
+ - m5.xlarge
+ - r6i.xlarge
+ capacityTypes:
+ - spot
+ - on-demand
+ maxCpu: 2000
+ maxMemory: 8000Gi
+ consolidation: true
+ # EKS cluster name - used for Karpenter subnet/SG/role discovery
+ clusterName: ""
+
+# Persistent storage configuration
+storage:
+ # efs is the only supported type
+ type: efs
+ # REQUIRED - your EFS filesystem ID
+ filesystemId: ""
+ basePath: /sandboxes
+ uid: 1000
+ gid: 1000
+ reclaimPolicy: Retain
+
+# Sandbox pod configuration
+sandbox:
+ # REQUIRED - container image for sandbox pods
+ image: ""
+ ports:
+ - 18789
+ - 2222
+ - 3000
+ - 5173
+ - 8080
+ mountPath: /home/node/.openclaw
+ resources:
+ requests:
+ cpu: 50m
+ memory: 512Mi
+ limits:
+ cpu: "2"
+ memory: 4Gi
+ env: {}
+ securityContext:
+ runAsUser: 1000
+ runAsGroup: 1000
+ runAsNonRoot: true
+ probes:
+ port: 18789
+ startupFailureThreshold: 30
+ warmPool:
+ enabled: true
+ size: 5
+ ttlMinutes: 120
+ networkPolicy:
+ egressAllowAll: true
+ ingressPorts:
+ - 18789
+ - 2222
+ - 3000
+ - 5173
+ - 8080
diff --git a/chart/agentikube_test.go b/chart/agentikube_test.go
new file mode 100644
index 0000000..9854572
--- /dev/null
+++ b/chart/agentikube_test.go
@@ -0,0 +1,223 @@
+package chart_test
+
+import (
+ "os"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+// helmTemplate runs helm template with the given extra args and returns stdout.
+func helmTemplate(t *testing.T, extraArgs ...string) string {
+ t.Helper()
+ args := []string{
+ "template", "agentikube", "chart/agentikube/",
+ "--namespace", "sandboxes",
+ "--set", "storage.filesystemId=fs-test",
+ "--set", "sandbox.image=test:latest",
+ "--set", "compute.clusterName=test-cluster",
+ }
+ args = append(args, extraArgs...)
+ cmd := exec.Command("helm", args...)
+ cmd.Dir = repoRoot(t)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("helm template failed: %v\n%s", err, out)
+ }
+ return string(out)
+}
+
+func repoRoot(t *testing.T) string {
+ t.Helper()
+ dir, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // This test file lives at chart/agentikube_test.go, so repo root is ..
+ return dir + "/.."
+}
+
+func TestHelmLint(t *testing.T) {
+ cmd := exec.Command("helm", "lint", "chart/agentikube/")
+ cmd.Dir = repoRoot(t)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("helm lint failed: %v\n%s", err, out)
+ }
+ if !strings.Contains(string(out), "0 chart(s) failed") {
+ t.Fatalf("helm lint reported failures:\n%s", out)
+ }
+}
+
+func TestHelmTemplateDefaultValues(t *testing.T) {
+ output := helmTemplate(t)
+
+ expected := []string{
+ "kind: StorageClass",
+ "kind: SandboxTemplate",
+ "kind: SandboxWarmPool",
+ "kind: NodePool",
+ "kind: EC2NodeClass",
+ "kind: NetworkPolicy",
+ }
+ for _, want := range expected {
+ if !strings.Contains(output, want) {
+ t.Errorf("expected %q in rendered output", want)
+ }
+ }
+}
+
+func TestHelmTemplateLabels(t *testing.T) {
+ output := helmTemplate(t)
+
+ labels := []string{
+ "helm.sh/chart: agentikube-0.1.0",
+ "app.kubernetes.io/name: agentikube",
+ "app.kubernetes.io/instance: agentikube",
+ "app.kubernetes.io/managed-by: Helm",
+ `app.kubernetes.io/version: "0.1.0"`,
+ }
+ for _, label := range labels {
+ if !strings.Contains(output, label) {
+ t.Errorf("expected label %q in rendered output", label)
+ }
+ }
+}
+
+func TestHelmTemplateKarpenterDisabled(t *testing.T) {
+ output := helmTemplate(t, "--set", "compute.type=fargate")
+
+ if strings.Contains(output, "kind: NodePool") {
+ t.Error("NodePool should not be rendered when compute.type=fargate")
+ }
+ if strings.Contains(output, "kind: EC2NodeClass") {
+ t.Error("EC2NodeClass should not be rendered when compute.type=fargate")
+ }
+ if !strings.Contains(output, "kind: StorageClass") {
+ t.Error("StorageClass should always be rendered")
+ }
+ if !strings.Contains(output, "kind: SandboxTemplate") {
+ t.Error("SandboxTemplate should always be rendered")
+ }
+}
+
+func TestHelmTemplateWarmPoolDisabled(t *testing.T) {
+ output := helmTemplate(t, "--set", "sandbox.warmPool.enabled=false")
+
+ if strings.Contains(output, "kind: SandboxWarmPool") {
+ t.Error("SandboxWarmPool should not be rendered when warmPool.enabled=false")
+ }
+ if !strings.Contains(output, "kind: SandboxTemplate") {
+ t.Error("SandboxTemplate should always be rendered")
+ }
+}
+
+func TestHelmTemplateEgressDisabled(t *testing.T) {
+ output := helmTemplate(t,
+ "--set", "sandbox.networkPolicy.egressAllowAll=false",
+ "-s", "templates/networkpolicy.yaml",
+ )
+
+ if strings.Contains(output, "0.0.0.0/0") {
+ t.Error("egress CIDR should not appear when egressAllowAll=false")
+ }
+ lines := strings.Split(output, "\n")
+ for i, line := range lines {
+ if strings.Contains(line, "policyTypes:") {
+ block := strings.Join(lines[i:min(i+4, len(lines))], "\n")
+ if strings.Contains(block, "Egress") {
+ t.Error("Egress should not be in policyTypes when egressAllowAll=false")
+ }
+ }
+ }
+}
+
+func TestHelmTemplateRequiredValues(t *testing.T) {
+ tests := []struct {
+ name string
+ args []string
+ wantErr string
+ }{
+ {
+ name: "missing filesystemId",
+ args: []string{"--set", "sandbox.image=test:latest", "--set", "compute.clusterName=test"},
+ wantErr: "storage.filesystemId is required",
+ },
+ {
+ name: "missing sandbox image",
+ args: []string{"--set", "storage.filesystemId=fs-test", "--set", "compute.clusterName=test"},
+ wantErr: "sandbox.image is required",
+ },
+ {
+ name: "missing clusterName for karpenter",
+ args: []string{"--set", "storage.filesystemId=fs-test", "--set", "sandbox.image=test:latest"},
+ wantErr: "compute.clusterName is required for Karpenter",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ args := append([]string{
+ "template", "agentikube", "chart/agentikube/",
+ "--namespace", "sandboxes",
+ }, tt.args...)
+ cmd := exec.Command("helm", args...)
+ cmd.Dir = repoRoot(t)
+ out, err := cmd.CombinedOutput()
+ if err == nil {
+ t.Fatal("expected helm template to fail for missing required value")
+ }
+ if !strings.Contains(string(out), tt.wantErr) {
+ t.Errorf("expected error containing %q, got:\n%s", tt.wantErr, out)
+ }
+ })
+ }
+}
+
+func TestHelmTemplateEnvVars(t *testing.T) {
+ output := helmTemplate(t,
+ "--set", "sandbox.env.MY_VAR=my-value",
+ "-s", "templates/sandbox-template.yaml",
+ )
+
+ if !strings.Contains(output, "MY_VAR") {
+ t.Error("expected MY_VAR in rendered env")
+ }
+ if !strings.Contains(output, "my-value") {
+ t.Error("expected my-value in rendered env")
+ }
+}
+
+func TestHelmTemplateNoEnvWhenEmpty(t *testing.T) {
+ output := helmTemplate(t, "-s", "templates/sandbox-template.yaml")
+
+ lines := strings.Split(output, "\n")
+ for _, line := range lines {
+ trimmed := strings.TrimSpace(line)
+ if trimmed == "env:" {
+ t.Error("env: block should not appear when sandbox.env is empty")
+ }
+ }
+}
+
+func TestHelmTemplateNamespace(t *testing.T) {
+ output := helmTemplate(t, "--namespace", "custom-ns")
+
+ if !strings.Contains(output, "namespace: custom-ns") {
+ t.Error("expected namespace: custom-ns in rendered output")
+ }
+}
+
+func TestHelmTemplateConsolidationDisabled(t *testing.T) {
+ output := helmTemplate(t,
+ "--set", "compute.consolidation=false",
+ "-s", "templates/karpenter-nodepool.yaml",
+ )
+
+ if !strings.Contains(output, "consolidationPolicy: WhenEmpty") {
+ t.Error("expected consolidationPolicy: WhenEmpty when consolidation=false")
+ }
+ if strings.Contains(output, "WhenEmptyOrUnderutilized") {
+ t.Error("should not have WhenEmptyOrUnderutilized when consolidation=false")
+ }
+}
diff --git a/internal/commands/create.go b/internal/commands/create.go
index 1f38d21..5274594 100644
--- a/internal/commands/create.go
+++ b/internal/commands/create.go
@@ -9,7 +9,6 @@ import (
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
- "k8s.io/apimachinery/pkg/runtime/schema"
)
func NewCreateCmd() *cobra.Command {
@@ -55,7 +54,7 @@ func NewCreateCmd() *cobra.Command {
},
}
- secretGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
+ secretGVR := coreGVR("secrets")
_, err = client.Dynamic().Resource(secretGVR).Namespace(ns).Create(ctx, secret, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("creating secret %q: %w", name, err)
@@ -65,7 +64,7 @@ func NewCreateCmd() *cobra.Command {
// Create the SandboxClaim
claim := &unstructured.Unstructured{
Object: map[string]interface{}{
- "apiVersion": "agentsandbox.dev/v1",
+ "apiVersion": "extensions.agents.x-k8s.io/v1alpha1",
"kind": "SandboxClaim",
"metadata": map[string]interface{}{
"name": name,
@@ -82,12 +81,7 @@ func NewCreateCmd() *cobra.Command {
},
}
- claimGVR := schema.GroupVersionResource{
- Group: "agentsandbox.dev",
- Version: "v1",
- Resource: "sandboxclaims",
- }
- _, err = client.Dynamic().Resource(claimGVR).Namespace(ns).Create(ctx, claim, metav1.CreateOptions{})
+ _, err = client.Dynamic().Resource(sandboxClaimGVR).Namespace(ns).Create(ctx, claim, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("creating SandboxClaim %q: %w", name, err)
}
@@ -98,7 +92,7 @@ func NewCreateCmd() *cobra.Command {
waitCtx, cancel := context.WithTimeout(ctx, 3*time.Minute)
defer cancel()
- if err := client.WaitForReady(waitCtx, ns, "sandboxclaims", name); err != nil {
+ if err := client.WaitForReady(waitCtx, ns, sandboxClaimGVR, name); err != nil {
return fmt.Errorf("waiting for sandbox: %w", err)
}
diff --git a/internal/commands/destroy.go b/internal/commands/destroy.go
index 0175529..c235e19 100644
--- a/internal/commands/destroy.go
+++ b/internal/commands/destroy.go
@@ -9,9 +9,8 @@ import (
"github.com/rathi/agentikube/internal/kube"
"github.com/spf13/cobra"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func NewDestroyCmd() *cobra.Command {
@@ -50,17 +49,11 @@ func NewDestroyCmd() *cobra.Command {
ns := cfg.Namespace
name := "sandbox-" + handle
- claimGVR := schema.GroupVersionResource{
- Group: "agentsandbox.dev",
- Version: "v1",
- Resource: "sandboxclaims",
- }
-
- secretGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
- pvcGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"}
+ secretGVR := coreGVR("secrets")
+ pvcGVR := coreGVR("persistentvolumeclaims")
// Delete SandboxClaim
- err = client.Dynamic().Resource(claimGVR).Namespace(ns).Delete(ctx, name, metav1.DeleteOptions{})
+ err = client.Dynamic().Resource(sandboxClaimGVR).Namespace(ns).Delete(ctx, name, metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("deleting SandboxClaim %q: %w", name, err)
}
diff --git a/internal/commands/down.go b/internal/commands/down.go
index dada100..d5c765f 100644
--- a/internal/commands/down.go
+++ b/internal/commands/down.go
@@ -7,7 +7,6 @@ import (
"github.com/rathi/agentikube/internal/kube"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
)
func NewDownCmd() *cobra.Command {
@@ -30,26 +29,14 @@ func NewDownCmd() *cobra.Command {
ns := cfg.Namespace
- warmPoolGVR := schema.GroupVersionResource{
- Group: "agentsandbox.dev",
- Version: "v1",
- Resource: "sandboxwarmpools",
- }
-
- templateGVR := schema.GroupVersionResource{
- Group: "agentsandbox.dev",
- Version: "v1",
- Resource: "sandboxtemplates",
- }
-
- err = client.Dynamic().Resource(warmPoolGVR).Namespace(ns).Delete(ctx, "sandbox-warm-pool", metav1.DeleteOptions{})
+ err = client.Dynamic().Resource(sandboxWarmPoolGVR).Namespace(ns).Delete(ctx, "sandbox-warm-pool", metav1.DeleteOptions{})
if err != nil {
fmt.Printf("[warn] could not delete SandboxWarmPool: %v\n", err)
} else {
fmt.Println("[ok] SandboxWarmPool deleted")
}
- err = client.Dynamic().Resource(templateGVR).Namespace(ns).Delete(ctx, "sandbox-template", metav1.DeleteOptions{})
+ err = client.Dynamic().Resource(sandboxTemplateGVR).Namespace(ns).Delete(ctx, "sandbox-template", metav1.DeleteOptions{})
if err != nil {
fmt.Printf("[warn] could not delete SandboxTemplate: %v\n", err)
} else {
diff --git a/internal/commands/helpers.go b/internal/commands/helpers.go
index c7d1093..ad7cbfc 100644
--- a/internal/commands/helpers.go
+++ b/internal/commands/helpers.go
@@ -3,8 +3,31 @@ package commands
import (
"github.com/rathi/agentikube/internal/config"
"github.com/spf13/cobra"
+ "k8s.io/apimachinery/pkg/runtime/schema"
)
+var (
+ sandboxClaimGVR = schema.GroupVersionResource{
+ Group: "extensions.agents.x-k8s.io",
+ Version: "v1alpha1",
+ Resource: "sandboxclaims",
+ }
+ sandboxTemplateGVR = schema.GroupVersionResource{
+ Group: "extensions.agents.x-k8s.io",
+ Version: "v1alpha1",
+ Resource: "sandboxtemplates",
+ }
+ sandboxWarmPoolGVR = schema.GroupVersionResource{
+ Group: "extensions.agents.x-k8s.io",
+ Version: "v1alpha1",
+ Resource: "sandboxwarmpools",
+ }
+)
+
+func coreGVR(resource string) schema.GroupVersionResource {
+ return schema.GroupVersionResource{Group: "", Version: "v1", Resource: resource}
+}
+
func loadConfig(cmd *cobra.Command) (*config.Config, error) {
cfgPath, _ := cmd.Flags().GetString("config")
return config.Load(cfgPath)
diff --git a/internal/commands/init.go b/internal/commands/init.go
index 95ff11c..208f8d6 100644
--- a/internal/commands/init.go
+++ b/internal/commands/init.go
@@ -11,7 +11,14 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-const crdInstallURL = "https://raw.githubusercontent.com/agent-sandbox/agent-sandbox/main/deploy/install.yaml"
+const crdBaseURL = "https://raw.githubusercontent.com/kubernetes-sigs/agent-sandbox/main/k8s/crds/"
+
+var crdFiles = []string{
+ "agents.x-k8s.io_sandboxes.yaml",
+ "extensions.agents.x-k8s.io_sandboxclaims.yaml",
+ "extensions.agents.x-k8s.io_sandboxtemplates.yaml",
+ "extensions.agents.x-k8s.io_sandboxwarmpools.yaml",
+}
func NewInitCmd() *cobra.Command {
cmd := &cobra.Command{
@@ -35,9 +42,12 @@ func NewInitCmd() *cobra.Command {
// Apply agent-sandbox CRDs
fmt.Println("applying agent-sandbox CRDs...")
- out, err := exec.CommandContext(ctx, "kubectl", "apply", "-f", crdInstallURL).CombinedOutput()
- if err != nil {
- return fmt.Errorf("applying CRDs: %s: %w", strings.TrimSpace(string(out)), err)
+ for _, f := range crdFiles {
+ url := crdBaseURL + f
+ out, err := exec.CommandContext(ctx, "kubectl", "apply", "-f", url).CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("applying CRD %s: %s: %w", f, strings.TrimSpace(string(out)), err)
+ }
}
fmt.Println("[ok] agent-sandbox CRDs applied")
diff --git a/internal/commands/list.go b/internal/commands/list.go
index 92605ce..fcbe996 100644
--- a/internal/commands/list.go
+++ b/internal/commands/list.go
@@ -10,7 +10,6 @@ import (
"github.com/rathi/agentikube/internal/kube"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
)
func NewListCmd() *cobra.Command {
@@ -31,13 +30,7 @@ func NewListCmd() *cobra.Command {
return fmt.Errorf("connecting to cluster: %w", err)
}
- claimGVR := schema.GroupVersionResource{
- Group: "agentsandbox.dev",
- Version: "v1",
- Resource: "sandboxclaims",
- }
-
- list, err := client.Dynamic().Resource(claimGVR).Namespace(cfg.Namespace).List(ctx, metav1.ListOptions{})
+ list, err := client.Dynamic().Resource(sandboxClaimGVR).Namespace(cfg.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
return fmt.Errorf("listing SandboxClaims: %w", err)
}
@@ -114,7 +107,7 @@ func extractPodName(obj map[string]interface{}) string {
if ok {
annotations, ok := metadata["annotations"].(map[string]interface{})
if ok {
- if podName, ok := annotations["agentsandbox.dev/pod-name"].(string); ok {
+ if podName, ok := annotations["agents.x-k8s.io/pod-name"].(string); ok {
return podName
}
}
diff --git a/internal/commands/ssh.go b/internal/commands/ssh.go
index c52d1b1..5e9b9f0 100644
--- a/internal/commands/ssh.go
+++ b/internal/commands/ssh.go
@@ -7,7 +7,6 @@ import (
"github.com/rathi/agentikube/internal/kube"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
)
func NewSSHCmd() *cobra.Command {
@@ -33,13 +32,7 @@ func NewSSHCmd() *cobra.Command {
ns := cfg.Namespace
name := "sandbox-" + handle
- claimGVR := schema.GroupVersionResource{
- Group: "agentsandbox.dev",
- Version: "v1",
- Resource: "sandboxclaims",
- }
-
- claim, err := client.Dynamic().Resource(claimGVR).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
+ claim, err := client.Dynamic().Resource(sandboxClaimGVR).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("getting SandboxClaim %q: %w", name, err)
}
diff --git a/internal/commands/status.go b/internal/commands/status.go
index c87c49f..b870ad0 100644
--- a/internal/commands/status.go
+++ b/internal/commands/status.go
@@ -7,7 +7,6 @@ import (
"github.com/rathi/agentikube/internal/kube"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
)
func NewStatusCmd() *cobra.Command {
@@ -31,13 +30,7 @@ func NewStatusCmd() *cobra.Command {
ns := cfg.Namespace
// Warm pool status
- warmPoolGVR := schema.GroupVersionResource{
- Group: "agentsandbox.dev",
- Version: "v1",
- Resource: "sandboxwarmpools",
- }
-
- wp, err := client.Dynamic().Resource(warmPoolGVR).Namespace(ns).Get(ctx, "sandbox-warm-pool", metav1.GetOptions{})
+ wp, err := client.Dynamic().Resource(sandboxWarmPoolGVR).Namespace(ns).Get(ctx, "sandbox-warm-pool", metav1.GetOptions{})
if err != nil {
fmt.Printf("warm pool: not found (%v)\n", err)
} else {
@@ -55,13 +48,7 @@ func NewStatusCmd() *cobra.Command {
}
// Sandbox count
- claimGVR := schema.GroupVersionResource{
- Group: "agentsandbox.dev",
- Version: "v1",
- Resource: "sandboxclaims",
- }
-
- claims, err := client.Dynamic().Resource(claimGVR).Namespace(ns).List(ctx, metav1.ListOptions{})
+ claims, err := client.Dynamic().Resource(sandboxClaimGVR).Namespace(ns).List(ctx, metav1.ListOptions{})
if err != nil {
fmt.Printf("\nsandboxes: error listing (%v)\n", err)
} else {
diff --git a/internal/commands/up.go b/internal/commands/up.go
index ab95a3e..70a3a01 100644
--- a/internal/commands/up.go
+++ b/internal/commands/up.go
@@ -46,7 +46,7 @@ func NewUpCmd() *cobra.Command {
if cfg.Sandbox.WarmPool.Enabled {
fmt.Println("waiting for warm pool to become ready...")
- if err := client.WaitForReady(ctx, cfg.Namespace, "sandboxwarmpools", "sandbox-warm-pool"); err != nil {
+ if err := client.WaitForReady(ctx, cfg.Namespace, sandboxWarmPoolGVR, "sandbox-warm-pool"); err != nil {
return fmt.Errorf("waiting for warm pool: %w", err)
}
fmt.Println("[ok] warm pool ready")
diff --git a/internal/kube/wait.go b/internal/kube/wait.go
index 4d31ee3..48691d1 100644
--- a/internal/kube/wait.go
+++ b/internal/kube/wait.go
@@ -10,34 +10,27 @@ import (
"k8s.io/apimachinery/pkg/watch"
)
-// WaitForReady watches a resource in the agentsandbox.dev/v1 group until its
-// Ready condition becomes True or the context is cancelled/times out.
-// The resource parameter is the plural resource name (e.g. "sandboxclaims", "sandboxwarmpools").
-func (c *Client) WaitForReady(ctx context.Context, namespace, resource, name string) error {
- gvr := schema.GroupVersionResource{
- Group: "agentsandbox.dev",
- Version: "v1",
- Resource: resource,
- }
-
+// WaitForReady watches a resource until its Ready condition becomes True
+// or the context is cancelled/times out.
+func (c *Client) WaitForReady(ctx context.Context, namespace string, gvr schema.GroupVersionResource, name string) error {
watcher, err := c.Dynamic().Resource(gvr).Namespace(namespace).Watch(ctx, metav1.ListOptions{
FieldSelector: fmt.Sprintf("metadata.name=%s", name),
})
if err != nil {
- return fmt.Errorf("watching %s %s/%s: %w", resource, namespace, name, err)
+ return fmt.Errorf("watching %s %s/%s: %w", gvr.Resource, namespace, name, err)
}
defer watcher.Stop()
for {
select {
case <-ctx.Done():
- return fmt.Errorf("timed out waiting for %s %s/%s to become ready", resource, namespace, name)
+ return fmt.Errorf("timed out waiting for %s %s/%s to become ready", gvr.Resource, namespace, name)
case event, ok := <-watcher.ResultChan():
if !ok {
- return fmt.Errorf("watch channel closed for %s %s/%s", resource, namespace, name)
+ return fmt.Errorf("watch channel closed for %s %s/%s", gvr.Resource, namespace, name)
}
if event.Type == watch.Error {
- return fmt.Errorf("watch error for %s %s/%s", resource, namespace, name)
+ return fmt.Errorf("watch error for %s %s/%s", gvr.Resource, namespace, name)
}
obj, ok := event.Object.(*unstructured.Unstructured)
diff --git a/internal/manifest/templates/sandbox-template.yaml.tmpl b/internal/manifest/templates/sandbox-template.yaml.tmpl
index a968a63..848c8a3 100644
--- a/internal/manifest/templates/sandbox-template.yaml.tmpl
+++ b/internal/manifest/templates/sandbox-template.yaml.tmpl
@@ -1,4 +1,4 @@
-apiVersion: agentsandbox.dev/v1
+apiVersion: extensions.agents.x-k8s.io/v1alpha1
kind: SandboxTemplate
metadata:
name: sandbox-template
diff --git a/internal/manifest/templates/warm-pool.yaml.tmpl b/internal/manifest/templates/warm-pool.yaml.tmpl
index d030490..24c16d9 100644
--- a/internal/manifest/templates/warm-pool.yaml.tmpl
+++ b/internal/manifest/templates/warm-pool.yaml.tmpl
@@ -1,4 +1,4 @@
-apiVersion: agentsandbox.dev/v1
+apiVersion: extensions.agents.x-k8s.io/v1alpha1
kind: SandboxWarmPool
metadata:
name: sandbox-warm-pool
diff --git a/scripts/download-crds.sh b/scripts/download-crds.sh
new file mode 100755
index 0000000..f4090be
--- /dev/null
+++ b/scripts/download-crds.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# Download agent-sandbox CRDs into chart/agentikube/crds/
+# Run this before packaging the chart: make crds
+
+REPO="kubernetes-sigs/agent-sandbox"
+BRANCH="main"
+BASE_URL="https://raw.githubusercontent.com/${REPO}/${BRANCH}/k8s/crds"
+DEST="$(cd "$(dirname "$0")/.." && pwd)/chart/agentikube/crds"
+
+# NOTE: keep this list in sync with crdFiles in internal/commands/init.go.
+CRDS=(
+  agents.x-k8s.io_sandboxes.yaml
+  extensions.agents.x-k8s.io_sandboxclaims.yaml
+  extensions.agents.x-k8s.io_sandboxtemplates.yaml
+  extensions.agents.x-k8s.io_sandboxwarmpools.yaml
+)
+
+echo "Downloading CRDs from ${REPO}@${BRANCH} ..."
+mkdir -p "$DEST"
+
+for crd in "${CRDS[@]}"; do
+  echo "  ${crd}"
+  curl -sSfL "${BASE_URL}/${crd}" -o "${DEST}/${crd}"
+done
+
+echo "CRDs written to ${DEST}"