diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 904259d..d4df1c8 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -20,22 +20,8 @@ jobs:
with:
go-version-file: go.mod
- - name: Setup Helm
- uses: azure/setup-helm@v4
-
- name: Build
run: go build ./...
- name: Test
run: go test ./...
-
- - name: Helm lint
- run: helm lint chart/agentikube/
-
- - name: Helm template
- run: |
- helm template agentikube chart/agentikube/ \
- --namespace sandboxes \
- --set storage.filesystemId=fs-test \
- --set sandbox.image=test:latest \
- --set compute.clusterName=test-cluster
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index b754e7b..bbf975f 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -9,7 +9,6 @@ on:
permissions:
contents: write
- packages: write
concurrency:
group: release-${{ github.ref }}
@@ -30,9 +29,6 @@ jobs:
with:
go-version-file: go.mod
- - name: Setup Helm
- uses: azure/setup-helm@v4
-
- name: Compute next version
id: version
shell: bash
@@ -97,17 +93,3 @@ jobs:
name: ${{ steps.version.outputs.next_tag }}
generate_release_notes: true
files: dist/*
-
- - name: Set chart version
- run: |
- sed -i "s/^version:.*/version: ${{ steps.version.outputs.version }}/" chart/agentikube/Chart.yaml
- sed -i "s/^appVersion:.*/appVersion: \"${{ steps.version.outputs.version }}\"/" chart/agentikube/Chart.yaml
-
- - name: Package Helm chart
- run: helm package chart/agentikube/ --destination .helm-pkg
-
- - name: Log in to GHCR
- run: echo "${{ secrets.GITHUB_TOKEN }}" | helm registry login ghcr.io -u ${{ github.actor }} --password-stdin
-
- - name: Push Helm chart to GHCR
- run: helm push .helm-pkg/agentikube-${{ steps.version.outputs.version }}.tgz oci://ghcr.io/${{ github.repository_owner }}
diff --git a/.gitignore b/.gitignore
index 16d3c4d..6198882 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
.cache
+agentikube
diff --git a/Makefile b/Makefile
index f68ed93..160de4a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: build install clean fmt vet lint crds helm-lint helm-template
+.PHONY: build install clean fmt vet lint
build:
go build -o agentikube ./cmd/agentikube
@@ -16,16 +16,3 @@ vet:
go vet ./...
lint: fmt vet
-
-crds:
- ./scripts/download-crds.sh
-
-helm-lint:
- helm lint chart/agentikube/
-
-helm-template:
- helm template agentikube chart/agentikube/ \
- --namespace sandboxes \
- --set storage.filesystemId=fs-test \
- --set sandbox.image=test:latest \
- --set compute.clusterName=test-cluster
diff --git a/README.md b/README.md
index d566e76..96accd1 100644
--- a/README.md
+++ b/README.md
@@ -1,85 +1,167 @@
# agentikube
-
[](https://github.com/harivansh-afk/agentikube/blob/main/go.mod)
-[](https://github.com/harivansh-afk/agentikube/tree/main/chart/agentikube)
-[](https://github.com/harivansh-afk/agentikube/releases/latest)
+[](https://github.com/harivansh-afk/agentikube)
+[](https://github.com/harivansh-afk/agentikube/actions/workflows/release.yml)
-Isolated stateful agent sandboxes on Kubernetes
+This repo is a small Go CLI for running isolated agent sandboxes on Kubernetes.
-
+The main job of `agentikube` is:
+- set up shared sandbox infra (`init`, `up`)
+- create one sandbox per user/handle (`create`)
+- let you inspect and access sandboxes (`list`, `status`, `ssh`)
+- clean up sandboxes or shared infra (`destroy`, `down`)
-## Install
+It is built for AWS-style setups (EFS + optional Karpenter).
-```bash
-helm install agentikube oci://ghcr.io/harivansh-afk/agentikube \
- -n sandboxes --create-namespace \
- -f my-values.yaml
+## What This Stands Up
+
+When you run `up`, it renders and applies Kubernetes manifests from templates.
+
+Core resources:
+- `Namespace`
+- `StorageClass` (`efs-sandbox`, provisioner `efs.csi.aws.com`)
+- `SandboxTemplate` (`sandbox-template`)
+
+Optional resources:
+- `SandboxWarmPool` (if `sandbox.warmPool.enabled: true`)
+- `NodePool` + `EC2NodeClass` (if `compute.type: karpenter`)
+
+When you run `create <handle>`, it creates:
+- `Secret` (`sandbox-<handle>`) with provider credentials
+- `SandboxClaim` (`sandbox-<handle>`) that points to `sandbox-template`
+- PVC from template (`workspace` volume claim template)
+
+## Repository Layout
+
+```text
+cmd/agentikube/main.go # CLI entrypoint + subcommand wiring
+internal/config/ # config structs + validation/defaults
+internal/manifest/ # template rendering
+internal/manifest/templates/ # Kubernetes YAML templates
+internal/kube/ # kube client, apply, wait, exec helpers
+internal/commands/ # command implementations
+agentikube.example.yaml # example config you copy to agentikube.yaml
+Makefile # build/install/fmt/vet targets
```
-Create a `my-values.yaml` with your cluster details:
+## How It Works (Simple Flow)
-```yaml
-compute:
- clusterName: my-eks-cluster
-storage:
- filesystemId: fs-0123456789abcdef0
-sandbox:
- image: my-registry/sandbox:latest
+```mermaid
+flowchart TD
+ A[agentikube command] --> B[Load agentikube.yaml]
+ B --> C[Validate config + apply defaults]
+ C --> D{Command}
+ D -->|init| E[Install CRDs + check prereqs + ensure namespace]
+ D -->|up| F[Render templates -> server-side apply]
+ D -->|create| G[Create Secret + SandboxClaim]
+ G --> H[Watch SandboxClaim until Ready]
+ D -->|list/status| I[Read SandboxClaim/WarmPool state]
+ D -->|ssh| J[Resolve pod name -> kubectl exec -it]
+ D -->|destroy| K[Delete SandboxClaim + Secret + best-effort PVC]
+ D -->|down| L[Delete warm pool + template, keep user sandboxes]
```
-See [`values.yaml`](chart/agentikube/values.yaml) for all options.
+## Resource Diagram (Abilities + Resources)
-## CLI
+```mermaid
+flowchart LR
+ CLI[agentikube CLI] --> K8S[Kubernetes API]
+ CLI --> KUBECTL[kubectl binary]
-The Go CLI handles runtime operations that are inherently imperative:
+ K8S --> NS[Namespace]
+ K8S --> SC[StorageClass efs-sandbox]
+ K8S --> ST[SandboxTemplate]
+ K8S --> WP[SandboxWarmPool]
+ K8S --> NP[NodePool]
+ K8S --> ENC[EC2NodeClass]
+
+ K8S --> CLAIM[SandboxClaim per user]
+ K8S --> SECRET[Secret per user]
+ CLAIM --> POD[Sandbox Pod]
+ POD --> PVC[Workspace PVC]
+ PVC --> SC
+ SC --> EFS[(AWS EFS)]
+
+ NP --> EC2[(EC2 nodes via Karpenter)]
+ ENC --> EC2
+```
+
+## Commands
+
+- `agentikube init`
+ Installs agent-sandbox CRDs, checks for EFS CSI/Karpenter, and ensures namespace exists.
+- `agentikube up [--dry-run]`
+ Renders manifests and applies them with server-side apply. `--dry-run` prints YAML only.
+- `agentikube create <handle> --provider <provider> --api-key <key>`
+ Creates per-user Secret + SandboxClaim and waits (up to 3 minutes) for Ready.
+- `agentikube list`
+ Shows handle, status, age, and pod name for all sandbox claims.
+- `agentikube ssh <handle>`
+ Finds the sandbox pod and opens `/bin/sh` using `kubectl exec -it`.
+- `agentikube destroy <handle> [--yes]`
+ Deletes SandboxClaim + Secret + best-effort PVC for that handle.
+- `agentikube down`
+ Deletes shared warm pool/template infra but preserves existing user sandboxes.
+- `agentikube status`
+ Prints warm pool numbers, sandbox count, and Karpenter node count (if enabled).
+
+## Quick Start
+
+1. Copy config:
```bash
+cp agentikube.example.yaml agentikube.yaml
+```
+
+2. Fill your values in `agentikube.yaml`:
+- namespace
+- EFS filesystem ID / base path
+- sandbox image
+- compute settings
+
+3. Run:
+
+```bash
+agentikube init
+agentikube up
agentikube create demo --provider openai --api-key
agentikube list
agentikube ssh demo
-agentikube status
-agentikube destroy demo
```
-Build it with `go build ./cmd/agentikube` or `make build`.
+## Test CLI Locally
-## What gets created
-
-The Helm chart installs:
-
-- StorageClass (`efs-sandbox`) backed by your EFS filesystem
-- SandboxTemplate defining the pod spec
-- NetworkPolicy for ingress/egress rules
-- SandboxWarmPool (optional, enabled by default)
-- Karpenter NodePool + EC2NodeClass (optional, when `compute.type: karpenter`)
-
-Each `agentikube create ` then adds a Secret, SandboxClaim, and workspace PVC for that user.
-
-## Project layout
-
-```
-cmd/agentikube/ CLI entrypoint
-internal/ config, manifest rendering, kube helpers
-chart/agentikube/ Helm chart
-scripts/ CRD download helper
-```
-
-## Development
+Use this exact flow to verify the CLI on your machine:
```bash
-make build # compile CLI
-make helm-lint # lint the chart
-make helm-template # dry-run render
-go test ./... # run tests
+# 1) Build + tests
+mkdir -p .cache/go-build .cache/go-mod
+GOCACHE=$(pwd)/.cache/go-build GOMODCACHE=$(pwd)/.cache/go-mod go build ./...
+GOCACHE=$(pwd)/.cache/go-build GOMODCACHE=$(pwd)/.cache/go-mod go test ./...
+
+# 2) Root help + command help
+GOCACHE=$(pwd)/.cache/go-build GOMODCACHE=$(pwd)/.cache/go-mod go run ./cmd/agentikube --help
+for c in init up create list ssh down destroy status; do
+ GOCACHE=$(pwd)/.cache/go-build GOMODCACHE=$(pwd)/.cache/go-mod go run ./cmd/agentikube "$c" --help >/dev/null
+done
+
+# 3) Manifest generation smoke test
+./agentikube up --dry-run --config agentikube.example.yaml
```
-## Good to know
+If those pass, the CLI wiring + config + templating path is working locally.
-- Storage is EFS-only for now
-- `kubectl` must be installed (used by `init` and `ssh`)
-- Fargate is validated in config but templates only cover Karpenter so far
-- [k9s](https://k9scli.io/) is great for browsing sandbox resources
+## CI And Auto Release
-## Context
+This repo now has two GitHub Actions workflows:
+- `.github/workflows/ci.yml`
+ Runs `go build ./...` and `go test ./...` on PRs and non-main branch pushes.
+- `.github/workflows/release.yml`
+ Runs on push to `main`, auto-bumps patch version (`vX.Y.Z`), writes `VERSION`, creates/pushes tag, builds multi-platform binaries, and creates a GitHub Release with artifacts.
-(https://harivan.sh/thoughts/isolated-long-running-agents-with-kubernetes)
+## Notes / Current Limits
+
+- `storage.type` currently must be `efs`.
+- `kubectl` must be installed (used by `init` and `ssh`).
+- `compute.type: fargate` is validated, but this repo currently renders templates for the Karpenter path.
+- No Go tests are present yet (`go test ./...` reports no test files).
diff --git a/agentikube b/agentikube
deleted file mode 100755
index 83f4562..0000000
Binary files a/agentikube and /dev/null differ
diff --git a/chart/agentikube/Chart.yaml b/chart/agentikube/Chart.yaml
deleted file mode 100644
index 293a85d..0000000
--- a/chart/agentikube/Chart.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-apiVersion: v2
-name: agentikube
-description: Isolated agent sandboxes on Kubernetes
-type: application
-version: 0.1.0
-appVersion: "0.1.0"
-keywords:
- - sandbox
- - agents
- - kubernetes
- - karpenter
- - efs
diff --git a/chart/agentikube/crds/.gitkeep b/chart/agentikube/crds/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/chart/agentikube/templates/NOTES.txt b/chart/agentikube/templates/NOTES.txt
deleted file mode 100644
index 1fcd08a..0000000
--- a/chart/agentikube/templates/NOTES.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-agentikube has been installed in namespace {{ .Release.Namespace }}.
-
-Resources created:
- - StorageClass: efs-sandbox (EFS filesystem: {{ .Values.storage.filesystemId }})
- - SandboxTemplate: sandbox-template
-{{- if .Values.sandbox.warmPool.enabled }}
- - SandboxWarmPool: sandbox-warm-pool ({{ .Values.sandbox.warmPool.size }} replicas)
-{{- end }}
-{{- if eq .Values.compute.type "karpenter" }}
- - NodePool: sandbox-pool
- - EC2NodeClass: sandbox-nodes
-{{- end }}
- - NetworkPolicy: sandbox-network-policy
-
-To create a sandbox:
- agentikube create --provider --api-key
-
-To list sandboxes:
- agentikube list
-
-To SSH into a sandbox:
- agentikube ssh
-
-To destroy a sandbox:
- agentikube destroy
diff --git a/chart/agentikube/templates/_helpers.tpl b/chart/agentikube/templates/_helpers.tpl
deleted file mode 100644
index c5210b9..0000000
--- a/chart/agentikube/templates/_helpers.tpl
+++ /dev/null
@@ -1,42 +0,0 @@
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "agentikube.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Create a default fully qualified app name.
-*/}}
-{{- define "agentikube.fullname" -}}
-{{- if .Values.fullnameOverride }}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- $name := default .Chart.Name .Values.nameOverride }}
-{{- if contains $name .Release.Name }}
-{{- .Release.Name | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
-{{- end }}
-{{- end }}
-{{- end }}
-
-{{/*
-Common labels
-*/}}
-{{- define "agentikube.labels" -}}
-helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
-{{ include "agentikube.selectorLabels" . }}
-{{- if .Chart.AppVersion }}
-app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
-{{- end }}
-app.kubernetes.io/managed-by: {{ .Release.Service }}
-{{- end }}
-
-{{/*
-Selector labels
-*/}}
-{{- define "agentikube.selectorLabels" -}}
-app.kubernetes.io/name: {{ include "agentikube.name" . }}
-app.kubernetes.io/instance: {{ .Release.Name }}
-{{- end }}
diff --git a/chart/agentikube/templates/karpenter-ec2nodeclass.yaml b/chart/agentikube/templates/karpenter-ec2nodeclass.yaml
deleted file mode 100644
index b2f4959..0000000
--- a/chart/agentikube/templates/karpenter-ec2nodeclass.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-{{- if eq .Values.compute.type "karpenter" }}
-apiVersion: karpenter.k8s.aws/v1
-kind: EC2NodeClass
-metadata:
- name: sandbox-nodes
- labels:
- {{- include "agentikube.labels" . | nindent 4 }}
-spec:
- amiSelectorTerms:
- - alias: "al2023@latest"
- subnetSelectorTerms:
- - tags:
- karpenter.sh/discovery: {{ required "compute.clusterName is required for Karpenter" .Values.compute.clusterName | quote }}
- securityGroupSelectorTerms:
- - tags:
- karpenter.sh/discovery: {{ .Values.compute.clusterName | quote }}
- role: {{ printf "KarpenterNodeRole-%s" .Values.compute.clusterName | quote }}
-{{- end }}
diff --git a/chart/agentikube/templates/karpenter-nodepool.yaml b/chart/agentikube/templates/karpenter-nodepool.yaml
deleted file mode 100644
index 4d55cb4..0000000
--- a/chart/agentikube/templates/karpenter-nodepool.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-{{- if eq .Values.compute.type "karpenter" }}
-apiVersion: karpenter.sh/v1
-kind: NodePool
-metadata:
- name: sandbox-pool
- labels:
- {{- include "agentikube.labels" . | nindent 4 }}
-spec:
- template:
- spec:
- requirements:
- - key: node.kubernetes.io/instance-type
- operator: In
- values:
- {{- range .Values.compute.instanceTypes }}
- - {{ . }}
- {{- end }}
- - key: karpenter.sh/capacity-type
- operator: In
- values:
- {{- range .Values.compute.capacityTypes }}
- - {{ . }}
- {{- end }}
- - key: kubernetes.io/arch
- operator: In
- values:
- - amd64
- nodeClassRef:
- name: sandbox-nodes
- group: karpenter.k8s.aws
- kind: EC2NodeClass
- limits:
- cpu: {{ .Values.compute.maxCpu }}
- memory: {{ .Values.compute.maxMemory }}
- disruption:
- consolidationPolicy: {{ if .Values.compute.consolidation }}WhenEmptyOrUnderutilized{{ else }}WhenEmpty{{ end }}
-{{- end }}
diff --git a/chart/agentikube/templates/networkpolicy.yaml b/chart/agentikube/templates/networkpolicy.yaml
deleted file mode 100644
index fbad38e..0000000
--- a/chart/agentikube/templates/networkpolicy.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: NetworkPolicy
-metadata:
- name: sandbox-network-policy
- namespace: {{ .Release.Namespace }}
- labels:
- {{- include "agentikube.labels" . | nindent 4 }}
-spec:
- podSelector:
- matchLabels:
- app.kubernetes.io/name: sandbox
- policyTypes:
- - Ingress
- {{- if .Values.sandbox.networkPolicy.egressAllowAll }}
- - Egress
- {{- end }}
- {{- if .Values.sandbox.networkPolicy.egressAllowAll }}
- egress:
- - to:
- - ipBlock:
- cidr: 0.0.0.0/0
- {{- end }}
- ingress:
- {{- range .Values.sandbox.networkPolicy.ingressPorts }}
- - ports:
- - port: {{ . }}
- protocol: TCP
- {{- end }}
diff --git a/chart/agentikube/templates/sandbox-template.yaml b/chart/agentikube/templates/sandbox-template.yaml
deleted file mode 100644
index 2c61361..0000000
--- a/chart/agentikube/templates/sandbox-template.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-apiVersion: extensions.agents.x-k8s.io/v1alpha1
-kind: SandboxTemplate
-metadata:
- name: sandbox-template
- namespace: {{ .Release.Namespace }}
- labels:
- {{- include "agentikube.labels" . | nindent 4 }}
-spec:
- template:
- spec:
- containers:
- - name: sandbox
- image: {{ required "sandbox.image is required" .Values.sandbox.image }}
- ports:
- {{- range .Values.sandbox.ports }}
- - containerPort: {{ . }}
- {{- end }}
- resources:
- requests:
- cpu: {{ .Values.sandbox.resources.requests.cpu }}
- memory: {{ .Values.sandbox.resources.requests.memory }}
- limits:
- cpu: {{ .Values.sandbox.resources.limits.cpu | quote }}
- memory: {{ .Values.sandbox.resources.limits.memory }}
- securityContext:
- runAsUser: {{ .Values.sandbox.securityContext.runAsUser }}
- runAsGroup: {{ .Values.sandbox.securityContext.runAsGroup }}
- runAsNonRoot: {{ .Values.sandbox.securityContext.runAsNonRoot }}
- {{- if .Values.sandbox.env }}
- env:
- {{- range $key, $value := .Values.sandbox.env }}
- - name: {{ $key }}
- value: {{ $value | quote }}
- {{- end }}
- {{- end }}
- startupProbe:
- tcpSocket:
- port: {{ .Values.sandbox.probes.port }}
- failureThreshold: {{ .Values.sandbox.probes.startupFailureThreshold }}
- periodSeconds: 10
- readinessProbe:
- tcpSocket:
- port: {{ .Values.sandbox.probes.port }}
- periodSeconds: 10
- volumeMounts:
- - name: workspace
- mountPath: {{ .Values.sandbox.mountPath }}
- volumeClaimTemplates:
- - metadata:
- name: workspace
- spec:
- accessModes:
- - ReadWriteMany
- storageClassName: efs-sandbox
- resources:
- requests:
- storage: "10Gi"
diff --git a/chart/agentikube/templates/storageclass-efs.yaml b/chart/agentikube/templates/storageclass-efs.yaml
deleted file mode 100644
index 8a9c2ff..0000000
--- a/chart/agentikube/templates/storageclass-efs.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
- name: efs-sandbox
- labels:
- {{- include "agentikube.labels" . | nindent 4 }}
-provisioner: efs.csi.aws.com
-parameters:
- provisioningMode: efs-ap
- fileSystemId: {{ required "storage.filesystemId is required" .Values.storage.filesystemId }}
- directoryPerms: "755"
- uid: {{ .Values.storage.uid | quote }}
- gid: {{ .Values.storage.gid | quote }}
- basePath: {{ .Values.storage.basePath }}
-reclaimPolicy: {{ .Values.storage.reclaimPolicy }}
-volumeBindingMode: Immediate
diff --git a/chart/agentikube/templates/warm-pool.yaml b/chart/agentikube/templates/warm-pool.yaml
deleted file mode 100644
index 52f726f..0000000
--- a/chart/agentikube/templates/warm-pool.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-{{- if .Values.sandbox.warmPool.enabled }}
-apiVersion: extensions.agents.x-k8s.io/v1alpha1
-kind: SandboxWarmPool
-metadata:
- name: sandbox-warm-pool
- namespace: {{ .Release.Namespace }}
- labels:
- {{- include "agentikube.labels" . | nindent 4 }}
-spec:
- templateRef:
- name: sandbox-template
- replicas: {{ .Values.sandbox.warmPool.size }}
- ttlMinutes: {{ .Values.sandbox.warmPool.ttlMinutes }}
-{{- end }}
diff --git a/chart/agentikube/values.yaml b/chart/agentikube/values.yaml
deleted file mode 100644
index f40ece8..0000000
--- a/chart/agentikube/values.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-# Compute configuration for sandbox nodes
-compute:
- # karpenter or fargate
- type: karpenter
- instanceTypes:
- - m6i.xlarge
- - m5.xlarge
- - r6i.xlarge
- capacityTypes:
- - spot
- - on-demand
- maxCpu: 2000
- maxMemory: 8000Gi
- consolidation: true
- # EKS cluster name - used for Karpenter subnet/SG/role discovery
- clusterName: ""
-
-# Persistent storage configuration
-storage:
- # efs is the only supported type
- type: efs
- # REQUIRED - your EFS filesystem ID
- filesystemId: ""
- basePath: /sandboxes
- uid: 1000
- gid: 1000
- reclaimPolicy: Retain
-
-# Sandbox pod configuration
-sandbox:
- # REQUIRED - container image for sandbox pods
- image: ""
- ports:
- - 18789
- - 2222
- - 3000
- - 5173
- - 8080
- mountPath: /home/node/.openclaw
- resources:
- requests:
- cpu: 50m
- memory: 512Mi
- limits:
- cpu: "2"
- memory: 4Gi
- env: {}
- securityContext:
- runAsUser: 1000
- runAsGroup: 1000
- runAsNonRoot: true
- probes:
- port: 18789
- startupFailureThreshold: 30
- warmPool:
- enabled: true
- size: 5
- ttlMinutes: 120
- networkPolicy:
- egressAllowAll: true
- ingressPorts:
- - 18789
- - 2222
- - 3000
- - 5173
- - 8080
diff --git a/chart/agentikube_test.go b/chart/agentikube_test.go
deleted file mode 100644
index 9854572..0000000
--- a/chart/agentikube_test.go
+++ /dev/null
@@ -1,223 +0,0 @@
-package chart_test
-
-import (
- "os"
- "os/exec"
- "strings"
- "testing"
-)
-
-// helmTemplate runs helm template with the given extra args and returns stdout.
-func helmTemplate(t *testing.T, extraArgs ...string) string {
- t.Helper()
- args := []string{
- "template", "agentikube", "chart/agentikube/",
- "--namespace", "sandboxes",
- "--set", "storage.filesystemId=fs-test",
- "--set", "sandbox.image=test:latest",
- "--set", "compute.clusterName=test-cluster",
- }
- args = append(args, extraArgs...)
- cmd := exec.Command("helm", args...)
- cmd.Dir = repoRoot(t)
- out, err := cmd.CombinedOutput()
- if err != nil {
- t.Fatalf("helm template failed: %v\n%s", err, out)
- }
- return string(out)
-}
-
-func repoRoot(t *testing.T) string {
- t.Helper()
- dir, err := os.Getwd()
- if err != nil {
- t.Fatal(err)
- }
- // This test file lives at chart/agentikube_test.go, so repo root is ..
- return dir + "/.."
-}
-
-func TestHelmLint(t *testing.T) {
- cmd := exec.Command("helm", "lint", "chart/agentikube/")
- cmd.Dir = repoRoot(t)
- out, err := cmd.CombinedOutput()
- if err != nil {
- t.Fatalf("helm lint failed: %v\n%s", err, out)
- }
- if !strings.Contains(string(out), "0 chart(s) failed") {
- t.Fatalf("helm lint reported failures:\n%s", out)
- }
-}
-
-func TestHelmTemplateDefaultValues(t *testing.T) {
- output := helmTemplate(t)
-
- expected := []string{
- "kind: StorageClass",
- "kind: SandboxTemplate",
- "kind: SandboxWarmPool",
- "kind: NodePool",
- "kind: EC2NodeClass",
- "kind: NetworkPolicy",
- }
- for _, want := range expected {
- if !strings.Contains(output, want) {
- t.Errorf("expected %q in rendered output", want)
- }
- }
-}
-
-func TestHelmTemplateLabels(t *testing.T) {
- output := helmTemplate(t)
-
- labels := []string{
- "helm.sh/chart: agentikube-0.1.0",
- "app.kubernetes.io/name: agentikube",
- "app.kubernetes.io/instance: agentikube",
- "app.kubernetes.io/managed-by: Helm",
- `app.kubernetes.io/version: "0.1.0"`,
- }
- for _, label := range labels {
- if !strings.Contains(output, label) {
- t.Errorf("expected label %q in rendered output", label)
- }
- }
-}
-
-func TestHelmTemplateKarpenterDisabled(t *testing.T) {
- output := helmTemplate(t, "--set", "compute.type=fargate")
-
- if strings.Contains(output, "kind: NodePool") {
- t.Error("NodePool should not be rendered when compute.type=fargate")
- }
- if strings.Contains(output, "kind: EC2NodeClass") {
- t.Error("EC2NodeClass should not be rendered when compute.type=fargate")
- }
- if !strings.Contains(output, "kind: StorageClass") {
- t.Error("StorageClass should always be rendered")
- }
- if !strings.Contains(output, "kind: SandboxTemplate") {
- t.Error("SandboxTemplate should always be rendered")
- }
-}
-
-func TestHelmTemplateWarmPoolDisabled(t *testing.T) {
- output := helmTemplate(t, "--set", "sandbox.warmPool.enabled=false")
-
- if strings.Contains(output, "kind: SandboxWarmPool") {
- t.Error("SandboxWarmPool should not be rendered when warmPool.enabled=false")
- }
- if !strings.Contains(output, "kind: SandboxTemplate") {
- t.Error("SandboxTemplate should always be rendered")
- }
-}
-
-func TestHelmTemplateEgressDisabled(t *testing.T) {
- output := helmTemplate(t,
- "--set", "sandbox.networkPolicy.egressAllowAll=false",
- "-s", "templates/networkpolicy.yaml",
- )
-
- if strings.Contains(output, "0.0.0.0/0") {
- t.Error("egress CIDR should not appear when egressAllowAll=false")
- }
- lines := strings.Split(output, "\n")
- for i, line := range lines {
- if strings.Contains(line, "policyTypes:") {
- block := strings.Join(lines[i:min(i+4, len(lines))], "\n")
- if strings.Contains(block, "Egress") {
- t.Error("Egress should not be in policyTypes when egressAllowAll=false")
- }
- }
- }
-}
-
-func TestHelmTemplateRequiredValues(t *testing.T) {
- tests := []struct {
- name string
- args []string
- wantErr string
- }{
- {
- name: "missing filesystemId",
- args: []string{"--set", "sandbox.image=test:latest", "--set", "compute.clusterName=test"},
- wantErr: "storage.filesystemId is required",
- },
- {
- name: "missing sandbox image",
- args: []string{"--set", "storage.filesystemId=fs-test", "--set", "compute.clusterName=test"},
- wantErr: "sandbox.image is required",
- },
- {
- name: "missing clusterName for karpenter",
- args: []string{"--set", "storage.filesystemId=fs-test", "--set", "sandbox.image=test:latest"},
- wantErr: "compute.clusterName is required for Karpenter",
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- args := append([]string{
- "template", "agentikube", "chart/agentikube/",
- "--namespace", "sandboxes",
- }, tt.args...)
- cmd := exec.Command("helm", args...)
- cmd.Dir = repoRoot(t)
- out, err := cmd.CombinedOutput()
- if err == nil {
- t.Fatal("expected helm template to fail for missing required value")
- }
- if !strings.Contains(string(out), tt.wantErr) {
- t.Errorf("expected error containing %q, got:\n%s", tt.wantErr, out)
- }
- })
- }
-}
-
-func TestHelmTemplateEnvVars(t *testing.T) {
- output := helmTemplate(t,
- "--set", "sandbox.env.MY_VAR=my-value",
- "-s", "templates/sandbox-template.yaml",
- )
-
- if !strings.Contains(output, "MY_VAR") {
- t.Error("expected MY_VAR in rendered env")
- }
- if !strings.Contains(output, "my-value") {
- t.Error("expected my-value in rendered env")
- }
-}
-
-func TestHelmTemplateNoEnvWhenEmpty(t *testing.T) {
- output := helmTemplate(t, "-s", "templates/sandbox-template.yaml")
-
- lines := strings.Split(output, "\n")
- for _, line := range lines {
- trimmed := strings.TrimSpace(line)
- if trimmed == "env:" {
- t.Error("env: block should not appear when sandbox.env is empty")
- }
- }
-}
-
-func TestHelmTemplateNamespace(t *testing.T) {
- output := helmTemplate(t, "--namespace", "custom-ns")
-
- if !strings.Contains(output, "namespace: custom-ns") {
- t.Error("expected namespace: custom-ns in rendered output")
- }
-}
-
-func TestHelmTemplateConsolidationDisabled(t *testing.T) {
- output := helmTemplate(t,
- "--set", "compute.consolidation=false",
- "-s", "templates/karpenter-nodepool.yaml",
- )
-
- if !strings.Contains(output, "consolidationPolicy: WhenEmpty") {
- t.Error("expected consolidationPolicy: WhenEmpty when consolidation=false")
- }
- if strings.Contains(output, "WhenEmptyOrUnderutilized") {
- t.Error("should not have WhenEmptyOrUnderutilized when consolidation=false")
- }
-}
diff --git a/internal/commands/create.go b/internal/commands/create.go
index 5274594..1f38d21 100644
--- a/internal/commands/create.go
+++ b/internal/commands/create.go
@@ -9,6 +9,7 @@ import (
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
)
func NewCreateCmd() *cobra.Command {
@@ -54,7 +55,7 @@ func NewCreateCmd() *cobra.Command {
},
}
- secretGVR := coreGVR("secrets")
+ secretGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
_, err = client.Dynamic().Resource(secretGVR).Namespace(ns).Create(ctx, secret, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("creating secret %q: %w", name, err)
@@ -64,7 +65,7 @@ func NewCreateCmd() *cobra.Command {
// Create the SandboxClaim
claim := &unstructured.Unstructured{
Object: map[string]interface{}{
- "apiVersion": "extensions.agents.x-k8s.io/v1alpha1",
+ "apiVersion": "agentsandbox.dev/v1",
"kind": "SandboxClaim",
"metadata": map[string]interface{}{
"name": name,
@@ -81,7 +82,12 @@ func NewCreateCmd() *cobra.Command {
},
}
- _, err = client.Dynamic().Resource(sandboxClaimGVR).Namespace(ns).Create(ctx, claim, metav1.CreateOptions{})
+ claimGVR := schema.GroupVersionResource{
+ Group: "agentsandbox.dev",
+ Version: "v1",
+ Resource: "sandboxclaims",
+ }
+ _, err = client.Dynamic().Resource(claimGVR).Namespace(ns).Create(ctx, claim, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("creating SandboxClaim %q: %w", name, err)
}
@@ -92,7 +98,7 @@ func NewCreateCmd() *cobra.Command {
waitCtx, cancel := context.WithTimeout(ctx, 3*time.Minute)
defer cancel()
- if err := client.WaitForReady(waitCtx, ns, sandboxClaimGVR, name); err != nil {
+ if err := client.WaitForReady(waitCtx, ns, "sandboxclaims", name); err != nil {
return fmt.Errorf("waiting for sandbox: %w", err)
}
diff --git a/internal/commands/destroy.go b/internal/commands/destroy.go
index c235e19..0175529 100644
--- a/internal/commands/destroy.go
+++ b/internal/commands/destroy.go
@@ -9,8 +9,9 @@ import (
"github.com/rathi/agentikube/internal/kube"
"github.com/spf13/cobra"
- "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/api/errors"
)
func NewDestroyCmd() *cobra.Command {
@@ -49,11 +50,17 @@ func NewDestroyCmd() *cobra.Command {
ns := cfg.Namespace
name := "sandbox-" + handle
- secretGVR := coreGVR("secrets")
- pvcGVR := coreGVR("persistentvolumeclaims")
+ claimGVR := schema.GroupVersionResource{
+ Group: "agentsandbox.dev",
+ Version: "v1",
+ Resource: "sandboxclaims",
+ }
+
+ secretGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
+ pvcGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"}
// Delete SandboxClaim
- err = client.Dynamic().Resource(sandboxClaimGVR).Namespace(ns).Delete(ctx, name, metav1.DeleteOptions{})
+ err = client.Dynamic().Resource(claimGVR).Namespace(ns).Delete(ctx, name, metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("deleting SandboxClaim %q: %w", name, err)
}
diff --git a/internal/commands/down.go b/internal/commands/down.go
index d5c765f..dada100 100644
--- a/internal/commands/down.go
+++ b/internal/commands/down.go
@@ -7,6 +7,7 @@ import (
"github.com/rathi/agentikube/internal/kube"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
)
func NewDownCmd() *cobra.Command {
@@ -29,14 +30,26 @@ func NewDownCmd() *cobra.Command {
ns := cfg.Namespace
- err = client.Dynamic().Resource(sandboxWarmPoolGVR).Namespace(ns).Delete(ctx, "sandbox-warm-pool", metav1.DeleteOptions{})
+ warmPoolGVR := schema.GroupVersionResource{
+ Group: "agentsandbox.dev",
+ Version: "v1",
+ Resource: "sandboxwarmpools",
+ }
+
+ templateGVR := schema.GroupVersionResource{
+ Group: "agentsandbox.dev",
+ Version: "v1",
+ Resource: "sandboxtemplates",
+ }
+
+ err = client.Dynamic().Resource(warmPoolGVR).Namespace(ns).Delete(ctx, "sandbox-warm-pool", metav1.DeleteOptions{})
if err != nil {
fmt.Printf("[warn] could not delete SandboxWarmPool: %v\n", err)
} else {
fmt.Println("[ok] SandboxWarmPool deleted")
}
- err = client.Dynamic().Resource(sandboxTemplateGVR).Namespace(ns).Delete(ctx, "sandbox-template", metav1.DeleteOptions{})
+ err = client.Dynamic().Resource(templateGVR).Namespace(ns).Delete(ctx, "sandbox-template", metav1.DeleteOptions{})
if err != nil {
fmt.Printf("[warn] could not delete SandboxTemplate: %v\n", err)
} else {
diff --git a/internal/commands/helpers.go b/internal/commands/helpers.go
index ad7cbfc..c7d1093 100644
--- a/internal/commands/helpers.go
+++ b/internal/commands/helpers.go
@@ -3,31 +3,8 @@ package commands
import (
"github.com/rathi/agentikube/internal/config"
"github.com/spf13/cobra"
- "k8s.io/apimachinery/pkg/runtime/schema"
)
-var (
- sandboxClaimGVR = schema.GroupVersionResource{
- Group: "extensions.agents.x-k8s.io",
- Version: "v1alpha1",
- Resource: "sandboxclaims",
- }
- sandboxTemplateGVR = schema.GroupVersionResource{
- Group: "extensions.agents.x-k8s.io",
- Version: "v1alpha1",
- Resource: "sandboxtemplates",
- }
- sandboxWarmPoolGVR = schema.GroupVersionResource{
- Group: "extensions.agents.x-k8s.io",
- Version: "v1alpha1",
- Resource: "sandboxwarmpools",
- }
-)
-
-func coreGVR(resource string) schema.GroupVersionResource {
- return schema.GroupVersionResource{Group: "", Version: "v1", Resource: resource}
-}
-
func loadConfig(cmd *cobra.Command) (*config.Config, error) {
cfgPath, _ := cmd.Flags().GetString("config")
return config.Load(cfgPath)
diff --git a/internal/commands/list.go b/internal/commands/list.go
index fcbe996..92605ce 100644
--- a/internal/commands/list.go
+++ b/internal/commands/list.go
@@ -10,6 +10,7 @@ import (
"github.com/rathi/agentikube/internal/kube"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
)
func NewListCmd() *cobra.Command {
@@ -30,7 +31,13 @@ func NewListCmd() *cobra.Command {
return fmt.Errorf("connecting to cluster: %w", err)
}
- list, err := client.Dynamic().Resource(sandboxClaimGVR).Namespace(cfg.Namespace).List(ctx, metav1.ListOptions{})
+ claimGVR := schema.GroupVersionResource{
+ Group: "agentsandbox.dev",
+ Version: "v1",
+ Resource: "sandboxclaims",
+ }
+
+ list, err := client.Dynamic().Resource(claimGVR).Namespace(cfg.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
return fmt.Errorf("listing SandboxClaims: %w", err)
}
@@ -107,7 +114,7 @@ func extractPodName(obj map[string]interface{}) string {
if ok {
annotations, ok := metadata["annotations"].(map[string]interface{})
if ok {
- if podName, ok := annotations["agents.x-k8s.io/pod-name"].(string); ok {
+ if podName, ok := annotations["agentsandbox.dev/pod-name"].(string); ok {
return podName
}
}
diff --git a/internal/commands/ssh.go b/internal/commands/ssh.go
index 5e9b9f0..c52d1b1 100644
--- a/internal/commands/ssh.go
+++ b/internal/commands/ssh.go
@@ -7,6 +7,7 @@ import (
"github.com/rathi/agentikube/internal/kube"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
)
func NewSSHCmd() *cobra.Command {
@@ -32,7 +33,13 @@ func NewSSHCmd() *cobra.Command {
ns := cfg.Namespace
name := "sandbox-" + handle
- claim, err := client.Dynamic().Resource(sandboxClaimGVR).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
+ claimGVR := schema.GroupVersionResource{
+ Group: "agentsandbox.dev",
+ Version: "v1",
+ Resource: "sandboxclaims",
+ }
+
+ claim, err := client.Dynamic().Resource(claimGVR).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("getting SandboxClaim %q: %w", name, err)
}
diff --git a/internal/commands/status.go b/internal/commands/status.go
index b870ad0..c87c49f 100644
--- a/internal/commands/status.go
+++ b/internal/commands/status.go
@@ -7,6 +7,7 @@ import (
"github.com/rathi/agentikube/internal/kube"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
)
func NewStatusCmd() *cobra.Command {
@@ -30,7 +31,13 @@ func NewStatusCmd() *cobra.Command {
ns := cfg.Namespace
// Warm pool status
- wp, err := client.Dynamic().Resource(sandboxWarmPoolGVR).Namespace(ns).Get(ctx, "sandbox-warm-pool", metav1.GetOptions{})
+ warmPoolGVR := schema.GroupVersionResource{
+ Group: "agentsandbox.dev",
+ Version: "v1",
+ Resource: "sandboxwarmpools",
+ }
+
+ wp, err := client.Dynamic().Resource(warmPoolGVR).Namespace(ns).Get(ctx, "sandbox-warm-pool", metav1.GetOptions{})
if err != nil {
fmt.Printf("warm pool: not found (%v)\n", err)
} else {
@@ -48,7 +55,13 @@ func NewStatusCmd() *cobra.Command {
}
// Sandbox count
- claims, err := client.Dynamic().Resource(sandboxClaimGVR).Namespace(ns).List(ctx, metav1.ListOptions{})
+ claimGVR := schema.GroupVersionResource{
+ Group: "agentsandbox.dev",
+ Version: "v1",
+ Resource: "sandboxclaims",
+ }
+
+ claims, err := client.Dynamic().Resource(claimGVR).Namespace(ns).List(ctx, metav1.ListOptions{})
if err != nil {
fmt.Printf("\nsandboxes: error listing (%v)\n", err)
} else {
diff --git a/internal/commands/up.go b/internal/commands/up.go
index 70a3a01..ab95a3e 100644
--- a/internal/commands/up.go
+++ b/internal/commands/up.go
@@ -46,7 +46,7 @@ func NewUpCmd() *cobra.Command {
if cfg.Sandbox.WarmPool.Enabled {
fmt.Println("waiting for warm pool to become ready...")
- if err := client.WaitForReady(ctx, cfg.Namespace, sandboxWarmPoolGVR, "sandbox-warm-pool"); err != nil {
+ if err := client.WaitForReady(ctx, cfg.Namespace, "sandboxwarmpools", "sandbox-warm-pool"); err != nil {
return fmt.Errorf("waiting for warm pool: %w", err)
}
fmt.Println("[ok] warm pool ready")
diff --git a/internal/kube/wait.go b/internal/kube/wait.go
index 48691d1..4d31ee3 100644
--- a/internal/kube/wait.go
+++ b/internal/kube/wait.go
@@ -10,27 +10,34 @@ import (
"k8s.io/apimachinery/pkg/watch"
)
-// WaitForReady watches a resource until its Ready condition becomes True
-// or the context is cancelled/times out.
-func (c *Client) WaitForReady(ctx context.Context, namespace string, gvr schema.GroupVersionResource, name string) error {
+// WaitForReady watches a resource in the agentsandbox.dev/v1 group until its
+// Ready condition becomes True or the context is cancelled/times out.
+// The resource parameter is the plural resource name (e.g. "sandboxclaims", "sandboxwarmpools").
+func (c *Client) WaitForReady(ctx context.Context, namespace, resource, name string) error {
+ gvr := schema.GroupVersionResource{
+ Group: "agentsandbox.dev",
+ Version: "v1",
+ Resource: resource,
+ }
+
watcher, err := c.Dynamic().Resource(gvr).Namespace(namespace).Watch(ctx, metav1.ListOptions{
FieldSelector: fmt.Sprintf("metadata.name=%s", name),
})
if err != nil {
- return fmt.Errorf("watching %s %s/%s: %w", gvr.Resource, namespace, name, err)
+ return fmt.Errorf("watching %s %s/%s: %w", resource, namespace, name, err)
}
defer watcher.Stop()
for {
select {
case <-ctx.Done():
- return fmt.Errorf("timed out waiting for %s %s/%s to become ready", gvr.Resource, namespace, name)
+ return fmt.Errorf("timed out waiting for %s %s/%s to become ready", resource, namespace, name)
case event, ok := <-watcher.ResultChan():
if !ok {
- return fmt.Errorf("watch channel closed for %s %s/%s", gvr.Resource, namespace, name)
+ return fmt.Errorf("watch channel closed for %s %s/%s", resource, namespace, name)
}
if event.Type == watch.Error {
- return fmt.Errorf("watch error for %s %s/%s", gvr.Resource, namespace, name)
+ return fmt.Errorf("watch error for %s %s/%s", resource, namespace, name)
}
obj, ok := event.Object.(*unstructured.Unstructured)
diff --git a/internal/manifest/templates/sandbox-template.yaml.tmpl b/internal/manifest/templates/sandbox-template.yaml.tmpl
index 848c8a3..a968a63 100644
--- a/internal/manifest/templates/sandbox-template.yaml.tmpl
+++ b/internal/manifest/templates/sandbox-template.yaml.tmpl
@@ -1,4 +1,4 @@
-apiVersion: extensions.agents.x-k8s.io/v1alpha1
+apiVersion: agentsandbox.dev/v1
kind: SandboxTemplate
metadata:
name: sandbox-template
diff --git a/internal/manifest/templates/warm-pool.yaml.tmpl b/internal/manifest/templates/warm-pool.yaml.tmpl
index 24c16d9..d030490 100644
--- a/internal/manifest/templates/warm-pool.yaml.tmpl
+++ b/internal/manifest/templates/warm-pool.yaml.tmpl
@@ -1,4 +1,4 @@
-apiVersion: extensions.agents.x-k8s.io/v1alpha1
+apiVersion: agentsandbox.dev/v1
kind: SandboxWarmPool
metadata:
name: sandbox-warm-pool
diff --git a/scripts/download-crds.sh b/scripts/download-crds.sh
deleted file mode 100755
index f4090be..0000000
--- a/scripts/download-crds.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env bash
-set -euo pipefail
-
-# Download agent-sandbox CRDs into chart/agentikube/crds/
-# Run this before packaging the chart: make crds
-
-REPO="kubernetes-sigs/agent-sandbox"
-BRANCH="main"
-BASE_URL="https://raw.githubusercontent.com/${REPO}/${BRANCH}/k8s/crds"
-DEST="$(cd "$(dirname "$0")/.." && pwd)/chart/agentikube/crds"
-
-CRDS=(
- sandboxtemplates.yaml
- sandboxclaims.yaml
- sandboxwarmpools.yaml
-)
-
-echo "Downloading CRDs from ${REPO}@${BRANCH} ..."
-mkdir -p "$DEST"
-
-for crd in "${CRDS[@]}"; do
- echo " ${crd}"
- curl -sSfL "${BASE_URL}/${crd}" -o "${DEST}/${crd}"
-done
-
-echo "CRDs written to ${DEST}"