Compare commits

..

13 commits
v0.1.3 ... main

Author SHA1 Message Date
Hari
af0a16382f
Fix blog post link in README
Updated the blog post link for isolated long-running agents.
2026-03-27 10:10:29 -04:00
351bf2892f helm was getting gitignored 2026-02-07 20:22:45 -05:00
081739b9a3 help release fix 2026-02-07 20:13:09 -05:00
165ac465e7 readme 2026-02-07 20:09:23 -05:00
305c77eec9 helm release 2026-02-07 20:08:49 -05:00
44fe1e1f5c helm install 2026-02-07 20:05:55 -05:00
Hari
f3abdfe7b8
Remove context section from README.md 2026-02-07 19:30:40 -05:00
9b7c1c0054 readme 2026-02-07 19:29:53 -05:00
3fef4c03ce update 2026-02-07 19:23:21 -05:00
Hari
3c6848e6d3
Replace image with higher resolution version
Updated image in README to a higher resolution.
2026-02-07 15:33:50 -05:00
Hari
acea3e2351
Update README.md 2026-02-07 15:21:03 -05:00
70f02c616f readme 2026-02-07 15:20:06 -05:00
e5d7b7a7c5 update 2026-02-07 14:39:14 -05:00
30 changed files with 719 additions and 239 deletions

View file

@ -20,8 +20,22 @@ jobs:
with: with:
go-version-file: go.mod go-version-file: go.mod
- name: Setup Helm
uses: azure/setup-helm@v4
- name: Build - name: Build
run: go build ./... run: go build ./...
- name: Test - name: Test
run: go test ./... run: go test ./...
- name: Helm lint
run: helm lint chart/agentikube/
- name: Helm template
run: |
helm template agentikube chart/agentikube/ \
--namespace sandboxes \
--set storage.filesystemId=fs-test \
--set sandbox.image=test:latest \
--set compute.clusterName=test-cluster

View file

@ -9,6 +9,7 @@ on:
permissions: permissions:
contents: write contents: write
packages: write
concurrency: concurrency:
group: release-${{ github.ref }} group: release-${{ github.ref }}
@ -29,6 +30,9 @@ jobs:
with: with:
go-version-file: go.mod go-version-file: go.mod
- name: Setup Helm
uses: azure/setup-helm@v4
- name: Compute next version - name: Compute next version
id: version id: version
shell: bash shell: bash
@ -93,3 +97,17 @@ jobs:
name: ${{ steps.version.outputs.next_tag }} name: ${{ steps.version.outputs.next_tag }}
generate_release_notes: true generate_release_notes: true
files: dist/* files: dist/*
- name: Set chart version
run: |
sed -i "s/^version:.*/version: ${{ steps.version.outputs.version }}/" chart/agentikube/Chart.yaml
sed -i "s/^appVersion:.*/appVersion: \"${{ steps.version.outputs.version }}\"/" chart/agentikube/Chart.yaml
- name: Package Helm chart
run: helm package chart/agentikube/ --destination .helm-pkg
- name: Log in to GHCR
run: echo "${{ secrets.GITHUB_TOKEN }}" | helm registry login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Push Helm chart to GHCR
run: helm push .helm-pkg/agentikube-${{ steps.version.outputs.version }}.tgz oci://ghcr.io/${{ github.repository_owner }}

1
.gitignore vendored
View file

@ -1,2 +1 @@
.cache .cache
agentikube

View file

@ -1,4 +1,4 @@
.PHONY: build install clean fmt vet lint .PHONY: build install clean fmt vet lint crds helm-lint helm-template
build: build:
go build -o agentikube ./cmd/agentikube go build -o agentikube ./cmd/agentikube
@ -16,3 +16,16 @@ vet:
go vet ./... go vet ./...
lint: fmt vet lint: fmt vet
crds:
./scripts/download-crds.sh
helm-lint:
helm lint chart/agentikube/
helm-template:
helm template agentikube chart/agentikube/ \
--namespace sandboxes \
--set storage.filesystemId=fs-test \
--set sandbox.image=test:latest \
--set compute.clusterName=test-cluster

213
README.md
View file

@ -1,176 +1,85 @@
# agentikube # agentikube
[![Go Version](https://img.shields.io/github/go-mod/go-version/harivansh-afk/agentikube)](https://github.com/harivansh-afk/agentikube/blob/main/go.mod) [![Go Version](https://img.shields.io/github/go-mod/go-version/harivansh-afk/agentikube)](https://github.com/harivansh-afk/agentikube/blob/main/go.mod)
[![Code Size](https://img.shields.io/github/languages/code-size/harivansh-afk/agentikube)](https://github.com/harivansh-afk/agentikube) [![Helm Version](https://img.shields.io/badge/helm%20chart-0.1.0-blue)](https://github.com/harivansh-afk/agentikube/tree/main/chart/agentikube)
[![Release](https://github.com/harivansh-afk/agentikube/actions/workflows/release.yml/badge.svg)](https://github.com/harivansh-afk/agentikube/actions/workflows/release.yml) [![Release](https://img.shields.io/github/v/release/harivansh-afk/agentikube)](https://github.com/harivansh-afk/agentikube/releases/latest)
This repo is a small Go CLI for running isolated agent sandboxes on Kubernetes. Isolated stateful agent sandboxes on Kubernetes
The main job of `agentikube` is: <img width="1023" height="745" alt="image" src="https://github.com/user-attachments/assets/d62b6d99-b6bf-4ac3-9fb3-9b8373afbbec" />
- set up shared sandbox infra (`init`, `up`)
- create one sandbox per user/handle (`create`)
- let you inspect and access sandboxes (`list`, `status`, `ssh`)
- clean up sandboxes or shared infra (`destroy`, `down`)
It is built for AWS-style setups (EFS + optional Karpenter). ## Install
## What This Stands Up
When you run `up`, it renders and applies Kubernetes manifests from templates.
Core resources:
- `Namespace`
- `StorageClass` (`efs-sandbox`, provisioner `efs.csi.aws.com`)
- `SandboxTemplate` (`sandbox-template`)
Optional resources:
- `SandboxWarmPool` (if `sandbox.warmPool.enabled: true`)
- `NodePool` + `EC2NodeClass` (if `compute.type: karpenter`)
When you run `create <handle>`, it creates:
- `Secret` (`sandbox-<handle>`) with provider credentials
- `SandboxClaim` (`sandbox-<handle>`) that points to `sandbox-template`
- PVC from template (`workspace` volume claim template)
## Filesystem
```text
cmd/agentikube/main.go # CLI entrypoint + subcommand wiring
internal/config/ # config structs + validation/defaults
internal/manifest/ # template rendering
internal/manifest/templates/ # Kubernetes YAML templates
internal/kube/ # kube client, apply, wait, exec helpers
internal/commands/ # command implementations
agentikube.example.yaml # example config you copy to agentikube.yaml
Makefile # build/install/fmt/vet targets
```
## How It Works (Simple Flow)
```mermaid
flowchart TD
A[agentikube command] --> B[Load agentikube.yaml]
B --> C[Validate config + apply defaults]
C --> D{Command}
D -->|init| E[Install CRDs + check prereqs + ensure namespace]
D -->|up| F[Render templates -> server-side apply]
D -->|create| G[Create Secret + SandboxClaim]
G --> H[Watch SandboxClaim until Ready]
D -->|list/status| I[Read SandboxClaim/WarmPool state]
D -->|ssh| J[Resolve pod name -> kubectl exec -it]
D -->|destroy| K[Delete SandboxClaim + Secret + best-effort PVC]
D -->|down| L[Delete warm pool + template, keep user sandboxes]
```
## Resource Diagram (Abilities + Resources)
```mermaid
flowchart LR
CLI[agentikube CLI] --> K8S[Kubernetes API]
CLI --> KUBECTL[kubectl binary]
K8S --> NS[Namespace]
K8S --> SC[StorageClass efs-sandbox]
K8S --> ST[SandboxTemplate]
K8S --> WP[SandboxWarmPool]
K8S --> NP[NodePool]
K8S --> ENC[EC2NodeClass]
K8S --> CLAIM[SandboxClaim per user]
K8S --> SECRET[Secret per user]
CLAIM --> POD[Sandbox Pod]
POD --> PVC[Workspace PVC]
PVC --> SC
SC --> EFS[(AWS EFS)]
NP --> EC2[(EC2 nodes via Karpenter)]
ENC --> EC2
```
## Commands
- `agentikube init`
Installs agent-sandbox CRDs, checks for EFS CSI/Karpenter, and ensures namespace exists.
- `agentikube up [--dry-run]`
Renders manifests and applies them with server-side apply. `--dry-run` prints YAML only.
- `agentikube create <handle> --provider <name> --api-key <key>`
Creates per-user Secret + SandboxClaim and waits (up to 3 minutes) for Ready.
- `agentikube list`
Shows handle, status, age, and pod name for all sandbox claims.
- `agentikube ssh <handle>`
Finds the sandbox pod and opens `/bin/sh` using `kubectl exec -it`.
- `agentikube destroy <handle> [--yes]`
Deletes SandboxClaim + Secret + best-effort PVC for that handle.
- `agentikube down`
Deletes shared warm pool/template infra but preserves existing user sandboxes.
- `agentikube status`
Prints warm pool numbers, sandbox count, and Karpenter node count (if enabled).
## Quick Start
1. Copy config:
```bash ```bash
cp agentikube.example.yaml agentikube.yaml helm install agentikube oci://ghcr.io/harivansh-afk/agentikube \
-n sandboxes --create-namespace \
-f my-values.yaml
``` ```
2. Fill your values in `agentikube.yaml`: Create a `my-values.yaml` with your cluster details:
- namespace
- EFS filesystem ID / base path
- sandbox image
- compute settings
3. Run: ```yaml
compute:
clusterName: my-eks-cluster
storage:
filesystemId: fs-0123456789abcdef0
sandbox:
image: my-registry/sandbox:latest
```
See [`values.yaml`](chart/agentikube/values.yaml) for all options.
## CLI
The Go CLI handles runtime operations that are inherently imperative:
```bash ```bash
agentikube init
agentikube up
agentikube create demo --provider openai --api-key <key> agentikube create demo --provider openai --api-key <key>
agentikube list agentikube list
agentikube ssh demo agentikube ssh demo
agentikube status
agentikube destroy demo
``` ```
4. (Recommended) Install [k9s](https://k9scli.io/) for managing Kubernetes resources: Build it with `go build ./cmd/agentikube` or `make build`.
## What gets created
The Helm chart installs:
- StorageClass (`efs-sandbox`) backed by your EFS filesystem
- SandboxTemplate defining the pod spec
- NetworkPolicy for ingress/egress rules
- SandboxWarmPool (optional, enabled by default)
- Karpenter NodePool + EC2NodeClass (optional, when `compute.type: karpenter`)
Each `agentikube create <handle>` then adds a Secret, SandboxClaim, and workspace PVC for that user.
## Project layout
```
cmd/agentikube/ CLI entrypoint
internal/ config, manifest rendering, kube helpers
chart/agentikube/ Helm chart
scripts/ CRD download helper
```
## Development
```bash ```bash
brew install derailed/k9s/k9s make build # compile CLI
k9s --context <your-cluster-context> make helm-lint # lint the chart
make helm-template # dry-run render
go test ./... # run tests
``` ```
Use `:crds`, `:sandboxes`, `:sandboxtemplates`, etc. to browse agent-sandbox resources. ## Good to know
## Test CLI Locally - Storage is EFS-only for now
- `kubectl` must be installed (used by `init` and `ssh`)
- Fargate is validated in config but templates only cover Karpenter so far
- [k9s](https://k9scli.io/) is great for browsing sandbox resources
Use this exact flow to verify the CLI on your machine: ## Context
```bash (https://harivan.sh/thoughts/isolated-long-running-agents-with-kubernetes)
# 1) Build + tests
mkdir -p .cache/go-build .cache/go-mod
GOCACHE=$(pwd)/.cache/go-build GOMODCACHE=$(pwd)/.cache/go-mod go build ./...
GOCACHE=$(pwd)/.cache/go-build GOMODCACHE=$(pwd)/.cache/go-mod go test ./...
# 2) Root help + command help
GOCACHE=$(pwd)/.cache/go-build GOMODCACHE=$(pwd)/.cache/go-mod go run ./cmd/agentikube --help
for c in init up create list ssh down destroy status; do
GOCACHE=$(pwd)/.cache/go-build GOMODCACHE=$(pwd)/.cache/go-mod go run ./cmd/agentikube "$c" --help >/dev/null
done
# 3) Manifest generation smoke test
./agentikube up --dry-run --config agentikube.example.yaml
```
If those pass, the CLI wiring + config + templating path is working locally.
## CI And Auto Release
This repo now has two GitHub Actions workflows:
- `.github/workflows/ci.yml`
Runs `go build ./...` and `go test ./...` on PRs and non-main branch pushes.
- `.github/workflows/release.yml`
Runs on push to `main`, auto-bumps patch version (`vX.Y.Z`), writes `VERSION`, creates/pushes tag, builds multi-platform binaries, and creates a GitHub Release with artifacts.
## Notes / Current Limits
- `storage.type` currently must be `efs`.
- `kubectl` must be installed (used by `init` and `ssh`).
- `compute.type: fargate` is validated, but this repo currently renders templates for the Karpenter path.
- No Go tests are present yet (`go test ./...` reports no test files).

BIN
agentikube Executable file

Binary file not shown.

View file

@ -0,0 +1,12 @@
apiVersion: v2
name: agentikube
description: Isolated agent sandboxes on Kubernetes
type: application
version: 0.1.0
appVersion: "0.1.0"
keywords:
- sandbox
- agents
- kubernetes
- karpenter
- efs

View file

View file

@ -0,0 +1,25 @@
agentikube has been installed in namespace {{ .Release.Namespace }}.
Resources created:
- StorageClass: efs-sandbox (EFS filesystem: {{ .Values.storage.filesystemId }})
- SandboxTemplate: sandbox-template
{{- if .Values.sandbox.warmPool.enabled }}
- SandboxWarmPool: sandbox-warm-pool ({{ .Values.sandbox.warmPool.size }} replicas)
{{- end }}
{{- if eq .Values.compute.type "karpenter" }}
- NodePool: sandbox-pool
- EC2NodeClass: sandbox-nodes
{{- end }}
- NetworkPolicy: sandbox-network-policy
To create a sandbox:
agentikube create <handle> --provider <provider> --api-key <key>
To list sandboxes:
agentikube list
To SSH into a sandbox:
agentikube ssh <handle>
To destroy a sandbox:
agentikube destroy <handle>

View file

@ -0,0 +1,42 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "agentikube.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
*/}}
{{- define "agentikube.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "agentikube.labels" -}}
helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{ include "agentikube.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "agentikube.selectorLabels" -}}
app.kubernetes.io/name: {{ include "agentikube.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View file

@ -0,0 +1,18 @@
{{- if eq .Values.compute.type "karpenter" }}
apiVersion: karpenter.k8s.aws/v1
kind: EC2NodeClass
metadata:
name: sandbox-nodes
labels:
{{- include "agentikube.labels" . | nindent 4 }}
spec:
amiSelectorTerms:
- alias: "al2023@latest"
subnetSelectorTerms:
- tags:
karpenter.sh/discovery: {{ required "compute.clusterName is required for Karpenter" .Values.compute.clusterName | quote }}
securityGroupSelectorTerms:
- tags:
karpenter.sh/discovery: {{ .Values.compute.clusterName | quote }}
role: {{ printf "KarpenterNodeRole-%s" .Values.compute.clusterName | quote }}
{{- end }}

View file

@ -0,0 +1,37 @@
{{- if eq .Values.compute.type "karpenter" }}
apiVersion: karpenter.sh/v1
kind: NodePool
metadata:
name: sandbox-pool
labels:
{{- include "agentikube.labels" . | nindent 4 }}
spec:
template:
spec:
requirements:
- key: node.kubernetes.io/instance-type
operator: In
values:
{{- range .Values.compute.instanceTypes }}
- {{ . }}
{{- end }}
- key: karpenter.sh/capacity-type
operator: In
values:
{{- range .Values.compute.capacityTypes }}
- {{ . }}
{{- end }}
- key: kubernetes.io/arch
operator: In
values:
- amd64
nodeClassRef:
name: sandbox-nodes
group: karpenter.k8s.aws
kind: EC2NodeClass
limits:
cpu: {{ .Values.compute.maxCpu }}
memory: {{ .Values.compute.maxMemory }}
disruption:
consolidationPolicy: {{ if .Values.compute.consolidation }}WhenEmptyOrUnderutilized{{ else }}WhenEmpty{{ end }}
{{- end }}

View file

@ -0,0 +1,28 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: sandbox-network-policy
namespace: {{ .Release.Namespace }}
labels:
{{- include "agentikube.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: sandbox
policyTypes:
- Ingress
{{- if .Values.sandbox.networkPolicy.egressAllowAll }}
- Egress
{{- end }}
{{- if .Values.sandbox.networkPolicy.egressAllowAll }}
egress:
- to:
- ipBlock:
cidr: 0.0.0.0/0
{{- end }}
ingress:
{{- range .Values.sandbox.networkPolicy.ingressPorts }}
- ports:
- port: {{ . }}
protocol: TCP
{{- end }}

View file

@ -0,0 +1,57 @@
apiVersion: extensions.agents.x-k8s.io/v1alpha1
kind: SandboxTemplate
metadata:
name: sandbox-template
namespace: {{ .Release.Namespace }}
labels:
{{- include "agentikube.labels" . | nindent 4 }}
spec:
template:
spec:
containers:
- name: sandbox
image: {{ required "sandbox.image is required" .Values.sandbox.image }}
ports:
{{- range .Values.sandbox.ports }}
- containerPort: {{ . }}
{{- end }}
resources:
requests:
cpu: {{ .Values.sandbox.resources.requests.cpu }}
memory: {{ .Values.sandbox.resources.requests.memory }}
limits:
cpu: {{ .Values.sandbox.resources.limits.cpu | quote }}
memory: {{ .Values.sandbox.resources.limits.memory }}
securityContext:
runAsUser: {{ .Values.sandbox.securityContext.runAsUser }}
runAsGroup: {{ .Values.sandbox.securityContext.runAsGroup }}
runAsNonRoot: {{ .Values.sandbox.securityContext.runAsNonRoot }}
{{- if .Values.sandbox.env }}
env:
{{- range $key, $value := .Values.sandbox.env }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
{{- end }}
startupProbe:
tcpSocket:
port: {{ .Values.sandbox.probes.port }}
failureThreshold: {{ .Values.sandbox.probes.startupFailureThreshold }}
periodSeconds: 10
readinessProbe:
tcpSocket:
port: {{ .Values.sandbox.probes.port }}
periodSeconds: 10
volumeMounts:
- name: workspace
mountPath: {{ .Values.sandbox.mountPath }}
volumeClaimTemplates:
- metadata:
name: workspace
spec:
accessModes:
- ReadWriteMany
storageClassName: efs-sandbox
resources:
requests:
storage: "10Gi"

View file

@ -0,0 +1,16 @@
{{/*
StorageClass backing sandbox workspace PVCs via the AWS EFS CSI driver.
provisioningMode efs-ap means each PVC is provisioned as an EFS access
point — presumably rooted under storage.basePath; confirm against the
EFS CSI driver docs for the configured driver version.
*/}}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: efs-sandbox
  labels:
    {{- include "agentikube.labels" . | nindent 4 }}
provisioner: efs.csi.aws.com
parameters:
  provisioningMode: efs-ap
  fileSystemId: {{ required "storage.filesystemId is required" .Values.storage.filesystemId }}
  directoryPerms: "755"
  uid: {{ .Values.storage.uid | quote }}
  gid: {{ .Values.storage.gid | quote }}
  basePath: {{ .Values.storage.basePath }}
reclaimPolicy: {{ .Values.storage.reclaimPolicy }}
volumeBindingMode: Immediate

View file

@ -0,0 +1,14 @@
{{/*
Optional SandboxWarmPool: keeps .Values.sandbox.warmPool.size replicas of
sandbox-template warm, with a ttlMinutes expiry. Rendered only when
sandbox.warmPool.enabled is true.
*/}}
{{- if .Values.sandbox.warmPool.enabled }}
apiVersion: extensions.agents.x-k8s.io/v1alpha1
kind: SandboxWarmPool
metadata:
  name: sandbox-warm-pool
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "agentikube.labels" . | nindent 4 }}
spec:
  templateRef:
    name: sandbox-template
  replicas: {{ .Values.sandbox.warmPool.size }}
  ttlMinutes: {{ .Values.sandbox.warmPool.ttlMinutes }}
{{- end }}

View file

@ -0,0 +1,66 @@
# Compute configuration for sandbox nodes
compute:
# karpenter or fargate
type: karpenter
instanceTypes:
- m6i.xlarge
- m5.xlarge
- r6i.xlarge
capacityTypes:
- spot
- on-demand
maxCpu: 2000
maxMemory: 8000Gi
consolidation: true
# EKS cluster name - used for Karpenter subnet/SG/role discovery
clusterName: ""
# Persistent storage configuration
storage:
# efs is the only supported type
type: efs
# REQUIRED - your EFS filesystem ID
filesystemId: ""
basePath: /sandboxes
uid: 1000
gid: 1000
reclaimPolicy: Retain
# Sandbox pod configuration
sandbox:
# REQUIRED - container image for sandbox pods
image: ""
ports:
- 18789
- 2222
- 3000
- 5173
- 8080
mountPath: /home/node/.openclaw
resources:
requests:
cpu: 50m
memory: 512Mi
limits:
cpu: "2"
memory: 4Gi
env: {}
securityContext:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
probes:
port: 18789
startupFailureThreshold: 30
warmPool:
enabled: true
size: 5
ttlMinutes: 120
networkPolicy:
egressAllowAll: true
ingressPorts:
- 18789
- 2222
- 3000
- 5173
- 8080

223
chart/agentikube_test.go Normal file
View file

@ -0,0 +1,223 @@
package chart_test
import (
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"testing"
)
// helmTemplate renders the chart with the baseline required values plus any
// caller-supplied extra arguments, returning combined stdout+stderr.
// The test fails immediately if helm exits non-zero.
func helmTemplate(t *testing.T, extraArgs ...string) string {
	t.Helper()
	base := []string{
		"template", "agentikube", "chart/agentikube/",
		"--namespace", "sandboxes",
		"--set", "storage.filesystemId=fs-test",
		"--set", "sandbox.image=test:latest",
		"--set", "compute.clusterName=test-cluster",
	}
	cmd := exec.Command("helm", append(base, extraArgs...)...)
	cmd.Dir = repoRoot(t)
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("helm template failed: %v\n%s", err, out)
	}
	return string(out)
}
func repoRoot(t *testing.T) string {
t.Helper()
dir, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
// This test file lives at chart/agentikube_test.go, so repo root is ..
return dir + "/.."
}
// TestHelmLint runs `helm lint` against the chart directory and fails on
// any reported chart failure. Requires the helm binary on PATH.
func TestHelmLint(t *testing.T) {
	cmd := exec.Command("helm", "lint", "chart/agentikube/")
	cmd.Dir = repoRoot(t)
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("helm lint failed: %v\n%s", err, out)
	}
	// Belt and braces: also check the summary line, in case lint reports
	// failures without a non-zero exit — confirm against helm's behavior.
	if !strings.Contains(string(out), "0 chart(s) failed") {
		t.Fatalf("helm lint reported failures:\n%s", out)
	}
}
// TestHelmTemplateDefaultValues verifies that every expected resource kind
// is present in the output rendered with default chart values.
func TestHelmTemplateDefaultValues(t *testing.T) {
	rendered := helmTemplate(t)
	for _, kind := range []string{
		"kind: StorageClass",
		"kind: SandboxTemplate",
		"kind: SandboxWarmPool",
		"kind: NodePool",
		"kind: EC2NodeClass",
		"kind: NetworkPolicy",
	} {
		if !strings.Contains(rendered, kind) {
			t.Errorf("expected %q in rendered output", kind)
		}
	}
}
// TestHelmTemplateLabels checks that the shared label helper output
// (helm.sh/chart and the app.kubernetes.io/* set) appears in the render.
// NOTE(review): the chart version "0.1.0" is hard-coded here, but the
// release workflow rewrites Chart.yaml's version on each release — this
// test will need updating (or deriving the version) when that bumps.
func TestHelmTemplateLabels(t *testing.T) {
	output := helmTemplate(t)
	labels := []string{
		"helm.sh/chart: agentikube-0.1.0",
		"app.kubernetes.io/name: agentikube",
		"app.kubernetes.io/instance: agentikube",
		"app.kubernetes.io/managed-by: Helm",
		`app.kubernetes.io/version: "0.1.0"`,
	}
	for _, label := range labels {
		if !strings.Contains(output, label) {
			t.Errorf("expected label %q in rendered output", label)
		}
	}
}
// TestHelmTemplateKarpenterDisabled verifies that Karpenter-only resources
// are omitted for compute.type=fargate while core resources still render.
func TestHelmTemplateKarpenterDisabled(t *testing.T) {
	rendered := helmTemplate(t, "--set", "compute.type=fargate")
	mustBeAbsent := map[string]string{
		"kind: NodePool":     "NodePool should not be rendered when compute.type=fargate",
		"kind: EC2NodeClass": "EC2NodeClass should not be rendered when compute.type=fargate",
	}
	for needle, msg := range mustBeAbsent {
		if strings.Contains(rendered, needle) {
			t.Error(msg)
		}
	}
	mustBePresent := map[string]string{
		"kind: StorageClass":    "StorageClass should always be rendered",
		"kind: SandboxTemplate": "SandboxTemplate should always be rendered",
	}
	for needle, msg := range mustBePresent {
		if !strings.Contains(rendered, needle) {
			t.Error(msg)
		}
	}
}
// TestHelmTemplateWarmPoolDisabled verifies the SandboxWarmPool resource is
// omitted when sandbox.warmPool.enabled=false, while the SandboxTemplate
// remains unconditional.
func TestHelmTemplateWarmPoolDisabled(t *testing.T) {
	output := helmTemplate(t, "--set", "sandbox.warmPool.enabled=false")
	if strings.Contains(output, "kind: SandboxWarmPool") {
		t.Error("SandboxWarmPool should not be rendered when warmPool.enabled=false")
	}
	if !strings.Contains(output, "kind: SandboxTemplate") {
		t.Error("SandboxTemplate should always be rendered")
	}
}
// TestHelmTemplateEgressDisabled renders only the NetworkPolicy template
// with egressAllowAll=false and asserts neither the allow-all egress CIDR
// nor an Egress policy type appears.
func TestHelmTemplateEgressDisabled(t *testing.T) {
	output := helmTemplate(t,
		"--set", "sandbox.networkPolicy.egressAllowAll=false",
		"-s", "templates/networkpolicy.yaml",
	)
	if strings.Contains(output, "0.0.0.0/0") {
		t.Error("egress CIDR should not appear when egressAllowAll=false")
	}
	// Scan the few lines following each "policyTypes:" marker; "Egress"
	// must not be listed there when egress is disabled. (min is the Go
	// 1.21+ builtin, clamping the window at end-of-output.)
	lines := strings.Split(output, "\n")
	for i, line := range lines {
		if strings.Contains(line, "policyTypes:") {
			block := strings.Join(lines[i:min(i+4, len(lines))], "\n")
			if strings.Contains(block, "Egress") {
				t.Error("Egress should not be in policyTypes when egressAllowAll=false")
			}
		}
	}
}
// TestHelmTemplateRequiredValues verifies that omitting each `required`
// chart value makes `helm template` fail with the matching error message.
// Each case supplies the other two required values so only one is missing.
func TestHelmTemplateRequiredValues(t *testing.T) {
	tests := []struct {
		name    string
		args    []string
		wantErr string
	}{
		{
			name:    "missing filesystemId",
			args:    []string{"--set", "sandbox.image=test:latest", "--set", "compute.clusterName=test"},
			wantErr: "storage.filesystemId is required",
		},
		{
			name:    "missing sandbox image",
			args:    []string{"--set", "storage.filesystemId=fs-test", "--set", "compute.clusterName=test"},
			wantErr: "sandbox.image is required",
		},
		{
			name:    "missing clusterName for karpenter",
			args:    []string{"--set", "storage.filesystemId=fs-test", "--set", "sandbox.image=test:latest"},
			wantErr: "compute.clusterName is required for Karpenter",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Build the command directly (not via helmTemplate) because the
			// helper would inject the very values we need to leave unset.
			args := append([]string{
				"template", "agentikube", "chart/agentikube/",
				"--namespace", "sandboxes",
			}, tt.args...)
			cmd := exec.Command("helm", args...)
			cmd.Dir = repoRoot(t)
			out, err := cmd.CombinedOutput()
			if err == nil {
				t.Fatal("expected helm template to fail for missing required value")
			}
			if !strings.Contains(string(out), tt.wantErr) {
				t.Errorf("expected error containing %q, got:\n%s", tt.wantErr, out)
			}
		})
	}
}
// TestHelmTemplateEnvVars verifies that entries in sandbox.env render as
// env var name/value pairs in the sandbox-template manifest.
func TestHelmTemplateEnvVars(t *testing.T) {
	output := helmTemplate(t,
		"--set", "sandbox.env.MY_VAR=my-value",
		"-s", "templates/sandbox-template.yaml",
	)
	if !strings.Contains(output, "MY_VAR") {
		t.Error("expected MY_VAR in rendered env")
	}
	if !strings.Contains(output, "my-value") {
		t.Error("expected my-value in rendered env")
	}
}
// TestHelmTemplateNoEnvWhenEmpty verifies that the sandbox template omits
// the env: block entirely when sandbox.env is left as an empty map.
func TestHelmTemplateNoEnvWhenEmpty(t *testing.T) {
	rendered := helmTemplate(t, "-s", "templates/sandbox-template.yaml")
	for _, raw := range strings.Split(rendered, "\n") {
		if strings.TrimSpace(raw) == "env:" {
			t.Error("env: block should not appear when sandbox.env is empty")
		}
	}
}
// TestHelmTemplateNamespace verifies the release namespace is stamped into
// namespaced resources.
// NOTE(review): helmTemplate already passes --namespace sandboxes, so this
// relies on helm honoring the last occurrence of a repeated flag — confirm,
// or add a way to override the helper's default namespace.
func TestHelmTemplateNamespace(t *testing.T) {
	output := helmTemplate(t, "--namespace", "custom-ns")
	if !strings.Contains(output, "namespace: custom-ns") {
		t.Error("expected namespace: custom-ns in rendered output")
	}
}
// TestHelmTemplateConsolidationDisabled verifies the NodePool disruption
// policy flips to WhenEmpty when compute.consolidation=false.
func TestHelmTemplateConsolidationDisabled(t *testing.T) {
	output := helmTemplate(t,
		"--set", "compute.consolidation=false",
		"-s", "templates/karpenter-nodepool.yaml",
	)
	// Note: "WhenEmpty" is a prefix of "WhenEmptyOrUnderutilized", so the
	// second check is what actually distinguishes the two policies.
	if !strings.Contains(output, "consolidationPolicy: WhenEmpty") {
		t.Error("expected consolidationPolicy: WhenEmpty when consolidation=false")
	}
	if strings.Contains(output, "WhenEmptyOrUnderutilized") {
		t.Error("should not have WhenEmptyOrUnderutilized when consolidation=false")
	}
}

View file

@ -9,7 +9,6 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
) )
func NewCreateCmd() *cobra.Command { func NewCreateCmd() *cobra.Command {
@ -55,7 +54,7 @@ func NewCreateCmd() *cobra.Command {
}, },
} }
secretGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"} secretGVR := coreGVR("secrets")
_, err = client.Dynamic().Resource(secretGVR).Namespace(ns).Create(ctx, secret, metav1.CreateOptions{}) _, err = client.Dynamic().Resource(secretGVR).Namespace(ns).Create(ctx, secret, metav1.CreateOptions{})
if err != nil { if err != nil {
return fmt.Errorf("creating secret %q: %w", name, err) return fmt.Errorf("creating secret %q: %w", name, err)
@ -65,7 +64,7 @@ func NewCreateCmd() *cobra.Command {
// Create the SandboxClaim // Create the SandboxClaim
claim := &unstructured.Unstructured{ claim := &unstructured.Unstructured{
Object: map[string]interface{}{ Object: map[string]interface{}{
"apiVersion": "agentsandbox.dev/v1", "apiVersion": "extensions.agents.x-k8s.io/v1alpha1",
"kind": "SandboxClaim", "kind": "SandboxClaim",
"metadata": map[string]interface{}{ "metadata": map[string]interface{}{
"name": name, "name": name,
@ -82,12 +81,7 @@ func NewCreateCmd() *cobra.Command {
}, },
} }
claimGVR := schema.GroupVersionResource{ _, err = client.Dynamic().Resource(sandboxClaimGVR).Namespace(ns).Create(ctx, claim, metav1.CreateOptions{})
Group: "agentsandbox.dev",
Version: "v1",
Resource: "sandboxclaims",
}
_, err = client.Dynamic().Resource(claimGVR).Namespace(ns).Create(ctx, claim, metav1.CreateOptions{})
if err != nil { if err != nil {
return fmt.Errorf("creating SandboxClaim %q: %w", name, err) return fmt.Errorf("creating SandboxClaim %q: %w", name, err)
} }
@ -98,7 +92,7 @@ func NewCreateCmd() *cobra.Command {
waitCtx, cancel := context.WithTimeout(ctx, 3*time.Minute) waitCtx, cancel := context.WithTimeout(ctx, 3*time.Minute)
defer cancel() defer cancel()
if err := client.WaitForReady(waitCtx, ns, "sandboxclaims", name); err != nil { if err := client.WaitForReady(waitCtx, ns, sandboxClaimGVR, name); err != nil {
return fmt.Errorf("waiting for sandbox: %w", err) return fmt.Errorf("waiting for sandbox: %w", err)
} }

View file

@ -9,9 +9,8 @@ import (
"github.com/rathi/agentikube/internal/kube" "github.com/rathi/agentikube/internal/kube"
"github.com/spf13/cobra" "github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) )
func NewDestroyCmd() *cobra.Command { func NewDestroyCmd() *cobra.Command {
@ -50,17 +49,11 @@ func NewDestroyCmd() *cobra.Command {
ns := cfg.Namespace ns := cfg.Namespace
name := "sandbox-" + handle name := "sandbox-" + handle
claimGVR := schema.GroupVersionResource{ secretGVR := coreGVR("secrets")
Group: "agentsandbox.dev", pvcGVR := coreGVR("persistentvolumeclaims")
Version: "v1",
Resource: "sandboxclaims",
}
secretGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
pvcGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"}
// Delete SandboxClaim // Delete SandboxClaim
err = client.Dynamic().Resource(claimGVR).Namespace(ns).Delete(ctx, name, metav1.DeleteOptions{}) err = client.Dynamic().Resource(sandboxClaimGVR).Namespace(ns).Delete(ctx, name, metav1.DeleteOptions{})
if err != nil { if err != nil {
return fmt.Errorf("deleting SandboxClaim %q: %w", name, err) return fmt.Errorf("deleting SandboxClaim %q: %w", name, err)
} }

View file

@ -7,7 +7,6 @@ import (
"github.com/rathi/agentikube/internal/kube" "github.com/rathi/agentikube/internal/kube"
"github.com/spf13/cobra" "github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
) )
func NewDownCmd() *cobra.Command { func NewDownCmd() *cobra.Command {
@ -30,26 +29,14 @@ func NewDownCmd() *cobra.Command {
ns := cfg.Namespace ns := cfg.Namespace
warmPoolGVR := schema.GroupVersionResource{ err = client.Dynamic().Resource(sandboxWarmPoolGVR).Namespace(ns).Delete(ctx, "sandbox-warm-pool", metav1.DeleteOptions{})
Group: "agentsandbox.dev",
Version: "v1",
Resource: "sandboxwarmpools",
}
templateGVR := schema.GroupVersionResource{
Group: "agentsandbox.dev",
Version: "v1",
Resource: "sandboxtemplates",
}
err = client.Dynamic().Resource(warmPoolGVR).Namespace(ns).Delete(ctx, "sandbox-warm-pool", metav1.DeleteOptions{})
if err != nil { if err != nil {
fmt.Printf("[warn] could not delete SandboxWarmPool: %v\n", err) fmt.Printf("[warn] could not delete SandboxWarmPool: %v\n", err)
} else { } else {
fmt.Println("[ok] SandboxWarmPool deleted") fmt.Println("[ok] SandboxWarmPool deleted")
} }
err = client.Dynamic().Resource(templateGVR).Namespace(ns).Delete(ctx, "sandbox-template", metav1.DeleteOptions{}) err = client.Dynamic().Resource(sandboxTemplateGVR).Namespace(ns).Delete(ctx, "sandbox-template", metav1.DeleteOptions{})
if err != nil { if err != nil {
fmt.Printf("[warn] could not delete SandboxTemplate: %v\n", err) fmt.Printf("[warn] could not delete SandboxTemplate: %v\n", err)
} else { } else {

View file

@ -3,8 +3,31 @@ package commands
import ( import (
"github.com/rathi/agentikube/internal/config" "github.com/rathi/agentikube/internal/config"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/runtime/schema"
) )
var (
sandboxClaimGVR = schema.GroupVersionResource{
Group: "extensions.agents.x-k8s.io",
Version: "v1alpha1",
Resource: "sandboxclaims",
}
sandboxTemplateGVR = schema.GroupVersionResource{
Group: "extensions.agents.x-k8s.io",
Version: "v1alpha1",
Resource: "sandboxtemplates",
}
sandboxWarmPoolGVR = schema.GroupVersionResource{
Group: "extensions.agents.x-k8s.io",
Version: "v1alpha1",
Resource: "sandboxwarmpools",
}
)
func coreGVR(resource string) schema.GroupVersionResource {
return schema.GroupVersionResource{Group: "", Version: "v1", Resource: resource}
}
func loadConfig(cmd *cobra.Command) (*config.Config, error) { func loadConfig(cmd *cobra.Command) (*config.Config, error) {
cfgPath, _ := cmd.Flags().GetString("config") cfgPath, _ := cmd.Flags().GetString("config")
return config.Load(cfgPath) return config.Load(cfgPath)

View file

@ -10,7 +10,6 @@ import (
"github.com/rathi/agentikube/internal/kube" "github.com/rathi/agentikube/internal/kube"
"github.com/spf13/cobra" "github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
) )
func NewListCmd() *cobra.Command { func NewListCmd() *cobra.Command {
@ -31,13 +30,7 @@ func NewListCmd() *cobra.Command {
return fmt.Errorf("connecting to cluster: %w", err) return fmt.Errorf("connecting to cluster: %w", err)
} }
claimGVR := schema.GroupVersionResource{ list, err := client.Dynamic().Resource(sandboxClaimGVR).Namespace(cfg.Namespace).List(ctx, metav1.ListOptions{})
Group: "agentsandbox.dev",
Version: "v1",
Resource: "sandboxclaims",
}
list, err := client.Dynamic().Resource(claimGVR).Namespace(cfg.Namespace).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return fmt.Errorf("listing SandboxClaims: %w", err) return fmt.Errorf("listing SandboxClaims: %w", err)
} }
@ -114,7 +107,7 @@ func extractPodName(obj map[string]interface{}) string {
if ok { if ok {
annotations, ok := metadata["annotations"].(map[string]interface{}) annotations, ok := metadata["annotations"].(map[string]interface{})
if ok { if ok {
if podName, ok := annotations["agentsandbox.dev/pod-name"].(string); ok { if podName, ok := annotations["agents.x-k8s.io/pod-name"].(string); ok {
return podName return podName
} }
} }

View file

@ -7,7 +7,6 @@ import (
"github.com/rathi/agentikube/internal/kube" "github.com/rathi/agentikube/internal/kube"
"github.com/spf13/cobra" "github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
) )
func NewSSHCmd() *cobra.Command { func NewSSHCmd() *cobra.Command {
@ -33,13 +32,7 @@ func NewSSHCmd() *cobra.Command {
ns := cfg.Namespace ns := cfg.Namespace
name := "sandbox-" + handle name := "sandbox-" + handle
claimGVR := schema.GroupVersionResource{ claim, err := client.Dynamic().Resource(sandboxClaimGVR).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
Group: "agentsandbox.dev",
Version: "v1",
Resource: "sandboxclaims",
}
claim, err := client.Dynamic().Resource(claimGVR).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil { if err != nil {
return fmt.Errorf("getting SandboxClaim %q: %w", name, err) return fmt.Errorf("getting SandboxClaim %q: %w", name, err)
} }

View file

@ -7,7 +7,6 @@ import (
"github.com/rathi/agentikube/internal/kube" "github.com/rathi/agentikube/internal/kube"
"github.com/spf13/cobra" "github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
) )
func NewStatusCmd() *cobra.Command { func NewStatusCmd() *cobra.Command {
@ -31,13 +30,7 @@ func NewStatusCmd() *cobra.Command {
ns := cfg.Namespace ns := cfg.Namespace
// Warm pool status // Warm pool status
warmPoolGVR := schema.GroupVersionResource{ wp, err := client.Dynamic().Resource(sandboxWarmPoolGVR).Namespace(ns).Get(ctx, "sandbox-warm-pool", metav1.GetOptions{})
Group: "agentsandbox.dev",
Version: "v1",
Resource: "sandboxwarmpools",
}
wp, err := client.Dynamic().Resource(warmPoolGVR).Namespace(ns).Get(ctx, "sandbox-warm-pool", metav1.GetOptions{})
if err != nil { if err != nil {
fmt.Printf("warm pool: not found (%v)\n", err) fmt.Printf("warm pool: not found (%v)\n", err)
} else { } else {
@ -55,13 +48,7 @@ func NewStatusCmd() *cobra.Command {
} }
// Sandbox count // Sandbox count
claimGVR := schema.GroupVersionResource{ claims, err := client.Dynamic().Resource(sandboxClaimGVR).Namespace(ns).List(ctx, metav1.ListOptions{})
Group: "agentsandbox.dev",
Version: "v1",
Resource: "sandboxclaims",
}
claims, err := client.Dynamic().Resource(claimGVR).Namespace(ns).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
fmt.Printf("\nsandboxes: error listing (%v)\n", err) fmt.Printf("\nsandboxes: error listing (%v)\n", err)
} else { } else {

View file

@ -46,7 +46,7 @@ func NewUpCmd() *cobra.Command {
if cfg.Sandbox.WarmPool.Enabled { if cfg.Sandbox.WarmPool.Enabled {
fmt.Println("waiting for warm pool to become ready...") fmt.Println("waiting for warm pool to become ready...")
if err := client.WaitForReady(ctx, cfg.Namespace, "sandboxwarmpools", "sandbox-warm-pool"); err != nil { if err := client.WaitForReady(ctx, cfg.Namespace, sandboxWarmPoolGVR, "sandbox-warm-pool"); err != nil {
return fmt.Errorf("waiting for warm pool: %w", err) return fmt.Errorf("waiting for warm pool: %w", err)
} }
fmt.Println("[ok] warm pool ready") fmt.Println("[ok] warm pool ready")

View file

@ -10,34 +10,27 @@ import (
"k8s.io/apimachinery/pkg/watch" "k8s.io/apimachinery/pkg/watch"
) )
// WaitForReady watches a resource in the agentsandbox.dev/v1 group until its // WaitForReady watches a resource until its Ready condition becomes True
// Ready condition becomes True or the context is cancelled/times out. // or the context is cancelled/times out.
// The resource parameter is the plural resource name (e.g. "sandboxclaims", "sandboxwarmpools"). func (c *Client) WaitForReady(ctx context.Context, namespace string, gvr schema.GroupVersionResource, name string) error {
func (c *Client) WaitForReady(ctx context.Context, namespace, resource, name string) error {
gvr := schema.GroupVersionResource{
Group: "agentsandbox.dev",
Version: "v1",
Resource: resource,
}
watcher, err := c.Dynamic().Resource(gvr).Namespace(namespace).Watch(ctx, metav1.ListOptions{ watcher, err := c.Dynamic().Resource(gvr).Namespace(namespace).Watch(ctx, metav1.ListOptions{
FieldSelector: fmt.Sprintf("metadata.name=%s", name), FieldSelector: fmt.Sprintf("metadata.name=%s", name),
}) })
if err != nil { if err != nil {
return fmt.Errorf("watching %s %s/%s: %w", resource, namespace, name, err) return fmt.Errorf("watching %s %s/%s: %w", gvr.Resource, namespace, name, err)
} }
defer watcher.Stop() defer watcher.Stop()
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
return fmt.Errorf("timed out waiting for %s %s/%s to become ready", resource, namespace, name) return fmt.Errorf("timed out waiting for %s %s/%s to become ready", gvr.Resource, namespace, name)
case event, ok := <-watcher.ResultChan(): case event, ok := <-watcher.ResultChan():
if !ok { if !ok {
return fmt.Errorf("watch channel closed for %s %s/%s", resource, namespace, name) return fmt.Errorf("watch channel closed for %s %s/%s", gvr.Resource, namespace, name)
} }
if event.Type == watch.Error { if event.Type == watch.Error {
return fmt.Errorf("watch error for %s %s/%s", resource, namespace, name) return fmt.Errorf("watch error for %s %s/%s", gvr.Resource, namespace, name)
} }
obj, ok := event.Object.(*unstructured.Unstructured) obj, ok := event.Object.(*unstructured.Unstructured)

View file

@ -1,4 +1,4 @@
apiVersion: agentsandbox.dev/v1 apiVersion: extensions.agents.x-k8s.io/v1alpha1
kind: SandboxTemplate kind: SandboxTemplate
metadata: metadata:
name: sandbox-template name: sandbox-template

View file

@ -1,4 +1,4 @@
apiVersion: agentsandbox.dev/v1 apiVersion: extensions.agents.x-k8s.io/v1alpha1
kind: SandboxWarmPool kind: SandboxWarmPool
metadata: metadata:
name: sandbox-warm-pool name: sandbox-warm-pool

26
scripts/download-crds.sh Executable file
View file

@ -0,0 +1,26 @@
#!/usr/bin/env bash
set -euo pipefail

# Fetch the agent-sandbox CRD manifests into chart/agentikube/crds/.
# Intended to run before packaging the Helm chart: make crds

repo="kubernetes-sigs/agent-sandbox"
branch="main"
base_url="https://raw.githubusercontent.com/${repo}/${branch}/k8s/crds"

# Resolve the repo root relative to this script's own location so the
# script works no matter which directory it is invoked from.
dest="$(cd "$(dirname "$0")/.." && pwd)/chart/agentikube/crds"

manifests=(
  sandboxtemplates.yaml
  sandboxclaims.yaml
  sandboxwarmpools.yaml
)

printf 'Downloading CRDs from %s@%s ...\n' "$repo" "$branch"
mkdir -p "$dest"

for manifest in "${manifests[@]}"; do
  printf '  %s\n' "$manifest"
  # -sS: quiet but still surface errors; -f: fail on HTTP errors; -L: follow redirects.
  curl -sSfL "${base_url}/${manifest}" -o "${dest}/${manifest}"
done

printf 'CRDs written to %s\n' "$dest"