mirror of
https://github.com/harivansh-afk/agentikube.git
synced 2026-04-15 01:00:28 +00:00
init
This commit is contained in:
commit
0595d93c49
28 changed files with 1763 additions and 0 deletions
18
Makefile
Normal file
18
Makefile
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
# Build, lint, and housekeeping targets for the agentikube CLI.
.PHONY: build install clean fmt vet lint

# Compile the CLI binary into the repository root.
build:
	go build -o agentikube ./cmd/agentikube

# Install the CLI into GOBIN.
install:
	go install ./cmd/agentikube

# Remove the locally built binary.
clean:
	rm -f agentikube

# Format all Go sources in the module.
fmt:
	go fmt ./...

# Run go vet across the module.
vet:
	go vet ./...

# Aggregate lint target: format, then vet.
lint: fmt vet
98
agentikube.example.yaml
Normal file
98
agentikube.example.yaml
Normal file
|
|
@ -0,0 +1,98 @@
|
|||
# agentikube configuration
# Copy this file to agentikube.yaml and fill in your values.

# Kubernetes namespace for all sandbox resources
namespace: sandboxes

# Compute configuration for sandbox nodes
compute:
  # karpenter or fargate
  type: karpenter

  # EC2 instance types for Karpenter-managed nodes
  instanceTypes:
    - m6i.xlarge
    - m5.xlarge
    - r6i.xlarge

  # Capacity types: spot-first for cost savings
  capacityTypes: [spot, on-demand]

  # Cluster-wide resource limits
  maxCpu: 2000
  maxMemory: 8000Gi

  # Enable aggressive node bin-packing
  consolidation: true

  # Fargate selectors (only used when type: fargate)
  # fargateSelectors:
  #   - namespace: sandboxes

# Persistent storage configuration
storage:
  # Storage backend (efs is the only option for now)
  type: efs

  # Your EFS filesystem ID
  filesystemId: fs-08f7991b1c7f3ded8

  # Root directory for sandbox access points
  basePath: /sandboxes

  # UID/GID for created access points
  uid: 1000
  gid: 1000

  # Retain keeps data when a sandbox is deleted; Delete removes it
  reclaimPolicy: Retain

# Sandbox pod configuration
sandbox:
  # Container image for sandboxes
  image: openclaw:2026.2.2

  # Ports exposed by the sandbox container
  ports: [18789, 2222, 3000, 5173, 8080]

  # Where EFS storage mounts inside the container
  mountPath: /home/node/.openclaw

  # Container resource requests and limits
  resources:
    requests:
      cpu: 50m
      memory: 512Mi
    limits:
      cpu: "2"
      memory: 4Gi

  # Extra environment variables injected into every sandbox
  env:
    LLM_GATEWAY_URL: http://llm-gateway.sandboxes.svc.cluster.local

  # Container security context
  securityContext:
    runAsUser: 1000
    runAsGroup: 1000
    runAsNonRoot: true

  # Health check probes
  probes:
    # TCP probe target port
    port: 18789
    # Startup grace: 30 * 10s = 5 min
    startupFailureThreshold: 30

# Warm pool pre-provisions sandbox pods for fast allocation
warmPool:
  enabled: true
  size: 5
  ttlMinutes: 120

# Network policy for sandbox pods
networkPolicy:
  # Agents need outbound access for LLM APIs, npm, etc.
  egressAllowAll: true
  # Ports accessible from within the cluster
  ingressPorts: [18789, 2222, 3000, 5173, 8080]
39
cmd/agentikube/main.go
Normal file
39
cmd/agentikube/main.go
Normal file
|
|
@ -0,0 +1,39 @@
|
|||
// Command agentikube is the CLI entry point for provisioning and
// managing long-running agent sandboxes on Kubernetes.
package main

import (
	"fmt"
	"os"

	"github.com/rathi/agentikube/internal/commands"
	"github.com/spf13/cobra"
)

// version is the build version; intended to be overridden at link time.
var version = "dev"

func main() {
	root := &cobra.Command{
		Use:     "agentikube",
		Short:   "CLI for long-running agent sandboxes on Kubernetes",
		Long:    "agentikube provisions and manages long-running agent sandboxes on AWS using Kubernetes.",
		Version: version,
	}

	// --config is shared by every subcommand (read via loadConfig).
	root.PersistentFlags().String("config", "agentikube.yaml", "path to config file")

	subcommands := []*cobra.Command{
		commands.NewInitCmd(),
		commands.NewUpCmd(),
		commands.NewCreateCmd(),
		commands.NewListCmd(),
		commands.NewSSHCmd(),
		commands.NewDownCmd(),
		commands.NewDestroyCmd(),
		commands.NewStatusCmd(),
	}
	root.AddCommand(subcommands...)

	if err := root.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
50
go.mod
Normal file
50
go.mod
Normal file
|
|
@ -0,0 +1,50 @@
|
|||
module github.com/rathi/agentikube
|
||||
|
||||
go 1.25.6
|
||||
|
||||
require (
|
||||
github.com/spf13/cobra v1.10.2
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.35.0
|
||||
k8s.io/apimachinery v0.35.0
|
||||
k8s.io/client-go v0.35.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/spf13/pflag v1.0.9 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
google.golang.org/protobuf v1.36.8 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
|
||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
sigs.k8s.io/yaml v1.6.0 // indirect
|
||||
)
|
||||
135
go.sum
Normal file
135
go.sum
Normal file
|
|
@ -0,0 +1,135 @@
|
|||
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
|
||||
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
|
||||
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
|
||||
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
|
||||
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
|
||||
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
||||
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
|
||||
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
|
||||
k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
|
||||
k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
|
||||
k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
|
||||
k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
|
||||
k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
|
||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
|
||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
||||
117
internal/commands/create.go
Normal file
117
internal/commands/create.go
Normal file
|
|
@ -0,0 +1,117 @@
|
|||
package commands

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/rathi/agentikube/internal/kube"
	"github.com/spf13/cobra"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// NewCreateCmd returns the "create <handle>" subcommand.
//
// It creates a Secret carrying the LLM provider credentials and a
// SandboxClaim referencing it (both named "sandbox-<handle>"), then
// waits up to three minutes for the claim to report ready.
func NewCreateCmd() *cobra.Command {
	var provider string
	var apiKey string

	cmd := &cobra.Command{
		Use:   "create <handle>",
		Short: "Create a new sandbox for an agent",
		Long:  "Creates a Secret and SandboxClaim for the given handle, then waits for it to be ready.",
		Args:  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := context.Background()
			handle := args[0]

			// The flag help advertises env-var fallbacks; honor them when
			// the flags were left unset (previously the env vars were
			// documented but never read).
			if provider == "" {
				provider = os.Getenv("SANDBOX_LLM_PROVIDER")
			}
			if apiKey == "" {
				apiKey = os.Getenv("SANDBOX_API_KEY")
			}

			cfg, err := loadConfig(cmd)
			if err != nil {
				return err
			}

			client, err := kube.NewClient()
			if err != nil {
				return fmt.Errorf("connecting to cluster: %w", err)
			}

			ns := cfg.Namespace
			name := "sandbox-" + handle

			// Secret with provider credentials; stringData lets the API
			// server handle base64 encoding.
			secret := &unstructured.Unstructured{
				Object: map[string]interface{}{
					"apiVersion": "v1",
					"kind":       "Secret",
					"metadata": map[string]interface{}{
						"name":      name,
						"namespace": ns,
					},
					"stringData": map[string]interface{}{
						"PROVIDER":     provider,
						"PROVIDER_KEY": apiKey,
						"USER_NAME":    handle,
					},
				},
			}

			secretGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
			_, err = client.Dynamic().Resource(secretGVR).Namespace(ns).Create(ctx, secret, metav1.CreateOptions{})
			if err != nil {
				return fmt.Errorf("creating secret %q: %w", name, err)
			}
			fmt.Printf("[ok] secret %q created\n", name)

			// SandboxClaim referencing the shared template and the secret
			// created above (claim and secret share the same name).
			claim := &unstructured.Unstructured{
				Object: map[string]interface{}{
					"apiVersion": "agentsandbox.dev/v1",
					"kind":       "SandboxClaim",
					"metadata": map[string]interface{}{
						"name":      name,
						"namespace": ns,
					},
					"spec": map[string]interface{}{
						"templateRef": map[string]interface{}{
							"name": "sandbox-template",
						},
						"secretRef": map[string]interface{}{
							"name": name,
						},
					},
				},
			}

			claimGVR := schema.GroupVersionResource{
				Group:    "agentsandbox.dev",
				Version:  "v1",
				Resource: "sandboxclaims",
			}
			_, err = client.Dynamic().Resource(claimGVR).Namespace(ns).Create(ctx, claim, metav1.CreateOptions{})
			if err != nil {
				return fmt.Errorf("creating SandboxClaim %q: %w", name, err)
			}
			fmt.Printf("[ok] SandboxClaim %q created\n", name)

			// Wait for the sandbox to become ready (3 min timeout).
			fmt.Println("waiting for sandbox to be ready...")
			waitCtx, cancel := context.WithTimeout(ctx, 3*time.Minute)
			defer cancel()

			if err := client.WaitForReady(waitCtx, ns, "sandboxclaims", name); err != nil {
				return fmt.Errorf("waiting for sandbox: %w", err)
			}

			fmt.Printf("\nsandbox %q is ready\n", handle)
			fmt.Printf("  name:      %s\n", name)
			fmt.Printf("  namespace: %s\n", ns)
			fmt.Printf("  ssh:       agentikube ssh %s\n", handle)
			return nil
		},
	}

	cmd.Flags().StringVar(&provider, "provider", "", "LLM provider name (env: SANDBOX_LLM_PROVIDER)")
	cmd.Flags().StringVar(&apiKey, "api-key", "", "LLM provider API key (env: SANDBOX_API_KEY)")

	return cmd
}
94
internal/commands/destroy.go
Normal file
94
internal/commands/destroy.go
Normal file
|
|
@ -0,0 +1,94 @@
|
|||
package commands

import (
	"bufio"
	"context"
	"fmt"
	"os"
	"strings"

	"github.com/rathi/agentikube/internal/kube"
	"github.com/spf13/cobra"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// NewDestroyCmd returns the "destroy <handle>" subcommand.
//
// It deletes the SandboxClaim, Secret, and PVC named "sandbox-<handle>".
// NotFound errors are tolerated on every resource so the command stays
// idempotent: re-running destroy after a partial failure cleans up the
// remaining objects instead of aborting on the first missing one.
func NewDestroyCmd() *cobra.Command {
	var yes bool

	cmd := &cobra.Command{
		Use:   "destroy <handle>",
		Short: "Destroy a sandbox and its resources",
		Long:  "Deletes the SandboxClaim, Secret, and PVC for the given handle.",
		Args:  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := context.Background()
			handle := args[0]

			// Interactive confirmation unless --yes was passed.
			if !yes {
				fmt.Printf("are you sure you want to destroy sandbox %q? [y/N] ", handle)
				scanner := bufio.NewScanner(os.Stdin)
				scanner.Scan()
				answer := strings.TrimSpace(strings.ToLower(scanner.Text()))
				if answer != "y" && answer != "yes" {
					fmt.Println("aborted")
					return nil
				}
			}

			cfg, err := loadConfig(cmd)
			if err != nil {
				return err
			}

			client, err := kube.NewClient()
			if err != nil {
				return fmt.Errorf("connecting to cluster: %w", err)
			}

			ns := cfg.Namespace
			name := "sandbox-" + handle

			claimGVR := schema.GroupVersionResource{
				Group:    "agentsandbox.dev",
				Version:  "v1",
				Resource: "sandboxclaims",
			}

			secretGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
			pvcGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"}

			// Delete SandboxClaim; NotFound means it is already gone.
			err = client.Dynamic().Resource(claimGVR).Namespace(ns).Delete(ctx, name, metav1.DeleteOptions{})
			switch {
			case err == nil:
				fmt.Printf("[ok] SandboxClaim %q deleted\n", name)
			case errors.IsNotFound(err):
				fmt.Printf("[ok] SandboxClaim %q already deleted\n", name)
			default:
				return fmt.Errorf("deleting SandboxClaim %q: %w", name, err)
			}

			// Delete Secret; NotFound is likewise tolerated.
			err = client.Dynamic().Resource(secretGVR).Namespace(ns).Delete(ctx, name, metav1.DeleteOptions{})
			switch {
			case err == nil:
				fmt.Printf("[ok] Secret %q deleted\n", name)
			case errors.IsNotFound(err):
				fmt.Printf("[ok] Secret %q already deleted\n", name)
			default:
				return fmt.Errorf("deleting Secret %q: %w", name, err)
			}

			// Delete PVC (best-effort; it may never have been created).
			err = client.Dynamic().Resource(pvcGVR).Namespace(ns).Delete(ctx, name, metav1.DeleteOptions{})
			if err != nil {
				if !errors.IsNotFound(err) {
					fmt.Printf("[warn] could not delete PVC %q: %v\n", name, err)
				}
			} else {
				fmt.Printf("[ok] PVC %q deleted\n", name)
			}

			fmt.Printf("\nsandbox %q destroyed\n", handle)
			return nil
		},
	}

	cmd.Flags().BoolVar(&yes, "yes", false, "skip confirmation prompt")

	return cmd
}
65
internal/commands/down.go
Normal file
65
internal/commands/down.go
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
package commands

import (
	"context"
	"fmt"

	"github.com/rathi/agentikube/internal/kube"
	"github.com/spf13/cobra"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// NewDownCmd returns the "down" subcommand. It tears down the shared
// sandbox infrastructure (SandboxWarmPool and SandboxTemplate) while
// leaving individual user sandboxes untouched.
func NewDownCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "down",
		Short: "Remove sandbox infrastructure (preserves user sandboxes)",
		Long:  "Deletes the SandboxWarmPool and SandboxTemplate. User sandboxes are preserved.",
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := context.Background()

			cfg, err := loadConfig(cmd)
			if err != nil {
				return err
			}

			client, err := kube.NewClient()
			if err != nil {
				return fmt.Errorf("connecting to cluster: %w", err)
			}

			ns := cfg.Namespace

			// Both infra objects live in the agentsandbox.dev/v1 group.
			// Deletion is best-effort: a failure only warns so the second
			// object is still attempted.
			targets := []struct {
				kind     string
				resource string
				name     string
			}{
				{"SandboxWarmPool", "sandboxwarmpools", "sandbox-warm-pool"},
				{"SandboxTemplate", "sandboxtemplates", "sandbox-template"},
			}

			for _, t := range targets {
				gvr := schema.GroupVersionResource{
					Group:    "agentsandbox.dev",
					Version:  "v1",
					Resource: t.resource,
				}
				if err := client.Dynamic().Resource(gvr).Namespace(ns).Delete(ctx, t.name, metav1.DeleteOptions{}); err != nil {
					fmt.Printf("[warn] could not delete %s: %v\n", t.kind, err)
				} else {
					fmt.Printf("[ok] %s deleted\n", t.kind)
				}
			}

			fmt.Println("\nwarm pool and template deleted. User sandboxes are preserved.")
			return nil
		},
	}

	return cmd
}
11
internal/commands/helpers.go
Normal file
11
internal/commands/helpers.go
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
package commands
|
||||
|
||||
import (
|
||||
"github.com/rathi/agentikube/internal/config"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func loadConfig(cmd *cobra.Command) (*config.Config, error) {
|
||||
cfgPath, _ := cmd.Flags().GetString("config")
|
||||
return config.Load(cfgPath)
|
||||
}
|
||||
97
internal/commands/init.go
Normal file
97
internal/commands/init.go
Normal file
|
|
@ -0,0 +1,97 @@
|
|||
package commands

import (
	"context"
	"fmt"
	"os/exec"
	"strings"

	"github.com/rathi/agentikube/internal/kube"
	"github.com/spf13/cobra"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// crdInstallURL points at the upstream agent-sandbox install manifest
// applied during init.
const crdInstallURL = "https://raw.githubusercontent.com/agent-sandbox/agent-sandbox/main/deploy/install.yaml"

// NewInitCmd returns the "init" subcommand, which verifies cluster
// connectivity, applies the agent-sandbox CRDs via kubectl, checks for
// the EFS CSI driver and Karpenter, and ensures the target namespace.
func NewInitCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "init",
		Short: "Initialize the cluster for agent sandboxes",
		Long:  "Checks prerequisites, installs CRDs, and creates the target namespace.",
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := context.Background()

			cfg, err := loadConfig(cmd)
			if err != nil {
				return err
			}

			// Building the client doubles as the connectivity check.
			client, err := kube.NewClient()
			if err != nil {
				return fmt.Errorf("connecting to cluster: %w", err)
			}
			fmt.Println("[ok] connected to Kubernetes cluster")

			// Apply the agent-sandbox CRDs by shelling out to kubectl.
			fmt.Println("applying agent-sandbox CRDs...")
			out, err := exec.CommandContext(ctx, "kubectl", "apply", "-f", crdInstallURL).CombinedOutput()
			if err != nil {
				return fmt.Errorf("applying CRDs: %s: %w", strings.TrimSpace(string(out)), err)
			}
			fmt.Println("[ok] agent-sandbox CRDs applied")

			// EFS CSI driver check: any kube-system DaemonSet whose name
			// contains "efs-csi" counts.
			dsList, err := client.Clientset().AppsV1().DaemonSets("kube-system").List(ctx, metav1.ListOptions{})
			if err != nil {
				return fmt.Errorf("listing daemonsets in kube-system: %w", err)
			}
			var efsFound bool
			for i := range dsList.Items {
				if strings.Contains(dsList.Items[i].Name, "efs-csi") {
					efsFound = true
					break
				}
			}
			if efsFound {
				fmt.Println("[ok] EFS CSI driver found")
			} else {
				fmt.Println("[warn] EFS CSI driver not found - install it before using EFS storage")
			}

			// Karpenter check: probe both common namespaces. List errors
			// are ignored because this is a best-effort detection.
			findKarpenter := func() bool {
				for _, ns := range []string{"karpenter", "kube-system"} {
					depList, err := client.Clientset().AppsV1().Deployments(ns).List(ctx, metav1.ListOptions{})
					if err != nil {
						continue
					}
					for i := range depList.Items {
						if strings.Contains(depList.Items[i].Name, "karpenter") {
							return true
						}
					}
				}
				return false
			}
			if findKarpenter() {
				fmt.Println("[ok] Karpenter found")
			} else {
				fmt.Println("[warn] Karpenter not found - required if compute.type is karpenter")
			}

			// Namespace creation is idempotent via EnsureNamespace.
			if err := client.EnsureNamespace(ctx, cfg.Namespace); err != nil {
				return fmt.Errorf("creating namespace %q: %w", cfg.Namespace, err)
			}
			fmt.Printf("[ok] namespace %q ready\n", cfg.Namespace)

			fmt.Println("\ninit complete")
			return nil
		},
	}

	return cmd
}
138
internal/commands/list.go
Normal file
138
internal/commands/list.go
Normal file
|
|
@ -0,0 +1,138 @@
|
|||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/rathi/agentikube/internal/kube"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// NewListCmd returns the "list" subcommand, which prints a table of
// all SandboxClaims in the configured namespace (handle, status, age,
// and backing pod).
func NewListCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "list",
		Short: "List all sandboxes",
		Long:  "Lists all SandboxClaims in the configured namespace.",
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := context.Background()

			cfg, err := loadConfig(cmd)
			if err != nil {
				return err
			}

			client, err := kube.NewClient()
			if err != nil {
				return fmt.Errorf("connecting to cluster: %w", err)
			}

			// SandboxClaims are CRD-backed, so they are accessed through
			// the dynamic client rather than a typed clientset.
			claimGVR := schema.GroupVersionResource{
				Group:    "agentsandbox.dev",
				Version:  "v1",
				Resource: "sandboxclaims",
			}

			list, err := client.Dynamic().Resource(claimGVR).Namespace(cfg.Namespace).List(ctx, metav1.ListOptions{})
			if err != nil {
				return fmt.Errorf("listing SandboxClaims: %w", err)
			}

			w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0)
			fmt.Fprintln(w, "HANDLE\tSTATUS\tAGE\tPOD")

			for _, item := range list.Items {
				name := item.GetName()
				// Claims are named "sandbox-<handle>"; display just the
				// handle when that prefix is present.
				handle := name
				if len(name) > 8 && name[:8] == "sandbox-" {
					handle = name[8:]
				}

				status := extractStatus(item.Object)
				podName := extractPodName(item.Object)
				age := formatAge(item.GetCreationTimestamp().Time)

				fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", handle, status, age, podName)
			}

			w.Flush()
			return nil
		},
	}

	return cmd
}
|
||||
|
||||
// extractStatus derives a human-readable status string from a
// SandboxClaim's unstructured object.
//
// Resolution order:
//   - no status map at all          -> "Unknown"
//   - missing or empty conditions   -> "Pending"
//   - Ready condition with status True  -> "Ready"
//   - Ready condition not True      -> its reason, or "NotReady" if none
//   - conditions without a Ready entry  -> "Pending"
func extractStatus(obj map[string]interface{}) string {
	statusMap, hasStatus := obj["status"].(map[string]interface{})
	if !hasStatus {
		return "Unknown"
	}

	conds, hasConds := statusMap["conditions"].([]interface{})
	if !hasConds || len(conds) == 0 {
		return "Pending"
	}

	for _, raw := range conds {
		entry, isMap := raw.(map[string]interface{})
		if !isMap {
			continue
		}
		typ, _ := entry["type"].(string)
		if typ != "Ready" {
			continue
		}
		if state, _ := entry["status"].(string); state == "True" {
			return "Ready"
		}
		if reason, _ := entry["reason"].(string); reason != "" {
			return reason
		}
		return "NotReady"
	}

	return "Pending"
}
|
||||
|
||||
// extractPodName returns the name of the pod backing a SandboxClaim,
// preferring status.podName and falling back to the
// "agentsandbox.dev/pod-name" annotation. Returns "-" when neither
// source yields a name.
func extractPodName(obj map[string]interface{}) string {
	if statusMap, ok := obj["status"].(map[string]interface{}); ok {
		if pod, ok := statusMap["podName"].(string); ok && pod != "" {
			return pod
		}
	}

	// Fall back to the controller-set annotation.
	if meta, ok := obj["metadata"].(map[string]interface{}); ok {
		if anns, ok := meta["annotations"].(map[string]interface{}); ok {
			if pod, ok := anns["agentsandbox.dev/pod-name"].(string); ok {
				return pod
			}
		}
	}

	return "-"
}
|
||||
|
||||
func formatAge(created time.Time) string {
|
||||
d := time.Since(created)
|
||||
switch {
|
||||
case d < time.Minute:
|
||||
return fmt.Sprintf("%ds", int(d.Seconds()))
|
||||
case d < time.Hour:
|
||||
return fmt.Sprintf("%dm", int(d.Minutes()))
|
||||
case d < 24*time.Hour:
|
||||
return fmt.Sprintf("%dh", int(d.Hours()))
|
||||
default:
|
||||
return fmt.Sprintf("%dd", int(d.Hours()/24))
|
||||
}
|
||||
}
|
||||
58
internal/commands/ssh.go
Normal file
58
internal/commands/ssh.go
Normal file
|
|
@ -0,0 +1,58 @@
|
|||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/rathi/agentikube/internal/kube"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
func NewSSHCmd() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "ssh <handle>",
|
||||
Short: "Open a shell into a sandbox",
|
||||
Long: "Exec into the sandbox pod for the given handle.",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
handle := args[0]
|
||||
|
||||
cfg, err := loadConfig(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client, err := kube.NewClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("connecting to cluster: %w", err)
|
||||
}
|
||||
|
||||
ns := cfg.Namespace
|
||||
name := "sandbox-" + handle
|
||||
|
||||
claimGVR := schema.GroupVersionResource{
|
||||
Group: "agentsandbox.dev",
|
||||
Version: "v1",
|
||||
Resource: "sandboxclaims",
|
||||
}
|
||||
|
||||
claim, err := client.Dynamic().Resource(claimGVR).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting SandboxClaim %q: %w", name, err)
|
||||
}
|
||||
|
||||
podName := extractPodName(claim.Object)
|
||||
if podName == "-" || podName == "" {
|
||||
return fmt.Errorf("sandbox %q does not have a pod assigned yet", handle)
|
||||
}
|
||||
|
||||
fmt.Printf("connecting to pod %s...\n", podName)
|
||||
return kube.Exec(ns, podName, []string{"/bin/sh"})
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
108
internal/commands/status.go
Normal file
108
internal/commands/status.go
Normal file
|
|
@ -0,0 +1,108 @@
|
|||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/rathi/agentikube/internal/kube"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// NewStatusCmd returns the "status" subcommand, which reports warm
// pool readiness, the total sandbox count, and (for Karpenter
// clusters) the number of Karpenter-managed nodes. Individual
// sections report their own errors inline so one failure does not
// hide the rest of the report.
func NewStatusCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "status",
		Short: "Show cluster and sandbox status",
		Long:  "Displays warm pool status, sandbox counts, and compute node information.",
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := context.Background()

			cfg, err := loadConfig(cmd)
			if err != nil {
				return err
			}

			client, err := kube.NewClient()
			if err != nil {
				return fmt.Errorf("connecting to cluster: %w", err)
			}

			ns := cfg.Namespace

			// Warm pool status: report the single well-known warm pool
			// object ("sandbox-warm-pool") if it exists.
			warmPoolGVR := schema.GroupVersionResource{
				Group:    "agentsandbox.dev",
				Version:  "v1",
				Resource: "sandboxwarmpools",
			}

			wp, err := client.Dynamic().Resource(warmPoolGVR).Namespace(ns).Get(ctx, "sandbox-warm-pool", metav1.GetOptions{})
			if err != nil {
				fmt.Printf("warm pool: not found (%v)\n", err)
			} else {
				spec, _ := wp.Object["spec"].(map[string]interface{})
				status, _ := wp.Object["status"].(map[string]interface{})

				// Counters may be absent while the controller is still
				// reconciling; getInt64 treats missing values as 0.
				replicas := getInt64(spec, "replicas")
				readyReplicas := getInt64(status, "readyReplicas")
				pendingReplicas := getInt64(status, "pendingReplicas")

				fmt.Println("warm pool:")
				fmt.Printf(" desired: %d\n", replicas)
				fmt.Printf(" ready: %d\n", readyReplicas)
				fmt.Printf(" pending: %d\n", pendingReplicas)
			}

			// Sandbox count: every SandboxClaim in the namespace.
			claimGVR := schema.GroupVersionResource{
				Group:    "agentsandbox.dev",
				Version:  "v1",
				Resource: "sandboxclaims",
			}

			claims, err := client.Dynamic().Resource(claimGVR).Namespace(ns).List(ctx, metav1.ListOptions{})
			if err != nil {
				fmt.Printf("\nsandboxes: error listing (%v)\n", err)
			} else {
				fmt.Printf("\nsandboxes: %d\n", len(claims.Items))
			}

			// Karpenter nodes (if applicable): nodes provisioned by
			// Karpenter carry the karpenter.sh/nodepool label.
			if cfg.Compute.Type == "karpenter" {
				nodes, err := client.Clientset().CoreV1().Nodes().List(ctx, metav1.ListOptions{
					LabelSelector: "karpenter.sh/nodepool",
				})
				if err != nil {
					fmt.Printf("\nkarpenter nodes: error listing (%v)\n", err)
				} else {
					fmt.Printf("\nkarpenter nodes: %d\n", len(nodes.Items))
				}
			}

			return nil
		},
	}

	return cmd
}
|
||||
|
||||
// getInt64 reads key from a generic map and coerces the value to
// int64. Numbers in unstructured objects may arrive as int64,
// float64, or int depending on the decoder; any other type, a missing
// key, or a nil map yields 0.
func getInt64(m map[string]interface{}, key string) int64 {
	if m == nil {
		return 0
	}
	switch n := m[key].(type) {
	case int64:
		return n
	case float64:
		return int64(n)
	case int:
		return int64(n)
	}
	return 0
}
|
||||
63
internal/commands/up.go
Normal file
63
internal/commands/up.go
Normal file
|
|
@ -0,0 +1,63 @@
|
|||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/rathi/agentikube/internal/kube"
|
||||
"github.com/rathi/agentikube/internal/manifest"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func NewUpCmd() *cobra.Command {
|
||||
var dryRun bool
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "up",
|
||||
Short: "Apply sandbox infrastructure to the cluster",
|
||||
Long: "Generates and applies all sandbox manifests (templates, warm pool, storage, compute).",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
|
||||
cfg, err := loadConfig(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
manifests, err := manifest.Generate(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("generating manifests: %w", err)
|
||||
}
|
||||
|
||||
if dryRun {
|
||||
fmt.Print(string(manifests))
|
||||
return nil
|
||||
}
|
||||
|
||||
client, err := kube.NewClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("connecting to cluster: %w", err)
|
||||
}
|
||||
|
||||
if err := client.ServerSideApply(ctx, manifests); err != nil {
|
||||
return fmt.Errorf("applying manifests: %w", err)
|
||||
}
|
||||
fmt.Println("[ok] manifests applied")
|
||||
|
||||
if cfg.Sandbox.WarmPool.Enabled {
|
||||
fmt.Println("waiting for warm pool to become ready...")
|
||||
if err := client.WaitForReady(ctx, cfg.Namespace, "sandboxwarmpools", "sandbox-warm-pool"); err != nil {
|
||||
return fmt.Errorf("waiting for warm pool: %w", err)
|
||||
}
|
||||
fmt.Println("[ok] warm pool ready")
|
||||
}
|
||||
|
||||
fmt.Println("\ninfrastructure is up")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().BoolVar(&dryRun, "dry-run", false, "print manifests to stdout without applying")
|
||||
|
||||
return cmd
|
||||
}
|
||||
102
internal/config/config.go
Normal file
102
internal/config/config.go
Normal file
|
|
@ -0,0 +1,102 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// Config is the top-level configuration parsed from agentikube.yaml.
type Config struct {
	Namespace string        `yaml:"namespace"` // namespace holding all sandbox resources
	Compute   ComputeConfig `yaml:"compute"`
	Storage   StorageConfig `yaml:"storage"`
	Sandbox   SandboxConfig `yaml:"sandbox"`
}

// ComputeConfig selects and parameterizes the node provisioning backend.
type ComputeConfig struct {
	Type             string            `yaml:"type"` // karpenter | fargate
	InstanceTypes    []string          `yaml:"instanceTypes"`    // EC2 instance types (karpenter only)
	CapacityTypes    []string          `yaml:"capacityTypes"`    // e.g. spot, on-demand (karpenter only)
	MaxCPU           int               `yaml:"maxCpu"`           // cluster-wide CPU ceiling for the node pool
	MaxMemory        string            `yaml:"maxMemory"`        // cluster-wide memory ceiling, e.g. "8000Gi"
	Consolidation    bool              `yaml:"consolidation"`    // enable aggressive node bin-packing
	FargateSelectors []FargateSelector `yaml:"fargateSelectors"` // only used when Type is fargate
}

// FargateSelector matches a namespace for Fargate scheduling.
type FargateSelector struct {
	Namespace string `yaml:"namespace"`
}

// StorageConfig describes the persistent storage backend.
type StorageConfig struct {
	Type          string `yaml:"type"`          // efs (only supported value)
	FilesystemID  string `yaml:"filesystemId"`  // EFS filesystem ID, e.g. fs-0123...
	BasePath      string `yaml:"basePath"`      // root directory for sandbox access points
	UID           int    `yaml:"uid"`           // owner UID for created access points (defaults to 1000)
	GID           int    `yaml:"gid"`           // owner GID for created access points (defaults to 1000)
	ReclaimPolicy string `yaml:"reclaimPolicy"` // Retain | Delete (defaults to Retain)
}

// SandboxConfig describes the sandbox pod template.
type SandboxConfig struct {
	Image           string            `yaml:"image"`     // container image run in each sandbox
	Ports           []int             `yaml:"ports"`     // container ports to expose
	MountPath       string            `yaml:"mountPath"` // mount point of the workspace volume
	Resources       ResourcesConfig   `yaml:"resources"`
	Env             map[string]string `yaml:"env"` // extra environment variables
	SecurityContext SecurityContext   `yaml:"securityContext"`
	Probes          ProbesConfig      `yaml:"probes"`
	WarmPool        WarmPoolConfig    `yaml:"warmPool"`
	NetworkPolicy   NetworkPolicy     `yaml:"networkPolicy"`
}

// ResourcesConfig holds Kubernetes resource requests and limits.
type ResourcesConfig struct {
	Requests ResourceValues `yaml:"requests"`
	Limits   ResourceValues `yaml:"limits"`
}

// ResourceValues is a cpu/memory pair in Kubernetes quantity syntax.
type ResourceValues struct {
	CPU    string `yaml:"cpu"`
	Memory string `yaml:"memory"`
}

// SecurityContext mirrors the container security settings applied to
// sandbox containers.
type SecurityContext struct {
	RunAsUser    int  `yaml:"runAsUser"`
	RunAsGroup   int  `yaml:"runAsGroup"`
	RunAsNonRoot bool `yaml:"runAsNonRoot"`
}

// ProbesConfig configures the TCP startup and readiness probes.
type ProbesConfig struct {
	Port                    int `yaml:"port"`                    // TCP port probed; defaults to the first sandbox port
	StartupFailureThreshold int `yaml:"startupFailureThreshold"` // probe failures tolerated during startup (defaults to 30)
}

// WarmPoolConfig controls the pool of pre-started sandboxes.
type WarmPoolConfig struct {
	Enabled    bool `yaml:"enabled"`
	Size       int  `yaml:"size"`       // warm sandboxes kept ready (defaults to 5 when enabled)
	TTLMinutes int  `yaml:"ttlMinutes"` // warm sandbox lifetime (defaults to 120)
}

// NetworkPolicy controls sandbox network isolation.
type NetworkPolicy struct {
	EgressAllowAll bool  `yaml:"egressAllowAll"` // allow all outbound traffic
	IngressPorts   []int `yaml:"ingressPorts"`   // inbound TCP ports to allow
}
|
||||
|
||||
// Load reads and parses the config file at the given path.
|
||||
func Load(path string) (*Config, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading config file: %w", err)
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
if err := yaml.Unmarshal(data, &cfg); err != nil {
|
||||
return nil, fmt.Errorf("parsing config file: %w", err)
|
||||
}
|
||||
|
||||
if err := Validate(&cfg); err != nil {
|
||||
return nil, fmt.Errorf("validating config: %w", err)
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
99
internal/config/validate.go
Normal file
99
internal/config/validate.go
Normal file
|
|
@ -0,0 +1,99 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Validate checks that all required fields are present and values are valid.
// It also mutates cfg in place to apply defaults (storage reclaim policy,
// UID/GID, warm pool sizing, probe settings), so it must run before the
// config is used to render manifests. All problems are accumulated and
// returned together as one error.
func Validate(cfg *Config) error {
	var errs []string

	if cfg.Namespace == "" {
		errs = append(errs, "namespace is required")
	}

	// Compute validation: required fields depend on the backend.
	switch cfg.Compute.Type {
	case "karpenter":
		if len(cfg.Compute.InstanceTypes) == 0 {
			errs = append(errs, "compute.instanceTypes is required when type is karpenter")
		}
		if len(cfg.Compute.CapacityTypes) == 0 {
			errs = append(errs, "compute.capacityTypes is required when type is karpenter")
		}
		if cfg.Compute.MaxCPU <= 0 {
			errs = append(errs, "compute.maxCpu must be > 0")
		}
		if cfg.Compute.MaxMemory == "" {
			errs = append(errs, "compute.maxMemory is required when type is karpenter")
		}
	case "fargate":
		if len(cfg.Compute.FargateSelectors) == 0 {
			errs = append(errs, "compute.fargateSelectors is required when type is fargate")
		}
	case "":
		errs = append(errs, "compute.type is required (karpenter or fargate)")
	default:
		errs = append(errs, fmt.Sprintf("compute.type must be karpenter or fargate, got %q", cfg.Compute.Type))
	}

	// Storage validation
	if cfg.Storage.Type == "" {
		errs = append(errs, "storage.type is required")
	} else if cfg.Storage.Type != "efs" {
		errs = append(errs, fmt.Sprintf("storage.type must be efs, got %q", cfg.Storage.Type))
	}
	if cfg.Storage.FilesystemID == "" {
		errs = append(errs, "storage.filesystemId is required")
	}
	if cfg.Storage.BasePath == "" {
		errs = append(errs, "storage.basePath is required")
	}
	// Default reclaim policy keeps sandbox data on deletion.
	if cfg.Storage.ReclaimPolicy == "" {
		cfg.Storage.ReclaimPolicy = "Retain"
	} else if cfg.Storage.ReclaimPolicy != "Retain" && cfg.Storage.ReclaimPolicy != "Delete" {
		errs = append(errs, fmt.Sprintf("storage.reclaimPolicy must be Retain or Delete, got %q", cfg.Storage.ReclaimPolicy))
	}

	// Storage defaults
	// NOTE(review): an explicit uid/gid of 0 is indistinguishable from
	// "unset" here and gets silently replaced with 1000 — confirm that
	// root ownership is intentionally unsupported.
	if cfg.Storage.UID == 0 {
		cfg.Storage.UID = 1000
	}
	if cfg.Storage.GID == 0 {
		cfg.Storage.GID = 1000
	}

	// Sandbox validation
	if cfg.Sandbox.Image == "" {
		errs = append(errs, "sandbox.image is required")
	}
	if len(cfg.Sandbox.Ports) == 0 {
		errs = append(errs, "sandbox.ports is required")
	}
	if cfg.Sandbox.MountPath == "" {
		errs = append(errs, "sandbox.mountPath is required")
	}

	// Warm pool defaults
	if cfg.Sandbox.WarmPool.Size == 0 && cfg.Sandbox.WarmPool.Enabled {
		cfg.Sandbox.WarmPool.Size = 5
	}
	if cfg.Sandbox.WarmPool.TTLMinutes == 0 {
		cfg.Sandbox.WarmPool.TTLMinutes = 120
	}

	// Probes defaults: probe the first exposed port unless overridden.
	if cfg.Sandbox.Probes.Port == 0 && len(cfg.Sandbox.Ports) > 0 {
		cfg.Sandbox.Probes.Port = cfg.Sandbox.Ports[0]
	}
	if cfg.Sandbox.Probes.StartupFailureThreshold == 0 {
		cfg.Sandbox.Probes.StartupFailureThreshold = 30
	}

	if len(errs) > 0 {
		return fmt.Errorf("config validation errors:\n - %s", strings.Join(errs, "\n - "))
	}

	return nil
}
|
||||
92
internal/kube/apply.go
Normal file
92
internal/kube/apply.go
Normal file
|
|
@ -0,0 +1,92 @@
|
|||
package kube
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
k8syaml "k8s.io/apimachinery/pkg/util/yaml"
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/discovery/cached/memory"
|
||||
"k8s.io/client-go/restmapper"
|
||||
yamlserializer "k8s.io/apimachinery/pkg/runtime/serializer/yaml"
|
||||
)
|
||||
|
||||
// ServerSideApply splits a multi-document YAML into individual resources
// and applies each one using server-side apply with the "agentikube" field manager.
func (c *Client) ServerSideApply(ctx context.Context, manifests []byte) error {
	decoder := k8syaml.NewYAMLOrJSONDecoder(bytes.NewReader(manifests), 4096)

	// A REST mapper backed by cached discovery translates each object's
	// GroupVersionKind into the GroupVersionResource the dynamic client needs.
	discoveryClient, ok := c.Clientset().Discovery().(*discovery.DiscoveryClient)
	if !ok {
		return fmt.Errorf("failed to get discovery client")
	}
	cachedDiscovery := memory.NewMemCacheClient(discoveryClient)
	mapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscovery)

	deserializer := yamlserializer.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)

	// One iteration per YAML document; stops cleanly at EOF.
	for {
		var rawObj unstructured.Unstructured
		if err := decoder.Decode(&rawObj); err != nil {
			if err == io.EOF {
				break
			}
			return fmt.Errorf("decoding YAML document: %w", err)
		}

		// Skip empty documents (e.g. stray "---" separators).
		if len(rawObj.Object) == 0 {
			continue
		}

		// Re-encode to JSON for the patch body
		rawJSON, err := rawObj.MarshalJSON()
		if err != nil {
			return fmt.Errorf("marshaling to JSON: %w", err)
		}

		// Decode to get the GVK
		obj := &unstructured.Unstructured{}
		_, gvk, err := deserializer.Decode(rawJSON, nil, obj)
		if err != nil {
			return fmt.Errorf("deserializing object: %w", err)
		}

		// Map GVK to GVR using the REST mapper
		mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
		if err != nil {
			return fmt.Errorf("mapping GVK %s to GVR: %w", gvk.String(), err)
		}

		gvr := mapping.Resource
		name := obj.GetName()
		namespace := obj.GetNamespace()

		applyOpts := metav1.ApplyOptions{
			FieldManager: "agentikube",
		}

		// Apply using the dynamic client - handle namespaced vs cluster-scoped.
		// NOTE(review): scope is decided by whether the manifest sets
		// metadata.namespace rather than by mapping.Scope — a namespaced
		// object missing an explicit namespace would be applied
		// cluster-scoped; confirm all generated manifests set one.
		if namespace != "" {
			_, err = c.Dynamic().Resource(gvr).Namespace(namespace).Patch(
				ctx, name, types.ApplyPatchType, rawJSON, applyOpts.ToPatchOptions(),
			)
		} else {
			_, err = c.Dynamic().Resource(gvr).Patch(
				ctx, name, types.ApplyPatchType, rawJSON, applyOpts.ToPatchOptions(),
			)
		}
		if err != nil {
			return fmt.Errorf("applying %s/%s: %w", gvk.Kind, name, err)
		}

		fmt.Printf("applied %s/%s\n", gvk.Kind, name)
	}

	return nil
}
|
||||
76
internal/kube/client.go
Normal file
76
internal/kube/client.go
Normal file
|
|
@ -0,0 +1,76 @@
|
|||
package kube
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
// Client wraps the Kubernetes dynamic client, typed clientset, and REST config.
type Client struct {
	dynamic    dynamic.Interface    // for CRDs / unstructured resources
	clientset  kubernetes.Interface // for core typed resources
	restConfig *rest.Config         // retained for callers that need raw REST access
}

// Dynamic returns the dynamic (unstructured) client.
func (c *Client) Dynamic() dynamic.Interface { return c.dynamic }

// Clientset returns the typed Kubernetes clientset.
func (c *Client) Clientset() kubernetes.Interface { return c.clientset }

// RestConfig returns the underlying REST configuration.
func (c *Client) RestConfig() *rest.Config { return c.restConfig }
|
||||
|
||||
// NewClient creates a Kubernetes client using the default kubeconfig loading
|
||||
// rules (KUBECONFIG env var or ~/.kube/config).
|
||||
func NewClient() (*Client, error) {
|
||||
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||
configOverrides := &clientcmd.ConfigOverrides{}
|
||||
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
|
||||
|
||||
restConfig, err := kubeConfig.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("loading kubeconfig: %w", err)
|
||||
}
|
||||
|
||||
dynamicClient, err := dynamic.NewForConfig(restConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating dynamic client: %w", err)
|
||||
}
|
||||
|
||||
clientset, err := kubernetes.NewForConfig(restConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating clientset: %w", err)
|
||||
}
|
||||
|
||||
return &Client{
|
||||
dynamic: dynamicClient,
|
||||
clientset: clientset,
|
||||
restConfig: restConfig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// EnsureNamespace creates the namespace if it does not already exist.
|
||||
func (c *Client) EnsureNamespace(ctx context.Context, name string) error {
|
||||
_, err := c.clientset.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if !errors.IsNotFound(err) {
|
||||
return fmt.Errorf("checking namespace %q: %w", name, err)
|
||||
}
|
||||
|
||||
ns := &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
}
|
||||
_, err = c.clientset.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating namespace %q: %w", name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
24
internal/kube/exec.go
Normal file
24
internal/kube/exec.go
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
package kube
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// Exec runs kubectl exec to attach an interactive terminal to the
// specified pod. If command is empty, it defaults to /bin/sh. The
// child process inherits this process's stdin/stdout/stderr so the
// session is fully interactive.
func Exec(namespace, podName string, command []string) error {
	shell := command
	if len(shell) == 0 {
		shell = []string{"/bin/sh"}
	}

	// Everything after "--" is passed through to the container verbatim.
	kubectlArgs := append([]string{"exec", "-it", "-n", namespace, podName, "--"}, shell...)

	proc := exec.Command("kubectl", kubectlArgs...)
	proc.Stdin = os.Stdin
	proc.Stdout = os.Stdout
	proc.Stderr = os.Stderr

	return proc.Run()
}
|
||||
81
internal/kube/wait.go
Normal file
81
internal/kube/wait.go
Normal file
|
|
@ -0,0 +1,81 @@
|
|||
package kube
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
)
|
||||
|
||||
// WaitForReady watches a resource in the agentsandbox.dev/v1 group until its
// Ready condition becomes True or the context is cancelled/times out.
// The resource parameter is the plural resource name (e.g. "sandboxclaims", "sandboxwarmpools").
func (c *Client) WaitForReady(ctx context.Context, namespace, resource, name string) error {
	gvr := schema.GroupVersionResource{
		Group:    "agentsandbox.dev",
		Version:  "v1",
		Resource: resource,
	}

	// Watch only the named object; the field selector avoids streaming
	// events for every resource of this kind in the namespace.
	watcher, err := c.Dynamic().Resource(gvr).Namespace(namespace).Watch(ctx, metav1.ListOptions{
		FieldSelector: fmt.Sprintf("metadata.name=%s", name),
	})
	if err != nil {
		return fmt.Errorf("watching %s %s/%s: %w", resource, namespace, name, err)
	}
	defer watcher.Stop()

	for {
		select {
		case <-ctx.Done():
			// Covers both explicit cancellation and deadline expiry.
			return fmt.Errorf("timed out waiting for %s %s/%s to become ready", resource, namespace, name)
		case event, ok := <-watcher.ResultChan():
			if !ok {
				// NOTE(review): the API server can close watches benignly
				// (e.g. server-side timeouts); surfacing that as an error
				// means callers may fail spuriously — consider
				// re-establishing the watch instead.
				return fmt.Errorf("watch channel closed for %s %s/%s", resource, namespace, name)
			}
			if event.Type == watch.Error {
				return fmt.Errorf("watch error for %s %s/%s", resource, namespace, name)
			}

			obj, ok := event.Object.(*unstructured.Unstructured)
			if !ok {
				// Ignore events carrying unexpected object types.
				continue
			}

			if isReady(obj) {
				return nil
			}
		}
	}
}
|
||||
|
||||
// isReady checks whether an unstructured object has a condition with
|
||||
// type=Ready and status=True.
|
||||
func isReady(obj *unstructured.Unstructured) bool {
|
||||
status, found, err := unstructured.NestedMap(obj.Object, "status")
|
||||
if err != nil || !found {
|
||||
return false
|
||||
}
|
||||
|
||||
conditionsRaw, found, err := unstructured.NestedSlice(status, "conditions")
|
||||
if err != nil || !found {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, c := range conditionsRaw {
|
||||
condition, ok := c.(map[string]interface{})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
condType, _ := condition["type"].(string)
|
||||
condStatus, _ := condition["status"].(string)
|
||||
if condType == "Ready" && condStatus == "True" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
51
internal/manifest/generate.go
Normal file
51
internal/manifest/generate.go
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
package manifest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"text/template"
|
||||
|
||||
"github.com/rathi/agentikube/internal/config"
|
||||
)
|
||||
|
||||
// Generate renders all applicable Kubernetes manifests from the embedded
|
||||
// templates using the provided configuration. Templates are selected based
|
||||
// on the compute type and warm pool settings.
|
||||
func Generate(cfg *config.Config) ([]byte, error) {
|
||||
tmpl, err := template.ParseFS(templateFS, "templates/*.yaml.tmpl")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing templates: %w", err)
|
||||
}
|
||||
|
||||
// Always-rendered templates
|
||||
names := []string{
|
||||
"namespace.yaml.tmpl",
|
||||
"storageclass-efs.yaml.tmpl",
|
||||
"sandbox-template.yaml.tmpl",
|
||||
}
|
||||
|
||||
// Conditionally add Karpenter templates
|
||||
if cfg.Compute.Type == "karpenter" {
|
||||
names = append(names,
|
||||
"karpenter-nodepool.yaml.tmpl",
|
||||
"karpenter-ec2nodeclass.yaml.tmpl",
|
||||
)
|
||||
}
|
||||
|
||||
// Conditionally add warm pool template
|
||||
if cfg.Sandbox.WarmPool.Enabled {
|
||||
names = append(names, "warm-pool.yaml.tmpl")
|
||||
}
|
||||
|
||||
var out bytes.Buffer
|
||||
for i, name := range names {
|
||||
if i > 0 {
|
||||
out.WriteString("---\n")
|
||||
}
|
||||
if err := tmpl.ExecuteTemplate(&out, name, cfg); err != nil {
|
||||
return nil, fmt.Errorf("rendering template %s: %w", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
return out.Bytes(), nil
|
||||
}
|
||||
6
internal/manifest/templates.go
Normal file
6
internal/manifest/templates.go
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
package manifest
|
||||
|
||||
import "embed"
|
||||
|
||||
// templateFS holds the manifest templates compiled into the binary,
// so manifest generation works without any files on disk.
//
//go:embed templates/*.yaml.tmpl
var templateFS embed.FS
|
||||
14
internal/manifest/templates/karpenter-ec2nodeclass.yaml.tmpl
Normal file
14
internal/manifest/templates/karpenter-ec2nodeclass.yaml.tmpl
Normal file
|
|
@ -0,0 +1,14 @@
|
|||
# EC2NodeClass for Karpenter-provisioned sandbox nodes.
# Subnets, security groups, and the node IAM role are discovered via
# the "karpenter.sh/discovery: <namespace>-cluster" tag convention.
# NOTE(review): this assumes the EKS cluster is named
# "<namespace>-cluster" — confirm against the actual tagging setup.
apiVersion: karpenter.k8s.aws/v1
kind: EC2NodeClass
metadata:
  name: sandbox-nodes
spec:
  # Track the latest Amazon Linux 2023 AMI.
  amiSelectorTerms:
    - alias: "al2023@latest"
  subnetSelectorTerms:
    - tags:
        karpenter.sh/discovery: "{{ .Namespace }}-cluster"
  securityGroupSelectorTerms:
    - tags:
        karpenter.sh/discovery: "{{ .Namespace }}-cluster"
  role: "KarpenterNodeRole-{{ .Namespace }}-cluster"
|
||||
33
internal/manifest/templates/karpenter-nodepool.yaml.tmpl
Normal file
33
internal/manifest/templates/karpenter-nodepool.yaml.tmpl
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
# Karpenter NodePool for sandbox workloads. Instance types, capacity
# types, limits, and consolidation policy are rendered from the
# compute section of agentikube.yaml.
apiVersion: karpenter.sh/v1
kind: NodePool
metadata:
  name: sandbox-pool
spec:
  template:
    spec:
      requirements:
        - key: node.kubernetes.io/instance-type
          operator: In
          values:
          {{- range .Compute.InstanceTypes }}
            - {{ . }}
          {{- end }}
        - key: karpenter.sh/capacity-type
          operator: In
          values:
          {{- range .Compute.CapacityTypes }}
            - {{ . }}
          {{- end }}
        # x86-64 only.
        - key: kubernetes.io/arch
          operator: In
          values:
            - amd64
      nodeClassRef:
        name: sandbox-nodes
        group: karpenter.k8s.aws
        kind: EC2NodeClass
  # Cluster-wide ceiling across all nodes launched from this pool.
  limits:
    cpu: {{ .Compute.MaxCPU }}
    memory: {{ .Compute.MaxMemory }}
  disruption:
    consolidationPolicy: {{ if .Compute.Consolidation }}WhenEmptyOrUnderutilized{{ else }}WhenEmpty{{ end }}
|
||||
4
internal/manifest/templates/namespace.yaml.tmpl
Normal file
4
internal/manifest/templates/namespace.yaml.tmpl
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
# Namespace holding every sandbox resource agentikube manages.
apiVersion: v1
kind: Namespace
metadata:
  name: {{ .Namespace }}
|
||||
66
internal/manifest/templates/sandbox-template.yaml.tmpl
Normal file
66
internal/manifest/templates/sandbox-template.yaml.tmpl
Normal file
|
|
@ -0,0 +1,66 @@
|
|||
# SandboxTemplate consumed by the agent-sandbox controller, rendered
# from the sandbox section of agentikube.yaml.
# NOTE(review): nesting reconstructed from a flattened source — verify
# volumeClaimTemplates/networkPolicy placement against the CRD schema.
apiVersion: agentsandbox.dev/v1
kind: SandboxTemplate
metadata:
  name: sandbox-template
  namespace: {{ .Namespace }}
spec:
  template:
    spec:
      containers:
        - name: sandbox
          image: {{ .Sandbox.Image }}
          ports:
          {{- range .Sandbox.Ports }}
            - containerPort: {{ . }}
          {{- end }}
          resources:
            requests:
              cpu: {{ .Sandbox.Resources.Requests.CPU }}
              memory: {{ .Sandbox.Resources.Requests.Memory }}
            limits:
              cpu: {{ .Sandbox.Resources.Limits.CPU }}
              memory: {{ .Sandbox.Resources.Limits.Memory }}
          securityContext:
            runAsUser: {{ .Sandbox.SecurityContext.RunAsUser }}
            runAsGroup: {{ .Sandbox.SecurityContext.RunAsGroup }}
            runAsNonRoot: {{ .Sandbox.SecurityContext.RunAsNonRoot }}
          env:
          {{- range $key, $value := .Sandbox.Env }}
            - name: {{ $key }}
              value: "{{ $value }}"
          {{- end }}
          # Startup probe allows slow images up to
          # failureThreshold * periodSeconds to begin listening.
          startupProbe:
            tcpSocket:
              port: {{ .Sandbox.Probes.Port }}
            failureThreshold: {{ .Sandbox.Probes.StartupFailureThreshold }}
            periodSeconds: 10
          readinessProbe:
            tcpSocket:
              port: {{ .Sandbox.Probes.Port }}
            periodSeconds: 10
          volumeMounts:
            - name: workspace
              mountPath: {{ .Sandbox.MountPath }}
  # Per-sandbox persistent workspace backed by the EFS storage class.
  volumeClaimTemplates:
    - metadata:
        name: workspace
      spec:
        accessModes:
          - ReadWriteMany
        storageClassName: efs-sandbox
        resources:
          requests:
            storage: "10Gi"
  networkPolicy:
    egress:
    {{- if .Sandbox.NetworkPolicy.EgressAllowAll }}
      - to:
          - ipBlock:
              cidr: 0.0.0.0/0
    {{- end }}
    ingress:
    {{- range .Sandbox.NetworkPolicy.IngressPorts }}
      - ports:
          - port: {{ . }}
            protocol: TCP
    {{- end }}
|
||||
14
internal/manifest/templates/storageclass-efs.yaml.tmpl
Normal file
14
internal/manifest/templates/storageclass-efs.yaml.tmpl
Normal file
|
|
@ -0,0 +1,14 @@
|
|||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: efs-sandbox
|
||||
provisioner: efs.csi.aws.com
|
||||
parameters:
|
||||
provisioningMode: efs-ap
|
||||
fileSystemId: {{ .Storage.FilesystemID }}
|
||||
directoryPerms: "755"
|
||||
uid: "{{ .Storage.UID }}"
|
||||
gid: "{{ .Storage.GID }}"
|
||||
basePath: {{ .Storage.BasePath }}
|
||||
reclaimPolicy: {{ .Storage.ReclaimPolicy }}
|
||||
volumeBindingMode: Immediate
|
||||
10
internal/manifest/templates/warm-pool.yaml.tmpl
Normal file
10
internal/manifest/templates/warm-pool.yaml.tmpl
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
apiVersion: agentsandbox.dev/v1
|
||||
kind: SandboxWarmPool
|
||||
metadata:
|
||||
name: sandbox-warm-pool
|
||||
namespace: {{ .Namespace }}
|
||||
spec:
|
||||
templateRef:
|
||||
name: sandbox-template
|
||||
replicas: {{ .Sandbox.WarmPool.Size }}
|
||||
ttlMinutes: {{ .Sandbox.WarmPool.TTLMinutes }}
|
||||
Loading…
Add table
Add a link
Reference in a new issue