mirror of
https://github.com/harivansh-afk/agentikube.git
synced 2026-04-20 03:00:28 +00:00
init
This commit is contained in:
commit
0595d93c49
28 changed files with 1763 additions and 0 deletions
117
internal/commands/create.go
Normal file
117
internal/commands/create.go
Normal file
|
|
@ -0,0 +1,117 @@
|
|||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/rathi/agentikube/internal/kube"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
func NewCreateCmd() *cobra.Command {
|
||||
var provider string
|
||||
var apiKey string
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "create <handle>",
|
||||
Short: "Create a new sandbox for an agent",
|
||||
Long: "Creates a Secret and SandboxClaim for the given handle, then waits for it to be ready.",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
handle := args[0]
|
||||
|
||||
cfg, err := loadConfig(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client, err := kube.NewClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("connecting to cluster: %w", err)
|
||||
}
|
||||
|
||||
ns := cfg.Namespace
|
||||
name := "sandbox-" + handle
|
||||
|
||||
// Create the secret with provider credentials
|
||||
secret := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Secret",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": name,
|
||||
"namespace": ns,
|
||||
},
|
||||
"stringData": map[string]interface{}{
|
||||
"PROVIDER": provider,
|
||||
"PROVIDER_KEY": apiKey,
|
||||
"USER_NAME": handle,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
secretGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
|
||||
_, err = client.Dynamic().Resource(secretGVR).Namespace(ns).Create(ctx, secret, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating secret %q: %w", name, err)
|
||||
}
|
||||
fmt.Printf("[ok] secret %q created\n", name)
|
||||
|
||||
// Create the SandboxClaim
|
||||
claim := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "agentsandbox.dev/v1",
|
||||
"kind": "SandboxClaim",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": name,
|
||||
"namespace": ns,
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"templateRef": map[string]interface{}{
|
||||
"name": "sandbox-template",
|
||||
},
|
||||
"secretRef": map[string]interface{}{
|
||||
"name": name,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
claimGVR := schema.GroupVersionResource{
|
||||
Group: "agentsandbox.dev",
|
||||
Version: "v1",
|
||||
Resource: "sandboxclaims",
|
||||
}
|
||||
_, err = client.Dynamic().Resource(claimGVR).Namespace(ns).Create(ctx, claim, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating SandboxClaim %q: %w", name, err)
|
||||
}
|
||||
fmt.Printf("[ok] SandboxClaim %q created\n", name)
|
||||
|
||||
// Wait for the sandbox to become ready (3 min timeout)
|
||||
fmt.Println("waiting for sandbox to be ready...")
|
||||
waitCtx, cancel := context.WithTimeout(ctx, 3*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
if err := client.WaitForReady(waitCtx, ns, "sandboxclaims", name); err != nil {
|
||||
return fmt.Errorf("waiting for sandbox: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("\nsandbox %q is ready\n", handle)
|
||||
fmt.Printf(" name: %s\n", name)
|
||||
fmt.Printf(" namespace: %s\n", ns)
|
||||
fmt.Printf(" ssh: agentikube ssh %s\n", handle)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().StringVar(&provider, "provider", "", "LLM provider name (env: SANDBOX_LLM_PROVIDER)")
|
||||
cmd.Flags().StringVar(&apiKey, "api-key", "", "LLM provider API key (env: SANDBOX_API_KEY)")
|
||||
|
||||
return cmd
|
||||
}
|
||||
94
internal/commands/destroy.go
Normal file
94
internal/commands/destroy.go
Normal file
|
|
@ -0,0 +1,94 @@
|
|||
package commands
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/rathi/agentikube/internal/kube"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
)
|
||||
|
||||
func NewDestroyCmd() *cobra.Command {
|
||||
var yes bool
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "destroy <handle>",
|
||||
Short: "Destroy a sandbox and its resources",
|
||||
Long: "Deletes the SandboxClaim, Secret, and PVC for the given handle.",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
handle := args[0]
|
||||
|
||||
if !yes {
|
||||
fmt.Printf("are you sure you want to destroy sandbox %q? [y/N] ", handle)
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
scanner.Scan()
|
||||
answer := strings.TrimSpace(strings.ToLower(scanner.Text()))
|
||||
if answer != "y" && answer != "yes" {
|
||||
fmt.Println("aborted")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
cfg, err := loadConfig(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client, err := kube.NewClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("connecting to cluster: %w", err)
|
||||
}
|
||||
|
||||
ns := cfg.Namespace
|
||||
name := "sandbox-" + handle
|
||||
|
||||
claimGVR := schema.GroupVersionResource{
|
||||
Group: "agentsandbox.dev",
|
||||
Version: "v1",
|
||||
Resource: "sandboxclaims",
|
||||
}
|
||||
|
||||
secretGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
|
||||
pvcGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"}
|
||||
|
||||
// Delete SandboxClaim
|
||||
err = client.Dynamic().Resource(claimGVR).Namespace(ns).Delete(ctx, name, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting SandboxClaim %q: %w", name, err)
|
||||
}
|
||||
fmt.Printf("[ok] SandboxClaim %q deleted\n", name)
|
||||
|
||||
// Delete Secret
|
||||
err = client.Dynamic().Resource(secretGVR).Namespace(ns).Delete(ctx, name, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting Secret %q: %w", name, err)
|
||||
}
|
||||
fmt.Printf("[ok] Secret %q deleted\n", name)
|
||||
|
||||
// Delete PVC (best-effort)
|
||||
err = client.Dynamic().Resource(pvcGVR).Namespace(ns).Delete(ctx, name, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
if !errors.IsNotFound(err) {
|
||||
fmt.Printf("[warn] could not delete PVC %q: %v\n", name, err)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("[ok] PVC %q deleted\n", name)
|
||||
}
|
||||
|
||||
fmt.Printf("\nsandbox %q destroyed\n", handle)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().BoolVar(&yes, "yes", false, "skip confirmation prompt")
|
||||
|
||||
return cmd
|
||||
}
|
||||
65
internal/commands/down.go
Normal file
65
internal/commands/down.go
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/rathi/agentikube/internal/kube"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
func NewDownCmd() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "down",
|
||||
Short: "Remove sandbox infrastructure (preserves user sandboxes)",
|
||||
Long: "Deletes the SandboxWarmPool and SandboxTemplate. User sandboxes are preserved.",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
|
||||
cfg, err := loadConfig(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client, err := kube.NewClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("connecting to cluster: %w", err)
|
||||
}
|
||||
|
||||
ns := cfg.Namespace
|
||||
|
||||
warmPoolGVR := schema.GroupVersionResource{
|
||||
Group: "agentsandbox.dev",
|
||||
Version: "v1",
|
||||
Resource: "sandboxwarmpools",
|
||||
}
|
||||
|
||||
templateGVR := schema.GroupVersionResource{
|
||||
Group: "agentsandbox.dev",
|
||||
Version: "v1",
|
||||
Resource: "sandboxtemplates",
|
||||
}
|
||||
|
||||
err = client.Dynamic().Resource(warmPoolGVR).Namespace(ns).Delete(ctx, "sandbox-warm-pool", metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
fmt.Printf("[warn] could not delete SandboxWarmPool: %v\n", err)
|
||||
} else {
|
||||
fmt.Println("[ok] SandboxWarmPool deleted")
|
||||
}
|
||||
|
||||
err = client.Dynamic().Resource(templateGVR).Namespace(ns).Delete(ctx, "sandbox-template", metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
fmt.Printf("[warn] could not delete SandboxTemplate: %v\n", err)
|
||||
} else {
|
||||
fmt.Println("[ok] SandboxTemplate deleted")
|
||||
}
|
||||
|
||||
fmt.Println("\nwarm pool and template deleted. User sandboxes are preserved.")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
11
internal/commands/helpers.go
Normal file
11
internal/commands/helpers.go
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
package commands
|
||||
|
||||
import (
|
||||
"github.com/rathi/agentikube/internal/config"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func loadConfig(cmd *cobra.Command) (*config.Config, error) {
|
||||
cfgPath, _ := cmd.Flags().GetString("config")
|
||||
return config.Load(cfgPath)
|
||||
}
|
||||
97
internal/commands/init.go
Normal file
97
internal/commands/init.go
Normal file
|
|
@ -0,0 +1,97 @@
|
|||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/rathi/agentikube/internal/kube"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// crdInstallURL is the upstream agent-sandbox install manifest that
// "init" applies via kubectl.
const crdInstallURL = "https://raw.githubusercontent.com/agent-sandbox/agent-sandbox/main/deploy/install.yaml"

// NewInitCmd builds the "init" subcommand. It verifies cluster
// connectivity, applies the agent-sandbox CRDs, probes for optional
// add-ons (EFS CSI driver, Karpenter) with warnings only, and ensures
// the configured namespace exists.
func NewInitCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "init",
		Short: "Initialize the cluster for agent sandboxes",
		Long:  "Checks prerequisites, installs CRDs, and creates the target namespace.",
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := context.Background()

			cfg, err := loadConfig(cmd)
			if err != nil {
				return err
			}

			// Check kubectl context — constructing the client doubles as
			// the connectivity check.
			client, err := kube.NewClient()
			if err != nil {
				return fmt.Errorf("connecting to cluster: %w", err)
			}
			fmt.Println("[ok] connected to Kubernetes cluster")

			// Apply agent-sandbox CRDs. Shells out to kubectl (must be on
			// PATH); CombinedOutput captures stderr for the error message.
			fmt.Println("applying agent-sandbox CRDs...")
			out, err := exec.CommandContext(ctx, "kubectl", "apply", "-f", crdInstallURL).CombinedOutput()
			if err != nil {
				return fmt.Errorf("applying CRDs: %s: %w", strings.TrimSpace(string(out)), err)
			}
			fmt.Println("[ok] agent-sandbox CRDs applied")

			// Check for EFS CSI driver: scan kube-system daemonsets for a
			// name containing "efs-csi". Absence is a warning, not fatal.
			dsList, err := client.Clientset().AppsV1().DaemonSets("kube-system").List(ctx, metav1.ListOptions{})
			if err != nil {
				return fmt.Errorf("listing daemonsets in kube-system: %w", err)
			}
			efsFound := false
			for _, ds := range dsList.Items {
				if strings.Contains(ds.Name, "efs-csi") {
					efsFound = true
					break
				}
			}
			if efsFound {
				fmt.Println("[ok] EFS CSI driver found")
			} else {
				fmt.Println("[warn] EFS CSI driver not found - install it before using EFS storage")
			}

			// Check for Karpenter in the namespaces it is commonly
			// installed into; a list error (e.g. namespace missing) just
			// moves on to the next candidate.
			karpenterFound := false
			for _, ns := range []string{"karpenter", "kube-system"} {
				depList, err := client.Clientset().AppsV1().Deployments(ns).List(ctx, metav1.ListOptions{})
				if err != nil {
					continue
				}
				for _, dep := range depList.Items {
					if strings.Contains(dep.Name, "karpenter") {
						karpenterFound = true
						break
					}
				}
				if karpenterFound {
					break
				}
			}
			if karpenterFound {
				fmt.Println("[ok] Karpenter found")
			} else {
				fmt.Println("[warn] Karpenter not found - required if compute.type is karpenter")
			}

			// Create namespace if it does not exist
			if err := client.EnsureNamespace(ctx, cfg.Namespace); err != nil {
				return fmt.Errorf("creating namespace %q: %w", cfg.Namespace, err)
			}
			fmt.Printf("[ok] namespace %q ready\n", cfg.Namespace)

			fmt.Println("\ninit complete")
			return nil
		},
	}

	return cmd
}
|
||||
138
internal/commands/list.go
Normal file
138
internal/commands/list.go
Normal file
|
|
@ -0,0 +1,138 @@
|
|||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/rathi/agentikube/internal/kube"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
func NewListCmd() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List all sandboxes",
|
||||
Long: "Lists all SandboxClaims in the configured namespace.",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
|
||||
cfg, err := loadConfig(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client, err := kube.NewClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("connecting to cluster: %w", err)
|
||||
}
|
||||
|
||||
claimGVR := schema.GroupVersionResource{
|
||||
Group: "agentsandbox.dev",
|
||||
Version: "v1",
|
||||
Resource: "sandboxclaims",
|
||||
}
|
||||
|
||||
list, err := client.Dynamic().Resource(claimGVR).Namespace(cfg.Namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("listing SandboxClaims: %w", err)
|
||||
}
|
||||
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0)
|
||||
fmt.Fprintln(w, "HANDLE\tSTATUS\tAGE\tPOD")
|
||||
|
||||
for _, item := range list.Items {
|
||||
name := item.GetName()
|
||||
handle := name
|
||||
if len(name) > 8 && name[:8] == "sandbox-" {
|
||||
handle = name[8:]
|
||||
}
|
||||
|
||||
status := extractStatus(item.Object)
|
||||
podName := extractPodName(item.Object)
|
||||
age := formatAge(item.GetCreationTimestamp().Time)
|
||||
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", handle, status, age, podName)
|
||||
}
|
||||
|
||||
w.Flush()
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// extractStatus derives a display status from a SandboxClaim's
// unstructured object: "Unknown" when there is no status map,
// "Pending" when no conditions exist (or no Ready condition is found),
// "Ready" when the Ready condition's status is "True", otherwise the
// condition's reason (falling back to "NotReady" when the reason is
// empty).
func extractStatus(obj map[string]interface{}) string {
	statusMap, hasStatus := obj["status"].(map[string]interface{})
	if !hasStatus {
		return "Unknown"
	}

	conds, hasConds := statusMap["conditions"].([]interface{})
	if !hasConds || len(conds) == 0 {
		return "Pending"
	}

	// Scan for the Ready condition; anything that is not a map or not
	// of type "Ready" is skipped.
	for _, raw := range conds {
		cond, isMap := raw.(map[string]interface{})
		if !isMap {
			continue
		}
		if t, _ := cond["type"].(string); t != "Ready" {
			continue
		}
		if s, _ := cond["status"].(string); s == "True" {
			return "Ready"
		}
		if reason, _ := cond["reason"].(string); reason != "" {
			return reason
		}
		return "NotReady"
	}

	return "Pending"
}
|
||||
|
||||
// extractPodName returns the name of the pod backing a SandboxClaim.
// It prefers status.podName, then falls back to the
// "agentsandbox.dev/pod-name" annotation, and returns "-" when
// neither is present.
func extractPodName(obj map[string]interface{}) string {
	if statusMap, ok := obj["status"].(map[string]interface{}); ok {
		if pod, ok := statusMap["podName"].(string); ok && pod != "" {
			return pod
		}
	}

	// Fall back to annotations
	if meta, ok := obj["metadata"].(map[string]interface{}); ok {
		if anns, ok := meta["annotations"].(map[string]interface{}); ok {
			if pod, ok := anns["agentsandbox.dev/pod-name"].(string); ok {
				return pod
			}
		}
	}

	return "-"
}
|
||||
|
||||
func formatAge(created time.Time) string {
|
||||
d := time.Since(created)
|
||||
switch {
|
||||
case d < time.Minute:
|
||||
return fmt.Sprintf("%ds", int(d.Seconds()))
|
||||
case d < time.Hour:
|
||||
return fmt.Sprintf("%dm", int(d.Minutes()))
|
||||
case d < 24*time.Hour:
|
||||
return fmt.Sprintf("%dh", int(d.Hours()))
|
||||
default:
|
||||
return fmt.Sprintf("%dd", int(d.Hours()/24))
|
||||
}
|
||||
}
|
||||
58
internal/commands/ssh.go
Normal file
58
internal/commands/ssh.go
Normal file
|
|
@ -0,0 +1,58 @@
|
|||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/rathi/agentikube/internal/kube"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
func NewSSHCmd() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "ssh <handle>",
|
||||
Short: "Open a shell into a sandbox",
|
||||
Long: "Exec into the sandbox pod for the given handle.",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
handle := args[0]
|
||||
|
||||
cfg, err := loadConfig(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client, err := kube.NewClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("connecting to cluster: %w", err)
|
||||
}
|
||||
|
||||
ns := cfg.Namespace
|
||||
name := "sandbox-" + handle
|
||||
|
||||
claimGVR := schema.GroupVersionResource{
|
||||
Group: "agentsandbox.dev",
|
||||
Version: "v1",
|
||||
Resource: "sandboxclaims",
|
||||
}
|
||||
|
||||
claim, err := client.Dynamic().Resource(claimGVR).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting SandboxClaim %q: %w", name, err)
|
||||
}
|
||||
|
||||
podName := extractPodName(claim.Object)
|
||||
if podName == "-" || podName == "" {
|
||||
return fmt.Errorf("sandbox %q does not have a pod assigned yet", handle)
|
||||
}
|
||||
|
||||
fmt.Printf("connecting to pod %s...\n", podName)
|
||||
return kube.Exec(ns, podName, []string{"/bin/sh"})
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
108
internal/commands/status.go
Normal file
108
internal/commands/status.go
Normal file
|
|
@ -0,0 +1,108 @@
|
|||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/rathi/agentikube/internal/kube"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// NewStatusCmd builds the "status" subcommand, which prints a summary
// of the warm pool replica counts, the number of SandboxClaims, and —
// when compute.type is "karpenter" — the count of Karpenter nodes.
// All lookups are reported-not-fatal so partial clusters still get a
// useful readout.
func NewStatusCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "status",
		Short: "Show cluster and sandbox status",
		Long:  "Displays warm pool status, sandbox counts, and compute node information.",
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := context.Background()

			cfg, err := loadConfig(cmd)
			if err != nil {
				return err
			}

			client, err := kube.NewClient()
			if err != nil {
				return fmt.Errorf("connecting to cluster: %w", err)
			}

			ns := cfg.Namespace

			// Warm pool status: fetch the singleton "sandbox-warm-pool"
			// object; absence is reported inline rather than aborting.
			warmPoolGVR := schema.GroupVersionResource{
				Group:    "agentsandbox.dev",
				Version:  "v1",
				Resource: "sandboxwarmpools",
			}

			wp, err := client.Dynamic().Resource(warmPoolGVR).Namespace(ns).Get(ctx, "sandbox-warm-pool", metav1.GetOptions{})
			if err != nil {
				fmt.Printf("warm pool: not found (%v)\n", err)
			} else {
				// Failed assertions leave nil maps; getInt64 treats a nil
				// map or missing key as 0, so no extra checks are needed.
				spec, _ := wp.Object["spec"].(map[string]interface{})
				status, _ := wp.Object["status"].(map[string]interface{})

				replicas := getInt64(spec, "replicas")
				readyReplicas := getInt64(status, "readyReplicas")
				pendingReplicas := getInt64(status, "pendingReplicas")

				fmt.Println("warm pool:")
				fmt.Printf(" desired: %d\n", replicas)
				fmt.Printf(" ready: %d\n", readyReplicas)
				fmt.Printf(" pending: %d\n", pendingReplicas)
			}

			// Sandbox count
			claimGVR := schema.GroupVersionResource{
				Group:    "agentsandbox.dev",
				Version:  "v1",
				Resource: "sandboxclaims",
			}

			claims, err := client.Dynamic().Resource(claimGVR).Namespace(ns).List(ctx, metav1.ListOptions{})
			if err != nil {
				fmt.Printf("\nsandboxes: error listing (%v)\n", err)
			} else {
				fmt.Printf("\nsandboxes: %d\n", len(claims.Items))
			}

			// Karpenter nodes (if applicable): nodes carrying the
			// karpenter.sh/nodepool label were provisioned by Karpenter.
			if cfg.Compute.Type == "karpenter" {
				nodes, err := client.Clientset().CoreV1().Nodes().List(ctx, metav1.ListOptions{
					LabelSelector: "karpenter.sh/nodepool",
				})
				if err != nil {
					fmt.Printf("\nkarpenter nodes: error listing (%v)\n", err)
				} else {
					fmt.Printf("\nkarpenter nodes: %d\n", len(nodes.Items))
				}
			}

			return nil
		},
	}

	return cmd
}
|
||||
|
||||
// getInt64 reads m[key] and coerces it to int64, handling the numeric
// representations that appear in unstructured/JSON-decoded objects
// (int64, float64, int). A nil map, missing key, or non-numeric value
// yields 0.
func getInt64(m map[string]interface{}, key string) int64 {
	if m == nil {
		return 0
	}
	// A missing key yields an untyped nil, which falls through to the
	// default 0 — no separate presence check needed.
	switch n := m[key].(type) {
	case int64:
		return n
	case float64:
		return int64(n)
	case int:
		return int64(n)
	}
	return 0
}
|
||||
63
internal/commands/up.go
Normal file
63
internal/commands/up.go
Normal file
|
|
@ -0,0 +1,63 @@
|
|||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/rathi/agentikube/internal/kube"
|
||||
"github.com/rathi/agentikube/internal/manifest"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func NewUpCmd() *cobra.Command {
|
||||
var dryRun bool
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "up",
|
||||
Short: "Apply sandbox infrastructure to the cluster",
|
||||
Long: "Generates and applies all sandbox manifests (templates, warm pool, storage, compute).",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
|
||||
cfg, err := loadConfig(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
manifests, err := manifest.Generate(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("generating manifests: %w", err)
|
||||
}
|
||||
|
||||
if dryRun {
|
||||
fmt.Print(string(manifests))
|
||||
return nil
|
||||
}
|
||||
|
||||
client, err := kube.NewClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("connecting to cluster: %w", err)
|
||||
}
|
||||
|
||||
if err := client.ServerSideApply(ctx, manifests); err != nil {
|
||||
return fmt.Errorf("applying manifests: %w", err)
|
||||
}
|
||||
fmt.Println("[ok] manifests applied")
|
||||
|
||||
if cfg.Sandbox.WarmPool.Enabled {
|
||||
fmt.Println("waiting for warm pool to become ready...")
|
||||
if err := client.WaitForReady(ctx, cfg.Namespace, "sandboxwarmpools", "sandbox-warm-pool"); err != nil {
|
||||
return fmt.Errorf("waiting for warm pool: %w", err)
|
||||
}
|
||||
fmt.Println("[ok] warm pool ready")
|
||||
}
|
||||
|
||||
fmt.Println("\ninfrastructure is up")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().BoolVar(&dryRun, "dry-run", false, "print manifests to stdout without applying")
|
||||
|
||||
return cmd
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue