mirror of
https://github.com/getcompanion-ai/computer-host.git
synced 2026-04-15 03:00:42 +00:00
* feat: add Firecracker API client methods for VM pause/resume and snapshots
Add PatchVm, GetVm, PutSnapshotCreate, and PutSnapshotLoad methods to the
API client, along with supporting types (VmState, SnapshotCreateParams,
SnapshotLoadParams, MemBackend).
* feat: add snapshot data layer - contract types, model, store, config
Add SnapshotID and snapshot contract types, SnapshotRecord model,
store interface CRUD methods with file store implementation,
snapshot paths helper, SnapshotsDir config, and directory creation.
* feat: add runtime methods for VM pause, resume, snapshot, and restore
Implement Pause, Resume, CreateSnapshot, and RestoreBoot on the
firecracker Runtime. RestoreBoot launches a jailer, stages snapshot
files into the chroot, loads the snapshot, and resumes the VM.
* feat: add daemon snapshot create, restore, and reconciliation logic
Implement CreateSnapshot (pause, snapshot, COW-copy disk, resume),
RestoreSnapshot (COW-copy disk, RestoreBoot, wait for guest),
GetSnapshot, ListSnapshots, DeleteSnapshotByID, and crash recovery
reconciliation for snapshot and restore operations.
* feat: add HTTP endpoints for snapshot create, get, list, delete, restore
Wire 5 snapshot routes: POST /machines/{id}/snapshots (create),
GET /machines/{id}/snapshots (list), GET /snapshots/{id} (get),
DELETE /snapshots/{id} (delete), POST /snapshots/{id}/restore (restore).
* fix: cross-device rename, restore network, and snapshot cleanup
- Replace os.Rename with copy+remove for moving snapshot files out of
/proc/<pid>/root/ (cross-device link error on Linux)
- Reconfigure network interface after snapshot load so the restored VM
uses its own tap device instead of the source VM's
- Clean partial snapshot dirs immediately on failure instead of only
via reconcile
- Reject snapshot requests while a machine operation is already pending
* fix: test and modify snapshot runtime
* feat: snapshot lifecycle update, align runtime issues between host image
and daemon
368 lines
10 KiB
Go
368 lines
10 KiB
Go
package daemon
|
|
|
|
import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"

	contracthost "github.com/getcompanion-ai/computer-host/contract"
	"github.com/getcompanion-ai/computer-host/internal/firecracker"
	"github.com/getcompanion-ai/computer-host/internal/model"
	"github.com/getcompanion-ai/computer-host/internal/store"
)
|
|
|
|
func (d *Daemon) GetMachine(ctx context.Context, id contracthost.MachineID) (*contracthost.GetMachineResponse, error) {
|
|
record, err := d.reconcileMachine(ctx, id)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return &contracthost.GetMachineResponse{Machine: machineToContract(*record)}, nil
|
|
}
|
|
|
|
func (d *Daemon) ListMachines(ctx context.Context) (*contracthost.ListMachinesResponse, error) {
|
|
records, err := d.store.ListMachines(ctx)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
machines := make([]contracthost.Machine, 0, len(records))
|
|
for _, record := range records {
|
|
reconciled, err := d.reconcileMachine(ctx, record.ID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
machines = append(machines, machineToContract(*reconciled))
|
|
}
|
|
return &contracthost.ListMachinesResponse{Machines: machines}, nil
|
|
}
|
|
|
|
func (d *Daemon) StopMachine(ctx context.Context, id contracthost.MachineID) error {
|
|
unlock := d.lockMachine(id)
|
|
defer unlock()
|
|
|
|
record, err := d.store.GetMachine(ctx, id)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if record.Phase == contracthost.MachinePhaseStopped {
|
|
return nil
|
|
}
|
|
|
|
if err := d.store.UpsertOperation(ctx, model.OperationRecord{
|
|
MachineID: id,
|
|
Type: model.MachineOperationStop,
|
|
StartedAt: time.Now().UTC(),
|
|
}); err != nil {
|
|
return err
|
|
}
|
|
|
|
clearOperation := false
|
|
defer func() {
|
|
if clearOperation {
|
|
_ = d.store.DeleteOperation(context.Background(), id)
|
|
}
|
|
}()
|
|
|
|
if err := d.stopMachineRecord(ctx, record); err != nil {
|
|
return err
|
|
}
|
|
|
|
clearOperation = true
|
|
return nil
|
|
}
|
|
|
|
func (d *Daemon) DeleteMachine(ctx context.Context, id contracthost.MachineID) error {
|
|
unlock := d.lockMachine(id)
|
|
defer unlock()
|
|
|
|
record, err := d.store.GetMachine(ctx, id)
|
|
if err == store.ErrNotFound {
|
|
return nil
|
|
}
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
if err := d.store.UpsertOperation(ctx, model.OperationRecord{
|
|
MachineID: id,
|
|
Type: model.MachineOperationDelete,
|
|
StartedAt: time.Now().UTC(),
|
|
}); err != nil {
|
|
return err
|
|
}
|
|
|
|
clearOperation := false
|
|
defer func() {
|
|
if clearOperation {
|
|
_ = d.store.DeleteOperation(context.Background(), id)
|
|
}
|
|
}()
|
|
|
|
if err := d.deleteMachineRecord(ctx, record); err != nil {
|
|
return err
|
|
}
|
|
|
|
clearOperation = true
|
|
return nil
|
|
}
|
|
|
|
func (d *Daemon) Reconcile(ctx context.Context) error {
|
|
operations, err := d.store.ListOperations(ctx)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
for _, operation := range operations {
|
|
switch operation.Type {
|
|
case model.MachineOperationCreate:
|
|
if err := d.reconcileCreate(ctx, operation.MachineID); err != nil {
|
|
return err
|
|
}
|
|
case model.MachineOperationStop:
|
|
if err := d.reconcileStop(ctx, operation.MachineID); err != nil {
|
|
return err
|
|
}
|
|
case model.MachineOperationDelete:
|
|
if err := d.reconcileDelete(ctx, operation.MachineID); err != nil {
|
|
return err
|
|
}
|
|
case model.MachineOperationSnapshot:
|
|
if err := d.reconcileSnapshot(ctx, operation); err != nil {
|
|
return err
|
|
}
|
|
case model.MachineOperationRestore:
|
|
if err := d.reconcileRestore(ctx, operation); err != nil {
|
|
return err
|
|
}
|
|
default:
|
|
return fmt.Errorf("unsupported operation type %q", operation.Type)
|
|
}
|
|
}
|
|
|
|
records, err := d.store.ListMachines(ctx)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
for _, record := range records {
|
|
if _, err := d.reconcileMachine(ctx, record.ID); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (d *Daemon) listRunningNetworks(ctx context.Context, ignore contracthost.MachineID) ([]firecracker.NetworkAllocation, error) {
|
|
records, err := d.store.ListMachines(ctx)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
networks := make([]firecracker.NetworkAllocation, 0, len(records))
|
|
for _, record := range records {
|
|
if record.ID == ignore || record.Phase != contracthost.MachinePhaseRunning {
|
|
continue
|
|
}
|
|
if strings.TrimSpace(record.RuntimeHost) == "" || strings.TrimSpace(record.TapDevice) == "" {
|
|
continue
|
|
}
|
|
network, err := firecracker.AllocationFromGuestIP(record.RuntimeHost, record.TapDevice)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
networks = append(networks, network)
|
|
}
|
|
return networks, nil
|
|
}
|
|
|
|
func (d *Daemon) reconcileCreate(ctx context.Context, machineID contracthost.MachineID) error {
|
|
_, err := d.store.GetMachine(ctx, machineID)
|
|
if err == nil {
|
|
if _, err := d.reconcileMachine(ctx, machineID); err != nil {
|
|
return err
|
|
}
|
|
return d.store.DeleteOperation(ctx, machineID)
|
|
}
|
|
if err != store.ErrNotFound {
|
|
return err
|
|
}
|
|
|
|
if err := os.Remove(d.systemVolumePath(machineID)); err != nil && !os.IsNotExist(err) {
|
|
return fmt.Errorf("cleanup system volume for %q: %w", machineID, err)
|
|
}
|
|
if err := d.store.DeleteVolume(ctx, d.systemVolumeID(machineID)); err != nil && err != store.ErrNotFound {
|
|
return err
|
|
}
|
|
if err := d.detachVolumesForMachine(ctx, machineID); err != nil {
|
|
return err
|
|
}
|
|
_ = os.RemoveAll(filepath.Dir(d.systemVolumePath(machineID)))
|
|
if err := os.RemoveAll(d.machineRuntimeBaseDir(machineID)); err != nil {
|
|
return fmt.Errorf("cleanup runtime dir for %q: %w", machineID, err)
|
|
}
|
|
return d.store.DeleteOperation(ctx, machineID)
|
|
}
|
|
|
|
func (d *Daemon) reconcileStop(ctx context.Context, machineID contracthost.MachineID) error {
|
|
record, err := d.store.GetMachine(ctx, machineID)
|
|
if err == store.ErrNotFound {
|
|
return d.store.DeleteOperation(ctx, machineID)
|
|
}
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if err := d.stopMachineRecord(ctx, record); err != nil {
|
|
return err
|
|
}
|
|
return d.store.DeleteOperation(ctx, machineID)
|
|
}
|
|
|
|
func (d *Daemon) reconcileDelete(ctx context.Context, machineID contracthost.MachineID) error {
|
|
record, err := d.store.GetMachine(ctx, machineID)
|
|
if err == store.ErrNotFound {
|
|
if err := os.Remove(d.systemVolumePath(machineID)); err != nil && !os.IsNotExist(err) {
|
|
return err
|
|
}
|
|
if err := d.store.DeleteVolume(ctx, d.systemVolumeID(machineID)); err != nil && err != store.ErrNotFound {
|
|
return err
|
|
}
|
|
if err := d.detachVolumesForMachine(ctx, machineID); err != nil {
|
|
return err
|
|
}
|
|
_ = os.RemoveAll(filepath.Dir(d.systemVolumePath(machineID)))
|
|
_ = os.RemoveAll(d.machineRuntimeBaseDir(machineID))
|
|
return d.store.DeleteOperation(ctx, machineID)
|
|
}
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if err := d.deleteMachineRecord(ctx, record); err != nil {
|
|
return err
|
|
}
|
|
return d.store.DeleteOperation(ctx, machineID)
|
|
}
|
|
|
|
func (d *Daemon) reconcileMachine(ctx context.Context, machineID contracthost.MachineID) (*model.MachineRecord, error) {
|
|
unlock := d.lockMachine(machineID)
|
|
defer unlock()
|
|
|
|
record, err := d.store.GetMachine(ctx, machineID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
if record.Phase != contracthost.MachinePhaseRunning {
|
|
return record, nil
|
|
}
|
|
|
|
state, err := d.runtime.Inspect(machineToRuntimeState(*record))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
if state.Phase == firecracker.PhaseRunning {
|
|
return record, nil
|
|
}
|
|
|
|
if err := d.runtime.Delete(ctx, *state); err != nil {
|
|
return nil, err
|
|
}
|
|
record.Phase = contracthost.MachinePhaseFailed
|
|
record.Error = state.Error
|
|
record.PID = 0
|
|
record.SocketPath = ""
|
|
record.RuntimeHost = ""
|
|
record.TapDevice = ""
|
|
record.StartedAt = nil
|
|
if err := d.store.UpdateMachine(ctx, *record); err != nil {
|
|
return nil, err
|
|
}
|
|
return record, nil
|
|
}
|
|
|
|
func (d *Daemon) deleteMachineRecord(ctx context.Context, record *model.MachineRecord) error {
|
|
if err := d.runtime.Delete(ctx, machineToRuntimeState(*record)); err != nil {
|
|
return err
|
|
}
|
|
if err := d.detachVolumesForMachine(ctx, record.ID); err != nil {
|
|
return err
|
|
}
|
|
|
|
systemVolume, err := d.store.GetVolume(ctx, record.SystemVolumeID)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if err := os.Remove(systemVolume.Path); err != nil && !os.IsNotExist(err) {
|
|
return fmt.Errorf("remove system volume %q: %w", systemVolume.Path, err)
|
|
}
|
|
if err := os.RemoveAll(filepath.Dir(systemVolume.Path)); err != nil {
|
|
return fmt.Errorf("remove machine disk dir %q: %w", filepath.Dir(systemVolume.Path), err)
|
|
}
|
|
if err := d.store.DeleteVolume(ctx, record.SystemVolumeID); err != nil {
|
|
return err
|
|
}
|
|
return d.store.DeleteMachine(ctx, record.ID)
|
|
}
|
|
|
|
func (d *Daemon) stopMachineRecord(ctx context.Context, record *model.MachineRecord) error {
|
|
if err := d.runtime.Delete(ctx, machineToRuntimeState(*record)); err != nil {
|
|
return err
|
|
}
|
|
record.Phase = contracthost.MachinePhaseStopped
|
|
record.Error = ""
|
|
record.PID = 0
|
|
record.SocketPath = ""
|
|
record.RuntimeHost = ""
|
|
record.TapDevice = ""
|
|
record.StartedAt = nil
|
|
return d.store.UpdateMachine(ctx, *record)
|
|
}
|
|
|
|
func (d *Daemon) detachVolumesForMachine(ctx context.Context, machineID contracthost.MachineID) error {
|
|
volumes, err := d.store.ListVolumes(ctx)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
for _, volume := range volumes {
|
|
if volume.AttachedMachineID == nil || *volume.AttachedMachineID != machineID {
|
|
continue
|
|
}
|
|
volume.AttachedMachineID = nil
|
|
if err := d.store.UpdateVolume(ctx, volume); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (d *Daemon) reconcileSnapshot(ctx context.Context, operation model.OperationRecord) error {
|
|
if operation.SnapshotID == nil {
|
|
return d.store.DeleteOperation(ctx, operation.MachineID)
|
|
}
|
|
_, err := d.store.GetSnapshot(ctx, *operation.SnapshotID)
|
|
if err == nil {
|
|
// Snapshot completed successfully, just clear the journal
|
|
return d.store.DeleteOperation(ctx, operation.MachineID)
|
|
}
|
|
// Snapshot did not complete: clean up partial snapshot directory and resume the machine
|
|
snapshotDir := filepath.Join(d.config.SnapshotsDir, string(*operation.SnapshotID))
|
|
_ = os.RemoveAll(snapshotDir)
|
|
|
|
// Try to resume the source machine in case it was left paused
|
|
record, err := d.store.GetMachine(ctx, operation.MachineID)
|
|
if err == nil && record.Phase == contracthost.MachinePhaseRunning && record.PID > 0 {
|
|
_ = d.runtime.Resume(ctx, machineToRuntimeState(*record))
|
|
}
|
|
return d.store.DeleteOperation(ctx, operation.MachineID)
|
|
}
|
|
|
|
func (d *Daemon) reconcileRestore(ctx context.Context, operation model.OperationRecord) error {
|
|
_, err := d.store.GetMachine(ctx, operation.MachineID)
|
|
if err == nil {
|
|
// Restore completed, clear journal
|
|
return d.store.DeleteOperation(ctx, operation.MachineID)
|
|
}
|
|
// Restore did not complete: clean up partial machine directory and disk
|
|
_ = os.RemoveAll(filepath.Dir(d.systemVolumePath(operation.MachineID)))
|
|
_ = os.RemoveAll(d.machineRuntimeBaseDir(operation.MachineID))
|
|
return d.store.DeleteOperation(ctx, operation.MachineID)
|
|
}
|