This repository has been archived on 2026-03-27. You can view files and clone it, but you cannot open issues, create pull requests, or push commits.
Files
mcdeploy/deploy.go
Kyle Isom 8cd32cbb1c Initial implementation of mcdeploy deployment tool
Single Go binary with five commands:
- build: podman build locally with registry tags + git version
- push: podman push to MCR
- deploy: SSH pull/stop/rm/run on target node
- cert renew: issue TLS cert from Metacrypt via REST API
- status: show container status on a node

Config-driven via TOML service registry describing images,
Dockerfiles, container configs per node. Shells out to podman
for container operations and ssh for remote access.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 00:01:15 -07:00

123 lines
2.8 KiB
Go

package main
import (
	"fmt"
	"sort"
	"strings"

	"github.com/spf13/cobra"
)
// deployCommand returns the "deploy" cobra command, which deploys a
// service's containers to a single node over SSH.
//
// For each container on the node that matches the service (or the one
// named by --container), it pulls the latest image, stops and removes
// any existing container of that name, starts a fresh one from the
// node's container config, and prints the resulting podman status.
func deployCommand() *cobra.Command {
	var containerFlag string
	cmd := &cobra.Command{
		Use:   "deploy <service> <node>",
		Short: "Deploy a service to a node",
		Args:  cobra.ExactArgs(2),
		RunE: func(cmd *cobra.Command, args []string) error {
			cfg, err := loadCfg()
			if err != nil {
				return err
			}
			svc, err := cfg.FindService(args[0])
			if err != nil {
				return err
			}
			node, err := cfg.FindNode(args[1])
			if err != nil {
				return err
			}
			// SSH destination: prefix the configured user when present.
			host := node.Host
			if node.User != "" {
				host = node.User + "@" + node.Host
			}
			// Build the set of service images for filtering containers.
			svcImages := make(map[string]bool, len(svc.Images))
			for _, img := range svc.Images {
				svcImages[img] = true
			}
			// Collect containers belonging to this service. With
			// --container set, match on name only; otherwise match any
			// container whose image belongs to the service.
			type target struct {
				name string
				ctr  *ContainerConfig
			}
			var targets []target
			for name, ctr := range node.Containers {
				if containerFlag != "" {
					if name != containerFlag {
						continue
					}
				} else if !svcImages[ctr.Image] {
					continue
				}
				targets = append(targets, target{name: name, ctr: ctr})
			}
			if len(targets) == 0 {
				return fmt.Errorf("no containers for service %q on node %q", svc.Name, args[1])
			}
			// Map iteration order is random; sort so repeated deploys
			// process containers in a stable, predictable order.
			sort.Slice(targets, func(i, j int) bool {
				return targets[i].name < targets[j].name
			})
			for _, t := range targets {
				fmt.Printf("\n=== Deploying container %s ===\n", t.name)
				ref := cfg.ImageRef(t.ctr.Image) + ":latest"
				// Pull latest image.
				if err := sshRun(host, "podman pull "+ref); err != nil {
					return fmt.Errorf("pull %s: %w", ref, err)
				}
				// Stop/remove any existing container. Errors are
				// deliberately ignored: they are expected when no
				// container with this name exists yet.
				_ = sshRun(host, "podman stop "+t.name)
				_ = sshRun(host, "podman rm "+t.name)
				// Assemble the podman run invocation from the container
				// config. NOTE(review): values are interpolated into a
				// remote shell command line unescaped; config values
				// containing shell metacharacters would need quoting.
				parts := []string{"podman run -d --name " + t.name}
				if t.ctr.Network != "" {
					parts = append(parts, "--network "+t.ctr.Network)
				}
				if t.ctr.User != "" {
					parts = append(parts, "--user "+t.ctr.User)
				}
				for _, vol := range t.ctr.Volumes {
					parts = append(parts, "-v "+vol)
				}
				for _, port := range t.ctr.Ports {
					parts = append(parts, "-p "+port)
				}
				if t.ctr.Restart != "" {
					parts = append(parts, "--restart "+t.ctr.Restart)
				}
				parts = append(parts, ref)
				parts = append(parts, t.ctr.Cmd...)
				// Start new container.
				if err := sshRun(host, strings.Join(parts, " ")); err != nil {
					return fmt.Errorf("run %s: %w", t.name, err)
				}
				// Verify container is running by querying its status.
				status, err := sshOutput(host, "podman ps --filter name="+t.name+" --format '{{.Status}}'")
				if err != nil {
					return fmt.Errorf("verify %s: %w", t.name, err)
				}
				fmt.Printf("Status: %s\n", status)
			}
			fmt.Printf("\nDeployed %d container(s) to %s\n", len(targets), args[1])
			return nil
		},
	}
	cmd.Flags().StringVar(&containerFlag, "container", "", "deploy only this container")
	return cmd
}