Add mcp build command and deploy auto-build
Extends MCP to own the full build-push-deploy lifecycle. When deploying, the CLI checks whether each component's image tag exists in the registry, and builds and pushes it automatically if the tag is missing and build configuration is present.

- Add Build, Push, ImageExists to the runtime.Runtime interface (podman impl)
- Add `mcp build <service>[/<image>]` command
- Add [build] section to the CLI config (workspace path)
- Add path and [build.images] to service definitions
- Wire auto-build into `mcp deploy` before the agent RPC
- Update ARCHITECTURE.md with runtime interface and deploy auto-build docs

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -299,6 +299,12 @@ chain:
|
||||
If neither exists (first deploy, no file), the deploy fails with an error
|
||||
telling the operator to create a service definition.
|
||||
|
||||
Before pushing to the agent, the CLI checks that each component's image
|
||||
tag exists in the registry. If a tag is missing and a `[build]` section
|
||||
is configured, the CLI builds and pushes the image automatically (same
|
||||
logic as `mcp sync` auto-build, described below). This makes `mcp deploy`
|
||||
a single command for the bump-build-push-deploy workflow.
|
||||
|
||||
The CLI pushes the resolved spec to the agent. The agent records it in its
|
||||
registry and executes the deploy. The service definition file on disk is
|
||||
**not** modified -- it represents the operator's declared intent, not the
|
||||
@@ -656,6 +662,29 @@ The agent runs as a dedicated `mcp` system user. Podman runs rootless under
|
||||
this user. All containers are owned by `mcp`. The NixOS configuration
|
||||
provisions the `mcp` user with podman access.
|
||||
|
||||
#### Runtime Interface
|
||||
|
||||
The `runtime.Runtime` interface abstracts the container runtime. The agent
|
||||
(and the CLI, for build operations) use it for all container operations.
|
||||
|
||||
| Method | Used by | Purpose |
|
||||
|--------|---------|---------|
|
||||
| `Pull(image)` | Agent | `podman pull <image>` |
|
||||
| `Run(spec)` | Agent | `podman run -d ...` |
|
||||
| `Stop(name)` | Agent | `podman stop <name>` |
|
||||
| `Remove(name)` | Agent | `podman rm <name>` |
|
||||
| `Inspect(name)` | Agent | `podman inspect <name>` |
|
||||
| `List()` | Agent | `podman ps -a` |
|
||||
| `Build(image, contextDir, dockerfile)` | CLI | `podman build -t <image> -f <dockerfile> <contextDir>` |
|
||||
| `Push(image)` | CLI | `podman push <image>` |
|
||||
| `ImageExists(image)` | CLI | `podman manifest inspect docker://<image>` (checks remote registry) |
|
||||
|
||||
The first six methods are used by the agent during deploy and monitoring.
|
||||
The last three are used by the CLI during `mcp build` and `mcp deploy`
|
||||
auto-build. They are on the same interface because the CLI uses the local
|
||||
podman installation directly -- no gRPC call is needed, since builds happen
|
||||
on the operator's workstation, not on the deployment node.
|
||||
|
||||
#### Deploy Flow
|
||||
|
||||
When the agent receives a `Deploy` RPC:
|
||||
@@ -1223,6 +1252,7 @@ mcp/
|
||||
│ ├── mcp/ CLI
|
||||
│ │ ├── main.go
|
||||
│ │ ├── login.go
|
||||
│ │ ├── build.go build and push images
|
||||
│ │ ├── deploy.go
|
||||
│ │ ├── lifecycle.go stop, start, restart
|
||||
│ │ ├── status.go list, ps, status
|
||||
|
||||
168
cmd/mcp/build.go
Normal file
168
cmd/mcp/build.go
Normal file
@@ -0,0 +1,168 @@
|
||||
package main
|
||||
|
||||
import (
	"context"
	"fmt"
	"path/filepath"
	"sort"
	"strings"

	"github.com/spf13/cobra"

	"git.wntrmute.dev/kyle/mcp/internal/config"
	"git.wntrmute.dev/kyle/mcp/internal/runtime"
	"git.wntrmute.dev/kyle/mcp/internal/servicedef"
)
|
||||
|
||||
func buildCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "build <service>[/<image>]",
|
||||
Short: "Build and push images for a service",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
cfg, err := config.LoadCLIConfig(cfgPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("load config: %w", err)
|
||||
}
|
||||
|
||||
serviceName, imageFilter := parseServiceArg(args[0])
|
||||
|
||||
def, err := loadServiceDef(cmd, cfg, serviceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rt := &runtime.Podman{}
|
||||
return buildServiceImages(cmd.Context(), cfg, def, rt, imageFilter)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// buildServiceImages builds and pushes images for a service definition.
|
||||
// If imageFilter is non-empty, only the matching image is built.
|
||||
func buildServiceImages(ctx context.Context, cfg *config.CLIConfig, def *servicedef.ServiceDef, rt *runtime.Podman, imageFilter string) error {
|
||||
if def.Build == nil || len(def.Build.Images) == 0 {
|
||||
return fmt.Errorf("service %q has no [build.images] configuration", def.Name)
|
||||
}
|
||||
if def.Path == "" {
|
||||
return fmt.Errorf("service %q has no path configured", def.Name)
|
||||
}
|
||||
if cfg.Build.Workspace == "" {
|
||||
return fmt.Errorf("build.workspace is not configured in %s", cfgPath)
|
||||
}
|
||||
|
||||
sourceDir := filepath.Join(cfg.Build.Workspace, def.Path)
|
||||
|
||||
for imageName, dockerfile := range def.Build.Images {
|
||||
if imageFilter != "" && imageName != imageFilter {
|
||||
continue
|
||||
}
|
||||
|
||||
imageRef := findImageRef(def, imageName)
|
||||
if imageRef == "" {
|
||||
return fmt.Errorf("no component references image %q in service %q", imageName, def.Name)
|
||||
}
|
||||
|
||||
fmt.Printf("building %s from %s\n", imageRef, dockerfile)
|
||||
if err := rt.Build(ctx, imageRef, sourceDir, dockerfile); err != nil {
|
||||
return fmt.Errorf("build %s: %w", imageRef, err)
|
||||
}
|
||||
|
||||
fmt.Printf("pushing %s\n", imageRef)
|
||||
if err := rt.Push(ctx, imageRef); err != nil {
|
||||
return fmt.Errorf("push %s: %w", imageRef, err)
|
||||
}
|
||||
}
|
||||
|
||||
if imageFilter != "" {
|
||||
if _, ok := def.Build.Images[imageFilter]; !ok {
|
||||
return fmt.Errorf("image %q not found in [build.images] for service %q", imageFilter, def.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// findImageRef finds the full image reference for a build image name by
|
||||
// matching it against component image fields. The image name from
|
||||
// [build.images] matches the repository name in the component's image
|
||||
// reference (the path segment after the last slash, before the tag).
|
||||
func findImageRef(def *servicedef.ServiceDef, imageName string) string {
|
||||
for _, c := range def.Components {
|
||||
repoName := extractRepoName(c.Image)
|
||||
if repoName == imageName {
|
||||
return c.Image
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// extractRepoName returns the repository name from an image reference.
// It strips any registry/namespace prefix, any digest suffix, and any
// tag. Examples:
//
//	"mcr.svc.mcp.metacircular.net:8443/mcr:v1.1.0" -> "mcr"
//	"mcr.svc.mcp.metacircular.net:8443/mcr-web:v1.2.0" -> "mcr-web"
//	"mcr-web:v1.2.0" -> "mcr-web"
//	"mcr-web" -> "mcr-web"
//	"reg:8443/app@sha256:abcd..." -> "app"
func extractRepoName(image string) string {
	// Strip registry prefix (everything up to and including the last
	// slash). Doing this first keeps a colon in a registry host:port
	// from being mistaken for the tag separator.
	name := image
	if i := strings.LastIndex(image, "/"); i >= 0 {
		name = image[i+1:]
	}
	// Strip a digest suffix ("@sha256:..."); its colon would otherwise
	// confuse the tag-stripping step below.
	if i := strings.Index(name, "@"); i >= 0 {
		name = name[:i]
	}
	// Strip tag.
	if i := strings.LastIndex(name, ":"); i >= 0 {
		name = name[:i]
	}
	return name
}
|
||||
|
||||
// ensureImages checks that all component images exist in the registry.
|
||||
// If an image is missing and the service has build configuration, it
|
||||
// builds and pushes the image. Returns nil if all images are available.
|
||||
func ensureImages(ctx context.Context, cfg *config.CLIConfig, def *servicedef.ServiceDef, rt *runtime.Podman, component string) error {
|
||||
if def.Build == nil || len(def.Build.Images) == 0 {
|
||||
return nil // no build config, skip auto-build
|
||||
}
|
||||
|
||||
for _, c := range def.Components {
|
||||
if component != "" && c.Name != component {
|
||||
continue
|
||||
}
|
||||
|
||||
repoName := extractRepoName(c.Image)
|
||||
dockerfile, ok := def.Build.Images[repoName]
|
||||
if !ok {
|
||||
continue // no Dockerfile for this image, skip
|
||||
}
|
||||
|
||||
exists, err := rt.ImageExists(ctx, c.Image)
|
||||
if err != nil {
|
||||
return fmt.Errorf("check image %s: %w", c.Image, err)
|
||||
}
|
||||
if exists {
|
||||
continue
|
||||
}
|
||||
|
||||
// Image missing — build and push.
|
||||
if def.Path == "" {
|
||||
return fmt.Errorf("image %s not found in registry and service %q has no path configured", c.Image, def.Name)
|
||||
}
|
||||
if cfg.Build.Workspace == "" {
|
||||
return fmt.Errorf("image %s not found in registry and build.workspace is not configured", c.Image)
|
||||
}
|
||||
|
||||
sourceDir := filepath.Join(cfg.Build.Workspace, def.Path)
|
||||
|
||||
fmt.Printf("image %s not found, building from %s\n", c.Image, dockerfile)
|
||||
if err := rt.Build(ctx, c.Image, sourceDir, dockerfile); err != nil {
|
||||
return fmt.Errorf("auto-build %s: %w", c.Image, err)
|
||||
}
|
||||
|
||||
fmt.Printf("pushing %s\n", c.Image)
|
||||
if err := rt.Push(ctx, c.Image); err != nil {
|
||||
return fmt.Errorf("auto-push %s: %w", c.Image, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
mcpv1 "git.wntrmute.dev/kyle/mcp/gen/mcp/v1"
|
||||
"git.wntrmute.dev/kyle/mcp/internal/config"
|
||||
"git.wntrmute.dev/kyle/mcp/internal/runtime"
|
||||
"git.wntrmute.dev/kyle/mcp/internal/servicedef"
|
||||
)
|
||||
|
||||
@@ -31,6 +32,12 @@ func deployCmd() *cobra.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
// Auto-build missing images if the service has build config.
|
||||
rt := &runtime.Podman{}
|
||||
if err := ensureImages(cmd.Context(), cfg, def, rt, component); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
spec := servicedef.ToProto(def)
|
||||
|
||||
address, err := findNodeAddress(cfg, def.Node)
|
||||
|
||||
@@ -34,6 +34,7 @@ func main() {
|
||||
})
|
||||
|
||||
root.AddCommand(loginCmd())
|
||||
root.AddCommand(buildCmd())
|
||||
root.AddCommand(deployCmd())
|
||||
root.AddCommand(stopCmd())
|
||||
root.AddCommand(startCmd())
|
||||
|
||||
@@ -22,6 +22,10 @@ func (f *fakeRuntime) Pull(_ context.Context, _ string) error { re
|
||||
func (f *fakeRuntime) Run(_ context.Context, _ runtime.ContainerSpec) error { return nil }
|
||||
func (f *fakeRuntime) Stop(_ context.Context, _ string) error { return nil }
|
||||
func (f *fakeRuntime) Remove(_ context.Context, _ string) error { return nil }
|
||||
func (f *fakeRuntime) Build(_ context.Context, _, _, _ string) error { return nil }
|
||||
func (f *fakeRuntime) Push(_ context.Context, _ string) error { return nil }
|
||||
|
||||
func (f *fakeRuntime) ImageExists(_ context.Context, _ string) (bool, error) { return true, nil }
|
||||
|
||||
func (f *fakeRuntime) List(_ context.Context) ([]runtime.ContainerInfo, error) {
|
||||
return f.containers, f.listErr
|
||||
|
||||
@@ -3,6 +3,7 @@ package config
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
toml "github.com/pelletier/go-toml/v2"
|
||||
)
|
||||
@@ -10,11 +11,17 @@ import (
|
||||
// CLIConfig is the configuration for the mcp CLI binary.
type CLIConfig struct {
	// Services defines where service definition files live.
	Services ServicesConfig `toml:"services"`
	// Build holds settings for building container images
	// (the workspace path; see BuildConfig).
	Build BuildConfig `toml:"build"`
	// MCIAS configures the MCIAS connection (at least a server URL,
	// overridable via MCP_MCIAS_SERVER_URL).
	MCIAS MCIASConfig `toml:"mcias"`
	// Auth holds authentication settings; TokenPath is required.
	Auth AuthConfig `toml:"auth"`
	// Nodes lists deployment nodes; deploy resolves a service's node
	// name to an address from this list.
	Nodes []NodeConfig `toml:"nodes"`
}
|
||||
|
||||
// BuildConfig holds settings for building container images.
type BuildConfig struct {
	// Workspace is the root directory containing service source
	// checkouts; a service's build context is Workspace joined with
	// the service's path. A leading "~/" is expanded to the user's
	// home directory at config load time. Overridable via
	// MCP_BUILD_WORKSPACE.
	Workspace string `toml:"workspace"`
}
|
||||
|
||||
// ServicesConfig defines where service definition files live.
|
||||
type ServicesConfig struct {
|
||||
Dir string `toml:"dir"`
|
||||
@@ -66,6 +73,9 @@ func applyCLIEnvOverrides(cfg *CLIConfig) {
|
||||
if v := os.Getenv("MCP_SERVICES_DIR"); v != "" {
|
||||
cfg.Services.Dir = v
|
||||
}
|
||||
if v := os.Getenv("MCP_BUILD_WORKSPACE"); v != "" {
|
||||
cfg.Build.Workspace = v
|
||||
}
|
||||
if v := os.Getenv("MCP_MCIAS_SERVER_URL"); v != "" {
|
||||
cfg.MCIAS.ServerURL = v
|
||||
}
|
||||
@@ -93,5 +103,15 @@ func validateCLIConfig(cfg *CLIConfig) error {
|
||||
if cfg.Auth.TokenPath == "" {
|
||||
return fmt.Errorf("auth.token_path is required")
|
||||
}
|
||||
|
||||
// Expand ~ in workspace path.
|
||||
if strings.HasPrefix(cfg.Build.Workspace, "~/") {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return fmt.Errorf("expand workspace path: %w", err)
|
||||
}
|
||||
cfg.Build.Workspace = home + cfg.Build.Workspace[1:]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -47,6 +47,10 @@ func (f *fakeRuntime) Pull(_ context.Context, _ string) error { re
|
||||
func (f *fakeRuntime) Run(_ context.Context, _ runtime.ContainerSpec) error { return nil }
|
||||
func (f *fakeRuntime) Stop(_ context.Context, _ string) error { return nil }
|
||||
func (f *fakeRuntime) Remove(_ context.Context, _ string) error { return nil }
|
||||
func (f *fakeRuntime) Build(_ context.Context, _, _, _ string) error { return nil }
|
||||
func (f *fakeRuntime) Push(_ context.Context, _ string) error { return nil }
|
||||
|
||||
func (f *fakeRuntime) ImageExists(_ context.Context, _ string) (bool, error) { return true, nil }
|
||||
|
||||
func (f *fakeRuntime) Inspect(_ context.Context, _ string) (runtime.ContainerInfo, error) {
|
||||
return runtime.ContainerInfo{}, nil
|
||||
|
||||
@@ -3,6 +3,7 @@ package runtime
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
@@ -177,6 +178,40 @@ func (p *Podman) Inspect(ctx context.Context, name string) (ContainerInfo, error
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// Build builds a container image from a Dockerfile.
|
||||
func (p *Podman) Build(ctx context.Context, image, contextDir, dockerfile string) error {
|
||||
args := []string{"build", "-t", image, "-f", dockerfile, contextDir}
|
||||
cmd := exec.CommandContext(ctx, p.command(), args...) //nolint:gosec // args built programmatically
|
||||
cmd.Dir = contextDir
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("podman build %q: %w: %s", image, err, out)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Push pushes a container image to a remote registry.
|
||||
func (p *Podman) Push(ctx context.Context, image string) error {
|
||||
cmd := exec.CommandContext(ctx, p.command(), "push", image) //nolint:gosec // args built programmatically
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("podman push %q: %w: %s", image, err, out)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ImageExists checks whether an image tag exists in a remote registry.
|
||||
func (p *Podman) ImageExists(ctx context.Context, image string) (bool, error) {
|
||||
cmd := exec.CommandContext(ctx, p.command(), "manifest", "inspect", "docker://"+image) //nolint:gosec // args built programmatically
|
||||
if err := cmd.Run(); err != nil {
|
||||
// Exit code 1 means the manifest was not found.
|
||||
var exitErr *exec.ExitError
|
||||
if ok := errors.As(err, &exitErr); ok && exitErr.ExitCode() == 1 {
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("podman manifest inspect %q: %w", image, err)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// podmanPSEntry is a single entry from podman ps --format json.
|
||||
type podmanPSEntry struct {
|
||||
Names []string `json:"Names"`
|
||||
|
||||
@@ -34,7 +34,9 @@ type ContainerInfo struct {
|
||||
Started time.Time // when the container started (zero if not running)
|
||||
}
|
||||
|
||||
// Runtime is the container runtime abstraction.
|
||||
// Runtime is the container runtime abstraction. The first six methods are
|
||||
// used by the agent for container lifecycle. The last three are used by the
|
||||
// CLI for building and pushing images.
|
||||
type Runtime interface {
|
||||
Pull(ctx context.Context, image string) error
|
||||
Run(ctx context.Context, spec ContainerSpec) error
|
||||
@@ -42,6 +44,10 @@ type Runtime interface {
|
||||
Remove(ctx context.Context, name string) error
|
||||
Inspect(ctx context.Context, name string) (ContainerInfo, error)
|
||||
List(ctx context.Context) ([]ContainerInfo, error)
|
||||
|
||||
Build(ctx context.Context, image, contextDir, dockerfile string) error
|
||||
Push(ctx context.Context, image string) error
|
||||
ImageExists(ctx context.Context, image string) (bool, error)
|
||||
}
|
||||
|
||||
// ExtractVersion parses the tag from an image reference.
|
||||
|
||||
@@ -18,9 +18,17 @@ type ServiceDef struct {
|
||||
Name string `toml:"name"`
|
||||
Node string `toml:"node"`
|
||||
Active *bool `toml:"active,omitempty"`
|
||||
Path string `toml:"path,omitempty"`
|
||||
Build *BuildDef `toml:"build,omitempty"`
|
||||
Components []ComponentDef `toml:"components"`
|
||||
}
|
||||
|
||||
// BuildDef describes how to build container images for a service.
type BuildDef struct {
	// Images maps an image repository name (matched against the
	// repository segment of a component's image reference) to the
	// Dockerfile used to build it, relative to the service's build
	// context.
	Images map[string]string `toml:"images"`
	// UsesMCDSL marks the service as built with mcdsl.
	// NOTE(review): the consumer of this flag is not visible here —
	// verify against the build tooling.
	UsesMCDSL bool `toml:"uses_mcdsl,omitempty"`
}
|
||||
|
||||
// RouteDef describes a route for a component, used for automatic port
|
||||
// allocation and mc-proxy integration.
|
||||
type RouteDef struct {
|
||||
|
||||
Reference in New Issue
Block a user