P2.2-P2.9, P3.2-P3.10, P4.1-P4.3: Complete Phases 2, 3, and 4
11 work units built in parallel and merged:

Agent handlers (Phase 2):
- P2.2 Deploy: pull images, stop/remove/run containers, update registry
- P2.3 Lifecycle: stop/start/restart with desired_state tracking
- P2.4 Status: list (registry), live check (runtime), get status (drift+events)
- P2.5 Sync: receive desired state, reconcile unmanaged containers
- P2.6 File transfer: push/pull scoped to /srv/<service>/, path validation
- P2.7 Adopt: match <service>-* containers, derive component names
- P2.8 Monitor: continuous watch loop, drift/flap alerting, event pruning
- P2.9 Snapshot: VACUUM INTO database backup command (sketched below)

CLI commands (Phase 3):
- P3.2 Login, P3.3 Deploy, P3.4 Stop/Start/Restart
- P3.5 List/Ps/Status, P3.6 Sync, P3.7 Adopt
- P3.8 Service show/edit/export, P3.9 Push/Pull, P3.10 Node list/add/remove

Deployment artifacts (Phase 4):
- Systemd units (agent service + backup timer)
- Example configs (CLI + agent)
- Install script (idempotent)

All packages: build, vet, lint (0 issues), test (all pass).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
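As context for the P2.9 item, a minimal sketch of the VACUUM INTO approach against a SQLite-backed registry; the function name and error wrapping here are illustrative, not the agent's actual handler:

import (
	"database/sql"
	"fmt"
)

// snapshotDB is an illustrative sketch, not the real P2.9 handler. SQLite's
// VACUUM INTO writes a consistent, compacted copy of the live database to dest.
func snapshotDB(db *sql.DB, dest string) error {
	// The destination filename is an SQL expression, so recent SQLite builds
	// accept it as a bound parameter.
	if _, err := db.Exec("VACUUM INTO ?", dest); err != nil {
		return fmt.Errorf("snapshot to %s: %w", dest, err)
	}
	return nil
}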
internal/monitor/monitor.go (new file, +157 lines)
@@ -0,0 +1,157 @@
package monitor

import (
	"context"
	"database/sql"
	"fmt"
	"log/slog"
	"time"

	"git.wntrmute.dev/kyle/mcp/internal/config"
	"git.wntrmute.dev/kyle/mcp/internal/registry"
	"git.wntrmute.dev/kyle/mcp/internal/runtime"
)

// Monitor watches container states and compares them to the registry,
// recording events and firing alerts on drift or flapping.
type Monitor struct {
	db      *sql.DB
	runtime runtime.Runtime
	cfg     config.MonitorConfig
	logger  *slog.Logger
	alerter *Alerter
	stopCh  chan struct{}
	done    chan struct{}

	prevState map[string]string // key: "service/component", value: observed state
}

// New creates a Monitor with the given dependencies.
func New(db *sql.DB, rt runtime.Runtime, cfg config.MonitorConfig, nodeName string, logger *slog.Logger) *Monitor {
	return &Monitor{
		db:        db,
		runtime:   rt,
		cfg:       cfg,
		logger:    logger,
		alerter:   NewAlerter(cfg, nodeName, db, logger),
		stopCh:    make(chan struct{}),
		done:      make(chan struct{}),
		prevState: make(map[string]string),
	}
}

// Start launches the monitoring goroutine.
func (m *Monitor) Start() {
	go m.run()
}

// Stop signals the monitoring goroutine to stop and waits for it to exit.
func (m *Monitor) Stop() {
	close(m.stopCh)
	<-m.done
}

func (m *Monitor) run() {
	defer close(m.done)
	defer func() {
		if r := recover(); r != nil {
			m.logger.Error("monitor panic recovered", "panic", fmt.Sprintf("%v", r))
		}
	}()

	ticker := time.NewTicker(m.cfg.Interval.Duration)
	defer ticker.Stop()

	for {
		select {
		case <-m.stopCh:
			return
		case <-ticker.C:
			m.tick()
		}
	}
}

func (m *Monitor) tick() {
	defer func() {
		if r := recover(); r != nil {
			m.logger.Error("monitor tick panic recovered", "panic", fmt.Sprintf("%v", r))
		}
	}()

	ctx := context.Background()

	// Get the current runtime state of all containers.
	containers, err := m.runtime.List(ctx)
	if err != nil {
		m.logger.Error("monitor: list containers", "error", err)
		return
	}

	// Index runtime containers by name for fast lookup.
	runtimeState := make(map[string]string, len(containers))
	for _, c := range containers {
		runtimeState[c.Name] = c.State
	}

	// Walk all registered services and their components.
	services, err := registry.ListServices(m.db)
	if err != nil {
		m.logger.Error("monitor: list services", "error", err)
		return
	}

	seen := make(map[string]struct{})

	for _, svc := range services {
		components, err := registry.ListComponents(m.db, svc.Name)
		if err != nil {
			m.logger.Error("monitor: list components", "error", err, "service", svc.Name)
			continue
		}

		for _, comp := range components {
			key := comp.Service + "/" + comp.Name
			seen[key] = struct{}{}
			containerName := comp.Service + "-" + comp.Name

			observed := "unknown"
			if state, ok := runtimeState[containerName]; ok {
				observed = state
			}

			prev, hasPrev := m.prevState[key]
			if !hasPrev {
				prev = comp.ObservedState
			}

			if observed != prev {
				if err := registry.InsertEvent(m.db, comp.Service, comp.Name, prev, observed); err != nil {
					m.logger.Error("monitor: insert event", "error", err, "key", key)
				}

				if err := registry.UpdateComponentState(m.db, comp.Service, comp.Name, "", observed); err != nil {
					m.logger.Error("monitor: update observed state", "error", err, "key", key)
				}

				m.logger.Info("state change", "service", comp.Service, "component", comp.Name, "prev", prev, "observed", observed)
			}

			m.alerter.Evaluate(comp.Service, comp.Name, comp.DesiredState, observed, prev)

			m.prevState[key] = observed
		}
	}

	// Evict entries for components that no longer exist in the registry.
	for key := range m.prevState {
		if _, ok := seen[key]; !ok {
			delete(m.prevState, key)
		}
	}

	// Prune old events.
	if _, err := registry.PruneEvents(m.db, time.Now().Add(-m.cfg.Retention.Duration)); err != nil {
		m.logger.Error("monitor: prune events", "error", err)
	}
}
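For orientation, a minimal sketch of how an agent might drive this type; only monitor.New, Start, and Stop come from the file above, while the surrounding helper and its parameters are hypothetical:

package main

import (
	"database/sql"
	"log/slog"

	"git.wntrmute.dev/kyle/mcp/internal/config"
	"git.wntrmute.dev/kyle/mcp/internal/monitor"
	"git.wntrmute.dev/kyle/mcp/internal/runtime"
)

// startMonitor is a hypothetical helper, not part of this commit. It starts the
// watch loop and returns a shutdown hook for the agent to call on exit.
func startMonitor(db *sql.DB, rt runtime.Runtime, cfg config.MonitorConfig, nodeName string) func() {
	m := monitor.New(db, rt, cfg, nodeName, slog.Default())
	m.Start()
	// Stop closes stopCh and then blocks on the done channel until run returns.
	return m.Stop
}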