Add per-listener connection limits

Each listener can now be configured with a maximum number of concurrent
connections. When the limit is reached, new connections are closed
immediately after accept. A value of 0 means unlimited (the default,
preserving existing behavior).

Config: Listener gains a max_connections field, validated as non-negative.
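
A rough sketch of the config-side change; the struct shape and the Validate
hook are assumptions, only the max_connections key and the non-negative rule
come from this commit:

package config

import "fmt"

// Listener describes one proxy listener in the config file.
type Listener struct {
	Addr           string
	ProxyProtocol  bool
	MaxConnections int64 // config key max_connections; 0 = unlimited
}

// Validate rejects negative limits; 0 keeps the previous unlimited behavior.
func (l *Listener) Validate() error {
	if l.MaxConnections < 0 {
		return fmt.Errorf("listener %s: max_connections must be >= 0, got %d", l.Addr, l.MaxConnections)
	}
	return nil
}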

DB: Migration 3 adds listeners.max_connections column.
UpdateListenerMaxConns method for runtime changes via gRPC.
CreateListener updated to persist max_connections on seed.
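
Approximately what the migration and update method could look like; the
column type, default, and the store's internal fields are assumptions, while
the listeners.max_connections column and the UpdateListenerMaxConns name come
from this commit:

package db

import (
	"context"
	"database/sql"
	"fmt"
)

// Migration 3 (sketch): column type and default value are assumptions.
const addMaxConnections = `ALTER TABLE listeners ADD COLUMN max_connections INTEGER NOT NULL DEFAULT 0;`

type DB struct {
	conn *sql.DB
}

// UpdateListenerMaxConns persists a new connection limit for one listener row.
func (d *DB) UpdateListenerMaxConns(ctx context.Context, id, maxConns int64) error {
	res, err := d.conn.ExecContext(ctx,
		`UPDATE listeners SET max_connections = ? WHERE id = ?`, maxConns, id)
	if err != nil {
		return fmt.Errorf("update listener %d: %w", id, err)
	}
	if n, _ := res.RowsAffected(); n == 0 {
		return fmt.Errorf("listener %d: not found", id)
	}
	return nil
}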

Server: ListenerState/ListenerData gain MaxConnections. The limit is checked
in serve() after Accept but before handleConn: if ActiveConnections
>= MaxConnections, the connection is closed and the accept loop continues.
SetMaxConnections method for runtime updates.

Proto: SetListenerMaxConnections RPC added. ListenerStatus gains
max_connections field. Generated code regenerated.

gRPC server: SetListenerMaxConnections implements write-through
(DB first, then in-memory update). GetStatus includes max_connections.
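
A minimal sketch of that write-through order, assuming generated pb types, a
ListenerByID lookup on the server, and this particular error mapping; none of
those details are confirmed by the diff below:

// imports assumed: "context", the generated pb package,
// google.golang.org/grpc/codes, google.golang.org/grpc/status
func (g *GRPCServer) SetListenerMaxConnections(ctx context.Context, req *pb.SetListenerMaxConnectionsRequest) (*pb.SetListenerMaxConnectionsResponse, error) {
	// Write-through: persist to the database first so the change survives a restart.
	if err := g.db.UpdateListenerMaxConns(ctx, req.GetListenerId(), req.GetMaxConnections()); err != nil {
		return nil, status.Errorf(codes.NotFound, "listener %d: %v", req.GetListenerId(), err)
	}
	// Then update the in-memory listener so new accepts see the limit immediately.
	ls, ok := g.server.ListenerByID(req.GetListenerId())
	if !ok {
		return nil, status.Errorf(codes.NotFound, "listener %d not loaded", req.GetListenerId())
	}
	ls.SetMaxConnections(req.GetMaxConnections())
	return &pb.SetListenerMaxConnectionsResponse{}, nil
}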

Client: SetListenerMaxConnections method, MaxConnections in
ListenerStatus.
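
An illustrative caller, not taken from the actual client package; the
constructor, the GetStatus result shape, and field names other than
MaxConnections are assumptions:

// imports assumed: "context", "fmt", and the project's client package
func capListener(ctx context.Context, addr string) error {
	c, err := client.New(addr) // constructor name is an assumption
	if err != nil {
		return err
	}
	defer c.Close()

	// Cap listener 1 at 500 concurrent connections.
	if err := c.SetListenerMaxConnections(ctx, 1, 500); err != nil {
		return err
	}

	// Read the limit back via GetStatus, which now reports max_connections.
	st, err := c.GetStatus(ctx)
	if err != nil {
		return err
	}
	for _, l := range st.Listeners {
		fmt.Printf("%s: limit %d\n", l.Addr, l.MaxConnections)
	}
	return nil
}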

Tests: DB CRUD and UpdateListenerMaxConns, server connection limit
enforcement (accept 2, reject 3rd, close one, accept again), gRPC
SetListenerMaxConnections round-trip with DB persistence, not-found
error handling.
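
The server-side enforcement test appears in the diff below; the DB round-trip
could look roughly like this sketch (the newTestDB helper, the CreateListener
signature, and the Listeners accessor are assumptions):

// imports assumed: "context", "testing"
func TestUpdateListenerMaxConnsRoundTrip(t *testing.T) {
	ctx := context.Background()
	d := newTestDB(t) // assumed helper: temp database with migrations applied

	id, err := d.CreateListener(ctx, "127.0.0.1:25565", false, 100) // exact signature is an assumption
	if err != nil {
		t.Fatalf("create listener: %v", err)
	}
	if err := d.UpdateListenerMaxConns(ctx, id, 250); err != nil {
		t.Fatalf("update max conns: %v", err)
	}
	listeners, err := d.Listeners(ctx) // assumed read-back accessor
	if err != nil {
		t.Fatalf("list listeners: %v", err)
	}
	if got := listeners[0].MaxConnections; got != 250 {
		t.Fatalf("max_connections = %d, want 250", got)
	}
}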

Also updates PROJECT_PLAN.md with phases 6-8 and PROGRESS.md with
tracking for the new features.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 16:42:53 -07:00
parent 5bc8f4fc8e
commit 564e0a9c67
16 changed files with 595 additions and 102 deletions

View File

@@ -34,6 +34,7 @@ type ListenerState struct {
ID int64 // database primary key
Addr string
ProxyProtocol bool
MaxConnections int64 // 0 = unlimited
routes map[string]RouteInfo // lowercase hostname → route info
mu sync.RWMutex
ActiveConnections atomic.Int64
@@ -41,6 +42,13 @@ type ListenerState struct {
connMu sync.Mutex
}
// SetMaxConnections updates the connection limit at runtime.
func (ls *ListenerState) SetMaxConnections(n int64) {
ls.mu.Lock()
defer ls.mu.Unlock()
ls.MaxConnections = n
}
// Routes returns a snapshot of the listener's route table.
func (ls *ListenerState) Routes() map[string]RouteInfo {
ls.mu.RLock()
@@ -93,10 +101,11 @@ func (ls *ListenerState) lookupRoute(hostname string) (RouteInfo, bool) {
// ListenerData holds the data needed to construct a ListenerState.
type ListenerData struct {
ID int64
Addr string
ProxyProtocol bool
Routes map[string]RouteInfo // lowercase hostname → route info
ID int64
Addr string
ProxyProtocol bool
MaxConnections int64
Routes map[string]RouteInfo // lowercase hostname → route info
}
// Server is the mc-proxy server. It manages listeners, firewall evaluation,
@@ -116,11 +125,12 @@ func New(cfg *config.Config, fw *firewall.Firewall, listenerData []ListenerData,
var listeners []*ListenerState
for _, ld := range listenerData {
listeners = append(listeners, &ListenerState{
ID: ld.ID,
Addr: ld.Addr,
ProxyProtocol: ld.ProxyProtocol,
routes: ld.Routes,
activeConns: make(map[net.Conn]struct{}),
ID: ld.ID,
Addr: ld.Addr,
ProxyProtocol: ld.ProxyProtocol,
MaxConnections: ld.MaxConnections,
routes: ld.Routes,
activeConns: make(map[net.Conn]struct{}),
})
}
@@ -229,6 +239,13 @@ func (s *Server) serve(ctx context.Context, ln net.Listener, ls *ListenerState)
continue
}
// Enforce per-listener connection limit.
if ls.MaxConnections > 0 && ls.ActiveConnections.Load() >= ls.MaxConnections {
conn.Close()
s.logger.Debug("connection limit reached", "addr", ls.Addr, "limit", ls.MaxConnections)
continue
}
s.wg.Add(1)
ls.ActiveConnections.Add(1)
go s.handleConn(ctx, conn, ls)

View File

@@ -1052,6 +1052,96 @@ func TestProxyProtocolFirewallUsesRealIP(t *testing.T) {
wg.Wait()
}
// --- Connection limit tests ---
func TestConnectionLimitEnforced(t *testing.T) {
// Backend that holds connections open.
backendLn, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatalf("backend listen: %v", err)
}
defer backendLn.Close()
go func() {
for {
conn, err := backendLn.Accept()
if err != nil {
return
}
go io.Copy(io.Discard, conn)
}
}()
proxyLn, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatalf("proxy listen: %v", err)
}
proxyAddr := proxyLn.Addr().String()
proxyLn.Close()
srv := newTestServer(t, []ListenerData{
{
ID: 1,
Addr: proxyAddr,
MaxConnections: 2,
Routes: map[string]RouteInfo{
"limit.test": l4Route(backendLn.Addr().String()),
},
},
})
stop := startAndStop(t, srv)
defer stop()
// Open 2 connections (should succeed).
var conns []net.Conn
for i := range 2 {
conn, err := net.DialTimeout("tcp", proxyAddr, 2*time.Second)
if err != nil {
t.Fatalf("dial %d: %v", i, err)
}
conn.Write(buildClientHello("limit.test"))
conns = append(conns, conn)
}
time.Sleep(100 * time.Millisecond)
// 3rd connection should be rejected (closed immediately).
conn3, err := net.DialTimeout("tcp", proxyAddr, 2*time.Second)
if err != nil {
t.Fatalf("dial 3: %v", err)
}
conn3.Write(buildClientHello("limit.test"))
conn3.SetReadDeadline(time.Now().Add(2 * time.Second))
_, err = conn3.Read(make([]byte, 1))
if err == nil {
t.Fatal("expected 3rd connection to be closed due to limit")
}
conn3.Close()
// Close one existing connection.
conns[0].Close()
time.Sleep(200 * time.Millisecond)
// Now a new connection should succeed.
conn4, err := net.DialTimeout("tcp", proxyAddr, 2*time.Second)
if err != nil {
t.Fatalf("dial 4: %v", err)
}
defer conn4.Close()
conn4.Write(buildClientHello("limit.test"))
// Give it time to be proxied.
time.Sleep(100 * time.Millisecond)
if got := srv.TotalConnections(); got < 2 {
t.Fatalf("expected at least 2 connections, got %d", got)
}
// Clean up.
for _, c := range conns[1:] {
c.Close()
}
}
// --- Multi-hop integration tests ---
func TestMultiHopProxyProtocol(t *testing.T) {