Batch A: blob storage layer, MCIAS auth, OCI token endpoint

Phase 2 — internal/storage/:
Content-addressed blob storage with atomic writes via rename.
BlobWriter stages data in the uploads dir while keeping a running
SHA-256 hash, and commits by verifying the digest and then renaming
to layers/sha256/<prefix>/<hex>. The read side provides Open, Stat,
Delete, and Exists, all with digest validation.
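
For reference, a minimal sketch of how the pieces below compose end to
end (the module import path and upload UUID are made up for
illustration; in the registry the OCI upload handlers drive this flow):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"

	"example.invalid/registry/internal/storage" // module path assumed for illustration
)

func main() {
	s := storage.New("/data/layers", "/data/uploads")

	// Stage an upload; the UUID is normally generated by the upload handler.
	w, err := s.StartUpload("0f6f8c5e-example")
	if err != nil {
		log.Fatal(err)
	}
	data := []byte("layer bytes")
	if _, err := w.Write(data); err != nil {
		_ = w.Cancel()
		log.Fatal(err)
	}

	// Commit checks the running hash against the expected digest and
	// renames the staging file into layers/sha256/<prefix>/<hex>.
	sum := sha256.Sum256(data)
	digest, err := w.Commit("sha256:" + hex.EncodeToString(sum[:]))
	if err != nil {
		log.Fatal(err)
	}

	// Read the blob back through its content-addressed path.
	rc, err := s.Open(digest)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = rc.Close() }()
	blob, err := io.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("stored %d bytes at %s\n", len(blob), digest)
}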

Phase 3 — internal/auth/ + internal/server/:
MCIAS client with Login and ValidateToken, plus a 30-second token
validation cache keyed by the SHA-256 of the token, with lazy eviction
and an injectable clock for testing. TLS 1.3 minimum, with an optional
custom CA cert.
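
The auth files are not in the hunks below, but as a sketch of the cache
shape described above (names are illustrative, not the committed
internal/auth code): results are keyed by the SHA-256 of the raw token,
expire after 30 seconds, are evicted lazily on lookup, and read time
through an injectable clock so tests can control expiry.

package auth

import (
	"crypto/sha256"
	"sync"
	"time"
)

type cacheEntry struct {
	valid   bool
	expires time.Time
}

// tokenCache is a sketch, not the committed implementation.
type tokenCache struct {
	mu      sync.Mutex
	entries map[[32]byte]cacheEntry
	ttl     time.Duration
	now     func() time.Time // injectable clock; time.Now in production
}

func newTokenCache() *tokenCache {
	return &tokenCache{
		entries: make(map[[32]byte]cacheEntry),
		ttl:     30 * time.Second,
		now:     time.Now,
	}
}

// get returns a cached validation result; stale entries are removed on
// access (lazy eviction).
func (c *tokenCache) get(token string) (valid, ok bool) {
	key := sha256.Sum256([]byte(token))
	c.mu.Lock()
	defer c.mu.Unlock()
	e, found := c.entries[key]
	if !found {
		return false, false
	}
	if c.now().After(e.expires) {
		delete(c.entries, key)
		return false, false
	}
	return e.valid, true
}

// put records a validation result with a fresh TTL.
func (c *tokenCache) put(token string, valid bool) {
	key := sha256.Sum256([]byte(token))
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[key] = cacheEntry{valid: valid, expires: c.now().Add(c.ttl)}
}
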
Chi router with RequireAuth middleware (Bearer token extraction,
WWW-Authenticate header, OCI error format), token endpoint (Basic
auth → bearer exchange via MCIAS), and /v2/ version check handler.
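
Likewise, a sketch of the challenge path in RequireAuth (illustrative,
not the committed internal/server code; the realm and service values
are placeholders). The middleware uses the standard
func(http.Handler) http.Handler shape that chi's Use accepts, and
unauthorized requests get a WWW-Authenticate challenge plus an OCI
distribution error body:

package server

import (
	"encoding/json"
	"net/http"
	"strings"
)

// TokenValidator is the slice of the MCIAS client the middleware needs.
type TokenValidator interface {
	ValidateToken(token string) (bool, error)
}

// RequireAuth returns chi-compatible middleware that extracts the
// Bearer token and rejects the request with a 401 challenge if the
// token is missing or fails validation.
func RequireAuth(v TokenValidator, realm, service string) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			authz := r.Header.Get("Authorization")
			token := strings.TrimPrefix(authz, "Bearer ")
			if authz == "" || token == authz {
				challenge(w, realm, service)
				return
			}
			ok, err := v.ValidateToken(token)
			if err != nil || !ok {
				challenge(w, realm, service)
				return
			}
			next.ServeHTTP(w, r)
		})
	}
}

// challenge writes the WWW-Authenticate header and an OCI-style error body.
func challenge(w http.ResponseWriter, realm, service string) {
	w.Header().Set("WWW-Authenticate", `Bearer realm="`+realm+`",service="`+service+`"`)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusUnauthorized)
	_ = json.NewEncoder(w).Encode(map[string]any{
		"errors": []map[string]string{
			{"code": "UNAUTHORIZED", "message": "authentication required"},
		},
	})
}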

52 tests passing (14 storage + 9 auth + 9 server + 20 existing).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Date:   2026-03-19 14:51:19 -07:00
Parent: fde66be9c1
Commit: 3314b7a618
25 changed files with 1696 additions and 6 deletions


@@ -0,0 +1,9 @@
package storage
import "errors"
var (
ErrBlobNotFound = errors.New("storage: blob not found")
ErrDigestMismatch = errors.New("storage: digest mismatch")
ErrInvalidDigest = errors.New("storage: invalid digest format")
)


@@ -0,0 +1,75 @@
package storage
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
)
// Open validates the digest and returns a ReadCloser for the blob.
// Returns ErrBlobNotFound if the blob does not exist on disk.
func (s *Store) Open(digest string) (io.ReadCloser, error) {
if err := validateDigest(digest); err != nil {
return nil, err
}
f, err := os.Open(s.blobPath(digest))
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, ErrBlobNotFound
}
return nil, fmt.Errorf("storage: open blob: %w", err)
}
return f, nil
}
// Stat returns the size of the blob in bytes.
// Returns ErrBlobNotFound if the blob does not exist on disk.
func (s *Store) Stat(digest string) (int64, error) {
if err := validateDigest(digest); err != nil {
return 0, err
}
info, err := os.Stat(s.blobPath(digest))
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return 0, ErrBlobNotFound
}
return 0, fmt.Errorf("storage: stat blob: %w", err)
}
return info.Size(), nil
}
// Delete removes the blob file and attempts to clean up its prefix
// directory. Non-empty or already-removed prefix directories are
// silently ignored.
func (s *Store) Delete(digest string) error {
if err := validateDigest(digest); err != nil {
return err
}
path := s.blobPath(digest)
if err := os.Remove(path); err != nil {
if errors.Is(err, os.ErrNotExist) {
return ErrBlobNotFound
}
return fmt.Errorf("storage: delete blob: %w", err)
}
// Best-effort cleanup of the prefix directory.
_ = os.Remove(filepath.Dir(path))
return nil
}
// Exists reports whether the blob exists on disk.
func (s *Store) Exists(digest string) bool {
if err := validateDigest(digest); err != nil {
return false
}
_, err := os.Stat(s.blobPath(digest))
return err == nil
}


@@ -0,0 +1,107 @@
package storage
import (
"errors"
"io"
"testing"
)
func TestOpenAfterWrite(t *testing.T) {
s := newTestStore(t)
data := []byte("readable blob content")
digest := writeTestBlob(t, s, data)
rc, err := s.Open(digest)
if err != nil {
t.Fatalf("Open: %v", err)
}
defer func() { _ = rc.Close() }()
got, err := io.ReadAll(rc)
if err != nil {
t.Fatalf("ReadAll: %v", err)
}
if string(got) != string(data) {
t.Fatalf("Open content: got %q, want %q", got, data)
}
}
func TestStatAfterWrite(t *testing.T) {
s := newTestStore(t)
data := []byte("stat this blob")
digest := writeTestBlob(t, s, data)
size, err := s.Stat(digest)
if err != nil {
t.Fatalf("Stat: %v", err)
}
if size != int64(len(data)) {
t.Fatalf("Stat size: got %d, want %d", size, len(data))
}
}
func TestExists(t *testing.T) {
s := newTestStore(t)
data := []byte("existence check")
digest := writeTestBlob(t, s, data)
if !s.Exists(digest) {
t.Fatal("Exists returned false for written blob")
}
nonexistent := "sha256:0000000000000000000000000000000000000000000000000000000000000000"
if s.Exists(nonexistent) {
t.Fatal("Exists returned true for nonexistent blob")
}
}
func TestDelete(t *testing.T) {
s := newTestStore(t)
data := []byte("delete me")
digest := writeTestBlob(t, s, data)
if err := s.Delete(digest); err != nil {
t.Fatalf("Delete: %v", err)
}
if s.Exists(digest) {
t.Fatal("Exists returned true after Delete")
}
_, err := s.Open(digest)
if !errors.Is(err, ErrBlobNotFound) {
t.Fatalf("Open after Delete: got %v, want ErrBlobNotFound", err)
}
}
func TestOpenNotFound(t *testing.T) {
s := newTestStore(t)
digest := "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
_, err := s.Open(digest)
if !errors.Is(err, ErrBlobNotFound) {
t.Fatalf("Open nonexistent: got %v, want ErrBlobNotFound", err)
}
}
func TestInvalidDigestFormat(t *testing.T) {
s := newTestStore(t)
bad := "not-a-digest"
if _, err := s.Open(bad); !errors.Is(err, ErrInvalidDigest) {
t.Fatalf("Open with bad digest: got %v, want ErrInvalidDigest", err)
}
if _, err := s.Stat(bad); !errors.Is(err, ErrInvalidDigest) {
t.Fatalf("Stat with bad digest: got %v, want ErrInvalidDigest", err)
}
if err := s.Delete(bad); !errors.Is(err, ErrInvalidDigest) {
t.Fatalf("Delete with bad digest: got %v, want ErrInvalidDigest", err)
}
// Exists should return false for an invalid digest, not panic.
if s.Exists(bad) {
t.Fatal("Exists returned true for invalid digest")
}
}


@@ -0,0 +1,38 @@
package storage
import (
"path/filepath"
"regexp"
)
var digestRe = regexp.MustCompile(`^sha256:[a-f0-9]{64}$`)
// Store manages blob storage on the local filesystem.
type Store struct {
layersPath string
uploadsPath string
}
// New creates a Store that will write final blobs under layersPath and
// stage in-progress uploads under uploadsPath.
func New(layersPath, uploadsPath string) *Store {
return &Store{
layersPath: layersPath,
uploadsPath: uploadsPath,
}
}
// validateDigest checks that digest matches sha256:<64 lowercase hex chars>.
func validateDigest(digest string) error {
if !digestRe.MatchString(digest) {
return ErrInvalidDigest
}
return nil
}
// blobPath returns the filesystem path for a blob with the given digest.
// The layout is: <layersPath>/sha256/<first-2-hex>/<full-64-hex>
func (s *Store) blobPath(digest string) string {
hex := digest[len("sha256:"):]
return filepath.Join(s.layersPath, "sha256", hex[0:2], hex)
}


@@ -0,0 +1,68 @@
package storage
import (
"errors"
"path/filepath"
"testing"
)
func newTestStore(t *testing.T) *Store {
t.Helper()
dir := t.TempDir()
return New(filepath.Join(dir, "layers"), filepath.Join(dir, "uploads"))
}
func TestNew(t *testing.T) {
s := newTestStore(t)
if s == nil {
t.Fatal("New returned nil")
}
if s.layersPath == "" {
t.Fatal("layersPath is empty")
}
if s.uploadsPath == "" {
t.Fatal("uploadsPath is empty")
}
}
func TestValidateDigest(t *testing.T) {
valid := []string{
"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"sha256:0000000000000000000000000000000000000000000000000000000000000000",
"sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
}
for _, d := range valid {
if err := validateDigest(d); err != nil {
t.Errorf("validateDigest(%q) = %v, want nil", d, err)
}
}
invalid := []string{
"",
"sha256:",
"sha256:abc",
"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b85", // 63 chars
"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b8555", // 65 chars
"sha256:E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", // uppercase
"md5:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", // wrong algo
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", // missing prefix
"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b85g", // non-hex char
}
for _, d := range invalid {
if err := validateDigest(d); !errors.Is(err, ErrInvalidDigest) {
t.Errorf("validateDigest(%q) = %v, want ErrInvalidDigest", d, err)
}
}
}
func TestBlobPath(t *testing.T) {
s := New("/data/layers", "/data/uploads")
digest := "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
got := s.blobPath(digest)
want := filepath.Join("/data/layers", "sha256", "e3",
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
if got != want {
t.Fatalf("blobPath(%q)\n got %q\nwant %q", digest, got, want)
}
}

internal/storage/writer.go

@@ -0,0 +1,107 @@
package storage
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"hash"
"io"
"os"
"path/filepath"
)
// BlobWriter stages blob data in a temporary file while computing its
// SHA-256 digest on the fly.
type BlobWriter struct {
file *os.File
hash hash.Hash
mw io.Writer
path string
written int64
closed bool
store *Store
}
// StartUpload begins a new blob upload, creating a temp file at
// <uploadsPath>/<uuid>.
func (s *Store) StartUpload(uuid string) (*BlobWriter, error) {
if err := os.MkdirAll(s.uploadsPath, 0700); err != nil {
return nil, fmt.Errorf("storage: create uploads dir: %w", err)
}
path := filepath.Join(s.uploadsPath, uuid)
f, err := os.Create(path) //nolint:gosec // upload UUID is server-generated, not user input
if err != nil {
return nil, fmt.Errorf("storage: create upload file: %w", err)
}
h := sha256.New()
return &BlobWriter{
file: f,
hash: h,
mw: io.MultiWriter(f, h),
path: path,
store: s,
}, nil
}
// Write writes p to both the staging file and the running hash.
func (bw *BlobWriter) Write(p []byte) (int, error) {
n, err := bw.mw.Write(p)
bw.written += int64(n)
if err != nil {
return n, fmt.Errorf("storage: write: %w", err)
}
return n, nil
}
// Commit finalises the upload. It closes the staging file, verifies
// the computed digest matches expectedDigest, and atomically moves
// the file to its content-addressed location.
func (bw *BlobWriter) Commit(expectedDigest string) (string, error) {
if !bw.closed {
bw.closed = true
if err := bw.file.Close(); err != nil {
return "", fmt.Errorf("storage: close upload file: %w", err)
}
}
if err := validateDigest(expectedDigest); err != nil {
_ = os.Remove(bw.path)
return "", err
}
computed := "sha256:" + hex.EncodeToString(bw.hash.Sum(nil))
if computed != expectedDigest {
_ = os.Remove(bw.path)
return "", ErrDigestMismatch
}
dst := bw.store.blobPath(computed)
if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil {
return "", fmt.Errorf("storage: create blob dir: %w", err)
}
if err := os.Rename(bw.path, dst); err != nil {
return "", fmt.Errorf("storage: rename blob: %w", err)
}
return computed, nil
}
// Cancel aborts the upload, closing and removing the temp file.
func (bw *BlobWriter) Cancel() error {
if !bw.closed {
bw.closed = true
_ = bw.file.Close()
}
if err := os.Remove(bw.path); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("storage: remove upload file: %w", err)
}
return nil
}
// BytesWritten returns the number of bytes written so far.
func (bw *BlobWriter) BytesWritten() int64 {
return bw.written
}


@@ -0,0 +1,188 @@
package storage
import (
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"os"
"sync"
"testing"
"time"
)
func writeTestBlob(t *testing.T, s *Store, data []byte) string {
t.Helper()
uuid := "test-upload-" + fmt.Sprintf("%d", time.Now().UnixNano())
w, err := s.StartUpload(uuid)
if err != nil {
t.Fatalf("StartUpload: %v", err)
}
if _, err := w.Write(data); err != nil {
t.Fatalf("Write: %v", err)
}
h := sha256.Sum256(data)
digest := "sha256:" + hex.EncodeToString(h[:])
got, err := w.Commit(digest)
if err != nil {
t.Fatalf("Commit: %v", err)
}
if got != digest {
t.Fatalf("Commit returned %q, want %q", got, digest)
}
return digest
}
func TestWriteAndCommit(t *testing.T) {
s := newTestStore(t)
data := []byte("hello, blob storage")
digest := writeTestBlob(t, s, data)
// Verify file exists at expected path.
path := s.blobPath(digest)
info, err := os.Stat(path)
if err != nil {
t.Fatalf("stat blob file: %v", err)
}
if info.Size() != int64(len(data)) {
t.Fatalf("blob size: got %d, want %d", info.Size(), len(data))
}
// Verify content.
content, err := os.ReadFile(path) //nolint:gosec // test file path from t.TempDir()
if err != nil {
t.Fatalf("read blob file: %v", err)
}
if string(content) != string(data) {
t.Fatalf("blob content: got %q, want %q", content, data)
}
}
func TestDigestMismatch(t *testing.T) {
s := newTestStore(t)
data := []byte("some data")
uuid := "mismatch-upload"
w, err := s.StartUpload(uuid)
if err != nil {
t.Fatalf("StartUpload: %v", err)
}
if _, err := w.Write(data); err != nil {
t.Fatalf("Write: %v", err)
}
wrongDigest := "sha256:0000000000000000000000000000000000000000000000000000000000000000"
_, err = w.Commit(wrongDigest)
if !errors.Is(err, ErrDigestMismatch) {
t.Fatalf("Commit with wrong digest: got %v, want ErrDigestMismatch", err)
}
// Verify temp file was cleaned up.
tempPath := w.path
if _, err := os.Stat(tempPath); !os.IsNotExist(err) {
t.Fatalf("temp file should be removed after digest mismatch, stat err: %v", err)
}
}
func TestCancel(t *testing.T) {
s := newTestStore(t)
uuid := "cancel-upload"
w, err := s.StartUpload(uuid)
if err != nil {
t.Fatalf("StartUpload: %v", err)
}
if _, err := w.Write([]byte("partial data")); err != nil {
t.Fatalf("Write: %v", err)
}
tempPath := w.path
if err := w.Cancel(); err != nil {
t.Fatalf("Cancel: %v", err)
}
if _, err := os.Stat(tempPath); !os.IsNotExist(err) {
t.Fatalf("temp file should be removed after Cancel, stat err: %v", err)
}
}
func TestBytesWritten(t *testing.T) {
s := newTestStore(t)
uuid := "bytes-upload"
w, err := s.StartUpload(uuid)
if err != nil {
t.Fatalf("StartUpload: %v", err)
}
t.Cleanup(func() { _ = w.Cancel() })
if w.BytesWritten() != 0 {
t.Fatalf("BytesWritten before write: got %d, want 0", w.BytesWritten())
}
data1 := []byte("first chunk")
if _, err := w.Write(data1); err != nil {
t.Fatalf("Write: %v", err)
}
if w.BytesWritten() != int64(len(data1)) {
t.Fatalf("BytesWritten after first write: got %d, want %d", w.BytesWritten(), len(data1))
}
data2 := []byte("second chunk")
if _, err := w.Write(data2); err != nil {
t.Fatalf("Write: %v", err)
}
want := int64(len(data1) + len(data2))
if w.BytesWritten() != want {
t.Fatalf("BytesWritten after second write: got %d, want %d", w.BytesWritten(), want)
}
}
func TestConcurrentWrites(t *testing.T) {
s := newTestStore(t)
type result struct {
digest string
err error
}
blobs := [][]byte{
[]byte("concurrent blob alpha"),
[]byte("concurrent blob beta"),
}
results := make([]result, len(blobs))
var wg sync.WaitGroup
for i, data := range blobs {
wg.Add(1)
go func(idx int, d []byte) {
defer wg.Done()
uuid := fmt.Sprintf("concurrent-%d-%d", idx, time.Now().UnixNano())
w, err := s.StartUpload(uuid)
if err != nil {
results[idx] = result{err: err}
return
}
if _, err := w.Write(d); err != nil {
results[idx] = result{err: err}
return
}
h := sha256.Sum256(d)
digest := "sha256:" + hex.EncodeToString(h[:])
got, err := w.Commit(digest)
results[idx] = result{digest: got, err: err}
}(i, data)
}
wg.Wait()
for i, r := range results {
if r.err != nil {
t.Fatalf("blob %d: %v", i, r.err)
}
if !s.Exists(r.digest) {
t.Fatalf("blob %d: digest %q not found after concurrent write", i, r.digest)
}
}
}