Files
mcr/internal/storage/writer_test.go
Kyle Isom 3314b7a618 Batch A: blob storage layer, MCIAS auth, OCI token endpoint
Phase 2 — internal/storage/:
Content-addressed blob storage with atomic writes via rename.
BlobWriter stages data in uploads dir with running SHA-256 hash,
commits by verifying digest then renaming to layers/sha256/<prefix>/<hex>.
Reader provides Open, Stat, Delete, Exists with digest validation.

Phase 3 — internal/auth/ + internal/server/:
MCIAS client with Login and ValidateToken, 30s SHA-256-keyed cache
with lazy eviction and injectable clock for testing. TLS 1.3 minimum
with optional custom CA cert.
Chi router with RequireAuth middleware (Bearer token extraction,
WWW-Authenticate header, OCI error format), token endpoint (Basic
auth → bearer exchange via MCIAS), and /v2/ version check handler.

52 tests passing (14 storage + 9 auth + 9 server + 20 existing).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-19 14:51:19 -07:00

189 lines
4.3 KiB
Go

package storage
import (
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"os"
"sync"
"testing"
"time"
)
// writeTestBlob stages data through a fresh upload, commits it under its
// computed SHA-256 digest, and returns that digest string. Any failure
// aborts the calling test.
func writeTestBlob(t *testing.T, s *Store, data []byte) string {
	t.Helper()
	id := fmt.Sprintf("test-upload-%d", time.Now().UnixNano())
	w, err := s.StartUpload(id)
	if err != nil {
		t.Fatalf("StartUpload: %v", err)
	}
	if _, err = w.Write(data); err != nil {
		t.Fatalf("Write: %v", err)
	}
	sum := sha256.Sum256(data)
	want := "sha256:" + hex.EncodeToString(sum[:])
	committed, err := w.Commit(want)
	if err != nil {
		t.Fatalf("Commit: %v", err)
	}
	if committed != want {
		t.Fatalf("Commit returned %q, want %q", committed, want)
	}
	return want
}
// TestWriteAndCommit verifies that a committed blob lands on disk at the
// store's digest-derived path with the exact size and bytes written.
func TestWriteAndCommit(t *testing.T) {
	s := newTestStore(t)
	payload := []byte("hello, blob storage")
	digest := writeTestBlob(t, s, payload)

	blobFile := s.blobPath(digest)
	fi, err := os.Stat(blobFile)
	if err != nil {
		t.Fatalf("stat blob file: %v", err)
	}
	if fi.Size() != int64(len(payload)) {
		t.Fatalf("blob size: got %d, want %d", fi.Size(), len(payload))
	}

	raw, err := os.ReadFile(blobFile) //nolint:gosec // test file path from t.TempDir()
	if err != nil {
		t.Fatalf("read blob file: %v", err)
	}
	if string(raw) != string(payload) {
		t.Fatalf("blob content: got %q, want %q", raw, payload)
	}
}
// TestDigestMismatch verifies that committing against a digest that does
// not match the written bytes yields ErrDigestMismatch and removes the
// staged temp file.
func TestDigestMismatch(t *testing.T) {
	s := newTestStore(t)
	w, err := s.StartUpload("mismatch-upload")
	if err != nil {
		t.Fatalf("StartUpload: %v", err)
	}
	if _, err = w.Write([]byte("some data")); err != nil {
		t.Fatalf("Write: %v", err)
	}

	// All-zero digest cannot match any real SHA-256 of the payload.
	wrong := "sha256:0000000000000000000000000000000000000000000000000000000000000000"
	if _, err = w.Commit(wrong); !errors.Is(err, ErrDigestMismatch) {
		t.Fatalf("Commit with wrong digest: got %v, want ErrDigestMismatch", err)
	}

	// A failed commit must not leave the staged upload behind.
	if _, statErr := os.Stat(w.path); !os.IsNotExist(statErr) {
		t.Fatalf("temp file should be removed after digest mismatch, stat err: %v", statErr)
	}
}
// TestCancel verifies that canceling an in-progress upload removes its
// staged temp file.
func TestCancel(t *testing.T) {
	s := newTestStore(t)
	w, err := s.StartUpload("cancel-upload")
	if err != nil {
		t.Fatalf("StartUpload: %v", err)
	}
	if _, err = w.Write([]byte("partial data")); err != nil {
		t.Fatalf("Write: %v", err)
	}

	staged := w.path
	if err = w.Cancel(); err != nil {
		t.Fatalf("Cancel: %v", err)
	}
	if _, statErr := os.Stat(staged); !os.IsNotExist(statErr) {
		t.Fatalf("temp file should be removed after Cancel, stat err: %v", statErr)
	}
}
// TestBytesWritten verifies that BytesWritten starts at zero and
// accumulates across successive writes.
func TestBytesWritten(t *testing.T) {
	s := newTestStore(t)
	w, err := s.StartUpload("bytes-upload")
	if err != nil {
		t.Fatalf("StartUpload: %v", err)
	}
	// Discard the upload when the test finishes; Cancel errors are irrelevant here.
	t.Cleanup(func() { _ = w.Cancel() })

	if n := w.BytesWritten(); n != 0 {
		t.Fatalf("BytesWritten before write: got %d, want 0", n)
	}

	first := []byte("first chunk")
	if _, err = w.Write(first); err != nil {
		t.Fatalf("Write: %v", err)
	}
	if n := w.BytesWritten(); n != int64(len(first)) {
		t.Fatalf("BytesWritten after first write: got %d, want %d", n, len(first))
	}

	second := []byte("second chunk")
	if _, err = w.Write(second); err != nil {
		t.Fatalf("Write: %v", err)
	}
	total := int64(len(first) + len(second))
	if n := w.BytesWritten(); n != total {
		t.Fatalf("BytesWritten after second write: got %d, want %d", n, total)
	}
}
// TestConcurrentWrites commits several blobs from parallel goroutines and
// then verifies each digest is present in the store. Results are collected
// into disjoint slice slots so no synchronization beyond the WaitGroup is
// needed, and all test failures are reported from the main goroutine.
func TestConcurrentWrites(t *testing.T) {
	s := newTestStore(t)
	type result struct {
		digest string
		err    error
	}
	blobs := [][]byte{
		[]byte("concurrent blob alpha"),
		[]byte("concurrent blob beta"),
	}
	results := make([]result, len(blobs))

	var wg sync.WaitGroup
	for i := range blobs {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			d := blobs[idx]
			id := fmt.Sprintf("concurrent-%d-%d", idx, time.Now().UnixNano())
			w, err := s.StartUpload(id)
			if err != nil {
				results[idx] = result{err: err}
				return
			}
			if _, err = w.Write(d); err != nil {
				results[idx] = result{err: err}
				return
			}
			sum := sha256.Sum256(d)
			got, err := w.Commit("sha256:" + hex.EncodeToString(sum[:]))
			results[idx] = result{digest: got, err: err}
		}(i)
	}
	wg.Wait()

	for i, r := range results {
		if r.err != nil {
			t.Fatalf("blob %d: %v", i, r.err)
		}
		if !s.Exists(r.digest) {
			t.Fatalf("blob %d: digest %q not found after concurrent write", i, r.digest)
		}
	}
}