Phase 5 (OCI pull): internal/oci/ package with manifest GET/HEAD by tag/digest, blob GET/HEAD with repo membership check, tag listing with OCI pagination, catalog listing. Multi-segment repo names via parseOCIPath() right-split routing. DB query layer in internal/db/repository.go. Phase 6 (OCI push): blob uploads (monolithic and chunked) with uploadManager tracking in-progress BlobWriters, manifest push implementing full ARCHITECTURE.md §5 flow in a single SQLite transaction (create repo, upsert manifest, populate manifest_blobs, atomic tag move). Digest verification on both blob commit and manifest push-by-digest. Phase 8 (admin REST): /v1 endpoints for auth (login/logout/health), repository management (list/detail/delete), policy CRUD with engine reload, audit log listing with filters, GC trigger/status stubs. RequireAdmin middleware, platform-standard error format. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
292 lines
8.0 KiB
Go
package oci
|
|
|
|
import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"path/filepath"
	"strings"
	"testing"

	"github.com/go-chi/chi/v5"

	"git.wntrmute.dev/kyle/mcr/internal/auth"
	"git.wntrmute.dev/kyle/mcr/internal/storage"
)
|
|
|
|
// testHandlerWithStorage creates a handler with real storage in t.TempDir().
|
|
func testHandlerWithStorage(t *testing.T, fdb *fakeDB) (*Handler, *chi.Mux) {
|
|
t.Helper()
|
|
dir := t.TempDir()
|
|
store := storage.New(dir+"/layers", dir+"/uploads")
|
|
h := NewHandler(fdb, store, allowAll(), nil)
|
|
router := chi.NewRouter()
|
|
router.Mount("/v2", h.Router())
|
|
return h, router
|
|
}
|
|
|
|
func authedPushRequest(method, path string, body []byte) *http.Request {
|
|
var reader *bytes.Reader
|
|
if body != nil {
|
|
reader = bytes.NewReader(body)
|
|
} else {
|
|
reader = bytes.NewReader(nil)
|
|
}
|
|
req := httptest.NewRequest(method, path, reader)
|
|
claims := &auth.Claims{
|
|
Subject: "pusher",
|
|
AccountType: "human",
|
|
Roles: []string{"user"},
|
|
}
|
|
return req.WithContext(auth.ContextWithClaims(req.Context(), claims))
|
|
}
|
|
|
|
func TestUploadInitiate(t *testing.T) {
|
|
fdb := newFakeDB()
|
|
_, router := testHandlerWithStorage(t, fdb)
|
|
|
|
req := authedPushRequest("POST", "/v2/myrepo/blobs/uploads/", nil)
|
|
rr := httptest.NewRecorder()
|
|
router.ServeHTTP(rr, req)
|
|
|
|
if rr.Code != http.StatusAccepted {
|
|
t.Fatalf("status: got %d, want %d", rr.Code, http.StatusAccepted)
|
|
}
|
|
|
|
loc := rr.Header().Get("Location")
|
|
if !strings.HasPrefix(loc, "/v2/myrepo/blobs/uploads/") {
|
|
t.Fatalf("Location: got %q", loc)
|
|
}
|
|
|
|
uuid := rr.Header().Get("Docker-Upload-UUID")
|
|
if uuid == "" {
|
|
t.Fatal("Docker-Upload-UUID header missing")
|
|
}
|
|
|
|
rng := rr.Header().Get("Range")
|
|
if rng != "0-0" {
|
|
t.Fatalf("Range: got %q, want %q", rng, "0-0")
|
|
}
|
|
|
|
// Verify repo was implicitly created.
|
|
if _, ok := fdb.repos["myrepo"]; !ok {
|
|
t.Fatal("repository should have been implicitly created")
|
|
}
|
|
}
|
|
|
|
func TestUploadInitiateUniqueUUIDs(t *testing.T) {
|
|
fdb := newFakeDB()
|
|
_, router := testHandlerWithStorage(t, fdb)
|
|
|
|
uuids := make(map[string]bool)
|
|
for range 5 {
|
|
req := authedPushRequest("POST", "/v2/myrepo/blobs/uploads/", nil)
|
|
rr := httptest.NewRecorder()
|
|
router.ServeHTTP(rr, req)
|
|
|
|
if rr.Code != http.StatusAccepted {
|
|
t.Fatalf("status: got %d", rr.Code)
|
|
}
|
|
|
|
uuid := rr.Header().Get("Docker-Upload-UUID")
|
|
if uuids[uuid] {
|
|
t.Fatalf("duplicate UUID: %s", uuid)
|
|
}
|
|
uuids[uuid] = true
|
|
}
|
|
}
|
|
|
|
func TestMonolithicUpload(t *testing.T) {
|
|
fdb := newFakeDB()
|
|
_, router := testHandlerWithStorage(t, fdb)
|
|
|
|
// Step 1: Initiate upload.
|
|
req := authedPushRequest("POST", "/v2/myrepo/blobs/uploads/", nil)
|
|
rr := httptest.NewRecorder()
|
|
router.ServeHTTP(rr, req)
|
|
|
|
if rr.Code != http.StatusAccepted {
|
|
t.Fatalf("initiate status: got %d", rr.Code)
|
|
}
|
|
uuid := rr.Header().Get("Docker-Upload-UUID")
|
|
|
|
// Step 2: Complete upload with body and digest in a single PUT.
|
|
blobData := []byte("hello world blob data")
|
|
sum := sha256.Sum256(blobData)
|
|
digest := "sha256:" + hex.EncodeToString(sum[:])
|
|
|
|
putURL := "/v2/myrepo/blobs/uploads/" + uuid + "?digest=" + digest
|
|
req = authedPushRequest("PUT", putURL, blobData)
|
|
rr = httptest.NewRecorder()
|
|
router.ServeHTTP(rr, req)
|
|
|
|
if rr.Code != http.StatusCreated {
|
|
t.Fatalf("complete status: got %d, body: %s", rr.Code, rr.Body.String())
|
|
}
|
|
|
|
loc := rr.Header().Get("Location")
|
|
if !strings.Contains(loc, digest) {
|
|
t.Fatalf("Location should contain digest: got %q", loc)
|
|
}
|
|
|
|
dcd := rr.Header().Get("Docker-Content-Digest")
|
|
if dcd != digest {
|
|
t.Fatalf("Docker-Content-Digest: got %q, want %q", dcd, digest)
|
|
}
|
|
|
|
// Verify blob was inserted in fake DB.
|
|
if !fdb.allBlobs[digest] {
|
|
t.Fatal("blob should exist in DB after upload")
|
|
}
|
|
}
|
|
|
|
func TestChunkedUpload(t *testing.T) {
|
|
fdb := newFakeDB()
|
|
_, router := testHandlerWithStorage(t, fdb)
|
|
|
|
// Step 1: Initiate.
|
|
req := authedPushRequest("POST", "/v2/myrepo/blobs/uploads/", nil)
|
|
rr := httptest.NewRecorder()
|
|
router.ServeHTTP(rr, req)
|
|
uuid := rr.Header().Get("Docker-Upload-UUID")
|
|
|
|
// Step 2: PATCH chunk 1.
|
|
chunk1 := []byte("chunk-one-data-")
|
|
req = authedPushRequest("PATCH", "/v2/myrepo/blobs/uploads/"+uuid, chunk1)
|
|
rr = httptest.NewRecorder()
|
|
router.ServeHTTP(rr, req)
|
|
|
|
if rr.Code != http.StatusAccepted {
|
|
t.Fatalf("patch 1 status: got %d", rr.Code)
|
|
}
|
|
|
|
// Step 3: PATCH chunk 2.
|
|
chunk2 := []byte("chunk-two-data")
|
|
req = authedPushRequest("PATCH", "/v2/myrepo/blobs/uploads/"+uuid, chunk2)
|
|
rr = httptest.NewRecorder()
|
|
router.ServeHTTP(rr, req)
|
|
|
|
if rr.Code != http.StatusAccepted {
|
|
t.Fatalf("patch 2 status: got %d", rr.Code)
|
|
}
|
|
|
|
// Step 4: Complete with PUT.
|
|
allData := append(chunk1, chunk2...)
|
|
sum := sha256.Sum256(allData)
|
|
digest := "sha256:" + hex.EncodeToString(sum[:])
|
|
|
|
req = authedPushRequest("PUT", "/v2/myrepo/blobs/uploads/"+uuid+"?digest="+digest, nil)
|
|
rr = httptest.NewRecorder()
|
|
router.ServeHTTP(rr, req)
|
|
|
|
if rr.Code != http.StatusCreated {
|
|
t.Fatalf("complete status: got %d, body: %s", rr.Code, rr.Body.String())
|
|
}
|
|
if rr.Header().Get("Docker-Content-Digest") != digest {
|
|
t.Fatalf("Docker-Content-Digest: got %q, want %q", rr.Header().Get("Docker-Content-Digest"), digest)
|
|
}
|
|
}
|
|
|
|
func TestUploadDigestMismatch(t *testing.T) {
|
|
fdb := newFakeDB()
|
|
_, router := testHandlerWithStorage(t, fdb)
|
|
|
|
// Initiate.
|
|
req := authedPushRequest("POST", "/v2/myrepo/blobs/uploads/", nil)
|
|
rr := httptest.NewRecorder()
|
|
router.ServeHTTP(rr, req)
|
|
uuid := rr.Header().Get("Docker-Upload-UUID")
|
|
|
|
// Complete with wrong digest.
|
|
blobData := []byte("some data")
|
|
wrongDigest := "sha256:0000000000000000000000000000000000000000000000000000000000000000"
|
|
|
|
req = authedPushRequest("PUT", "/v2/myrepo/blobs/uploads/"+uuid+"?digest="+wrongDigest, blobData)
|
|
rr = httptest.NewRecorder()
|
|
router.ServeHTTP(rr, req)
|
|
|
|
if rr.Code != http.StatusBadRequest {
|
|
t.Fatalf("status: got %d, want %d", rr.Code, http.StatusBadRequest)
|
|
}
|
|
|
|
var body ociErrorResponse
|
|
if err := json.NewDecoder(rr.Body).Decode(&body); err != nil {
|
|
t.Fatalf("decode error: %v", err)
|
|
}
|
|
if len(body.Errors) != 1 || body.Errors[0].Code != "DIGEST_INVALID" {
|
|
t.Fatalf("error code: got %+v, want DIGEST_INVALID", body.Errors)
|
|
}
|
|
}
|
|
|
|
func TestUploadStatus(t *testing.T) {
|
|
fdb := newFakeDB()
|
|
_, router := testHandlerWithStorage(t, fdb)
|
|
|
|
// Initiate.
|
|
req := authedPushRequest("POST", "/v2/myrepo/blobs/uploads/", nil)
|
|
rr := httptest.NewRecorder()
|
|
router.ServeHTTP(rr, req)
|
|
uuid := rr.Header().Get("Docker-Upload-UUID")
|
|
|
|
// Check status.
|
|
req = authedPushRequest("GET", "/v2/myrepo/blobs/uploads/"+uuid, nil)
|
|
rr = httptest.NewRecorder()
|
|
router.ServeHTTP(rr, req)
|
|
|
|
if rr.Code != http.StatusNoContent {
|
|
t.Fatalf("status: got %d, want %d", rr.Code, http.StatusNoContent)
|
|
}
|
|
|
|
if rr.Header().Get("Docker-Upload-UUID") != uuid {
|
|
t.Fatalf("Docker-Upload-UUID: got %q", rr.Header().Get("Docker-Upload-UUID"))
|
|
}
|
|
}
|
|
|
|
func TestUploadCancel(t *testing.T) {
|
|
fdb := newFakeDB()
|
|
_, router := testHandlerWithStorage(t, fdb)
|
|
|
|
// Initiate.
|
|
req := authedPushRequest("POST", "/v2/myrepo/blobs/uploads/", nil)
|
|
rr := httptest.NewRecorder()
|
|
router.ServeHTTP(rr, req)
|
|
uuid := rr.Header().Get("Docker-Upload-UUID")
|
|
|
|
// Cancel.
|
|
req = authedPushRequest("DELETE", "/v2/myrepo/blobs/uploads/"+uuid, nil)
|
|
rr = httptest.NewRecorder()
|
|
router.ServeHTTP(rr, req)
|
|
|
|
if rr.Code != http.StatusNoContent {
|
|
t.Fatalf("status: got %d, want %d", rr.Code, http.StatusNoContent)
|
|
}
|
|
|
|
// Verify upload was removed from DB.
|
|
if _, ok := fdb.uploads[uuid]; ok {
|
|
t.Fatal("upload should have been deleted from DB")
|
|
}
|
|
}
|
|
|
|
func TestUploadNonexistentUUID(t *testing.T) {
|
|
fdb := newFakeDB()
|
|
_, router := testHandlerWithStorage(t, fdb)
|
|
|
|
req := authedPushRequest("PATCH", "/v2/myrepo/blobs/uploads/nonexistent-uuid", []byte("data"))
|
|
rr := httptest.NewRecorder()
|
|
router.ServeHTTP(rr, req)
|
|
|
|
if rr.Code != http.StatusNotFound {
|
|
t.Fatalf("status: got %d, want %d", rr.Code, http.StatusNotFound)
|
|
}
|
|
|
|
var body ociErrorResponse
|
|
if err := json.NewDecoder(rr.Body).Decode(&body); err != nil {
|
|
t.Fatalf("decode error: %v", err)
|
|
}
|
|
if len(body.Errors) != 1 || body.Errors[0].Code != "BLOB_UPLOAD_UNKNOWN" {
|
|
t.Fatalf("error code: got %+v, want BLOB_UPLOAD_UNKNOWN", body.Errors)
|
|
}
|
|
}
|