GC engine (internal/gc/): Collector.Run() implements the two-phase algorithm — Phase 1 finds unreferenced blobs and deletes DB rows in a single transaction, Phase 2 deletes blob files from storage. Registry-wide mutex blocks concurrent GC runs. Collector.Reconcile() scans filesystem for orphaned files with no DB row (crash recovery). Wired into admin_gc.go: POST /v1/gc now launches the real collector in a goroutine with gc_started/gc_completed audit events. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
44 lines
974 B
Go
package storage
|
|
|
|
import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)
|
|
|
|
// ListBlobDigests scans the layers directory and returns all blob digests
|
|
// found on disk. Used by GC reconciliation to find orphaned files.
|
|
func (s *Store) ListBlobDigests() ([]string, error) {
|
|
sha256Dir := filepath.Join(s.layersPath, "sha256")
|
|
prefixEntries, err := os.ReadDir(sha256Dir)
|
|
if err != nil {
|
|
if os.IsNotExist(err) {
|
|
return nil, nil
|
|
}
|
|
return nil, fmt.Errorf("storage: list prefix dirs: %w", err)
|
|
}
|
|
|
|
var digests []string
|
|
for _, prefix := range prefixEntries {
|
|
if !prefix.IsDir() || len(prefix.Name()) != 2 {
|
|
continue
|
|
}
|
|
prefixPath := filepath.Join(sha256Dir, prefix.Name())
|
|
blobEntries, err := os.ReadDir(prefixPath)
|
|
if err != nil {
|
|
continue
|
|
}
|
|
for _, blob := range blobEntries {
|
|
if blob.IsDir() {
|
|
continue
|
|
}
|
|
digest := "sha256:" + blob.Name()
|
|
if validateDigest(digest) == nil {
|
|
digests = append(digests, digest)
|
|
}
|
|
}
|
|
}
|
|
|
|
return digests, nil
|
|
}
|