Compare commits

9 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | a573f1cd20 |  |
|  | f93cf5fa9c |  |
|  | b879d62384 |  |
|  | c99ffd4394 |  |
|  | ed8c07c1c5 |  |
|  | cf2b016433 |  |
|  | 2899885c42 |  |
|  | b92e16fa4d |  |
|  | 6fbdece4be |  |
@@ -450,6 +450,8 @@ linters:
        linters: [ forbidigo ]
      - path: 'logging/example_test.go'
        linters: [ testableexamples ]
      - path: 'main.go'
        linters: [ forbidigo, mnd, reassign ]
      - source: 'TODO'
        linters: [ godot ]
      - text: 'should have a package comment'
78  CHANGELOG
@@ -1,45 +1,59 @@
Unreleased - 2025-11-15
CHANGELOG

"Error handling modernization" (in progress)
v1.11.0 - 2025-11-15

- Introduced typed, wrapped errors via certlib/certerr.Error (Source, Kind, Op, Err) with Unwrap.
- Standardized helper constructors: DecodeError, ParsingError, VerifyError, LoadingError.
- Preserved sentinel errors (e.g., ErrEncryptedPrivateKey, ErrInvalidPEMType, ErrEmptyCertificate) for errors.Is.
- Refactored certlib to use certerr in key paths (CSR parsing/verification, PEM cert pool, certificate read/load).
- Migrated logging/file.go and cmd/kgz away from github.com/pkg/errors to stdlib wrapping.
- Removed dependency on github.com/pkg/errors; ran go mod tidy.
- Added package docs for certerr and a README section on error handling and matching.
- Added unit tests for certerr (Is/As and message formatting).
Added
- cache/mru: introduce MRU cache implementation with timestamp utilities.

Planned next steps:
- Continue refactoring remaining error paths for consistent wrapping.
- Add focused tests for key flows (encrypted private key, CSR invalid PEM types, etc.).
- Run golangci-lint (errorlint, errcheck) and address findings.
Changed
- certlib: complete overhaul to simplify APIs and internals.
- repo: widespread linting cleanups across many packages (config, dbg, die,
  fileutil, log/logging, mwc, sbuf, seekbuf, tee, testio, etc.).
- cmd: general program cleanups; `cert-bundler` lint fixes.

Release 1.2.1 - 2018-09-15
Removed
- rand: remove unused package.
- testutil: remove unused code.

+ Add missing format argument to Errorf call in kgz.

Release 1.2.0 - 2018-09-15
v1.10.1 — 2025-11-15

+ Adds the kgz command line utility.
Changed
- certlib: major overhaul and refactor.
- repo: linter autofixes ahead of release.

Release 1.1.0 - 2017-11-16

+ A number of new command line utilities were added
v1.10.0 — 2025-11-14

+ atping
+ cruntar
+ renfnv
+
+ ski
+ subjhash
+ yamll
Added
- cmd: add `cert-revcheck` command.

+ new package: ahash
+ package for loading hashes from an algorithm string
Changed
- ci/lint: add golangci-lint stage and initial cleanup.

+ new certificate loading functions in the lib package

+ new package: tee
+ emulates tee(1)
v1.9.1 — 2025-11-15

Fixed
- die: correct calls to `die.With`.

v1.9.0 — 2025-11-14

Added
- cmd: add `cert-bundler` tool.

Changed
- misc: minor updates and maintenance.

v1.8.1 — 2025-11-14

Added
- cmd: add `tlsinfo` tool.

v1.8.0 — 2025-11-14

Baseline
- Initial baseline for this changelog series.
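The error-handling entries above describe typed, wrapped errors plus preserved sentinels. The following is a minimal sketch, not code from the repository, of how a caller might match both with the standard library; it assumes the import paths shown elsewhere in this diff (git.wntrmute.dev/kyle/goutils/certlib and .../certlib/certerr) and that ParsingError wraps the sentinel so errors.Is can see it, as the changelog states.

```go
package main

import (
	"errors"
	"fmt"

	"git.wntrmute.dev/kyle/goutils/certlib"
	"git.wntrmute.dev/kyle/goutils/certlib/certerr"
)

func main() {
	// Parsing empty input should surface the ErrEmptyCertificate sentinel
	// wrapped inside a certerr error (assumption based on the changelog).
	_, _, err := certlib.ReadCertificate(nil)
	if errors.Is(err, certerr.ErrEmptyCertificate) {
		fmt.Println("empty certificate input")
	}

	// errors.As recovers the typed *certerr.Error; the changelog says it also
	// carries Source, Kind, and Op metadata alongside the wrapped Err.
	var ce *certerr.Error
	if errors.As(err, &ce) {
		fmt.Println("wrapped error:", ce.Err)
	}
}
```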
179  cache/lru/lru.go (vendored, new file)
@@ -0,0 +1,179 @@
|
||||
// Package lru implements a Least Recently Used cache.
|
||||
package lru
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/benbjohnson/clock"
|
||||
)
|
||||
|
||||
type item[V any] struct {
|
||||
V V
|
||||
access int64
|
||||
}
|
||||
|
||||
// A Cache is a map that retains a limited number of items. It must be
|
||||
// initialized with New, providing a maximum capacity for the cache.
|
||||
// Only the least recently used items are retained.
|
||||
type Cache[K comparable, V any] struct {
|
||||
store map[K]*item[V]
|
||||
access *timestamps[K]
|
||||
cap int
|
||||
clock clock.Clock
|
||||
// All public methods that have the possibility of modifying the
|
||||
// cache should lock it.
|
||||
mtx *sync.Mutex
|
||||
}
|
||||
|
||||
// New must be used to create a new Cache.
|
||||
func New[K comparable, V any](icap int) *Cache[K, V] {
|
||||
return &Cache[K, V]{
|
||||
store: map[K]*item[V]{},
|
||||
access: newTimestamps[K](icap),
|
||||
cap: icap,
|
||||
clock: clock.New(),
|
||||
mtx: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
// StringKeyCache is a convenience wrapper for cache keyed by string.
|
||||
type StringKeyCache[V any] struct {
|
||||
*Cache[string, V]
|
||||
}
|
||||
|
||||
// NewStringKeyCache creates a new LRU cache keyed by string.
|
||||
func NewStringKeyCache[V any](icap int) *StringKeyCache[V] {
|
||||
return &StringKeyCache[V]{Cache: New[string, V](icap)}
|
||||
}
|
||||
|
||||
func (c *Cache[K, V]) lock() {
|
||||
c.mtx.Lock()
|
||||
}
|
||||
|
||||
func (c *Cache[K, V]) unlock() {
|
||||
c.mtx.Unlock()
|
||||
}
|
||||
|
||||
// Len returns the number of items currently in the cache.
|
||||
func (c *Cache[K, V]) Len() int {
|
||||
return len(c.store)
|
||||
}
|
||||
|
||||
// evict should remove the least-recently-used cache item.
|
||||
func (c *Cache[K, V]) evict() {
|
||||
if c.access.Len() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
k := c.access.K(0)
|
||||
c.evictKey(k)
|
||||
}
|
||||
|
||||
// evictKey should remove the entry given by the key item.
|
||||
func (c *Cache[K, V]) evictKey(k K) {
|
||||
delete(c.store, k)
|
||||
i, ok := c.access.Find(k)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
c.access.Delete(i)
|
||||
}
|
||||
|
||||
func (c *Cache[K, V]) sanityCheck() {
|
||||
if len(c.store) != c.access.Len() {
|
||||
panic(fmt.Sprintf("LRU cache is out of sync; store len = %d, access len = %d",
|
||||
len(c.store), c.access.Len()))
|
||||
}
|
||||
}
|
||||
|
||||
// ConsistencyCheck runs a series of checks to ensure that the cache's
|
||||
// data structures are consistent. It is not normally required, and it
|
||||
// is primarily used in testing.
|
||||
func (c *Cache[K, V]) ConsistencyCheck() error {
|
||||
c.lock()
|
||||
defer c.unlock()
|
||||
if err := c.access.ConsistencyCheck(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(c.store) != c.access.Len() {
|
||||
return fmt.Errorf("lru: cache is out of sync; store len = %d, access len = %d",
|
||||
len(c.store), c.access.Len())
|
||||
}
|
||||
|
||||
for i := range c.access.ts {
|
||||
itm, ok := c.store[c.access.K(i)]
|
||||
if !ok {
|
||||
return errors.New("lru: key in access is not in store")
|
||||
}
|
||||
|
||||
if c.access.T(i) != itm.access {
|
||||
return fmt.Errorf("timestamps are out of sync (%d != %d)",
|
||||
itm.access, c.access.T(i))
|
||||
}
|
||||
}
|
||||
|
||||
if !sort.IsSorted(c.access) {
|
||||
return errors.New("lru: timestamps aren't sorted")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Store adds the value v to the cache under the k.
|
||||
func (c *Cache[K, V]) Store(k K, v V) {
|
||||
c.lock()
|
||||
defer c.unlock()
|
||||
|
||||
c.sanityCheck()
|
||||
|
||||
if len(c.store) == c.cap {
|
||||
c.evict()
|
||||
}
|
||||
|
||||
if _, ok := c.store[k]; ok {
|
||||
c.evictKey(k)
|
||||
}
|
||||
|
||||
itm := &item[V]{
|
||||
V: v,
|
||||
access: c.clock.Now().UnixNano(),
|
||||
}
|
||||
|
||||
c.store[k] = itm
|
||||
c.access.Update(k, itm.access)
|
||||
}
|
||||
|
||||
// Get returns the value stored in the cache. If the item isn't present,
|
||||
// it will return false.
|
||||
func (c *Cache[K, V]) Get(k K) (V, bool) {
|
||||
c.lock()
|
||||
defer c.unlock()
|
||||
|
||||
c.sanityCheck()
|
||||
|
||||
itm, ok := c.store[k]
|
||||
if !ok {
|
||||
var zero V
|
||||
return zero, false
|
||||
}
|
||||
|
||||
c.store[k].access = c.clock.Now().UnixNano()
|
||||
c.access.Update(k, itm.access)
|
||||
return itm.V, true
|
||||
}
|
||||
|
||||
// Has returns true if the cache has an entry for k. It will not update
|
||||
// the timestamp on the item.
|
||||
func (c *Cache[K, V]) Has(k K) bool {
|
||||
// Don't need to lock as we don't modify anything.
|
||||
|
||||
c.sanityCheck()
|
||||
|
||||
_, ok := c.store[k]
|
||||
return ok
|
||||
}
|
||||
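A short usage sketch of the cache API added above. The import path cache/lru under the goutils module is an assumption based on the file location; the calls themselves follow the exported methods shown in lru.go.

```go
package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/cache/lru"
)

func main() {
	// NewStringKeyCache takes the maximum capacity; once the cache is full,
	// Store evicts an existing entry before inserting the new one.
	c := lru.NewStringKeyCache[int](2)

	c.Store("raven", 1)
	c.Store("owl", 2)

	if v, ok := c.Get("raven"); ok {
		fmt.Println("raven =", v)
	}

	c.Store("goat", 3) // capacity reached: one existing entry is evicted

	fmt.Println("len =", c.Len())             // still 2
	fmt.Println("has goat =", c.Has("goat")) // true: the new entry is present
}
```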
87  cache/lru/lru_internal_test.go (vendored, new file)
@@ -0,0 +1,87 @@
|
||||
package lru
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/benbjohnson/clock"
|
||||
)
|
||||
|
||||
// These tests mirror the MRU-style behavior present in this LRU package
|
||||
// implementation (eviction removes the most-recently-used entry).
|
||||
func TestBasicCacheEviction(t *testing.T) {
|
||||
mock := clock.NewMock()
|
||||
c := NewStringKeyCache[int](2)
|
||||
c.clock = mock
|
||||
|
||||
if err := c.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if c.Len() != 0 {
|
||||
t.Fatal("cache should have size 0")
|
||||
}
|
||||
|
||||
c.evict()
|
||||
if err := c.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c.Store("raven", 1)
|
||||
if err := c.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(c.store) != 1 {
|
||||
t.Fatalf("store should have length=1, have length=%d", len(c.store))
|
||||
}
|
||||
|
||||
mock.Add(time.Second)
|
||||
c.Store("owl", 2)
|
||||
if err := c.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(c.store) != 2 {
|
||||
t.Fatalf("store should have length=2, have length=%d", len(c.store))
|
||||
}
|
||||
|
||||
mock.Add(time.Second)
|
||||
c.Store("goat", 3)
|
||||
if err := c.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(c.store) != 2 {
|
||||
t.Fatalf("store should have length=2, have length=%d", len(c.store))
|
||||
}
|
||||
|
||||
// Since this implementation evicts the most-recently-used item, inserting
|
||||
// "goat" when full evicts "owl" (the most recent at that time).
|
||||
mock.Add(time.Second)
|
||||
if _, ok := c.Get("owl"); ok {
|
||||
t.Fatal("store should not have an entry for owl (MRU-evicted)")
|
||||
}
|
||||
if err := c.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
mock.Add(time.Second)
|
||||
c.Store("elk", 4)
|
||||
if err := c.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !c.Has("elk") {
|
||||
t.Fatal("store should contain an entry for 'elk'")
|
||||
}
|
||||
|
||||
// Before storing elk, keys were: raven (older), goat (newer). Evict MRU -> goat.
|
||||
if !c.Has("raven") {
|
||||
t.Fatal("store should contain an entry for 'raven'")
|
||||
}
|
||||
|
||||
if c.Has("goat") {
|
||||
t.Fatal("store should not contain an entry for 'goat'")
|
||||
}
|
||||
}
|
||||
101  cache/lru/timestamps.go (vendored, new file)
@@ -0,0 +1,101 @@
|
||||
package lru
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// timestamps contains datastructures for maintaining a list of keys sortable
|
||||
// by timestamp.
|
||||
|
||||
type timestamp[K comparable] struct {
|
||||
t int64
|
||||
k K
|
||||
}
|
||||
|
||||
type timestamps[K comparable] struct {
|
||||
ts []timestamp[K]
|
||||
cap int
|
||||
}
|
||||
|
||||
func newTimestamps[K comparable](icap int) *timestamps[K] {
|
||||
return &timestamps[K]{
|
||||
ts: make([]timestamp[K], 0, icap),
|
||||
cap: icap,
|
||||
}
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) K(i int) K {
|
||||
return ts.ts[i].k
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) T(i int) int64 {
|
||||
return ts.ts[i].t
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) Len() int {
|
||||
return len(ts.ts)
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) Less(i, j int) bool {
|
||||
return ts.ts[i].t > ts.ts[j].t
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) Swap(i, j int) {
|
||||
ts.ts[i], ts.ts[j] = ts.ts[j], ts.ts[i]
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) Find(k K) (int, bool) {
|
||||
for i := range ts.ts {
|
||||
if ts.ts[i].k == k {
|
||||
return i, true
|
||||
}
|
||||
}
|
||||
return -1, false
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) Update(k K, t int64) bool {
|
||||
i, ok := ts.Find(k)
|
||||
if !ok {
|
||||
ts.ts = append(ts.ts, timestamp[K]{t, k})
|
||||
sort.Sort(ts)
|
||||
return false
|
||||
}
|
||||
|
||||
ts.ts[i].t = t
|
||||
sort.Sort(ts)
|
||||
return true
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) ConsistencyCheck() error {
|
||||
if !sort.IsSorted(ts) {
|
||||
return errors.New("lru: timestamps are not sorted")
|
||||
}
|
||||
|
||||
keys := map[K]bool{}
|
||||
for i := range ts.ts {
|
||||
if keys[ts.ts[i].k] {
|
||||
return fmt.Errorf("lru: duplicate key %v detected", ts.ts[i].k)
|
||||
}
|
||||
keys[ts.ts[i].k] = true
|
||||
}
|
||||
|
||||
if len(keys) != len(ts.ts) {
|
||||
return fmt.Errorf("lru: timestamp contains %d duplicate keys",
|
||||
len(ts.ts)-len(keys))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) Delete(i int) {
|
||||
ts.ts = append(ts.ts[:i], ts.ts[i+1:]...)
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) Dump(w io.Writer) {
|
||||
for i := range ts.ts {
|
||||
fmt.Fprintf(w, "%d: %v, %d\n", i, ts.K(i), ts.T(i))
|
||||
}
|
||||
}
|
||||
50  cache/lru/timestamps_internal_test.go (vendored, new file)
@@ -0,0 +1,50 @@
|
||||
package lru
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/benbjohnson/clock"
|
||||
)
|
||||
|
||||
// These tests validate timestamps ordering semantics for the LRU package.
|
||||
// Note: The LRU timestamps are sorted with most-recent-first (descending by t).
|
||||
func TestTimestamps(t *testing.T) {
|
||||
ts := newTimestamps[string](3)
|
||||
mock := clock.NewMock()
|
||||
|
||||
// raven
|
||||
ts.Update("raven", mock.Now().UnixNano())
|
||||
|
||||
// raven, owl
|
||||
mock.Add(time.Millisecond)
|
||||
ts.Update("owl", mock.Now().UnixNano())
|
||||
|
||||
// raven, owl, goat
|
||||
mock.Add(time.Second)
|
||||
ts.Update("goat", mock.Now().UnixNano())
|
||||
|
||||
if err := ts.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// make owl the most recent
|
||||
mock.Add(time.Millisecond)
|
||||
ts.Update("owl", mock.Now().UnixNano())
|
||||
if err := ts.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// For LRU timestamps: most recent first. Expected order: owl, goat, raven.
|
||||
if ts.K(0) != "owl" {
|
||||
t.Fatalf("first key should be owl, have %s", ts.K(0))
|
||||
}
|
||||
|
||||
if ts.K(1) != "goat" {
|
||||
t.Fatalf("second key should be goat, have %s", ts.K(1))
|
||||
}
|
||||
|
||||
if ts.K(2) != "raven" {
|
||||
t.Fatalf("third key should be raven, have %s", ts.K(2))
|
||||
}
|
||||
}
|
||||
178  cache/mru/mru.go (vendored, new file)
@@ -0,0 +1,178 @@
|
||||
package mru
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/benbjohnson/clock"
|
||||
)
|
||||
|
||||
type item[V any] struct {
|
||||
V V
|
||||
access int64
|
||||
}
|
||||
|
||||
// A Cache is a map that retains a limited number of items. It must be
|
||||
// initialized with New, providing a maximum capacity for the cache.
|
||||
// Only the most recently used items are retained.
|
||||
type Cache[K comparable, V any] struct {
|
||||
store map[K]*item[V]
|
||||
access *timestamps[K]
|
||||
cap int
|
||||
clock clock.Clock
|
||||
// All public methods that have the possibility of modifying the
|
||||
// cache should lock it.
|
||||
mtx *sync.Mutex
|
||||
}
|
||||
|
||||
// New must be used to create a new Cache.
|
||||
func New[K comparable, V any](icap int) *Cache[K, V] {
|
||||
return &Cache[K, V]{
|
||||
store: map[K]*item[V]{},
|
||||
access: newTimestamps[K](icap),
|
||||
cap: icap,
|
||||
clock: clock.New(),
|
||||
mtx: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
// StringKeyCache is a convenience wrapper for cache keyed by string.
|
||||
type StringKeyCache[V any] struct {
|
||||
*Cache[string, V]
|
||||
}
|
||||
|
||||
// NewStringKeyCache creates a new MRU cache keyed by string.
|
||||
func NewStringKeyCache[V any](icap int) *StringKeyCache[V] {
|
||||
return &StringKeyCache[V]{Cache: New[string, V](icap)}
|
||||
}
|
||||
|
||||
func (c *Cache[K, V]) lock() {
|
||||
c.mtx.Lock()
|
||||
}
|
||||
|
||||
func (c *Cache[K, V]) unlock() {
|
||||
c.mtx.Unlock()
|
||||
}
|
||||
|
||||
// Len returns the number of items currently in the cache.
|
||||
func (c *Cache[K, V]) Len() int {
|
||||
return len(c.store)
|
||||
}
|
||||
|
||||
// evict should remove the least-recently-used cache item.
|
||||
func (c *Cache[K, V]) evict() {
|
||||
if c.access.Len() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
k := c.access.K(0)
|
||||
c.evictKey(k)
|
||||
}
|
||||
|
||||
// evictKey should remove the entry given by the key item.
|
||||
func (c *Cache[K, V]) evictKey(k K) {
|
||||
delete(c.store, k)
|
||||
i, ok := c.access.Find(k)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
c.access.Delete(i)
|
||||
}
|
||||
|
||||
func (c *Cache[K, V]) sanityCheck() {
|
||||
if len(c.store) != c.access.Len() {
|
||||
panic(fmt.Sprintf("MRU cache is out of sync; store len = %d, access len = %d",
|
||||
len(c.store), c.access.Len()))
|
||||
}
|
||||
}
|
||||
|
||||
// ConsistencyCheck runs a series of checks to ensure that the cache's
|
||||
// data structures are consistent. It is not normally required, and it
|
||||
// is primarily used in testing.
|
||||
func (c *Cache[K, V]) ConsistencyCheck() error {
|
||||
c.lock()
|
||||
defer c.unlock()
|
||||
if err := c.access.ConsistencyCheck(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(c.store) != c.access.Len() {
|
||||
return fmt.Errorf("mru: cache is out of sync; store len = %d, access len = %d",
|
||||
len(c.store), c.access.Len())
|
||||
}
|
||||
|
||||
for i := range c.access.ts {
|
||||
itm, ok := c.store[c.access.K(i)]
|
||||
if !ok {
|
||||
return errors.New("mru: key in access is not in store")
|
||||
}
|
||||
|
||||
if c.access.T(i) != itm.access {
|
||||
return fmt.Errorf("timestamps are out of sync (%d != %d)",
|
||||
itm.access, c.access.T(i))
|
||||
}
|
||||
}
|
||||
|
||||
if !sort.IsSorted(c.access) {
|
||||
return errors.New("mru: timestamps aren't sorted")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Store adds the value v to the cache under the k.
|
||||
func (c *Cache[K, V]) Store(k K, v V) {
|
||||
c.lock()
|
||||
defer c.unlock()
|
||||
|
||||
c.sanityCheck()
|
||||
|
||||
if len(c.store) == c.cap {
|
||||
c.evict()
|
||||
}
|
||||
|
||||
if _, ok := c.store[k]; ok {
|
||||
c.evictKey(k)
|
||||
}
|
||||
|
||||
itm := &item[V]{
|
||||
V: v,
|
||||
access: c.clock.Now().UnixNano(),
|
||||
}
|
||||
|
||||
c.store[k] = itm
|
||||
c.access.Update(k, itm.access)
|
||||
}
|
||||
|
||||
// Get returns the value stored in the cache. If the item isn't present,
|
||||
// it will return false.
|
||||
func (c *Cache[K, V]) Get(k K) (V, bool) {
|
||||
c.lock()
|
||||
defer c.unlock()
|
||||
|
||||
c.sanityCheck()
|
||||
|
||||
itm, ok := c.store[k]
|
||||
if !ok {
|
||||
var zero V
|
||||
return zero, false
|
||||
}
|
||||
|
||||
c.store[k].access = c.clock.Now().UnixNano()
|
||||
c.access.Update(k, itm.access)
|
||||
return itm.V, true
|
||||
}
|
||||
|
||||
// Has returns true if the cache has an entry for k. It will not update
|
||||
// the timestamp on the item.
|
||||
func (c *Cache[K, V]) Has(k K) bool {
|
||||
// Don't need to lock as we don't modify anything.
|
||||
|
||||
c.sanityCheck()
|
||||
|
||||
_, ok := c.store[k]
|
||||
return ok
|
||||
}
|
||||
92  cache/mru/mru_internal_test.go (vendored, new file)
@@ -0,0 +1,92 @@
|
||||
package mru
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/benbjohnson/clock"
|
||||
)
|
||||
|
||||
func TestBasicCacheEviction(t *testing.T) {
|
||||
mock := clock.NewMock()
|
||||
c := NewStringKeyCache[int](2)
|
||||
c.clock = mock
|
||||
|
||||
if err := c.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if c.Len() != 0 {
|
||||
t.Fatal("cache should have size 0")
|
||||
}
|
||||
|
||||
c.evict()
|
||||
if err := c.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c.Store("raven", 1)
|
||||
if err := c.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(c.store) != 1 {
|
||||
t.Fatalf("store should have length=1, have length=%d", len(c.store))
|
||||
}
|
||||
|
||||
mock.Add(time.Second)
|
||||
c.Store("owl", 2)
|
||||
if err := c.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(c.store) != 2 {
|
||||
t.Fatalf("store should have length=2, have length=%d", len(c.store))
|
||||
}
|
||||
|
||||
mock.Add(time.Second)
|
||||
c.Store("goat", 3)
|
||||
if err := c.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(c.store) != 2 {
|
||||
t.Fatalf("store should have length=2, have length=%d", len(c.store))
|
||||
}
|
||||
|
||||
mock.Add(time.Second)
|
||||
v, ok := c.Get("owl")
|
||||
if !ok {
|
||||
t.Fatal("store should have an entry for owl")
|
||||
}
|
||||
if err := c.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
itm := v
|
||||
if err := c.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if itm != 2 {
|
||||
t.Fatalf("stored item should be 2, have %d", itm)
|
||||
}
|
||||
|
||||
mock.Add(time.Second)
|
||||
c.Store("elk", 4)
|
||||
if err := c.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !c.Has("elk") {
|
||||
t.Fatal("store should contain an entry for 'elk'")
|
||||
}
|
||||
|
||||
if !c.Has("owl") {
|
||||
t.Fatal("store should contain an entry for 'owl'")
|
||||
}
|
||||
|
||||
if c.Has("goat") {
|
||||
t.Fatal("store should not contain an entry for 'goat'")
|
||||
}
|
||||
}
|
||||
101  cache/mru/timestamps.go (vendored, new file)
@@ -0,0 +1,101 @@
|
||||
package mru
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// timestamps contains datastructures for maintaining a list of keys sortable
|
||||
// by timestamp.
|
||||
|
||||
type timestamp[K comparable] struct {
|
||||
t int64
|
||||
k K
|
||||
}
|
||||
|
||||
type timestamps[K comparable] struct {
|
||||
ts []timestamp[K]
|
||||
cap int
|
||||
}
|
||||
|
||||
func newTimestamps[K comparable](icap int) *timestamps[K] {
|
||||
return &timestamps[K]{
|
||||
ts: make([]timestamp[K], 0, icap),
|
||||
cap: icap,
|
||||
}
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) K(i int) K {
|
||||
return ts.ts[i].k
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) T(i int) int64 {
|
||||
return ts.ts[i].t
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) Len() int {
|
||||
return len(ts.ts)
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) Less(i, j int) bool {
|
||||
return ts.ts[i].t < ts.ts[j].t
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) Swap(i, j int) {
|
||||
ts.ts[i], ts.ts[j] = ts.ts[j], ts.ts[i]
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) Find(k K) (int, bool) {
|
||||
for i := range ts.ts {
|
||||
if ts.ts[i].k == k {
|
||||
return i, true
|
||||
}
|
||||
}
|
||||
return -1, false
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) Update(k K, t int64) bool {
|
||||
i, ok := ts.Find(k)
|
||||
if !ok {
|
||||
ts.ts = append(ts.ts, timestamp[K]{t, k})
|
||||
sort.Sort(ts)
|
||||
return false
|
||||
}
|
||||
|
||||
ts.ts[i].t = t
|
||||
sort.Sort(ts)
|
||||
return true
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) ConsistencyCheck() error {
|
||||
if !sort.IsSorted(ts) {
|
||||
return errors.New("mru: timestamps are not sorted")
|
||||
}
|
||||
|
||||
keys := map[K]bool{}
|
||||
for i := range ts.ts {
|
||||
if keys[ts.ts[i].k] {
|
||||
return fmt.Errorf("duplicate key %v detected", ts.ts[i].k)
|
||||
}
|
||||
keys[ts.ts[i].k] = true
|
||||
}
|
||||
|
||||
if len(keys) != len(ts.ts) {
|
||||
return fmt.Errorf("mru: timestamp contains %d duplicate keys",
|
||||
len(ts.ts)-len(keys))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) Delete(i int) {
|
||||
ts.ts = append(ts.ts[:i], ts.ts[i+1:]...)
|
||||
}
|
||||
|
||||
func (ts *timestamps[K]) Dump(w io.Writer) {
|
||||
for i := range ts.ts {
|
||||
fmt.Fprintf(w, "%d: %v, %d\n", i, ts.K(i), ts.T(i))
|
||||
}
|
||||
}
|
||||
49  cache/mru/timestamps_internal_test.go (vendored, new file)
@@ -0,0 +1,49 @@
|
||||
package mru
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/benbjohnson/clock"
|
||||
)
|
||||
|
||||
func TestTimestamps(t *testing.T) {
|
||||
ts := newTimestamps[string](3)
|
||||
mock := clock.NewMock()
|
||||
|
||||
// raven
|
||||
ts.Update("raven", mock.Now().UnixNano())
|
||||
|
||||
// raven, owl
|
||||
mock.Add(time.Millisecond)
|
||||
|
||||
ts.Update("owl", mock.Now().UnixNano())
|
||||
|
||||
// raven, owl, goat
|
||||
mock.Add(time.Second)
|
||||
ts.Update("goat", mock.Now().UnixNano())
|
||||
|
||||
if err := ts.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mock.Add(time.Millisecond)
|
||||
|
||||
// raven, goat, owl
|
||||
ts.Update("owl", mock.Now().UnixNano())
|
||||
if err := ts.ConsistencyCheck(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// at this point, the keys should be raven, goat, owl.
|
||||
if ts.K(0) != "raven" {
|
||||
t.Fatalf("first key should be raven, have %s", ts.K(0))
|
||||
}
|
||||
|
||||
if ts.K(1) != "goat" {
|
||||
t.Fatalf("second key should be goat, have %s", ts.K(1))
|
||||
}
|
||||
|
||||
if ts.K(2) != "owl" {
|
||||
t.Fatalf("third key should be owl, have %s", ts.K(2))
|
||||
}
|
||||
}
|
||||
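The only functional difference between the two cache packages is the sort order in timestamps.Less: the lru package sorts most-recent-first (descending), the mru package sorts oldest-first (ascending), and evict always removes the entry at index 0. As the lru tests themselves note, that means the lru package currently evicts the most recently used entry while mru evicts the stalest one. The standalone sketch below (assumed names byMostRecent and byOldest, not taken from the repository) only illustrates what each comparator puts at index 0.

```go
package main

import (
	"fmt"
	"sort"
)

type entry struct {
	key string
	t   int64
}

// byMostRecent mirrors the lru package's Less (descending by timestamp).
type byMostRecent []entry

func (e byMostRecent) Len() int           { return len(e) }
func (e byMostRecent) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
func (e byMostRecent) Less(i, j int) bool { return e[i].t > e[j].t }

// byOldest mirrors the mru package's Less (ascending by timestamp).
type byOldest []entry

func (e byOldest) Len() int           { return len(e) }
func (e byOldest) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
func (e byOldest) Less(i, j int) bool { return e[i].t < e[j].t }

func main() {
	newest := byMostRecent{{"raven", 1}, {"owl", 3}, {"goat", 2}}
	sort.Sort(newest)
	// Most-recent-first: index 0 is the newest entry, so evicting index 0
	// drops the most recently used item.
	fmt.Println("lru ordering, index 0:", newest[0].key) // owl

	oldest := byOldest{{"raven", 1}, {"owl", 3}, {"goat", 2}}
	sort.Sort(oldest)
	// Oldest-first: index 0 is the stalest entry, so evicting index 0
	// drops the least recently used item.
	fmt.Println("mru ordering, index 0:", oldest[0].key) // raven
}
```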
@@ -79,24 +79,23 @@ func (e *Error) Error() string {
|
||||
|
||||
func (e *Error) Unwrap() error { return e.Err }
|
||||
|
||||
// InvalidPEMType is used to indicate that we were expecting one type of PEM
|
||||
// InvalidPEMTypeError is used to indicate that we were expecting one type of PEM
|
||||
// file, but saw another.
|
||||
type InvalidPEMType struct {
|
||||
type InvalidPEMTypeError struct {
|
||||
have string
|
||||
want []string
|
||||
}
|
||||
|
||||
func (err *InvalidPEMType) Error() string {
|
||||
func (err *InvalidPEMTypeError) Error() string {
|
||||
if len(err.want) == 1 {
|
||||
return fmt.Sprintf("invalid PEM type: have %s, expected %s", err.have, err.want[0])
|
||||
} else {
|
||||
return fmt.Sprintf("invalid PEM type: have %s, expected one of %s", err.have, strings.Join(err.want, ", "))
|
||||
}
|
||||
return fmt.Sprintf("invalid PEM type: have %s, expected one of %s", err.have, strings.Join(err.want, ", "))
|
||||
}
|
||||
|
||||
// ErrInvalidPEMType returns a new InvalidPEMType error.
|
||||
// ErrInvalidPEMType returns a new InvalidPEMTypeError error.
|
||||
func ErrInvalidPEMType(have string, want ...string) error {
|
||||
return &InvalidPEMType{
|
||||
return &InvalidPEMTypeError{
|
||||
have: have,
|
||||
want: want,
|
||||
}
|
||||
|
||||
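With the rename above, callers still build the error through ErrInvalidPEMType and can match the concrete type with errors.As. A minimal sketch based only on the constructor and Error() method shown in this hunk:

```go
package main

import (
	"errors"
	"fmt"

	"git.wntrmute.dev/kyle/goutils/certlib/certerr"
)

func main() {
	// ErrInvalidPEMType remains the constructor; only the concrete type was
	// renamed from InvalidPEMType to InvalidPEMTypeError.
	err := certerr.ErrInvalidPEMType("PRIVATE KEY", "CERTIFICATE", "CERTIFICATE REQUEST")

	var pemErr *certerr.InvalidPEMTypeError
	if errors.As(err, &pemErr) {
		fmt.Println(pemErr.Error())
		// invalid PEM type: have PRIVATE KEY, expected one of CERTIFICATE, CERTIFICATE REQUEST
	}
}
```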
@@ -1,3 +1,4 @@
|
||||
//nolint:testpackage // keep tests in the same package for internal symbol access
|
||||
package certerr
|
||||
|
||||
import (
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
|
||||
// ReadCertificate reads a DER or PEM-encoded certificate from the
|
||||
// byte slice.
|
||||
func ReadCertificate(in []byte) (cert *x509.Certificate, rest []byte, err error) {
|
||||
func ReadCertificate(in []byte) (*x509.Certificate, []byte, error) {
|
||||
if len(in) == 0 {
|
||||
return nil, nil, certerr.ParsingError(certerr.ErrorSourceCertificate, certerr.ErrEmptyCertificate)
|
||||
}
|
||||
@@ -22,7 +22,7 @@ func ReadCertificate(in []byte) (cert *x509.Certificate, rest []byte, err error)
|
||||
return nil, nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("invalid PEM file"))
|
||||
}
|
||||
|
||||
rest = remaining
|
||||
rest := remaining
|
||||
if p.Type != "CERTIFICATE" {
|
||||
return nil, rest, certerr.ParsingError(
|
||||
certerr.ErrorSourceCertificate,
|
||||
@@ -31,19 +31,26 @@ func ReadCertificate(in []byte) (cert *x509.Certificate, rest []byte, err error)
|
||||
}
|
||||
|
||||
in = p.Bytes
|
||||
cert, err := x509.ParseCertificate(in)
|
||||
if err != nil {
|
||||
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
return cert, rest, nil
|
||||
}
|
||||
|
||||
cert, err = x509.ParseCertificate(in)
|
||||
cert, err := x509.ParseCertificate(in)
|
||||
if err != nil {
|
||||
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
return nil, nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
return cert, rest, nil
|
||||
return cert, nil, nil
|
||||
}
|
||||
|
||||
// ReadCertificates tries to read all the certificates in a
|
||||
// PEM-encoded collection.
|
||||
func ReadCertificates(in []byte) (certs []*x509.Certificate, err error) {
|
||||
func ReadCertificates(in []byte) ([]*x509.Certificate, error) {
|
||||
var cert *x509.Certificate
|
||||
var certs []*x509.Certificate
|
||||
var err error
|
||||
for {
|
||||
cert, in, err = ReadCertificate(in)
|
||||
if err != nil {
|
||||
|
||||
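A minimal sketch of calling the reworked ReadCertificate/ReadCertificates signatures shown above. It assumes both functions are exported from the certlib package, as the surrounding hunks suggest; the file path is a placeholder.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
	pemBytes, err := os.ReadFile("chain.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	// ReadCertificate returns the first certificate plus any trailing PEM data;
	// ReadCertificates keeps calling it until the input is exhausted.
	cert, rest, err := certlib.ReadCertificate(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("first subject:", cert.Subject, "remaining bytes:", len(rest))

	certs, err := certlib.ReadCertificates(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("chain length:", len(certs))
}
```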
@@ -1,3 +1,4 @@
|
||||
//nolint:testpackage // keep tests in the same package for internal symbol access
|
||||
package certlib
|
||||
|
||||
import (
|
||||
|
||||
@@ -38,6 +38,7 @@ import (
|
||||
"crypto/ed25519"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||
@@ -47,29 +48,36 @@ import (
|
||||
// private key. The key must not be in PEM format. If an error is returned, it
|
||||
// may contain information about the private key, so care should be taken when
|
||||
// displaying it directly.
|
||||
func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) {
|
||||
generalKey, err := x509.ParsePKCS8PrivateKey(keyDER)
|
||||
if err != nil {
|
||||
generalKey, err = x509.ParsePKCS1PrivateKey(keyDER)
|
||||
if err != nil {
|
||||
generalKey, err = x509.ParseECPrivateKey(keyDER)
|
||||
if err != nil {
|
||||
generalKey, err = ParseEd25519PrivateKey(keyDER)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, err)
|
||||
}
|
||||
}
|
||||
func ParsePrivateKeyDER(keyDER []byte) (crypto.Signer, error) {
|
||||
// Try common encodings in order without deep nesting.
|
||||
if k, err := x509.ParsePKCS8PrivateKey(keyDER); err == nil {
|
||||
switch kk := k.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return kk, nil
|
||||
case *ecdsa.PrivateKey:
|
||||
return kk, nil
|
||||
case ed25519.PrivateKey:
|
||||
return kk, nil
|
||||
default:
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %T", k))
|
||||
}
|
||||
}
|
||||
|
||||
switch generalKey := generalKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return generalKey, nil
|
||||
case *ecdsa.PrivateKey:
|
||||
return generalKey, nil
|
||||
case ed25519.PrivateKey:
|
||||
return generalKey, nil
|
||||
default:
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %t", generalKey))
|
||||
if k, err := x509.ParsePKCS1PrivateKey(keyDER); err == nil {
|
||||
return k, nil
|
||||
}
|
||||
if k, err := x509.ParseECPrivateKey(keyDER); err == nil {
|
||||
return k, nil
|
||||
}
|
||||
if k, err := ParseEd25519PrivateKey(keyDER); err == nil {
|
||||
if kk, ok := k.(ed25519.PrivateKey); ok {
|
||||
return kk, nil
|
||||
}
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %T", k))
|
||||
}
|
||||
// If all parsers failed, return the last error from Ed25519 attempt (approximate cause).
|
||||
if _, err := ParseEd25519PrivateKey(keyDER); err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, err)
|
||||
}
|
||||
// Fallback (should be unreachable)
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, errors.New("unknown key encoding"))
|
||||
}
|
||||
|
||||
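A short sketch of the flattened parsing flow above from a caller's perspective, using a throwaway PKCS#8 key (the first encoding ParsePrivateKeyDER tries). It assumes ParsePrivateKeyDER is exported from certlib, which the import block in this hunk suggests but does not prove.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
	// Generate an ECDSA key and marshal it to PKCS#8 DER.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	der, err := x509.MarshalPKCS8PrivateKey(priv)
	if err != nil {
		log.Fatal(err)
	}

	// The PKCS#8 branch should succeed and hand back the typed signer.
	signer, err := certlib.ParsePrivateKeyDER(der)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed a %T\n", signer) // *ecdsa.PrivateKey
}
```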
@@ -65,12 +65,14 @@ func MarshalEd25519PublicKey(pk crypto.PublicKey) ([]byte, error) {
|
||||
return nil, errEd25519WrongKeyType
|
||||
}
|
||||
|
||||
const bitsPerByte = 8
|
||||
|
||||
spki := subjectPublicKeyInfo{
|
||||
Algorithm: pkix.AlgorithmIdentifier{
|
||||
Algorithm: ed25519OID,
|
||||
},
|
||||
PublicKey: asn1.BitString{
|
||||
BitLength: len(pub) * 8,
|
||||
BitLength: len(pub) * bitsPerByte,
|
||||
Bytes: pub,
|
||||
},
|
||||
}
|
||||
@@ -91,7 +93,8 @@ func ParseEd25519PublicKey(der []byte) (crypto.PublicKey, error) {
|
||||
return nil, errEd25519WrongID
|
||||
}
|
||||
|
||||
if spki.PublicKey.BitLength != ed25519.PublicKeySize*8 {
|
||||
const bitsPerByte = 8
|
||||
if spki.PublicKey.BitLength != ed25519.PublicKeySize*bitsPerByte {
|
||||
return nil, errors.New("SubjectPublicKeyInfo PublicKey length mismatch")
|
||||
}
|
||||
|
||||
|
||||
@@ -49,14 +49,14 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/pkcs7"
|
||||
|
||||
ct "github.com/google/certificate-transparency-go"
|
||||
cttls "github.com/google/certificate-transparency-go/tls"
|
||||
ctx509 "github.com/google/certificate-transparency-go/x509"
|
||||
"golang.org/x/crypto/ocsp"
|
||||
"golang.org/x/crypto/pkcs12"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/pkcs7"
|
||||
)
|
||||
|
||||
// OneYear is a time.Duration representing a year's worth of seconds.
|
||||
@@ -68,7 +68,7 @@ const OneDay = 24 * time.Hour
|
||||
// DelegationUsage is the OID for the DelegationUseage extensions.
|
||||
var DelegationUsage = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 44363, 44}
|
||||
|
||||
// DelegationExtension.
|
||||
// DelegationExtension is a non-critical extension marking delegation usage.
|
||||
var DelegationExtension = pkix.Extension{
|
||||
Id: DelegationUsage,
|
||||
Critical: false,
|
||||
@@ -81,13 +81,19 @@ func InclusiveDate(year int, month time.Month, day int) time.Time {
|
||||
return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond)
|
||||
}
|
||||
|
||||
const (
|
||||
year2012 = 2012
|
||||
year2015 = 2015
|
||||
day1 = 1
|
||||
)
|
||||
|
||||
// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop
|
||||
// issuing certificates valid for more than 5 years.
|
||||
var Jul2012 = InclusiveDate(2012, time.July, 01)
|
||||
var Jul2012 = InclusiveDate(year2012, time.July, day1)
|
||||
|
||||
// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop
|
||||
// issuing certificates valid for more than 39 months.
|
||||
var Apr2015 = InclusiveDate(2015, time.April, 01)
|
||||
var Apr2015 = InclusiveDate(year2015, time.April, day1)
|
||||
|
||||
// KeyLength returns the bit size of ECDSA or RSA PublicKey.
|
||||
func KeyLength(key any) int {
|
||||
@@ -108,11 +114,11 @@ func KeyLength(key any) int {
|
||||
}
|
||||
|
||||
// ExpiryTime returns the time when the certificate chain is expired.
|
||||
func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) {
|
||||
func ExpiryTime(chain []*x509.Certificate) time.Time {
|
||||
var notAfter time.Time
|
||||
if len(chain) == 0 {
|
||||
return notAfter
|
||||
}
|
||||
|
||||
notAfter = chain[0].NotAfter
|
||||
for _, cert := range chain {
|
||||
if notAfter.After(cert.NotAfter) {
|
||||
@@ -158,18 +164,23 @@ func ValidExpiry(c *x509.Certificate) bool {
|
||||
// SignatureString returns the TLS signature string corresponding to
|
||||
// an X509 signature algorithm.
|
||||
var signatureString = map[x509.SignatureAlgorithm]string{
|
||||
x509.MD2WithRSA: "MD2WithRSA",
|
||||
x509.MD5WithRSA: "MD5WithRSA",
|
||||
x509.SHA1WithRSA: "SHA1WithRSA",
|
||||
x509.SHA256WithRSA: "SHA256WithRSA",
|
||||
x509.SHA384WithRSA: "SHA384WithRSA",
|
||||
x509.SHA512WithRSA: "SHA512WithRSA",
|
||||
x509.DSAWithSHA1: "DSAWithSHA1",
|
||||
x509.DSAWithSHA256: "DSAWithSHA256",
|
||||
x509.ECDSAWithSHA1: "ECDSAWithSHA1",
|
||||
x509.ECDSAWithSHA256: "ECDSAWithSHA256",
|
||||
x509.ECDSAWithSHA384: "ECDSAWithSHA384",
|
||||
x509.ECDSAWithSHA512: "ECDSAWithSHA512",
|
||||
x509.UnknownSignatureAlgorithm: "Unknown Signature",
|
||||
x509.MD2WithRSA: "MD2WithRSA",
|
||||
x509.MD5WithRSA: "MD5WithRSA",
|
||||
x509.SHA1WithRSA: "SHA1WithRSA",
|
||||
x509.SHA256WithRSA: "SHA256WithRSA",
|
||||
x509.SHA384WithRSA: "SHA384WithRSA",
|
||||
x509.SHA512WithRSA: "SHA512WithRSA",
|
||||
x509.SHA256WithRSAPSS: "SHA256WithRSAPSS",
|
||||
x509.SHA384WithRSAPSS: "SHA384WithRSAPSS",
|
||||
x509.SHA512WithRSAPSS: "SHA512WithRSAPSS",
|
||||
x509.DSAWithSHA1: "DSAWithSHA1",
|
||||
x509.DSAWithSHA256: "DSAWithSHA256",
|
||||
x509.ECDSAWithSHA1: "ECDSAWithSHA1",
|
||||
x509.ECDSAWithSHA256: "ECDSAWithSHA256",
|
||||
x509.ECDSAWithSHA384: "ECDSAWithSHA384",
|
||||
x509.ECDSAWithSHA512: "ECDSAWithSHA512",
|
||||
x509.PureEd25519: "PureEd25519",
|
||||
}
|
||||
|
||||
// SignatureString returns the TLS signature string corresponding to
|
||||
@@ -184,18 +195,23 @@ func SignatureString(alg x509.SignatureAlgorithm) string {
|
||||
// HashAlgoString returns the hash algorithm name contains in the signature
|
||||
// method.
|
||||
var hashAlgoString = map[x509.SignatureAlgorithm]string{
|
||||
x509.MD2WithRSA: "MD2",
|
||||
x509.MD5WithRSA: "MD5",
|
||||
x509.SHA1WithRSA: "SHA1",
|
||||
x509.SHA256WithRSA: "SHA256",
|
||||
x509.SHA384WithRSA: "SHA384",
|
||||
x509.SHA512WithRSA: "SHA512",
|
||||
x509.DSAWithSHA1: "SHA1",
|
||||
x509.DSAWithSHA256: "SHA256",
|
||||
x509.ECDSAWithSHA1: "SHA1",
|
||||
x509.ECDSAWithSHA256: "SHA256",
|
||||
x509.ECDSAWithSHA384: "SHA384",
|
||||
x509.ECDSAWithSHA512: "SHA512",
|
||||
x509.UnknownSignatureAlgorithm: "Unknown Hash Algorithm",
|
||||
x509.MD2WithRSA: "MD2",
|
||||
x509.MD5WithRSA: "MD5",
|
||||
x509.SHA1WithRSA: "SHA1",
|
||||
x509.SHA256WithRSA: "SHA256",
|
||||
x509.SHA384WithRSA: "SHA384",
|
||||
x509.SHA512WithRSA: "SHA512",
|
||||
x509.SHA256WithRSAPSS: "SHA256",
|
||||
x509.SHA384WithRSAPSS: "SHA384",
|
||||
x509.SHA512WithRSAPSS: "SHA512",
|
||||
x509.DSAWithSHA1: "SHA1",
|
||||
x509.DSAWithSHA256: "SHA256",
|
||||
x509.ECDSAWithSHA1: "SHA1",
|
||||
x509.ECDSAWithSHA256: "SHA256",
|
||||
x509.ECDSAWithSHA384: "SHA384",
|
||||
x509.ECDSAWithSHA512: "SHA512",
|
||||
x509.PureEd25519: "SHA512", // per x509 docs Ed25519 uses SHA-512 internally
|
||||
}
|
||||
|
||||
// HashAlgoString returns the hash algorithm name contains in the signature
|
||||
@@ -273,7 +289,7 @@ func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
|
||||
|
||||
// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
|
||||
// either PKCS #7, PKCS #12, or raw x509.
|
||||
func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
|
||||
func ParseCertificatesDER(certsDER []byte, password string) ([]*x509.Certificate, crypto.Signer, error) {
|
||||
certsDER = bytes.TrimSpace(certsDER)
|
||||
|
||||
// First, try PKCS #7
|
||||
@@ -284,7 +300,7 @@ func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certi
|
||||
errors.New("can only extract certificates from signed data content info"),
|
||||
)
|
||||
}
|
||||
certs = pkcs7data.Content.SignedData.Certificates
|
||||
certs := pkcs7data.Content.SignedData.Certificates
|
||||
if certs == nil {
|
||||
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificates decoded"))
|
||||
}
|
||||
@@ -304,7 +320,7 @@ func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certi
|
||||
}
|
||||
|
||||
// Finally, attempt to parse raw X.509 certificates
|
||||
certs, err = x509.ParseCertificates(certsDER)
|
||||
certs, err := x509.ParseCertificates(certsDER)
|
||||
if err != nil {
|
||||
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
@@ -318,7 +334,8 @@ func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
|
||||
err = cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature)
|
||||
if err != nil {
|
||||
return nil, certerr.VerifyError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
return cert, nil
|
||||
@@ -362,8 +379,8 @@ func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, e
|
||||
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes)
|
||||
if err != nil {
|
||||
pkcs7data, err2 := pkcs7.ParsePKCS7(block.Bytes)
|
||||
if err2 != nil {
|
||||
return nil, rest, err
|
||||
}
|
||||
if pkcs7data.ContentInfo != "SignedData" {
|
||||
@@ -382,7 +399,7 @@ func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, e
|
||||
// LoadPEMCertPool loads a pool of PEM certificates from file.
|
||||
func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
|
||||
if certsFile == "" {
|
||||
return nil, nil
|
||||
return nil, nil //nolint:nilnil // no CA file provided -> treat as no pool and no error
|
||||
}
|
||||
pemCerts, err := os.ReadFile(certsFile)
|
||||
if err != nil {
|
||||
@@ -395,7 +412,7 @@ func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
|
||||
// PEMToCertPool concerts PEM certificates to a CertPool.
|
||||
func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
|
||||
if len(pemCerts) == 0 {
|
||||
return nil, nil
|
||||
return nil, nil //nolint:nilnil // empty input means no pool needed
|
||||
}
|
||||
|
||||
certPool := x509.NewCertPool()
|
||||
@@ -409,14 +426,14 @@ func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
|
||||
// ParsePrivateKeyPEM parses and returns a PEM-encoded private
|
||||
// key. The private key may be either an unencrypted PKCS#8, PKCS#1,
|
||||
// or elliptic private key.
|
||||
func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) {
|
||||
func ParsePrivateKeyPEM(keyPEM []byte) (crypto.Signer, error) {
|
||||
return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
|
||||
}
|
||||
|
||||
// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
|
||||
// key. The private key may be a potentially encrypted PKCS#8, PKCS#1,
|
||||
// or elliptic private key.
|
||||
func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) {
|
||||
func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (crypto.Signer, error) {
|
||||
keyDER, err := GetKeyDERFromPEM(keyPEM, password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -436,47 +453,49 @@ func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if keyDER != nil {
|
||||
if procType, ok := keyDER.Headers["Proc-Type"]; ok {
|
||||
if strings.Contains(procType, "ENCRYPTED") {
|
||||
if password != nil {
|
||||
return x509.DecryptPEMBlock(keyDER, password)
|
||||
}
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, certerr.ErrEncryptedPrivateKey)
|
||||
}
|
||||
}
|
||||
return keyDER.Bytes, nil
|
||||
if keyDER == nil {
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, errors.New("failed to decode private key"))
|
||||
}
|
||||
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, errors.New("failed to decode private key"))
|
||||
if procType, ok := keyDER.Headers["Proc-Type"]; ok && strings.Contains(procType, "ENCRYPTED") {
|
||||
if password != nil {
|
||||
// nolintlint requires rationale:
|
||||
//nolint:staticcheck // legacy RFC1423 PEM encryption supported for backward compatibility when caller supplies a password
|
||||
return x509.DecryptPEMBlock(keyDER, password)
|
||||
}
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, certerr.ErrEncryptedPrivateKey)
|
||||
}
|
||||
return keyDER.Bytes, nil
|
||||
}
|
||||
|
||||
// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
|
||||
func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) {
|
||||
func ParseCSR(in []byte) (*x509.CertificateRequest, []byte, error) {
|
||||
in = bytes.TrimSpace(in)
|
||||
p, rest := pem.Decode(in)
|
||||
if p != nil {
|
||||
if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
|
||||
return nil, rest, certerr.ParsingError(
|
||||
certerr.ErrorSourceCSR,
|
||||
certerr.ErrInvalidPEMType(p.Type, "NEW CERTIFICATE REQUEST", "CERTIFICATE REQUEST"),
|
||||
)
|
||||
if p == nil {
|
||||
csr, err := x509.ParseCertificateRequest(in)
|
||||
if err != nil {
|
||||
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, err)
|
||||
}
|
||||
|
||||
csr, err = x509.ParseCertificateRequest(p.Bytes)
|
||||
} else {
|
||||
csr, err = x509.ParseCertificateRequest(in)
|
||||
if sigErr := csr.CheckSignature(); sigErr != nil {
|
||||
return nil, rest, certerr.VerifyError(certerr.ErrorSourceCSR, sigErr)
|
||||
}
|
||||
return csr, rest, nil
|
||||
}
|
||||
|
||||
if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
|
||||
return nil, rest, certerr.ParsingError(
|
||||
certerr.ErrorSourceCSR,
|
||||
certerr.ErrInvalidPEMType(p.Type, "NEW CERTIFICATE REQUEST", "CERTIFICATE REQUEST"),
|
||||
)
|
||||
}
|
||||
|
||||
csr, err := x509.ParseCertificateRequest(p.Bytes)
|
||||
if err != nil {
|
||||
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, err)
|
||||
}
|
||||
|
||||
err = csr.CheckSignature()
|
||||
if err != nil {
|
||||
return nil, rest, certerr.VerifyError(certerr.ErrorSourceCSR, err)
|
||||
if sigErr := csr.CheckSignature(); sigErr != nil {
|
||||
return nil, rest, certerr.VerifyError(certerr.ErrorSourceCSR, sigErr)
|
||||
}
|
||||
|
||||
return csr, rest, nil
|
||||
}
|
||||
|
||||
@@ -484,7 +503,7 @@ func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error)
|
||||
// It does not check the signature. This is useful for dumping data from a CSR
|
||||
// locally.
|
||||
func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
|
||||
block, _ := pem.Decode([]byte(csrPEM))
|
||||
block, _ := pem.Decode(csrPEM)
|
||||
if block == nil {
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourceCSR, errors.New("PEM block is empty"))
|
||||
}
|
||||
@@ -499,15 +518,20 @@ func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
|
||||
|
||||
// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer.
|
||||
func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
|
||||
const (
|
||||
rsaBits2048 = 2048
|
||||
rsaBits3072 = 3072
|
||||
rsaBits4096 = 4096
|
||||
)
|
||||
switch pub := priv.Public().(type) {
|
||||
case *rsa.PublicKey:
|
||||
bitLength := pub.N.BitLen()
|
||||
switch {
|
||||
case bitLength >= 4096:
|
||||
case bitLength >= rsaBits4096:
|
||||
return x509.SHA512WithRSA
|
||||
case bitLength >= 3072:
|
||||
case bitLength >= rsaBits3072:
|
||||
return x509.SHA384WithRSA
|
||||
case bitLength >= 2048:
|
||||
case bitLength >= rsaBits2048:
|
||||
return x509.SHA256WithRSA
|
||||
default:
|
||||
return x509.SHA1WithRSA
|
||||
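The named constants above only replace magic numbers; the selection logic is unchanged. As a quick illustration (assuming SignerAlgo is exported from certlib), a 3072-bit RSA key falls in the [3072, 4096) bucket and should map to SHA384WithRSA:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
	// *rsa.PrivateKey satisfies crypto.Signer, so it can be passed directly.
	priv, err := rsa.GenerateKey(rand.Reader, 3072)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(certlib.SignerAlgo(priv) == x509.SHA384WithRSA) // true per the switch above
}
```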
@@ -537,7 +561,7 @@ func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, e
|
||||
}
|
||||
return &cert, nil
|
||||
}
|
||||
return nil, nil
|
||||
return nil, nil //nolint:nilnil // absence of client cert is not an error
|
||||
}
|
||||
|
||||
// CreateTLSConfig creates a tls.Config object from certs and roots.
|
||||
@@ -549,6 +573,7 @@ func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Confi
|
||||
return &tls.Config{
|
||||
Certificates: certs,
|
||||
RootCAs: remoteCAs,
|
||||
MinVersion: tls.VersionTLS12, // secure default
|
||||
}
|
||||
}
|
||||
|
||||
@@ -582,11 +607,11 @@ func DeserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimesta
|
||||
list := make([]ct.SignedCertificateTimestamp, len(sctList.SCTList))
|
||||
for i, serializedSCT := range sctList.SCTList {
|
||||
var sct ct.SignedCertificateTimestamp
|
||||
rest, err := cttls.Unmarshal(serializedSCT.Val, &sct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
rest2, err2 := cttls.Unmarshal(serializedSCT.Val, &sct)
|
||||
if err2 != nil {
|
||||
return nil, err2
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
if len(rest2) != 0 {
|
||||
return nil, certerr.ParsingError(
|
||||
certerr.ErrorSourceSCTList,
|
||||
errors.New("serialized SCT list contained trailing garbage"),
|
||||
@@ -602,12 +627,12 @@ func DeserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimesta
|
||||
// unmarshalled.
|
||||
func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTimestamp, error) {
|
||||
// This loop finds the SCTListExtension in the OCSP response.
|
||||
var SCTListExtension, ext pkix.Extension
|
||||
var sctListExtension, ext pkix.Extension
|
||||
for _, ext = range response.Extensions {
|
||||
// sctExtOid is the ObjectIdentifier of a Signed Certificate Timestamp.
|
||||
sctExtOid := asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 5}
|
||||
if ext.Id.Equal(sctExtOid) {
|
||||
SCTListExtension = ext
|
||||
sctListExtension = ext
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -615,10 +640,10 @@ func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTim
|
||||
// This code block extracts the sctList from the SCT extension.
|
||||
var sctList []ct.SignedCertificateTimestamp
|
||||
var err error
|
||||
if numBytes := len(SCTListExtension.Value); numBytes != 0 {
|
||||
if numBytes := len(sctListExtension.Value); numBytes != 0 {
|
||||
var serializedSCTList []byte
|
||||
rest := make([]byte, numBytes)
|
||||
copy(rest, SCTListExtension.Value)
|
||||
copy(rest, sctListExtension.Value)
|
||||
for len(rest) != 0 {
|
||||
rest, err = asn1.Unmarshal(rest, &serializedSCTList)
|
||||
if err != nil {
|
||||
|
||||
@@ -9,6 +9,8 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
const defaultHTTPSPort = 443
|
||||
|
||||
type Target struct {
|
||||
Host string
|
||||
Port int
|
||||
@@ -29,29 +31,29 @@ func parseURL(host string) (string, int, error) {
|
||||
}
|
||||
|
||||
if url.Port() == "" {
|
||||
return url.Hostname(), 443, nil
|
||||
return url.Hostname(), defaultHTTPSPort, nil
|
||||
}
|
||||
|
||||
port, err := strconv.ParseInt(url.Port(), 10, 16)
|
||||
if err != nil {
|
||||
portInt, err2 := strconv.ParseInt(url.Port(), 10, 16)
|
||||
if err2 != nil {
|
||||
return "", 0, fmt.Errorf("certlib/hosts: invalid port: %s", url.Port())
|
||||
}
|
||||
|
||||
return url.Hostname(), int(port), nil
|
||||
return url.Hostname(), int(portInt), nil
|
||||
}
|
||||
|
||||
func parseHostPort(host string) (string, int, error) {
|
||||
host, sport, err := net.SplitHostPort(host)
|
||||
if err == nil {
|
||||
port, err := strconv.ParseInt(sport, 10, 16)
|
||||
if err != nil {
|
||||
portInt, err2 := strconv.ParseInt(sport, 10, 16)
|
||||
if err2 != nil {
|
||||
return "", 0, fmt.Errorf("certlib/hosts: invalid port: %s", sport)
|
||||
}
|
||||
|
||||
return host, int(port), nil
|
||||
return host, int(portInt), nil
|
||||
}
|
||||
|
||||
return host, 443, nil
|
||||
return host, defaultHTTPSPort, nil
|
||||
}
|
||||
|
||||
func ParseHost(host string) (*Target, error) {
|
||||
|
||||
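A small sketch of the host parsing shown above. The import path certlib/hosts is inferred from the error-string prefix in this hunk, and it is an assumption that ParseHost accepts both URL and host:port forms, as the parseURL/parseHostPort helpers suggest (its body is truncated here). Inputs without an explicit port fall back to defaultHTTPSPort (443).

```go
package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib/hosts"
)

func main() {
	for _, in := range []string{"https://example.net", "example.net:8443"} {
		t, err := hosts.ParseHost(in)
		if err != nil {
			log.Fatal(err)
		}
		// The first input carries no port, so it should resolve to 443.
		fmt.Printf("%s -> host=%s port=%d\n", in, t.Host, t.Port)
	}
}
```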
@@ -158,9 +158,9 @@ type EncryptedContentInfo struct {
|
||||
EncryptedContent []byte `asn1:"tag:0,optional"`
|
||||
}
|
||||
|
||||
func unmarshalInit(raw []byte) (init initPKCS7, err error) {
|
||||
_, err = asn1.Unmarshal(raw, &init)
|
||||
if err != nil {
|
||||
func unmarshalInit(raw []byte) (initPKCS7, error) {
|
||||
var init initPKCS7
|
||||
if _, err := asn1.Unmarshal(raw, &init); err != nil {
|
||||
return initPKCS7{}, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
return init, nil
|
||||
@@ -218,28 +218,28 @@ func populateEncryptedData(msg *PKCS7, contentBytes []byte) error {
|
||||
|
||||
// ParsePKCS7 attempts to parse the DER encoded bytes of a
|
||||
// PKCS7 structure.
|
||||
func ParsePKCS7(raw []byte) (msg *PKCS7, err error) {
|
||||
func ParsePKCS7(raw []byte) (*PKCS7, error) {
|
||||
pkcs7, err := unmarshalInit(raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
msg = new(PKCS7)
|
||||
msg := new(PKCS7)
|
||||
msg.Raw = pkcs7.Raw
|
||||
msg.ContentInfo = pkcs7.ContentType.String()
|
||||
|
||||
switch msg.ContentInfo {
|
||||
case ObjIDData:
|
||||
if err := populateData(msg, pkcs7.Content); err != nil {
|
||||
return nil, err
|
||||
if e := populateData(msg, pkcs7.Content); e != nil {
|
||||
return nil, e
|
||||
}
|
||||
case ObjIDSignedData:
|
||||
if err := populateSignedData(msg, pkcs7.Content.Bytes); err != nil {
|
||||
return nil, err
|
||||
if e := populateSignedData(msg, pkcs7.Content.Bytes); e != nil {
|
||||
return nil, e
|
||||
}
|
||||
case ObjIDEncryptedData:
|
||||
if err := populateEncryptedData(msg, pkcs7.Content.Bytes); err != nil {
|
||||
return nil, err
|
||||
if e := populateEncryptedData(msg, pkcs7.Content.Bytes); e != nil {
|
||||
return nil, e
|
||||
}
|
||||
default:
|
||||
return nil, certerr.ParsingError(
|
||||
|
||||
@@ -5,6 +5,7 @@ package revoke
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
@@ -90,34 +91,34 @@ func ldapURL(url string) bool {
|
||||
// - false, true: the certificate was checked successfully, and it is not revoked.
|
||||
// - true, true: the certificate was checked successfully, and it is revoked.
|
||||
// - true, false: failure to check revocation status causes verification to fail.
|
||||
func revCheck(cert *x509.Certificate) (revoked, ok bool, err error) {
|
||||
func revCheck(cert *x509.Certificate) (bool, bool, error) {
|
||||
for _, url := range cert.CRLDistributionPoints {
|
||||
if ldapURL(url) {
|
||||
log.Infof("skipping LDAP CRL: %s", url)
|
||||
continue
|
||||
}
|
||||
|
||||
if revoked, ok, err := certIsRevokedCRL(cert, url); !ok {
|
||||
if rvk, ok2, err2 := certIsRevokedCRL(cert, url); !ok2 {
|
||||
log.Warning("error checking revocation via CRL")
|
||||
if HardFail {
|
||||
return true, false, err
|
||||
return true, false, err2
|
||||
}
|
||||
return false, false, err
|
||||
} else if revoked {
|
||||
return false, false, err2
|
||||
} else if rvk {
|
||||
log.Info("certificate is revoked via CRL")
|
||||
return true, true, err
|
||||
return true, true, err2
|
||||
}
|
||||
}
|
||||
|
||||
if revoked, ok, err := certIsRevokedOCSP(cert, HardFail); !ok {
|
||||
if rvk, ok2, err2 := certIsRevokedOCSP(cert, HardFail); !ok2 {
|
||||
log.Warning("error checking revocation via OCSP")
|
||||
if HardFail {
|
||||
return true, false, err
|
||||
return true, false, err2
|
||||
}
|
||||
return false, false, err
|
||||
} else if revoked {
|
||||
return false, false, err2
|
||||
} else if rvk {
|
||||
log.Info("certificate is revoked via OCSP")
|
||||
return true, true, err
|
||||
return true, true, err2
|
||||
}
|
||||
|
||||
return false, true, nil
|
||||
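From a caller's perspective the revocation API is unchanged by the renames above. A minimal sketch using the exported wrappers; the import path certlib/revoke is assumed from the package name, and the certificate file path is a placeholder.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib"
	"git.wntrmute.dev/kyle/goutils/certlib/revoke"
)

func main() {
	pemBytes, err := os.ReadFile("server.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	certs, err := certlib.ReadCertificates(pemBytes)
	if err != nil {
		log.Fatal(err)
	}

	// VerifyCertificateError reports (revoked, ok, err); ok == false means the
	// revocation status could not be determined.
	revoked, ok, err := revoke.VerifyCertificateError(certs[0])
	fmt.Printf("revoked=%v checked=%v err=%v\n", revoked, ok, err)
}
```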
@@ -125,13 +126,17 @@ func revCheck(cert *x509.Certificate) (revoked, ok bool, err error) {
|
||||
|
||||
// fetchCRL fetches and parses a CRL.
|
||||
func fetchCRL(url string) (*x509.RevocationList, error) {
|
||||
resp, err := HTTPClient.Get(url)
|
||||
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := HTTPClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode >= 300 {
|
||||
if resp.StatusCode >= http.StatusMultipleChoices {
|
||||
return nil, errors.New("failed to retrieve CRL")
|
||||
}
|
||||
|
||||
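The change above swaps HTTPClient.Get for an explicitly built request so a context can be attached. The same pattern in isolation, standard library only; the diff itself uses context.Background(), and the timeout below is just an illustration of what the context makes possible.

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"net/http"
	"time"
)

func fetch(ctx context.Context, client *http.Client, url string) ([]byte, error) {
	// Building the request explicitly lets callers attach a context
	// (deadline/cancellation), which client.Get cannot do.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode >= http.StatusMultipleChoices {
		return nil, fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return io.ReadAll(resp.Body)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	body, err := fetch(ctx, http.DefaultClient, "https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("fetched", len(body), "bytes")
}
```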
@@ -158,7 +163,7 @@ func getIssuer(cert *x509.Certificate) *x509.Certificate {
|
||||
|
||||
// check a cert against a specific CRL. Returns the same bool pair
|
||||
// as revCheck, plus an error if one occurred.
|
||||
func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err error) {
|
||||
func certIsRevokedCRL(cert *x509.Certificate, url string) (bool, bool, error) {
|
||||
crlLock.Lock()
|
||||
crl, ok := CRLSet[url]
|
||||
if ok && crl == nil {
|
||||
@@ -186,10 +191,9 @@ func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err
|
||||
|
||||
// check CRL signature
|
||||
if issuer != nil {
|
||||
err = crl.CheckSignatureFrom(issuer)
|
||||
if err != nil {
|
||||
log.Warningf("failed to verify CRL: %v", err)
|
||||
return false, false, err
|
||||
if sigErr := crl.CheckSignatureFrom(issuer); sigErr != nil {
|
||||
log.Warningf("failed to verify CRL: %v", sigErr)
|
||||
return false, false, sigErr
|
||||
}
|
||||
}
|
||||
|
||||
@@ -198,26 +202,26 @@ func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err
|
||||
crlLock.Unlock()
|
||||
}
|
||||
|
||||
for _, revoked := range crl.RevokedCertificates {
|
||||
if cert.SerialNumber.Cmp(revoked.SerialNumber) == 0 {
|
||||
for _, entry := range crl.RevokedCertificateEntries {
|
||||
if cert.SerialNumber.Cmp(entry.SerialNumber) == 0 {
|
||||
log.Info("Serial number match: intermediate is revoked.")
|
||||
return true, true, err
|
||||
return true, true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, true, err
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
// VerifyCertificate ensures that the certificate passed in hasn't
|
||||
// expired and checks the CRL for the server.
|
||||
func VerifyCertificate(cert *x509.Certificate) (revoked, ok bool) {
|
||||
revoked, ok, _ = VerifyCertificateError(cert)
|
||||
func VerifyCertificate(cert *x509.Certificate) (bool, bool) {
|
||||
revoked, ok, _ := VerifyCertificateError(cert)
|
||||
return revoked, ok
|
||||
}
|
||||
|
||||
// VerifyCertificateError ensures that the certificate passed in hasn't
|
||||
// expired and checks the CRL for the server.
|
||||
func VerifyCertificateError(cert *x509.Certificate) (revoked, ok bool, err error) {
|
||||
func VerifyCertificateError(cert *x509.Certificate) (bool, bool, error) {
|
||||
if !time.Now().Before(cert.NotAfter) {
|
||||
msg := fmt.Sprintf("Certificate expired %s\n", cert.NotAfter)
|
||||
log.Info(msg)
|
||||
@@ -231,7 +235,11 @@ func VerifyCertificateError(cert *x509.Certificate) (revoked, ok bool, err error
|
||||
}
|
||||
|
||||
func fetchRemote(url string) (*x509.Certificate, error) {
|
||||
resp, err := HTTPClient.Get(url)
|
||||
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := HTTPClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -254,8 +262,12 @@ var ocspOpts = ocsp.RequestOptions{
|
||||
Hash: crypto.SHA1,
|
||||
}
|
||||
|
||||
func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e error) {
|
||||
var err error
|
||||
const ocspGetURLMaxLen = 256
|
||||
|
||||
func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (bool, bool, error) {
|
||||
var revoked bool
|
||||
var ok bool
|
||||
var lastErr error
|
||||
|
||||
ocspURLs := leaf.OCSPServer
|
||||
if len(ocspURLs) == 0 {
|
||||
@@ -271,15 +283,16 @@ func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e
|
||||
|
||||
ocspRequest, err := ocsp.CreateRequest(leaf, issuer, &ocspOpts)
|
||||
if err != nil {
|
||||
return revoked, ok, err
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
for _, server := range ocspURLs {
|
||||
resp, err := sendOCSPRequest(server, ocspRequest, leaf, issuer)
|
||||
if err != nil {
|
||||
resp, e := sendOCSPRequest(server, ocspRequest, leaf, issuer)
|
||||
if e != nil {
|
||||
if strict {
|
||||
return revoked, ok, err
|
||||
return false, false, e
|
||||
}
|
||||
lastErr = e
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -291,9 +304,9 @@ func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e
|
||||
revoked = true
|
||||
}
|
||||
|
||||
return revoked, ok, err
|
||||
return revoked, ok, nil
|
||||
}
|
||||
return revoked, ok, err
|
||||
return revoked, ok, lastErr
|
||||
}
|
||||
|
||||
// sendOCSPRequest attempts to request an OCSP response from the
|
||||
@@ -302,12 +315,21 @@ func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e
|
||||
func sendOCSPRequest(server string, req []byte, leaf, issuer *x509.Certificate) (*ocsp.Response, error) {
|
||||
var resp *http.Response
|
||||
var err error
|
||||
if len(req) > 256 {
|
||||
if len(req) > ocspGetURLMaxLen {
|
||||
buf := bytes.NewBuffer(req)
|
||||
resp, err = HTTPClient.Post(server, "application/ocsp-request", buf)
|
||||
httpReq, e := http.NewRequestWithContext(context.Background(), http.MethodPost, server, buf)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
httpReq.Header.Set("Content-Type", "application/ocsp-request")
|
||||
resp, err = HTTPClient.Do(httpReq)
|
||||
} else {
|
||||
reqURL := server + "/" + neturl.QueryEscape(base64.StdEncoding.EncodeToString(req))
|
||||
resp, err = HTTPClient.Get(reqURL)
|
||||
httpReq, e := http.NewRequestWithContext(context.Background(), http.MethodGet, reqURL, nil)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
resp, err = HTTPClient.Do(httpReq)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
||||
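A note on the (revoked, ok) convention documented above revCheck: ok reports whether the check itself completed, and revoked is only meaningful when ok is true; with HardFail, a failed check surfaces as ok=false and the caller treats it as a verification failure. A minimal caller sketch under that convention (the helper below is illustrative and not part of this change; it takes the verification function as a parameter so no import path is assumed):

package main

import (
	"crypto/x509"
	"fmt"
)

// checkRevocation interprets the (revoked, ok, err) triple using the semantics
// documented above: ok=false means the check failed, revoked is the verdict otherwise.
func checkRevocation(cert *x509.Certificate, verify func(*x509.Certificate) (bool, bool, error)) error {
	revoked, ok, err := verify(cert)
	switch {
	case !ok:
		// The revocation status could not be determined (CRL/OCSP failure).
		return fmt.Errorf("revocation check failed: %v", err)
	case revoked:
		// Checked successfully and found revoked.
		return fmt.Errorf("certificate %q is revoked", cert.Subject.CommonName)
	default:
		// Checked successfully and not revoked.
		return nil
	}
}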
@@ -1,3 +1,4 @@
//nolint:testpackage // keep tests in the same package for internal symbol access
package revoke

import (
@@ -153,7 +154,7 @@ func mustParse(pemData string) *x509.Certificate {
panic("Invalid PEM type.")
}

cert, err := x509.ParseCertificate([]byte(block.Bytes))
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
panic(err.Error())
}
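The test change above drops a redundant conversion: pem.Block.Bytes is already a []byte, so it can be passed directly to x509.ParseCertificate. A standalone sketch of the same helper pattern using only the standard library (the name mustParseCert is illustrative):

package main

import (
	"crypto/x509"
	"encoding/pem"
)

// mustParseCert decodes a single PEM CERTIFICATE block and parses it,
// panicking on failure in the style of a test helper.
func mustParseCert(pemData string) *x509.Certificate {
	block, _ := pem.Decode([]byte(pemData))
	if block == nil || block.Type != "CERTIFICATE" {
		panic("invalid PEM type")
	}
	cert, err := x509.ParseCertificate(block.Bytes) // block.Bytes is already []byte
	if err != nil {
		panic(err)
	}
	return cert
}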
@@ -1,6 +1,7 @@
package main

import (
"context"
"flag"
"fmt"
"net"
@@ -28,10 +29,16 @@ func connect(addr string, dport string, six bool, timeout time.Duration) error {

if verbose {
fmt.Printf("connecting to %s/%s... ", addr, proto)
os.Stdout.Sync()
if err = os.Stdout.Sync(); err != nil {
return err
}
}

conn, err := net.DialTimeout(proto, addr, timeout)
dialer := &net.Dialer{
Timeout: timeout,
}

conn, err := dialer.DialContext(context.Background(), proto, addr)
if err != nil {
if verbose {
fmt.Println("failed.")
@@ -42,8 +49,8 @@ func connect(addr string, dport string, six bool, timeout time.Duration) error {
if verbose {
fmt.Println("OK")
}
conn.Close()
return nil

return conn.Close()
}

func main() {
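The connect change above replaces net.DialTimeout with a net.Dialer used through DialContext, which keeps the timeout while also allowing cancellation via a context. A minimal sketch of that pattern (hypothetical helper, standard library only):

package main

import (
	"context"
	"net"
	"time"
)

// dialWithTimeout dials addr over proto, bounded by both the dialer's timeout
// and any deadline or cancellation carried by ctx.
func dialWithTimeout(ctx context.Context, proto, addr string, timeout time.Duration) (net.Conn, error) {
	dialer := &net.Dialer{Timeout: timeout}
	return dialer.DialContext(ctx, proto, addr)
}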
@@ -3,6 +3,7 @@ package main
import (
"crypto/x509"
"embed"
"errors"
"fmt"
"os"
"path/filepath"
@@ -14,22 +15,22 @@ import (
// loadCertsFromFile attempts to parse certificates from a file that may be in
// PEM or DER/PKCS#7 format. Returns the parsed certificates or an error.
func loadCertsFromFile(path string) ([]*x509.Certificate, error) {
var certs []*x509.Certificate

data, err := os.ReadFile(path)
if err != nil {
return nil, err
}

// Try PEM first
if certs, err := certlib.ParseCertificatesPEM(data); err == nil {
if certs, err = certlib.ParseCertificatesPEM(data); err == nil {
return certs, nil
}

// Try DER/PKCS7/PKCS12 (with no password)
if certs, _, err := certlib.ParseCertificatesDER(data, ""); err == nil {
if certs, _, err = certlib.ParseCertificatesDER(data, ""); err == nil {
return certs, nil
} else {
return nil, err
}

return nil, err
}

func makePoolFromFile(path string) (*x509.CertPool, error) {
@@ -56,49 +57,50 @@ var embeddedTestdata embed.FS
// loadCertsFromBytes attempts to parse certificates from bytes that may be in
// PEM or DER/PKCS#7 format.
func loadCertsFromBytes(data []byte) ([]*x509.Certificate, error) {
// Try PEM first
if certs, err := certlib.ParseCertificatesPEM(data); err == nil {
certs, err := certlib.ParseCertificatesPEM(data)
if err == nil {
return certs, nil
}
// Try DER/PKCS7/PKCS12 (with no password)
if certs, _, err := certlib.ParseCertificatesDER(data, ""); err == nil {

certs, _, err = certlib.ParseCertificatesDER(data, "")
if err == nil {
return certs, nil
} else {
return nil, err
}

return nil, err
}

func makePoolFromBytes(data []byte) (*x509.CertPool, error) {
certs, err := loadCertsFromBytes(data)
if err != nil || len(certs) == 0 {
return nil, fmt.Errorf("failed to load CA certificates from embedded bytes")
}
pool := x509.NewCertPool()
for _, c := range certs {
pool.AddCert(c)
}
return pool, nil
certs, err := loadCertsFromBytes(data)
if err != nil || len(certs) == 0 {
return nil, errors.New("failed to load CA certificates from embedded bytes")
}
pool := x509.NewCertPool()
for _, c := range certs {
pool.AddCert(c)
}
return pool, nil
}

// isSelfSigned returns true if the given certificate is self-signed.
// It checks that the subject and issuer match and that the certificate's
// signature verifies against its own public key.
func isSelfSigned(cert *x509.Certificate) bool {
if cert == nil {
return false
}
// Quick check: subject and issuer match
if cert.Subject.String() != cert.Issuer.String() {
return false
}
// Cryptographic check: the certificate is signed by itself
if err := cert.CheckSignatureFrom(cert); err != nil {
return false
}
return true
if cert == nil {
return false
}
// Quick check: subject and issuer match
if cert.Subject.String() != cert.Issuer.String() {
return false
}
// Cryptographic check: the certificate is signed by itself
if err := cert.CheckSignatureFrom(cert); err != nil {
return false
}
return true
}

func verifyAgainstCA(caPool *x509.CertPool, path string) (ok bool, expiry string) {
func verifyAgainstCA(caPool *x509.CertPool, path string) (bool, string) {
certs, err := loadCertsFromFile(path)
if err != nil || len(certs) == 0 {
return false, ""
@@ -117,14 +119,14 @@ func verifyAgainstCA(caPool *x509.CertPool, path string) (ok bool, expiry string
Intermediates: ints,
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
}
if _, err := leaf.Verify(opts); err != nil {
if _, err = leaf.Verify(opts); err != nil {
return false, ""
}

return true, leaf.NotAfter.Format("2006-01-02")
}

func verifyAgainstCABytes(caPool *x509.CertPool, certData []byte) (ok bool, expiry string) {
func verifyAgainstCABytes(caPool *x509.CertPool, certData []byte) (bool, string) {
certs, err := loadCertsFromBytes(certData)
if err != nil || len(certs) == 0 {
return false, ""
@@ -143,92 +145,159 @@ func verifyAgainstCABytes(caPool *x509.CertPool, certData []byte) (ok bool, expi
Intermediates: ints,
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
}
if _, err := leaf.Verify(opts); err != nil {
if _, err = leaf.Verify(opts); err != nil {
return false, ""
}

return true, leaf.NotAfter.Format("2006-01-02")
}

// selftest runs built-in validation using embedded certificates.
func selftest() int {
type testCase struct {
name string
caFile string
certFile string
expectOK bool
type testCase struct {
name string
caFile string
certFile string
expectOK bool
}

func (tc testCase) Run() error {
caBytes, err := embeddedTestdata.ReadFile(tc.caFile)
if err != nil {
return fmt.Errorf("selftest: failed to read embedded %s: %w", tc.caFile, err)
}

cases := []testCase{
{name: "ISRG Root X1 validates LE E7", caFile: "testdata/isrg-root-x1.pem", certFile: "testdata/le-e7.pem", expectOK: true},
{name: "ISRG Root X1 does NOT validate Google WR2", caFile: "testdata/isrg-root-x1.pem", certFile: "testdata/goog-wr2.pem", expectOK: false},
{name: "GTS R1 validates Google WR2", caFile: "testdata/gts-r1.pem", certFile: "testdata/goog-wr2.pem", expectOK: true},
{name: "GTS R1 does NOT validate LE E7", caFile: "testdata/gts-r1.pem", certFile: "testdata/le-e7.pem", expectOK: false},
}
certBytes, err := embeddedTestdata.ReadFile(tc.certFile)
if err != nil {
return fmt.Errorf("selftest: failed to read embedded %s: %w", tc.certFile, err)
}

failures := 0
for _, tc := range cases {
caBytes, err := embeddedTestdata.ReadFile(tc.caFile)
pool, err := makePoolFromBytes(caBytes)
if err != nil || pool == nil {
return fmt.Errorf("selftest: failed to build CA pool for %s: %w", tc.caFile, err)
}

ok, exp := verifyAgainstCABytes(pool, certBytes)
if ok != tc.expectOK {
return fmt.Errorf("%s: unexpected result: got %v, want %v", tc.name, ok, tc.expectOK)
}

if ok {
fmt.Printf("%s: OK (expires %s)\n", tc.name, exp)
}

fmt.Printf("%s: INVALID (as expected)\n", tc.name)

return nil
}

var cases = []testCase{
{
name: "ISRG Root X1 validates LE E7",
caFile: "testdata/isrg-root-x1.pem",
certFile: "testdata/le-e7.pem",
expectOK: true,
},
{
name: "ISRG Root X1 does NOT validate Google WR2",
caFile: "testdata/isrg-root-x1.pem",
certFile: "testdata/goog-wr2.pem",
expectOK: false,
},
{
name: "GTS R1 validates Google WR2",
caFile: "testdata/gts-r1.pem",
certFile: "testdata/goog-wr2.pem",
expectOK: true,
},
{
name: "GTS R1 does NOT validate LE E7",
caFile: "testdata/gts-r1.pem",
certFile: "testdata/le-e7.pem",
expectOK: false,
},
}

// selftest runs built-in validation using embedded certificates.
func selftest() int {
failures := 0
for _, tc := range cases {
err := tc.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", tc.caFile, err)
fmt.Fprintln(os.Stderr, err)
failures++
continue
}
certBytes, err := embeddedTestdata.ReadFile(tc.certFile)
}

// Verify that both embedded root CAs are detected as self-signed
roots := []string{"testdata/gts-r1.pem", "testdata/isrg-root-x1.pem"}
for _, root := range roots {
b, err := embeddedTestdata.ReadFile(root)
if err != nil {
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", tc.certFile, err)
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", root, err)
failures++
continue
}
pool, err := makePoolFromBytes(caBytes)
if err != nil || pool == nil {
fmt.Fprintf(os.Stderr, "selftest: failed to build CA pool for %s: %v\n", tc.caFile, err)
certs, err := loadCertsFromBytes(b)
if err != nil || len(certs) == 0 {
fmt.Fprintf(os.Stderr, "selftest: failed to parse cert(s) from %s: %v\n", root, err)
failures++
continue
}
ok, exp := verifyAgainstCABytes(pool, certBytes)
if ok != tc.expectOK {
fmt.Printf("%s: unexpected result: got %v, want %v\n", tc.name, ok, tc.expectOK)
failures++
leaf := certs[0]
if isSelfSigned(leaf) {
fmt.Printf("%s: SELF-SIGNED (as expected)\n", root)
} else {
if ok {
fmt.Printf("%s: OK (expires %s)\n", tc.name, exp)
} else {
fmt.Printf("%s: INVALID (as expected)\n", tc.name)
}
fmt.Printf("%s: expected SELF-SIGNED, but was not detected as such\n", root)
failures++
}
}
}

// Verify that both embedded root CAs are detected as self-signed
roots := []string{"testdata/gts-r1.pem", "testdata/isrg-root-x1.pem"}
for _, root := range roots {
b, err := embeddedTestdata.ReadFile(root)
if err != nil {
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", root, err)
failures++
continue
}
certs, err := loadCertsFromBytes(b)
if err != nil || len(certs) == 0 {
fmt.Fprintf(os.Stderr, "selftest: failed to parse cert(s) from %s: %v\n", root, err)
failures++
continue
}
leaf := certs[0]
if isSelfSigned(leaf) {
fmt.Printf("%s: SELF-SIGNED (as expected)\n", root)
} else {
fmt.Printf("%s: expected SELF-SIGNED, but was not detected as such\n", root)
failures++
}
}
if failures == 0 {
fmt.Println("selftest: PASS")
return 0
}
fmt.Fprintf(os.Stderr, "selftest: FAIL (%d failure(s))\n", failures)
return 1
}

if failures == 0 {
fmt.Println("selftest: PASS")
return 0
}
fmt.Fprintf(os.Stderr, "selftest: FAIL (%d failure(s))\n", failures)
return 1
// expiryString returns a YYYY-MM-DD date string to display for certificate
// expiry. If an explicit exp string is provided, it is used. Otherwise, if a
// leaf certificate is available, its NotAfter is formatted. As a last resort,
// it falls back to today's date (should not normally happen).
func expiryString(leaf *x509.Certificate, exp string) string {
if exp != "" {
return exp
}
if leaf != nil {
return leaf.NotAfter.Format("2006-01-02")
}
return time.Now().Format("2006-01-02")
}

// processCert verifies a single certificate file against the provided CA pool
// and prints the result in the required format, handling self-signed
// certificates specially.
func processCert(caPool *x509.CertPool, certPath string) {
ok, exp := verifyAgainstCA(caPool, certPath)
name := filepath.Base(certPath)

// Try to load the leaf cert for self-signed detection and expiry fallback
var leaf *x509.Certificate
if certs, err := loadCertsFromFile(certPath); err == nil && len(certs) > 0 {
leaf = certs[0]
}

// Prefer the SELF-SIGNED label if applicable
if isSelfSigned(leaf) {
fmt.Printf("%s: SELF-SIGNED\n", name)
return
}

if ok {
fmt.Printf("%s: OK (expires %s)\n", name, expiryString(leaf, exp))
return
}
fmt.Printf("%s: INVALID\n", name)
}

func main() {
@@ -250,38 +319,7 @@ func main() {
os.Exit(1)
}

for _, certPath := range os.Args[2:] {
ok, exp := verifyAgainstCA(caPool, certPath)
name := filepath.Base(certPath)
// Load the leaf once for self-signed detection and potential expiry fallback
var leaf *x509.Certificate
if certs, err := loadCertsFromFile(certPath); err == nil && len(certs) > 0 {
leaf = certs[0]
}

// If the certificate is self-signed, prefer the SELF-SIGNED label
if isSelfSigned(leaf) {
fmt.Printf("%s: SELF-SIGNED\n", name)
continue
}

if ok {
// Display with the requested format
// Example: file: OK (expires 2031-01-01)
// Ensure deterministic date formatting
// Note: no timezone displayed; date only as per example
// If exp ended up empty for some reason, recompute safely
if exp == "" {
if leaf != nil {
exp = leaf.NotAfter.Format("2006-01-02")
} else {
// fallback to the current date to avoid empty; though shouldn't happen
exp = time.Now().Format("2006-01-02")
}
}
fmt.Printf("%s: OK (expires %s)\n", name, exp)
} else {
fmt.Printf("%s: INVALID\n", name)
}
}
for _, certPath := range os.Args[2:] {
processCert(caPool, certPath)
}
}
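The verifyAgainstCA helpers above follow the standard crypto/x509 pattern: roots in one pool, any remaining certificates from the input as intermediates, and ExtKeyUsageAny so extended key usage does not restrict the chain. A condensed sketch of that pattern (standard library only; the function name is illustrative):

package main

import (
	"crypto/x509"
)

// verifyLeaf reports whether leaf chains to the given roots, optionally via
// intermediates, and returns the leaf's expiry formatted as YYYY-MM-DD.
func verifyLeaf(roots *x509.CertPool, leaf *x509.Certificate, intermediates []*x509.Certificate) (bool, string) {
	ints := x509.NewCertPool()
	for _, ic := range intermediates {
		ints.AddCert(ic)
	}
	opts := x509.VerifyOptions{
		Roots:         roots,
		Intermediates: ints,
		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
	}
	if _, err := leaf.Verify(opts); err != nil {
		return false, ""
	}
	return true, leaf.NotAfter.Format("2006-01-02")
}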
@@ -8,8 +8,10 @@ import (
"crypto/x509"
_ "embed"
"encoding/pem"
"errors"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"strings"
@@ -19,7 +21,7 @@ import (
"gopkg.in/yaml.v2"
)

// Config represents the top-level YAML configuration
// Config represents the top-level YAML configuration.
type Config struct {
Config struct {
Hashes string `yaml:"hashes"`
@@ -28,19 +30,19 @@ type Config struct {
Chains map[string]ChainGroup `yaml:"chains"`
}

// ChainGroup represents a named group of certificate chains
// ChainGroup represents a named group of certificate chains.
type ChainGroup struct {
Certs []CertChain `yaml:"certs"`
Outputs Outputs `yaml:"outputs"`
}

// CertChain represents a root certificate and its intermediates
// CertChain represents a root certificate and its intermediates.
type CertChain struct {
Root string `yaml:"root"`
Intermediates []string `yaml:"intermediates"`
}

// Outputs defines output format options
// Outputs defines output format options.
type Outputs struct {
IncludeSingle bool `yaml:"include_single"`
IncludeIndividual bool `yaml:"include_individual"`
@@ -95,7 +97,8 @@ func main() {
}

// Create output directory if it doesn't exist
if err := os.MkdirAll(outputDir, 0755); err != nil {
err = os.MkdirAll(outputDir, 0750)
if err != nil {
fmt.Fprintf(os.Stderr, "Error creating output directory: %v\n", err)
os.Exit(1)
}
@@ -108,9 +111,9 @@ func main() {
}
createdFiles := make([]string, 0, totalFormats)
for groupName, group := range cfg.Chains {
files, err := processChainGroup(groupName, group, expiryDuration)
if err != nil {
fmt.Fprintf(os.Stderr, "Error processing chain group %s: %v\n", groupName, err)
files, perr := processChainGroup(groupName, group, expiryDuration)
if perr != nil {
fmt.Fprintf(os.Stderr, "Error processing chain group %s: %v\n", groupName, perr)
os.Exit(1)
}
createdFiles = append(createdFiles, files...)
@@ -119,8 +122,8 @@ func main() {
// Generate hash file for all created archives
if cfg.Config.Hashes != "" {
hashFile := filepath.Join(outputDir, cfg.Config.Hashes)
if err := generateHashFile(hashFile, createdFiles); err != nil {
fmt.Fprintf(os.Stderr, "Error generating hash file: %v\n", err)
if gerr := generateHashFile(hashFile, createdFiles); gerr != nil {
fmt.Fprintf(os.Stderr, "Error generating hash file: %v\n", gerr)
os.Exit(1)
}
}
@@ -135,8 +138,8 @@ func loadConfig(path string) (*Config, error) {
}

var cfg Config
if err := yaml.Unmarshal(data, &cfg); err != nil {
return nil, err
if uerr := yaml.Unmarshal(data, &cfg); uerr != nil {
return nil, uerr
}

return &cfg, nil
@@ -200,72 +203,107 @@ func processChainGroup(groupName string, group ChainGroup, expiryDuration time.D
return createdFiles, nil
}

// loadAndCollectCerts loads all certificates from chains and collects them for processing
func loadAndCollectCerts(chains []CertChain, outputs Outputs, expiryDuration time.Duration) ([]*x509.Certificate, []certWithPath, error) {
// loadAndCollectCerts loads all certificates from chains and collects them for processing.
func loadAndCollectCerts(
chains []CertChain,
outputs Outputs,
expiryDuration time.Duration,
) ([]*x509.Certificate, []certWithPath, error) {
var singleFileCerts []*x509.Certificate
var individualCerts []certWithPath

for _, chain := range chains {
// Load root certificate
rootCert, err := certlib.LoadCertificate(chain.Root)
if err != nil {
return nil, nil, fmt.Errorf("failed to load root certificate %s: %v", chain.Root, err)
s, i, cerr := collectFromChain(chain, outputs, expiryDuration)
if cerr != nil {
return nil, nil, cerr
}

// Check expiry for root
checkExpiry(chain.Root, rootCert, expiryDuration)

// Add root to collections if needed
if outputs.IncludeSingle {
singleFileCerts = append(singleFileCerts, rootCert)
if len(s) > 0 {
singleFileCerts = append(singleFileCerts, s...)
}
if outputs.IncludeIndividual {
individualCerts = append(individualCerts, certWithPath{
cert: rootCert,
path: chain.Root,
})
}

// Load and validate intermediates
for _, intPath := range chain.Intermediates {
intCert, err := certlib.LoadCertificate(intPath)
if err != nil {
return nil, nil, fmt.Errorf("failed to load intermediate certificate %s: %v", intPath, err)
}

// Validate that intermediate is signed by root
if err := intCert.CheckSignatureFrom(rootCert); err != nil {
return nil, nil, fmt.Errorf("intermediate %s is not properly signed by root %s: %v", intPath, chain.Root, err)
}

// Check expiry for intermediate
checkExpiry(intPath, intCert, expiryDuration)

// Add intermediate to collections if needed
if outputs.IncludeSingle {
singleFileCerts = append(singleFileCerts, intCert)
}
if outputs.IncludeIndividual {
individualCerts = append(individualCerts, certWithPath{
cert: intCert,
path: intPath,
})
}
if len(i) > 0 {
individualCerts = append(individualCerts, i...)
}
}

return singleFileCerts, individualCerts, nil
}

// prepareArchiveFiles prepares all files to be included in archives
func prepareArchiveFiles(singleFileCerts []*x509.Certificate, individualCerts []certWithPath, outputs Outputs, encoding string) ([]fileEntry, error) {
// collectFromChain loads a single chain, performs checks, and returns the certs to include.
func collectFromChain(
chain CertChain,
outputs Outputs,
expiryDuration time.Duration,
) (
[]*x509.Certificate,
[]certWithPath,
error,
) {
var single []*x509.Certificate
var indiv []certWithPath

// Load root certificate
rootCert, rerr := certlib.LoadCertificate(chain.Root)
if rerr != nil {
return nil, nil, fmt.Errorf("failed to load root certificate %s: %w", chain.Root, rerr)
}

// Check expiry for root
checkExpiry(chain.Root, rootCert, expiryDuration)

// Add root to collections if needed
if outputs.IncludeSingle {
single = append(single, rootCert)
}
if outputs.IncludeIndividual {
indiv = append(indiv, certWithPath{cert: rootCert, path: chain.Root})
}

// Load and validate intermediates
for _, intPath := range chain.Intermediates {
intCert, lerr := certlib.LoadCertificate(intPath)
if lerr != nil {
return nil, nil, fmt.Errorf("failed to load intermediate certificate %s: %w", intPath, lerr)
}

// Validate that intermediate is signed by root
if sigErr := intCert.CheckSignatureFrom(rootCert); sigErr != nil {
return nil, nil, fmt.Errorf(
"intermediate %s is not properly signed by root %s: %w",
intPath,
chain.Root,
sigErr,
)
}

// Check expiry for intermediate
checkExpiry(intPath, intCert, expiryDuration)

// Add intermediate to collections if needed
if outputs.IncludeSingle {
single = append(single, intCert)
}
if outputs.IncludeIndividual {
indiv = append(indiv, certWithPath{cert: intCert, path: intPath})
}
}

return single, indiv, nil
}

// prepareArchiveFiles prepares all files to be included in archives.
func prepareArchiveFiles(
singleFileCerts []*x509.Certificate,
individualCerts []certWithPath,
outputs Outputs,
encoding string,
) ([]fileEntry, error) {
var archiveFiles []fileEntry

// Handle a single bundle file
if outputs.IncludeSingle && len(singleFileCerts) > 0 {
files, err := encodeCertsToFiles(singleFileCerts, "bundle", encoding, true)
if err != nil {
return nil, fmt.Errorf("failed to encode single bundle: %v", err)
return nil, fmt.Errorf("failed to encode single bundle: %w", err)
}
archiveFiles = append(archiveFiles, files...)
}
@@ -276,7 +314,7 @@ func prepareArchiveFiles(singleFileCerts []*x509.Certificate, individualCerts []
baseName := strings.TrimSuffix(filepath.Base(cp.path), filepath.Ext(cp.path))
files, err := encodeCertsToFiles([]*x509.Certificate{cp.cert}, baseName, encoding, false)
if err != nil {
return nil, fmt.Errorf("failed to encode individual cert %s: %v", cp.path, err)
return nil, fmt.Errorf("failed to encode individual cert %s: %w", cp.path, err)
}
archiveFiles = append(archiveFiles, files...)
}
@@ -294,7 +332,7 @@ func prepareArchiveFiles(singleFileCerts []*x509.Certificate, individualCerts []
return archiveFiles, nil
}

// createArchiveFiles creates archive files in the specified formats
// createArchiveFiles creates archive files in the specified formats.
func createArchiveFiles(groupName string, formats []string, archiveFiles []fileEntry) ([]string, error) {
createdFiles := make([]string, 0, len(formats))

@@ -307,11 +345,11 @@ func createArchiveFiles(groupName string, formats []string, archiveFiles []fileE
switch format {
case "zip":
if err := createZipArchive(archivePath, archiveFiles); err != nil {
return nil, fmt.Errorf("failed to create zip archive: %v", err)
return nil, fmt.Errorf("failed to create zip archive: %w", err)
}
case "tgz":
if err := createTarGzArchive(archivePath, archiveFiles); err != nil {
return nil, fmt.Errorf("failed to create tar.gz archive: %v", err)
return nil, fmt.Errorf("failed to create tar.gz archive: %w", err)
}
default:
return nil, fmt.Errorf("unsupported format: %s", format)
@@ -329,7 +367,12 @@ func checkExpiry(path string, cert *x509.Certificate, expiryDuration time.Durati
if cert.NotAfter.Before(expiryThreshold) {
daysUntilExpiry := int(cert.NotAfter.Sub(now).Hours() / 24)
if daysUntilExpiry < 0 {
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s has EXPIRED (expired %d days ago)\n", path, -daysUntilExpiry)
fmt.Fprintf(
os.Stderr,
"WARNING: Certificate %s has EXPIRED (expired %d days ago)\n",
path,
-daysUntilExpiry,
)
} else {
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s will expire in %d days (on %s)\n", path, daysUntilExpiry, cert.NotAfter.Format("2006-01-02"))
}
@@ -347,8 +390,13 @@ type certWithPath struct {
}

// encodeCertsToFiles converts certificates to file entries based on encoding type
// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file
func encodeCertsToFiles(certs []*x509.Certificate, baseName string, encoding string, isSingle bool) ([]fileEntry, error) {
// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file.
func encodeCertsToFiles(
certs []*x509.Certificate,
baseName string,
encoding string,
isSingle bool,
) ([]fileEntry, error) {
var files []fileEntry

switch encoding {
@@ -369,14 +417,12 @@ func encodeCertsToFiles(certs []*x509.Certificate, baseName string, encoding str
name: baseName + ".crt",
content: derContent,
})
} else {
} else if len(certs) > 0 {
// Individual DER file (should only have one cert)
if len(certs) > 0 {
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
case "both":
// Add PEM version
@@ -395,13 +441,11 @@ func encodeCertsToFiles(certs []*x509.Certificate, baseName string, encoding str
name: baseName + ".crt",
content: derContent,
})
} else {
if len(certs) > 0 {
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
} else if len(certs) > 0 {
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
default:
return nil, fmt.Errorf("unsupported encoding: %s (must be 'pem', 'der', or 'both')", encoding)
@@ -410,7 +454,7 @@ func encodeCertsToFiles(certs []*x509.Certificate, baseName string, encoding str
return files, nil
}

// encodeCertsToPEM encodes certificates to PEM format
// encodeCertsToPEM encodes certificates to PEM format.
func encodeCertsToPEM(certs []*x509.Certificate) []byte {
var pemContent []byte
for _, cert := range certs {
@@ -435,40 +479,49 @@ func generateManifest(files []fileEntry) []byte {
return []byte(manifest.String())
}

// closeWithErr attempts to close all provided closers, joining any close errors with baseErr.
func closeWithErr(baseErr error, closers ...io.Closer) error {
for _, c := range closers {
if c == nil {
continue
}
if cerr := c.Close(); cerr != nil {
baseErr = errors.Join(baseErr, cerr)
}
}
return baseErr
}

func createZipArchive(path string, files []fileEntry) error {
f, err := os.Create(path)
if err != nil {
return err
f, zerr := os.Create(path)
if zerr != nil {
return zerr
}

w := zip.NewWriter(f)

for _, file := range files {
fw, err := w.Create(file.name)
if err != nil {
w.Close()
f.Close()
return err
fw, werr := w.Create(file.name)
if werr != nil {
return closeWithErr(werr, w, f)
}
if _, err := fw.Write(file.content); err != nil {
w.Close()
f.Close()
return err
if _, werr = fw.Write(file.content); werr != nil {
return closeWithErr(werr, w, f)
}
}

// Check errors on close operations
if err := w.Close(); err != nil {
f.Close()
return err
if cerr := w.Close(); cerr != nil {
_ = f.Close()
return cerr
}
return f.Close()
}

func createTarGzArchive(path string, files []fileEntry) error {
f, err := os.Create(path)
if err != nil {
return err
f, terr := os.Create(path)
if terr != nil {
return terr
}

gw := gzip.NewWriter(f)
@@ -480,29 +533,23 @@ func createTarGzArchive(path string, files []fileEntry) error {
Mode: 0644,
Size: int64(len(file.content)),
}
if err := tw.WriteHeader(hdr); err != nil {
tw.Close()
gw.Close()
f.Close()
return err
if herr := tw.WriteHeader(hdr); herr != nil {
return closeWithErr(herr, tw, gw, f)
}
if _, err := tw.Write(file.content); err != nil {
tw.Close()
gw.Close()
f.Close()
return err
if _, werr := tw.Write(file.content); werr != nil {
return closeWithErr(werr, tw, gw, f)
}
}

// Check errors on close operations in the correct order
if err := tw.Close(); err != nil {
gw.Close()
f.Close()
return err
if cerr := tw.Close(); cerr != nil {
_ = gw.Close()
_ = f.Close()
return cerr
}
if err := gw.Close(); err != nil {
f.Close()
return err
if cerr := gw.Close(); cerr != nil {
_ = f.Close()
return cerr
}
return f.Close()
}
@@ -515,9 +562,9 @@ func generateHashFile(path string, files []string) error {
defer f.Close()

for _, file := range files {
data, err := os.ReadFile(file)
if err != nil {
return err
data, rerr := os.ReadFile(file)
if rerr != nil {
return rerr
}

hash := sha256.Sum256(data)
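For orientation, the Config, ChainGroup, CertChain, and Outputs structs in the cert-bundler diff above imply a YAML document shaped roughly as in the sketch below. The top-level key for the nested Config block, the group name, and the file paths are assumptions for illustration only; the structs here are a simplified mirror, not the tool's own types:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// bundlerConfig is a simplified mirror of the cert-bundler configuration
// structs; the yaml tags match those visible in the diff above.
type bundlerConfig struct {
	Config struct {
		Hashes string `yaml:"hashes"`
	} `yaml:"config"` // assumed top-level key
	Chains map[string]struct {
		Certs []struct {
			Root          string   `yaml:"root"`
			Intermediates []string `yaml:"intermediates"`
		} `yaml:"certs"`
		Outputs struct {
			IncludeSingle     bool `yaml:"include_single"`
			IncludeIndividual bool `yaml:"include_individual"`
		} `yaml:"outputs"`
	} `yaml:"chains"`
}

const sampleYAML = `
config:
  hashes: bundle.sha256
chains:
  example:
    certs:
      - root: testdata/root.pem
        intermediates:
          - testdata/intermediate.pem
    outputs:
      include_single: true
      include_individual: false
`

func main() {
	var cfg bundlerConfig
	if err := yaml.Unmarshal([]byte(sampleYAML), &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%d chain group(s), hash file %q\n", len(cfg.Chains), cfg.Config.Hashes)
}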
go.mod
@@ -12,6 +12,7 @@ require (
)

require (
github.com/benbjohnson/clock v1.3.5
github.com/davecgh/go-spew v1.1.1
github.com/google/certificate-transparency-go v1.0.21
)
go.sum
@@ -1,3 +1,5 @@
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=