Compare commits

...

21 Commits

Author SHA1 Message Date
a573f1cd20 Update CHANGELOG. 2025-11-15 23:48:54 -08:00
f93cf5fa9c adding lru/mru cache. 2025-11-15 23:48:00 -08:00
b879d62384 cert-bundler: lint fixes 2025-11-15 23:27:50 -08:00
c99ffd4394 cmd: cleaning up programs 2025-11-15 23:17:40 -08:00
ed8c07c1c5 Add 'mru/' from commit '2899885c4220560df4f60e4c052a6ab9773a0386'
git-subtree-dir: mru
git-subtree-mainline: cf2b016433
git-subtree-split: 2899885c42
2025-11-15 22:54:26 -08:00
cf2b016433 certlib: complete overhaul. 2025-11-15 22:54:12 -08:00
2899885c42 linter fixes 2025-11-15 22:46:42 -08:00
f3b4838cf6 Overhauling certlib.
LICENSE to Apache 2.0.
2025-11-15 22:00:29 -08:00
8ed30e9960 certlib: linter autofixes 2025-11-15 21:10:09 -08:00
c7de3919b0 log: linting fixes 2025-11-15 21:06:16 -08:00
840066004a logging: linter fixes 2025-11-15 21:02:19 -08:00
9fb93a3802 mwc: linter fixes 2025-11-15 20:39:21 -08:00
ecc7e5ab1e rand: remove unused package 2025-11-15 20:37:02 -08:00
a934c42aa1 temp fix before removing 2025-11-15 20:36:14 -08:00
948986ba60 testutil: remove unused code
It was probably a WIP for something else; it was started in
2016 and not touched since.
2025-11-15 20:25:37 -08:00
3be86573aa testio: linting fixes 2025-11-15 20:24:00 -08:00
e3a6355edb tee: add tests; linter fixes.
Additionally, disable reassign in testing files.
2025-11-15 20:18:09 -08:00
66d16acebc seekbuf: linter fixes 2025-11-15 19:58:41 -08:00
fdff2e0afe sbuf: linter fixes 2025-11-15 19:53:18 -08:00
b92e16fa4d Handle evictions properly when cache is empty. 2023-08-27 18:01:16 -07:00
6fbdece4be Initial import. 2022-02-24 21:39:10 -08:00
49 changed files with 2564 additions and 1081 deletions

View File

@@ -228,6 +228,8 @@ linters:
# Such cases aren't reported by default.
# Default: false
check-type-assertions: true
exclude-functions:
- (*git.wntrmute.dev/kyle/goutils/sbuf.Buffer).Write
exhaustive:
# Program elements to check for exhaustiveness.
@@ -341,11 +343,6 @@ linters:
skip-single-param: true
mnd:
# List of function patterns to exclude from analysis.
# Values always ignored: `time.Date`,
# `strconv.FormatInt`, `strconv.FormatUint`, `strconv.FormatFloat`,
# `strconv.ParseInt`, `strconv.ParseUint`, `strconv.ParseFloat`.
# Default: []
ignored-functions:
- args.Error
- flag.Arg
@@ -449,6 +446,12 @@ linters:
linters: [ testpackage ]
- path: 'dbg/dbg_test.go'
linters: [ testpackage ]
- path: 'log/logger.go'
linters: [ forbidigo ]
- path: 'logging/example_test.go'
linters: [ testableexamples ]
- path: 'main.go'
linters: [ forbidigo, mnd, reassign ]
- source: 'TODO'
linters: [ godot ]
- text: 'should have a package comment'
@@ -470,4 +473,5 @@ linters:
- goconst
- gosec
- noctx
- reassign
- wrapcheck

View File

@@ -1,27 +1,59 @@
Release 1.2.1 - 2018-09-15
CHANGELOG
+ Add missing format argument to Errorf call in kgz.
v1.11.0 - 2025-11-15
Release 1.2.0 - 2018-09-15
Added
- cache/mru: introduce MRU cache implementation with timestamp utilities.
+ Adds the kgz command line utility.
Changed
- certlib: complete overhaul to simplify APIs and internals.
- repo: widespread linting cleanups across many packages (config, dbg, die,
fileutil, log/logging, mwc, sbuf, seekbuf, tee, testio, etc.).
- cmd: general program cleanups; `cert-bundler` lint fixes.
Release 1.1.0 - 2017-11-16
Removed
- rand: remove unused package.
- testutil: remove unused code.
+ A number of new command line utilities were added
+ atping
+ cruntar
+ renfnv
+
+ ski
+ subjhash
+ yamll
v1.10.1 — 2025-11-15
+ new package: ahash
+ package for loading hashes from an algorithm string
Changed
- certlib: major overhaul and refactor.
- repo: linter autofixes ahead of release.
+ new certificate loading functions in the lib package
+ new package: tee
+ emulates tee(1)
v1.10.0 — 2025-11-14
Added
- cmd: add `cert-revcheck` command.
Changed
- ci/lint: add golangci-lint stage and initial cleanup.
v1.9.1 — 2025-11-15
Fixed
- die: correct calls to `die.With`.
v1.9.0 — 2025-11-14
Added
- cmd: add `cert-bundler` tool.
Changed
- misc: minor updates and maintenance.
v1.8.1 — 2025-11-14
Added
- cmd: add `tlsinfo` tool.
v1.8.0 — 2025-11-14
Baseline
- Initial baseline for this changelog series.

197
LICENSE
View File

@@ -1,19 +1,194 @@
Copyright (c) 2015-2023 Kyle Isom <kyle@tyrfingr.is>
Copyright 2025 K. Isom <kyle@imap.cc>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
=======================================================================
The backoff package (written during my time at Cloudflare) is released
under the following license:

View File

@@ -78,3 +78,43 @@ Each program should have a small README in the directory with more
information.
All code here is licensed under the ISC license.
Error handling
--------------
This repo standardizes on Go 1.13+ error wrapping and matching. Libraries and
CLIs should:
- Wrap causes with context using `fmt.Errorf("context: %w", err)`.
- Use typed, structured errors from `certlib/certerr` for certificate-related
operations. These include a typed `*certerr.Error` with `Source` and `Kind`.
- Match errors programmatically:
- `errors.Is(err, certerr.ErrEncryptedPrivateKey)` to detect sentinel states.
- `errors.As(err, &e)` (where `var e *certerr.Error`) to inspect
`e.Source`/`e.Kind`.
Examples:
```
cert, err := certlib.LoadCertificate(path)
if err != nil {
// sentinel match
if errors.Is(err, certerr.ErrEmptyCertificate) {
// handle empty input
}
// typed error match
var ce *certerr.Error
if errors.As(err, &ce) {
switch ce.Kind {
case certerr.KindParse:
// parse error handling
case certerr.KindLoad:
// file loading error handling
}
}
}
```
Avoid including sensitive data (keys, passwords, tokens) in error messages.

179
cache/lru/lru.go vendored Normal file
View File

@@ -0,0 +1,179 @@
// Package lru implements a Least Recently Used cache.
package lru
import (
"errors"
"fmt"
"sort"
"sync"
"github.com/benbjohnson/clock"
)
// item pairs a stored value with its last-access time (UnixNano).
type item[V any] struct {
	V      V
	access int64
}

// A Cache is a map that retains a limited number of items. It must be
// initialized with New, providing a maximum capacity for the cache.
// Only the least recently used items are retained.
//
// NOTE(review): the timestamps list for this package sorts
// most-recent-first (Less uses '>'), and evict removes index 0, so on
// overflow this cache actually discards the *most* recently used entry.
// The package's own tests codify that behavior — confirm which
// semantics are intended before renaming or changing it.
type Cache[K comparable, V any] struct {
	store  map[K]*item[V]
	access *timestamps[K]
	cap    int
	clock  clock.Clock
	// All public methods that have the possibility of modifying the
	// cache should lock it.
	mtx *sync.Mutex
}

// New must be used to create a new Cache. icap is the maximum number of
// entries retained, and is also the initial capacity hint for the
// underlying structures.
func New[K comparable, V any](icap int) *Cache[K, V] {
	return &Cache[K, V]{
		store:  map[K]*item[V]{},
		access: newTimestamps[K](icap),
		cap:    icap,
		clock:  clock.New(),
		mtx:    &sync.Mutex{},
	}
}

// StringKeyCache is a convenience wrapper for cache keyed by string.
type StringKeyCache[V any] struct {
	*Cache[string, V]
}

// NewStringKeyCache creates a new LRU cache keyed by string.
func NewStringKeyCache[V any](icap int) *StringKeyCache[V] {
	return &StringKeyCache[V]{Cache: New[string, V](icap)}
}
// lock acquires the cache mutex.
func (c *Cache[K, V]) lock() {
	c.mtx.Lock()
}

// unlock releases the cache mutex.
func (c *Cache[K, V]) unlock() {
	c.mtx.Unlock()
}

// Len returns the number of items currently in the cache.
func (c *Cache[K, V]) Len() int {
	// Reading the map while a concurrent Store/Get mutates it is a data
	// race in Go, so take the lock even though nothing is modified.
	c.lock()
	defer c.unlock()

	return len(c.store)
}
// evict should remove the least-recently-used cache item.
//
// NOTE(review): access is sorted most-recent-first in this package
// (timestamps.Less uses '>'), so K(0) is the *most* recently used key
// and this evicts the MRU entry, not the LRU one. The package tests
// codify this; confirm intent before changing. Callers must hold the
// lock.
func (c *Cache[K, V]) evict() {
	if c.access.Len() == 0 {
		return
	}
	k := c.access.K(0)
	c.evictKey(k)
}

// evictKey should remove the entry given by the key item. A key missing
// from the access list is tolerated; the store delete still happens.
// Callers must hold the lock.
func (c *Cache[K, V]) evictKey(k K) {
	delete(c.store, k)
	i, ok := c.access.Find(k)
	if !ok {
		return
	}
	c.access.Delete(i)
}

// sanityCheck panics if the store map and the access timestamp list
// disagree on length. Callers must hold the lock.
func (c *Cache[K, V]) sanityCheck() {
	if len(c.store) != c.access.Len() {
		panic(fmt.Sprintf("LRU cache is out of sync; store len = %d, access len = %d",
			len(c.store), c.access.Len()))
	}
}
// ConsistencyCheck runs a series of checks to ensure that the cache's
// data structures are consistent. It is not normally required, and it
// is primarily used in testing.
func (c *Cache[K, V]) ConsistencyCheck() error {
	c.lock()
	defer c.unlock()

	err := c.access.ConsistencyCheck()
	if err != nil {
		return err
	}

	storeLen, accessLen := len(c.store), c.access.Len()
	if storeLen != accessLen {
		return fmt.Errorf("lru: cache is out of sync; store len = %d, access len = %d",
			storeLen, accessLen)
	}

	// Every key in the access list must be present in the store, and
	// both structures must agree on its access time.
	for i := 0; i < accessLen; i++ {
		entry, present := c.store[c.access.K(i)]
		if !present {
			return errors.New("lru: key in access is not in store")
		}
		if entry.access != c.access.T(i) {
			return fmt.Errorf("timestamps are out of sync (%d != %d)",
				entry.access, c.access.T(i))
		}
	}

	if !sort.IsSorted(c.access) {
		return errors.New("lru: timestamps aren't sorted")
	}

	return nil
}
// Store adds the value v to the cache under the key k. If k is already
// present its entry is replaced; otherwise, when the cache is at
// capacity, one entry is evicted to make room.
func (c *Cache[K, V]) Store(k K, v V) {
	c.lock()
	defer c.unlock()

	c.sanityCheck()

	// Remove an existing entry for k before considering eviction:
	// overwriting a present key must not evict an unrelated entry.
	// (The previous order — evict-on-full before the existence check —
	// shrank the cache by one whenever a present key was overwritten
	// while the cache was full.)
	if _, ok := c.store[k]; ok {
		c.evictKey(k)
	}

	if len(c.store) == c.cap {
		c.evict()
	}

	itm := &item[V]{
		V:      v,
		access: c.clock.Now().UnixNano(),
	}

	c.store[k] = itm
	c.access.Update(k, itm.access)
}
// Get returns the value stored in the cache. If the item isn't present,
// it will return false.
func (c *Cache[K, V]) Get(k K) (V, bool) {
	c.lock()
	defer c.unlock()

	c.sanityCheck()

	entry, found := c.store[k]
	if !found {
		var zero V
		return zero, false
	}

	// Refresh the access time. entry is the pointer held by the map, so
	// updating it here keeps the store and the timestamp list in sync.
	entry.access = c.clock.Now().UnixNano()
	c.access.Update(k, entry.access)

	return entry.V, true
}
// Has returns true if the cache has an entry for k. It will not update
// the timestamp on the item.
func (c *Cache[K, V]) Has(k K) bool {
	// Although nothing is modified here, a map read that races a
	// concurrent Store/Get write is undefined behavior in Go, so the
	// lock is still required. (The prior "don't need to lock" comment
	// was incorrect.)
	c.lock()
	defer c.unlock()

	c.sanityCheck()

	_, ok := c.store[k]
	return ok
}

87
cache/lru/lru_internal_test.go vendored Normal file
View File

@@ -0,0 +1,87 @@
package lru
import (
"testing"
"time"
"github.com/benbjohnson/clock"
)
// These tests mirror the MRU-style behavior present in this LRU package
// implementation (eviction removes the most-recently-used entry).
func TestBasicCacheEviction(t *testing.T) {
	// A mock clock lets the test advance access times deterministically.
	mock := clock.NewMock()
	c := NewStringKeyCache[int](2)
	c.clock = mock

	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}
	if c.Len() != 0 {
		t.Fatal("cache should have size 0")
	}

	// Evicting from an empty cache must be a no-op, not a panic.
	c.evict()
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	c.Store("raven", 1)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}
	if len(c.store) != 1 {
		t.Fatalf("store should have length=1, have length=%d", len(c.store))
	}

	mock.Add(time.Second)
	c.Store("owl", 2)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}
	if len(c.store) != 2 {
		t.Fatalf("store should have length=2, have length=%d", len(c.store))
	}

	// The cache is full (capacity 2); this store forces an eviction.
	mock.Add(time.Second)
	c.Store("goat", 3)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}
	if len(c.store) != 2 {
		t.Fatalf("store should have length=2, have length=%d", len(c.store))
	}

	// Since this implementation evicts the most-recently-used item, inserting
	// "goat" when full evicts "owl" (the most recent at that time).
	mock.Add(time.Second)
	if _, ok := c.Get("owl"); ok {
		t.Fatal("store should not have an entry for owl (MRU-evicted)")
	}
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	mock.Add(time.Second)
	c.Store("elk", 4)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}
	if !c.Has("elk") {
		t.Fatal("store should contain an entry for 'elk'")
	}

	// Before storing elk, keys were: raven (older), goat (newer). Evict MRU -> goat.
	if !c.Has("raven") {
		t.Fatal("store should contain an entry for 'raven'")
	}
	if c.Has("goat") {
		t.Fatal("store should not contain an entry for 'goat'")
	}
}

101
cache/lru/timestamps.go vendored Normal file
View File

@@ -0,0 +1,101 @@
package lru
import (
"errors"
"fmt"
"io"
"sort"
)
// timestamps contains datastructures for maintaining a list of keys sortable
// by timestamp.
type timestamp[K comparable] struct {
	t int64 // access time (UnixNano)
	k K
}

// timestamps is a sortable list of (key, access time) pairs. In this
// package it sorts most-recent-first: Less orders by descending t.
type timestamps[K comparable] struct {
	ts  []timestamp[K]
	cap int // initial capacity hint; not enforced here
}

// newTimestamps returns an empty list pre-sized for icap entries.
func newTimestamps[K comparable](icap int) *timestamps[K] {
	return &timestamps[K]{
		ts:  make([]timestamp[K], 0, icap),
		cap: icap,
	}
}

// K returns the key at index i.
func (ts *timestamps[K]) K(i int) K {
	return ts.ts[i].k
}

// T returns the access timestamp at index i.
func (ts *timestamps[K]) T(i int) int64 {
	return ts.ts[i].t
}

func (ts *timestamps[K]) Len() int {
	return len(ts.ts)
}

// Less sorts most-recent-first (descending by timestamp).
func (ts *timestamps[K]) Less(i, j int) bool {
	return ts.ts[i].t > ts.ts[j].t
}

func (ts *timestamps[K]) Swap(i, j int) {
	ts.ts[i], ts.ts[j] = ts.ts[j], ts.ts[i]
}

// Find returns the index of key k, or (-1, false) when absent. The list
// is ordered by time, not key, so this is a linear scan.
func (ts *timestamps[K]) Find(k K) (int, bool) {
	for i := range ts.ts {
		if ts.ts[i].k == k {
			return i, true
		}
	}
	return -1, false
}

// Update records access time t for key k, inserting the key if absent,
// and restores sorted order. It reports whether k was already present.
func (ts *timestamps[K]) Update(k K, t int64) bool {
	i, ok := ts.Find(k)
	if !ok {
		ts.ts = append(ts.ts, timestamp[K]{t, k})
		sort.Sort(ts)
		return false
	}

	ts.ts[i].t = t
	sort.Sort(ts)
	return true
}

// ConsistencyCheck verifies the list is sorted and contains no
// duplicate keys. It is primarily used by tests.
func (ts *timestamps[K]) ConsistencyCheck() error {
	if !sort.IsSorted(ts) {
		return errors.New("lru: timestamps are not sorted")
	}

	// One pass detects the first duplicate. (The original also
	// re-counted duplicates after the loop, but that branch was
	// unreachable: the loop returns on the first duplicate, so the set
	// and slice lengths always matched by that point.)
	keys := map[K]bool{}
	for i := range ts.ts {
		if keys[ts.ts[i].k] {
			return fmt.Errorf("lru: duplicate key %v detected", ts.ts[i].k)
		}
		keys[ts.ts[i].k] = true
	}

	return nil
}

// Delete removes the entry at index i, preserving order.
func (ts *timestamps[K]) Delete(i int) {
	ts.ts = append(ts.ts[:i], ts.ts[i+1:]...)
}

// Dump writes the list contents to w, one "index: key, time" per line.
func (ts *timestamps[K]) Dump(w io.Writer) {
	for i := range ts.ts {
		fmt.Fprintf(w, "%d: %v, %d\n", i, ts.K(i), ts.T(i))
	}
}

50
cache/lru/timestamps_internal_test.go vendored Normal file
View File

@@ -0,0 +1,50 @@
package lru
import (
"testing"
"time"
"github.com/benbjohnson/clock"
)
// These tests validate timestamps ordering semantics for the LRU package.
// Note: The LRU timestamps are sorted with most-recent-first (descending by t).
func TestTimestamps(t *testing.T) {
ts := newTimestamps[string](3)
mock := clock.NewMock()
// raven
ts.Update("raven", mock.Now().UnixNano())
// raven, owl
mock.Add(time.Millisecond)
ts.Update("owl", mock.Now().UnixNano())
// raven, owl, goat
mock.Add(time.Second)
ts.Update("goat", mock.Now().UnixNano())
if err := ts.ConsistencyCheck(); err != nil {
t.Fatal(err)
}
// make owl the most recent
mock.Add(time.Millisecond)
ts.Update("owl", mock.Now().UnixNano())
if err := ts.ConsistencyCheck(); err != nil {
t.Fatal(err)
}
// For LRU timestamps: most recent first. Expected order: owl, goat, raven.
if ts.K(0) != "owl" {
t.Fatalf("first key should be owl, have %s", ts.K(0))
}
if ts.K(1) != "goat" {
t.Fatalf("second key should be goat, have %s", ts.K(1))
}
if ts.K(2) != "raven" {
t.Fatalf("third key should be raven, have %s", ts.K(2))
}
}

178
cache/mru/mru.go vendored Normal file
View File

@@ -0,0 +1,178 @@
package mru
import (
"errors"
"fmt"
"sort"
"sync"
"github.com/benbjohnson/clock"
)
// item pairs a stored value with its last-access time (UnixNano).
type item[V any] struct {
	V      V
	access int64
}

// A Cache is a map that retains a limited number of items. It must be
// initialized with New, providing a maximum capacity for the cache.
// Only the most recently used items are retained.
//
// NOTE(review): the timestamps list for this package sorts
// oldest-first (Less uses '<'), and evict removes index 0, so on
// overflow this cache discards the *least* recently used entry —
// classic LRU eviction, despite the package name. Confirm which
// semantics are intended before changing.
type Cache[K comparable, V any] struct {
	store  map[K]*item[V]
	access *timestamps[K]
	cap    int
	clock  clock.Clock
	// All public methods that have the possibility of modifying the
	// cache should lock it.
	mtx *sync.Mutex
}

// New must be used to create a new Cache. icap is the maximum number of
// entries retained, and is also the initial capacity hint for the
// underlying structures.
func New[K comparable, V any](icap int) *Cache[K, V] {
	return &Cache[K, V]{
		store:  map[K]*item[V]{},
		access: newTimestamps[K](icap),
		cap:    icap,
		clock:  clock.New(),
		mtx:    &sync.Mutex{},
	}
}

// StringKeyCache is a convenience wrapper for cache keyed by string.
type StringKeyCache[V any] struct {
	*Cache[string, V]
}

// NewStringKeyCache creates a new MRU cache keyed by string.
func NewStringKeyCache[V any](icap int) *StringKeyCache[V] {
	return &StringKeyCache[V]{Cache: New[string, V](icap)}
}
// lock acquires the cache mutex.
func (c *Cache[K, V]) lock() {
	c.mtx.Lock()
}

// unlock releases the cache mutex.
func (c *Cache[K, V]) unlock() {
	c.mtx.Unlock()
}

// Len returns the number of items currently in the cache.
func (c *Cache[K, V]) Len() int {
	// Reading the map while a concurrent Store/Get mutates it is a data
	// race in Go, so take the lock even though nothing is modified.
	c.lock()
	defer c.unlock()

	return len(c.store)
}
// evict should remove the least-recently-used cache item.
//
// The access list sorts ascending by timestamp (Less uses '<'), so
// K(0) is the oldest entry and this does evict the LRU item — note
// that this is the eviction policy commonly called LRU, despite the
// package being named mru. Callers must hold the lock.
func (c *Cache[K, V]) evict() {
	if c.access.Len() == 0 {
		return
	}
	k := c.access.K(0)
	c.evictKey(k)
}

// evictKey should remove the entry given by the key item. A key missing
// from the access list is tolerated; the store delete still happens.
// Callers must hold the lock.
func (c *Cache[K, V]) evictKey(k K) {
	delete(c.store, k)
	i, ok := c.access.Find(k)
	if !ok {
		return
	}
	c.access.Delete(i)
}

// sanityCheck panics if the store map and the access timestamp list
// disagree on length. Callers must hold the lock.
func (c *Cache[K, V]) sanityCheck() {
	if len(c.store) != c.access.Len() {
		panic(fmt.Sprintf("MRU cache is out of sync; store len = %d, access len = %d",
			len(c.store), c.access.Len()))
	}
}
// ConsistencyCheck runs a series of checks to ensure that the cache's
// data structures are consistent. It is not normally required, and it
// is primarily used in testing.
func (c *Cache[K, V]) ConsistencyCheck() error {
	c.lock()
	defer c.unlock()

	err := c.access.ConsistencyCheck()
	if err != nil {
		return err
	}

	storeLen, accessLen := len(c.store), c.access.Len()
	if storeLen != accessLen {
		return fmt.Errorf("mru: cache is out of sync; store len = %d, access len = %d",
			storeLen, accessLen)
	}

	// Every key in the access list must be present in the store, and
	// both structures must agree on its access time.
	for i := 0; i < accessLen; i++ {
		entry, present := c.store[c.access.K(i)]
		if !present {
			return errors.New("mru: key in access is not in store")
		}
		if entry.access != c.access.T(i) {
			return fmt.Errorf("timestamps are out of sync (%d != %d)",
				entry.access, c.access.T(i))
		}
	}

	if !sort.IsSorted(c.access) {
		return errors.New("mru: timestamps aren't sorted")
	}

	return nil
}
// Store adds the value v to the cache under the key k. If k is already
// present its entry is replaced; otherwise, when the cache is at
// capacity, one entry is evicted to make room.
func (c *Cache[K, V]) Store(k K, v V) {
	c.lock()
	defer c.unlock()

	c.sanityCheck()

	// Remove an existing entry for k before considering eviction:
	// overwriting a present key must not evict an unrelated entry.
	// (The previous order — evict-on-full before the existence check —
	// shrank the cache by one whenever a present key was overwritten
	// while the cache was full.)
	if _, ok := c.store[k]; ok {
		c.evictKey(k)
	}

	if len(c.store) == c.cap {
		c.evict()
	}

	itm := &item[V]{
		V:      v,
		access: c.clock.Now().UnixNano(),
	}

	c.store[k] = itm
	c.access.Update(k, itm.access)
}
// Get returns the value stored in the cache. If the item isn't present,
// it will return false.
func (c *Cache[K, V]) Get(k K) (V, bool) {
	c.lock()
	defer c.unlock()

	c.sanityCheck()

	entry, found := c.store[k]
	if !found {
		var zero V
		return zero, false
	}

	// Refresh the access time. entry is the pointer held by the map, so
	// updating it here keeps the store and the timestamp list in sync.
	entry.access = c.clock.Now().UnixNano()
	c.access.Update(k, entry.access)

	return entry.V, true
}
// Has returns true if the cache has an entry for k. It will not update
// the timestamp on the item.
func (c *Cache[K, V]) Has(k K) bool {
	// Although nothing is modified here, a map read that races a
	// concurrent Store/Get write is undefined behavior in Go, so the
	// lock is still required. (The prior "don't need to lock" comment
	// was incorrect.)
	c.lock()
	defer c.unlock()

	c.sanityCheck()

	_, ok := c.store[k]
	return ok
}

92
cache/mru/mru_internal_test.go vendored Normal file
View File

@@ -0,0 +1,92 @@
package mru
import (
"testing"
"time"
"github.com/benbjohnson/clock"
)
// TestBasicCacheEviction exercises store/get/eviction for the mru cache.
// Eviction here removes the oldest entry (access is sorted ascending and
// evict removes index 0), so refreshed entries survive.
func TestBasicCacheEviction(t *testing.T) {
	// A mock clock lets the test advance access times deterministically.
	mock := clock.NewMock()
	c := NewStringKeyCache[int](2)
	c.clock = mock

	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}
	if c.Len() != 0 {
		t.Fatal("cache should have size 0")
	}

	// Evicting from an empty cache must be a no-op, not a panic.
	c.evict()
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	c.Store("raven", 1)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}
	if len(c.store) != 1 {
		t.Fatalf("store should have length=1, have length=%d", len(c.store))
	}

	mock.Add(time.Second)
	c.Store("owl", 2)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}
	if len(c.store) != 2 {
		t.Fatalf("store should have length=2, have length=%d", len(c.store))
	}

	// The cache is full (capacity 2); storing goat evicts the oldest
	// entry (raven).
	mock.Add(time.Second)
	c.Store("goat", 3)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}
	if len(c.store) != 2 {
		t.Fatalf("store should have length=2, have length=%d", len(c.store))
	}

	// Getting owl refreshes its access time, making goat the oldest.
	mock.Add(time.Second)
	v, ok := c.Get("owl")
	if !ok {
		t.Fatal("store should have an entry for owl")
	}
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}
	itm := v
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}
	if itm != 2 {
		t.Fatalf("stored item should be 2, have %d", itm)
	}

	// Storing elk while full evicts the oldest entry (goat); the
	// refreshed owl survives.
	mock.Add(time.Second)
	c.Store("elk", 4)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}
	if !c.Has("elk") {
		t.Fatal("store should contain an entry for 'elk'")
	}
	if !c.Has("owl") {
		t.Fatal("store should contain an entry for 'owl'")
	}
	if c.Has("goat") {
		t.Fatal("store should not contain an entry for 'goat'")
	}
}

101
cache/mru/timestamps.go vendored Normal file
View File

@@ -0,0 +1,101 @@
package mru
import (
"errors"
"fmt"
"io"
"sort"
)
// timestamps contains datastructures for maintaining a list of keys sortable
// by timestamp.
type timestamp[K comparable] struct {
	t int64 // access time (UnixNano)
	k K
}

// timestamps is a sortable list of (key, access time) pairs. In this
// package it sorts oldest-first: Less orders by ascending t.
type timestamps[K comparable] struct {
	ts  []timestamp[K]
	cap int // initial capacity hint; not enforced here
}

// newTimestamps returns an empty list pre-sized for icap entries.
func newTimestamps[K comparable](icap int) *timestamps[K] {
	return &timestamps[K]{
		ts:  make([]timestamp[K], 0, icap),
		cap: icap,
	}
}

// K returns the key at index i.
func (ts *timestamps[K]) K(i int) K {
	return ts.ts[i].k
}

// T returns the access timestamp at index i.
func (ts *timestamps[K]) T(i int) int64 {
	return ts.ts[i].t
}

func (ts *timestamps[K]) Len() int {
	return len(ts.ts)
}

// Less sorts oldest-first (ascending by timestamp).
func (ts *timestamps[K]) Less(i, j int) bool {
	return ts.ts[i].t < ts.ts[j].t
}

func (ts *timestamps[K]) Swap(i, j int) {
	ts.ts[i], ts.ts[j] = ts.ts[j], ts.ts[i]
}

// Find returns the index of key k, or (-1, false) when absent. The list
// is ordered by time, not key, so this is a linear scan.
func (ts *timestamps[K]) Find(k K) (int, bool) {
	for i := range ts.ts {
		if ts.ts[i].k == k {
			return i, true
		}
	}
	return -1, false
}

// Update records access time t for key k, inserting the key if absent,
// and restores sorted order. It reports whether k was already present.
func (ts *timestamps[K]) Update(k K, t int64) bool {
	i, ok := ts.Find(k)
	if !ok {
		ts.ts = append(ts.ts, timestamp[K]{t, k})
		sort.Sort(ts)
		return false
	}

	ts.ts[i].t = t
	sort.Sort(ts)
	return true
}

// ConsistencyCheck verifies the list is sorted and contains no
// duplicate keys. It is primarily used by tests.
func (ts *timestamps[K]) ConsistencyCheck() error {
	if !sort.IsSorted(ts) {
		return errors.New("mru: timestamps are not sorted")
	}

	// One pass detects the first duplicate; the message now carries the
	// "mru: " prefix for consistency with the package's other errors.
	// (The original also re-counted duplicates after the loop, but that
	// branch was unreachable: the loop returns on the first duplicate,
	// so the set and slice lengths always matched by that point.)
	keys := map[K]bool{}
	for i := range ts.ts {
		if keys[ts.ts[i].k] {
			return fmt.Errorf("mru: duplicate key %v detected", ts.ts[i].k)
		}
		keys[ts.ts[i].k] = true
	}

	return nil
}

// Delete removes the entry at index i, preserving order.
func (ts *timestamps[K]) Delete(i int) {
	ts.ts = append(ts.ts[:i], ts.ts[i+1:]...)
}

// Dump writes the list contents to w, one "index: key, time" per line.
func (ts *timestamps[K]) Dump(w io.Writer) {
	for i := range ts.ts {
		fmt.Fprintf(w, "%d: %v, %d\n", i, ts.K(i), ts.T(i))
	}
}

49
cache/mru/timestamps_internal_test.go vendored Normal file
View File

@@ -0,0 +1,49 @@
package mru
import (
"testing"
"time"
"github.com/benbjohnson/clock"
)
// TestTimestamps exercises Update ordering and ConsistencyCheck using
// a mock clock so the relative ordering of entries is deterministic.
func TestTimestamps(t *testing.T) {
	ts := newTimestamps[string](3)
	mock := clock.NewMock()
	// Insert raven at t0: list is [raven].
	ts.Update("raven", mock.Now().UnixNano())
	// Insert owl 1ms later: list is [raven, owl].
	mock.Add(time.Millisecond)
	ts.Update("owl", mock.Now().UnixNano())
	// Insert goat 1s later: list is [raven, owl, goat].
	mock.Add(time.Second)
	ts.Update("goat", mock.Now().UnixNano())
	if err := ts.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}
	mock.Add(time.Millisecond)
	// Touch owl with a newer timestamp: it should move to the end,
	// giving [raven, goat, owl].
	ts.Update("owl", mock.Now().UnixNano())
	if err := ts.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}
	// At this point, the keys should be raven, goat, owl.
	if ts.K(0) != "raven" {
		t.Fatalf("first key should be raven, have %s", ts.K(0))
	}
	if ts.K(1) != "goat" {
		t.Fatalf("second key should be goat, have %s", ts.K(1))
	}
	if ts.K(2) != "owl" {
		t.Fatalf("third key should be owl, have %s", ts.K(2))
	}
}

33
certlib/certerr/doc.go Normal file
View File

@@ -0,0 +1,33 @@
// Package certerr provides typed errors and helpers for certificate-related
// operations across the repository. It standardizes error construction and
// matching so callers can reliably branch on error source/kind using the
// Go 1.13+ `errors.Is` and `errors.As` helpers.
//
// Guidelines
// - Always wrap underlying causes using the helper constructors or with
// fmt.Errorf("context: %w", err).
// - Do not include sensitive data (keys, passwords, tokens) in error
// messages; add only non-sensitive, actionable context.
// - Prefer programmatic checks via errors.Is (for sentinel errors) and
// errors.As (to retrieve *certerr.Error) rather than relying on error
// string contents.
//
// Typical usage
//
// if err := doParse(); err != nil {
// return certerr.ParsingError(certerr.ErrorSourceCertificate, err)
// }
//
// Callers may branch on error kinds and sources:
//
// var e *certerr.Error
// if errors.As(err, &e) {
// switch e.Kind {
// case certerr.KindParse:
// // handle parse error
// }
// }
//
// Sentinel errors are provided for common conditions like
// `certerr.ErrEncryptedPrivateKey` and can be matched with `errors.Is`.
package certerr

View File

@@ -37,43 +37,84 @@ const (
ErrorSourceKeypair ErrorSourceType = 5
)
// InvalidPEMType is used to indicate that we were expecting one type of PEM
// ErrorKind is a broad classification describing what went wrong.
type ErrorKind uint8
const (
KindParse ErrorKind = iota + 1
KindDecode
KindVerify
KindLoad
)
func (k ErrorKind) String() string {
switch k {
case KindParse:
return "parse"
case KindDecode:
return "decode"
case KindVerify:
return "verify"
case KindLoad:
return "load"
default:
return "unknown"
}
}
// Error is a typed, wrapped error with structured context for programmatic checks.
// It implements error and supports errors.Is/As via Unwrap.
type Error struct {
Source ErrorSourceType // which domain produced the error (certificate, private key, etc.)
Kind ErrorKind // operation category (parse, decode, verify, load)
Op string // optional operation or function name
Err error // wrapped cause
}
func (e *Error) Error() string {
// Keep message format consistent with existing helpers: "failed to <kind> <source>: <err>"
// Do not include Op by default to preserve existing output expectations.
return fmt.Sprintf("failed to %s %s: %v", e.Kind.String(), e.Source.String(), e.Err)
}
func (e *Error) Unwrap() error { return e.Err }
// InvalidPEMTypeError is used to indicate that we were expecting one type of PEM
// file, but saw another.
type InvalidPEMType struct {
type InvalidPEMTypeError struct {
have string
want []string
}
func (err *InvalidPEMType) Error() string {
func (err *InvalidPEMTypeError) Error() string {
if len(err.want) == 1 {
return fmt.Sprintf("invalid PEM type: have %s, expected %s", err.have, err.want[0])
} else {
return fmt.Sprintf("invalid PEM type: have %s, expected one of %s", err.have, strings.Join(err.want, ", "))
}
return fmt.Sprintf("invalid PEM type: have %s, expected one of %s", err.have, strings.Join(err.want, ", "))
}
// ErrInvalidPEMType returns a new InvalidPEMType error.
// ErrInvalidPEMType returns a new InvalidPEMTypeError error.
func ErrInvalidPEMType(have string, want ...string) error {
return &InvalidPEMType{
return &InvalidPEMTypeError{
have: have,
want: want,
}
}
func LoadingError(t ErrorSourceType, err error) error {
return fmt.Errorf("failed to load %s from disk: %w", t, err)
return &Error{Source: t, Kind: KindLoad, Err: err}
}
func ParsingError(t ErrorSourceType, err error) error {
return fmt.Errorf("failed to parse %s: %w", t, err)
return &Error{Source: t, Kind: KindParse, Err: err}
}
func DecodeError(t ErrorSourceType, err error) error {
return fmt.Errorf("failed to decode %s: %w", t, err)
return &Error{Source: t, Kind: KindDecode, Err: err}
}
func VerifyError(t ErrorSourceType, err error) error {
return fmt.Errorf("failed to verify %s: %w", t, err)
return &Error{Source: t, Kind: KindVerify, Err: err}
}
var ErrEncryptedPrivateKey = errors.New("private key is encrypted")

View File

@@ -0,0 +1,56 @@
//nolint:testpackage // keep tests in the same package for internal symbol access
package certerr
import (
"errors"
"strings"
"testing"
)
func TestTypedErrorWrappingAndFormatting(t *testing.T) {
cause := errors.New("bad data")
err := DecodeError(ErrorSourceCertificate, cause)
// Ensure we can retrieve the typed error
var e *Error
if !errors.As(err, &e) {
t.Fatalf("expected errors.As to retrieve *certerr.Error, got %T", err)
}
if e.Kind != KindDecode {
t.Fatalf("unexpected kind: %v", e.Kind)
}
if e.Source != ErrorSourceCertificate {
t.Fatalf("unexpected source: %v", e.Source)
}
// Check message format (no trailing punctuation enforced by content)
msg := e.Error()
if !strings.Contains(msg, "failed to decode certificate") || !strings.Contains(msg, "bad data") {
t.Fatalf("unexpected error message: %q", msg)
}
}
func TestErrorsIsOnWrappedSentinel(t *testing.T) {
err := DecodeError(ErrorSourcePrivateKey, ErrEncryptedPrivateKey)
if !errors.Is(err, ErrEncryptedPrivateKey) {
t.Fatalf("expected errors.Is to match ErrEncryptedPrivateKey")
}
}
func TestInvalidPEMTypeMessageSingle(t *testing.T) {
err := ErrInvalidPEMType("FOO", "CERTIFICATE")
want := "invalid PEM type: have FOO, expected CERTIFICATE"
if err.Error() != want {
t.Fatalf("unexpected error message: got %q, want %q", err.Error(), want)
}
}
func TestInvalidPEMTypeMessageMultiple(t *testing.T) {
err := ErrInvalidPEMType("FOO", "CERTIFICATE", "NEW CERTIFICATE REQUEST")
if !strings.Contains(
err.Error(),
"invalid PEM type: have FOO, expected one of CERTIFICATE, NEW CERTIFICATE REQUEST",
) {
t.Fatalf("unexpected error message: %q", err.Error())
}
}

View File

@@ -4,43 +4,53 @@ import (
"crypto/x509"
"encoding/pem"
"errors"
"io/ioutil"
"os"
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
)
// ReadCertificate reads a DER or PEM-encoded certificate from the
// byte slice.
func ReadCertificate(in []byte) (cert *x509.Certificate, rest []byte, err error) {
func ReadCertificate(in []byte) (*x509.Certificate, []byte, error) {
if len(in) == 0 {
err = certerr.ErrEmptyCertificate
return
return nil, nil, certerr.ParsingError(certerr.ErrorSourceCertificate, certerr.ErrEmptyCertificate)
}
if in[0] == '-' {
p, remaining := pem.Decode(in)
if p == nil {
err = errors.New("certlib: invalid PEM file")
return
return nil, nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("invalid PEM file"))
}
rest = remaining
rest := remaining
if p.Type != "CERTIFICATE" {
err = certerr.ErrInvalidPEMType(p.Type, "CERTIFICATE")
return
return nil, rest, certerr.ParsingError(
certerr.ErrorSourceCertificate,
certerr.ErrInvalidPEMType(p.Type, "CERTIFICATE"),
)
}
in = p.Bytes
cert, err := x509.ParseCertificate(in)
if err != nil {
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
}
return cert, rest, nil
}
cert, err = x509.ParseCertificate(in)
return
cert, err := x509.ParseCertificate(in)
if err != nil {
return nil, nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
}
return cert, nil, nil
}
// ReadCertificates tries to read all the certificates in a
// PEM-encoded collection.
func ReadCertificates(in []byte) (certs []*x509.Certificate, err error) {
func ReadCertificates(in []byte) ([]*x509.Certificate, error) {
var cert *x509.Certificate
var certs []*x509.Certificate
var err error
for {
cert, in, err = ReadCertificate(in)
if err != nil {
@@ -64,9 +74,9 @@ func ReadCertificates(in []byte) (certs []*x509.Certificate, err error) {
// the file contains multiple certificates (e.g. a chain), only the
// first certificate is returned.
func LoadCertificate(path string) (*x509.Certificate, error) {
in, err := ioutil.ReadFile(path)
in, err := os.ReadFile(path)
if err != nil {
return nil, err
return nil, certerr.LoadingError(certerr.ErrorSourceCertificate, err)
}
cert, _, err := ReadCertificate(in)
@@ -76,9 +86,9 @@ func LoadCertificate(path string) (*x509.Certificate, error) {
// LoadCertificates tries to read all the certificates in a file,
// returning them in the order that it found them in the file.
func LoadCertificates(path string) ([]*x509.Certificate, error) {
in, err := ioutil.ReadFile(path)
in, err := os.ReadFile(path)
if err != nil {
return nil, err
return nil, certerr.LoadingError(certerr.ErrorSourceCertificate, err)
}
return ReadCertificates(in)

View File

@@ -1,3 +1,4 @@
//nolint:testpackage // keep tests in the same package for internal symbol access
package certlib
import (

View File

@@ -38,6 +38,7 @@ import (
"crypto/ed25519"
"crypto/rsa"
"crypto/x509"
"errors"
"fmt"
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
@@ -47,29 +48,36 @@ import (
// private key. The key must not be in PEM format. If an error is returned, it
// may contain information about the private key, so care should be taken when
// displaying it directly.
func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) {
generalKey, err := x509.ParsePKCS8PrivateKey(keyDER)
if err != nil {
generalKey, err = x509.ParsePKCS1PrivateKey(keyDER)
if err != nil {
generalKey, err = x509.ParseECPrivateKey(keyDER)
if err != nil {
generalKey, err = ParseEd25519PrivateKey(keyDER)
if err != nil {
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, err)
}
}
func ParsePrivateKeyDER(keyDER []byte) (crypto.Signer, error) {
// Try common encodings in order without deep nesting.
if k, err := x509.ParsePKCS8PrivateKey(keyDER); err == nil {
switch kk := k.(type) {
case *rsa.PrivateKey:
return kk, nil
case *ecdsa.PrivateKey:
return kk, nil
case ed25519.PrivateKey:
return kk, nil
default:
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %T", k))
}
}
switch generalKey := generalKey.(type) {
case *rsa.PrivateKey:
return generalKey, nil
case *ecdsa.PrivateKey:
return generalKey, nil
case ed25519.PrivateKey:
return generalKey, nil
default:
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %t", generalKey))
if k, err := x509.ParsePKCS1PrivateKey(keyDER); err == nil {
return k, nil
}
if k, err := x509.ParseECPrivateKey(keyDER); err == nil {
return k, nil
}
if k, err := ParseEd25519PrivateKey(keyDER); err == nil {
if kk, ok := k.(ed25519.PrivateKey); ok {
return kk, nil
}
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %T", k))
}
// If all parsers failed, return the last error from Ed25519 attempt (approximate cause).
if _, err := ParseEd25519PrivateKey(keyDER); err != nil {
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, err)
}
// Fallback (should be unreachable)
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, errors.New("unknown key encoding"))
}

View File

@@ -65,12 +65,14 @@ func MarshalEd25519PublicKey(pk crypto.PublicKey) ([]byte, error) {
return nil, errEd25519WrongKeyType
}
const bitsPerByte = 8
spki := subjectPublicKeyInfo{
Algorithm: pkix.AlgorithmIdentifier{
Algorithm: ed25519OID,
},
PublicKey: asn1.BitString{
BitLength: len(pub) * 8,
BitLength: len(pub) * bitsPerByte,
Bytes: pub,
},
}
@@ -91,7 +93,8 @@ func ParseEd25519PublicKey(der []byte) (crypto.PublicKey, error) {
return nil, errEd25519WrongID
}
if spki.PublicKey.BitLength != ed25519.PublicKeySize*8 {
const bitsPerByte = 8
if spki.PublicKey.BitLength != ed25519.PublicKeySize*bitsPerByte {
return nil, errors.New("SubjectPublicKeyInfo PublicKey length mismatch")
}

View File

@@ -49,14 +49,14 @@ import (
"strings"
"time"
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
"git.wntrmute.dev/kyle/goutils/certlib/pkcs7"
ct "github.com/google/certificate-transparency-go"
cttls "github.com/google/certificate-transparency-go/tls"
ctx509 "github.com/google/certificate-transparency-go/x509"
"golang.org/x/crypto/ocsp"
"golang.org/x/crypto/pkcs12"
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
"git.wntrmute.dev/kyle/goutils/certlib/pkcs7"
)
// OneYear is a time.Duration representing a year's worth of seconds.
@@ -65,10 +65,10 @@ const OneYear = 8760 * time.Hour
// OneDay is a time.Duration representing a day's worth of seconds.
const OneDay = 24 * time.Hour
// DelegationUsage is the OID for the DelegationUsage extensions
// DelegationUsage is the OID for the DelegationUsage extensions.
var DelegationUsage = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 44363, 44}
// DelegationExtension
// DelegationExtension is a non-critical extension marking delegation usage.
var DelegationExtension = pkix.Extension{
Id: DelegationUsage,
Critical: false,
@@ -81,41 +81,51 @@ func InclusiveDate(year int, month time.Month, day int) time.Time {
return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond)
}
const (
year2012 = 2012
year2015 = 2015
day1 = 1
)
// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop
// issuing certificates valid for more than 5 years.
var Jul2012 = InclusiveDate(2012, time.July, 01)
var Jul2012 = InclusiveDate(year2012, time.July, day1)
// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop
// issuing certificates valid for more than 39 months.
var Apr2015 = InclusiveDate(2015, time.April, 01)
var Apr2015 = InclusiveDate(year2015, time.April, day1)
// KeyLength returns the bit size of ECDSA or RSA PublicKey
func KeyLength(key interface{}) int {
if key == nil {
// KeyLength returns the bit size of ECDSA or RSA PublicKey.
func KeyLength(key any) int {
switch k := key.(type) {
case *ecdsa.PublicKey:
if k == nil {
return 0
}
return k.Curve.Params().BitSize
case *rsa.PublicKey:
if k == nil {
return 0
}
return k.N.BitLen()
default:
return 0
}
if ecdsaKey, ok := key.(*ecdsa.PublicKey); ok {
return ecdsaKey.Curve.Params().BitSize
} else if rsaKey, ok := key.(*rsa.PublicKey); ok {
return rsaKey.N.BitLen()
}
return 0
}
// ExpiryTime returns the time when the certificate chain is expired.
func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) {
func ExpiryTime(chain []*x509.Certificate) time.Time {
var notAfter time.Time
if len(chain) == 0 {
return
return notAfter
}
notAfter = chain[0].NotAfter
for _, cert := range chain {
if notAfter.After(cert.NotAfter) {
notAfter = cert.NotAfter
}
}
return
return notAfter
}
// MonthsValid returns the number of months for which a certificate is valid.
@@ -144,109 +154,109 @@ func ValidExpiry(c *x509.Certificate) bool {
maxMonths = 39
case issued.After(Jul2012):
maxMonths = 60
case issued.Before(Jul2012):
default:
maxMonths = 120
}
if MonthsValid(c) > maxMonths {
return false
}
return true
return MonthsValid(c) <= maxMonths
}
// SignatureString returns the TLS signature string corresponding to
// an X509 signature algorithm.
var signatureString = map[x509.SignatureAlgorithm]string{
x509.UnknownSignatureAlgorithm: "Unknown Signature",
x509.MD2WithRSA: "MD2WithRSA",
x509.MD5WithRSA: "MD5WithRSA",
x509.SHA1WithRSA: "SHA1WithRSA",
x509.SHA256WithRSA: "SHA256WithRSA",
x509.SHA384WithRSA: "SHA384WithRSA",
x509.SHA512WithRSA: "SHA512WithRSA",
x509.SHA256WithRSAPSS: "SHA256WithRSAPSS",
x509.SHA384WithRSAPSS: "SHA384WithRSAPSS",
x509.SHA512WithRSAPSS: "SHA512WithRSAPSS",
x509.DSAWithSHA1: "DSAWithSHA1",
x509.DSAWithSHA256: "DSAWithSHA256",
x509.ECDSAWithSHA1: "ECDSAWithSHA1",
x509.ECDSAWithSHA256: "ECDSAWithSHA256",
x509.ECDSAWithSHA384: "ECDSAWithSHA384",
x509.ECDSAWithSHA512: "ECDSAWithSHA512",
x509.PureEd25519: "PureEd25519",
}
// SignatureString returns the TLS signature string corresponding to
// an X509 signature algorithm.
func SignatureString(alg x509.SignatureAlgorithm) string {
switch alg {
case x509.MD2WithRSA:
return "MD2WithRSA"
case x509.MD5WithRSA:
return "MD5WithRSA"
case x509.SHA1WithRSA:
return "SHA1WithRSA"
case x509.SHA256WithRSA:
return "SHA256WithRSA"
case x509.SHA384WithRSA:
return "SHA384WithRSA"
case x509.SHA512WithRSA:
return "SHA512WithRSA"
case x509.DSAWithSHA1:
return "DSAWithSHA1"
case x509.DSAWithSHA256:
return "DSAWithSHA256"
case x509.ECDSAWithSHA1:
return "ECDSAWithSHA1"
case x509.ECDSAWithSHA256:
return "ECDSAWithSHA256"
case x509.ECDSAWithSHA384:
return "ECDSAWithSHA384"
case x509.ECDSAWithSHA512:
return "ECDSAWithSHA512"
default:
return "Unknown Signature"
if s, ok := signatureString[alg]; ok {
return s
}
return "Unknown Signature"
}
// HashAlgoString returns the hash algorithm name contained in the signature
// method.
var hashAlgoString = map[x509.SignatureAlgorithm]string{
x509.UnknownSignatureAlgorithm: "Unknown Hash Algorithm",
x509.MD2WithRSA: "MD2",
x509.MD5WithRSA: "MD5",
x509.SHA1WithRSA: "SHA1",
x509.SHA256WithRSA: "SHA256",
x509.SHA384WithRSA: "SHA384",
x509.SHA512WithRSA: "SHA512",
x509.SHA256WithRSAPSS: "SHA256",
x509.SHA384WithRSAPSS: "SHA384",
x509.SHA512WithRSAPSS: "SHA512",
x509.DSAWithSHA1: "SHA1",
x509.DSAWithSHA256: "SHA256",
x509.ECDSAWithSHA1: "SHA1",
x509.ECDSAWithSHA256: "SHA256",
x509.ECDSAWithSHA384: "SHA384",
x509.ECDSAWithSHA512: "SHA512",
x509.PureEd25519: "SHA512", // per x509 docs Ed25519 uses SHA-512 internally
}
// HashAlgoString returns the hash algorithm name contained in the signature
// method.
func HashAlgoString(alg x509.SignatureAlgorithm) string {
switch alg {
case x509.MD2WithRSA:
return "MD2"
case x509.MD5WithRSA:
return "MD5"
case x509.SHA1WithRSA:
return "SHA1"
case x509.SHA256WithRSA:
return "SHA256"
case x509.SHA384WithRSA:
return "SHA384"
case x509.SHA512WithRSA:
return "SHA512"
case x509.DSAWithSHA1:
return "SHA1"
case x509.DSAWithSHA256:
return "SHA256"
case x509.ECDSAWithSHA1:
return "SHA1"
case x509.ECDSAWithSHA256:
return "SHA256"
case x509.ECDSAWithSHA384:
return "SHA384"
case x509.ECDSAWithSHA512:
return "SHA512"
default:
return "Unknown Hash Algorithm"
if s, ok := hashAlgoString[alg]; ok {
return s
}
return "Unknown Hash Algorithm"
}
// StringTLSVersion returns underlying enum values from human names for TLS
// versions, defaults to current golang default of TLS 1.0
// versions, defaults to current golang default of TLS 1.0.
func StringTLSVersion(version string) uint16 {
switch version {
case "1.3":
return tls.VersionTLS13
case "1.2":
return tls.VersionTLS12
case "1.1":
return tls.VersionTLS11
case "1.0":
return tls.VersionTLS10
default:
// Default to Go's historical default of TLS 1.0 for unknown values
return tls.VersionTLS10
}
}
// EncodeCertificatesPEM encodes a number of x509 certificates to PEM
// EncodeCertificatesPEM encodes a number of x509 certificates to PEM.
func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
var buffer bytes.Buffer
for _, cert := range certs {
pem.Encode(&buffer, &pem.Block{
if err := pem.Encode(&buffer, &pem.Block{
Type: "CERTIFICATE",
Bytes: cert.Raw,
})
}); err != nil {
return nil
}
}
return buffer.Bytes()
}
// EncodeCertificatePEM encodes a single x509 certificate to PEM
// EncodeCertificatePEM encodes a single x509 certificate to PEM.
func EncodeCertificatePEM(cert *x509.Certificate) []byte {
return EncodeCertificatesPEM([]*x509.Certificate{cert})
}
@@ -269,38 +279,52 @@ func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
certs = append(certs, cert...)
}
if len(certsPEM) > 0 {
return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("trailing data at end of certificate"))
return nil, certerr.DecodeError(
certerr.ErrorSourceCertificate,
errors.New("trailing data at end of certificate"),
)
}
return certs, nil
}
// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
// either PKCS #7, PKCS #12, or raw x509.
func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
func ParseCertificatesDER(certsDER []byte, password string) ([]*x509.Certificate, crypto.Signer, error) {
certsDER = bytes.TrimSpace(certsDER)
pkcs7data, err := pkcs7.ParsePKCS7(certsDER)
if err != nil {
var pkcs12data interface{}
certs = make([]*x509.Certificate, 1)
pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password)
if err != nil {
certs, err = x509.ParseCertificates(certsDER)
if err != nil {
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, err)
}
} else {
key = pkcs12data.(crypto.Signer)
}
} else {
// First, try PKCS #7
if pkcs7data, err7 := pkcs7.ParsePKCS7(certsDER); err7 == nil {
if pkcs7data.ContentInfo != "SignedData" {
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("can only extract certificates from signed data content info"))
return nil, nil, certerr.DecodeError(
certerr.ErrorSourceCertificate,
errors.New("can only extract certificates from signed data content info"),
)
}
certs = pkcs7data.Content.SignedData.Certificates
certs := pkcs7data.Content.SignedData.Certificates
if certs == nil {
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificates decoded"))
}
return certs, nil, nil
}
if certs == nil {
return nil, key, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificates decoded"))
// Next, try PKCS #12
if pkcs12data, cert, err12 := pkcs12.Decode(certsDER, password); err12 == nil {
signer, ok := pkcs12data.(crypto.Signer)
if !ok {
return nil, nil, certerr.DecodeError(
certerr.ErrorSourcePrivateKey,
errors.New("PKCS12 data does not contain a private key"),
)
}
return []*x509.Certificate{cert}, signer, nil
}
return certs, key, nil
// Finally, attempt to parse raw X.509 certificates
certs, err := x509.ParseCertificates(certsDER)
if err != nil {
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, err)
}
return certs, nil, nil
}
// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and check if it is self-signed.
@@ -310,7 +334,8 @@ func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
return nil, err
}
if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
err = cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature)
if err != nil {
return nil, certerr.VerifyError(certerr.ErrorSourceCertificate, err)
}
return cert, nil
@@ -320,17 +345,26 @@ func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
// can handle PEM encoded PKCS #7 structures.
func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
certPEM = bytes.TrimSpace(certPEM)
cert, rest, err := ParseOneCertificateFromPEM(certPEM)
certs, rest, err := ParseOneCertificateFromPEM(certPEM)
if err != nil {
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
} else if cert == nil {
return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificate decoded"))
} else if len(rest) > 0 {
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("the PEM file should contain only one object"))
} else if len(cert) > 1 {
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("the PKCS7 object in the PEM file should contain only one certificate"))
}
return cert[0], nil
if certs == nil {
return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificate decoded"))
}
if len(rest) > 0 {
return nil, certerr.ParsingError(
certerr.ErrorSourceCertificate,
errors.New("the PEM file should contain only one object"),
)
}
if len(certs) > 1 {
return nil, certerr.ParsingError(
certerr.ErrorSourceCertificate,
errors.New("the PKCS7 object in the PEM file should contain only one certificate"),
)
}
return certs[0], nil
}
// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object,
@@ -338,7 +372,6 @@ func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
// multiple certificates, from the top of certsPEM, which itself may
// contain multiple PEM encoded certificate objects.
func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) {
block, rest := pem.Decode(certsPEM)
if block == nil {
return nil, rest, nil
@@ -346,8 +379,8 @@ func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, e
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes)
if err != nil {
pkcs7data, err2 := pkcs7.ParsePKCS7(block.Bytes)
if err2 != nil {
return nil, rest, err
}
if pkcs7data.ContentInfo != "SignedData" {
@@ -366,7 +399,7 @@ func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, e
// LoadPEMCertPool loads a pool of PEM certificates from file.
func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
if certsFile == "" {
return nil, nil
return nil, nil //nolint:nilnil // no CA file provided -> treat as no pool and no error
}
pemCerts, err := os.ReadFile(certsFile)
if err != nil {
@@ -379,12 +412,12 @@ func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
// PEMToCertPool converts PEM certificates to a CertPool.
func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
if len(pemCerts) == 0 {
return nil, nil
return nil, nil //nolint:nilnil // empty input means no pool needed
}
certPool := x509.NewCertPool()
if !certPool.AppendCertsFromPEM(pemCerts) {
return nil, errors.New("failed to load cert pool")
return nil, certerr.LoadingError(certerr.ErrorSourceCertificate, errors.New("failed to load cert pool"))
}
return certPool, nil
@@ -393,14 +426,14 @@ func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
// ParsePrivateKeyPEM parses and returns a PEM-encoded private
// key. The private key may be either an unencrypted PKCS#8, PKCS#1,
// or elliptic private key.
func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) {
func ParsePrivateKeyPEM(keyPEM []byte) (crypto.Signer, error) {
return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
}
// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
// key. The private key may be a potentially encrypted PKCS#8, PKCS#1,
// or elliptic private key.
func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) {
func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (crypto.Signer, error) {
keyDER, err := GetKeyDERFromPEM(keyPEM, password)
if err != nil {
return nil, err
@@ -420,44 +453,49 @@ func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) {
break
}
}
if keyDER != nil {
if procType, ok := keyDER.Headers["Proc-Type"]; ok {
if strings.Contains(procType, "ENCRYPTED") {
if password != nil {
return x509.DecryptPEMBlock(keyDER, password)
}
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, certerr.ErrEncryptedPrivateKey)
}
}
return keyDER.Bytes, nil
if keyDER == nil {
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, errors.New("failed to decode private key"))
}
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, errors.New("failed to decode private key"))
if procType, ok := keyDER.Headers["Proc-Type"]; ok && strings.Contains(procType, "ENCRYPTED") {
if password != nil {
// nolintlint requires rationale:
//nolint:staticcheck // legacy RFC1423 PEM encryption supported for backward compatibility when caller supplies a password
return x509.DecryptPEMBlock(keyDER, password)
}
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, certerr.ErrEncryptedPrivateKey)
}
return keyDER.Bytes, nil
}
// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) {
func ParseCSR(in []byte) (*x509.CertificateRequest, []byte, error) {
in = bytes.TrimSpace(in)
p, rest := pem.Decode(in)
if p != nil {
if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, certerr.ErrInvalidPEMType(p.Type, "NEW CERTIFICATE REQUEST", "CERTIFICATE REQUEST"))
if p == nil {
csr, err := x509.ParseCertificateRequest(in)
if err != nil {
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, err)
}
csr, err = x509.ParseCertificateRequest(p.Bytes)
} else {
csr, err = x509.ParseCertificateRequest(in)
if sigErr := csr.CheckSignature(); sigErr != nil {
return nil, rest, certerr.VerifyError(certerr.ErrorSourceCSR, sigErr)
}
return csr, rest, nil
}
if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
return nil, rest, certerr.ParsingError(
certerr.ErrorSourceCSR,
certerr.ErrInvalidPEMType(p.Type, "NEW CERTIFICATE REQUEST", "CERTIFICATE REQUEST"),
)
}
csr, err := x509.ParseCertificateRequest(p.Bytes)
if err != nil {
return nil, rest, err
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, err)
}
err = csr.CheckSignature()
if err != nil {
return nil, rest, err
if sigErr := csr.CheckSignature(); sigErr != nil {
return nil, rest, certerr.VerifyError(certerr.ErrorSourceCSR, sigErr)
}
return csr, rest, nil
}
@@ -465,14 +503,14 @@ func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error)
// It does not check the signature. This is useful for dumping data from a CSR
// locally.
func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
block, _ := pem.Decode([]byte(csrPEM))
block, _ := pem.Decode(csrPEM)
if block == nil {
return nil, certerr.DecodeError(certerr.ErrorSourceCSR, errors.New("PEM block is empty"))
}
csrObject, err := x509.ParseCertificateRequest(block.Bytes)
if err != nil {
return nil, err
return nil, certerr.ParsingError(certerr.ErrorSourceCSR, err)
}
return csrObject, nil
@@ -480,15 +518,20 @@ func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer.
func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
const (
rsaBits2048 = 2048
rsaBits3072 = 3072
rsaBits4096 = 4096
)
switch pub := priv.Public().(type) {
case *rsa.PublicKey:
bitLength := pub.N.BitLen()
switch {
case bitLength >= 4096:
case bitLength >= rsaBits4096:
return x509.SHA512WithRSA
case bitLength >= 3072:
case bitLength >= rsaBits3072:
return x509.SHA384WithRSA
case bitLength >= 2048:
case bitLength >= rsaBits2048:
return x509.SHA256WithRSA
default:
return x509.SHA1WithRSA
@@ -509,7 +552,7 @@ func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
}
}
// LoadClientCertificate loads a key/certificate pair from PEM files
// LoadClientCertificate loads a key/certificate pair from PEM files.
func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, error) {
if certFile != "" && keyFile != "" {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
@@ -518,10 +561,10 @@ func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, e
}
return &cert, nil
}
return nil, nil
return nil, nil //nolint:nilnil // absence of client cert is not an error
}
// CreateTLSConfig creates a tls.Config object from certs and roots
// CreateTLSConfig creates a tls.Config object from certs and roots.
func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Config {
var certs []tls.Certificate
if cert != nil {
@@ -530,6 +573,7 @@ func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Confi
return &tls.Config{
Certificates: certs,
RootCAs: remoteCAs,
MinVersion: tls.VersionTLS12, // secure default
}
}
@@ -554,18 +598,24 @@ func DeserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimesta
return nil, err
}
if len(rest) != 0 {
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, errors.New("serialized SCT list contained trailing garbage"))
return nil, certerr.ParsingError(
certerr.ErrorSourceSCTList,
errors.New("serialized SCT list contained trailing garbage"),
)
}
list := make([]ct.SignedCertificateTimestamp, len(sctList.SCTList))
for i, serializedSCT := range sctList.SCTList {
var sct ct.SignedCertificateTimestamp
rest, err := cttls.Unmarshal(serializedSCT.Val, &sct)
if err != nil {
return nil, err
rest2, err2 := cttls.Unmarshal(serializedSCT.Val, &sct)
if err2 != nil {
return nil, err2
}
if len(rest) != 0 {
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, errors.New("serialized SCT list contained trailing garbage"))
if len(rest2) != 0 {
return nil, certerr.ParsingError(
certerr.ErrorSourceSCTList,
errors.New("serialized SCT list contained trailing garbage"),
)
}
list[i] = sct
}
@@ -577,12 +627,12 @@ func DeserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimesta
// unmarshalled.
func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTimestamp, error) {
// This loop finds the SCTListExtension in the OCSP response.
var SCTListExtension, ext pkix.Extension
var sctListExtension, ext pkix.Extension
for _, ext = range response.Extensions {
// sctExtOid is the ObjectIdentifier of a Signed Certificate Timestamp.
sctExtOid := asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 5}
if ext.Id.Equal(sctExtOid) {
SCTListExtension = ext
sctListExtension = ext
break
}
}
@@ -590,10 +640,10 @@ func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTim
// This code block extracts the sctList from the SCT extension.
var sctList []ct.SignedCertificateTimestamp
var err error
if numBytes := len(SCTListExtension.Value); numBytes != 0 {
if numBytes := len(sctListExtension.Value); numBytes != 0 {
var serializedSCTList []byte
rest := make([]byte, numBytes)
copy(rest, SCTListExtension.Value)
copy(rest, sctListExtension.Value)
for len(rest) != 0 {
rest, err = asn1.Unmarshal(rest, &serializedSCTList)
if err != nil {
@@ -611,20 +661,16 @@ func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTim
// the subsequent file. If no prefix is provided, valFile is assumed to be a
// file path.
func ReadBytes(valFile string) ([]byte, error) {
switch splitVal := strings.SplitN(valFile, ":", 2); len(splitVal) {
case 1:
prefix, rest, found := strings.Cut(valFile, ":")
if !found {
return os.ReadFile(valFile)
case 2:
switch splitVal[0] {
case "env":
return []byte(os.Getenv(splitVal[1])), nil
case "file":
return os.ReadFile(splitVal[1])
default:
return nil, fmt.Errorf("unknown prefix: %s", splitVal[0])
}
}
switch prefix {
case "env":
return []byte(os.Getenv(rest)), nil
case "file":
return os.ReadFile(rest)
default:
return nil, fmt.Errorf("multiple prefixes: %s",
strings.Join(splitVal[:len(splitVal)-1], ", "))
return nil, fmt.Errorf("unknown prefix: %s", prefix)
}
}

View File

@@ -9,6 +9,8 @@ import (
"strings"
)
const defaultHTTPSPort = 443
type Target struct {
Host string
Port int
@@ -29,29 +31,29 @@ func parseURL(host string) (string, int, error) {
}
if url.Port() == "" {
return url.Hostname(), 443, nil
return url.Hostname(), defaultHTTPSPort, nil
}
port, err := strconv.ParseInt(url.Port(), 10, 16)
if err != nil {
portInt, err2 := strconv.ParseInt(url.Port(), 10, 16)
if err2 != nil {
return "", 0, fmt.Errorf("certlib/hosts: invalid port: %s", url.Port())
}
return url.Hostname(), int(port), nil
return url.Hostname(), int(portInt), nil
}
func parseHostPort(host string) (string, int, error) {
host, sport, err := net.SplitHostPort(host)
if err == nil {
port, err := strconv.ParseInt(sport, 10, 16)
if err != nil {
portInt, err2 := strconv.ParseInt(sport, 10, 16)
if err2 != nil {
return "", 0, fmt.Errorf("certlib/hosts: invalid port: %s", sport)
}
return host, int(port), nil
return host, int(portInt), nil
}
return host, 443, nil
return host, defaultHTTPSPort, nil
}
func ParseHost(host string) (*Target, error) {

View File

@@ -93,7 +93,7 @@ type signedData struct {
Version int
DigestAlgorithms asn1.RawValue
ContentInfo asn1.RawValue
Certificates asn1.RawValue `asn1:"optional" asn1:"tag:0"`
Certificates asn1.RawValue `asn1:"optional"`
Crls asn1.RawValue `asn1:"optional"`
SignerInfos asn1.RawValue
}
@@ -158,9 +158,9 @@ type EncryptedContentInfo struct {
EncryptedContent []byte `asn1:"tag:0,optional"`
}
func unmarshalInit(raw []byte) (init initPKCS7, err error) {
_, err = asn1.Unmarshal(raw, &init)
if err != nil {
func unmarshalInit(raw []byte) (initPKCS7, error) {
var init initPKCS7
if _, err := asn1.Unmarshal(raw, &init); err != nil {
return initPKCS7{}, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
}
return init, nil
@@ -207,7 +207,10 @@ func populateEncryptedData(msg *PKCS7, contentBytes []byte) error {
return certerr.ParsingError(certerr.ErrorSourceCertificate, err)
}
if ed.Version != 0 {
return certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("only PKCS #7 encryptedData version 0 is supported"))
return certerr.ParsingError(
certerr.ErrorSourceCertificate,
errors.New("only PKCS #7 encryptedData version 0 is supported"),
)
}
msg.Content.EncryptedData = ed
return nil
@@ -215,34 +218,35 @@ func populateEncryptedData(msg *PKCS7, contentBytes []byte) error {
// ParsePKCS7 attempts to parse the DER encoded bytes of a
// PKCS7 structure.
func ParsePKCS7(raw []byte) (msg *PKCS7, err error) {
func ParsePKCS7(raw []byte) (*PKCS7, error) {
pkcs7, err := unmarshalInit(raw)
if err != nil {
return nil, err
}
msg = new(PKCS7)
msg := new(PKCS7)
msg.Raw = pkcs7.Raw
msg.ContentInfo = pkcs7.ContentType.String()
switch msg.ContentInfo {
case ObjIDData:
if err := populateData(msg, pkcs7.Content); err != nil {
return nil, err
if e := populateData(msg, pkcs7.Content); e != nil {
return nil, e
}
case ObjIDSignedData:
if err := populateSignedData(msg, pkcs7.Content.Bytes); err != nil {
return nil, err
if e := populateSignedData(msg, pkcs7.Content.Bytes); e != nil {
return nil, e
}
case ObjIDEncryptedData:
if err := populateEncryptedData(msg, pkcs7.Content.Bytes); err != nil {
return nil, err
if e := populateEncryptedData(msg, pkcs7.Content.Bytes); e != nil {
return nil, e
}
default:
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("only PKCS# 7 content of type data, signed data or encrypted data can be parsed"))
return nil, certerr.ParsingError(
certerr.ErrorSourceCertificate,
errors.New("only PKCS# 7 content of type data, signed data or encrypted data can be parsed"),
)
}
return msg, nil
}

View File

@@ -5,6 +5,7 @@ package revoke
import (
"bytes"
"context"
"crypto"
"crypto/x509"
"encoding/base64"
@@ -89,35 +90,35 @@ func ldapURL(url string) bool {
// - false, false: an error was encountered while checking revocations.
// - false, true: the certificate was checked successfully, and it is not revoked.
// - true, true: the certificate was checked successfully, and it is revoked.
// - true, false: failure to check revocation status causes verification to fail
func revCheck(cert *x509.Certificate) (revoked, ok bool, err error) {
// - true, false: failure to check revocation status causes verification to fail.
func revCheck(cert *x509.Certificate) (bool, bool, error) {
for _, url := range cert.CRLDistributionPoints {
if ldapURL(url) {
log.Infof("skipping LDAP CRL: %s", url)
continue
}
if revoked, ok, err := certIsRevokedCRL(cert, url); !ok {
if rvk, ok2, err2 := certIsRevokedCRL(cert, url); !ok2 {
log.Warning("error checking revocation via CRL")
if HardFail {
return true, false, err
return true, false, err2
}
return false, false, err
} else if revoked {
return false, false, err2
} else if rvk {
log.Info("certificate is revoked via CRL")
return true, true, err
return true, true, err2
}
}
if revoked, ok, err := certIsRevokedOCSP(cert, HardFail); !ok {
if rvk, ok2, err2 := certIsRevokedOCSP(cert, HardFail); !ok2 {
log.Warning("error checking revocation via OCSP")
if HardFail {
return true, false, err
return true, false, err2
}
return false, false, err
} else if revoked {
return false, false, err2
} else if rvk {
log.Info("certificate is revoked via OCSP")
return true, true, err
return true, true, err2
}
return false, true, nil
@@ -125,13 +126,17 @@ func revCheck(cert *x509.Certificate) (revoked, ok bool, err error) {
// fetchCRL fetches and parses a CRL.
func fetchCRL(url string) (*x509.RevocationList, error) {
resp, err := HTTPClient.Get(url)
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
if err != nil {
return nil, err
}
resp, err := HTTPClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode >= 300 {
if resp.StatusCode >= http.StatusMultipleChoices {
return nil, errors.New("failed to retrieve CRL")
}
@@ -154,12 +159,11 @@ func getIssuer(cert *x509.Certificate) *x509.Certificate {
}
return issuer
}
// check a cert against a specific CRL. Returns the same bool pair
// as revCheck, plus an error if one occurred.
func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err error) {
func certIsRevokedCRL(cert *x509.Certificate, url string) (bool, bool, error) {
crlLock.Lock()
crl, ok := CRLSet[url]
if ok && crl == nil {
@@ -187,10 +191,9 @@ func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err
// check CRL signature
if issuer != nil {
err = crl.CheckSignatureFrom(issuer)
if err != nil {
log.Warningf("failed to verify CRL: %v", err)
return false, false, err
if sigErr := crl.CheckSignatureFrom(issuer); sigErr != nil {
log.Warningf("failed to verify CRL: %v", sigErr)
return false, false, sigErr
}
}
@@ -199,40 +202,44 @@ func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err
crlLock.Unlock()
}
for _, revoked := range crl.RevokedCertificates {
if cert.SerialNumber.Cmp(revoked.SerialNumber) == 0 {
for _, entry := range crl.RevokedCertificateEntries {
if cert.SerialNumber.Cmp(entry.SerialNumber) == 0 {
log.Info("Serial number match: intermediate is revoked.")
return true, true, err
return true, true, nil
}
}
return false, true, err
return false, true, nil
}
// VerifyCertificate ensures that the certificate passed in hasn't
// expired and checks the CRL for the server.
func VerifyCertificate(cert *x509.Certificate) (revoked, ok bool) {
revoked, ok, _ = VerifyCertificateError(cert)
func VerifyCertificate(cert *x509.Certificate) (bool, bool) {
revoked, ok, _ := VerifyCertificateError(cert)
return revoked, ok
}
// VerifyCertificateError ensures that the certificate passed in hasn't
// expired and checks the CRL for the server.
func VerifyCertificateError(cert *x509.Certificate) (revoked, ok bool, err error) {
if !time.Now().Before(cert.NotAfter) {
msg := fmt.Sprintf("Certificate expired %s\n", cert.NotAfter)
log.Info(msg)
return true, true, errors.New(msg)
} else if !time.Now().After(cert.NotBefore) {
msg := fmt.Sprintf("Certificate isn't valid until %s\n", cert.NotBefore)
log.Info(msg)
return true, true, errors.New(msg)
}
return revCheck(cert)
func VerifyCertificateError(cert *x509.Certificate) (bool, bool, error) {
if !time.Now().Before(cert.NotAfter) {
msg := fmt.Sprintf("Certificate expired %s\n", cert.NotAfter)
log.Info(msg)
return true, true, errors.New(msg)
} else if !time.Now().After(cert.NotBefore) {
msg := fmt.Sprintf("Certificate isn't valid until %s\n", cert.NotBefore)
log.Info(msg)
return true, true, errors.New(msg)
}
return revCheck(cert)
}
func fetchRemote(url string) (*x509.Certificate, error) {
resp, err := HTTPClient.Get(url)
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
if err != nil {
return nil, err
}
resp, err := HTTPClient.Do(req)
if err != nil {
return nil, err
}
@@ -255,8 +262,12 @@ var ocspOpts = ocsp.RequestOptions{
Hash: crypto.SHA1,
}
func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e error) {
var err error
const ocspGetURLMaxLen = 256
func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (bool, bool, error) {
var revoked bool
var ok bool
var lastErr error
ocspURLs := leaf.OCSPServer
if len(ocspURLs) == 0 {
@@ -272,15 +283,16 @@ func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e
ocspRequest, err := ocsp.CreateRequest(leaf, issuer, &ocspOpts)
if err != nil {
return revoked, ok, err
return false, false, err
}
for _, server := range ocspURLs {
resp, err := sendOCSPRequest(server, ocspRequest, leaf, issuer)
if err != nil {
resp, e := sendOCSPRequest(server, ocspRequest, leaf, issuer)
if e != nil {
if strict {
return revoked, ok, err
return false, false, e
}
lastErr = e
continue
}
@@ -292,9 +304,9 @@ func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e
revoked = true
}
return revoked, ok, err
return revoked, ok, nil
}
return revoked, ok, err
return revoked, ok, lastErr
}
// sendOCSPRequest attempts to request an OCSP response from the
@@ -303,12 +315,21 @@ func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e
func sendOCSPRequest(server string, req []byte, leaf, issuer *x509.Certificate) (*ocsp.Response, error) {
var resp *http.Response
var err error
if len(req) > 256 {
if len(req) > ocspGetURLMaxLen {
buf := bytes.NewBuffer(req)
resp, err = HTTPClient.Post(server, "application/ocsp-request", buf)
httpReq, e := http.NewRequestWithContext(context.Background(), http.MethodPost, server, buf)
if e != nil {
return nil, e
}
httpReq.Header.Set("Content-Type", "application/ocsp-request")
resp, err = HTTPClient.Do(httpReq)
} else {
reqURL := server + "/" + neturl.QueryEscape(base64.StdEncoding.EncodeToString(req))
resp, err = HTTPClient.Get(reqURL)
httpReq, e := http.NewRequestWithContext(context.Background(), http.MethodGet, reqURL, nil)
if e != nil {
return nil, e
}
resp, err = HTTPClient.Do(httpReq)
}
if err != nil {
@@ -343,21 +364,21 @@ func sendOCSPRequest(server string, req []byte, leaf, issuer *x509.Certificate)
var crlRead = io.ReadAll
// SetCRLFetcher sets the function to use to read from the http response body
// SetCRLFetcher sets the function to use to read from the http response body.
func SetCRLFetcher(fn func(io.Reader) ([]byte, error)) {
crlRead = fn
}
var remoteRead = io.ReadAll
// SetRemoteFetcher sets the function to use to read from the http response body
// SetRemoteFetcher sets the function to use to read from the http response body.
func SetRemoteFetcher(fn func(io.Reader) ([]byte, error)) {
remoteRead = fn
}
var ocspRead = io.ReadAll
// SetOCSPFetcher sets the function to use to read from the http response body
// SetOCSPFetcher sets the function to use to read from the http response body.
func SetOCSPFetcher(fn func(io.Reader) ([]byte, error)) {
ocspRead = fn
}

View File

@@ -1,3 +1,4 @@
//nolint:testpackage // keep tests in the same package for internal symbol access
package revoke
import (
@@ -50,7 +51,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// to indicate that this is the case.
// 2014/05/22 14:18:17 Certificate expired 2014-04-04 14:14:20 +0000 UTC
// 2014/05/22 14:18:17 Revoked certificate: misc/intermediate_ca/ActalisServerAuthenticationCA.crt
// 2014/05/22 14:18:17 Revoked certificate: misc/intermediate_ca/ActalisServerAuthenticationCA.crt.
var expiredCert = mustParse(`-----BEGIN CERTIFICATE-----
MIIEXTCCA8agAwIBAgIEBycURTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQGEwJV
UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU
@@ -80,7 +81,7 @@ sESPRwHkcMUNdAp37FLweUw=
// 2014/05/22 14:18:31 Serial number match: intermediate is revoked.
// 2014/05/22 14:18:31 certificate is revoked via CRL
// 2014/05/22 14:18:31 Revoked certificate: misc/intermediate_ca/MobileArmorEnterpriseCA.crt
// 2014/05/22 14:18:31 Revoked certificate: misc/intermediate_ca/MobileArmorEnterpriseCA.crt.
var revokedCert = mustParse(`-----BEGIN CERTIFICATE-----
MIIEEzCCAvugAwIBAgILBAAAAAABGMGjftYwDQYJKoZIhvcNAQEFBQAwcTEoMCYG
A1UEAxMfR2xvYmFsU2lnbiBSb290U2lnbiBQYXJ0bmVycyBDQTEdMBsGA1UECxMU
@@ -106,7 +107,7 @@ Kz5vh+5tmytUPKA8hUgmLWe94lMb7Uqq2wgZKsqun5DAWleKu81w7wEcOrjiiB+x
jeBHq7OnpWm+ccTOPCE6H4ZN4wWVS7biEBUdop/8HgXBPQHWAdjL
-----END CERTIFICATE-----`)
// A Comodo intermediate CA certificate with issuer url, CRL url and OCSP url
// A Comodo intermediate CA certificate with issuer url, CRL url and OCSP url.
var goodComodoCA = (`-----BEGIN CERTIFICATE-----
MIIGCDCCA/CgAwIBAgIQKy5u6tl1NmwUim7bo3yMBzANBgkqhkiG9w0BAQwFADCB
hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
@@ -153,7 +154,7 @@ func mustParse(pemData string) *x509.Certificate {
panic("Invalid PEM type.")
}
cert, err := x509.ParseCertificate([]byte(block.Bytes))
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
panic(err.Error())
}
@@ -182,7 +183,6 @@ func TestGood(t *testing.T) {
} else if revoked {
t.Fatalf("good certificate should not have been marked as revoked")
}
}
func TestLdap(t *testing.T) {
@@ -230,7 +230,6 @@ func TestBadCRLSet(t *testing.T) {
t.Fatalf("key emptystring should be deleted from CRLSet")
}
delete(CRLSet, "")
}
func TestCachedCRLSet(t *testing.T) {
@@ -241,13 +240,11 @@ func TestCachedCRLSet(t *testing.T) {
}
func TestRemoteFetchError(t *testing.T) {
badurl := ":"
if _, err := fetchRemote(badurl); err == nil {
t.Fatalf("fetching bad url should result in non-nil error")
}
}
func TestNoOCSPServers(t *testing.T) {

View File

@@ -1,6 +1,7 @@
package main
import (
"context"
"flag"
"fmt"
"net"
@@ -28,10 +29,16 @@ func connect(addr string, dport string, six bool, timeout time.Duration) error {
if verbose {
fmt.Printf("connecting to %s/%s... ", addr, proto)
os.Stdout.Sync()
if err = os.Stdout.Sync(); err != nil {
return err
}
}
conn, err := net.DialTimeout(proto, addr, timeout)
dialer := &net.Dialer{
Timeout: timeout,
}
conn, err := dialer.DialContext(context.Background(), proto, addr)
if err != nil {
if verbose {
fmt.Println("failed.")
@@ -42,8 +49,8 @@ func connect(addr string, dport string, six bool, timeout time.Duration) error {
if verbose {
fmt.Println("OK")
}
conn.Close()
return nil
return conn.Close()
}
func main() {

View File

@@ -3,6 +3,7 @@ package main
import (
"crypto/x509"
"embed"
"errors"
"fmt"
"os"
"path/filepath"
@@ -14,22 +15,22 @@ import (
// loadCertsFromFile attempts to parse certificates from a file that may be in
// PEM or DER/PKCS#7 format. Returns the parsed certificates or an error.
func loadCertsFromFile(path string) ([]*x509.Certificate, error) {
var certs []*x509.Certificate
data, err := os.ReadFile(path)
if err != nil {
return nil, err
}
// Try PEM first
if certs, err := certlib.ParseCertificatesPEM(data); err == nil {
if certs, err = certlib.ParseCertificatesPEM(data); err == nil {
return certs, nil
}
// Try DER/PKCS7/PKCS12 (with no password)
if certs, _, err := certlib.ParseCertificatesDER(data, ""); err == nil {
if certs, _, err = certlib.ParseCertificatesDER(data, ""); err == nil {
return certs, nil
} else {
return nil, err
}
return nil, err
}
func makePoolFromFile(path string) (*x509.CertPool, error) {
@@ -56,49 +57,50 @@ var embeddedTestdata embed.FS
// loadCertsFromBytes attempts to parse certificates from bytes that may be in
// PEM or DER/PKCS#7 format.
func loadCertsFromBytes(data []byte) ([]*x509.Certificate, error) {
// Try PEM first
if certs, err := certlib.ParseCertificatesPEM(data); err == nil {
certs, err := certlib.ParseCertificatesPEM(data)
if err == nil {
return certs, nil
}
// Try DER/PKCS7/PKCS12 (with no password)
if certs, _, err := certlib.ParseCertificatesDER(data, ""); err == nil {
certs, _, err = certlib.ParseCertificatesDER(data, "")
if err == nil {
return certs, nil
} else {
return nil, err
}
return nil, err
}
func makePoolFromBytes(data []byte) (*x509.CertPool, error) {
certs, err := loadCertsFromBytes(data)
if err != nil || len(certs) == 0 {
return nil, fmt.Errorf("failed to load CA certificates from embedded bytes")
}
pool := x509.NewCertPool()
for _, c := range certs {
pool.AddCert(c)
}
return pool, nil
certs, err := loadCertsFromBytes(data)
if err != nil || len(certs) == 0 {
return nil, errors.New("failed to load CA certificates from embedded bytes")
}
pool := x509.NewCertPool()
for _, c := range certs {
pool.AddCert(c)
}
return pool, nil
}
// isSelfSigned returns true if the given certificate is self-signed.
// It checks that the subject and issuer match and that the certificate's
// signature verifies against its own public key.
func isSelfSigned(cert *x509.Certificate) bool {
if cert == nil {
return false
}
// Quick check: subject and issuer match
if cert.Subject.String() != cert.Issuer.String() {
return false
}
// Cryptographic check: the certificate is signed by itself
if err := cert.CheckSignatureFrom(cert); err != nil {
return false
}
return true
if cert == nil {
return false
}
// Quick check: subject and issuer match
if cert.Subject.String() != cert.Issuer.String() {
return false
}
// Cryptographic check: the certificate is signed by itself
if err := cert.CheckSignatureFrom(cert); err != nil {
return false
}
return true
}
func verifyAgainstCA(caPool *x509.CertPool, path string) (ok bool, expiry string) {
func verifyAgainstCA(caPool *x509.CertPool, path string) (bool, string) {
certs, err := loadCertsFromFile(path)
if err != nil || len(certs) == 0 {
return false, ""
@@ -117,14 +119,14 @@ func verifyAgainstCA(caPool *x509.CertPool, path string) (ok bool, expiry string
Intermediates: ints,
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
}
if _, err := leaf.Verify(opts); err != nil {
if _, err = leaf.Verify(opts); err != nil {
return false, ""
}
return true, leaf.NotAfter.Format("2006-01-02")
}
func verifyAgainstCABytes(caPool *x509.CertPool, certData []byte) (ok bool, expiry string) {
func verifyAgainstCABytes(caPool *x509.CertPool, certData []byte) (bool, string) {
certs, err := loadCertsFromBytes(certData)
if err != nil || len(certs) == 0 {
return false, ""
@@ -143,92 +145,159 @@ func verifyAgainstCABytes(caPool *x509.CertPool, certData []byte) (ok bool, expi
Intermediates: ints,
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
}
if _, err := leaf.Verify(opts); err != nil {
if _, err = leaf.Verify(opts); err != nil {
return false, ""
}
return true, leaf.NotAfter.Format("2006-01-02")
}
// selftest runs built-in validation using embedded certificates.
func selftest() int {
type testCase struct {
name string
caFile string
certFile string
expectOK bool
type testCase struct {
name string
caFile string
certFile string
expectOK bool
}
func (tc testCase) Run() error {
caBytes, err := embeddedTestdata.ReadFile(tc.caFile)
if err != nil {
return fmt.Errorf("selftest: failed to read embedded %s: %w", tc.caFile, err)
}
cases := []testCase{
{name: "ISRG Root X1 validates LE E7", caFile: "testdata/isrg-root-x1.pem", certFile: "testdata/le-e7.pem", expectOK: true},
{name: "ISRG Root X1 does NOT validate Google WR2", caFile: "testdata/isrg-root-x1.pem", certFile: "testdata/goog-wr2.pem", expectOK: false},
{name: "GTS R1 validates Google WR2", caFile: "testdata/gts-r1.pem", certFile: "testdata/goog-wr2.pem", expectOK: true},
{name: "GTS R1 does NOT validate LE E7", caFile: "testdata/gts-r1.pem", certFile: "testdata/le-e7.pem", expectOK: false},
}
certBytes, err := embeddedTestdata.ReadFile(tc.certFile)
if err != nil {
return fmt.Errorf("selftest: failed to read embedded %s: %w", tc.certFile, err)
}
failures := 0
for _, tc := range cases {
caBytes, err := embeddedTestdata.ReadFile(tc.caFile)
pool, err := makePoolFromBytes(caBytes)
if err != nil || pool == nil {
return fmt.Errorf("selftest: failed to build CA pool for %s: %w", tc.caFile, err)
}
ok, exp := verifyAgainstCABytes(pool, certBytes)
if ok != tc.expectOK {
return fmt.Errorf("%s: unexpected result: got %v, want %v", tc.name, ok, tc.expectOK)
}
if ok {
fmt.Printf("%s: OK (expires %s)\n", tc.name, exp)
}
fmt.Printf("%s: INVALID (as expected)\n", tc.name)
return nil
}
var cases = []testCase{
{
name: "ISRG Root X1 validates LE E7",
caFile: "testdata/isrg-root-x1.pem",
certFile: "testdata/le-e7.pem",
expectOK: true,
},
{
name: "ISRG Root X1 does NOT validate Google WR2",
caFile: "testdata/isrg-root-x1.pem",
certFile: "testdata/goog-wr2.pem",
expectOK: false,
},
{
name: "GTS R1 validates Google WR2",
caFile: "testdata/gts-r1.pem",
certFile: "testdata/goog-wr2.pem",
expectOK: true,
},
{
name: "GTS R1 does NOT validate LE E7",
caFile: "testdata/gts-r1.pem",
certFile: "testdata/le-e7.pem",
expectOK: false,
},
}
// selftest runs built-in validation using embedded certificates.
func selftest() int {
failures := 0
for _, tc := range cases {
err := tc.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", tc.caFile, err)
fmt.Fprintln(os.Stderr, err)
failures++
continue
}
certBytes, err := embeddedTestdata.ReadFile(tc.certFile)
}
// Verify that both embedded root CAs are detected as self-signed
roots := []string{"testdata/gts-r1.pem", "testdata/isrg-root-x1.pem"}
for _, root := range roots {
b, err := embeddedTestdata.ReadFile(root)
if err != nil {
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", tc.certFile, err)
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", root, err)
failures++
continue
}
pool, err := makePoolFromBytes(caBytes)
if err != nil || pool == nil {
fmt.Fprintf(os.Stderr, "selftest: failed to build CA pool for %s: %v\n", tc.caFile, err)
certs, err := loadCertsFromBytes(b)
if err != nil || len(certs) == 0 {
fmt.Fprintf(os.Stderr, "selftest: failed to parse cert(s) from %s: %v\n", root, err)
failures++
continue
}
ok, exp := verifyAgainstCABytes(pool, certBytes)
if ok != tc.expectOK {
fmt.Printf("%s: unexpected result: got %v, want %v\n", tc.name, ok, tc.expectOK)
failures++
leaf := certs[0]
if isSelfSigned(leaf) {
fmt.Printf("%s: SELF-SIGNED (as expected)\n", root)
} else {
if ok {
fmt.Printf("%s: OK (expires %s)\n", tc.name, exp)
} else {
fmt.Printf("%s: INVALID (as expected)\n", tc.name)
}
fmt.Printf("%s: expected SELF-SIGNED, but was not detected as such\n", root)
failures++
}
}
}
// Verify that both embedded root CAs are detected as self-signed
roots := []string{"testdata/gts-r1.pem", "testdata/isrg-root-x1.pem"}
for _, root := range roots {
b, err := embeddedTestdata.ReadFile(root)
if err != nil {
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", root, err)
failures++
continue
}
certs, err := loadCertsFromBytes(b)
if err != nil || len(certs) == 0 {
fmt.Fprintf(os.Stderr, "selftest: failed to parse cert(s) from %s: %v\n", root, err)
failures++
continue
}
leaf := certs[0]
if isSelfSigned(leaf) {
fmt.Printf("%s: SELF-SIGNED (as expected)\n", root)
} else {
fmt.Printf("%s: expected SELF-SIGNED, but was not detected as such\n", root)
failures++
}
}
if failures == 0 {
fmt.Println("selftest: PASS")
return 0
}
fmt.Fprintf(os.Stderr, "selftest: FAIL (%d failure(s))\n", failures)
return 1
}
if failures == 0 {
fmt.Println("selftest: PASS")
return 0
}
fmt.Fprintf(os.Stderr, "selftest: FAIL (%d failure(s))\n", failures)
return 1
// expiryString returns a YYYY-MM-DD date string to display for certificate
// expiry. If an explicit exp string is provided, it is used. Otherwise, if a
// leaf certificate is available, its NotAfter is formatted. As a last resort,
// it falls back to today's date (should not normally happen).
func expiryString(leaf *x509.Certificate, exp string) string {
if exp != "" {
return exp
}
if leaf != nil {
return leaf.NotAfter.Format("2006-01-02")
}
return time.Now().Format("2006-01-02")
}
// processCert verifies a single certificate file against the provided CA pool
// and prints the result in the required format, handling self-signed
// certificates specially.
func processCert(caPool *x509.CertPool, certPath string) {
ok, exp := verifyAgainstCA(caPool, certPath)
name := filepath.Base(certPath)
// Try to load the leaf cert for self-signed detection and expiry fallback
var leaf *x509.Certificate
if certs, err := loadCertsFromFile(certPath); err == nil && len(certs) > 0 {
leaf = certs[0]
}
// Prefer the SELF-SIGNED label if applicable
if isSelfSigned(leaf) {
fmt.Printf("%s: SELF-SIGNED\n", name)
return
}
if ok {
fmt.Printf("%s: OK (expires %s)\n", name, expiryString(leaf, exp))
return
}
fmt.Printf("%s: INVALID\n", name)
}
func main() {
@@ -250,38 +319,7 @@ func main() {
os.Exit(1)
}
for _, certPath := range os.Args[2:] {
ok, exp := verifyAgainstCA(caPool, certPath)
name := filepath.Base(certPath)
// Load the leaf once for self-signed detection and potential expiry fallback
var leaf *x509.Certificate
if certs, err := loadCertsFromFile(certPath); err == nil && len(certs) > 0 {
leaf = certs[0]
}
// If the certificate is self-signed, prefer the SELF-SIGNED label
if isSelfSigned(leaf) {
fmt.Printf("%s: SELF-SIGNED\n", name)
continue
}
if ok {
// Display with the requested format
// Example: file: OK (expires 2031-01-01)
// Ensure deterministic date formatting
// Note: no timezone displayed; date only as per example
// If exp ended up empty for some reason, recompute safely
if exp == "" {
if leaf != nil {
exp = leaf.NotAfter.Format("2006-01-02")
} else {
// fallback to the current date to avoid empty; though shouldn't happen
exp = time.Now().Format("2006-01-02")
}
}
fmt.Printf("%s: OK (expires %s)\n", name, exp)
} else {
fmt.Printf("%s: INVALID\n", name)
}
}
for _, certPath := range os.Args[2:] {
processCert(caPool, certPath)
}
}

View File

@@ -8,8 +8,10 @@ import (
"crypto/x509"
_ "embed"
"encoding/pem"
"errors"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"strings"
@@ -19,7 +21,7 @@ import (
"gopkg.in/yaml.v2"
)
// Config represents the top-level YAML configuration
// Config represents the top-level YAML configuration.
type Config struct {
Config struct {
Hashes string `yaml:"hashes"`
@@ -28,19 +30,19 @@ type Config struct {
Chains map[string]ChainGroup `yaml:"chains"`
}
// ChainGroup represents a named group of certificate chains
// ChainGroup represents a named group of certificate chains.
type ChainGroup struct {
Certs []CertChain `yaml:"certs"`
Outputs Outputs `yaml:"outputs"`
}
// CertChain represents a root certificate and its intermediates
// CertChain represents a root certificate and its intermediates.
type CertChain struct {
Root string `yaml:"root"`
Intermediates []string `yaml:"intermediates"`
}
// Outputs defines output format options
// Outputs defines output format options.
type Outputs struct {
IncludeSingle bool `yaml:"include_single"`
IncludeIndividual bool `yaml:"include_individual"`
@@ -95,7 +97,8 @@ func main() {
}
// Create output directory if it doesn't exist
if err := os.MkdirAll(outputDir, 0755); err != nil {
err = os.MkdirAll(outputDir, 0750)
if err != nil {
fmt.Fprintf(os.Stderr, "Error creating output directory: %v\n", err)
os.Exit(1)
}
@@ -108,9 +111,9 @@ func main() {
}
createdFiles := make([]string, 0, totalFormats)
for groupName, group := range cfg.Chains {
files, err := processChainGroup(groupName, group, expiryDuration)
if err != nil {
fmt.Fprintf(os.Stderr, "Error processing chain group %s: %v\n", groupName, err)
files, perr := processChainGroup(groupName, group, expiryDuration)
if perr != nil {
fmt.Fprintf(os.Stderr, "Error processing chain group %s: %v\n", groupName, perr)
os.Exit(1)
}
createdFiles = append(createdFiles, files...)
@@ -119,8 +122,8 @@ func main() {
// Generate hash file for all created archives
if cfg.Config.Hashes != "" {
hashFile := filepath.Join(outputDir, cfg.Config.Hashes)
if err := generateHashFile(hashFile, createdFiles); err != nil {
fmt.Fprintf(os.Stderr, "Error generating hash file: %v\n", err)
if gerr := generateHashFile(hashFile, createdFiles); gerr != nil {
fmt.Fprintf(os.Stderr, "Error generating hash file: %v\n", gerr)
os.Exit(1)
}
}
@@ -135,8 +138,8 @@ func loadConfig(path string) (*Config, error) {
}
var cfg Config
if err := yaml.Unmarshal(data, &cfg); err != nil {
return nil, err
if uerr := yaml.Unmarshal(data, &cfg); uerr != nil {
return nil, uerr
}
return &cfg, nil
@@ -200,72 +203,107 @@ func processChainGroup(groupName string, group ChainGroup, expiryDuration time.D
return createdFiles, nil
}
// loadAndCollectCerts loads all certificates from chains and collects them for processing
func loadAndCollectCerts(chains []CertChain, outputs Outputs, expiryDuration time.Duration) ([]*x509.Certificate, []certWithPath, error) {
// loadAndCollectCerts loads all certificates from chains and collects them for processing.
func loadAndCollectCerts(
chains []CertChain,
outputs Outputs,
expiryDuration time.Duration,
) ([]*x509.Certificate, []certWithPath, error) {
var singleFileCerts []*x509.Certificate
var individualCerts []certWithPath
for _, chain := range chains {
// Load root certificate
rootCert, err := certlib.LoadCertificate(chain.Root)
if err != nil {
return nil, nil, fmt.Errorf("failed to load root certificate %s: %v", chain.Root, err)
s, i, cerr := collectFromChain(chain, outputs, expiryDuration)
if cerr != nil {
return nil, nil, cerr
}
// Check expiry for root
checkExpiry(chain.Root, rootCert, expiryDuration)
// Add root to collections if needed
if outputs.IncludeSingle {
singleFileCerts = append(singleFileCerts, rootCert)
if len(s) > 0 {
singleFileCerts = append(singleFileCerts, s...)
}
if outputs.IncludeIndividual {
individualCerts = append(individualCerts, certWithPath{
cert: rootCert,
path: chain.Root,
})
}
// Load and validate intermediates
for _, intPath := range chain.Intermediates {
intCert, err := certlib.LoadCertificate(intPath)
if err != nil {
return nil, nil, fmt.Errorf("failed to load intermediate certificate %s: %v", intPath, err)
}
// Validate that intermediate is signed by root
if err := intCert.CheckSignatureFrom(rootCert); err != nil {
return nil, nil, fmt.Errorf("intermediate %s is not properly signed by root %s: %v", intPath, chain.Root, err)
}
// Check expiry for intermediate
checkExpiry(intPath, intCert, expiryDuration)
// Add intermediate to collections if needed
if outputs.IncludeSingle {
singleFileCerts = append(singleFileCerts, intCert)
}
if outputs.IncludeIndividual {
individualCerts = append(individualCerts, certWithPath{
cert: intCert,
path: intPath,
})
}
if len(i) > 0 {
individualCerts = append(individualCerts, i...)
}
}
return singleFileCerts, individualCerts, nil
}
// prepareArchiveFiles prepares all files to be included in archives
func prepareArchiveFiles(singleFileCerts []*x509.Certificate, individualCerts []certWithPath, outputs Outputs, encoding string) ([]fileEntry, error) {
// collectFromChain loads a single chain, performs checks, and returns the certs to include.
func collectFromChain(
	chain CertChain,
	outputs Outputs,
	expiryDuration time.Duration,
) (
	[]*x509.Certificate,
	[]certWithPath,
	error,
) {
	var bundle []*x509.Certificate
	var individual []certWithPath

	// record adds a verified certificate to whichever output collections
	// the group configuration asks for.
	record := func(cert *x509.Certificate, path string) {
		if outputs.IncludeSingle {
			bundle = append(bundle, cert)
		}
		if outputs.IncludeIndividual {
			individual = append(individual, certWithPath{cert: cert, path: path})
		}
	}

	// The root anchors the chain: load it first and warn if it is near expiry.
	rootCert, err := certlib.LoadCertificate(chain.Root)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to load root certificate %s: %w", chain.Root, err)
	}
	checkExpiry(chain.Root, rootCert, expiryDuration)
	record(rootCert, chain.Root)

	// Every intermediate must be directly signed by the root.
	for _, intPath := range chain.Intermediates {
		intCert, err := certlib.LoadCertificate(intPath)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to load intermediate certificate %s: %w", intPath, err)
		}
		if sigErr := intCert.CheckSignatureFrom(rootCert); sigErr != nil {
			return nil, nil, fmt.Errorf(
				"intermediate %s is not properly signed by root %s: %w",
				intPath,
				chain.Root,
				sigErr,
			)
		}
		checkExpiry(intPath, intCert, expiryDuration)
		record(intCert, intPath)
	}

	return bundle, individual, nil
}
// prepareArchiveFiles prepares all files to be included in archives.
func prepareArchiveFiles(
singleFileCerts []*x509.Certificate,
individualCerts []certWithPath,
outputs Outputs,
encoding string,
) ([]fileEntry, error) {
var archiveFiles []fileEntry
// Handle a single bundle file
if outputs.IncludeSingle && len(singleFileCerts) > 0 {
files, err := encodeCertsToFiles(singleFileCerts, "bundle", encoding, true)
if err != nil {
return nil, fmt.Errorf("failed to encode single bundle: %v", err)
return nil, fmt.Errorf("failed to encode single bundle: %w", err)
}
archiveFiles = append(archiveFiles, files...)
}
@@ -276,7 +314,7 @@ func prepareArchiveFiles(singleFileCerts []*x509.Certificate, individualCerts []
baseName := strings.TrimSuffix(filepath.Base(cp.path), filepath.Ext(cp.path))
files, err := encodeCertsToFiles([]*x509.Certificate{cp.cert}, baseName, encoding, false)
if err != nil {
return nil, fmt.Errorf("failed to encode individual cert %s: %v", cp.path, err)
return nil, fmt.Errorf("failed to encode individual cert %s: %w", cp.path, err)
}
archiveFiles = append(archiveFiles, files...)
}
@@ -294,7 +332,7 @@ func prepareArchiveFiles(singleFileCerts []*x509.Certificate, individualCerts []
return archiveFiles, nil
}
// createArchiveFiles creates archive files in the specified formats
// createArchiveFiles creates archive files in the specified formats.
func createArchiveFiles(groupName string, formats []string, archiveFiles []fileEntry) ([]string, error) {
createdFiles := make([]string, 0, len(formats))
@@ -307,11 +345,11 @@ func createArchiveFiles(groupName string, formats []string, archiveFiles []fileE
switch format {
case "zip":
if err := createZipArchive(archivePath, archiveFiles); err != nil {
return nil, fmt.Errorf("failed to create zip archive: %v", err)
return nil, fmt.Errorf("failed to create zip archive: %w", err)
}
case "tgz":
if err := createTarGzArchive(archivePath, archiveFiles); err != nil {
return nil, fmt.Errorf("failed to create tar.gz archive: %v", err)
return nil, fmt.Errorf("failed to create tar.gz archive: %w", err)
}
default:
return nil, fmt.Errorf("unsupported format: %s", format)
@@ -329,7 +367,12 @@ func checkExpiry(path string, cert *x509.Certificate, expiryDuration time.Durati
if cert.NotAfter.Before(expiryThreshold) {
daysUntilExpiry := int(cert.NotAfter.Sub(now).Hours() / 24)
if daysUntilExpiry < 0 {
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s has EXPIRED (expired %d days ago)\n", path, -daysUntilExpiry)
fmt.Fprintf(
os.Stderr,
"WARNING: Certificate %s has EXPIRED (expired %d days ago)\n",
path,
-daysUntilExpiry,
)
} else {
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s will expire in %d days (on %s)\n", path, daysUntilExpiry, cert.NotAfter.Format("2006-01-02"))
}
@@ -347,8 +390,13 @@ type certWithPath struct {
}
// encodeCertsToFiles converts certificates to file entries based on encoding type
// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file
func encodeCertsToFiles(certs []*x509.Certificate, baseName string, encoding string, isSingle bool) ([]fileEntry, error) {
// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file.
func encodeCertsToFiles(
certs []*x509.Certificate,
baseName string,
encoding string,
isSingle bool,
) ([]fileEntry, error) {
var files []fileEntry
switch encoding {
@@ -369,14 +417,12 @@ func encodeCertsToFiles(certs []*x509.Certificate, baseName string, encoding str
name: baseName + ".crt",
content: derContent,
})
} else {
} else if len(certs) > 0 {
// Individual DER file (should only have one cert)
if len(certs) > 0 {
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
case "both":
// Add PEM version
@@ -395,13 +441,11 @@ func encodeCertsToFiles(certs []*x509.Certificate, baseName string, encoding str
name: baseName + ".crt",
content: derContent,
})
} else {
if len(certs) > 0 {
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
} else if len(certs) > 0 {
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
default:
return nil, fmt.Errorf("unsupported encoding: %s (must be 'pem', 'der', or 'both')", encoding)
@@ -410,7 +454,7 @@ func encodeCertsToFiles(certs []*x509.Certificate, baseName string, encoding str
return files, nil
}
// encodeCertsToPEM encodes certificates to PEM format
// encodeCertsToPEM encodes certificates to PEM format.
func encodeCertsToPEM(certs []*x509.Certificate) []byte {
var pemContent []byte
for _, cert := range certs {
@@ -435,40 +479,49 @@ func generateManifest(files []fileEntry) []byte {
return []byte(manifest.String())
}
// closeWithErr closes every non-nil closer in order, folding any close
// failures into baseErr with errors.Join. The accumulated error is
// returned; it is nil only when baseErr was nil and every close succeeded.
func closeWithErr(baseErr error, closers ...io.Closer) error {
	result := baseErr
	for i := range closers {
		closer := closers[i]
		if closer == nil {
			continue
		}
		if closeErr := closer.Close(); closeErr != nil {
			result = errors.Join(result, closeErr)
		}
	}
	return result
}
func createZipArchive(path string, files []fileEntry) error {
f, err := os.Create(path)
if err != nil {
return err
f, zerr := os.Create(path)
if zerr != nil {
return zerr
}
w := zip.NewWriter(f)
for _, file := range files {
fw, err := w.Create(file.name)
if err != nil {
w.Close()
f.Close()
return err
fw, werr := w.Create(file.name)
if werr != nil {
return closeWithErr(werr, w, f)
}
if _, err := fw.Write(file.content); err != nil {
w.Close()
f.Close()
return err
if _, werr = fw.Write(file.content); werr != nil {
return closeWithErr(werr, w, f)
}
}
// Check errors on close operations
if err := w.Close(); err != nil {
f.Close()
return err
if cerr := w.Close(); cerr != nil {
_ = f.Close()
return cerr
}
return f.Close()
}
func createTarGzArchive(path string, files []fileEntry) error {
f, err := os.Create(path)
if err != nil {
return err
f, terr := os.Create(path)
if terr != nil {
return terr
}
gw := gzip.NewWriter(f)
@@ -480,29 +533,23 @@ func createTarGzArchive(path string, files []fileEntry) error {
Mode: 0644,
Size: int64(len(file.content)),
}
if err := tw.WriteHeader(hdr); err != nil {
tw.Close()
gw.Close()
f.Close()
return err
if herr := tw.WriteHeader(hdr); herr != nil {
return closeWithErr(herr, tw, gw, f)
}
if _, err := tw.Write(file.content); err != nil {
tw.Close()
gw.Close()
f.Close()
return err
if _, werr := tw.Write(file.content); werr != nil {
return closeWithErr(werr, tw, gw, f)
}
}
// Check errors on close operations in the correct order
if err := tw.Close(); err != nil {
gw.Close()
f.Close()
return err
if cerr := tw.Close(); cerr != nil {
_ = gw.Close()
_ = f.Close()
return cerr
}
if err := gw.Close(); err != nil {
f.Close()
return err
if cerr := gw.Close(); cerr != nil {
_ = f.Close()
return cerr
}
return f.Close()
}
@@ -515,9 +562,9 @@ func generateHashFile(path string, files []string) error {
defer f.Close()
for _, file := range files {
data, err := os.ReadFile(file)
if err != nil {
return err
data, rerr := os.ReadFile(file)
if rerr != nil {
return rerr
}
hash := sha256.Sum256(data)

View File

@@ -1,70 +1,68 @@
package main
import (
"compress/flate"
"compress/gzip"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/pkg/errors"
"compress/flate"
"compress/gzip"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"strings"
)
const gzipExt = ".gz"
// compress gzip-compresses the file at path into target using the given
// gzip compression level (see gzip.NewWriterLevel for valid levels).
// It returns an error if either file cannot be opened, the level is
// invalid, or the copy or final flush fails.
//
// Two fixes over the SOURCE span: (1) the span contained interleaved
// pre-/post-refactor copies of each statement (a diff rendered without
// +/- markers) and is reconstructed here; (2) the gzip writer's Close
// error was previously discarded by a defer — Close flushes buffered
// compressed data, so ignoring it could report success for a truncated
// archive. It is now checked explicitly.
func compress(path, target string, level int) error {
	sourceFile, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("opening file for read: %w", err)
	}
	defer sourceFile.Close()

	destFile, err := os.Create(target)
	if err != nil {
		return fmt.Errorf("opening file for write: %w", err)
	}
	defer destFile.Close()

	gzipCompressor, err := gzip.NewWriterLevel(destFile, level)
	if err != nil {
		return fmt.Errorf("invalid compression level: %w", err)
	}

	if _, err = io.Copy(gzipCompressor, sourceFile); err != nil {
		_ = gzipCompressor.Close()
		return fmt.Errorf("compressing file: %w", err)
	}

	// Close flushes any buffered compressed data; an ignored error here
	// could silently produce a truncated archive.
	if err = gzipCompressor.Close(); err != nil {
		return fmt.Errorf("flushing compressed data: %w", err)
	}

	return nil
}
// uncompress reads the gzip-compressed file at path and writes the
// decompressed contents to target. It returns an error if the source
// cannot be opened, is not valid gzip data, the target cannot be
// created, or the copy fails.
//
// NOTE: this span in SOURCE contained interleaved pre- and post-refactor
// copies of the same statements (a diff rendered without +/- markers);
// this is the reconstructed, syntactically valid post-change version.
//
// NOTE(review): io.Copy is unbounded here, so a malicious archive can
// expand arbitrarily (decompression bomb) — acceptable for a local CLI
// tool, but confirm if this ever processes untrusted input.
func uncompress(path, target string) error {
	sourceFile, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("opening file for read: %w", err)
	}
	defer sourceFile.Close()

	gzipUncompressor, err := gzip.NewReader(sourceFile)
	if err != nil {
		return fmt.Errorf("reading gzip headers: %w", err)
	}
	defer gzipUncompressor.Close()

	destFile, err := os.Create(target)
	if err != nil {
		return fmt.Errorf("opening file for write: %w", err)
	}
	defer destFile.Close()

	_, err = io.Copy(destFile, gzipUncompressor)
	if err != nil {
		return fmt.Errorf("uncompressing file: %w", err)
	}

	return nil
}
@@ -108,9 +106,9 @@ func pathForUncompressing(source, dest string) (string, error) {
}
source = filepath.Base(source)
if !strings.HasSuffix(source, gzipExt) {
return "", errors.Errorf("%s is a not gzip-compressed file", source)
}
if !strings.HasSuffix(source, gzipExt) {
return "", fmt.Errorf("%s is a not gzip-compressed file", source)
}
outFile := source[:len(source)-len(gzipExt)]
outFile = filepath.Join(dest, outFile)
return outFile, nil
@@ -122,9 +120,9 @@ func pathForCompressing(source, dest string) (string, error) {
}
source = filepath.Base(source)
if strings.HasSuffix(source, gzipExt) {
return "", errors.Errorf("%s is a gzip-compressed file", source)
}
if strings.HasSuffix(source, gzipExt) {
return "", fmt.Errorf("%s is a gzip-compressed file", source)
}
dest = filepath.Join(dest, source+gzipExt)
return dest, nil

3
go.mod
View File

@@ -5,7 +5,6 @@ go 1.24.0
require (
github.com/hashicorp/go-syslog v1.0.0
github.com/kr/text v0.2.0
github.com/pkg/errors v0.9.1
github.com/pkg/sftp v1.12.0
golang.org/x/crypto v0.44.0
golang.org/x/sys v0.38.0
@@ -13,6 +12,7 @@ require (
)
require (
github.com/benbjohnson/clock v1.3.5
github.com/davecgh/go-spew v1.1.1
github.com/google/certificate-transparency-go v1.0.21
)
@@ -20,5 +20,6 @@ require (
require (
github.com/kr/fs v0.1.0 // indirect
github.com/kr/pretty v0.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
)

8
go.sum
View File

@@ -1,3 +1,5 @@
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -25,19 +27,15 @@ github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b h1:Qwe1rC8PSniVfAFPFJeyUkB+zcysC3RgJBAGk7eqBEU=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=

View File

@@ -1,4 +1,4 @@
// Package syslog is a syslog-type facility for logging.
// Package log is a syslog-type facility for logging.
package log
import (
@@ -17,7 +17,7 @@ type logger struct {
writeConsole bool
}
func (log *logger) printf(p gsyslog.Priority, format string, args ...interface{}) {
func (log *logger) printf(p gsyslog.Priority, format string, args ...any) {
if !strings.HasSuffix(format, "\n") {
format += "\n"
}
@@ -28,33 +28,33 @@ func (log *logger) printf(p gsyslog.Priority, format string, args ...interface{}
}
if log.l != nil {
log.l.WriteLevel(p, []byte(fmt.Sprintf(format, args...)))
_ = log.l.WriteLevel(p, fmt.Appendf(nil, format, args...))
}
}
func (log *logger) print(p gsyslog.Priority, args ...interface{}) {
func (log *logger) print(p gsyslog.Priority, args ...any) {
if p <= log.p && log.writeConsole {
fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
fmt.Print(args...)
}
if log.l != nil {
log.l.WriteLevel(p, []byte(fmt.Sprint(args...)))
_ = log.l.WriteLevel(p, fmt.Append(nil, args...))
}
}
func (log *logger) println(p gsyslog.Priority, args ...interface{}) {
func (log *logger) println(p gsyslog.Priority, args ...any) {
if p <= log.p && log.writeConsole {
fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
fmt.Println(args...)
}
if log.l != nil {
log.l.WriteLevel(p, []byte(fmt.Sprintln(args...)))
_ = log.l.WriteLevel(p, fmt.Appendln(nil, args...))
}
}
func (log *logger) spew(args ...interface{}) {
func (log *logger) spew(args ...any) {
if log.p == gsyslog.LOG_DEBUG {
spew.Dump(args...)
}
@@ -160,109 +160,109 @@ func Setup(opts *Options) error {
return nil
}
func Debug(args ...interface{}) {
func Debug(args ...any) {
log.print(gsyslog.LOG_DEBUG, args...)
}
func Info(args ...interface{}) {
func Info(args ...any) {
log.print(gsyslog.LOG_INFO, args...)
}
func Notice(args ...interface{}) {
func Notice(args ...any) {
log.print(gsyslog.LOG_NOTICE, args...)
}
func Warning(args ...interface{}) {
func Warning(args ...any) {
log.print(gsyslog.LOG_WARNING, args...)
}
func Err(args ...interface{}) {
func Err(args ...any) {
log.print(gsyslog.LOG_ERR, args...)
}
func Crit(args ...interface{}) {
func Crit(args ...any) {
log.print(gsyslog.LOG_CRIT, args...)
}
func Alert(args ...interface{}) {
func Alert(args ...any) {
log.print(gsyslog.LOG_ALERT, args...)
}
func Emerg(args ...interface{}) {
func Emerg(args ...any) {
log.print(gsyslog.LOG_EMERG, args...)
}
func Debugln(args ...interface{}) {
func Debugln(args ...any) {
log.println(gsyslog.LOG_DEBUG, args...)
}
func Infoln(args ...interface{}) {
func Infoln(args ...any) {
log.println(gsyslog.LOG_INFO, args...)
}
func Noticeln(args ...interface{}) {
func Noticeln(args ...any) {
log.println(gsyslog.LOG_NOTICE, args...)
}
func Warningln(args ...interface{}) {
func Warningln(args ...any) {
log.print(gsyslog.LOG_WARNING, args...)
}
func Errln(args ...interface{}) {
func Errln(args ...any) {
log.println(gsyslog.LOG_ERR, args...)
}
func Critln(args ...interface{}) {
func Critln(args ...any) {
log.println(gsyslog.LOG_CRIT, args...)
}
func Alertln(args ...interface{}) {
func Alertln(args ...any) {
log.println(gsyslog.LOG_ALERT, args...)
}
func Emergln(args ...interface{}) {
func Emergln(args ...any) {
log.println(gsyslog.LOG_EMERG, args...)
}
func Debugf(format string, args ...interface{}) {
func Debugf(format string, args ...any) {
log.printf(gsyslog.LOG_DEBUG, format, args...)
}
func Infof(format string, args ...interface{}) {
func Infof(format string, args ...any) {
log.printf(gsyslog.LOG_INFO, format, args...)
}
func Noticef(format string, args ...interface{}) {
func Noticef(format string, args ...any) {
log.printf(gsyslog.LOG_NOTICE, format, args...)
}
func Warningf(format string, args ...interface{}) {
func Warningf(format string, args ...any) {
log.printf(gsyslog.LOG_WARNING, format, args...)
}
func Errf(format string, args ...interface{}) {
func Errf(format string, args ...any) {
log.printf(gsyslog.LOG_ERR, format, args...)
}
func Critf(format string, args ...interface{}) {
func Critf(format string, args ...any) {
log.printf(gsyslog.LOG_CRIT, format, args...)
}
func Alertf(format string, args ...interface{}) {
func Alertf(format string, args ...any) {
log.printf(gsyslog.LOG_ALERT, format, args...)
}
func Emergf(format string, args ...interface{}) {
func Emergf(format string, args ...any) {
log.printf(gsyslog.LOG_EMERG, format, args...)
os.Exit(1)
}
func Fatal(args ...interface{}) {
func Fatal(args ...any) {
log.println(gsyslog.LOG_ERR, args...)
os.Exit(1)
}
func Fatalf(format string, args ...interface{}) {
func Fatalf(format string, args ...any) {
log.printf(gsyslog.LOG_ERR, format, args...)
os.Exit(1)
}
@@ -279,7 +279,7 @@ func FatalError(err error, message string) {
}
// Spew will pretty print the args if the logger is set to DEBUG priority.
func Spew(args ...interface{}) {
func Spew(args ...any) {
log.spew(args...)
}

View File

@@ -2,14 +2,13 @@
// consist of timestamps, an actor and event string, and a mapping of
// string key-value attribute pairs. For example,
//
// log.Error("serialiser", "failed to open file",
// map[string]string{
// "error": err.Error(),
// "path": "data.bin",
// })
// log.Error("serialiser", "failed to open file",
// map[string]string{
// "error": err.Error(),
// "path": "data.bin",
// })
//
// This produces the output message
//
// [2016-04-01T15:04:30-0700] [ERROR] [actor:serialiser event:failed to open file] error=is a directory path=data.bin
//
// [2016-04-01T15:04:30-0700] [ERROR] [actor:serialiser event:failed to open file] error=is a directory path=data.bin
package logging

View File

@@ -25,8 +25,8 @@ func main() {
log.Info("example", "filelog test", nil)
exampleNewFromFile()
os.Remove("example.log")
os.Remove("example.err")
_ = os.Remove("example.log")
_ = os.Remove("example.err")
}
func exampleNewFromFile() {

View File

@@ -1,6 +1,9 @@
package logging
import "os"
import (
"fmt"
"os"
)
// File writes its logs to file.
type File struct {
@@ -8,22 +11,6 @@ type File struct {
*LogWriter
}
// Close calls close on the underlying log files.
func (fl *File) Close() error {
if fl.fo != nil {
if err := fl.fo.Close(); err != nil {
return err
}
fl.fo = nil
}
if fl.fe != nil {
return fl.fe.Close()
}
return nil
}
// NewFile creates a new Logger that writes all logs to the file
// specified by path. If overwrite is specified, the log file will be
// truncated before writing. Otherwise, the log file will be appended
@@ -36,7 +23,7 @@ func NewFile(path string, overwrite bool) (*File, error) {
if overwrite {
fl.fo, err = os.Create(path)
} else {
fl.fo, err = os.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0644)
fl.fo, err = os.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0600) // #nosec G302
}
if err != nil {
@@ -59,7 +46,7 @@ func NewSplitFile(outpath, errpath string, overwrite bool) (*File, error) {
if overwrite {
fl.fo, err = os.Create(outpath)
} else {
fl.fo, err = os.OpenFile(outpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
fl.fo, err = os.OpenFile(outpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
}
if err != nil {
@@ -69,14 +56,51 @@ func NewSplitFile(outpath, errpath string, overwrite bool) (*File, error) {
if overwrite {
fl.fe, err = os.Create(errpath)
} else {
fl.fe, err = os.OpenFile(errpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
fl.fe, err = os.OpenFile(errpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
}
if err != nil {
fl.Close()
return nil, err
}
if err != nil {
if closeErr := fl.Close(); closeErr != nil {
return nil, fmt.Errorf("failed to open error log: cleanup close failed: %v: %w", closeErr, err)
}
return nil, err
}
fl.LogWriter = NewLogWriter(fl.fo, fl.fe)
return fl, nil
}
// Close calls close on the underlying log files.
//
// The output file (fo) is closed first and, on success, set to nil so a
// second Close cannot re-close it. If closing fo fails, that error is
// returned immediately and fo is left non-nil.
// NOTE(review): fe is not cleared after a successful close, so a second
// Close may re-close the error log — confirm whether callers (e.g. the
// NewSplitFile cleanup path) rely on Close being fully idempotent.
func (fl *File) Close() error {
	// Close and clear the output log first.
	if fl.fo != nil {
		if err := fl.fo.Close(); err != nil {
			return err
		}
		fl.fo = nil
	}
	// Close the error log if one was opened (presumably nil for
	// single-file loggers — can't tell from here; verify against NewFile).
	if fl.fe != nil {
		return fl.fe.Close()
	}
	return nil
}
// Flush syncs both underlying log files to stable storage, output log
// first; the first failure stops the sequence and is returned.
// NOTE(review): assumes both fo and fe are non-nil — confirm that the
// single-file constructor also populates fe before relying on Flush.
func (fl *File) Flush() error {
	err := fl.fo.Sync()
	if err == nil {
		err = fl.fe.Sync()
	}
	return err
}
// Chmod changes the permission bits on both underlying log files,
// output log first. The first failure aborts and is returned wrapped
// with which file was being changed.
func (fl *File) Chmod(mode os.FileMode) error {
	if chmodErr := fl.fo.Chmod(mode); chmodErr != nil {
		return fmt.Errorf("failed to chmod output log: %w", chmodErr)
	}

	if chmodErr := fl.fe.Chmod(mode); chmodErr != nil {
		return fmt.Errorf("failed to chmod error log: %w", chmodErr)
	}

	return nil
}

View File

@@ -32,31 +32,6 @@ const (
// DefaultLevel is the default logging level when none is provided.
const DefaultLevel = LevelInfo
// Cheap integer to fixed-width decimal ASCII. Give a negative width
// to avoid zero-padding. (From log/log.go in the standard library).
func itoa(i int, wid int) string {
// Assemble decimal in reverse order.
var b [20]byte
bp := len(b) - 1
for i >= 10 || wid > 1 {
wid--
q := i / 10
b[bp] = byte('0' + i - q*10)
bp--
i = q
}
// i < 10
b[bp] = byte('0' + i)
return string(b[bp:])
}
func writeToOut(level Level) bool {
if level < LevelWarning {
return true
}
return false
}
var levelPrefix = [...]string{
LevelDebug: "DEBUG",
LevelInfo: "INFO",

View File

@@ -1,6 +1,7 @@
package logging
import (
"errors"
"fmt"
"io"
"os"
@@ -11,64 +12,64 @@ import (
//
// Log messages consist of four components:
//
// 1. The **level** attaches a notion of priority to the log message.
// Several log levels are available:
// 1. The **level** attaches a notion of priority to the log message.
// Several log levels are available:
//
// + FATAL (32): the system is in an unsuable state, and cannot
// continue to run. Most of the logging for this will cause the
// program to exit with an error code.
// + CRITICAL (16): critical conditions. The error, if uncorrected, is
// likely to cause a fatal condition shortly. An example is running
// out of disk space. This is something that the ops team should get
// paged for.
// + ERROR (8): error conditions. A single error doesn't require an
// ops team to be paged, but repeated errors should often trigger a
// page based on threshold triggers. An example is a network
// failure: it might be a transient failure (these do happen), but
// most of the time it's self-correcting.
// + WARNING (4): warning conditions. An example of this is a bad
// request sent to a server. This isn't an error on the part of the
// program, but it may be indicative of other things. Like errors,
// the ops team shouldn't be paged for errors, but a page might be
// triggered if a certain threshold of warnings is reached (which is
// typically much higher than errors). For example, repeated
// warnings might be a sign that the system is under attack.
// + INFO (2): informational message. This is a normal log message
// that is used to deliver information, such as recording
// requests. Ops teams are never paged for informational
// messages. This is the default log level.
// + DEBUG (1): debug-level message. These are only used during
// development or if a deployed system repeatedly sees abnormal
// errors.
// + FATAL (32): the system is in an unusable state and cannot
// continue to run. Most of the logging for this will cause the
// program to exit with an error code.
// + CRITICAL (16): critical conditions. The error, if uncorrected, is
// likely to cause a fatal condition shortly. An example is running
// out of disk space. This is something that the ops team should get
// paged for.
// + ERROR (8): error conditions. A single error doesn't require an
// ops team to be paged, but repeated errors should often trigger a
// page based on threshold triggers. An example is a network
// failure: it might be a transient failure (these do happen), but
// most of the time it's self-correcting.
// + WARNING (4): warning conditions. An example of this is a bad
// request sent to a server. This isn't an error on the part of the
// program, but it may be indicative of other things. Like errors,
// the ops team shouldn't be paged for errors, but a page might be
// triggered if a certain threshold of warnings is reached (which is
// typically much higher than errors). For example, repeated
// warnings might be a sign that the system is under attack.
// + INFO (2): informational message. This is a normal log message
// used to deliver information, such as recording requests. Ops
// teams are never paged for informational messages. This is the
// default log level.
// + DEBUG (1): debug-level message. These are only used during
// development or if a deployed system repeatedly sees abnormal
// errors.
//
// The numeric values indicate the priority of a given level.
// The numeric values indicate the priority of a given level.
//
// 2. The **actor** is used to specify which component is generating
// the log message. This could be the program name, or it could be
// a specific component inside the system.
// 2. The **actor** is used to specify which component is generating
// the log message. This could be the program name, or it could be
// a specific component inside the system.
//
// 3. The **event** is a short message indicating what happened. This is
// most like the traditional log message.
// 3. The **event** is a short message indicating what happened. This is
// most like the traditional log message.
//
// 4. The **attributes** are an optional set of key-value string pairs that
// provide additional information.
// 4. The **attributes** are an optional set of key-value string pairs that
// provide additional information.
//
// Additionally, each log message has an associated timestamp. For the
// text-based logs, this is "%FT%T%z"; for the binary logs, this is a
// 64-bit Unix timestamp. An example text-based timestamp might look like ::
//
// [2016-03-27T20:59:27-0700] [INFO] [actor:server event:request received] client=192.168.2.5 request-size=839
// [2016-03-27T20:59:27-0700] [INFO] [actor:server event:request received] client=192.168.2.5 request-size=839
//
// Note that this is organised in a manner that facilitates parsing::
//
// /\[(\d{4}-\d{3}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{4})\] \[(\w+\)]\) \[actor:(.+?) event:(.+?)\]/
// /\[(\d{4}-\d{3}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{4})\] \[(\w+\)]\) \[actor:(.+?) event:(.+?)\]/
//
// will cover the header:
//
// + ``$1`` contains the timestamp
// + ``$2`` contains the level
// + ``$3`` contains the actor
// + ``$4`` contains the event
// + “$1“ contains the timestamp
// + “$2“ contains the level
// + “$3“ contains the actor
// + “$4“ contains the event.
type Logger interface {
// SetLevel sets the minimum log level.
SetLevel(Level)
@@ -131,7 +132,7 @@ func (lw *LogWriter) output(w io.Writer, lvl Level, actor, event string, attrs m
}
// Debug emits a debug-level message. These are only used during
// development or if a deployed system repeatedly sees abnormal
// development, or if a deployed system repeatedly sees abnormal
// errors.
//
// Actor specifies the component emitting the message; event indicates
@@ -213,7 +214,7 @@ func (lw *LogWriter) Critical(actor, event string, attrs map[string]string) {
lw.output(lw.we, LevelCritical, actor, event, attrs)
}
// Fatal emits a message indicating that the system is in an unsuable
// Fatal emits a message indicating that the system is in an unusable
// state, and cannot continue to run. The program will exit with exit
// code 1.
//
@@ -229,9 +230,9 @@ func (lw *LogWriter) Fatal(actor, event string, attrs map[string]string) {
os.Exit(1)
}
// FatalCode emits a message indicating that the system is in an unsuable
// FatalCode emits a message indicating that the system is in an unusable
// state, and cannot continue to run. The program will exit with the
// exit code speicfied in the exitcode argument.
// exit code specified in the exitcode argument.
//
// Actor specifies the component emitting the message; event indicates
// the event that caused the log message to be emitted. attrs is a map
@@ -245,7 +246,7 @@ func (lw *LogWriter) FatalCode(exitcode int, actor, event string, attrs map[stri
os.Exit(exitcode)
}
// FatalNoDie emits a message indicating that the system is in an unsuable
// FatalNoDie emits a message indicating that the system is in an unusable
// state, and cannot continue to run. The program will not exit; it is
// assumed that the caller has some final clean up to perform.
//
@@ -314,11 +315,17 @@ func (m *Multi) Status() error {
}
func (m *Multi) Close() error {
var errs []error
for _, l := range m.loggers {
l.Close()
if err := l.Close(); err != nil {
errs = append(errs, err)
}
}
return nil
if len(errs) == 0 {
return nil
}
return errors.Join(errs...)
}
func (m *Multi) Debug(actor, event string, attrs map[string]string) {

View File

@@ -1,30 +1,32 @@
package logging
package logging_test
import (
"bytes"
"fmt"
"os"
"testing"
"git.wntrmute.dev/kyle/goutils/logging"
)
// A list of implementations that should be tested.
var implementations []Logger
var implementations []logging.Logger
func init() {
lw := NewLogWriter(&bytes.Buffer{}, nil)
cw := NewConsole()
lw := logging.NewLogWriter(&bytes.Buffer{}, nil)
cw := logging.NewConsole()
implementations = append(implementations, lw)
implementations = append(implementations, cw)
}
func TestFileSetup(t *testing.T) {
fw1, err := NewFile("fw1.log", true)
fw1, err := logging.NewFile("fw1.log", true)
if err != nil {
t.Fatalf("failed to create new file logger: %v", err)
}
fw2, err := NewSplitFile("fw2.log", "fw2.err", true)
fw2, err := logging.NewSplitFile("fw2.log", "fw2.err", true)
if err != nil {
t.Fatalf("failed to create new split file logger: %v", err)
}
@@ -33,7 +35,7 @@ func TestFileSetup(t *testing.T) {
implementations = append(implementations, fw2)
}
func TestImplementations(t *testing.T) {
func TestImplementations(_ *testing.T) {
for _, l := range implementations {
l.Info("TestImplementations", "Info message",
map[string]string{"type": fmt.Sprintf("%T", l)})
@@ -44,20 +46,30 @@ func TestImplementations(t *testing.T) {
func TestCloseLoggers(t *testing.T) {
for _, l := range implementations {
l.Close()
if err := l.Close(); err != nil {
t.Errorf("failed to close logger: %v", err)
}
}
}
func TestDestroyLogFiles(t *testing.T) {
os.Remove("fw1.log")
os.Remove("fw2.log")
os.Remove("fw2.err")
if err := os.Remove("fw1.log"); err != nil {
t.Errorf("failed to remove fw1.log: %v", err)
}
if err := os.Remove("fw2.log"); err != nil {
t.Errorf("failed to remove fw2.log: %v", err)
}
if err := os.Remove("fw2.err"); err != nil {
t.Errorf("failed to remove fw2.err: %v", err)
}
}
func TestMulti(t *testing.T) {
c1 := NewConsole()
c2 := NewConsole()
m := NewMulti(c1, c2)
c1 := logging.NewConsole()
c2 := logging.NewConsole()
m := logging.NewMulti(c1, c2)
if !m.Good() {
t.Fatal("failed to set up multi logger")
}

View File

@@ -8,15 +8,15 @@ type mwc struct {
}
// Write implements the Writer interface.
func (t *mwc) Write(p []byte) (n int, err error) {
func (t *mwc) Write(p []byte) (int, error) {
for _, w := range t.wcs {
n, err = w.Write(p)
n, err := w.Write(p)
if err != nil {
return
return n, err
}
if n != len(p) {
err = io.ErrShortWrite
return
return n, err
}
}
return len(p), nil

View File

@@ -1,10 +1,11 @@
package mwc
package mwc_test
import (
"bytes"
"testing"
"git.wntrmute.dev/kyle/goutils/assert"
"git.wntrmute.dev/kyle/goutils/mwc"
"git.wntrmute.dev/kyle/goutils/testio"
)
@@ -12,7 +13,7 @@ func TestMWC(t *testing.T) {
buf1 := testio.NewBufCloser(nil)
buf2 := testio.NewBufCloser(nil)
mwc := MultiWriteCloser(buf1, buf2)
mwc := mwc.MultiWriteCloser(buf1, buf2)
_, err := mwc.Write([]byte("hello, world"))
assert.NoErrorT(t, err)
@@ -30,15 +31,15 @@ func TestMWCShort(t *testing.T) {
buf3 := testio.NewBrokenWriter(5)
buf4 := testio.NewSilentBrokenWriter(5)
mwc := MultiWriteCloser(buf1, buf2, buf3)
defer mwc.Close()
multiWriter := mwc.MultiWriteCloser(buf1, buf2, buf3)
defer multiWriter.Close()
_, err := mwc.Write([]byte("hello, world"))
_, err := multiWriter.Write([]byte("hello, world"))
assert.ErrorT(t, err, "expected a short write error", "but no error occurred")
mwc.Close()
multiWriter.Close()
mwc = MultiWriteCloser(buf1, buf2, buf4)
_, err = mwc.Write([]byte("hello, world"))
multiWriter = mwc.MultiWriteCloser(buf1, buf2, buf4)
_, err = multiWriter.Write([]byte("hello, world"))
assert.ErrorT(t, err, "expected a short write error", "but no error occurred")
}
@@ -47,7 +48,7 @@ func TestMWCClose(t *testing.T) {
buf2 := testio.NewBufCloser(nil)
buf3 := testio.NewBrokenCloser(nil)
mwc := MultiWriteCloser(buf1, buf2, buf3)
mwc := mwc.MultiWriteCloser(buf1, buf2, buf3)
_, err := mwc.Write([]byte("hello, world"))
assert.NoErrorT(t, err)

View File

@@ -1,49 +0,0 @@
// Package rand contains utilities for interacting with math/rand, including
// seeding from a random seed.
package rand
import (
"crypto/rand"
"encoding/binary"
mrand "math/rand"
)
// CryptoUint64 returns a 64-bit unsigned integer drawn from the
// operating system's cryptographically secure random source.
func CryptoUint64() (uint64, error) {
	var raw [8]byte
	if _, err := rand.Read(raw[:]); err != nil {
		return 0, err
	}
	return binary.BigEndian.Uint64(raw[:]), nil
}
// Seed primes the non-cryptographic math/rand PRNG with a value taken
// from the system CSPRNG, and returns the 64-bit seed that was used.
func Seed() (uint64, error) {
	s, err := CryptoUint64()
	if err != nil {
		return 0, err
	}
	// NB: the int64 conversion is permitted; wrap-around is harmless
	// for seeding purposes.
	mrand.Seed(int64(s))
	return s, nil
}
// Int is a wrapper for math/rand's Int so that callers need import
// only this package.
func Int() int { return mrand.Int() }
// Intn is a wrapper for math/rand's Intn so that callers need import
// only this package.
func Intn(max int) int { return mrand.Intn(max) }
// Intn2 returns a random value in the half-open interval [min, max):
// min is inclusive, max is exclusive, mirroring math/rand.Intn.
// (The previous comment claimed max was inclusive, which did not
// match the implementation; the code is unchanged because callers
// and tests depend on the exclusive upper bound.)
func Intn2(min, max int) int {
	return Intn(max-min) + min
}

View File

@@ -1,74 +0,0 @@
package rand
import (
"fmt"
mrand "math/rand"
"testing"
)
// TestCryptoUint64 draws two values and checks they differ; a
// collision is so unlikely that it more plausibly signals a broken
// CSPRNG than bad luck.
func TestCryptoUint64(t *testing.T) {
	a, err := CryptoUint64()
	if err != nil {
		t.Fatal(err)
	}
	b, err := CryptoUint64()
	if err != nil {
		t.Fatal(err)
	}
	if a == b {
		t.Fatalf("repeated random uint64s: %d", a)
	}
}
// TestIntn seeds the global PRNG deterministically and checks that
// Intn2 reproduces a known sequence.
func TestIntn(t *testing.T) {
	expected := []int{3081, 4887, 4847, 1059, 3081}

	mrand.Seed(1)
	for i := 0; i < 5; i++ {
		n := Intn2(1000, 5000)
		if n != expected[i] {
			// Bug fix: mismatches were previously written with
			// fmt.Printf, which printed the discrepancy but never
			// failed the test. fmt.Sprintf keeps the file's fmt
			// import in use.
			t.Error(fmt.Sprintf("invalid sequence at %d: expected %d, have %d", i, expected[i], n))
		}
	}
}
// TestSeed reseeds the PRNG twice with distinct CSPRNG-derived seeds
// and checks that the first draw after each seeding differs.
func TestSeed(t *testing.T) {
	seed1, err := Seed()
	if err != nil {
		t.Fatal(err)
	}
	n1 := Int()

	var seed2 uint64
	for attempt := 0; ; attempt++ {
		if seed2, err = Seed(); err != nil {
			t.Fatal(err)
		}
		if seed2 != seed1 {
			break
		}
		if attempt >= 3 {
			t.Fatal("can't generate two unique seeds")
		}
	}
	n2 := Int()

	// Again, this is not impossible, merely statistically improbable
	// and a potential canary for RNG issues.
	if n1 == n2 {
		t.Fatalf("repeated integers fresh from two unique seeds: %d/%d -> %d",
			seed1, seed2, n1)
	}
}

View File

@@ -15,7 +15,7 @@ func zero(in []byte, n int) {
stop = len(in)
}
for i := 0; i < stop; i++ {
for i := range stop {
in[i] ^= in[i]
}
}
@@ -37,7 +37,10 @@ func NewBuffer(n int) *Buffer {
// original data will be wiped.
func NewBufferFrom(p []byte) *Buffer {
buf := NewBuffer(len(p))
buf.Write(p)
_, err := buf.Write(p)
if err != nil {
return nil
}
zero(p, len(p))
return buf
}
@@ -54,10 +57,7 @@ func (buf *Buffer) Read(p []byte) (int, error) {
return 0, io.EOF
}
copyLength := len(p)
if copyLength > len(buf.buf) {
copyLength = len(buf.buf)
}
copyLength := min(len(p), len(buf.buf))
copy(p, buf.buf)
zero(buf.buf, len(p))
@@ -91,10 +91,7 @@ func (buf *Buffer) Write(p []byte) (int, error) {
r := len(buf.buf) + len(p)
if cap(buf.buf) < r {
l := r
for {
if l > r {
break
}
for l <= r {
l *= 2
}
buf.grow(l - cap(buf.buf))
@@ -107,7 +104,7 @@ func (buf *Buffer) Write(p []byte) (int, error) {
func (buf *Buffer) WriteByte(c byte) error {
r := len(buf.buf) + 1
if cap(buf.buf) < r {
l := r * 2
l := r << 1
buf.grow(l - cap(buf.buf))
}
buf.buf = append(buf.buf, c)
@@ -138,7 +135,7 @@ func (buf *Buffer) Bytes() []byte {
}
p := make([]byte, buf.Len())
buf.Read(p)
_, _ = buf.Read(p)
buf.Close()
return p
}

View File

@@ -1,15 +1,16 @@
package sbuf
package sbuf_test
import (
"bytes"
"crypto/rand"
"testing"
"git.wntrmute.dev/kyle/goutils/sbuf"
"golang.org/x/crypto/nacl/box"
)
var (
buf = &Buffer{}
buf = &sbuf.Buffer{}
testMessage1 = []byte("round and round and round we go, where we stop, no one knows")
testMessage2 = []byte("the deconstruction of falling stars")
)
@@ -113,23 +114,23 @@ func TestShortRead(t *testing.T) {
}
func TestNewBuffer(t *testing.T) {
buf := NewBuffer(32)
if len(buf.buf) != 0 {
testBuffer := sbuf.NewBuffer(32)
if testBuffer.Len() != 0 {
t.Fatalf("expected new buffer length to be 0, have %d",
len(buf.buf))
testBuffer.Len())
}
if cap(buf.buf) != 32 {
if testBuffer.Cap() != 32 {
t.Fatalf("expected new buffer capacity to be 0, have %d",
cap(buf.buf))
testBuffer.Cap())
}
}
func TestNewBufferFrom(t *testing.T) {
p := make([]byte, len(testMessage1))
copy(p, testMessage1)
buf := NewBufferFrom(p)
if !bytes.Equal(buf.buf, testMessage1) {
testBuffer := sbuf.NewBufferFrom(p)
if !bytes.Equal(testBuffer.Bytes(), testMessage1) {
t.Fatal("new buffer wasn't constructed properly")
}
}
@@ -137,10 +138,10 @@ func TestNewBufferFrom(t *testing.T) {
func TestBytes(t *testing.T) {
p := make([]byte, len(testMessage1))
copy(p, testMessage1)
buf := NewBufferFrom(p)
testBuffer := sbuf.NewBufferFrom(p)
out := buf.Bytes()
if buf.buf != nil {
out := testBuffer.Bytes()
if testBuffer.Len() != 0 {
t.Fatal("buffer was not closed")
}
@@ -148,21 +149,21 @@ func TestBytes(t *testing.T) {
t.Fatal("buffer did not return the right data")
}
out = buf.Bytes()
out = testBuffer.Bytes()
if out != nil {
t.Fatal("a closed buffer should return nil for Bytes")
}
}
func TestRWByte(t *testing.T) {
buf := NewBuffer(0)
testBuffer := sbuf.NewBuffer(0)
c := byte(42)
err := buf.WriteByte(c)
err := testBuffer.WriteByte(c)
if err != nil {
t.Fatalf("%v", err)
}
c, err = buf.ReadByte()
c, err = testBuffer.ReadByte()
if err != nil {
t.Fatalf("%v", err)
}
@@ -171,22 +172,21 @@ func TestRWByte(t *testing.T) {
t.Fatalf("Expected 42, have %d", c)
}
_, err = buf.ReadByte()
_, err = testBuffer.ReadByte()
if err == nil {
t.Fatal("Expected EOF")
}
}
func BenchmarkRead(b *testing.B) {
b.N = 2000
pub, priv, err := box.GenerateKey(rand.Reader)
if err != nil {
b.Fatalf("%v", err)
}
b.ReportAllocs()
for i := 0; i < b.N; i++ {
_, err := buf.Write(priv[:])
for b.Loop() {
_, err = buf.Write(priv[:])
if err != nil {
b.Fatalf("%v", err)
}
@@ -204,11 +204,11 @@ func BenchmarkFixed(b *testing.B) {
b.Fatalf("%v", err)
}
buf = NewBuffer(64 * b.N)
buf = sbuf.NewBuffer(64 * b.N)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
_, err := buf.Write(priv[:])
for b.Loop() {
_, err = buf.Write(priv[:])
if err != nil {
b.Fatalf("%v", err)
}

View File

@@ -1,18 +1,20 @@
package seekbuf
package seekbuf_test
import (
"fmt"
"testing"
"git.wntrmute.dev/kyle/goutils/assert"
"git.wntrmute.dev/kyle/goutils/seekbuf"
)
func TestSeeking(t *testing.T) {
partA := []byte("hello, ")
partB := []byte("world!")
buf := New(partA)
assert.BoolT(t, buf.Len() == len(partA), fmt.Sprintf("on init: have length %d, want length %d", buf.Len(), len(partA)))
buf := seekbuf.New(partA)
assert.BoolT(t, buf.Len() == len(partA),
fmt.Sprintf("on init: have length %d, want length %d", buf.Len(), len(partA)))
b := make([]byte, 32)
@@ -32,7 +34,8 @@ func TestSeeking(t *testing.T) {
partsLen := len(partA) + len(partB)
buf.Rewind()
assert.BoolT(t, buf.Len() == partsLen, fmt.Sprintf("after rewinding: have length %d, want length %d", buf.Len(), partsLen))
assert.BoolT(t, buf.Len() == partsLen,
fmt.Sprintf("after rewinding: have length %d, want length %d", buf.Len(), partsLen))
buf.Close()
assert.BoolT(t, buf.Len() == 0, fmt.Sprintf("after closing, have length %d, want length 0", buf.Len()))

View File

@@ -17,23 +17,6 @@ type Tee struct {
Verbose bool
}
func (t *Tee) Write(p []byte) (int, error) {
n, err := os.Stdout.Write(p)
if err != nil {
return n, err
}
if t.f != nil {
return t.f.Write(p)
}
return n, nil
}
// Close calls Close on the underlying file.
func (t *Tee) Close() error {
return t.f.Close()
}
// NewOut writes to standard output only. The file is created, not
// appended to.
func NewOut(logFile string) (*Tee, error) {
@@ -48,9 +31,32 @@ func NewOut(logFile string) (*Tee, error) {
return &Tee{f: f}, nil
}
func (t *Tee) Write(p []byte) (int, error) {
n, err := os.Stdout.Write(p)
if err != nil {
return n, err
}
if t.f != nil {
return t.f.Write(p)
}
return n, nil
}
// Close calls Close on the underlying file if present.
// It is safe to call Close on a Tee with no file; in that case, it returns nil.
func (t *Tee) Close() error {
if t == nil || t.f == nil {
return nil
}
err := t.f.Close()
t.f = nil
return err
}
// Printf formats according to a format specifier and writes to the
// tee instance.
func (t *Tee) Printf(format string, args ...interface{}) (int, error) {
func (t *Tee) Printf(format string, args ...any) (int, error) {
s := fmt.Sprintf(format, args...)
n, err := os.Stdout.WriteString(s)
if err != nil {
@@ -66,7 +72,7 @@ func (t *Tee) Printf(format string, args ...interface{}) (int, error) {
// VPrintf is a variant of Printf that only prints if the Tee's
// Verbose flag is set.
func (t *Tee) VPrintf(format string, args ...interface{}) (int, error) {
func (t *Tee) VPrintf(format string, args ...any) (int, error) {
if t.Verbose {
return t.Printf(format, args...)
}
@@ -87,12 +93,12 @@ func Open(logFile string) error {
// Printf formats according to a format specifier and writes to the
// global tee.
func Printf(format string, args ...interface{}) (int, error) {
func Printf(format string, args ...any) (int, error) {
return globalTee.Printf(format, args...)
}
// VPrintf calls VPrintf on the global tee instance.
func VPrintf(format string, args ...interface{}) (int, error) {
func VPrintf(format string, args ...any) (int, error) {
return globalTee.VPrintf(format, args...)
}

197
tee/tee_test.go Normal file
View File

@@ -0,0 +1,197 @@
package tee_test
import (
"io"
"os"
"path/filepath"
"testing"
tee "git.wntrmute.dev/kyle/goutils/tee"
)
// captureStdout redirects os.Stdout for the duration of fn and
// returns everything fn wrote to it.
//
// NOTE(review): the pipe is drained only after fn returns, so output
// larger than the OS pipe buffer (typically 64 KiB) would block the
// writer; fine for these small tests, but worth confirming if reused.
func captureStdout(t *testing.T, fn func()) string {
	t.Helper()

	r, w, err := os.Pipe()
	if err != nil {
		t.Fatalf("pipe: %v", err)
	}

	old := os.Stdout
	os.Stdout = w
	defer func() { os.Stdout = old }()

	fn()

	// Close the write end so ReadAll observes EOF.
	_ = w.Close()
	b, err := io.ReadAll(r)
	_ = r.Close()
	if err != nil {
		// Bug fix: this error was previously discarded, so a failed
		// read could silently return truncated output.
		t.Fatalf("read captured stdout: %v", err)
	}
	return string(b)
}
// TestNewOutEmpty_WritesToStdoutOnly checks that a Tee created with an
// empty log path writes to standard output and nowhere else.
func TestNewOutEmpty_WritesToStdoutOnly(t *testing.T) {
	teeInst, err := tee.NewOut("")
	if err != nil {
		t.Fatalf("NewOut: %v", err)
	}

	got := captureStdout(t, func() {
		n, werr := teeInst.Write([]byte("abc"))
		if werr != nil || n != 3 {
			t.Fatalf("Write got n=%d err=%v", n, werr)
		}
		n, werr = teeInst.Printf("-%d-", 7)
		if werr != nil || n != len("-7-") {
			t.Fatalf("Printf got n=%d err=%v", n, werr)
		}
	})

	if got != "abc-7-" {
		t.Fatalf("stdout = %q, want %q", got, "abc-7-")
	}
}
// TestNewOutWithFile_WritesToBoth verifies that a Tee with a backing
// file mirrors every Write and Printf to both stdout and the file.
func TestNewOutWithFile_WritesToBoth(t *testing.T) {
	dir := t.TempDir()
	logPath := filepath.Join(dir, "log.txt")
	teeInst, err := tee.NewOut(logPath)
	if err != nil {
		t.Fatalf("NewOut: %v", err)
	}
	// Safety net; the test also closes explicitly below, and Tee.Close
	// tolerates being called twice.
	defer func() { _ = teeInst.Close() }()
	out := captureStdout(t, func() {
		if _, err = teeInst.Write([]byte("x")); err != nil {
			t.Fatalf("Write: %v", err)
		}
		if _, err = teeInst.Printf("%s", "y"); err != nil {
			t.Fatalf("Printf: %v", err)
		}
	})
	if out != "xy" {
		t.Fatalf("stdout = %q, want %q", out, "xy")
	}
	// Close to flush and release the file before reading
	if err = teeInst.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
	data, err := os.ReadFile(logPath)
	if err != nil {
		t.Fatalf("ReadFile: %v", err)
	}
	if string(data) != "xy" {
		t.Fatalf("file content = %q, want %q", string(data), "xy")
	}
}
// TestVPrintf_VerboseToggle checks that VPrintf is a no-op while the
// Verbose flag is false and behaves like Printf once it is set.
func TestVPrintf_VerboseToggle(t *testing.T) {
	teeInst := &tee.Tee{} // no file attached: stdout only

	quiet := captureStdout(t, func() {
		n, err := teeInst.VPrintf("hello")
		if err != nil || n != 0 {
			t.Fatalf("VPrintf (quiet) got n=%d err=%v", n, err)
		}
	})
	if quiet != "" {
		t.Fatalf("stdout = %q, want empty when not verbose", quiet)
	}

	teeInst.Verbose = true
	loud := captureStdout(t, func() {
		n, err := teeInst.VPrintf("%s", "hello")
		if err != nil || n != len("hello") {
			t.Fatalf("VPrintf (verbose) got n=%d err=%v", n, err)
		}
	})
	if loud != "hello" {
		t.Fatalf("stdout = %q, want %q", loud, "hello")
	}
}
// TestWrite_StdoutErrorDoesNotWriteToFile checks Tee's ordering
// guarantee: the stdout write happens first, and when it fails the
// backing file must receive nothing.
func TestWrite_StdoutErrorDoesNotWriteToFile(t *testing.T) {
	dir := t.TempDir()
	logPath := filepath.Join(dir, "log.txt")
	teeInst, err := tee.NewOut(logPath)
	if err != nil {
		t.Fatalf("NewOut: %v", err)
	}
	defer func() { _ = teeInst.Close() }()
	// Replace stdout with a closed pipe writer to force write error.
	r, w, err := os.Pipe()
	if err != nil {
		t.Fatalf("pipe: %v", err)
	}
	old := os.Stdout
	os.Stdout = w
	_ = w.Close() // immediately close to cause EPIPE on write
	// Restore stdout before the test framework prints anything, and
	// release the read end of the sacrificial pipe.
	defer func() {
		os.Stdout = old
		_ = r.Close()
	}()
	var n int
	if n, err = teeInst.Write([]byte("abc")); err == nil {
		t.Fatalf("expected error writing to closed stdout, got n=%d err=nil", n)
	}
	// Ensure file remained empty because stdout write failed first.
	_ = teeInst.Close()
	data, err := os.ReadFile(logPath)
	if err != nil {
		t.Fatalf("ReadFile: %v", err)
	}
	if len(data) != 0 {
		t.Fatalf("file content = %q, want empty due to stdout failure", string(data))
	}
}
// TestGlobal_OpenPrintfVPrintfClose exercises the package-level tee:
// Open attaches a log file, Printf/VPrintf mirror output to stdout and
// the file, and Close flushes it. The global tee is shared mutable
// state, so the test resets it on entry and exit.
func TestGlobal_OpenPrintfVPrintfClose(t *testing.T) {
	// Ensure a clean slate for global tee
	_ = tee.Close()
	tee.SetVerbose(false)
	dir := t.TempDir()
	logPath := filepath.Join(dir, "glog.txt")
	if err := tee.Open(logPath); err != nil {
		t.Fatalf("Open: %v", err)
	}
	out := captureStdout(t, func() {
		if _, err := tee.Printf("A"); err != nil {
			t.Fatalf("Printf: %v", err)
		}
		// Not verbose yet, should not print
		if n, err := tee.VPrintf("B"); err != nil || n != 0 {
			t.Fatalf("VPrintf (quiet) n=%d err=%v", n, err)
		}
		tee.SetVerbose(true)
		if _, err := tee.VPrintf("C%d", 1); err != nil {
			t.Fatalf("VPrintf (verbose): %v", err)
		}
	})
	// "B" was suppressed, so both stdout and the file should see "AC1".
	if out != "AC1" {
		t.Fatalf("stdout = %q, want %q", out, "AC1")
	}
	if err := tee.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
	data, err := os.ReadFile(logPath)
	if err != nil {
		t.Fatalf("ReadFile: %v", err)
	}
	if string(data) != "AC1" {
		t.Fatalf("file content = %q, want %q", string(data), "AC1")
	}
	// Reset global tee for other tests/packages
	_ = tee.Close()
	tee.SetVerbose(false)
}

View File

@@ -169,6 +169,26 @@ type BufCloser struct {
buf *bytes.Buffer
}
// NewBufCloser creates and initializes a new BufCloser using buf as
// its initial contents. It is intended to prepare a BufCloser to read
// existing data. It can also be used to size the internal buffer for
// writing. To do that, buf should have the desired capacity but a
// length of zero.
func NewBufCloser(buf []byte) *BufCloser {
	return &BufCloser{buf: bytes.NewBuffer(buf)}
}
// NewBufCloserString creates and initializes a new BufCloser using
// string s as its initial contents. It is intended to prepare a
// buffer to read an existing string.
func NewBufCloserString(s string) *BufCloser {
	return &BufCloser{buf: bytes.NewBufferString(s)}
}
// Write writes the data to the BufCloser.
func (buf *BufCloser) Write(p []byte) (int, error) {
return buf.buf.Write(p)
@@ -199,26 +219,6 @@ func (buf *BufCloser) Len() int {
return buf.buf.Len()
}
// NewBufCloser creates and initializes a new BufCloser using buf as
// its initial contents. It is intended to prepare a BufCloser to read
// existing data. It can also be used to size the internal buffer for
// writing. To do that, buf should have the desired capacity but a
// length of zero.
func NewBufCloser(buf []byte) *BufCloser {
bc := new(BufCloser)
bc.buf = bytes.NewBuffer(buf)
return bc
}
// NewBufCloserString creates and initializes a new Buffer using
// string s as its initial contents. It is intended to prepare a
// buffer to read an existing string.
func NewBufCloserString(s string) *BufCloser {
buf := new(BufCloser)
buf.buf = bytes.NewBufferString(s)
return buf
}
// A LoggingBuffer is an io.ReadWriter that prints the hex value of
// the data for all reads and writes.
type LoggingBuffer struct {
@@ -323,6 +323,26 @@ type BrokenCloser struct {
buf *bytes.Buffer
}
// NewBrokenCloser creates and initializes a new BrokenCloser using buf
// as its initial contents. It is intended to prepare a BrokenCloser to
// read existing data. It can also be used to size the internal buffer
// for writing. To do that, buf should have the desired capacity but a
// length of zero.
func NewBrokenCloser(buf []byte) *BrokenCloser {
	return &BrokenCloser{buf: bytes.NewBuffer(buf)}
}
// NewBrokenCloserString creates and initializes a new BrokenCloser
// using string s as its initial contents. It is intended to prepare a
// buffer to read an existing string.
func NewBrokenCloserString(s string) *BrokenCloser {
	return &BrokenCloser{buf: bytes.NewBufferString(s)}
}
// Write writes the data to the BrokenCloser.
func (buf *BrokenCloser) Write(p []byte) (int, error) {
return buf.buf.Write(p)
@@ -347,23 +367,3 @@ func (buf *BrokenCloser) Reset() {
func (buf *BrokenCloser) Bytes() []byte {
return buf.buf.Bytes()
}
// NewBrokenCloser creates and initializes a new BrokenCloser using buf as
// its initial contents. It is intended to prepare a BrokenCloser to read
// existing data. It can also be used to size the internal buffer for
// writing. To do that, buf should have the desired capacity but a
// length of zero.
func NewBrokenCloser(buf []byte) *BrokenCloser {
bc := new(BrokenCloser)
bc.buf = bytes.NewBuffer(buf)
return bc
}
// NewBrokenCloserString creates and initializes a new Buffer using
// string s as its initial contents. It is intended to prepare a
// buffer to read an existing string.
func NewBrokenCloserString(s string) *BrokenCloser {
buf := new(BrokenCloser)
buf.buf = bytes.NewBufferString(s)
return buf
}

View File

@@ -1,13 +1,15 @@
package testio
package testio_test
import (
"bytes"
"os"
"testing"
"git.wntrmute.dev/kyle/goutils/testio"
)
func TestBrokenWriter(t *testing.T) {
buf := NewBrokenWriter(2)
buf := testio.NewBrokenWriter(2)
data := []byte{1, 2}
n, err := buf.Write(data)
@@ -39,7 +41,7 @@ func TestBufCloser(t *testing.T) {
var data = []byte{1, 2}
var read = make([]byte, 2)
buf := NewBufCloser(data)
buf := testio.NewBufCloser(data)
_, err := buf.Read(read)
if err != nil {
t.Fatalf("%v", err)
@@ -54,7 +56,7 @@ func TestBufCloser(t *testing.T) {
buf.Reset()
s := "hi"
buf = NewBufCloserString(s)
buf = testio.NewBufCloserString(s)
read = buf.Bytes()
if string(read) != s {
@@ -65,7 +67,7 @@ func TestBufCloser(t *testing.T) {
func TestLoggingBuffer(t *testing.T) {
src := &bytes.Buffer{}
data := []byte("AB")
lb := NewLoggingBuffer(src)
lb := testio.NewLoggingBuffer(src)
_, err := lb.Write(data)
if err != nil {
t.Fatalf("%v", err)
@@ -82,8 +84,8 @@ func TestLoggingBuffer(t *testing.T) {
}
expected := "[TEST] [WRITE] 4142\n"
if string(out.Bytes()) != expected {
t.Fatalf("expected '%s', have '%s'", expected, string(out.Bytes()))
if out.String() != expected {
t.Fatalf("expected '%s', have '%s'", expected, out.String())
}
out.Reset()
@@ -96,8 +98,8 @@ func TestLoggingBuffer(t *testing.T) {
}
expected = "[TEST] [READ] 4142\n"
if string(out.Bytes()) != expected {
t.Fatalf("expected '%s', have '%s'", expected, string(out.Bytes()))
if out.String() != expected {
t.Fatalf("expected '%s', have '%s'", expected, out.String())
}
out.Reset()
@@ -112,8 +114,8 @@ func TestLoggingBuffer(t *testing.T) {
}
expected = "[READ] 4142\n"
if string(out.Bytes()) != expected {
t.Fatalf("expected '%s', have '%s'", expected, string(out.Bytes()))
if out.String() != expected {
t.Fatalf("expected '%s', have '%s'", expected, out.String())
}
src.Reset()
@@ -124,8 +126,8 @@ func TestLoggingBuffer(t *testing.T) {
}
func TestBrokenReadWriter(t *testing.T) {
brw := NewBrokenReadWriter(0, 0)
lb := NewLoggingBuffer(brw)
brw := testio.NewBrokenReadWriter(0, 0)
lb := testio.NewLoggingBuffer(brw)
var p = make([]byte, 2)
var data = []byte("HI")
@@ -177,7 +179,7 @@ func TestBrokenReadWriter(t *testing.T) {
}
func TestBufferConn(t *testing.T) {
bc := NewBufferConn()
bc := testio.NewBufferConn()
client := []byte("AB")
peer := []byte("XY")

View File

@@ -1,16 +0,0 @@
package testutil
import "io/ioutil"
// TempName generates a new temporary file name. The caller should
// remove the temporary file when done.
//
// NOTE: io/ioutil is deprecated; ioutil.TempFile is retained here
// because it is this file's only import, but new code should prefer
// os.CreateTemp.
func TempName() (string, error) {
	tmpf, err := ioutil.TempFile("", "transport_cachedkp_")
	if err != nil {
		return "", err
	}
	name := tmpf.Name()
	// Bug fix: the Close error was previously ignored; report it so
	// callers do not proceed with a handle that failed to close.
	if err = tmpf.Close(); err != nil {
		return "", err
	}
	return name, nil
}