Compare commits
49 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| f463eeed88 | |||
| 289c9d2343 | |||
| e375963243 | |||
| 31baa10b3b | |||
| 0556c7c56d | |||
| 83c95d9db8 | |||
| beccb551e2 | |||
| c761d98b82 | |||
| e68d22337b | |||
| 4cb6f5b6f0 | |||
| 6d5708800f | |||
| fa3eb821e6 | |||
| dd5ed403b9 | |||
| b4fde22c31 | |||
| 9715293773 | |||
| f6d227946b | |||
| 6f7a8fa4d4 | |||
| 622f6a2638 | |||
| e3162b6164 | |||
| 9d1e3ab2f0 | |||
| dd98356479 | |||
| 9307f44601 | |||
| b9f69e4aa1 | |||
| 7a4e7977c3 | |||
| 72fdc255e7 | |||
| 63957ff22a | |||
| 83d42dc489 | |||
| 984baa6bb4 | |||
| 34982c122f | |||
| fb11c0c27c | |||
| 27cc67d2cf | |||
| 0fe13439b6 | |||
| fb9caec663 | |||
| e3e83630b5 | |||
| c1f06604e3 | |||
| 9091cc7682 | |||
| 74ce7bc58a | |||
| 8b2d10209e | |||
| 770100b688 | |||
| 29630c55a9 | |||
| 3c2ec896f8 | |||
| 7828726ba4 | |||
| 458f3ceaed | |||
| 2c45ae7b4e | |||
| c1b8b72cf1 | |||
| bfc7fedbf9 | |||
| 965312f48e | |||
| 237aa46ddd | |||
| f8c64d3be5 |
42
.circleci/config.yml
Normal file
42
.circleci/config.yml
Normal file
@@ -0,0 +1,42 @@
|
||||
# Use the latest 2.1 version of CircleCI pipeline process engine.
|
||||
# See: https://circleci.com/docs/2.0/configuration-reference
|
||||
version: 2.1
|
||||
|
||||
# Define a job to be invoked later in a workflow.
|
||||
# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
|
||||
jobs:
|
||||
testbuild:
|
||||
working_directory: ~/repo
|
||||
# Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
|
||||
# See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
|
||||
docker:
|
||||
- image: cimg/go:1.22.2
|
||||
# Add steps to the job
|
||||
# See: https://circleci.com/docs/2.0/configuration-reference/#steps
|
||||
steps:
|
||||
- checkout
|
||||
- restore_cache:
|
||||
keys:
|
||||
- go-mod-v4-{{ checksum "go.sum" }}
|
||||
- run:
|
||||
name: Install Dependencies
|
||||
command: go mod download
|
||||
- save_cache:
|
||||
key: go-mod-v4-{{ checksum "go.sum" }}
|
||||
paths:
|
||||
- "/go/pkg/mod"
|
||||
- run:
|
||||
name: Run tests
|
||||
command: go test ./...
|
||||
- run:
|
||||
name: Run build
|
||||
command: go build ./...
|
||||
- store_test_results:
|
||||
path: /tmp/test-reports
|
||||
|
||||
# Invoke jobs via workflows
|
||||
# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
|
||||
workflows:
|
||||
testbuild:
|
||||
jobs:
|
||||
- testbuild
|
||||
8
.idea/.gitignore
generated
vendored
Normal file
8
.idea/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
# Default ignored files
|
||||
/shelf/
|
||||
/workspace.xml
|
||||
# Editor-based HTTP Client requests
|
||||
/httpRequests/
|
||||
# Datasource local storage ignored files
|
||||
/dataSources/
|
||||
/dataSources.local.xml
|
||||
9
.idea/goutils.iml
generated
Normal file
9
.idea/goutils.iml
generated
Normal file
@@ -0,0 +1,9 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<module type="WEB_MODULE" version="4">
|
||||
<component name="Go" enabled="true" />
|
||||
<component name="NewModuleRootManager">
|
||||
<content url="file://$MODULE_DIR$" />
|
||||
<orderEntry type="inheritedJdk" />
|
||||
<orderEntry type="sourceFolder" forTests="false" />
|
||||
</component>
|
||||
</module>
|
||||
8
.idea/modules.xml
generated
Normal file
8
.idea/modules.xml
generated
Normal file
@@ -0,0 +1,8 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="ProjectModuleManager">
|
||||
<modules>
|
||||
<module fileurl="file://$PROJECT_DIR$/.idea/goutils.iml" filepath="$PROJECT_DIR$/.idea/goutils.iml" />
|
||||
</modules>
|
||||
</component>
|
||||
</project>
|
||||
6
.idea/vcs.xml
generated
Normal file
6
.idea/vcs.xml
generated
Normal file
@@ -0,0 +1,6 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="VcsDirectoryMappings">
|
||||
<mapping directory="$PROJECT_DIR$" vcs="Git" />
|
||||
</component>
|
||||
</project>
|
||||
12
BUILD.bazel
12
BUILD.bazel
@@ -1,12 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
load("@bazel_gazelle//:def.bzl", "gazelle")
|
||||
|
||||
# gazelle:prefix git.wntrmute.dev/kyle/goutils
|
||||
gazelle(name = "gazelle")
|
||||
|
||||
go_library(
|
||||
name = "goutils",
|
||||
srcs = ["doc.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
33
LICENSE
33
LICENSE
@@ -1,4 +1,4 @@
|
||||
Copyright (c) 2015 Kyle Isom <kyle@tyrfingr.is>
|
||||
Copyright (c) 2015-2023 Kyle Isom <kyle@tyrfingr.is>
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
@@ -11,3 +11,34 @@ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
|
||||
=======================================================================
|
||||
The backoff package (written during my time at Cloudflare) is released
|
||||
under the following license:
|
||||
|
||||
Copyright (c) 2016 CloudFlare Inc.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
33
README.md
33
README.md
@@ -1,15 +1,18 @@
|
||||
GOUTILS
|
||||
|
||||
### Note: this repo is archived and not being maintained here anymore.
|
||||
|
||||
This is a collection of small utility code I've written in Go; the `cmd/`
|
||||
directory has a number of command-line utilities. Rather than keep all
|
||||
of these in superfluous repositories of their own, I'm putting them here.
|
||||
of these in superfluous repositories of their own, or rewriting them
|
||||
for each project, I'm putting them here.
|
||||
|
||||
The project can be built with the standard Go tooling, or it can be built
|
||||
with Bazel.
|
||||
|
||||
Contents:
|
||||
|
||||
ahash/ Provides hashes from string algorithm specifiers.
|
||||
assert/ Error handling, assertion-style.
|
||||
backoff/ Implementation of an intelligent backoff strategy.
|
||||
cmd/
|
||||
atping/ Automated TCP ping, meant for putting in cronjobs.
|
||||
certchain/ Display the certificate chain from a
|
||||
@@ -25,33 +28,47 @@ Contents:
|
||||
cruntar/ Untar an archive with hard links, copying instead of
|
||||
linking.
|
||||
csrpubdump/ Dump the public key from an X.509 certificate request.
|
||||
data_sync/ Sync the user's homedir to external storage.
|
||||
diskimg/ Write a disk image to a device.
|
||||
eig/ EEPROM image generator.
|
||||
fragment/ Print a fragment of a file.
|
||||
jlp/ JSON linter/prettifier.
|
||||
kgz/ Custom gzip compressor / decompressor that handles 99%
|
||||
of my use cases.
|
||||
parts/ Simple parts database management for my collection of
|
||||
electronic components.
|
||||
pem2bin/ Dump the binary body of a PEM-encoded block.
|
||||
pembody/ Print the body of a PEM certificate.
|
||||
pemit/ Dump data to a PEM file.
|
||||
showimp/ List the external (e.g. non-stdlib and outside the
|
||||
current working directory) imports for a Go file.
|
||||
readchain/ Print the common name for the certificates
|
||||
in a bundle.
|
||||
renfnv/ Rename a file to base32-encoded 64-bit FNV-1a hash.
|
||||
rhash/ Compute the digest of remote files.
|
||||
showimp Display the external imports in a package.
|
||||
showimp/ List the external (e.g. non-stdlib and outside the
|
||||
current working directory) imports for a Go file.
|
||||
ski Display the SKI for PEM-encoded TLS material.
|
||||
sprox/ Simple TCP proxy.
|
||||
stealchain/ Dump the verified chain from a TLS
|
||||
connection.
|
||||
connection to a server.
|
||||
stealchain- Dump the verified chain from a TLS
|
||||
server/ connection from a client.
|
||||
subjhash/ Print or match subject info from a certificate.
|
||||
tlskeypair/ Check whether a TLS certificate and key file match.
|
||||
utc/ Convert times to UTC.
|
||||
yamll/ A small YAML linter.
|
||||
config/ A simple global configuration system where configuration
|
||||
data is pulled from a file or an environment variable
|
||||
transparently.
|
||||
dbg/ A debug printer.
|
||||
die/ Death of a program.
|
||||
fileutil/ Common file functions.
|
||||
lib/ Commonly-useful functions for writing Go programs.
|
||||
logging/ A logging library.
|
||||
mwc/ MultiwriteCloser implementation.
|
||||
rand/ Utilities for working with math/rand.
|
||||
sbuf/ A byte buffer that can be wiped.
|
||||
seekbuf/ A read-seekable byte buffer.
|
||||
syslog/ Syslog-type logging.
|
||||
tee/ Emulate tee(1)'s functionality in io.Writers.
|
||||
testio/ Various I/O utilities useful during testing.
|
||||
testutil/ Various utility functions useful during testing.
|
||||
@@ -60,4 +77,4 @@ Contents:
|
||||
Each program should have a small README in the directory with more
|
||||
information.
|
||||
|
||||
All code here is licensed under the MIT license.
|
||||
All code here is licensed under the ISC license.
|
||||
|
||||
39
WORKSPACE
39
WORKSPACE
@@ -1,39 +0,0 @@
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "io_bazel_rules_go",
|
||||
sha256 = "6b65cb7917b4d1709f9410ffe00ecf3e160edf674b78c54a894471320862184f",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
|
||||
],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "bazel_gazelle",
|
||||
sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
|
||||
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
|
||||
|
||||
############################################################
|
||||
# Define your own dependencies here using go_repository.
|
||||
# Else, dependencies declared by rules_go/gazelle will be used.
|
||||
# The first declaration of an external repository "wins".
|
||||
############################################################
|
||||
|
||||
load("//:deps.bzl", "go_dependencies")
|
||||
|
||||
# gazelle:repository_macro deps.bzl%go_dependencies
|
||||
go_dependencies()
|
||||
|
||||
go_rules_dependencies()
|
||||
|
||||
go_register_toolchains(version = "1.20.4")
|
||||
|
||||
gazelle_dependencies()
|
||||
@@ -1,23 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "ahash",
|
||||
srcs = ["ahash.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/ahash",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@ht_sr_git_kisom_goutils//assert:go_default_library",
|
||||
"@org_golang_x_crypto//blake2b",
|
||||
"@org_golang_x_crypto//blake2s",
|
||||
"@org_golang_x_crypto//md4",
|
||||
"@org_golang_x_crypto//ripemd160",
|
||||
"@org_golang_x_crypto//sha3",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "ahash_test",
|
||||
srcs = ["ahash_test.go"],
|
||||
embed = [":ahash"],
|
||||
deps = ["@ht_sr_git_kisom_goutils//assert:go_default_library"],
|
||||
)
|
||||
@@ -45,13 +45,6 @@ func sha512Slicer(bs []byte) []byte {
|
||||
return sum[:]
|
||||
}
|
||||
|
||||
var sliceFunctions = map[string]func([]byte) []byte{
|
||||
"sha224": sha224Slicer,
|
||||
"sha256": sha256Slicer,
|
||||
"sha384": sha384Slicer,
|
||||
"sha512": sha512Slicer,
|
||||
}
|
||||
|
||||
// Hash represents a generic hash function that may or may not be secure. It
|
||||
// satisfies the hash.Hash interface.
|
||||
type Hash struct {
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "assert",
|
||||
srcs = ["assert.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/assert",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -94,7 +94,7 @@ func NoError(err error, s ...string) {
|
||||
}
|
||||
|
||||
if nil != err {
|
||||
die(err.Error())
|
||||
die(err.Error(), s...)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -170,5 +170,5 @@ func ErrorEqT(t *testing.T, expected, actual error) {
|
||||
should = fmt.Sprintf("have '%s'", actual)
|
||||
}
|
||||
|
||||
die(fmt.Sprintf("assert.Error2: expected '%s', but %s", expected, should))
|
||||
t.Fatalf("assert.Error2: expected '%s', but %s", expected, should)
|
||||
}
|
||||
|
||||
24
backoff/LICENSE
Normal file
24
backoff/LICENSE
Normal file
@@ -0,0 +1,24 @@
|
||||
Copyright (c) 2016 CloudFlare Inc.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
83
backoff/README.md
Normal file
83
backoff/README.md
Normal file
@@ -0,0 +1,83 @@
|
||||
# backoff
|
||||
## Go implementation of "Exponential Backoff And Jitter"
|
||||
|
||||
This package implements the backoff strategy described in the AWS
|
||||
Architecture Blog article
|
||||
["Exponential Backoff And Jitter"](http://www.awsarchitectureblog.com/2015/03/backoff.html). Essentially,
|
||||
the backoff has an interval `time.Duration`; the *n<sup>th</sup>* call
|
||||
to backoff will return an a `time.Duration` that is *2 <sup>n</sup> *
|
||||
interval*. If jitter is enabled (which is the default behaviour), the
|
||||
duration is a random value between 0 and *2 <sup>n</sup> * interval*.
|
||||
The backoff is configured with a maximum duration that will not be
|
||||
exceeded; e.g., by default, the longest duration returned is
|
||||
`backoff.DefaultMaxDuration`.
|
||||
|
||||
## Usage
|
||||
|
||||
A `Backoff` is initialised with a call to `New`. Using zero values
|
||||
causes it to use `DefaultMaxDuration` and `DefaultInterval` as the
|
||||
maximum duration and interval.
|
||||
|
||||
```
|
||||
package something
|
||||
|
||||
import "github.com/cloudflare/backoff"
|
||||
|
||||
func retryable() {
|
||||
b := backoff.New(0, 0)
|
||||
for {
|
||||
err := someOperation()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
|
||||
log.Printf("error in someOperation: %v", err)
|
||||
<-time.After(b.Duration())
|
||||
}
|
||||
|
||||
log.Printf("succeeded after %d tries", b.Tries()+1)
|
||||
b.Reset()
|
||||
}
|
||||
```
|
||||
|
||||
It can also be used to rate limit code that should retry infinitely, but which does not
|
||||
use `Backoff` itself.
|
||||
|
||||
```
|
||||
package something
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/cloudflare/backoff"
|
||||
)
|
||||
|
||||
func retryable() {
|
||||
b := backoff.New(0, 0)
|
||||
b.SetDecay(30 * time.Second)
|
||||
|
||||
for {
|
||||
// b will reset if someOperation returns later than
|
||||
// the last call to b.Duration() + 30s.
|
||||
err := someOperation()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
|
||||
log.Printf("error in someOperation: %v", err)
|
||||
<-time.After(b.Duration())
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Tunables
|
||||
|
||||
* `NewWithoutJitter` creates a Backoff that doesn't use jitter.
|
||||
|
||||
The default behaviour is controlled by two variables:
|
||||
|
||||
* `DefaultInterval` sets the base interval for backoffs created with
|
||||
the zero `time.Duration` value in the `Interval` field.
|
||||
* `DefaultMaxDuration` sets the maximum duration for backoffs created
|
||||
with the zero `time.Duration` value in the `MaxDuration` field.
|
||||
|
||||
197
backoff/backoff.go
Normal file
197
backoff/backoff.go
Normal file
@@ -0,0 +1,197 @@
|
||||
// Package backoff contains an implementation of an intelligent backoff
|
||||
// strategy. It is based on the approach in the AWS architecture blog
|
||||
// article titled "Exponential Backoff And Jitter", which is found at
|
||||
// http://www.awsarchitectureblog.com/2015/03/backoff.html.
|
||||
//
|
||||
// Essentially, the backoff has an interval `time.Duration`; the nth
|
||||
// call to backoff will return a `time.Duration` that is 2^n *
|
||||
// interval. If jitter is enabled (which is the default behaviour),
|
||||
// the duration is a random value between 0 and 2^n * interval. The
|
||||
// backoff is configured with a maximum duration that will not be
|
||||
// exceeded.
|
||||
//
|
||||
// The `New` function will attempt to use the system's cryptographic
|
||||
// random number generator to seed a Go math/rand random number
|
||||
// source. If this fails, the package will panic on startup.
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"math"
|
||||
mrand "math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var prngMu sync.Mutex
|
||||
var prng *mrand.Rand
|
||||
|
||||
// DefaultInterval is used when a Backoff is initialised with a
|
||||
// zero-value Interval.
|
||||
var DefaultInterval = 5 * time.Minute
|
||||
|
||||
// DefaultMaxDuration is maximum amount of time that the backoff will
|
||||
// delay for.
|
||||
var DefaultMaxDuration = 6 * time.Hour
|
||||
|
||||
// A Backoff contains the information needed to intelligently backoff
|
||||
// and retry operations using an exponential backoff algorithm. It should
|
||||
// be initialised with a call to `New`.
|
||||
//
|
||||
// Only use a Backoff from a single goroutine, it is not safe for concurrent
|
||||
// access.
|
||||
type Backoff struct {
|
||||
// maxDuration is the largest possible duration that can be
|
||||
// returned from a call to Duration.
|
||||
maxDuration time.Duration
|
||||
|
||||
// interval controls the time step for backing off.
|
||||
interval time.Duration
|
||||
|
||||
// noJitter controls whether to use the "Full Jitter"
|
||||
// improvement to attempt to smooth out spikes in a high
|
||||
// contention scenario. If noJitter is set to true, no
|
||||
// jitter will be introduced.
|
||||
noJitter bool
|
||||
|
||||
// decay controls the decay of n. If it is non-zero, n is
|
||||
// reset if more than the last backoff + decay has elapsed since
|
||||
// the last try.
|
||||
decay time.Duration
|
||||
|
||||
n uint64
|
||||
lastTry time.Time
|
||||
}
|
||||
|
||||
// New creates a new backoff with the specified max duration and
|
||||
// interval. Zero values may be used to use the default values.
|
||||
//
|
||||
// Panics if either max or interval is negative.
|
||||
func New(max time.Duration, interval time.Duration) *Backoff {
|
||||
if max < 0 || interval < 0 {
|
||||
panic("backoff: max or interval is negative")
|
||||
}
|
||||
|
||||
b := &Backoff{
|
||||
maxDuration: max,
|
||||
interval: interval,
|
||||
}
|
||||
b.setup()
|
||||
return b
|
||||
}
|
||||
|
||||
// NewWithoutJitter works similarly to New, except that the created
|
||||
// Backoff will not use jitter.
|
||||
func NewWithoutJitter(max time.Duration, interval time.Duration) *Backoff {
|
||||
b := New(max, interval)
|
||||
b.noJitter = true
|
||||
return b
|
||||
}
|
||||
|
||||
func init() {
|
||||
var buf [8]byte
|
||||
var n int64
|
||||
|
||||
_, err := io.ReadFull(rand.Reader, buf[:])
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
n = int64(binary.LittleEndian.Uint64(buf[:]))
|
||||
|
||||
src := mrand.NewSource(n)
|
||||
prng = mrand.New(src)
|
||||
}
|
||||
|
||||
func (b *Backoff) setup() {
|
||||
if b.interval == 0 {
|
||||
b.interval = DefaultInterval
|
||||
}
|
||||
|
||||
if b.maxDuration == 0 {
|
||||
b.maxDuration = DefaultMaxDuration
|
||||
}
|
||||
}
|
||||
|
||||
// Duration returns a time.Duration appropriate for the backoff,
|
||||
// incrementing the attempt counter.
|
||||
func (b *Backoff) Duration() time.Duration {
|
||||
b.setup()
|
||||
|
||||
b.decayN()
|
||||
|
||||
t := b.duration(b.n)
|
||||
|
||||
if b.n < math.MaxUint64 {
|
||||
b.n++
|
||||
}
|
||||
|
||||
if !b.noJitter {
|
||||
prngMu.Lock()
|
||||
t = time.Duration(prng.Int63n(int64(t)))
|
||||
prngMu.Unlock()
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
// requires b to be locked.
|
||||
func (b *Backoff) duration(n uint64) (t time.Duration) {
|
||||
// Saturate pow
|
||||
pow := time.Duration(math.MaxInt64)
|
||||
if n < 63 {
|
||||
pow = 1 << n
|
||||
}
|
||||
|
||||
t = b.interval * pow
|
||||
if t/pow != b.interval || t > b.maxDuration {
|
||||
t = b.maxDuration
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Reset resets the attempt counter of a backoff.
|
||||
//
|
||||
// It should be called when the rate-limited action succeeds.
|
||||
func (b *Backoff) Reset() {
|
||||
b.lastTry = time.Time{}
|
||||
b.n = 0
|
||||
}
|
||||
|
||||
// SetDecay sets the duration after which the try counter will be reset.
|
||||
// Panics if decay is smaller than 0.
|
||||
//
|
||||
// The decay only kicks in if at least the last backoff + decay has elapsed
|
||||
// since the last try.
|
||||
func (b *Backoff) SetDecay(decay time.Duration) {
|
||||
if decay < 0 {
|
||||
panic("backoff: decay < 0")
|
||||
}
|
||||
|
||||
b.decay = decay
|
||||
}
|
||||
|
||||
// requires b to be locked
|
||||
func (b *Backoff) decayN() {
|
||||
if b.decay == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if b.lastTry.IsZero() {
|
||||
b.lastTry = time.Now()
|
||||
return
|
||||
}
|
||||
|
||||
lastDuration := b.duration(b.n - 1)
|
||||
decayed := time.Since(b.lastTry) > lastDuration+b.decay
|
||||
b.lastTry = time.Now()
|
||||
|
||||
if !decayed {
|
||||
return
|
||||
}
|
||||
|
||||
b.n = 0
|
||||
}
|
||||
175
backoff/backoff_test.go
Normal file
175
backoff/backoff_test.go
Normal file
@@ -0,0 +1,175 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// If given New with 0's and no jitter, ensure that certain invariants are met:
|
||||
//
|
||||
// - the default max duration and interval should be used
|
||||
// - noJitter should be true
|
||||
// - the RNG should not be initialised
|
||||
// - the first duration should be equal to the default interval
|
||||
func TestDefaults(t *testing.T) {
|
||||
b := NewWithoutJitter(0, 0)
|
||||
|
||||
if b.maxDuration != DefaultMaxDuration {
|
||||
t.Fatalf("expected new backoff to use the default max duration (%s), but have %s", DefaultMaxDuration, b.maxDuration)
|
||||
}
|
||||
|
||||
if b.interval != DefaultInterval {
|
||||
t.Fatalf("exepcted new backoff to use the default interval (%s), but have %s", DefaultInterval, b.interval)
|
||||
}
|
||||
|
||||
if b.noJitter != true {
|
||||
t.Fatal("backoff should have been initialised without jitter")
|
||||
}
|
||||
|
||||
dur := b.Duration()
|
||||
if dur != DefaultInterval {
|
||||
t.Fatalf("expected first duration to be %s, have %s", DefaultInterval, dur)
|
||||
}
|
||||
}
|
||||
|
||||
// Given a zero-value initialised Backoff, it should be transparently
|
||||
// setup.
|
||||
func TestSetup(t *testing.T) {
|
||||
b := new(Backoff)
|
||||
dur := b.Duration()
|
||||
if dur < 0 || dur > (5*time.Minute) {
|
||||
t.Fatalf("want duration between 0 and 5 minutes, have %s", dur)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that tries incremenets as expected.
|
||||
func TestTries(t *testing.T) {
|
||||
b := NewWithoutJitter(5, 1)
|
||||
|
||||
for i := uint64(0); i < 3; i++ {
|
||||
if b.n != i {
|
||||
t.Fatalf("want tries=%d, have tries=%d", i, b.n)
|
||||
}
|
||||
|
||||
pow := 1 << i
|
||||
expected := time.Duration(pow)
|
||||
dur := b.Duration()
|
||||
if dur != expected {
|
||||
t.Fatalf("want duration=%d, have duration=%d at i=%d", expected, dur, i)
|
||||
}
|
||||
}
|
||||
|
||||
for i := uint(3); i < 5; i++ {
|
||||
dur := b.Duration()
|
||||
if dur != 5 {
|
||||
t.Fatalf("want duration=5, have %d at i=%d", dur, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a call to Reset will actually reset the Backoff.
|
||||
func TestReset(t *testing.T) {
|
||||
const iter = 10
|
||||
b := New(1000, 1)
|
||||
for i := 0; i < iter; i++ {
|
||||
_ = b.Duration()
|
||||
}
|
||||
|
||||
if b.n != iter {
|
||||
t.Fatalf("expected tries=%d, have tries=%d", iter, b.n)
|
||||
}
|
||||
|
||||
b.Reset()
|
||||
if b.n != 0 {
|
||||
t.Fatalf("expected tries=0 after reset, have tries=%d", b.n)
|
||||
}
|
||||
}
|
||||
|
||||
const decay = 5 * time.Millisecond
|
||||
const max = 10 * time.Millisecond
|
||||
const interval = time.Millisecond
|
||||
|
||||
func TestDecay(t *testing.T) {
|
||||
const iter = 10
|
||||
|
||||
b := NewWithoutJitter(max, 1)
|
||||
b.SetDecay(decay)
|
||||
|
||||
var backoff time.Duration
|
||||
for i := 0; i < iter; i++ {
|
||||
backoff = b.Duration()
|
||||
}
|
||||
|
||||
if b.n != iter {
|
||||
t.Fatalf("expected tries=%d, have tries=%d", iter, b.n)
|
||||
}
|
||||
|
||||
// Don't decay below backoff
|
||||
b.lastTry = time.Now().Add(-backoff + 1)
|
||||
backoff = b.Duration()
|
||||
if b.n != iter+1 {
|
||||
t.Fatalf("expected tries=%d, have tries=%d", iter+1, b.n)
|
||||
}
|
||||
|
||||
// Reset after backoff + decay
|
||||
b.lastTry = time.Now().Add(-backoff - decay)
|
||||
b.Duration()
|
||||
if b.n != 1 {
|
||||
t.Fatalf("expected tries=%d, have tries=%d", 1, b.n)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that decay works even if the retry counter is saturated.
|
||||
func TestDecaySaturation(t *testing.T) {
|
||||
b := NewWithoutJitter(1<<2, 1)
|
||||
b.SetDecay(decay)
|
||||
|
||||
var duration time.Duration
|
||||
for i := 0; i <= 2; i++ {
|
||||
duration = b.Duration()
|
||||
}
|
||||
|
||||
if duration != 1<<2 {
|
||||
t.Fatalf("expected duration=%v, have duration=%v", 1<<2, duration)
|
||||
}
|
||||
|
||||
b.lastTry = time.Now().Add(-duration - decay)
|
||||
b.n = math.MaxUint64
|
||||
|
||||
duration = b.Duration()
|
||||
if duration != 1 {
|
||||
t.Errorf("expected duration=%v, have duration=%v", 1, duration)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleBackoff_SetDecay() {
|
||||
b := NewWithoutJitter(max, interval)
|
||||
b.SetDecay(decay)
|
||||
|
||||
// try 0
|
||||
fmt.Println(b.Duration())
|
||||
|
||||
// try 1
|
||||
fmt.Println(b.Duration())
|
||||
|
||||
// try 2
|
||||
duration := b.Duration()
|
||||
fmt.Println(duration)
|
||||
|
||||
// try 3, below decay
|
||||
time.Sleep(duration)
|
||||
duration = b.Duration()
|
||||
fmt.Println(duration)
|
||||
|
||||
// try 4, resets
|
||||
time.Sleep(duration + decay)
|
||||
fmt.Println(b.Duration())
|
||||
|
||||
// Output: 1ms
|
||||
// 2ms
|
||||
// 4ms
|
||||
// 8ms
|
||||
// 1ms
|
||||
}
|
||||
79
certlib/certerr/errors.go
Normal file
79
certlib/certerr/errors.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package certerr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ErrEmptyCertificate indicates that a certificate could not be processed
|
||||
// because there was no data to process.
|
||||
var ErrEmptyCertificate = errors.New("certlib: empty certificate")
|
||||
|
||||
type ErrorSourceType uint8
|
||||
|
||||
func (t ErrorSourceType) String() string {
|
||||
switch t {
|
||||
case ErrorSourceCertificate:
|
||||
return "certificate"
|
||||
case ErrorSourcePrivateKey:
|
||||
return "private key"
|
||||
case ErrorSourceCSR:
|
||||
return "CSR"
|
||||
case ErrorSourceSCTList:
|
||||
return "SCT list"
|
||||
case ErrorSourceKeypair:
|
||||
return "TLS keypair"
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown error source %d", t))
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
ErrorSourceCertificate ErrorSourceType = 1
|
||||
ErrorSourcePrivateKey ErrorSourceType = 2
|
||||
ErrorSourceCSR ErrorSourceType = 3
|
||||
ErrorSourceSCTList ErrorSourceType = 4
|
||||
ErrorSourceKeypair ErrorSourceType = 5
|
||||
)
|
||||
|
||||
// InvalidPEMType is used to indicate that we were expecting one type of PEM
|
||||
// file, but saw another.
|
||||
type InvalidPEMType struct {
|
||||
have string
|
||||
want []string
|
||||
}
|
||||
|
||||
func (err *InvalidPEMType) Error() string {
|
||||
if len(err.want) == 1 {
|
||||
return fmt.Sprintf("invalid PEM type: have %s, expected %s", err.have, err.want[0])
|
||||
} else {
|
||||
return fmt.Sprintf("invalid PEM type: have %s, expected one of %s", err.have, strings.Join(err.want, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
// ErrInvalidPEMType returns a new InvalidPEMType error.
|
||||
func ErrInvalidPEMType(have string, want ...string) error {
|
||||
return &InvalidPEMType{
|
||||
have: have,
|
||||
want: want,
|
||||
}
|
||||
}
|
||||
|
||||
func LoadingError(t ErrorSourceType, err error) error {
|
||||
return fmt.Errorf("failed to load %s from disk: %w", t, err)
|
||||
}
|
||||
|
||||
func ParsingError(t ErrorSourceType, err error) error {
|
||||
return fmt.Errorf("failed to parse %s: %w", t, err)
|
||||
}
|
||||
|
||||
func DecodeError(t ErrorSourceType, err error) error {
|
||||
return fmt.Errorf("failed to decode %s: %w", t, err)
|
||||
}
|
||||
|
||||
func VerifyError(t ErrorSourceType, err error) error {
|
||||
return fmt.Errorf("failed to verify %s: %w", t, err)
|
||||
}
|
||||
|
||||
var ErrEncryptedPrivateKey = errors.New("private key is encrypted")
|
||||
85
certlib/certlib.go
Normal file
85
certlib/certlib.go
Normal file
@@ -0,0 +1,85 @@
|
||||
package certlib
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||
)
|
||||
|
||||
// ReadCertificate reads a DER or PEM-encoded certificate from the
|
||||
// byte slice.
|
||||
func ReadCertificate(in []byte) (cert *x509.Certificate, rest []byte, err error) {
|
||||
if len(in) == 0 {
|
||||
err = certerr.ErrEmptyCertificate
|
||||
return
|
||||
}
|
||||
|
||||
if in[0] == '-' {
|
||||
p, remaining := pem.Decode(in)
|
||||
if p == nil {
|
||||
err = errors.New("certlib: invalid PEM file")
|
||||
return
|
||||
}
|
||||
|
||||
rest = remaining
|
||||
if p.Type != "CERTIFICATE" {
|
||||
err = certerr.ErrInvalidPEMType(p.Type, "CERTIFICATE")
|
||||
return
|
||||
}
|
||||
|
||||
in = p.Bytes
|
||||
}
|
||||
|
||||
cert, err = x509.ParseCertificate(in)
|
||||
return
|
||||
}
|
||||
|
||||
// ReadCertificates tries to read all the certificates in a
|
||||
// PEM-encoded collection.
|
||||
func ReadCertificates(in []byte) (certs []*x509.Certificate, err error) {
|
||||
var cert *x509.Certificate
|
||||
for {
|
||||
cert, in, err = ReadCertificate(in)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if cert == nil {
|
||||
break
|
||||
}
|
||||
|
||||
certs = append(certs, cert)
|
||||
if len(in) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return certs, err
|
||||
}
|
||||
|
||||
// LoadCertificate tries to read a single certificate from disk. If
|
||||
// the file contains multiple certificates (e.g. a chain), only the
|
||||
// first certificate is returned.
|
||||
func LoadCertificate(path string) (*x509.Certificate, error) {
|
||||
in, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cert, _, err := ReadCertificate(in)
|
||||
return cert, err
|
||||
}
|
||||
|
||||
// LoadCertificates tries to read all the certificates in a file,
|
||||
// returning them in the order that it found them in the file.
|
||||
func LoadCertificates(path string) ([]*x509.Certificate, error) {
|
||||
in, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ReadCertificates(in)
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package lib
|
||||
package certlib
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
75
certlib/der_helpers.go
Normal file
75
certlib/der_helpers.go
Normal file
@@ -0,0 +1,75 @@
|
||||
package certlib
|
||||
|
||||
// Originally from CFSSL, mostly written by me originally, and licensed under:
|
||||
|
||||
/*
|
||||
Copyright (c) 2014 CloudFlare Inc.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
// I've modified it for use in my own code e.g. by removing the CFSSL errors
|
||||
// and replacing them with sane ones.
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||
)
|
||||
|
||||
// ParsePrivateKeyDER parses a PKCS #1, PKCS #8, ECDSA, or Ed25519 DER-encoded
|
||||
// private key. The key must not be in PEM format. If an error is returned, it
|
||||
// may contain information about the private key, so care should be taken when
|
||||
// displaying it directly.
|
||||
func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) {
|
||||
generalKey, err := x509.ParsePKCS8PrivateKey(keyDER)
|
||||
if err != nil {
|
||||
generalKey, err = x509.ParsePKCS1PrivateKey(keyDER)
|
||||
if err != nil {
|
||||
generalKey, err = x509.ParseECPrivateKey(keyDER)
|
||||
if err != nil {
|
||||
generalKey, err = ParseEd25519PrivateKey(keyDER)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch generalKey := generalKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return generalKey, nil
|
||||
case *ecdsa.PrivateKey:
|
||||
return generalKey, nil
|
||||
case ed25519.PrivateKey:
|
||||
return generalKey, nil
|
||||
default:
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %t", generalKey))
|
||||
}
|
||||
}
|
||||
164
certlib/ed25519.go
Normal file
164
certlib/ed25519.go
Normal file
@@ -0,0 +1,164 @@
|
||||
package certlib
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ed25519"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Originally from CFSSL, mostly written by me originally, and licensed under:
|
||||
|
||||
/*
|
||||
Copyright (c) 2014 CloudFlare Inc.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
// I've modified it for use in my own code e.g. by removing the CFSSL errors
|
||||
// and replacing them with sane ones.
|
||||
|
||||
var errEd25519WrongID = errors.New("incorrect object identifier")
|
||||
var errEd25519WrongKeyType = errors.New("incorrect key type")
|
||||
|
||||
// ed25519OID is the OID for the Ed25519 signature scheme: see
|
||||
// https://datatracker.ietf.org/doc/draft-ietf-curdle-pkix-04.
|
||||
var ed25519OID = asn1.ObjectIdentifier{1, 3, 101, 112}
|
||||
|
||||
// subjectPublicKeyInfo reflects the ASN.1 object defined in the X.509 standard.
|
||||
//
|
||||
// This is defined in crypto/x509 as "publicKeyInfo".
|
||||
type subjectPublicKeyInfo struct {
|
||||
Algorithm pkix.AlgorithmIdentifier
|
||||
PublicKey asn1.BitString
|
||||
}
|
||||
|
||||
// MarshalEd25519PublicKey creates a DER-encoded SubjectPublicKeyInfo for an
|
||||
// ed25519 public key, as defined in
|
||||
// https://tools.ietf.org/html/draft-ietf-curdle-pkix-04. This is analogous to
|
||||
// MarshalPKIXPublicKey in crypto/x509, which doesn't currently support Ed25519.
|
||||
func MarshalEd25519PublicKey(pk crypto.PublicKey) ([]byte, error) {
|
||||
pub, ok := pk.(ed25519.PublicKey)
|
||||
if !ok {
|
||||
return nil, errEd25519WrongKeyType
|
||||
}
|
||||
|
||||
spki := subjectPublicKeyInfo{
|
||||
Algorithm: pkix.AlgorithmIdentifier{
|
||||
Algorithm: ed25519OID,
|
||||
},
|
||||
PublicKey: asn1.BitString{
|
||||
BitLength: len(pub) * 8,
|
||||
Bytes: pub,
|
||||
},
|
||||
}
|
||||
|
||||
return asn1.Marshal(spki)
|
||||
}
|
||||
|
||||
// ParseEd25519PublicKey returns the Ed25519 public key encoded by the input.
|
||||
func ParseEd25519PublicKey(der []byte) (crypto.PublicKey, error) {
|
||||
var spki subjectPublicKeyInfo
|
||||
if rest, err := asn1.Unmarshal(der, &spki); err != nil {
|
||||
return nil, err
|
||||
} else if len(rest) > 0 {
|
||||
return nil, errors.New("SubjectPublicKeyInfo too long")
|
||||
}
|
||||
|
||||
if !spki.Algorithm.Algorithm.Equal(ed25519OID) {
|
||||
return nil, errEd25519WrongID
|
||||
}
|
||||
|
||||
if spki.PublicKey.BitLength != ed25519.PublicKeySize*8 {
|
||||
return nil, errors.New("SubjectPublicKeyInfo PublicKey length mismatch")
|
||||
}
|
||||
|
||||
return ed25519.PublicKey(spki.PublicKey.Bytes), nil
|
||||
}
|
||||
|
||||
// oneAsymmetricKey reflects the ASN.1 structure for storing private keys in
|
||||
// https://tools.ietf.org/html/draft-ietf-curdle-pkix-04, excluding the optional
|
||||
// fields, which we don't use here.
|
||||
//
|
||||
// This is identical to pkcs8 in crypto/x509.
|
||||
type oneAsymmetricKey struct {
|
||||
Version int
|
||||
Algorithm pkix.AlgorithmIdentifier
|
||||
PrivateKey []byte
|
||||
}
|
||||
|
||||
// curvePrivateKey is the innter type of the PrivateKey field of
|
||||
// oneAsymmetricKey.
|
||||
type curvePrivateKey []byte
|
||||
|
||||
// MarshalEd25519PrivateKey returns a DER encoding of the input private key as
|
||||
// specified in https://tools.ietf.org/html/draft-ietf-curdle-pkix-04.
|
||||
func MarshalEd25519PrivateKey(sk crypto.PrivateKey) ([]byte, error) {
|
||||
priv, ok := sk.(ed25519.PrivateKey)
|
||||
if !ok {
|
||||
return nil, errEd25519WrongKeyType
|
||||
}
|
||||
|
||||
// Marshal the innter CurvePrivateKey.
|
||||
curvePrivateKey, err := asn1.Marshal(priv.Seed())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Marshal the OneAsymmetricKey.
|
||||
asym := oneAsymmetricKey{
|
||||
Version: 0,
|
||||
Algorithm: pkix.AlgorithmIdentifier{
|
||||
Algorithm: ed25519OID,
|
||||
},
|
||||
PrivateKey: curvePrivateKey,
|
||||
}
|
||||
return asn1.Marshal(asym)
|
||||
}
|
||||
|
||||
// ParseEd25519PrivateKey returns the Ed25519 private key encoded by the input.
|
||||
func ParseEd25519PrivateKey(der []byte) (crypto.PrivateKey, error) {
|
||||
asym := new(oneAsymmetricKey)
|
||||
if rest, err := asn1.Unmarshal(der, asym); err != nil {
|
||||
return nil, err
|
||||
} else if len(rest) > 0 {
|
||||
return nil, errors.New("OneAsymmetricKey too long")
|
||||
}
|
||||
|
||||
// Check that the key type is correct.
|
||||
if !asym.Algorithm.Algorithm.Equal(ed25519OID) {
|
||||
return nil, errEd25519WrongID
|
||||
}
|
||||
|
||||
// Unmarshal the inner CurvePrivateKey.
|
||||
seed := new(curvePrivateKey)
|
||||
if rest, err := asn1.Unmarshal(asym.PrivateKey, seed); err != nil {
|
||||
return nil, err
|
||||
} else if len(rest) > 0 {
|
||||
return nil, errors.New("CurvePrivateKey too long")
|
||||
}
|
||||
|
||||
return ed25519.NewKeyFromSeed(*seed), nil
|
||||
}
|
||||
630
certlib/helpers.go
Normal file
630
certlib/helpers.go
Normal file
@@ -0,0 +1,630 @@
|
||||
package certlib
|
||||
|
||||
// Originally from CFSSL, mostly written by me originally, and licensed under:
|
||||
|
||||
/*
|
||||
Copyright (c) 2014 CloudFlare Inc.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
// I've modified it for use in my own code e.g. by removing the CFSSL errors
|
||||
// and replacing them with sane ones.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/pkcs7"
|
||||
|
||||
ct "github.com/google/certificate-transparency-go"
|
||||
cttls "github.com/google/certificate-transparency-go/tls"
|
||||
ctx509 "github.com/google/certificate-transparency-go/x509"
|
||||
"golang.org/x/crypto/ocsp"
|
||||
"golang.org/x/crypto/pkcs12"
|
||||
)
|
||||
|
||||
// OneYear is a time.Duration representing a year's worth of seconds.
|
||||
const OneYear = 8760 * time.Hour
|
||||
|
||||
// OneDay is a time.Duration representing a day's worth of seconds.
|
||||
const OneDay = 24 * time.Hour
|
||||
|
||||
// DelegationUsage is the OID for the DelegationUseage extensions
|
||||
var DelegationUsage = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 44363, 44}
|
||||
|
||||
// DelegationExtension
|
||||
var DelegationExtension = pkix.Extension{
|
||||
Id: DelegationUsage,
|
||||
Critical: false,
|
||||
Value: []byte{0x05, 0x00}, // ASN.1 NULL
|
||||
}
|
||||
|
||||
// InclusiveDate returns the time.Time representation of a date - 1
|
||||
// nanosecond. This allows time.After to be used inclusively.
|
||||
func InclusiveDate(year int, month time.Month, day int) time.Time {
|
||||
return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond)
|
||||
}
|
||||
|
||||
// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop
|
||||
// issuing certificates valid for more than 5 years.
|
||||
var Jul2012 = InclusiveDate(2012, time.July, 01)
|
||||
|
||||
// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop
|
||||
// issuing certificates valid for more than 39 months.
|
||||
var Apr2015 = InclusiveDate(2015, time.April, 01)
|
||||
|
||||
// KeyLength returns the bit size of ECDSA or RSA PublicKey
|
||||
func KeyLength(key interface{}) int {
|
||||
if key == nil {
|
||||
return 0
|
||||
}
|
||||
if ecdsaKey, ok := key.(*ecdsa.PublicKey); ok {
|
||||
return ecdsaKey.Curve.Params().BitSize
|
||||
} else if rsaKey, ok := key.(*rsa.PublicKey); ok {
|
||||
return rsaKey.N.BitLen()
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// ExpiryTime returns the time when the certificate chain is expired.
|
||||
func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) {
|
||||
if len(chain) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
notAfter = chain[0].NotAfter
|
||||
for _, cert := range chain {
|
||||
if notAfter.After(cert.NotAfter) {
|
||||
notAfter = cert.NotAfter
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MonthsValid returns the number of months for which a certificate is valid.
|
||||
func MonthsValid(c *x509.Certificate) int {
|
||||
issued := c.NotBefore
|
||||
expiry := c.NotAfter
|
||||
years := (expiry.Year() - issued.Year())
|
||||
months := years*12 + int(expiry.Month()) - int(issued.Month())
|
||||
|
||||
// Round up if valid for less than a full month
|
||||
if expiry.Day() > issued.Day() {
|
||||
months++
|
||||
}
|
||||
return months
|
||||
}
|
||||
|
||||
// ValidExpiry determines if a certificate is valid for an acceptable
|
||||
// length of time per the CA/Browser Forum baseline requirements.
|
||||
// See https://cabforum.org/wp-content/uploads/CAB-Forum-BR-1.3.0.pdf
|
||||
func ValidExpiry(c *x509.Certificate) bool {
|
||||
issued := c.NotBefore
|
||||
|
||||
var maxMonths int
|
||||
switch {
|
||||
case issued.After(Apr2015):
|
||||
maxMonths = 39
|
||||
case issued.After(Jul2012):
|
||||
maxMonths = 60
|
||||
case issued.Before(Jul2012):
|
||||
maxMonths = 120
|
||||
}
|
||||
|
||||
if MonthsValid(c) > maxMonths {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// SignatureString returns the TLS signature string corresponding to
|
||||
// an X509 signature algorithm.
|
||||
func SignatureString(alg x509.SignatureAlgorithm) string {
|
||||
switch alg {
|
||||
case x509.MD2WithRSA:
|
||||
return "MD2WithRSA"
|
||||
case x509.MD5WithRSA:
|
||||
return "MD5WithRSA"
|
||||
case x509.SHA1WithRSA:
|
||||
return "SHA1WithRSA"
|
||||
case x509.SHA256WithRSA:
|
||||
return "SHA256WithRSA"
|
||||
case x509.SHA384WithRSA:
|
||||
return "SHA384WithRSA"
|
||||
case x509.SHA512WithRSA:
|
||||
return "SHA512WithRSA"
|
||||
case x509.DSAWithSHA1:
|
||||
return "DSAWithSHA1"
|
||||
case x509.DSAWithSHA256:
|
||||
return "DSAWithSHA256"
|
||||
case x509.ECDSAWithSHA1:
|
||||
return "ECDSAWithSHA1"
|
||||
case x509.ECDSAWithSHA256:
|
||||
return "ECDSAWithSHA256"
|
||||
case x509.ECDSAWithSHA384:
|
||||
return "ECDSAWithSHA384"
|
||||
case x509.ECDSAWithSHA512:
|
||||
return "ECDSAWithSHA512"
|
||||
default:
|
||||
return "Unknown Signature"
|
||||
}
|
||||
}
|
||||
|
||||
// HashAlgoString returns the hash algorithm name contains in the signature
|
||||
// method.
|
||||
func HashAlgoString(alg x509.SignatureAlgorithm) string {
|
||||
switch alg {
|
||||
case x509.MD2WithRSA:
|
||||
return "MD2"
|
||||
case x509.MD5WithRSA:
|
||||
return "MD5"
|
||||
case x509.SHA1WithRSA:
|
||||
return "SHA1"
|
||||
case x509.SHA256WithRSA:
|
||||
return "SHA256"
|
||||
case x509.SHA384WithRSA:
|
||||
return "SHA384"
|
||||
case x509.SHA512WithRSA:
|
||||
return "SHA512"
|
||||
case x509.DSAWithSHA1:
|
||||
return "SHA1"
|
||||
case x509.DSAWithSHA256:
|
||||
return "SHA256"
|
||||
case x509.ECDSAWithSHA1:
|
||||
return "SHA1"
|
||||
case x509.ECDSAWithSHA256:
|
||||
return "SHA256"
|
||||
case x509.ECDSAWithSHA384:
|
||||
return "SHA384"
|
||||
case x509.ECDSAWithSHA512:
|
||||
return "SHA512"
|
||||
default:
|
||||
return "Unknown Hash Algorithm"
|
||||
}
|
||||
}
|
||||
|
||||
// StringTLSVersion returns underlying enum values from human names for TLS
|
||||
// versions, defaults to current golang default of TLS 1.0
|
||||
func StringTLSVersion(version string) uint16 {
|
||||
switch version {
|
||||
case "1.2":
|
||||
return tls.VersionTLS12
|
||||
case "1.1":
|
||||
return tls.VersionTLS11
|
||||
default:
|
||||
return tls.VersionTLS10
|
||||
}
|
||||
}
|
||||
|
||||
// EncodeCertificatesPEM encodes a number of x509 certificates to PEM
|
||||
func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
|
||||
var buffer bytes.Buffer
|
||||
for _, cert := range certs {
|
||||
pem.Encode(&buffer, &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: cert.Raw,
|
||||
})
|
||||
}
|
||||
|
||||
return buffer.Bytes()
|
||||
}
|
||||
|
||||
// EncodeCertificatePEM encodes a single x509 certificates to PEM
|
||||
func EncodeCertificatePEM(cert *x509.Certificate) []byte {
|
||||
return EncodeCertificatesPEM([]*x509.Certificate{cert})
|
||||
}
|
||||
|
||||
// ParseCertificatesPEM parses a sequence of PEM-encoded certificate and returns them,
|
||||
// can handle PEM encoded PKCS #7 structures.
|
||||
func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
|
||||
var certs []*x509.Certificate
|
||||
var err error
|
||||
certsPEM = bytes.TrimSpace(certsPEM)
|
||||
for len(certsPEM) > 0 {
|
||||
var cert []*x509.Certificate
|
||||
cert, certsPEM, err = ParseOneCertificateFromPEM(certsPEM)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
} else if cert == nil {
|
||||
break
|
||||
}
|
||||
|
||||
certs = append(certs, cert...)
|
||||
}
|
||||
if len(certsPEM) > 0 {
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("trailing data at end of certificate"))
|
||||
}
|
||||
return certs, nil
|
||||
}
|
||||
|
||||
// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
|
||||
// either PKCS #7, PKCS #12, or raw x509.
|
||||
func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
|
||||
certsDER = bytes.TrimSpace(certsDER)
|
||||
pkcs7data, err := pkcs7.ParsePKCS7(certsDER)
|
||||
if err != nil {
|
||||
var pkcs12data interface{}
|
||||
certs = make([]*x509.Certificate, 1)
|
||||
pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password)
|
||||
if err != nil {
|
||||
certs, err = x509.ParseCertificates(certsDER)
|
||||
if err != nil {
|
||||
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
} else {
|
||||
key = pkcs12data.(crypto.Signer)
|
||||
}
|
||||
} else {
|
||||
if pkcs7data.ContentInfo != "SignedData" {
|
||||
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("can only extract certificates from signed data content info"))
|
||||
}
|
||||
certs = pkcs7data.Content.SignedData.Certificates
|
||||
}
|
||||
if certs == nil {
|
||||
return nil, key, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificates decoded"))
|
||||
}
|
||||
return certs, key, nil
|
||||
}
|
||||
|
||||
// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and check if it is self-signed.
|
||||
func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
||||
cert, err := ParseCertificatePEM(certPEM)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
|
||||
return nil, certerr.VerifyError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// ParseCertificatePEM parses and returns a PEM-encoded certificate,
|
||||
// can handle PEM encoded PKCS #7 structures.
|
||||
func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
||||
certPEM = bytes.TrimSpace(certPEM)
|
||||
cert, rest, err := ParseOneCertificateFromPEM(certPEM)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
} else if cert == nil {
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificate decoded"))
|
||||
} else if len(rest) > 0 {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("the PEM file should contain only one object"))
|
||||
} else if len(cert) > 1 {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("the PKCS7 object in the PEM file should contain only one certificate"))
|
||||
}
|
||||
return cert[0], nil
|
||||
}
|
||||
|
||||
// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object,
|
||||
// either a raw x509 certificate or a PKCS #7 structure possibly containing
|
||||
// multiple certificates, from the top of certsPEM, which itself may
|
||||
// contain multiple PEM encoded certificate objects.
|
||||
func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) {
|
||||
|
||||
block, rest := pem.Decode(certsPEM)
|
||||
if block == nil {
|
||||
return nil, rest, nil
|
||||
}
|
||||
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes)
|
||||
if err != nil {
|
||||
return nil, rest, err
|
||||
}
|
||||
if pkcs7data.ContentInfo != "SignedData" {
|
||||
return nil, rest, errors.New("only PKCS #7 Signed Data Content Info supported for certificate parsing")
|
||||
}
|
||||
certs := pkcs7data.Content.SignedData.Certificates
|
||||
if certs == nil {
|
||||
return nil, rest, errors.New("PKCS #7 structure contains no certificates")
|
||||
}
|
||||
return certs, rest, nil
|
||||
}
|
||||
var certs = []*x509.Certificate{cert}
|
||||
return certs, rest, nil
|
||||
}
|
||||
|
||||
// LoadPEMCertPool loads a pool of PEM certificates from file.
|
||||
func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
|
||||
if certsFile == "" {
|
||||
return nil, nil
|
||||
}
|
||||
pemCerts, err := os.ReadFile(certsFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return PEMToCertPool(pemCerts)
|
||||
}
|
||||
|
||||
// PEMToCertPool converts PEM certificates to a CertPool.
|
||||
func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
|
||||
if len(pemCerts) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
certPool := x509.NewCertPool()
|
||||
if !certPool.AppendCertsFromPEM(pemCerts) {
|
||||
return nil, errors.New("failed to load cert pool")
|
||||
}
|
||||
|
||||
return certPool, nil
|
||||
}
|
||||
|
||||
// ParsePrivateKeyPEM parses and returns a PEM-encoded private
|
||||
// key. The private key may be either an unencrypted PKCS#8, PKCS#1,
|
||||
// or elliptic private key.
|
||||
func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) {
|
||||
return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
|
||||
}
|
||||
|
||||
// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
|
||||
// key. The private key may be a potentially encrypted PKCS#8, PKCS#1,
|
||||
// or elliptic private key.
|
||||
func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) {
|
||||
keyDER, err := GetKeyDERFromPEM(keyPEM, password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ParsePrivateKeyDER(keyDER)
|
||||
}
|
||||
|
||||
// GetKeyDERFromPEM parses a PEM-encoded private key and returns DER-format key bytes.
|
||||
func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) {
|
||||
// Ignore any EC PARAMETERS blocks when looking for a key (openssl includes
|
||||
// them by default).
|
||||
var keyDER *pem.Block
|
||||
for {
|
||||
keyDER, in = pem.Decode(in)
|
||||
if keyDER == nil || keyDER.Type != "EC PARAMETERS" {
|
||||
break
|
||||
}
|
||||
}
|
||||
if keyDER != nil {
|
||||
if procType, ok := keyDER.Headers["Proc-Type"]; ok {
|
||||
if strings.Contains(procType, "ENCRYPTED") {
|
||||
if password != nil {
|
||||
return x509.DecryptPEMBlock(keyDER, password)
|
||||
}
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, certerr.ErrEncryptedPrivateKey)
|
||||
}
|
||||
}
|
||||
return keyDER.Bytes, nil
|
||||
}
|
||||
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, errors.New("failed to decode private key"))
|
||||
}
|
||||
|
||||
// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
|
||||
func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) {
|
||||
in = bytes.TrimSpace(in)
|
||||
p, rest := pem.Decode(in)
|
||||
if p != nil {
|
||||
if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
|
||||
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, certerr.ErrInvalidPEMType(p.Type, "NEW CERTIFICATE REQUEST", "CERTIFICATE REQUEST"))
|
||||
}
|
||||
|
||||
csr, err = x509.ParseCertificateRequest(p.Bytes)
|
||||
} else {
|
||||
csr, err = x509.ParseCertificateRequest(in)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, rest, err
|
||||
}
|
||||
|
||||
err = csr.CheckSignature()
|
||||
if err != nil {
|
||||
return nil, rest, err
|
||||
}
|
||||
|
||||
return csr, rest, nil
|
||||
}
|
||||
|
||||
// ParseCSRPEM parses a PEM-encoded certificate signing request.
|
||||
// It does not check the signature. This is useful for dumping data from a CSR
|
||||
// locally.
|
||||
func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
|
||||
block, _ := pem.Decode([]byte(csrPEM))
|
||||
if block == nil {
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourceCSR, errors.New("PEM block is empty"))
|
||||
}
|
||||
csrObject, err := x509.ParseCertificateRequest(block.Bytes)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return csrObject, nil
|
||||
}
|
||||
|
||||
// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer.
|
||||
func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
|
||||
switch pub := priv.Public().(type) {
|
||||
case *rsa.PublicKey:
|
||||
bitLength := pub.N.BitLen()
|
||||
switch {
|
||||
case bitLength >= 4096:
|
||||
return x509.SHA512WithRSA
|
||||
case bitLength >= 3072:
|
||||
return x509.SHA384WithRSA
|
||||
case bitLength >= 2048:
|
||||
return x509.SHA256WithRSA
|
||||
default:
|
||||
return x509.SHA1WithRSA
|
||||
}
|
||||
case *ecdsa.PublicKey:
|
||||
switch pub.Curve {
|
||||
case elliptic.P521():
|
||||
return x509.ECDSAWithSHA512
|
||||
case elliptic.P384():
|
||||
return x509.ECDSAWithSHA384
|
||||
case elliptic.P256():
|
||||
return x509.ECDSAWithSHA256
|
||||
default:
|
||||
return x509.ECDSAWithSHA1
|
||||
}
|
||||
default:
|
||||
return x509.UnknownSignatureAlgorithm
|
||||
}
|
||||
}
|
||||
|
||||
// LoadClientCertificate loads a TLS key/certificate pair from PEM files.
|
||||
func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, error) {
|
||||
if certFile != "" && keyFile != "" {
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
return nil, certerr.LoadingError(certerr.ErrorSourceKeypair, err)
|
||||
}
|
||||
return &cert, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// CreateTLSConfig creates a tls.Config from an optional client certificate and a pool of root CAs.
|
||||
func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Config {
|
||||
var certs []tls.Certificate
|
||||
if cert != nil {
|
||||
certs = []tls.Certificate{*cert}
|
||||
}
|
||||
return &tls.Config{
|
||||
Certificates: certs,
|
||||
RootCAs: remoteCAs,
|
||||
}
|
||||
}
|
||||
|
||||
// SerializeSCTList serializes a list of SCTs.
|
||||
func SerializeSCTList(sctList []ct.SignedCertificateTimestamp) ([]byte, error) {
|
||||
list := ctx509.SignedCertificateTimestampList{}
|
||||
for _, sct := range sctList {
|
||||
sctBytes, err := cttls.Marshal(sct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
list.SCTList = append(list.SCTList, ctx509.SerializedSCT{Val: sctBytes})
|
||||
}
|
||||
return cttls.Marshal(list)
|
||||
}
|
||||
|
||||
// DeserializeSCTList deserializes a list of SCTs.
|
||||
func DeserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimestamp, error) {
|
||||
var sctList ctx509.SignedCertificateTimestampList
|
||||
rest, err := cttls.Unmarshal(serializedSCTList, &sctList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, errors.New("serialized SCT list contained trailing garbage"))
|
||||
}
|
||||
|
||||
list := make([]ct.SignedCertificateTimestamp, len(sctList.SCTList))
|
||||
for i, serializedSCT := range sctList.SCTList {
|
||||
var sct ct.SignedCertificateTimestamp
|
||||
rest, err := cttls.Unmarshal(serializedSCT.Val, &sct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, errors.New("serialized SCT list contained trailing garbage"))
|
||||
}
|
||||
list[i] = sct
|
||||
}
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// SCTListFromOCSPResponse extracts the SCTList from an ocsp.Response,
|
||||
// returning an empty list if the SCT extension was not found or could not be
|
||||
// unmarshalled.
|
||||
func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTimestamp, error) {
|
||||
// This loop finds the SCTListExtension in the OCSP response.
|
||||
var SCTListExtension, ext pkix.Extension
|
||||
for _, ext = range response.Extensions {
|
||||
// sctExtOid is the ObjectIdentifier of a Signed Certificate Timestamp.
|
||||
sctExtOid := asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 5}
|
||||
if ext.Id.Equal(sctExtOid) {
|
||||
SCTListExtension = ext
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// This code block extracts the sctList from the SCT extension.
|
||||
var sctList []ct.SignedCertificateTimestamp
|
||||
var err error
|
||||
if numBytes := len(SCTListExtension.Value); numBytes != 0 {
|
||||
var serializedSCTList []byte
|
||||
rest := make([]byte, numBytes)
|
||||
copy(rest, SCTListExtension.Value)
|
||||
for len(rest) != 0 {
|
||||
rest, err = asn1.Unmarshal(rest, &serializedSCTList)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, err)
|
||||
}
|
||||
}
|
||||
sctList, err = DeserializeSCTList(serializedSCTList)
|
||||
}
|
||||
return sctList, err
|
||||
}
|
||||
|
||||
// ReadBytes reads a []byte either from a file or an environment variable.
|
||||
// If valFile has a prefix of 'env:', the []byte is read from the environment
|
||||
// using the subsequent name. If the prefix is 'file:' the []byte is read from
|
||||
// the subsequent file. If no prefix is provided, valFile is assumed to be a
|
||||
// file path.
|
||||
func ReadBytes(valFile string) ([]byte, error) {
|
||||
switch splitVal := strings.SplitN(valFile, ":", 2); len(splitVal) {
|
||||
case 1:
|
||||
return os.ReadFile(valFile)
|
||||
case 2:
|
||||
switch splitVal[0] {
|
||||
case "env":
|
||||
return []byte(os.Getenv(splitVal[1])), nil
|
||||
case "file":
|
||||
return os.ReadFile(splitVal[1])
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown prefix: %s", splitVal[0])
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("multiple prefixes: %s",
|
||||
strings.Join(splitVal[:len(splitVal)-1], ", "))
|
||||
}
|
||||
}
|
||||
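A minimal usage sketch for the certlib helpers above, tying ReadBytes, ParseCertificatePEM, and ParsePrivateKeyPEM together; the file name and environment variable are illustrative only:

package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
	// ReadBytes understands the "file:" and "env:" prefixes described above;
	// a bare path is treated as a file.
	certPEM, err := certlib.ReadBytes("file:server.pem")
	if err != nil {
		log.Fatal(err)
	}

	cert, err := certlib.ParseCertificatePEM(certPEM)
	if err != nil {
		log.Fatal(err)
	}

	// The key is pulled from an environment variable in this sketch.
	keyPEM, err := certlib.ReadBytes("env:SERVER_KEY_PEM")
	if err != nil {
		log.Fatal(err)
	}

	key, err := certlib.ParsePrivateKeyPEM(keyPEM)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("loaded %s with a %T key\n", cert.Subject.CommonName, key.Public())
}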
82
certlib/hosts/hosts.go
Normal file
@@ -0,0 +1,82 @@
package hosts

import (
	"errors"
	"fmt"
	"net"
	"net/url"
	"strconv"
	"strings"
)

type Target struct {
	Host string
	Port int
}

func (t *Target) String() string {
	return fmt.Sprintf("%s:%d", t.Host, t.Port)
}

func parseURL(host string) (string, int, error) {
	url, err := url.Parse(host)
	if err != nil {
		return "", 0, fmt.Errorf("certlib/hosts: invalid host: %s", host)
	}

	if strings.ToLower(url.Scheme) != "https" {
		return "", 0, errors.New("certlib/hosts: only https scheme supported")
	}

	if url.Port() == "" {
		return url.Hostname(), 443, nil
	}

	port, err := strconv.ParseInt(url.Port(), 10, 16)
	if err != nil {
		return "", 0, fmt.Errorf("certlib/hosts: invalid port: %s", url.Port())
	}

	return url.Hostname(), int(port), nil
}

func parseHostPort(host string) (string, int, error) {
	host, sport, err := net.SplitHostPort(host)
	if err == nil {
		port, err := strconv.ParseInt(sport, 10, 16)
		if err != nil {
			return "", 0, fmt.Errorf("certlib/hosts: invalid port: %s", sport)
		}

		return host, int(port), nil
	}

	return host, 443, nil
}

func ParseHost(host string) (*Target, error) {
	host, port, err := parseURL(host)
	if err == nil {
		return &Target{Host: host, Port: port}, nil
	}

	host, port, err = parseHostPort(host)
	if err == nil {
		return &Target{Host: host, Port: port}, nil
	}

	return nil, fmt.Errorf("certlib/hosts: invalid host: %s", host)
}

func ParseHosts(hosts ...string) ([]*Target, error) {
	targets := make([]*Target, 0, len(hosts))
	for _, host := range hosts {
		target, err := ParseHost(host)
		if err != nil {
			return nil, err
		}
		targets = append(targets, target)
	}

	return targets, nil
}
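A short sketch of how the hosts package is meant to be called; the host strings are illustrative:

package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib/hosts"
)

func main() {
	// URLs, host:port pairs, and bare hostnames (which default to port 443)
	// are all accepted.
	targets, err := hosts.ParseHosts("https://example.net", "example.org:8443", "example.com")
	if err != nil {
		log.Fatal(err)
	}

	for _, t := range targets {
		fmt.Println(t) // prints host:port, e.g. example.net:443
	}
}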
220
certlib/pkcs7/pkcs7.go
Normal file
@@ -0,0 +1,220 @@
|
||||
package pkcs7
|
||||
|
||||
// Originally from CFSSL, and licensed under:
|
||||
|
||||
/*
|
||||
Copyright (c) 2014 CloudFlare Inc.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
// I've modified it for use in my own code e.g. by removing the CFSSL errors
|
||||
// and replacing them with sane ones.
|
||||
|
||||
// Package pkcs7 implements the subset of the CMS PKCS #7 datatype that is typically
|
||||
// used to package certificates and CRLs. Using openssl, every certificate converted
|
||||
// to PKCS #7 format from another encoding such as PEM conforms to this implementation.
|
||||
// reference: https://www.openssl.org/docs/man1.1.0/apps/crl2pkcs7.html
|
||||
//
|
||||
// PKCS #7 Data type, reference: https://tools.ietf.org/html/rfc2315
|
||||
//
|
||||
// The full pkcs#7 cryptographic message syntax allows for cryptographic enhancements,
|
||||
// for example data can be encrypted and signed and then packaged through pkcs#7 to be
|
||||
// sent over a network and then verified and decrypted. It is asn1, and the type of
|
||||
// PKCS #7 ContentInfo, which comprises the PKCS #7 structure, is:
|
||||
//
|
||||
// ContentInfo ::= SEQUENCE {
|
||||
// contentType ContentType,
|
||||
// content [0] EXPLICIT ANY DEFINED BY contentType OPTIONAL
|
||||
// }
|
||||
//
|
||||
// There are six possible ContentTypes: data, signedData, envelopedData,
// signedAndEnvelopedData, digestedData, and encryptedData. Here signedData, Data, and
// encryptedData are implemented, as the degenerate case of signedData without a signature
// is the typical format for transferring certificates and CRLs, and Data and encryptedData
// are used in PKCS #12 formats.
|
||||
// The ContentType signedData has the form:
|
||||
//
|
||||
// signedData ::= SEQUENCE {
|
||||
// version Version,
|
||||
// digestAlgorithms DigestAlgorithmIdentifiers,
|
||||
// contentInfo ContentInfo,
|
||||
// certificates [0] IMPLICIT ExtendedCertificatesAndCertificates OPTIONAL
|
||||
// crls [1] IMPLICIT CertificateRevocationLists OPTIONAL,
|
||||
// signerInfos SignerInfos
|
||||
// }
|
||||
//
|
||||
// As of yet, signerInfos and digestAlgorithms are not parsed, as they are not relevant to
// this system's use of PKCS #7 data. Version is an integer type; note that PKCS #7 is
// recursive, so this second layer of ContentInfo is similarly ignored for our degenerate
// usage. The ExtendedCertificatesAndCertificates type consists of a sequence of choices
// between PKCS #6 extended certificates and x509 certificates. Sequences containing
// extended certificates are not yet supported in this implementation.
|
||||
//
|
||||
// The ContentType Data is simply a raw octet string and is parsed directly into a Go []byte slice.
|
||||
//
|
||||
// The ContentType encryptedData is the most complicated and its form can be gathered by
|
||||
// the go type below. It essentially contains a raw octet string of encrypted data and an
|
||||
// algorithm identifier for use in decrypting this data.
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||
)
|
||||
|
||||
// Types used for asn1 Unmarshaling.
|
||||
|
||||
type signedData struct {
|
||||
Version int
|
||||
DigestAlgorithms asn1.RawValue
|
||||
ContentInfo asn1.RawValue
|
||||
Certificates asn1.RawValue `asn1:"optional" asn1:"tag:0"`
|
||||
Crls asn1.RawValue `asn1:"optional"`
|
||||
SignerInfos asn1.RawValue
|
||||
}
|
||||
|
||||
type initPKCS7 struct {
|
||||
Raw asn1.RawContent
|
||||
ContentType asn1.ObjectIdentifier
|
||||
Content asn1.RawValue `asn1:"tag:0,explicit,optional"`
|
||||
}
|
||||
|
||||
// Object identifier strings of the three implemented PKCS7 types.
|
||||
const (
|
||||
ObjIDData = "1.2.840.113549.1.7.1"
|
||||
ObjIDSignedData = "1.2.840.113549.1.7.2"
|
||||
ObjIDEncryptedData = "1.2.840.113549.1.7.6"
|
||||
)
|
||||
|
||||
// PKCS7 represents the ASN1 PKCS #7 Content type. It contains one of three
|
||||
// possible types of Content objects, as denoted by the object identifier in
|
||||
// the ContentInfo field, the other two being nil. SignedData
|
||||
// is the degenerate SignedData Content info without signature used
|
||||
// to hold certificates and crls. Data is raw bytes, and EncryptedData
|
||||
// is as defined in PKCS #7 standard.
|
||||
type PKCS7 struct {
|
||||
Raw asn1.RawContent
|
||||
ContentInfo string
|
||||
Content Content
|
||||
}
|
||||
|
||||
// Content implements three of the six possible PKCS7 data types. Only one is non-nil.
|
||||
type Content struct {
|
||||
Data []byte
|
||||
SignedData SignedData
|
||||
EncryptedData EncryptedData
|
||||
}
|
||||
|
||||
// SignedData defines the typical carrier of certificates and crls.
|
||||
type SignedData struct {
|
||||
Raw asn1.RawContent
|
||||
Version int
|
||||
Certificates []*x509.Certificate
|
||||
Crl *x509.RevocationList
|
||||
}
|
||||
|
||||
// Data contains raw bytes. Used as a subtype in PKCS12.
|
||||
type Data struct {
|
||||
Bytes []byte
|
||||
}
|
||||
|
||||
// EncryptedData contains encrypted data. Used as a subtype in PKCS12.
|
||||
type EncryptedData struct {
|
||||
Raw asn1.RawContent
|
||||
Version int
|
||||
EncryptedContentInfo EncryptedContentInfo
|
||||
}
|
||||
|
||||
// EncryptedContentInfo is a subtype of PKCS7EncryptedData.
|
||||
type EncryptedContentInfo struct {
|
||||
Raw asn1.RawContent
|
||||
ContentType asn1.ObjectIdentifier
|
||||
ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
|
||||
EncryptedContent []byte `asn1:"tag:0,optional"`
|
||||
}
|
||||
|
||||
// ParsePKCS7 attempts to parse the DER encoded bytes of a
|
||||
// PKCS7 structure.
|
||||
func ParsePKCS7(raw []byte) (msg *PKCS7, err error) {
|
||||
|
||||
var pkcs7 initPKCS7
|
||||
_, err = asn1.Unmarshal(raw, &pkcs7)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
|
||||
msg = new(PKCS7)
|
||||
msg.Raw = pkcs7.Raw
|
||||
msg.ContentInfo = pkcs7.ContentType.String()
|
||||
switch {
|
||||
case msg.ContentInfo == ObjIDData:
|
||||
msg.ContentInfo = "Data"
|
||||
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &msg.Content.Data)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
case msg.ContentInfo == ObjIDSignedData:
|
||||
msg.ContentInfo = "SignedData"
|
||||
var signedData signedData
|
||||
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &signedData)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
if len(signedData.Certificates.Bytes) != 0 {
|
||||
msg.Content.SignedData.Certificates, err = x509.ParseCertificates(signedData.Certificates.Bytes)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
}
|
||||
if len(signedData.Crls.Bytes) != 0 {
|
||||
msg.Content.SignedData.Crl, err = x509.ParseRevocationList(signedData.Crls.Bytes)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
}
|
||||
msg.Content.SignedData.Version = signedData.Version
|
||||
msg.Content.SignedData.Raw = pkcs7.Content.Bytes
|
||||
case msg.ContentInfo == ObjIDEncryptedData:
|
||||
msg.ContentInfo = "EncryptedData"
|
||||
var encryptedData EncryptedData
|
||||
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &encryptedData)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
if encryptedData.Version != 0 {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("only PKCS #7 encryptedData version 0 is supported"))
|
||||
}
|
||||
msg.Content.EncryptedData = encryptedData
|
||||
|
||||
default:
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("only PKCS #7 content of type data, signed data, or encrypted data can be parsed"))
|
||||
}
|
||||
|
||||
return msg, nil
|
||||
|
||||
}
|
||||
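A minimal sketch of pulling certificates out of a degenerate signed-data blob with ParsePKCS7; the input file name is illustrative:

package main

import (
	"fmt"
	"log"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib/pkcs7"
)

func main() {
	// DER-encoded PKCS #7, e.g. produced by openssl crl2pkcs7.
	der, err := os.ReadFile("bundle.p7b")
	if err != nil {
		log.Fatal(err)
	}

	msg, err := pkcs7.ParsePKCS7(der)
	if err != nil {
		log.Fatal(err)
	}

	// Certificates are only carried in the SignedData content type.
	if msg.ContentInfo != "SignedData" {
		log.Fatalf("unexpected PKCS #7 content type: %s", msg.ContentInfo)
	}

	for _, cert := range msg.Content.SignedData.Certificates {
		fmt.Println(cert.Subject.CommonName)
	}
}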
363
certlib/revoke/revoke.go
Normal file
@@ -0,0 +1,363 @@
|
||||
// Package revoke provides functionality for checking the validity of
|
||||
// a cert. Specifically, the temporal validity of the certificate is
|
||||
// checked first, then any CRL and OCSP url in the cert is checked.
|
||||
package revoke
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
neturl "net/url"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/log"
|
||||
"golang.org/x/crypto/ocsp"
|
||||
)
|
||||
|
||||
// Originally from CFSSL, mostly written by me originally, and licensed under:
|
||||
|
||||
/*
|
||||
Copyright (c) 2014 CloudFlare Inc.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
// I've modified it for use in my own code e.g. by removing the CFSSL errors
|
||||
// and replacing them with sane ones.
|
||||
|
||||
// HTTPClient is an instance of http.Client that will be used for all HTTP requests.
|
||||
var HTTPClient = http.DefaultClient
|
||||
|
||||
// HardFail determines whether the failure to check the revocation
|
||||
// status of a certificate (i.e. due to network failure) causes
|
||||
// verification to fail (a hard failure).
|
||||
var HardFail = false
|
||||
|
||||
// CRLSet associates a PKIX certificate list with the URL the CRL is
|
||||
// fetched from.
|
||||
var CRLSet = map[string]*x509.RevocationList{}
|
||||
var crlLock = new(sync.Mutex)
|
||||
|
||||
// We can't handle LDAP certificates, so this checks to see if the
|
||||
// URL string points to an LDAP resource so that we can ignore it.
|
||||
func ldapURL(url string) bool {
|
||||
u, err := neturl.Parse(url)
|
||||
if err != nil {
|
||||
log.Warningf("error parsing url %s: %v", url, err)
|
||||
return false
|
||||
}
|
||||
if u.Scheme == "ldap" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// revCheck should check the certificate for any revocations. It
|
||||
// returns a pair of booleans: the first indicates whether the certificate
|
||||
// is revoked, the second indicates whether the revocations were
|
||||
// successfully checked. This leads to the following combinations:
|
||||
//
|
||||
// - false, false: an error was encountered while checking revocations.
|
||||
// - false, true: the certificate was checked successfully, and it is not revoked.
|
||||
// - true, true: the certificate was checked successfully, and it is revoked.
|
||||
// - true, false: failure to check revocation status causes verification to fail
|
||||
func revCheck(cert *x509.Certificate) (revoked, ok bool, err error) {
|
||||
for _, url := range cert.CRLDistributionPoints {
|
||||
if ldapURL(url) {
|
||||
log.Infof("skipping LDAP CRL: %s", url)
|
||||
continue
|
||||
}
|
||||
|
||||
if revoked, ok, err := certIsRevokedCRL(cert, url); !ok {
|
||||
log.Warning("error checking revocation via CRL")
|
||||
if HardFail {
|
||||
return true, false, err
|
||||
}
|
||||
return false, false, err
|
||||
} else if revoked {
|
||||
log.Info("certificate is revoked via CRL")
|
||||
return true, true, err
|
||||
}
|
||||
}
|
||||
|
||||
if revoked, ok, err := certIsRevokedOCSP(cert, HardFail); !ok {
|
||||
log.Warning("error checking revocation via OCSP")
|
||||
if HardFail {
|
||||
return true, false, err
|
||||
}
|
||||
return false, false, err
|
||||
} else if revoked {
|
||||
log.Info("certificate is revoked via OCSP")
|
||||
return true, true, err
|
||||
}
|
||||
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
// fetchCRL fetches and parses a CRL.
|
||||
func fetchCRL(url string) (*x509.RevocationList, error) {
|
||||
resp, err := HTTPClient.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode >= 300 {
|
||||
return nil, errors.New("failed to retrieve CRL")
|
||||
}
|
||||
|
||||
body, err := crlRead(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x509.ParseRevocationList(body)
|
||||
}
|
||||
|
||||
func getIssuer(cert *x509.Certificate) *x509.Certificate {
|
||||
var issuer *x509.Certificate
|
||||
var err error
|
||||
for _, issuingCert := range cert.IssuingCertificateURL {
|
||||
issuer, err = fetchRemote(issuingCert)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
return issuer
|
||||
|
||||
}
|
||||
|
||||
// check a cert against a specific CRL. Returns the same bool pair
|
||||
// as revCheck, plus an error if one occurred.
|
||||
func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err error) {
|
||||
crlLock.Lock()
|
||||
crl, ok := CRLSet[url]
|
||||
if ok && crl == nil {
|
||||
ok = false
|
||||
delete(CRLSet, url)
|
||||
}
|
||||
crlLock.Unlock()
|
||||
|
||||
var shouldFetchCRL = true
|
||||
if ok {
|
||||
if time.Now().After(crl.ThisUpdate) {
|
||||
shouldFetchCRL = false
|
||||
}
|
||||
}
|
||||
|
||||
issuer := getIssuer(cert)
|
||||
|
||||
if shouldFetchCRL {
|
||||
var err error
|
||||
crl, err = fetchCRL(url)
|
||||
if err != nil {
|
||||
log.Warningf("failed to fetch CRL: %v", err)
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
// check CRL signature
|
||||
if issuer != nil {
|
||||
err = crl.CheckSignatureFrom(issuer)
|
||||
if err != nil {
|
||||
log.Warningf("failed to verify CRL: %v", err)
|
||||
return false, false, err
|
||||
}
|
||||
}
|
||||
|
||||
crlLock.Lock()
|
||||
CRLSet[url] = crl
|
||||
crlLock.Unlock()
|
||||
}
|
||||
|
||||
for _, revoked := range crl.RevokedCertificates {
|
||||
if cert.SerialNumber.Cmp(revoked.SerialNumber) == 0 {
|
||||
log.Info("Serial number match: intermediate is revoked.")
|
||||
return true, true, err
|
||||
}
|
||||
}
|
||||
|
||||
return false, true, err
|
||||
}
|
||||
|
||||
// VerifyCertificate ensures that the certificate passed in hasn't
|
||||
// expired and then checks the CRL and OCSP endpoints for the certificate.
|
||||
func VerifyCertificate(cert *x509.Certificate) (revoked, ok bool) {
|
||||
revoked, ok, _ = VerifyCertificateError(cert)
|
||||
return revoked, ok
|
||||
}
|
||||
|
||||
// VerifyCertificateError ensures that the certificate passed in hasn't
|
||||
// expired and then checks the CRL and OCSP endpoints for the certificate.
|
||||
func VerifyCertificateError(cert *x509.Certificate) (revoked, ok bool, err error) {
|
||||
if !time.Now().Before(cert.NotAfter) {
|
||||
msg := fmt.Sprintf("Certificate expired %s\n", cert.NotAfter)
|
||||
log.Info(msg)
|
||||
return true, true, errors.New(msg)
|
||||
} else if !time.Now().After(cert.NotBefore) {
|
||||
msg := fmt.Sprintf("Certificate isn't valid until %s\n", cert.NotBefore)
|
||||
log.Info(msg)
|
||||
return true, true, errors.New(msg)
|
||||
}
|
||||
return revCheck(cert)
|
||||
}
|
||||
|
||||
func fetchRemote(url string) (*x509.Certificate, error) {
|
||||
resp, err := HTTPClient.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
in, err := remoteRead(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p, _ := pem.Decode(in)
|
||||
if p != nil {
|
||||
return certlib.ParseCertificatePEM(in)
|
||||
}
|
||||
|
||||
return x509.ParseCertificate(in)
|
||||
}
|
||||
|
||||
var ocspOpts = ocsp.RequestOptions{
|
||||
Hash: crypto.SHA1,
|
||||
}
|
||||
|
||||
func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e error) {
|
||||
var err error
|
||||
|
||||
ocspURLs := leaf.OCSPServer
|
||||
if len(ocspURLs) == 0 {
|
||||
// OCSP not enabled for this certificate.
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
issuer := getIssuer(leaf)
|
||||
|
||||
if issuer == nil {
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
ocspRequest, err := ocsp.CreateRequest(leaf, issuer, &ocspOpts)
|
||||
if err != nil {
|
||||
return revoked, ok, err
|
||||
}
|
||||
|
||||
for _, server := range ocspURLs {
|
||||
resp, err := sendOCSPRequest(server, ocspRequest, leaf, issuer)
|
||||
if err != nil {
|
||||
if strict {
|
||||
return revoked, ok, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// There wasn't an error fetching the OCSP status.
|
||||
ok = true
|
||||
|
||||
if resp.Status != ocsp.Good {
|
||||
// The certificate was revoked.
|
||||
revoked = true
|
||||
}
|
||||
|
||||
return revoked, ok, err
|
||||
}
|
||||
return revoked, ok, err
|
||||
}
|
||||
|
||||
// sendOCSPRequest attempts to request an OCSP response from the
|
||||
// server. The error only indicates a failure to *fetch* the OCSP
// response, and *does not* mean the certificate is valid.
|
||||
func sendOCSPRequest(server string, req []byte, leaf, issuer *x509.Certificate) (*ocsp.Response, error) {
|
||||
var resp *http.Response
|
||||
var err error
|
||||
if len(req) > 256 {
|
||||
buf := bytes.NewBuffer(req)
|
||||
resp, err = HTTPClient.Post(server, "application/ocsp-request", buf)
|
||||
} else {
|
||||
reqURL := server + "/" + neturl.QueryEscape(base64.StdEncoding.EncodeToString(req))
|
||||
resp, err = HTTPClient.Get(reqURL)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, errors.New("failed to retrieve OSCP")
|
||||
}
|
||||
|
||||
body, err := ocspRead(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch {
case bytes.Equal(body, ocsp.UnauthorizedErrorResponse):
return nil, errors.New("OCSP unauthorized")
case bytes.Equal(body, ocsp.MalformedRequestErrorResponse):
return nil, errors.New("OCSP malformed")
case bytes.Equal(body, ocsp.InternalErrorErrorResponse):
return nil, errors.New("OCSP internal error")
case bytes.Equal(body, ocsp.TryLaterErrorResponse):
return nil, errors.New("OCSP try later")
case bytes.Equal(body, ocsp.SigRequredErrorResponse):
return nil, errors.New("OCSP signature required")
}
|
||||
|
||||
return ocsp.ParseResponseForCert(body, leaf, issuer)
|
||||
}
|
||||
|
||||
var crlRead = io.ReadAll
|
||||
|
||||
// SetCRLFetcher sets the function to use to read from the http response body
|
||||
func SetCRLFetcher(fn func(io.Reader) ([]byte, error)) {
|
||||
crlRead = fn
|
||||
}
|
||||
|
||||
var remoteRead = io.ReadAll
|
||||
|
||||
// SetRemoteFetcher sets the function to use to read from the http response body
|
||||
func SetRemoteFetcher(fn func(io.Reader) ([]byte, error)) {
|
||||
remoteRead = fn
|
||||
}
|
||||
|
||||
var ocspRead = io.ReadAll
|
||||
|
||||
// SetOCSPFetcher sets the function to use to read from the http response body
|
||||
func SetOCSPFetcher(fn func(io.Reader) ([]byte, error)) {
|
||||
ocspRead = fn
|
||||
}
|
||||
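A minimal sketch of checking revocation for an already-parsed certificate with this package; the input file name is illustrative, and HardFail is set only to show the knob:

package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib"
	"git.wntrmute.dev/kyle/goutils/certlib/revoke"
)

func main() {
	certPEM, err := certlib.ReadBytes("file:server.pem")
	if err != nil {
		log.Fatal(err)
	}

	cert, err := certlib.ParseCertificatePEM(certPEM)
	if err != nil {
		log.Fatal(err)
	}

	// Treat a failed revocation check (e.g. network trouble) as fatal.
	revoke.HardFail = true

	revoked, ok, err := revoke.VerifyCertificateError(cert)
	if !ok {
		log.Fatalf("revocation status could not be determined: %v", err)
	}
	fmt.Println("revoked:", revoked)
}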
262
certlib/revoke/revoke_test.go
Normal file
@@ -0,0 +1,262 @@
|
||||
package revoke
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Originally from CFSSL, mostly written by me originally, and licensed under:
|
||||
|
||||
/*
|
||||
Copyright (c) 2014 CloudFlare Inc.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
// I've modified it for use in my own code e.g. by removing the CFSSL errors
|
||||
// and replacing them with sane ones.
|
||||
|
||||
// The first three test cases represent known revoked, expired, and good
|
||||
// certificates that were checked on the date listed in the log. The
|
||||
// good certificate will eventually need to be replaced in year 2029.
|
||||
|
||||
// If there is a soft-fail, the test will pass to mimic the default
|
||||
// behaviour used in this software. However, it will print a warning
|
||||
// to indicate that this is the case.
|
||||
|
||||
// 2014/05/22 14:18:17 Certificate expired 2014-04-04 14:14:20 +0000 UTC
|
||||
// 2014/05/22 14:18:17 Revoked certificate: misc/intermediate_ca/ActalisServerAuthenticationCA.crt
|
||||
var expiredCert = mustParse(`-----BEGIN CERTIFICATE-----
|
||||
MIIEXTCCA8agAwIBAgIEBycURTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQGEwJV
|
||||
UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU
|
||||
cnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds
|
||||
b2JhbCBSb290MB4XDTA3MDQwNDE0MTUxNFoXDTE0MDQwNDE0MTQyMFowejELMAkG
|
||||
A1UEBhMCSVQxFzAVBgNVBAoTDkFjdGFsaXMgUy5wLkEuMScwJQYDVQQLEx5DZXJ0
|
||||
aWZpY2F0aW9uIFNlcnZpY2UgUHJvdmlkZXIxKTAnBgNVBAMTIEFjdGFsaXMgU2Vy
|
||||
dmVyIEF1dGhlbnRpY2F0aW9uIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAv6P0bhXbUQkVW8ox0HJ+sP5+j6pTwS7yg/wGEUektB/G1duQiT1v21fo
|
||||
LANr6F353jILQDCpHIfal3MhbSsHEMKU7XaqsyLWV93bcIKbIloS/eXDfkog6KB3
|
||||
u0JHgrtNz584Jg/OLm9feffNbCJ38TiLo0/UWkAQ6PQWaOwZEgyKjVI5F3swoTB3
|
||||
g0LZAzegvkU00Kfp13cSg+cJeU4SajwtfQ+g6s6dlaekaHy/0ef46PfiHHRuhEhE
|
||||
JWIpDtUN2ywTT33MSSUe5glDIiXYfcamJQrebzGsHEwyqI195Yaxb+FLNND4n3HM
|
||||
e7EI2OrLyT+r/WMvQbl+xNihwtv+HwIDAQABo4IBbzCCAWswEgYDVR0TAQH/BAgw
|
||||
BgEB/wIBADBTBgNVHSAETDBKMEgGCSsGAQQBsT4BADA7MDkGCCsGAQUFBwIBFi1o
|
||||
dHRwOi8vd3d3LnB1YmxpYy10cnVzdC5jb20vQ1BTL09tbmlSb290Lmh0bWwwDgYD
|
||||
VR0PAQH/BAQDAgEGMIGJBgNVHSMEgYEwf6F5pHcwdTELMAkGA1UEBhMCVVMxGDAW
|
||||
BgNVBAoTD0dURSBDb3Jwb3JhdGlvbjEnMCUGA1UECxMeR1RFIEN5YmVyVHJ1c3Qg
|
||||
U29sdXRpb25zLCBJbmMuMSMwIQYDVQQDExpHVEUgQ3liZXJUcnVzdCBHbG9iYWwg
|
||||
Um9vdIICAaUwRQYDVR0fBD4wPDA6oDigNoY0aHR0cDovL3d3dy5wdWJsaWMtdHJ1
|
||||
c3QuY29tL2NnaS1iaW4vQ1JMLzIwMTgvY2RwLmNybDAdBgNVHQ4EFgQUpi6OuXYt
|
||||
oxHC3cTezVLuraWpAFEwDQYJKoZIhvcNAQEFBQADgYEAAtjJBwjsvw7DBs+v7BQz
|
||||
gSGeg6nbYUuPL7+1driT5XsUKJ7WZjiwW2zW/WHZ+zGo1Ev8Dc574RpSrg/EIlfH
|
||||
TpBiBuFgiKtJksKdoxPZGSI8FitwcgeW+y8wotmm0CtDzWN27g2kfSqHb5eHfZY5
|
||||
sESPRwHkcMUNdAp37FLweUw=
|
||||
-----END CERTIFICATE-----`)
|
||||
|
||||
// 2014/05/22 14:18:31 Serial number match: intermediate is revoked.
|
||||
// 2014/05/22 14:18:31 certificate is revoked via CRL
|
||||
// 2014/05/22 14:18:31 Revoked certificate: misc/intermediate_ca/MobileArmorEnterpriseCA.crt
|
||||
var revokedCert = mustParse(`-----BEGIN CERTIFICATE-----
|
||||
MIIEEzCCAvugAwIBAgILBAAAAAABGMGjftYwDQYJKoZIhvcNAQEFBQAwcTEoMCYG
|
||||
A1UEAxMfR2xvYmFsU2lnbiBSb290U2lnbiBQYXJ0bmVycyBDQTEdMBsGA1UECxMU
|
||||
Um9vdFNpZ24gUGFydG5lcnMgQ0ExGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2Ex
|
||||
CzAJBgNVBAYTAkJFMB4XDTA4MDMxODEyMDAwMFoXDTE4MDMxODEyMDAwMFowJTEj
|
||||
MCEGA1UEAxMaTW9iaWxlIEFybW9yIEVudGVycHJpc2UgQ0EwggEiMA0GCSqGSIb3
|
||||
DQEBAQUAA4IBDwAwggEKAoIBAQCaEjeDR73jSZVlacRn5bc5VIPdyouHvGIBUxyS
|
||||
C6483HgoDlWrWlkEndUYFjRPiQqJFthdJxfglykXD+btHixMIYbz/6eb7hRTdT9w
|
||||
HKsfH+wTBIdb5AZiNjkg3QcCET5HfanJhpREjZWP513jM/GSrG3VwD6X5yttCIH1
|
||||
NFTDAr7aqpW/UPw4gcPfkwS92HPdIkb2DYnsqRrnKyNValVItkxJiotQ1HOO3YfX
|
||||
ivGrHIbJdWYg0rZnkPOgYF0d+aIA4ZfwvdW48+r/cxvLevieuKj5CTBZZ8XrFt8r
|
||||
JTZhZljbZvnvq/t6ZIzlwOj082f+lTssr1fJ3JsIPnG2lmgTAgMBAAGjgfcwgfQw
|
||||
DgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFIZw
|
||||
ns4uzXdLX6xDRXUzFgZxWM7oME0GA1UdIARGMEQwQgYJKwYBBAGgMgE8MDUwMwYI
|
||||
KwYBBQUHAgIwJxolaHR0cDovL3d3dy5nbG9iYWxzaWduLmNvbS9yZXBvc2l0b3J5
|
||||
LzA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmdsb2JhbHNpZ24ubmV0L1Jv
|
||||
b3RTaWduUGFydG5lcnMuY3JsMB8GA1UdIwQYMBaAFFaE7LVxpedj2NtRBNb65vBI
|
||||
UknOMA0GCSqGSIb3DQEBBQUAA4IBAQBZvf+2xUJE0ekxuNk30kPDj+5u9oI3jZyM
|
||||
wvhKcs7AuRAbcxPtSOnVGNYl8By7DPvPun+U3Yci8540y143RgD+kz3jxIBaoW/o
|
||||
c4+X61v6DBUtcBPEt+KkV6HIsZ61SZmc/Y1I2eoeEt6JYoLjEZMDLLvc1cK/+wpg
|
||||
dUZSK4O9kjvIXqvsqIOlkmh/6puSugTNao2A7EIQr8ut0ZmzKzMyZ0BuQhJDnAPd
|
||||
Kz5vh+5tmytUPKA8hUgmLWe94lMb7Uqq2wgZKsqun5DAWleKu81w7wEcOrjiiB+x
|
||||
jeBHq7OnpWm+ccTOPCE6H4ZN4wWVS7biEBUdop/8HgXBPQHWAdjL
|
||||
-----END CERTIFICATE-----`)
|
||||
|
||||
// A Comodo intermediate CA certificate with issuer url, CRL url and OCSP url
|
||||
var goodComodoCA = (`-----BEGIN CERTIFICATE-----
|
||||
MIIGCDCCA/CgAwIBAgIQKy5u6tl1NmwUim7bo3yMBzANBgkqhkiG9w0BAQwFADCB
|
||||
hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
|
||||
A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV
|
||||
BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTQwMjEy
|
||||
MDAwMDAwWhcNMjkwMjExMjM1OTU5WjCBkDELMAkGA1UEBhMCR0IxGzAZBgNVBAgT
|
||||
EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
|
||||
Q09NT0RPIENBIExpbWl0ZWQxNjA0BgNVBAMTLUNPTU9ETyBSU0EgRG9tYWluIFZh
|
||||
bGlkYXRpb24gU2VjdXJlIFNlcnZlciBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP
|
||||
ADCCAQoCggEBAI7CAhnhoFmk6zg1jSz9AdDTScBkxwtiBUUWOqigwAwCfx3M28Sh
|
||||
bXcDow+G+eMGnD4LgYqbSRutA776S9uMIO3Vzl5ljj4Nr0zCsLdFXlIvNN5IJGS0
|
||||
Qa4Al/e+Z96e0HqnU4A7fK31llVvl0cKfIWLIpeNs4TgllfQcBhglo/uLQeTnaG6
|
||||
ytHNe+nEKpooIZFNb5JPJaXyejXdJtxGpdCsWTWM/06RQ1A/WZMebFEh7lgUq/51
|
||||
UHg+TLAchhP6a5i84DuUHoVS3AOTJBhuyydRReZw3iVDpA3hSqXttn7IzW3uLh0n
|
||||
c13cRTCAquOyQQuvvUSH2rnlG51/ruWFgqUCAwEAAaOCAWUwggFhMB8GA1UdIwQY
|
||||
MBaAFLuvfgI9+qbxPISOre44mOzZMjLUMB0GA1UdDgQWBBSQr2o6lFoL2JDqElZz
|
||||
30O0Oija5zAOBgNVHQ8BAf8EBAMCAYYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNV
|
||||
HSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwGwYDVR0gBBQwEjAGBgRVHSAAMAgG
|
||||
BmeBDAECATBMBgNVHR8ERTBDMEGgP6A9hjtodHRwOi8vY3JsLmNvbW9kb2NhLmNv
|
||||
bS9DT01PRE9SU0FDZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDBxBggrBgEFBQcB
|
||||
AQRlMGMwOwYIKwYBBQUHMAKGL2h0dHA6Ly9jcnQuY29tb2RvY2EuY29tL0NPTU9E
|
||||
T1JTQUFkZFRydXN0Q0EuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21v
|
||||
ZG9jYS5jb20wDQYJKoZIhvcNAQEMBQADggIBAE4rdk+SHGI2ibp3wScF9BzWRJ2p
|
||||
mj6q1WZmAT7qSeaiNbz69t2Vjpk1mA42GHWx3d1Qcnyu3HeIzg/3kCDKo2cuH1Z/
|
||||
e+FE6kKVxF0NAVBGFfKBiVlsit2M8RKhjTpCipj4SzR7JzsItG8kO3KdY3RYPBps
|
||||
P0/HEZrIqPW1N+8QRcZs2eBelSaz662jue5/DJpmNXMyYE7l3YphLG5SEXdoltMY
|
||||
dVEVABt0iN3hxzgEQyjpFv3ZBdRdRydg1vs4O2xyopT4Qhrf7W8GjEXCBgCq5Ojc
|
||||
2bXhc3js9iPc0d1sjhqPpepUfJa3w/5Vjo1JXvxku88+vZbrac2/4EjxYoIQ5QxG
|
||||
V/Iz2tDIY+3GH5QFlkoakdH368+PUq4NCNk+qKBR6cGHdNXJ93SrLlP7u3r7l+L4
|
||||
HyaPs9Kg4DdbKDsx5Q5XLVq4rXmsXiBmGqW5prU5wfWYQ//u+aen/e7KJD2AFsQX
|
||||
j4rBYKEMrltDR5FL1ZoXX/nUh8HCjLfn4g8wGTeGrODcQgPmlKidrv0PJFGUzpII
|
||||
0fxQ8ANAe4hZ7Q7drNJ3gjTcBpUC2JD5Leo31Rpg0Gcg19hCC0Wvgmje3WYkN5Ap
|
||||
lBlGGSW4gNfL1IYoakRwJiNiqZ+Gb7+6kHDSVneFeO/qJakXzlByjAA6quPbYzSf
|
||||
+AZxAeKCINT+b72x
|
||||
-----END CERTIFICATE-----`)
|
||||
|
||||
var goodCert = mustParse(goodComodoCA)
|
||||
|
||||
func mustParse(pemData string) *x509.Certificate {
|
||||
block, _ := pem.Decode([]byte(pemData))
|
||||
if block == nil {
|
||||
panic("Invalid PEM data.")
|
||||
} else if block.Type != "CERTIFICATE" {
|
||||
panic("Invalid PEM type.")
|
||||
}
|
||||
|
||||
cert, err := x509.ParseCertificate([]byte(block.Bytes))
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
return cert
|
||||
}
|
||||
|
||||
func TestRevoked(t *testing.T) {
|
||||
if revoked, ok := VerifyCertificate(revokedCert); !ok {
|
||||
fmt.Fprintf(os.Stderr, "Warning: soft fail checking revocation")
|
||||
} else if !revoked {
|
||||
t.Fatalf("revoked certificate should have been marked as revoked")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExpired(t *testing.T) {
|
||||
if revoked, ok := VerifyCertificate(expiredCert); !ok {
|
||||
fmt.Fprintf(os.Stderr, "Warning: soft fail checking revocation")
|
||||
} else if !revoked {
|
||||
t.Fatalf("expired certificate should have been marked as revoked")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGood(t *testing.T) {
|
||||
if revoked, ok := VerifyCertificate(goodCert); !ok {
|
||||
fmt.Fprintf(os.Stderr, "Warning: soft fail checking revocation")
|
||||
} else if revoked {
|
||||
t.Fatalf("good certificate should not have been marked as revoked")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestLdap(t *testing.T) {
|
||||
ldapCert := mustParse(goodComodoCA)
|
||||
ldapCert.CRLDistributionPoints = append(ldapCert.CRLDistributionPoints, "ldap://myldap.example.com")
|
||||
if revoked, ok := VerifyCertificate(ldapCert); revoked || !ok {
|
||||
t.Fatalf("ldap certificate should have been recognized")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLdapURLErr(t *testing.T) {
|
||||
if ldapURL(":") {
|
||||
t.Fatalf("bad url does not cause error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCertNotYetValid(t *testing.T) {
|
||||
notReadyCert := expiredCert
|
||||
notReadyCert.NotBefore = time.Date(3000, time.January, 1, 1, 1, 1, 1, time.Local)
|
||||
notReadyCert.NotAfter = time.Date(3005, time.January, 1, 1, 1, 1, 1, time.Local)
|
||||
if revoked, _ := VerifyCertificate(expiredCert); !revoked {
|
||||
t.Fatalf("not yet verified certificate should have been marked as revoked")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCRLFetchError(t *testing.T) {
|
||||
ldapCert := mustParse(goodComodoCA)
|
||||
ldapCert.CRLDistributionPoints[0] = ""
|
||||
if revoked, ok := VerifyCertificate(ldapCert); ok || revoked {
|
||||
t.Fatalf("Fetching error not encountered")
|
||||
}
|
||||
HardFail = true
|
||||
if revoked, ok := VerifyCertificate(ldapCert); ok || !revoked {
|
||||
t.Fatalf("Fetching error not encountered, hardfail not registered")
|
||||
}
|
||||
HardFail = false
|
||||
}
|
||||
|
||||
func TestBadCRLSet(t *testing.T) {
|
||||
ldapCert := mustParse(goodComodoCA)
|
||||
ldapCert.CRLDistributionPoints[0] = ""
|
||||
CRLSet[""] = nil
|
||||
certIsRevokedCRL(ldapCert, "")
|
||||
if _, ok := CRLSet[""]; ok {
|
||||
t.Fatalf("key emptystring should be deleted from CRLSet")
|
||||
}
|
||||
delete(CRLSet, "")
|
||||
|
||||
}
|
||||
|
||||
func TestCachedCRLSet(t *testing.T) {
|
||||
VerifyCertificate(goodCert)
|
||||
if revoked, ok := VerifyCertificate(goodCert); !ok || revoked {
|
||||
t.Fatalf("Previously fetched CRL's should be read smoothly and unrevoked")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoteFetchError(t *testing.T) {
|
||||
|
||||
badurl := ":"
|
||||
|
||||
if _, err := fetchRemote(badurl); err == nil {
|
||||
t.Fatalf("fetching bad url should result in non-nil error")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestNoOCSPServers(t *testing.T) {
|
||||
badIssuer := goodCert
|
||||
badIssuer.IssuingCertificateURL = []string{" "}
|
||||
certIsRevokedOCSP(badIssuer, true)
|
||||
noOCSPCert := goodCert
|
||||
noOCSPCert.OCSPServer = make([]string, 0)
|
||||
if revoked, ok, _ := certIsRevokedOCSP(noOCSPCert, true); revoked || !ok {
|
||||
t.Fatalf("OCSP falsely registered as enabled for this certificate")
|
||||
}
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "atping_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/atping",
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "atping",
|
||||
embed = [":atping_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
40
cmd/ca-signed/README.txt
Normal file
@@ -0,0 +1,40 @@
ca-signed: verify certificates against a CA
-------------------------------------------

Description
ca-signed verifies whether one or more certificates are signed by a given
Certificate Authority (CA). It prints a concise status per input certificate
along with the certificate’s expiration date when validation succeeds.

Usage
  ca-signed CA.pem cert1.pem [cert2.pem ...]

- CA.pem: A file containing one or more CA certificates in PEM, DER, or PKCS#7/PKCS#12 formats.
- certN.pem: A file containing the end-entity (leaf) certificate to verify. If the file contains a chain,
  the first certificate is treated as the leaf and the remaining ones are used as intermediates.

Output format
For each input certificate file, one line is printed:
  <filename>: OK (expires YYYY-MM-DD)
  <filename>: INVALID

Special self-test mode
  ca-signed selftest

Runs a built-in test suite using embedded certificates. This mode requires no
external files or network access. The program exits with code 0 if all tests
pass, or a non-zero exit code if any test fails. Example output lines include
whether validation succeeds and the leaf’s expiration when applicable.

Examples
  # Verify a server certificate against a root CA
  ca-signed isrg-root-x1.pem le-e7.pem

  # Run the embedded self-test suite
  ca-signed selftest

Notes
- The tool attempts to parse certificates in PEM first, then falls back to
  DER/PKCS#7/PKCS#12 (with an empty password) where applicable.
- Expiration is shown for the leaf certificate only.
- In selftest mode, test certificates are compiled into the binary using go:embed.
287
cmd/ca-signed/main.go
Normal file
@@ -0,0 +1,287 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"embed"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
)
|
||||
|
||||
// loadCertsFromFile attempts to parse certificates from a file that may be in
|
||||
// PEM or DER/PKCS#7 format. Returns the parsed certificates or an error.
|
||||
func loadCertsFromFile(path string) ([]*x509.Certificate, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Try PEM first
|
||||
if certs, err := certlib.ParseCertificatesPEM(data); err == nil {
|
||||
return certs, nil
|
||||
}
|
||||
|
||||
// Try DER/PKCS7/PKCS12 (with no password)
|
||||
if certs, _, err := certlib.ParseCertificatesDER(data, ""); err == nil {
|
||||
return certs, nil
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
func makePoolFromFile(path string) (*x509.CertPool, error) {
|
||||
// Try PEM via helper (it builds a pool)
|
||||
if pool, err := certlib.LoadPEMCertPool(path); err == nil && pool != nil {
|
||||
return pool, nil
|
||||
}
|
||||
|
||||
// Fallback: read as DER(s), add to a new pool
|
||||
certs, err := loadCertsFromFile(path)
|
||||
if err != nil || len(certs) == 0 {
|
||||
return nil, fmt.Errorf("failed to load CA certificates from %s", path)
|
||||
}
|
||||
pool := x509.NewCertPool()
|
||||
for _, c := range certs {
|
||||
pool.AddCert(c)
|
||||
}
|
||||
return pool, nil
|
||||
}
|
||||
|
||||
//go:embed testdata/*.pem
|
||||
var embeddedTestdata embed.FS
|
||||
|
||||
// loadCertsFromBytes attempts to parse certificates from bytes that may be in
|
||||
// PEM or DER/PKCS#7 format.
|
||||
func loadCertsFromBytes(data []byte) ([]*x509.Certificate, error) {
|
||||
// Try PEM first
|
||||
if certs, err := certlib.ParseCertificatesPEM(data); err == nil {
|
||||
return certs, nil
|
||||
}
|
||||
// Try DER/PKCS7/PKCS12 (with no password)
|
||||
if certs, _, err := certlib.ParseCertificatesDER(data, ""); err == nil {
|
||||
return certs, nil
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
func makePoolFromBytes(data []byte) (*x509.CertPool, error) {
|
||||
certs, err := loadCertsFromBytes(data)
|
||||
if err != nil || len(certs) == 0 {
|
||||
return nil, fmt.Errorf("failed to load CA certificates from embedded bytes")
|
||||
}
|
||||
pool := x509.NewCertPool()
|
||||
for _, c := range certs {
|
||||
pool.AddCert(c)
|
||||
}
|
||||
return pool, nil
|
||||
}
|
||||
|
||||
// isSelfSigned returns true if the given certificate is self-signed.
|
||||
// It checks that the subject and issuer match and that the certificate's
|
||||
// signature verifies against its own public key.
|
||||
func isSelfSigned(cert *x509.Certificate) bool {
|
||||
if cert == nil {
|
||||
return false
|
||||
}
|
||||
// Quick check: subject and issuer match
|
||||
if cert.Subject.String() != cert.Issuer.String() {
|
||||
return false
|
||||
}
|
||||
// Cryptographic check: the certificate is signed by itself
|
||||
if err := cert.CheckSignatureFrom(cert); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func verifyAgainstCA(caPool *x509.CertPool, path string) (ok bool, expiry string) {
|
||||
certs, err := loadCertsFromFile(path)
|
||||
if err != nil || len(certs) == 0 {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
leaf := certs[0]
|
||||
ints := x509.NewCertPool()
|
||||
if len(certs) > 1 {
|
||||
for _, ic := range certs[1:] {
|
||||
ints.AddCert(ic)
|
||||
}
|
||||
}
|
||||
|
||||
opts := x509.VerifyOptions{
|
||||
Roots: caPool,
|
||||
Intermediates: ints,
|
||||
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
|
||||
}
|
||||
if _, err := leaf.Verify(opts); err != nil {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
return true, leaf.NotAfter.Format("2006-01-02")
|
||||
}
|
||||
|
||||
func verifyAgainstCABytes(caPool *x509.CertPool, certData []byte) (ok bool, expiry string) {
|
||||
certs, err := loadCertsFromBytes(certData)
|
||||
if err != nil || len(certs) == 0 {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
leaf := certs[0]
|
||||
ints := x509.NewCertPool()
|
||||
if len(certs) > 1 {
|
||||
for _, ic := range certs[1:] {
|
||||
ints.AddCert(ic)
|
||||
}
|
||||
}
|
||||
|
||||
opts := x509.VerifyOptions{
|
||||
Roots: caPool,
|
||||
Intermediates: ints,
|
||||
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
|
||||
}
|
||||
if _, err := leaf.Verify(opts); err != nil {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
return true, leaf.NotAfter.Format("2006-01-02")
|
||||
}
|
||||
|
||||
// selftest runs built-in validation using embedded certificates.
|
||||
func selftest() int {
|
||||
type testCase struct {
|
||||
name string
|
||||
caFile string
|
||||
certFile string
|
||||
expectOK bool
|
||||
}
|
||||
|
||||
cases := []testCase{
|
||||
{name: "ISRG Root X1 validates LE E7", caFile: "testdata/isrg-root-x1.pem", certFile: "testdata/le-e7.pem", expectOK: true},
|
||||
{name: "ISRG Root X1 does NOT validate Google WR2", caFile: "testdata/isrg-root-x1.pem", certFile: "testdata/goog-wr2.pem", expectOK: false},
|
||||
{name: "GTS R1 validates Google WR2", caFile: "testdata/gts-r1.pem", certFile: "testdata/goog-wr2.pem", expectOK: true},
|
||||
{name: "GTS R1 does NOT validate LE E7", caFile: "testdata/gts-r1.pem", certFile: "testdata/le-e7.pem", expectOK: false},
|
||||
}
|
||||
|
||||
failures := 0
|
||||
for _, tc := range cases {
|
||||
caBytes, err := embeddedTestdata.ReadFile(tc.caFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", tc.caFile, err)
|
||||
failures++
|
||||
continue
|
||||
}
|
||||
certBytes, err := embeddedTestdata.ReadFile(tc.certFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", tc.certFile, err)
|
||||
failures++
|
||||
continue
|
||||
}
|
||||
pool, err := makePoolFromBytes(caBytes)
|
||||
if err != nil || pool == nil {
|
||||
fmt.Fprintf(os.Stderr, "selftest: failed to build CA pool for %s: %v\n", tc.caFile, err)
|
||||
failures++
|
||||
continue
|
||||
}
|
||||
ok, exp := verifyAgainstCABytes(pool, certBytes)
|
||||
if ok != tc.expectOK {
|
||||
fmt.Printf("%s: unexpected result: got %v, want %v\n", tc.name, ok, tc.expectOK)
|
||||
failures++
|
||||
} else {
|
||||
if ok {
|
||||
fmt.Printf("%s: OK (expires %s)\n", tc.name, exp)
|
||||
} else {
|
||||
fmt.Printf("%s: INVALID (as expected)\n", tc.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that both embedded root CAs are detected as self-signed
|
||||
roots := []string{"testdata/gts-r1.pem", "testdata/isrg-root-x1.pem"}
|
||||
for _, root := range roots {
|
||||
b, err := embeddedTestdata.ReadFile(root)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", root, err)
|
||||
failures++
|
||||
continue
|
||||
}
|
||||
certs, err := loadCertsFromBytes(b)
|
||||
if err != nil || len(certs) == 0 {
|
||||
fmt.Fprintf(os.Stderr, "selftest: failed to parse cert(s) from %s: %v\n", root, err)
|
||||
failures++
|
||||
continue
|
||||
}
|
||||
leaf := certs[0]
|
||||
if isSelfSigned(leaf) {
|
||||
fmt.Printf("%s: SELF-SIGNED (as expected)\n", root)
|
||||
} else {
|
||||
fmt.Printf("%s: expected SELF-SIGNED, but was not detected as such\n", root)
|
||||
failures++
|
||||
}
|
||||
}
|
||||
|
||||
if failures == 0 {
|
||||
fmt.Println("selftest: PASS")
|
||||
return 0
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "selftest: FAIL (%d failure(s))\n", failures)
|
||||
return 1
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Special selftest mode: single argument "selftest"
|
||||
if len(os.Args) == 2 && os.Args[1] == "selftest" {
|
||||
os.Exit(selftest())
|
||||
}
|
||||
|
||||
if len(os.Args) < 3 {
|
||||
prog := filepath.Base(os.Args[0])
|
||||
fmt.Fprintf(os.Stderr, "Usage:\n %s ca.pem cert1.pem cert2.pem ...\n %s selftest\n", prog, prog)
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
caPath := os.Args[1]
|
||||
caPool, err := makePoolFromFile(caPath)
|
||||
if err != nil || caPool == nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to load CA certificate(s): %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for _, certPath := range os.Args[2:] {
|
||||
ok, exp := verifyAgainstCA(caPool, certPath)
|
||||
name := filepath.Base(certPath)
|
||||
// Load the leaf once for self-signed detection and potential expiry fallback
|
||||
var leaf *x509.Certificate
|
||||
if certs, err := loadCertsFromFile(certPath); err == nil && len(certs) > 0 {
|
||||
leaf = certs[0]
|
||||
}
|
||||
|
||||
// If the certificate is self-signed, prefer the SELF-SIGNED label
|
||||
if isSelfSigned(leaf) {
|
||||
fmt.Printf("%s: SELF-SIGNED\n", name)
|
||||
continue
|
||||
}
|
||||
|
||||
if ok {
|
||||
// Display with the requested format
|
||||
// Example: file: OK (expires 2031-01-01)
|
||||
// Ensure deterministic date formatting
|
||||
// Note: no timezone displayed; date only as per example
|
||||
// If exp ended up empty for some reason, recompute safely
|
||||
if exp == "" {
|
||||
if leaf != nil {
|
||||
exp = leaf.NotAfter.Format("2006-01-02")
|
||||
} else {
|
||||
// fallback to the current date to avoid empty; though shouldn't happen
|
||||
exp = time.Now().Format("2006-01-02")
|
||||
}
|
||||
}
|
||||
fmt.Printf("%s: OK (expires %s)\n", name, exp)
|
||||
} else {
|
||||
fmt.Printf("%s: INVALID\n", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
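The main program above relies on helpers such as loadCertsFromFile, makePoolFromFile, and isSelfSigned that are defined earlier in main.go and do not appear in this hunk. As a rough illustration only, a self-signed check along the following lines would be consistent with how selftest() and main() use it; the name and exact logic here are assumptions, not the file's actual implementation.

``` go
package main

import (
	"bytes"
	"crypto/x509"
)

// isSelfSignedSketch is a hypothetical stand-in for the isSelfSigned helper
// referenced above; the real implementation lives earlier in main.go.
func isSelfSignedSketch(cert *x509.Certificate) bool {
	if cert == nil {
		return false
	}
	// A self-signed certificate names itself as issuer...
	if !bytes.Equal(cert.RawSubject, cert.RawIssuer) {
		return false
	}
	// ...and its signature verifies under its own public key.
	return cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature) == nil
}
```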
29
cmd/ca-signed/testdata/goog-wr2.pem
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFCzCCAvOgAwIBAgIQf/AFoHxM3tEArZ1mpRB7mDANBgkqhkiG9w0BAQsFADBH
|
||||
MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM
|
||||
QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMjMxMjEzMDkwMDAwWhcNMjkwMjIw
|
||||
MTQwMDAwWjA7MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVR29vZ2xlIFRydXN0IFNl
|
||||
cnZpY2VzMQwwCgYDVQQDEwNXUjIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
|
||||
AoIBAQCp/5x/RR5wqFOfytnlDd5GV1d9vI+aWqxG8YSau5HbyfsvAfuSCQAWXqAc
|
||||
+MGr+XgvSszYhaLYWTwO0xj7sfUkDSbutltkdnwUxy96zqhMt/TZCPzfhyM1IKji
|
||||
aeKMTj+xWfpgoh6zySBTGYLKNlNtYE3pAJH8do1cCA8Kwtzxc2vFE24KT3rC8gIc
|
||||
LrRjg9ox9i11MLL7q8Ju26nADrn5Z9TDJVd06wW06Y613ijNzHoU5HEDy01hLmFX
|
||||
xRmpC5iEGuh5KdmyjS//V2pm4M6rlagplmNwEmceOuHbsCFx13ye/aoXbv4r+zgX
|
||||
FNFmp6+atXDMyGOBOozAKql2N87jAgMBAAGjgf4wgfswDgYDVR0PAQH/BAQDAgGG
|
||||
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/
|
||||
AgEAMB0GA1UdDgQWBBTeGx7teRXUPjckwyG77DQ5bUKyMDAfBgNVHSMEGDAWgBTk
|
||||
rysmcRorSCeFL1JmLO/wiRNxPjA0BggrBgEFBQcBAQQoMCYwJAYIKwYBBQUHMAKG
|
||||
GGh0dHA6Ly9pLnBraS5nb29nL3IxLmNydDArBgNVHR8EJDAiMCCgHqAchhpodHRw
|
||||
Oi8vYy5wa2kuZ29vZy9yL3IxLmNybDATBgNVHSAEDDAKMAgGBmeBDAECATANBgkq
|
||||
hkiG9w0BAQsFAAOCAgEARXWL5R87RBOWGqtY8TXJbz3S0DNKhjO6V1FP7sQ02hYS
|
||||
TL8Tnw3UVOlIecAwPJQl8hr0ujKUtjNyC4XuCRElNJThb0Lbgpt7fyqaqf9/qdLe
|
||||
SiDLs/sDA7j4BwXaWZIvGEaYzq9yviQmsR4ATb0IrZNBRAq7x9UBhb+TV+PfdBJT
|
||||
DhEl05vc3ssnbrPCuTNiOcLgNeFbpwkuGcuRKnZc8d/KI4RApW//mkHgte8y0YWu
|
||||
ryUJ8GLFbsLIbjL9uNrizkqRSvOFVU6xddZIMy9vhNkSXJ/UcZhjJY1pXAprffJB
|
||||
vei7j+Qi151lRehMCofa6WBmiA4fx+FOVsV2/7R6V2nyAiIJJkEd2nSi5SnzxJrl
|
||||
Xdaqev3htytmOPvoKWa676ATL/hzfvDaQBEcXd2Ppvy+275W+DKcH0FBbX62xevG
|
||||
iza3F4ydzxl6NJ8hk8R+dDXSqv1MbRT1ybB5W0k8878XSOjvmiYTDIfyc9acxVJr
|
||||
Y/cykHipa+te1pOhv7wYPYtZ9orGBV5SGOJm4NrB3K1aJar0RfzxC3ikr7Dyc6Qw
|
||||
qDTBU39CluVIQeuQRgwG3MuSxl7zRERDRilGoKb8uY45JzmxWuKxrfwT/478JuHU
|
||||
/oTxUFqOl2stKnn7QGTq8z29W+GgBLCXSBxC9epaHM0myFH/FJlniXJfHeytWt0=
|
||||
-----END CERTIFICATE-----
|
||||
31
cmd/ca-signed/testdata/gts-r1.pem
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw
|
||||
CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
|
||||
MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
|
||||
MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
|
||||
Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA
|
||||
A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo
|
||||
27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w
|
||||
Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw
|
||||
TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl
|
||||
qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH
|
||||
szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8
|
||||
Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk
|
||||
MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92
|
||||
wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p
|
||||
aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN
|
||||
VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID
|
||||
AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
|
||||
FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb
|
||||
C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe
|
||||
QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy
|
||||
h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4
|
||||
7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J
|
||||
ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef
|
||||
MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/
|
||||
Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT
|
||||
6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ
|
||||
0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm
|
||||
2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb
|
||||
bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c
|
||||
-----END CERTIFICATE-----
|
||||
31
cmd/ca-signed/testdata/isrg-root-x1.pem
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
|
||||
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
||||
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
|
||||
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
|
||||
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
|
||||
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
|
||||
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
|
||||
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
|
||||
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
|
||||
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
|
||||
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
|
||||
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
|
||||
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
|
||||
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
|
||||
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
|
||||
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
|
||||
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
|
||||
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
|
||||
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
|
||||
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
|
||||
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
|
||||
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
|
||||
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
|
||||
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
|
||||
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
|
||||
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
|
||||
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
|
||||
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
|
||||
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
||||
-----END CERTIFICATE-----
|
||||
26
cmd/ca-signed/testdata/le-e7.pem
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIEVzCCAj+gAwIBAgIRAKp18eYrjwoiCWbTi7/UuqEwDQYJKoZIhvcNAQELBQAw
|
||||
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
||||
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjQwMzEzMDAwMDAw
|
||||
WhcNMjcwMzEyMjM1OTU5WjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg
|
||||
RW5jcnlwdDELMAkGA1UEAxMCRTcwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARB6AST
|
||||
CFh/vjcwDMCgQer+VtqEkz7JANurZxLP+U9TCeioL6sp5Z8VRvRbYk4P1INBmbef
|
||||
QHJFHCxcSjKmwtvGBWpl/9ra8HW0QDsUaJW2qOJqceJ0ZVFT3hbUHifBM/2jgfgw
|
||||
gfUwDgYDVR0PAQH/BAQDAgGGMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcD
|
||||
ATASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBSuSJ7chx1EoG/aouVgdAR4
|
||||
wpwAgDAfBgNVHSMEGDAWgBR5tFnme7bl5AFzgAiIyBpY9umbbjAyBggrBgEFBQcB
|
||||
AQQmMCQwIgYIKwYBBQUHMAKGFmh0dHA6Ly94MS5pLmxlbmNyLm9yZy8wEwYDVR0g
|
||||
BAwwCjAIBgZngQwBAgEwJwYDVR0fBCAwHjAcoBqgGIYWaHR0cDovL3gxLmMubGVu
|
||||
Y3Iub3JnLzANBgkqhkiG9w0BAQsFAAOCAgEAjx66fDdLk5ywFn3CzA1w1qfylHUD
|
||||
aEf0QZpXcJseddJGSfbUUOvbNR9N/QQ16K1lXl4VFyhmGXDT5Kdfcr0RvIIVrNxF
|
||||
h4lqHtRRCP6RBRstqbZ2zURgqakn/Xip0iaQL0IdfHBZr396FgknniRYFckKORPG
|
||||
yM3QKnd66gtMst8I5nkRQlAg/Jb+Gc3egIvuGKWboE1G89NTsN9LTDD3PLj0dUMr
|
||||
OIuqVjLB8pEC6yk9enrlrqjXQgkLEYhXzq7dLafv5Vkig6Gl0nuuqjqfp0Q1bi1o
|
||||
yVNAlXe6aUXw92CcghC9bNsKEO1+M52YY5+ofIXlS/SEQbvVYYBLZ5yeiglV6t3S
|
||||
M6H+vTG0aP9YHzLn/KVOHzGQfXDP7qM5tkf+7diZe7o2fw6O7IvN6fsQXEQQj8TJ
|
||||
UXJxv2/uJhcuy/tSDgXwHM8Uk34WNbRT7zGTGkQRX0gsbjAea/jYAoWv0ZvQRwpq
|
||||
Pe79D/i7Cep8qWnA+7AE/3B3S/3dEEYmc0lpe1366A/6GEgk3ktr9PEoQrLChs6I
|
||||
tu3wnNLB2euC8IKGLQFpGtOO/2/hiAKjyajaBP25w1jF0Wl8Bbqne3uZ2q1GyPFJ
|
||||
YRmT7/OXpmOH/FVLtwS+8ng1cAmpCujPwteJZNcDG0sF2n/sc0+SQf49fdyUK0ty
|
||||
+VUwFj9tmWxyR/M=
|
||||
-----END CERTIFICATE-----
|
||||
148
cmd/cert-bundler/README.txt
Normal file
@@ -0,0 +1,148 @@
|
||||
cert-bundler: create certificate chain archives
|
||||
------------------------------------------------
|
||||
|
||||
Description
|
||||
cert-bundler creates archives of certificate chains from a YAML configuration
|
||||
file. It validates certificates, checks expiration dates, and generates
|
||||
archives in multiple formats (zip, tar.gz) with optional manifest files
|
||||
containing SHA256 checksums.
|
||||
|
||||
Usage
|
||||
cert-bundler [options]
|
||||
|
||||
Options:
|
||||
-c <file> Path to YAML configuration file (default: bundle.yaml)
|
||||
-o <dir> Output directory for archives (default: pkg)
|
||||
|
||||
YAML Configuration Format
|
||||
|
||||
The configuration file uses the following structure:
|
||||
|
||||
config:
|
||||
hashes: <filename>
|
||||
expiry: <duration>
|
||||
chains:
|
||||
<group_name>:
|
||||
certs:
|
||||
- root: <path>
|
||||
intermediates:
|
||||
- <path>
|
||||
- <path>
|
||||
- root: <path>
|
||||
intermediates:
|
||||
- <path>
|
||||
outputs:
|
||||
include_single: <bool>
|
||||
include_individual: <bool>
|
||||
manifest: <bool>
|
||||
encoding: <encoding>
|
||||
formats:
|
||||
- <format>
|
||||
- <format>
|
||||
|
||||
Configuration Fields
|
||||
|
||||
config:
|
||||
hashes: (optional) Name of the file to write SHA256 checksums of all
|
||||
generated archives. If omitted, no hash file is created.
|
||||
expiry: (optional) Expiration warning threshold. Certificates expiring
|
||||
within this period will trigger a warning. Supports formats like
|
||||
"1y" (year), "6m" (month), "30d" (day). Default: 1y
|
||||
|
||||
chains:
|
||||
Each key under "chains" defines a named certificate group. All certificates
|
||||
in a group are bundled together into archives with the group name.
|
||||
|
||||
certs:
|
||||
List of certificate chains. Each chain has:
|
||||
root: Path to root CA certificate (PEM or DER format)
|
||||
intermediates: List of paths to intermediate certificates
|
||||
|
||||
All intermediates are validated against their root CA. An error is
|
||||
reported if signature verification fails.
|
||||
|
||||
outputs:
|
||||
Defines output formats and content for the group's archives:
|
||||
|
||||
include_single: (bool) If true, all certificates in the group are
|
||||
concatenated into a single file named "bundle.pem"
|
||||
(or "bundle.crt" for DER encoding).
|
||||
|
||||
include_individual: (bool) If true, each certificate is included as
|
||||
a separate file in the archive, named after the
|
||||
original file (e.g., "int/cca2.pem" becomes
|
||||
"cca2.pem").
|
||||
|
||||
manifest: (bool) If true, a MANIFEST file is included containing
|
||||
SHA256 checksums of all files in the archive.
|
||||
|
||||
encoding: Specifies certificate encoding in the archive:
|
||||
- "pem": PEM format with .pem extension (default)
|
||||
- "der": DER format with .crt extension
|
||||
- "both": Both PEM and DER versions are included
|
||||
|
||||
formats: List of archive formats to generate:
|
||||
- "zip": Creates a .zip archive
|
||||
- "tgz": Creates a .tar.gz archive
|
||||
|
||||
Output Files
|
||||
|
||||
For each group and format combination, an archive is created:
|
||||
<group_name>.zip or <group_name>.tar.gz
|
||||
|
||||
If config.hashes is specified, a hash file is created in the output directory
|
||||
containing SHA256 checksums of all generated archives.
|
||||
|
||||
Example Configuration
|
||||
|
||||
config:
|
||||
hashes: bundle.sha256
|
||||
expiry: 1y
|
||||
chains:
|
||||
core_certs:
|
||||
certs:
|
||||
- root: roots/core-ca.pem
|
||||
intermediates:
|
||||
- int/cca1.pem
|
||||
- int/cca2.pem
|
||||
- int/cca3.pem
|
||||
- root: roots/ssh-ca.pem
|
||||
intermediates:
|
||||
- ssh/ssh_dmz1.pem
|
||||
- ssh/ssh_internal.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: true
|
||||
manifest: true
|
||||
encoding: pem
|
||||
formats:
|
||||
- zip
|
||||
- tgz
|
||||
|
||||
This configuration:
|
||||
- Creates core_certs.zip and core_certs.tar.gz in the output directory
|
||||
- Each archive contains bundle.pem (all certificates concatenated)
|
||||
- Each archive contains individual certificates (core-ca.pem, cca1.pem, etc.)
|
||||
- Each archive includes a MANIFEST file with SHA256 checksums
|
||||
- Creates bundle.sha256 with checksums of the two archives
|
||||
- Warns if any certificate expires within 1 year
|
||||
|
||||
Examples
|
||||
|
||||
# Create bundles using default configuration (bundle.yaml -> pkg/)
|
||||
cert-bundler
|
||||
|
||||
# Use custom configuration and output directory
|
||||
cert-bundler -c myconfig.yaml -o output
|
||||
|
||||
# Create bundles from testdata configuration
|
||||
cert-bundler -c testdata/bundle.yaml -o testdata/pkg
|
||||
|
||||
Notes
|
||||
- Certificate paths in the YAML are relative to the current working directory
|
||||
- All intermediates must be properly signed by their specified root CA
|
||||
- Certificates are checked for expiration; warnings are printed to stderr
|
||||
- Expired certificates do not prevent archive creation but generate warnings
|
||||
- Both PEM and DER certificate formats are supported as input
|
||||
- Archive filenames use the group name, not individual chain names
|
||||
- If both include_single and include_individual are true, archives contain both
|
||||
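The hash file described above pairs each generated archive with its SHA256 digest. A minimal sketch of a consumer for that file, assuming one "<sha256-hex> <filename>" entry per line with the archives sitting next to the hash file (verifyHashFile is an illustrative name, not part of cert-bundler):

``` go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// verifyHashFile re-hashes each archive named in the hash file (e.g.
// pkg/bundle.sha256) and reports the first mismatch it finds.
func verifyHashFile(hashPath string) error {
	data, err := os.ReadFile(hashPath)
	if err != nil {
		return err
	}
	dir := filepath.Dir(hashPath)
	for _, line := range strings.Split(strings.TrimSpace(string(data)), "\n") {
		fields := strings.Fields(line)
		if len(fields) != 2 {
			return fmt.Errorf("malformed hash line: %q", line)
		}
		body, err := os.ReadFile(filepath.Join(dir, fields[1]))
		if err != nil {
			return err
		}
		sum := sha256.Sum256(body)
		if hex.EncodeToString(sum[:]) != fields[0] {
			return fmt.Errorf("checksum mismatch for %s", fields[1])
		}
	}
	return nil
}
```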
489
cmd/cert-bundler/main.go
Normal file
@@ -0,0 +1,489 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
_ "embed"
|
||||
"encoding/pem"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// Config represents the top-level YAML configuration
|
||||
type Config struct {
|
||||
Config struct {
|
||||
Hashes string `yaml:"hashes"`
|
||||
Expiry string `yaml:"expiry"`
|
||||
} `yaml:"config"`
|
||||
Chains map[string]ChainGroup `yaml:"chains"`
|
||||
}
|
||||
|
||||
// ChainGroup represents a named group of certificate chains
|
||||
type ChainGroup struct {
|
||||
Certs []CertChain `yaml:"certs"`
|
||||
Outputs Outputs `yaml:"outputs"`
|
||||
}
|
||||
|
||||
// CertChain represents a root certificate and its intermediates
|
||||
type CertChain struct {
|
||||
Root string `yaml:"root"`
|
||||
Intermediates []string `yaml:"intermediates"`
|
||||
}
|
||||
|
||||
// Outputs defines output format options
|
||||
type Outputs struct {
|
||||
IncludeSingle bool `yaml:"include_single"`
|
||||
IncludeIndividual bool `yaml:"include_individual"`
|
||||
Manifest bool `yaml:"manifest"`
|
||||
Formats []string `yaml:"formats"`
|
||||
Encoding string `yaml:"encoding"`
|
||||
}
|
||||
|
||||
var (
|
||||
configFile string
|
||||
outputDir string
|
||||
)
|
||||
|
||||
var formatExtensions = map[string]string{
|
||||
"zip": ".zip",
|
||||
"tgz": ".tar.gz",
|
||||
}
|
||||
|
||||
//go:embed README.txt
|
||||
var readmeContent string
|
||||
|
||||
func usage() {
|
||||
fmt.Fprint(os.Stderr, readmeContent)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Usage = usage
|
||||
flag.StringVar(&configFile, "c", "bundle.yaml", "path to YAML configuration file")
|
||||
flag.StringVar(&outputDir, "o", "pkg", "output directory for archives")
|
||||
flag.Parse()
|
||||
|
||||
if configFile == "" {
|
||||
fmt.Fprintf(os.Stderr, "Error: configuration file required (-c flag)\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Load and parse configuration
|
||||
cfg, err := loadConfig(configFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error loading config: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Parse expiry duration (default 1 year)
|
||||
expiryDuration := 365 * 24 * time.Hour
|
||||
if cfg.Config.Expiry != "" {
|
||||
expiryDuration, err = parseDuration(cfg.Config.Expiry)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error parsing expiry: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Create output directory if it doesn't exist
|
||||
if err := os.MkdirAll(outputDir, 0755); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error creating output directory: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Process each chain group
|
||||
createdFiles := []string{}
|
||||
for groupName, group := range cfg.Chains {
|
||||
files, err := processChainGroup(groupName, group, expiryDuration)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error processing chain group %s: %v\n", groupName, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
createdFiles = append(createdFiles, files...)
|
||||
}
|
||||
|
||||
// Generate hash file for all created archives
|
||||
if cfg.Config.Hashes != "" {
|
||||
hashFile := filepath.Join(outputDir, cfg.Config.Hashes)
|
||||
if err := generateHashFile(hashFile, createdFiles); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error generating hash file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("Certificate bundling completed successfully")
|
||||
}
|
||||
|
||||
func loadConfig(path string) (*Config, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
if err := yaml.Unmarshal(data, &cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func parseDuration(s string) (time.Duration, error) {
|
||||
// Support simple formats like "1y", "6m", "30d"
|
||||
if len(s) < 2 {
|
||||
return 0, fmt.Errorf("invalid duration format: %s", s)
|
||||
}
|
||||
|
||||
unit := s[len(s)-1]
|
||||
value := s[:len(s)-1]
|
||||
|
||||
var multiplier time.Duration
|
||||
switch unit {
|
||||
case 'y', 'Y':
|
||||
multiplier = 365 * 24 * time.Hour
|
||||
case 'm', 'M':
|
||||
multiplier = 30 * 24 * time.Hour
|
||||
case 'd', 'D':
|
||||
multiplier = 24 * time.Hour
|
||||
default:
|
||||
return time.ParseDuration(s)
|
||||
}
|
||||
|
||||
var num int
|
||||
_, err := fmt.Sscanf(value, "%d", &num)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("invalid duration value: %s", s)
|
||||
}
|
||||
|
||||
return time.Duration(num) * multiplier, nil
|
||||
}
|
||||
|
||||
func processChainGroup(groupName string, group ChainGroup, expiryDuration time.Duration) ([]string, error) {
|
||||
var createdFiles []string
|
||||
|
||||
// Default encoding to "pem" if not specified
|
||||
encoding := group.Outputs.Encoding
|
||||
if encoding == "" {
|
||||
encoding = "pem"
|
||||
}
|
||||
|
||||
// Collect data from all chains in the group
|
||||
var singleFileCerts []*x509.Certificate
|
||||
var individualCerts []certWithPath
|
||||
|
||||
for _, chain := range group.Certs {
|
||||
// Step 1: Load all certificates for this chain
|
||||
allCerts := make(map[string]*x509.Certificate)
|
||||
|
||||
// Load root certificate
|
||||
rootCert, err := certlib.LoadCertificate(chain.Root)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load root certificate %s: %v", chain.Root, err)
|
||||
}
|
||||
allCerts[chain.Root] = rootCert
|
||||
|
||||
// Check expiry for root
|
||||
checkExpiry(chain.Root, rootCert, expiryDuration)
|
||||
|
||||
// Add root to single file if needed
|
||||
if group.Outputs.IncludeSingle {
|
||||
singleFileCerts = append(singleFileCerts, rootCert)
|
||||
}
|
||||
|
||||
// Add root to individual files if needed
|
||||
if group.Outputs.IncludeIndividual {
|
||||
individualCerts = append(individualCerts, certWithPath{
|
||||
cert: rootCert,
|
||||
path: chain.Root,
|
||||
})
|
||||
}
|
||||
|
||||
// Step 2: Load and validate intermediates
|
||||
for _, intPath := range chain.Intermediates {
|
||||
intCert, err := certlib.LoadCertificate(intPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load intermediate certificate %s: %v", intPath, err)
|
||||
}
|
||||
allCerts[intPath] = intCert
|
||||
|
||||
// Validate that intermediate is signed by root
|
||||
if err := intCert.CheckSignatureFrom(rootCert); err != nil {
|
||||
return nil, fmt.Errorf("intermediate %s is not properly signed by root %s: %v", intPath, chain.Root, err)
|
||||
}
|
||||
|
||||
// Check expiry for intermediate
|
||||
checkExpiry(intPath, intCert, expiryDuration)
|
||||
|
||||
// Add intermediate to a single file if needed
|
||||
if group.Outputs.IncludeSingle {
|
||||
singleFileCerts = append(singleFileCerts, intCert)
|
||||
}
|
||||
|
||||
// Add intermediate to individual files if needed
|
||||
if group.Outputs.IncludeIndividual {
|
||||
individualCerts = append(individualCerts, certWithPath{
|
||||
cert: intCert,
|
||||
path: intPath,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Prepare files for inclusion in archives for the entire group.
|
||||
var archiveFiles []fileEntry
|
||||
|
||||
// Handle a single bundle file.
|
||||
if group.Outputs.IncludeSingle && len(singleFileCerts) > 0 {
|
||||
files, err := encodeCertsToFiles(singleFileCerts, "bundle", encoding, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode single bundle: %v", err)
|
||||
}
|
||||
archiveFiles = append(archiveFiles, files...)
|
||||
}
|
||||
|
||||
// Handle individual files
|
||||
if group.Outputs.IncludeIndividual {
|
||||
for _, cp := range individualCerts {
|
||||
baseName := strings.TrimSuffix(filepath.Base(cp.path), filepath.Ext(cp.path))
|
||||
files, err := encodeCertsToFiles([]*x509.Certificate{cp.cert}, baseName, encoding, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode individual cert %s: %v", cp.path, err)
|
||||
}
|
||||
archiveFiles = append(archiveFiles, files...)
|
||||
}
|
||||
}
|
||||
|
||||
// Generate manifest if requested
|
||||
if group.Outputs.Manifest {
|
||||
manifestContent := generateManifest(archiveFiles)
|
||||
archiveFiles = append(archiveFiles, fileEntry{
|
||||
name: "MANIFEST",
|
||||
content: manifestContent,
|
||||
})
|
||||
}
|
||||
|
||||
// Create archives for the entire group
|
||||
for _, format := range group.Outputs.Formats {
|
||||
ext, ok := formatExtensions[format]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unsupported format: %s", format)
|
||||
}
|
||||
archivePath := filepath.Join(outputDir, groupName+ext)
|
||||
switch format {
|
||||
case "zip":
|
||||
if err := createZipArchive(archivePath, archiveFiles); err != nil {
|
||||
return nil, fmt.Errorf("failed to create zip archive: %v", err)
|
||||
}
|
||||
case "tgz":
|
||||
if err := createTarGzArchive(archivePath, archiveFiles); err != nil {
|
||||
return nil, fmt.Errorf("failed to create tar.gz archive: %v", err)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported format: %s", format)
|
||||
}
|
||||
createdFiles = append(createdFiles, archivePath)
|
||||
}
|
||||
|
||||
return createdFiles, nil
|
||||
}
|
||||
|
||||
func checkExpiry(path string, cert *x509.Certificate, expiryDuration time.Duration) {
|
||||
now := time.Now()
|
||||
expiryThreshold := now.Add(expiryDuration)
|
||||
|
||||
if cert.NotAfter.Before(expiryThreshold) {
|
||||
daysUntilExpiry := int(cert.NotAfter.Sub(now).Hours() / 24)
|
||||
if daysUntilExpiry < 0 {
|
||||
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s has EXPIRED (expired %d days ago)\n", path, -daysUntilExpiry)
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s will expire in %d days (on %s)\n", path, daysUntilExpiry, cert.NotAfter.Format("2006-01-02"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type fileEntry struct {
|
||||
name string
|
||||
content []byte
|
||||
}
|
||||
|
||||
type certWithPath struct {
|
||||
cert *x509.Certificate
|
||||
path string
|
||||
}
|
||||
|
||||
// encodeCertsToFiles converts certificates to file entries based on encoding type
|
||||
// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file
|
||||
func encodeCertsToFiles(certs []*x509.Certificate, baseName string, encoding string, isSingle bool) ([]fileEntry, error) {
|
||||
var files []fileEntry
|
||||
|
||||
switch encoding {
|
||||
case "pem":
|
||||
pemContent, err := encodeCertsToPEM(certs, isSingle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".pem",
|
||||
content: pemContent,
|
||||
})
|
||||
case "der":
|
||||
if isSingle {
|
||||
// For single file in DER, concatenate all cert DER bytes
|
||||
var derContent []byte
|
||||
for _, cert := range certs {
|
||||
derContent = append(derContent, cert.Raw...)
|
||||
}
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: derContent,
|
||||
})
|
||||
} else {
|
||||
// Individual DER file (should only have one cert)
|
||||
if len(certs) > 0 {
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: certs[0].Raw,
|
||||
})
|
||||
}
|
||||
}
|
||||
case "both":
|
||||
// Add PEM version
|
||||
pemContent, err := encodeCertsToPEM(certs, isSingle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".pem",
|
||||
content: pemContent,
|
||||
})
|
||||
// Add DER version
|
||||
if isSingle {
|
||||
var derContent []byte
|
||||
for _, cert := range certs {
|
||||
derContent = append(derContent, cert.Raw...)
|
||||
}
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: derContent,
|
||||
})
|
||||
} else {
|
||||
if len(certs) > 0 {
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: certs[0].Raw,
|
||||
})
|
||||
}
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported encoding: %s (must be 'pem', 'der', or 'both')", encoding)
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// encodeCertsToPEM encodes certificates to PEM format
|
||||
func encodeCertsToPEM(certs []*x509.Certificate, concatenate bool) ([]byte, error) {
|
||||
var pemContent []byte
|
||||
for _, cert := range certs {
|
||||
pemBlock := &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: cert.Raw,
|
||||
}
|
||||
pemContent = append(pemContent, pem.EncodeToMemory(pemBlock)...)
|
||||
}
|
||||
return pemContent, nil
|
||||
}
|
||||
|
||||
func generateManifest(files []fileEntry) []byte {
|
||||
var manifest strings.Builder
|
||||
for _, file := range files {
|
||||
if file.name == "MANIFEST" {
|
||||
continue
|
||||
}
|
||||
hash := sha256.Sum256(file.content)
|
||||
manifest.WriteString(fmt.Sprintf("%x %s\n", hash, file.name))
|
||||
}
|
||||
return []byte(manifest.String())
|
||||
}
|
||||
|
||||
func createZipArchive(path string, files []fileEntry) error {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
w := zip.NewWriter(f)
|
||||
defer w.Close()
|
||||
|
||||
for _, file := range files {
|
||||
fw, err := w.Create(file.name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := fw.Write(file.content); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createTarGzArchive(path string, files []fileEntry) error {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
gw := gzip.NewWriter(f)
|
||||
defer gw.Close()
|
||||
|
||||
tw := tar.NewWriter(gw)
|
||||
defer tw.Close()
|
||||
|
||||
for _, file := range files {
|
||||
hdr := &tar.Header{
|
||||
Name: file.name,
|
||||
Mode: 0644,
|
||||
Size: int64(len(file.content)),
|
||||
}
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := tw.Write(file.content); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func generateHashFile(path string, files []string) error {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
for _, file := range files {
|
||||
data, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hash := sha256.Sum256(data)
|
||||
fmt.Fprintf(f, "%x %s\n", hash, filepath.Base(file))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
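parseDuration above accepts the "1y", "6m", and "30d" shorthands and otherwise falls back to time.ParseDuration. A small table-driven test along these lines would exercise that behaviour; it is illustrative only and not part of the change set.

``` go
package main

import (
	"testing"
	"time"
)

// TestParseDurationSketch checks the shorthand suffixes handled by
// parseDuration and the fallback to time.ParseDuration.
func TestParseDurationSketch(t *testing.T) {
	cases := map[string]time.Duration{
		"1y":  365 * 24 * time.Hour,
		"6m":  6 * 30 * 24 * time.Hour,
		"30d": 30 * 24 * time.Hour,
		"12h": 12 * time.Hour, // unknown suffix falls through to time.ParseDuration
	}
	for in, want := range cases {
		got, err := parseDuration(in)
		if err != nil {
			t.Fatalf("parseDuration(%q): %v", in, err)
		}
		if got != want {
			t.Errorf("parseDuration(%q) = %v, want %v", in, got, want)
		}
	}
}
```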
189
cmd/cert-bundler/prompt.txt
Normal file
@@ -0,0 +1,189 @@
|
||||
Task: build a certificate bundling tool in cmd/cert-bundler. It
|
||||
creates archives of certificates chains.
|
||||
|
||||
A YAML file for this looks something like:
|
||||
|
||||
``` yaml
|
||||
config:
|
||||
hashes: bundle.sha256
|
||||
expiry: 1y
|
||||
chains:
|
||||
core_certs:
|
||||
certs:
|
||||
- root: roots/core-ca.pem
|
||||
intermediates:
|
||||
- int/cca1.pem
|
||||
- int/cca2.pem
|
||||
- int/cca3.pem
|
||||
- root: roots/ssh-ca.pem
|
||||
intermediates:
|
||||
- ssh/ssh_dmz1.pem
|
||||
- ssh/ssh_internal.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: true
|
||||
manifest: true
|
||||
formats:
|
||||
- zip
|
||||
- tgz
|
||||
```
|
||||
|
||||
Some requirements:
|
||||
|
||||
1. First, all the certificates should be loaded.
|
||||
2. For each root, each of the individual intermediates should be
|
||||
checked to make sure they are properly signed by the root CA.
|
||||
3. The program should optionally take an expiration period (defaulting
|
||||
to one year), specified in config.expiration, and if any certificate
|
||||
is within that expiration period, a warning should be printed.
|
||||
4. If outputs.include_single is true, all certificates under chains
|
||||
should be concatenated into a single file.
|
||||
5. If outputs.include_individual is true, all certificates under
|
||||
chains should be included at the root level (e.g. int/cca2.pem
|
||||
would be cca2.pem in the archive).
|
||||
6. If bundle.manifest is true, a "MANIFEST" file is created with
|
||||
SHA256 sums of each file included in the archive.
|
||||
7. For each of the formats, create an archive file in the output
|
||||
directory (specified with `-o`) with that format.
|
||||
- If zip is included, create a .zip file.
|
||||
- If tgz is included, create a .tar.gz file with default compression
|
||||
levels.
|
||||
- All archive files should include any generated files (single
|
||||
and/or individual) in the top-level directory.
|
||||
8. In the output directory, create a file with the same name as
|
||||
config.hashes that contains the SHA256 sum of all files created.
|
||||
|
||||
-----
|
||||
|
||||
The outputs.include_single and outputs.include_individual describe
|
||||
what should go in the final archive. If both are specified, the output
|
||||
archive should include both a single bundle.pem and each individual
|
||||
certificate, for example.
|
||||
|
||||
-----
|
||||
|
||||
As it stands, given the following `bundle.yaml`:
|
||||
|
||||
``` yaml
|
||||
config:
|
||||
hashes: bundle.sha256
|
||||
expiry: 1y
|
||||
chains:
|
||||
core_certs:
|
||||
certs:
|
||||
- root: pems/gts-r1.pem
|
||||
intermediates:
|
||||
- pems/goog-wr2.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: true
|
||||
manifest: true
|
||||
formats:
|
||||
- zip
|
||||
- tgz
|
||||
- root: pems/isrg-root-x1.pem
|
||||
intermediates:
|
||||
- pems/le-e7.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: false
|
||||
manifest: true
|
||||
formats:
|
||||
- zip
|
||||
- tgz
|
||||
google_certs:
|
||||
certs:
|
||||
- root: pems/gts-r1.pem
|
||||
intermediates:
|
||||
- pems/goog-wr2.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: false
|
||||
manifest: true
|
||||
formats:
|
||||
- tgz
|
||||
lets_encrypt:
|
||||
certs:
|
||||
- root: pems/isrg-root-x1.pem
|
||||
intermediates:
|
||||
- pems/le-e7.pem
|
||||
outputs:
|
||||
include_single: false
|
||||
include_individual: true
|
||||
manifest: false
|
||||
formats:
|
||||
- zip
|
||||
```
|
||||
|
||||
The program outputs the following files:
|
||||
|
||||
- bundle.sha256
|
||||
- core_certs_0.tgz (contains individual certs)
|
||||
- core_certs_0.zip (contains individual certs)
|
||||
- core_certs_1.tgz (contains core_certs.pem)
|
||||
- core_certs_1.zip (contains core_certs.pem)
|
||||
- google_certs_0.tgz
|
||||
- lets_encrypt_0.zip
|
||||
|
||||
It should output
|
||||
|
||||
- bundle.sha256
|
||||
- core_certs.tgz
|
||||
- core_certs.zip
|
||||
- google_certs.tgz
|
||||
- lets_encrypt.zip
|
||||
|
||||
core_certs.* should contain `bundle.pem` and all the individual
|
||||
certs. There should be no _$n$ variants of archives.
|
||||
|
||||
-----
|
||||
|
||||
Add an additional field to outputs: encoding. It should accept one of
|
||||
`der`, `pem`, or `both`. If `der`, certificates should be output as a
|
||||
`.crt` file containing a DER-encoded certificate. If `pem`, certificates
|
||||
should be output as a `.pem` file containing a PEM-encoded certificate.
|
||||
If both, both the `.crt` and `.pem` certificate should be included.
|
||||
|
||||
For example, given the previous config, if `encoding` is der, the
|
||||
google_certs.tgz archive should contain
|
||||
|
||||
- bundle.crt
|
||||
- MANIFEST
|
||||
|
||||
Or with lets_encrypt.zip:
|
||||
|
||||
- isrg-root-x1.crt
|
||||
- le-e7.crt
|
||||
|
||||
However, if `encoding` is pem, the lets_encrypt.zip archive should contain:
|
||||
|
||||
- isrg-root-x1.pem
|
||||
- le-e7.pem
|
||||
|
||||
And if `encoding` is both, the lets_encrypt.zip archive should contain:
|
||||
|
||||
- isrg-root-x1.crt
|
||||
- isrg-root-x1.pem
|
||||
- le-e7.crt
|
||||
- le-e7.pem
|
||||
|
||||
-----
|
||||
|
||||
The tgz format should output a `.tar.gz` file instead of a `.tgz` file.
|
||||
|
||||
-----
|
||||
|
||||
Move the format extensions to a global variable.
|
||||
|
||||
-----
|
||||
|
||||
Write a README.txt with a description of the bundle.yaml format.
|
||||
|
||||
Additionally, update the help text for the program (e.g. with `-h`)
|
||||
to provide the same detailed information.
|
||||
|
||||
-----
|
||||
|
||||
It may be easier to embed the README.txt in the program on build.
|
||||
|
||||
|
||||
43
cmd/cert-bundler/testdata/bundle.yaml
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
config:
|
||||
hashes: bundle.sha256
|
||||
expiry: 1y
|
||||
chains:
|
||||
core_certs:
|
||||
certs:
|
||||
- root: pems/gts-r1.pem
|
||||
intermediates:
|
||||
- pems/goog-wr2.pem
|
||||
- root: pems/isrg-root-x1.pem
|
||||
intermediates:
|
||||
- pems/le-e7.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: true
|
||||
manifest: true
|
||||
formats:
|
||||
- zip
|
||||
- tgz
|
||||
google_certs:
|
||||
certs:
|
||||
- root: pems/gts-r1.pem
|
||||
intermediates:
|
||||
- pems/goog-wr2.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: false
|
||||
manifest: true
|
||||
encoding: der
|
||||
formats:
|
||||
- tgz
|
||||
lets_encrypt:
|
||||
certs:
|
||||
- root: pems/isrg-root-x1.pem
|
||||
intermediates:
|
||||
- pems/le-e7.pem
|
||||
outputs:
|
||||
include_single: false
|
||||
include_individual: true
|
||||
manifest: false
|
||||
encoding: both
|
||||
formats:
|
||||
- zip
|
||||
29
cmd/cert-bundler/testdata/pems/goog-wr2.pem
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFCzCCAvOgAwIBAgIQf/AFoHxM3tEArZ1mpRB7mDANBgkqhkiG9w0BAQsFADBH
|
||||
MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM
|
||||
QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMjMxMjEzMDkwMDAwWhcNMjkwMjIw
|
||||
MTQwMDAwWjA7MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVR29vZ2xlIFRydXN0IFNl
|
||||
cnZpY2VzMQwwCgYDVQQDEwNXUjIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
|
||||
AoIBAQCp/5x/RR5wqFOfytnlDd5GV1d9vI+aWqxG8YSau5HbyfsvAfuSCQAWXqAc
|
||||
+MGr+XgvSszYhaLYWTwO0xj7sfUkDSbutltkdnwUxy96zqhMt/TZCPzfhyM1IKji
|
||||
aeKMTj+xWfpgoh6zySBTGYLKNlNtYE3pAJH8do1cCA8Kwtzxc2vFE24KT3rC8gIc
|
||||
LrRjg9ox9i11MLL7q8Ju26nADrn5Z9TDJVd06wW06Y613ijNzHoU5HEDy01hLmFX
|
||||
xRmpC5iEGuh5KdmyjS//V2pm4M6rlagplmNwEmceOuHbsCFx13ye/aoXbv4r+zgX
|
||||
FNFmp6+atXDMyGOBOozAKql2N87jAgMBAAGjgf4wgfswDgYDVR0PAQH/BAQDAgGG
|
||||
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/
|
||||
AgEAMB0GA1UdDgQWBBTeGx7teRXUPjckwyG77DQ5bUKyMDAfBgNVHSMEGDAWgBTk
|
||||
rysmcRorSCeFL1JmLO/wiRNxPjA0BggrBgEFBQcBAQQoMCYwJAYIKwYBBQUHMAKG
|
||||
GGh0dHA6Ly9pLnBraS5nb29nL3IxLmNydDArBgNVHR8EJDAiMCCgHqAchhpodHRw
|
||||
Oi8vYy5wa2kuZ29vZy9yL3IxLmNybDATBgNVHSAEDDAKMAgGBmeBDAECATANBgkq
|
||||
hkiG9w0BAQsFAAOCAgEARXWL5R87RBOWGqtY8TXJbz3S0DNKhjO6V1FP7sQ02hYS
|
||||
TL8Tnw3UVOlIecAwPJQl8hr0ujKUtjNyC4XuCRElNJThb0Lbgpt7fyqaqf9/qdLe
|
||||
SiDLs/sDA7j4BwXaWZIvGEaYzq9yviQmsR4ATb0IrZNBRAq7x9UBhb+TV+PfdBJT
|
||||
DhEl05vc3ssnbrPCuTNiOcLgNeFbpwkuGcuRKnZc8d/KI4RApW//mkHgte8y0YWu
|
||||
ryUJ8GLFbsLIbjL9uNrizkqRSvOFVU6xddZIMy9vhNkSXJ/UcZhjJY1pXAprffJB
|
||||
vei7j+Qi151lRehMCofa6WBmiA4fx+FOVsV2/7R6V2nyAiIJJkEd2nSi5SnzxJrl
|
||||
Xdaqev3htytmOPvoKWa676ATL/hzfvDaQBEcXd2Ppvy+275W+DKcH0FBbX62xevG
|
||||
iza3F4ydzxl6NJ8hk8R+dDXSqv1MbRT1ybB5W0k8878XSOjvmiYTDIfyc9acxVJr
|
||||
Y/cykHipa+te1pOhv7wYPYtZ9orGBV5SGOJm4NrB3K1aJar0RfzxC3ikr7Dyc6Qw
|
||||
qDTBU39CluVIQeuQRgwG3MuSxl7zRERDRilGoKb8uY45JzmxWuKxrfwT/478JuHU
|
||||
/oTxUFqOl2stKnn7QGTq8z29W+GgBLCXSBxC9epaHM0myFH/FJlniXJfHeytWt0=
|
||||
-----END CERTIFICATE-----
|
||||
31
cmd/cert-bundler/testdata/pems/gts-r1.pem
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw
|
||||
CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
|
||||
MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
|
||||
MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
|
||||
Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA
|
||||
A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo
|
||||
27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w
|
||||
Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw
|
||||
TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl
|
||||
qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH
|
||||
szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8
|
||||
Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk
|
||||
MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92
|
||||
wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p
|
||||
aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN
|
||||
VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID
|
||||
AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
|
||||
FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb
|
||||
C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe
|
||||
QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy
|
||||
h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4
|
||||
7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J
|
||||
ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef
|
||||
MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/
|
||||
Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT
|
||||
6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ
|
||||
0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm
|
||||
2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb
|
||||
bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c
|
||||
-----END CERTIFICATE-----
|
||||
31
cmd/cert-bundler/testdata/pems/isrg-root-x1.pem
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
|
||||
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
||||
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
|
||||
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
|
||||
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
|
||||
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
|
||||
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
|
||||
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
|
||||
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
|
||||
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
|
||||
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
|
||||
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
|
||||
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
|
||||
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
|
||||
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
|
||||
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
|
||||
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
|
||||
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
|
||||
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
|
||||
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
|
||||
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
|
||||
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
|
||||
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
|
||||
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
|
||||
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
|
||||
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
|
||||
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
|
||||
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
|
||||
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
||||
-----END CERTIFICATE-----
|
||||
26
cmd/cert-bundler/testdata/pems/le-e7.pem
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIEVzCCAj+gAwIBAgIRAKp18eYrjwoiCWbTi7/UuqEwDQYJKoZIhvcNAQELBQAw
|
||||
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
||||
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjQwMzEzMDAwMDAw
|
||||
WhcNMjcwMzEyMjM1OTU5WjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg
|
||||
RW5jcnlwdDELMAkGA1UEAxMCRTcwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARB6AST
|
||||
CFh/vjcwDMCgQer+VtqEkz7JANurZxLP+U9TCeioL6sp5Z8VRvRbYk4P1INBmbef
|
||||
QHJFHCxcSjKmwtvGBWpl/9ra8HW0QDsUaJW2qOJqceJ0ZVFT3hbUHifBM/2jgfgw
|
||||
gfUwDgYDVR0PAQH/BAQDAgGGMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcD
|
||||
ATASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBSuSJ7chx1EoG/aouVgdAR4
|
||||
wpwAgDAfBgNVHSMEGDAWgBR5tFnme7bl5AFzgAiIyBpY9umbbjAyBggrBgEFBQcB
|
||||
AQQmMCQwIgYIKwYBBQUHMAKGFmh0dHA6Ly94MS5pLmxlbmNyLm9yZy8wEwYDVR0g
|
||||
BAwwCjAIBgZngQwBAgEwJwYDVR0fBCAwHjAcoBqgGIYWaHR0cDovL3gxLmMubGVu
|
||||
Y3Iub3JnLzANBgkqhkiG9w0BAQsFAAOCAgEAjx66fDdLk5ywFn3CzA1w1qfylHUD
|
||||
aEf0QZpXcJseddJGSfbUUOvbNR9N/QQ16K1lXl4VFyhmGXDT5Kdfcr0RvIIVrNxF
|
||||
h4lqHtRRCP6RBRstqbZ2zURgqakn/Xip0iaQL0IdfHBZr396FgknniRYFckKORPG
|
||||
yM3QKnd66gtMst8I5nkRQlAg/Jb+Gc3egIvuGKWboE1G89NTsN9LTDD3PLj0dUMr
|
||||
OIuqVjLB8pEC6yk9enrlrqjXQgkLEYhXzq7dLafv5Vkig6Gl0nuuqjqfp0Q1bi1o
|
||||
yVNAlXe6aUXw92CcghC9bNsKEO1+M52YY5+ofIXlS/SEQbvVYYBLZ5yeiglV6t3S
|
||||
M6H+vTG0aP9YHzLn/KVOHzGQfXDP7qM5tkf+7diZe7o2fw6O7IvN6fsQXEQQj8TJ
|
||||
UXJxv2/uJhcuy/tSDgXwHM8Uk34WNbRT7zGTGkQRX0gsbjAea/jYAoWv0ZvQRwpq
|
||||
Pe79D/i7Cep8qWnA+7AE/3B3S/3dEEYmc0lpe1366A/6GEgk3ktr9PEoQrLChs6I
|
||||
tu3wnNLB2euC8IKGLQFpGtOO/2/hiAKjyajaBP25w1jF0Wl8Bbqne3uZ2q1GyPFJ
|
||||
YRmT7/OXpmOH/FVLtwS+8ng1cAmpCujPwteJZNcDG0sF2n/sc0+SQf49fdyUK0ty
|
||||
+VUwFj9tmWxyR/M=
|
||||
-----END CERTIFICATE-----
|
||||
4
cmd/cert-bundler/testdata/pkg/bundle.sha256
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
5ed8bf9ed693045faa8a5cb0edc4a870052e56aef6291ce8b1604565affbc2a4 core_certs.zip
|
||||
e59eddc590d2f7b790a87c5b56e81697088ab54be382c0e2c51b82034006d308 core_certs.tgz
|
||||
51b9b63b1335118079e90700a3a5b847c363808e9116e576ca84f301bc433289 google_certs.tgz
|
||||
3d1910ca8835c3ded1755a8c7d6c48083c2f3ff68b2bfbf932aaf27e29d0a232 lets_encrypt.zip
|
||||
BIN
cmd/cert-bundler/testdata/pkg/core_certs.tgz
vendored
Normal file
Binary file not shown.
BIN
cmd/cert-bundler/testdata/pkg/core_certs.zip
vendored
Normal file
Binary file not shown.
BIN
cmd/cert-bundler/testdata/pkg/google_certs.tgz
vendored
Normal file
Binary file not shown.
BIN
cmd/cert-bundler/testdata/pkg/lets_encrypt.zip
vendored
Normal file
Binary file not shown.
@@ -1,15 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "certchain_lib",
|
||||
srcs = ["certchain.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/certchain",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["@ht_sr_git_kisom_goutils//die:go_default_library"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "certchain",
|
||||
embed = [":certchain_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,22 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "certdump_lib",
|
||||
srcs = [
|
||||
"certdump.go",
|
||||
"util.go",
|
||||
],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/certdump",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"@com_github_cloudflare_cfssl//errors",
|
||||
"@com_github_cloudflare_cfssl//helpers",
|
||||
"@com_github_kr_text//:text",
|
||||
],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "certdump",
|
||||
embed = [":certdump_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -12,12 +12,13 @@ import (
|
||||
"crypto/x509/pkix"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/cloudflare/cfssl/helpers"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
func certPublic(cert *x509.Certificate) string {
|
||||
@@ -109,6 +110,14 @@ func showBasicConstraints(cert *x509.Certificate) {
|
||||
|
||||
if cert.IsCA {
|
||||
fmt.Printf(", is a CA certificate")
|
||||
if !cert.BasicConstraintsValid {
|
||||
fmt.Printf(" (basic constraint failure)")
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("is not a CA certificate")
|
||||
if cert.KeyUsage&x509.KeyUsageKeyEncipherment != 0 {
|
||||
fmt.Printf(" (key encipherment usage enabled!)")
|
||||
}
|
||||
}
|
||||
|
||||
if (cert.MaxPathLen == 0 && cert.MaxPathLenZero) || (cert.MaxPathLen > 0) {
|
||||
@@ -208,17 +217,17 @@ func displayCert(cert *x509.Certificate) {
|
||||
}
|
||||
|
||||
func displayAllCerts(in []byte, leafOnly bool) {
|
||||
certs, err := helpers.ParseCertificatesPEM(in)
|
||||
certs, err := certlib.ParseCertificatesPEM(in)
|
||||
if err != nil {
|
||||
certs, _, err = helpers.ParseCertificatesDER(in, "")
|
||||
certs, _, err = certlib.ParseCertificatesDER(in, "")
|
||||
if err != nil {
|
||||
Warn(TranslateCFSSLError(err), "failed to parse certificates")
|
||||
lib.Warn(err, "failed to parse certificates")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(certs) == 0 {
|
||||
Warnx("no certificates found")
|
||||
lib.Warnx("no certificates found")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -236,7 +245,7 @@ func displayAllCertsWeb(uri string, leafOnly bool) {
|
||||
ci := getConnInfo(uri)
|
||||
conn, err := tls.Dial("tcp", ci.Addr, permissiveConfig())
|
||||
if err != nil {
|
||||
Warn(err, "couldn't connect to %s", ci.Addr)
|
||||
lib.Warn(err, "couldn't connect to %s", ci.Addr)
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
@@ -252,11 +261,11 @@ func displayAllCertsWeb(uri string, leafOnly bool) {
|
||||
}
|
||||
conn.Close()
|
||||
} else {
|
||||
Warn(err, "TLS verification error with server name %s", ci.Host)
|
||||
lib.Warn(err, "TLS verification error with server name %s", ci.Host)
|
||||
}
|
||||
|
||||
if len(state.PeerCertificates) == 0 {
|
||||
Warnx("no certificates found")
|
||||
lib.Warnx("no certificates found")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -266,7 +275,7 @@ func displayAllCertsWeb(uri string, leafOnly bool) {
|
||||
}
|
||||
|
||||
if len(state.VerifiedChains) == 0 {
|
||||
Warnx("no verified chains found; using peer chain")
|
||||
lib.Warnx("no verified chains found; using peer chain")
|
||||
for i := range state.PeerCertificates {
|
||||
displayCert(state.PeerCertificates[i])
|
||||
}
|
||||
@@ -289,9 +298,9 @@ func main() {
|
||||
flag.Parse()
|
||||
|
||||
if flag.NArg() == 0 || (flag.NArg() == 1 && flag.Arg(0) == "-") {
|
||||
certs, err := ioutil.ReadAll(os.Stdin)
|
||||
certs, err := io.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
Warn(err, "couldn't read certificates from standard input")
|
||||
lib.Warn(err, "couldn't read certificates from standard input")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
@@ -306,9 +315,9 @@ func main() {
|
||||
if strings.HasPrefix(filename, "https://") {
|
||||
displayAllCertsWeb(filename, leafOnly)
|
||||
} else {
|
||||
in, err := ioutil.ReadFile(filename)
|
||||
in, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
Warn(err, "couldn't read certificate")
|
||||
lib.Warn(err, "couldn't read certificate")
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
@@ -3,13 +3,10 @@ package main
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
cferr "github.com/cloudflare/cfssl/errors"
|
||||
"github.com/kr/text"
|
||||
)
|
||||
|
||||
@@ -89,34 +86,6 @@ func sigAlgoHash(a x509.SignatureAlgorithm) string {
|
||||
}
|
||||
}
|
||||
|
||||
// TranslateCFSSLError turns a CFSSL error into a more readable string.
|
||||
func TranslateCFSSLError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// printing errors as json is terrible
|
||||
if cfsslError, ok := err.(*cferr.Error); ok {
|
||||
err = errors.New(cfsslError.Message)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Warnx displays a formatted error message to standard error, à la
|
||||
// warnx(3).
|
||||
func Warnx(format string, a ...interface{}) (int, error) {
|
||||
format += "\n"
|
||||
return fmt.Fprintf(os.Stderr, format, a...)
|
||||
}
|
||||
|
||||
// Warn displays a formatted error message to standard output,
|
||||
// appending the error string, à la warn(3).
|
||||
func Warn(err error, format string, a ...interface{}) (int, error) {
|
||||
format += ": %v\n"
|
||||
a = append(a, err)
|
||||
return fmt.Fprintf(os.Stderr, format, a...)
|
||||
}
|
||||
|
||||
const maxLine = 78
|
||||
|
||||
func makeIndent(n int) string {
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "certexpiry_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/certexpiry",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"@com_github_cloudflare_cfssl//helpers",
|
||||
"@ht_sr_git_kisom_goutils//die:go_default_library",
|
||||
"@ht_sr_git_kisom_goutils//lib:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "certexpiry",
|
||||
embed = [":certexpiry_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudflare/cfssl/helpers"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
@@ -87,7 +87,7 @@ func main() {
|
||||
continue
|
||||
}
|
||||
|
||||
certs, err := helpers.ParseCertificatesPEM(in)
|
||||
certs, err := certlib.ParseCertificatesPEM(in)
|
||||
if err != nil {
|
||||
lib.Warn(err, "while parsing certificates")
|
||||
continue
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "certverify_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/certverify",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"@com_github_cloudflare_cfssl//helpers",
|
||||
"@com_github_cloudflare_cfssl//revoke",
|
||||
"@ht_sr_git_kisom_goutils//die:go_default_library",
|
||||
"@ht_sr_git_kisom_goutils//lib:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "certverify",
|
||||
embed = [":certverify_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -8,14 +8,14 @@ import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/cloudflare/cfssl/helpers"
|
||||
"github.com/cloudflare/cfssl/revoke"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/revoke"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
func printRevocation(cert *x509.Certificate) {
|
||||
remaining := cert.NotAfter.Sub(time.Now())
|
||||
remaining := time.Until(cert.NotAfter)
|
||||
fmt.Printf("certificate expires in %s.\n", lib.Duration(remaining))
|
||||
|
||||
revoked, ok := revoke.VerifyCertificate(cert)
|
||||
@@ -47,7 +47,7 @@ func main() {
|
||||
if verbose {
|
||||
fmt.Println("[+] loading root certificates from", caFile)
|
||||
}
|
||||
roots, err = helpers.LoadPEMCertPool(caFile)
|
||||
roots, err = certlib.LoadPEMCertPool(caFile)
|
||||
die.If(err)
|
||||
}
|
||||
|
||||
@@ -57,7 +57,7 @@ func main() {
|
||||
if verbose {
|
||||
fmt.Println("[+] loading intermediate certificates from", intFile)
|
||||
}
|
||||
ints, err = helpers.LoadPEMCertPool(caFile)
|
||||
ints, err = certlib.LoadPEMCertPool(caFile)
|
||||
die.If(err)
|
||||
} else {
|
||||
ints = x509.NewCertPool()
|
||||
@@ -71,7 +71,7 @@ func main() {
|
||||
fileData, err := ioutil.ReadFile(flag.Arg(0))
|
||||
die.If(err)
|
||||
|
||||
chain, err := helpers.ParseCertificatesPEM(fileData)
|
||||
chain, err := certlib.ParseCertificatesPEM(fileData)
|
||||
die.If(err)
|
||||
if verbose {
|
||||
fmt.Printf("[+] %s has %d certificates\n", flag.Arg(0), len(chain))
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "clustersh_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/clustersh",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"@com_github_pkg_sftp//:sftp",
|
||||
"@ht_sr_git_kisom_goutils//lib:go_default_library",
|
||||
"@org_golang_x_crypto//ssh",
|
||||
"@org_golang_x_crypto//ssh/agent",
|
||||
],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "clustersh",
|
||||
embed = [":clustersh_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,15 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "cruntar_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/cruntar",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["@ht_sr_git_kisom_goutils//die:go_default_library"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "cruntar",
|
||||
embed = [":cruntar_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/fileutil"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -56,7 +57,7 @@ func processFile(tfr *tar.Reader, hdr *tar.Header, top string) error {
|
||||
}
|
||||
filePath := filepath.Clean(filepath.Join(top, hdr.Name))
|
||||
switch hdr.Typeflag {
|
||||
case tar.TypeReg, tar.TypeRegA:
|
||||
case tar.TypeReg:
|
||||
file, err := os.Create(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -92,6 +93,17 @@ func processFile(tfr *tar.Reader, hdr *tar.Header, top string) error {
|
||||
return err
|
||||
}
|
||||
case tar.TypeSymlink:
|
||||
if !fileutil.ValidateSymlink(hdr.Linkname, top) {
|
||||
return fmt.Errorf("symlink %s is outside the top-level %s",
|
||||
hdr.Linkname, top)
|
||||
}
|
||||
path := linkTarget(hdr.Linkname, top)
|
||||
if ok, err := filepath.Match(top+"/*", filepath.Clean(path)); !ok {
|
||||
return fmt.Errorf("symlink %s isn't in %s", hdr.Linkname, top)
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := os.Symlink(linkTarget(hdr.Linkname, top), filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
|
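The new symlink case above rejects link targets that would escape the extraction root. The repository does this with fileutil.ValidateSymlink and linkTarget; the sketch below is not that implementation, only a self-contained illustration of the same containment check:

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    // containedIn reports whether linkname, resolved against top,
    // stays inside top after cleaning.
    func containedIn(linkname, top string) bool {
        resolved := linkname
        if !filepath.IsAbs(resolved) {
            resolved = filepath.Join(top, resolved)
        }
        resolved = filepath.Clean(resolved)
        return resolved == top ||
            strings.HasPrefix(resolved, top+string(filepath.Separator))
    }

    func main() {
        fmt.Println(containedIn("docs/readme.txt", "/tmp/unpack"))  // true
        fmt.Println(containedIn("../../etc/passwd", "/tmp/unpack")) // false
    }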
||||
@@ -1,15 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "csrpubdump_lib",
|
||||
srcs = ["pubdump.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/csrpubdump",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["@ht_sr_git_kisom_goutils//die:go_default_library"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "csrpubdump",
|
||||
embed = [":csrpubdump_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
32
cmd/data_sync/README
Normal file
@@ -0,0 +1,32 @@
|
||||
data_sync
|
||||
|
||||
This is a tool I wrote primarily to sync my home directory to a backup
|
||||
drive plugged into my laptop. This system is provisioned by Ansible,
|
||||
and the goal is to be able to just copy my home directory back in the
|
||||
event of a failure without having lost a great deal of work or having to
wait for Ansible to finish installing the right backup software. Specifically,
|
||||
I use a Framework laptop with the 1TB storage module, encrypted with
|
||||
LUKS, and run this twice daily (timed to correspond with my commute,
|
||||
though that's not really necessary). It started off as a shell script,
|
||||
then I decided to just write it as a program.
|
||||
|
||||
Usage: data_sync [-d path] [-l level] [-m path] [-nqsv]
|
||||
[-t path]
|
||||
-d path path to sync source directory
|
||||
(default "~")
|
||||
-l level log level to output (default "INFO"). Valid log
|
||||
levels are DEBUG, INFO, NOTICE, WARNING, ERR,
|
||||
CRIT, ALERT, EMERG. The default is INFO.
|
||||
-m path path to sync mount directory
|
||||
(default "/media/$USER/$(hostname -s)_data")
|
||||
-n dry-run mode: only check paths and print files to
|
||||
exclude
|
||||
-q suppress console output
|
||||
-s suppress syslog output
|
||||
-t path path to sync target directory
|
||||
(default "/media/$USER/$(hostname -s)_data/$USER")
|
||||
-v verbose rsync output
|
||||
|
||||
data_sync rsyncs the tree at the sync source directory (-d) to the sync target
|
||||
directory (-t); it checks that the mount directory (-m) exists; the sync
target directory must exist on the mount directory.
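The shell-style defaults quoted above expand from the short hostname and $USER; a small sketch of that computation, mirroring what main.go below does (the printed values are only examples):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "strings"
    )

    func main() {
        hostname, _ := os.Hostname()
        short := strings.Split(hostname, ".")[0]

        mount := filepath.Join("/media", os.Getenv("USER"), short+"_data")
        target := filepath.Join(mount, os.Getenv("USER"))

        fmt.Println(mount)  // e.g. /media/kyle/framework_data
        fmt.Println(target) // e.g. /media/kyle/framework_data/kyle
    }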
|
||||
230
cmd/data_sync/main.go
Normal file
@@ -0,0 +1,230 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/config"
|
||||
"git.wntrmute.dev/kyle/goutils/fileutil"
|
||||
"git.wntrmute.dev/kyle/goutils/log"
|
||||
)
|
||||
|
||||
func mustHostname() string {
|
||||
hostname, err := os.Hostname()
|
||||
log.FatalError(err, "couldn't retrieve hostname")
|
||||
|
||||
if hostname == "" {
|
||||
log.Fatal("no hostname returned")
|
||||
}
|
||||
return strings.Split(hostname, ".")[0]
|
||||
}
|
||||
|
||||
var (
|
||||
defaultDataDir = mustHostname() + "_data"
|
||||
defaultProgName = defaultDataDir + "_sync"
|
||||
defaultMountDir = filepath.Join("/media", os.Getenv("USER"), defaultDataDir)
|
||||
defaultSyncDir = os.Getenv("HOME")
|
||||
defaultTargetDir = filepath.Join(defaultMountDir, os.Getenv("USER"))
|
||||
)
|
||||
|
||||
func usage(w io.Writer) {
|
||||
prog := filepath.Base(os.Args[0])
|
||||
fmt.Fprintf(w, `Usage: %s [-d path] [-l level] [-m path] [-nqsv]
|
||||
[-t path]
|
||||
-d path path to sync source directory
|
||||
(default "%s")
|
||||
-l level log level to output (default "INFO"). Valid log
|
||||
levels are DEBUG, INFO, NOTICE, WARNING, ERR,
|
||||
CRIT, ALERT, EMERG. The default is INFO.
|
||||
-m path path to sync mount directory
|
||||
(default "%s")
|
||||
-n dry-run mode: only check paths and print files to
|
||||
exclude
|
||||
-q suppress console output
|
||||
-s suppress syslog output
|
||||
-t path path to sync target directory
|
||||
(default "%s")
|
||||
-v verbose rsync output
|
||||
|
||||
%s rsyncs the tree at the sync source directory (-d) to the sync target
|
||||
directory (-t); it checks that the mount directory (-m) exists; the sync
target directory must exist on the mount directory.
|
||||
|
||||
`, prog, defaultSyncDir, defaultMountDir, defaultTargetDir, prog)
|
||||
}
|
||||
|
||||
func checkPaths(mount, target string, dryRun bool) error {
|
||||
if !fileutil.DirectoryDoesExist(mount) {
|
||||
return fmt.Errorf("sync dir %s isn't mounted", mount)
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(target, mount) {
|
||||
return fmt.Errorf("target dir %s must exist in %s", target, mount)
|
||||
}
|
||||
|
||||
if !fileutil.DirectoryDoesExist(target) {
|
||||
if dryRun {
|
||||
log.Infof("would mkdir %s", target)
|
||||
} else {
|
||||
log.Infof("mkdir %s", target)
|
||||
if err := os.Mkdir(target, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildExcludes(syncDir string) ([]string, error) {
|
||||
var excluded []string
|
||||
|
||||
walker := func(path string, info fs.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
excluded = append(excluded, strings.TrimPrefix(path, syncDir))
|
||||
if info != nil && info.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if info.Mode().IsRegular() {
|
||||
if err = fileutil.Access(path, fileutil.AccessRead); err != nil {
|
||||
excluded = append(excluded, strings.TrimPrefix(path, syncDir))
|
||||
}
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
if err = fileutil.Access(path, fileutil.AccessExec); err != nil {
|
||||
excluded = append(excluded, strings.TrimPrefix(path, syncDir))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
err := filepath.Walk(syncDir, walker)
|
||||
return excluded, err
|
||||
}
|
||||
|
||||
func writeExcludes(excluded []string) (string, error) {
|
||||
if len(excluded) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
excludeFile, err := os.CreateTemp("", defaultProgName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, name := range excluded {
|
||||
fmt.Fprintln(excludeFile, name)
|
||||
}
|
||||
|
||||
defer excludeFile.Close()
|
||||
return excludeFile.Name(), nil
|
||||
}
|
||||
|
||||
func rsync(syncDir, target, excludeFile string, verboseRsync bool) error {
|
||||
var args []string
|
||||
|
||||
if excludeFile != "" {
|
||||
args = append(args, "--exclude-from")
|
||||
args = append(args, excludeFile)
|
||||
}
|
||||
|
||||
if verboseRsync {
|
||||
args = append(args, "--progress")
|
||||
args = append(args, "-v")
|
||||
}
|
||||
|
||||
args = append(args, []string{"-au", syncDir + "/", target + "/"}...)
|
||||
|
||||
path, err := exec.LookPath("rsync")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cmd := exec.Command(path, args...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
func init() {
|
||||
flag.Usage = func() { usage(os.Stderr) }
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
||||
var logLevel, mountDir, syncDir, target string
|
||||
var dryRun, quietMode, noSyslog, verboseRsync bool
|
||||
|
||||
flag.StringVar(&syncDir, "d", config.GetDefault("sync_dir", defaultSyncDir),
|
||||
"`path to sync source directory`")
|
||||
flag.StringVar(&logLevel, "l", config.GetDefault("log_level", "INFO"),
|
||||
"log level to output")
|
||||
flag.StringVar(&mountDir, "m", config.GetDefault("mount_dir", defaultMountDir),
|
||||
"`path` to sync mount directory")
|
||||
flag.BoolVar(&dryRun, "n", false, "dry-run mode: only check paths and print files to exclude")
|
||||
flag.BoolVar(&quietMode, "q", quietMode, "suppress console output")
|
||||
flag.BoolVar(&noSyslog, "s", noSyslog, "suppress syslog output")
|
||||
flag.StringVar(&target, "t", config.GetDefault("sync_target", defaultTargetDir),
|
||||
"`path` to sync target directory")
|
||||
flag.BoolVar(&verboseRsync, "v", false, "verbose rsync output")
|
||||
flag.Parse()
|
||||
|
||||
if quietMode && noSyslog {
|
||||
fmt.Fprintln(os.Stderr, "both console and syslog output are suppressed")
|
||||
fmt.Fprintln(os.Stderr, "errors will NOT be reported")
|
||||
}
|
||||
|
||||
logOpts := &log.Options{
|
||||
Level: logLevel,
|
||||
Tag: defaultProgName,
|
||||
Facility: "user",
|
||||
WriteSyslog: !noSyslog,
|
||||
WriteConsole: !quietMode,
|
||||
}
|
||||
err := log.Setup(logOpts)
|
||||
log.FatalError(err, "failed to set up logging")
|
||||
|
||||
log.Infof("checking paths: mount=%s, target=%s", mountDir, target)
|
||||
err = checkPaths(mountDir, target, dryRun)
|
||||
log.FatalError(err, "target dir isn't ready")
|
||||
|
||||
log.Infof("checking for files to exclude from %s", syncDir)
|
||||
excluded, err := buildExcludes(syncDir)
|
||||
log.FatalError(err, "couldn't build excludes")
|
||||
|
||||
if dryRun {
|
||||
fmt.Println("excluded files:")
|
||||
for _, path := range excluded {
|
||||
fmt.Printf("\t%s\n", path)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
excludeFile, err := writeExcludes(excluded)
|
||||
log.FatalError(err, "couldn't write exclude file")
|
||||
log.Infof("excluding %d files via %s", len(excluded), excludeFile)
|
||||
|
||||
if excludeFile != "" {
|
||||
defer func() {
|
||||
log.Infof("removing exclude file %s", excludeFile)
|
||||
if err := os.Remove(excludeFile); err != nil {
|
||||
log.Warningf("failed to remove temp file %s", excludeFile)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
err = rsync(syncDir, target, excludeFile, verboseRsync)
|
||||
log.FatalError(err, "couldn't sync data")
|
||||
}
|
||||
34
cmd/diskimg/README
Normal file
@@ -0,0 +1,34 @@
|
||||
diskimg: write disk images
|
||||
|
||||
Usage:
|
||||
diskimg [-a algo] [-v] image device
|
||||
|
||||
Flags:
|
||||
-a algo Select the hashing algorithm to use. The default
|
||||
is 'sha256'. Specifying an algorithm of 'list'
|
||||
will print the supported algorithms to standard
|
||||
output and exit with error code 2.
|
||||
-v Enable verbose (debug) output.
|
||||
|
||||
Examples:
|
||||
|
||||
Copying images/server.img to /dev/sda:
|
||||
|
||||
$ sudo diskimg images/server.img /dev/sda
|
||||
|
||||
Writing a bladerunner node image to /dev/sda:
|
||||
|
||||
$ sudo diskimg -v ~/code/bladerunner/packer/build/cm4-cnode-ubuntu-22.04.2.img /dev/sda
|
||||
opening image /home/kyle/code/bladerunner/packer/build/cm4-cnode-ubuntu-22.04.2.img for read
|
||||
/home/kyle/code/bladerunner/packer/build/cm4-cnode-ubuntu-22.04.2.img 416d4c8f890904167419e3d488d097e9c847273376b650546fdb1f6f9809c184
|
||||
opening device /dev/sda for rw
|
||||
writing /home/kyle/code/bladerunner/packer/build/cm4-cnode-ubuntu-22.04.2.img -> /dev/sda
|
||||
wrote 4151312384 bytes to /dev/sda
|
||||
syncing /dev/sda
|
||||
verifying the image was written successfully
|
||||
OK
|
||||
|
||||
Motivation:
|
||||
|
||||
I wanted to write something like balena's Etcher, but commandline only.
|
||||
|
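The transcript above reflects a write-then-verify flow: hash the image, copy it to the device, sync, then re-read exactly the bytes written and compare digests. The sketch below shows that flow with only the standard library and a fixed SHA-256; the real tool uses the ahash package with a selectable algorithm, and the paths here are placeholders:

    package main

    import (
        "bytes"
        "crypto/sha256"
        "fmt"
        "io"
        "log"
        "os"
    )

    func must(err error) {
        if err != nil {
            log.Fatal(err)
        }
    }

    func main() {
        img, err := os.Open("image.img") // placeholder path
        must(err)
        defer img.Close()

        dev, err := os.OpenFile("/dev/sdX", os.O_RDWR|os.O_SYNC, 0600) // placeholder device
        must(err)
        defer dev.Close()

        // Hash the image while writing it to the device.
        h := sha256.New()
        n, err := io.Copy(dev, io.TeeReader(img, h))
        must(err)
        must(dev.Sync())

        // Re-read exactly n bytes from the device and compare digests.
        _, err = dev.Seek(0, io.SeekStart)
        must(err)
        vh := sha256.New()
        _, err = io.CopyN(vh, dev, n)
        must(err)

        if !bytes.Equal(h.Sum(nil), vh.Sum(nil)) {
            log.Fatal("verification failed: digests differ")
        }
        fmt.Printf("wrote and verified %d bytes\n", n)
    }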
||||
116
cmd/diskimg/main.go
Normal file
@@ -0,0 +1,116 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/ahash"
|
||||
"git.wntrmute.dev/kyle/goutils/dbg"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
const defaultHashAlgorithm = "sha256"
|
||||
|
||||
var (
|
||||
hAlgo string
|
||||
debug = dbg.New()
|
||||
)
|
||||
|
||||
|
||||
func openImage(imageFile string) (image *os.File, hash []byte, err error) {
|
||||
image, err = os.Open(imageFile)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
hash, err = ahash.SumReader(hAlgo, image)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = image.Seek(0, 0)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
debug.Printf("%s %x\n", imageFile, hash)
|
||||
return
|
||||
}
|
||||
|
||||
func openDevice(devicePath string) (device *os.File, err error) {
|
||||
fi, err := os.Stat(devicePath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
device, err = os.OpenFile(devicePath, os.O_RDWR|os.O_SYNC, fi.Mode())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.StringVar(&hAlgo, "a", defaultHashAlgorithm, "default hash algorithm")
|
||||
flag.BoolVar(&debug.Enabled, "v", false, "enable debug logging")
|
||||
flag.Parse()
|
||||
|
||||
if hAlgo == "list" {
|
||||
fmt.Println("Supported hashing algorithms:")
|
||||
for _, algo := range ahash.SecureHashList() {
|
||||
fmt.Printf("\t- %s\n", algo)
|
||||
}
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
if flag.NArg() != 2 {
|
||||
die.With("usage: diskimg image device")
|
||||
}
|
||||
|
||||
imageFile := flag.Arg(0)
|
||||
devicePath := flag.Arg(1)
|
||||
|
||||
debug.Printf("opening image %s for read\n", imageFile)
|
||||
image, hash, err := openImage(imageFile)
|
||||
if image != nil {
|
||||
defer image.Close()
|
||||
}
|
||||
die.If(err)
|
||||
|
||||
debug.Printf("opening device %s for rw\n", devicePath)
|
||||
device, err := openDevice(devicePath)
|
||||
if device != nil {
|
||||
defer device.Close()
|
||||
}
|
||||
die.If(err)
|
||||
|
||||
debug.Printf("writing %s -> %s\n", imageFile, devicePath)
|
||||
n, err := io.Copy(device, image)
|
||||
die.If(err)
|
||||
debug.Printf("wrote %d bytes to %s\n", n, devicePath)
|
||||
|
||||
debug.Printf("syncing %s\n", devicePath)
|
||||
err = device.Sync()
|
||||
die.If(err)
|
||||
|
||||
debug.Println("verifying the image was written successfully")
|
||||
_, err = device.Seek(0, 0)
|
||||
die.If(err)
|
||||
|
||||
deviceHash, err := ahash.SumLimitedReader(hAlgo, device, n)
|
||||
die.If(err)
|
||||
|
||||
if !bytes.Equal(deviceHash, hash) {
|
||||
fmt.Fprintln(os.Stderr, "Hash mismatch:")
|
||||
fmt.Fprintf(os.Stderr, "\t%s: %s\n", imageFile, hash)
|
||||
fmt.Fprintf(os.Stderr, "\t%s: %s\n", devicePath, deviceHash)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
debug.Println("OK")
|
||||
os.Exit(0)
|
||||
}
|
||||
71
cmd/dumpbytes/main.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
func usage(w io.Writer, exc int) {
|
||||
fmt.Fprintln(w, `usage: dumpbytes <file>`)
|
||||
os.Exit(exc)
|
||||
}
|
||||
|
||||
func printBytes(buf []byte) {
|
||||
fmt.Printf("\t")
|
||||
for i := 0; i < len(buf); i++ {
|
||||
fmt.Printf("0x%02x, ", buf[i])
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
func dumpFile(path string, indentLevel int) error {
|
||||
indent := ""
|
||||
for i := 0; i < indentLevel; i++ {
|
||||
indent += "\t"
|
||||
}
|
||||
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer file.Close()
|
||||
|
||||
fmt.Printf("%svar buffer = []byte{\n", indent)
|
||||
for {
|
||||
buf := make([]byte, 8)
|
||||
n, err := file.Read(buf)
|
||||
if err == io.EOF {
|
||||
if n > 0 {
|
||||
fmt.Printf("%s", indent)
|
||||
printBytes(buf[:n])
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("%s", indent)
|
||||
printBytes(buf[:n])
|
||||
}
|
||||
|
||||
fmt.Printf("%s}\n", indent)
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
indent := 0
|
||||
flag.Usage = func() { usage(os.Stderr, 0) }
|
||||
flag.IntVar(&indent, "n", 0, "indent level")
|
||||
flag.Parse()
|
||||
|
||||
for _, file := range flag.Args() {
|
||||
err := dumpFile(file, indent)
|
||||
die.If(err)
|
||||
}
|
||||
}
|
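As a worked example of the output format: a hypothetical file containing the ten bytes "dumpbytes\n" would be rendered, at the default indent level, roughly as:

    var buffer = []byte{
        0x64, 0x75, 0x6d, 0x70, 0x62, 0x79, 0x74, 0x65,
        0x73, 0x0a,
    }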
||||
@@ -1,15 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "eig_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/eig",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["@ht_sr_git_kisom_goutils//die:go_default_library"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "eig",
|
||||
embed = [":eig_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,15 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "fragment_lib",
|
||||
srcs = ["fragment.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/fragment",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["@ht_sr_git_kisom_goutils//die:go_default_library"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "fragment",
|
||||
embed = [":fragment_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
5
cmd/host/README
Normal file
@@ -0,0 +1,5 @@
|
||||
host
|
||||
|
||||
This is a utility to display CNAME records and IPs for a hostname. It
|
||||
was born of my frustration in trying to figure out how to get the host(1)
|
||||
tool installed on Fedora.
|
||||
41
cmd/host/main.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
)
|
||||
|
||||
func lookupHost(host string) error {
|
||||
cname, err := net.LookupCNAME(host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cname != host {
|
||||
fmt.Printf("%s is a CNAME for %s\n", host, cname)
|
||||
host = cname
|
||||
}
|
||||
|
||||
addrs, err := net.LookupHost(host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, addr := range addrs {
|
||||
fmt.Printf("\t%s\n", addr)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
for _, arg := range flag.Args() {
|
||||
if err := lookupHost(arg); err != nil {
|
||||
log.Printf("%s: %s", arg, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "jlp_lib",
|
||||
srcs = ["jlp.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/jlp",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["@ht_sr_git_kisom_goutils//lib:go_default_library"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "jlp",
|
||||
embed = [":jlp_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,15 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "kgz_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/kgz",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["@com_github_pkg_errors//:errors"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "kgz",
|
||||
embed = [":kgz_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
3
cmd/minmax/README
Normal file
@@ -0,0 +1,3 @@
|
||||
minmax
|
||||
|
||||
A quick tool to calculate minmax codes if needed for uLisp.
|
||||
53
cmd/minmax/minmax.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var kinds = map[string]int{
|
||||
"sym": 0,
|
||||
"tf": 1,
|
||||
"fn": 2,
|
||||
"sp": 3,
|
||||
}
|
||||
|
||||
func dieIf(err error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "[!] %s\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "usage: minmax type min max\n")
|
||||
fmt.Fprintf(os.Stderr, " type is one of fn, sp, sym, tf\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if flag.NArg() != 3 {
|
||||
usage()
|
||||
}
|
||||
|
||||
kind, ok := kinds[flag.Arg(0)]
|
||||
if !ok {
|
||||
usage()
|
||||
}
|
||||
|
||||
min, err := strconv.Atoi(flag.Arg(1))
|
||||
dieIf(err)
|
||||
|
||||
max, err := strconv.Atoi(flag.Arg(2))
|
||||
dieIf(err)
|
||||
|
||||
code := kind << 6
|
||||
code += (min << 3)
|
||||
code += max
|
||||
fmt.Printf("%0o\n", code)
|
||||
}
|
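A worked example of the packing above, using hypothetical inputs: for kind "fn" (2) with a minimum of 2 and a maximum of 7 arguments, the code is 2<<6 + 2<<3 + 7 = 151, which prints in octal as 227.

    package main

    import "fmt"

    func main() {
        // Same packing as minmax performs for "minmax fn 2 7".
        code := 2<<6 + 2<<3 + 7
        fmt.Printf("%d %0o\n", code, code) // 151 227
    }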
||||
@@ -1,15 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "parts_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/parts",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["@ht_sr_git_kisom_goutils//die:go_default_library"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "parts",
|
||||
embed = [":parts_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,14 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "pem2bin_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/pem2bin",
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "pem2bin",
|
||||
embed = [":pem2bin_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,15 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "pembody_lib",
|
||||
srcs = ["pembody.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/pembody",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["@ht_sr_git_kisom_goutils//lib:go_default_library"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "pembody",
|
||||
embed = [":pembody_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,19 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "pemit_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/pemit",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"@ht_sr_git_kisom_goutils//assert:go_default_library",
|
||||
"@ht_sr_git_kisom_goutils//die:go_default_library",
|
||||
"@ht_sr_git_kisom_goutils//lib:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "pemit",
|
||||
embed = [":pemit_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,14 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "readchain_lib",
|
||||
srcs = ["chain.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/readchain",
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "readchain",
|
||||
embed = [":readchain_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,18 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "renfnv_lib",
|
||||
srcs = ["renfnv.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/renfnv",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"@ht_sr_git_kisom_goutils//fileutil:go_default_library",
|
||||
"@ht_sr_git_kisom_goutils//lib:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "renfnv",
|
||||
embed = [":renfnv_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,19 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "rhash_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/rhash",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"@ht_sr_git_kisom_goutils//ahash:go_default_library",
|
||||
"@ht_sr_git_kisom_goutils//die:go_default_library",
|
||||
"@ht_sr_git_kisom_goutils//lib:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "rhash",
|
||||
embed = [":rhash_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
48
cmd/rolldie/main.go
Normal file
@@ -0,0 +1,48 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
var dieRollFormat = regexp.MustCompile(`^(\d+)[dD](\d+)$`)
|
||||
|
||||
func rollDie(count, sides int) []int {
|
||||
sum := 0
|
||||
var rolls []int
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
roll := rand.Intn(sides) + 1
|
||||
sum += roll
|
||||
rolls = append(rolls, roll)
|
||||
}
|
||||
|
||||
rolls = append(rolls, sum)
|
||||
return rolls
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
for _, arg := range flag.Args() {
|
||||
if !dieRollFormat.MatchString(arg) {
|
||||
fmt.Fprintf(os.Stderr, "invalid die format %s: should be XdY\n", arg)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
dieRoll := dieRollFormat.FindAllStringSubmatch(arg, -1)
|
||||
count, err := strconv.Atoi(dieRoll[0][1])
|
||||
die.If(err)
|
||||
|
||||
sides, err := strconv.Atoi(dieRoll[0][2])
|
||||
die.If(err)
|
||||
|
||||
fmt.Println(rollDie(count, sides))
|
||||
}
|
||||
}
|
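Note that rollDie appends the total as the final element of the slice it returns, so a hypothetical run looks like:

    $ rolldie 3d6
    [4 2 5 11]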
||||
@@ -1,18 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "showimp_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/showimp",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"@ht_sr_git_kisom_goutils//die:go_default_library",
|
||||
"@ht_sr_git_kisom_goutils//logging:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "showimp",
|
||||
embed = [":showimp_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -12,36 +12,23 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/dbg"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/logging"
|
||||
)
|
||||
|
||||
var (
|
||||
gopath string
|
||||
project string
|
||||
debug bool
|
||||
)
|
||||
|
||||
var (
|
||||
stdLibRegexp = regexp.MustCompile(`^\w+(/\w+)*$`)
|
||||
sourceRegexp = regexp.MustCompile(`^[^.].*\.go$`)
|
||||
log = logging.NewConsole()
|
||||
imports = map[string]bool{}
|
||||
debug = dbg.New()
|
||||
fset = &token.FileSet{}
|
||||
imports = map[string]bool{}
|
||||
sourceRegexp = regexp.MustCompile(`^[^.].*\.go$`)
|
||||
stdLibRegexp = regexp.MustCompile(`^\w+(/\w+)*$`)
|
||||
)
|
||||
|
||||
func debugf(format string, args ...interface{}) {
|
||||
if debug {
|
||||
fmt.Printf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func debugln(args ...interface{}) {
|
||||
if debug {
|
||||
fmt.Println(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
gopath = os.Getenv("GOPATH")
|
||||
if gopath == "" {
|
||||
@@ -75,7 +62,7 @@ func walkFile(path string, info os.FileInfo, err error) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
debugln(path)
|
||||
debug.Println(path)
|
||||
|
||||
f, err := parser.ParseFile(fset, path, nil, parser.ImportsOnly)
|
||||
if err != nil {
|
||||
@@ -85,16 +72,16 @@ func walkFile(path string, info os.FileInfo, err error) error {
|
||||
for _, importSpec := range f.Imports {
|
||||
importPath := strings.Trim(importSpec.Path.Value, `"`)
|
||||
if stdLibRegexp.MatchString(importPath) {
|
||||
debugln("standard lib:", importPath)
|
||||
debug.Println("standard lib:", importPath)
|
||||
continue
|
||||
} else if strings.HasPrefix(importPath, project) {
|
||||
debugln("internal import:", importPath)
|
||||
debug.Println("internal import:", importPath)
|
||||
continue
|
||||
} else if strings.HasPrefix(importPath, "golang.org/") {
|
||||
debugln("extended lib:", importPath)
|
||||
debug.Println("extended lib:", importPath)
|
||||
continue
|
||||
}
|
||||
debugln("import:", importPath)
|
||||
debug.Println("import:", importPath)
|
||||
imports[importPath] = true
|
||||
}
|
||||
|
||||
@@ -108,7 +95,7 @@ func main() {
|
||||
var noVendor bool
|
||||
flag.StringVar(&ignoreLine, "i", "", "comma-separated list of directories to ignore")
|
||||
flag.BoolVar(&noVendor, "nv", false, "ignore the vendor directory")
|
||||
flag.BoolVar(&debug, "v", false, "log debugging information")
|
||||
flag.BoolVar(&debug.Enabled, "v", false, "log debugging information")
|
||||
flag.Parse()
|
||||
|
||||
if noVendor {
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "ski_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/ski",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"@ht_sr_git_kisom_goutils//die:go_default_library",
|
||||
"@ht_sr_git_kisom_goutils//lib:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "ski",
|
||||
embed = [":ski_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,15 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "sprox_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/sprox",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["@ht_sr_git_kisom_goutils//die:go_default_library"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "sprox",
|
||||
embed = [":sprox_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,15 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "stealchain-server_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/stealchain-server",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["@ht_sr_git_kisom_goutils//die:go_default_library"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "stealchain-server",
|
||||
embed = [":stealchain-server_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,15 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "stealchain_lib",
|
||||
srcs = ["thief.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/stealchain",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["@ht_sr_git_kisom_goutils//die:go_default_library"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "stealchain",
|
||||
embed = [":stealchain_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,18 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "subjhash_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/subjhash",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"@ht_sr_git_kisom_goutils//die:go_default_library",
|
||||
"@ht_sr_git_kisom_goutils//lib:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "subjhash",
|
||||
embed = [":subjhash_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
@@ -57,7 +58,7 @@ func getSubjectInfoHash(cert *x509.Certificate, issuer bool) []byte {
|
||||
|
||||
func printDigests(paths []string, issuer bool) {
|
||||
for _, path := range paths {
|
||||
cert, err := lib.LoadCertificate(path)
|
||||
cert, err := certlib.LoadCertificate(path)
|
||||
if err != nil {
|
||||
lib.Warn(err, "failed to load certificate from %s", path)
|
||||
continue
|
||||
@@ -82,9 +83,9 @@ func matchDigests(paths []string, issuer bool) {
|
||||
snd := paths[1]
|
||||
paths = paths[2:]
|
||||
|
||||
fstCert, err := lib.LoadCertificate(fst)
|
||||
fstCert, err := certlib.LoadCertificate(fst)
|
||||
die.If(err)
|
||||
sndCert, err := lib.LoadCertificate(snd)
|
||||
sndCert, err := certlib.LoadCertificate(snd)
|
||||
die.If(err)
|
||||
if !bytes.Equal(getSubjectInfoHash(fstCert, issuer), getSubjectInfoHash(sndCert, issuer)) {
|
||||
lib.Warnx("certificates don't match: %s and %s", fst, snd)
|
||||
|
||||
34
cmd/tlsinfo/README.txt
Normal file
@@ -0,0 +1,34 @@
|
||||
tlsinfo: show TLS version, cipher, and peer certificates
|
||||
---------------------------------------------------------
|
||||
|
||||
Description
|
||||
tlsinfo connects to a TLS server and prints the negotiated TLS version and
|
||||
cipher suite, followed by details for each certificate in the server’s
|
||||
presented chain (as provided by the server).
|
||||
|
||||
Usage
|
||||
tlsinfo <hostname:port>
|
||||
|
||||
Output
|
||||
The program prints the negotiated protocol and cipher, then one section per
|
||||
certificate in the order received from the server. Example fields:
|
||||
TLS Version: TLS 1.3
|
||||
Cipher Suite: TLS_AES_128_GCM_SHA256
|
||||
Certificate 1
|
||||
Subject: CN=example.com, O=Example Corp, C=US
|
||||
Issuer: CN=Example Root CA, O=Example Corp, C=US
|
||||
DNS Names: [example.com www.example.com]
|
||||
Not Before: 2025-01-01 00:00:00 +0000 UTC
|
||||
Not After: 2026-01-01 23:59:59 +0000 UTC
|
||||
|
||||
Examples
|
||||
# Inspect a public HTTPS endpoint
|
||||
tlsinfo example.com:443
|
||||
|
||||
Notes
|
||||
- Verification is intentionally disabled (InsecureSkipVerify=true). The tool
|
||||
does not validate the server certificate or hostname; it is for inspection
|
||||
only.
|
||||
- The SNI/ServerName is inferred from <hostname> when applicable.
|
||||
- You must specify a port (e.g., 443 for HTTPS).
|
||||
- The entire certificate chain is printed exactly as presented by the server.
|
||||
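The notes above describe inspection-only behaviour. For contrast, a minimal variation that keeps certificate verification enabled and sets the SNI name explicitly would look roughly like this; the target address is a placeholder and this is not what tlsinfo itself does:

    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
        "net"
    )

    func main() {
        hostPort := "example.com:443" // placeholder target
        host, _, err := net.SplitHostPort(hostPort)
        if err != nil {
            log.Fatal(err)
        }

        // ServerName drives both SNI and hostname verification.
        conn, err := tls.Dial("tcp", hostPort, &tls.Config{ServerName: host})
        if err != nil {
            log.Fatal(err) // includes verification failures
        }
        defer conn.Close()

        state := conn.ConnectionState()
        fmt.Println(tls.CipherSuiteName(state.CipherSuite))
    }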
64
cmd/tlsinfo/main.go
Normal file
@@ -0,0 +1,64 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if len(os.Args) != 2 {
|
||||
fmt.Printf("Usage: %s ‹hostname:port>\n", os.Args[0])
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
hostPort := os.Args[1]
|
||||
conn, err := tls.Dial("tcp", hostPort, &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to connect to the TLS server: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer conn.Close()
|
||||
state := conn.ConnectionState()
|
||||
printConnectionDetails(state)
|
||||
}
|
||||
|
||||
func printConnectionDetails(state tls.ConnectionState) {
|
||||
version := tlsVersion(state.Version)
|
||||
cipherSuite := tls.CipherSuiteName(state.CipherSuite)
|
||||
fmt.Printf("TLS Version: %s\n", version)
|
||||
fmt.Printf("Cipher Suite: %s\n", cipherSuite)
|
||||
printPeerCertificates(state.PeerCertificates)
|
||||
}
|
||||
|
||||
func tlsVersion(version uint16) string {
|
||||
switch version {
|
||||
|
||||
case tls.VersionTLS13:
|
||||
return "TLS 1.3"
|
||||
case tls.VersionTLS12:
|
||||
|
||||
return "TLS 1.2"
|
||||
case tls.VersionTLS11:
|
||||
return "TLS 1.1"
|
||||
case tls.VersionTLS10:
|
||||
return "TLS 1.0"
|
||||
default:
|
||||
return "Unknown"
|
||||
}
|
||||
}
|
||||
|
||||
func printPeerCertificates(certificates []*x509.Certificate) {
|
||||
for i, cert := range certificates {
|
||||
fmt.Printf("Certificate %d\n", i+1)
|
||||
fmt.Printf("\tSubject: %s\n", cert.Subject)
|
||||
fmt.Printf("\tIssuer: %s\n", cert.Issuer)
|
||||
fmt.Printf("\tDNS Names: %v\n", cert.DNSNames)
|
||||
fmt.Printf("\tNot Before: %s\n:", cert.NotBefore)
|
||||
fmt.Printf("\tNot After: %s\n", cert.NotAfter)
|
||||
}
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "tlskeypair_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/tlskeypair",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["@ht_sr_git_kisom_goutils//die:go_default_library"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "tlskeypair",
|
||||
embed = [":tlskeypair_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,14 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "utc_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/utc",
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "utc",
|
||||
embed = [":utc_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
@@ -1,15 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "yamll_lib",
|
||||
srcs = ["main.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/cmd/yamll",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = ["@in_gopkg_yaml_v2//:yaml_v2"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "yamll",
|
||||
embed = [":yamll_lib"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
136
cmd/zsearch/main.go
Normal file
@@ -0,0 +1,136 @@
|
||||
// zsearch is a utility for searching zlib-compressed files for a
|
||||
// search string. It was really designed for use with the Git object
|
||||
// store, i.e. to aid in the recovery of files after Git does what Git
|
||||
// do.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/zlib"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
const defaultDirectory = ".git/objects"
|
||||
|
||||
func errorf(format string, a ...interface{}) {
|
||||
fmt.Fprintf(os.Stderr, format, a...)
|
||||
if format[len(format)-1] != '\n' {
|
||||
fmt.Fprintf(os.Stderr, "\n")
|
||||
}
|
||||
}
|
||||
|
||||
func isDir(path string) bool {
|
||||
fi, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return fi.IsDir()
|
||||
}
|
||||
|
||||
func loadFile(path string) ([]byte, error) {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
zread, err := zlib.NewReader(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer zread.Close()
|
||||
|
||||
_, err = io.Copy(buf, zread)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func showFile(path string) {
|
||||
fileData, err := loadFile(path)
|
||||
if err != nil {
|
||||
errorf("%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("%s\n", fileData)
|
||||
}
|
||||
|
||||
func searchFile(path string, search *regexp.Regexp) error {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
errorf("%v", err)
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
zread, err := zlib.NewReader(file)
|
||||
if err != nil {
|
||||
errorf("%v", err)
|
||||
return err
|
||||
}
|
||||
defer zread.Close()
|
||||
|
||||
zbuf := bufio.NewReader(zread)
|
||||
if search.MatchReader(zbuf) {
|
||||
fileData, err := loadFile(path)
|
||||
if err != nil {
|
||||
errorf("%v", err)
|
||||
return err
|
||||
}
|
||||
fmt.Printf("%s:\n%s\n", path, fileData)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildWalker(searchExpr *regexp.Regexp) filepath.WalkFunc {
|
||||
return func(path string, info os.FileInfo, err error) error {
// Propagate walk errors first; info may be nil when err is non-nil.
if err != nil {
return err
}
if info.Mode().IsRegular() {
|
||||
return searchFile(path, searchExpr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
flSearch := flag.String("s", "", "search string (should be an RE2 regular expression)")
|
||||
flag.Parse()
|
||||
|
||||
if *flSearch == "" {
|
||||
for _, path := range flag.Args() {
|
||||
showFile(path)
|
||||
}
|
||||
} else {
|
||||
search, err := regexp.Compile(*flSearch)
|
||||
if err != nil {
|
||||
errorf("Bad regexp: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
pathList := flag.Args()
|
||||
if len(pathList) == 0 {
|
||||
pathList = []string{defaultDirectory}
|
||||
}
|
||||
|
||||
for _, path := range pathList {
|
||||
if isDir(path) {
|
||||
err := filepath.Walk(path, buildWalker(search))
|
||||
if err != nil {
|
||||
errorf("%v", err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
searchFile(path, search)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
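A hypothetical invocation against a repository's loose object store, searching for a function name:

    $ zsearch -s 'func ParseCertificatesPEM' .git/objects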
||||
@@ -1,22 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "config",
|
||||
srcs = [
|
||||
"config.go",
|
||||
"path_linux.go",
|
||||
],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/config",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = ["@ht_sr_git_kisom_goutils//config/iniconf:go_default_library"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "config_test",
|
||||
srcs = [
|
||||
"config_test.go",
|
||||
"path_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":config"],
|
||||
)
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/config/iniconf"
|
||||
@@ -139,3 +140,14 @@ func Require(key string) string {
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// ListKeys returns a slice of the currently known keys.
|
||||
func ListKeys() []string {
|
||||
keyList := []string{}
|
||||
for k := range vars {
|
||||
keyList = append(keyList, k)
|
||||
}
|
||||
|
||||
sort.Strings(keyList)
|
||||
return keyList
|
||||
}
|
||||
|
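A short sketch of how this config package is consumed, matching the calls data_sync makes above and exercising the new ListKeys helper (the key name is only an example):

    package main

    import (
        "fmt"

        "git.wntrmute.dev/kyle/goutils/config"
    )

    func main() {
        // GetDefault returns the stored value for the key, or the fallback.
        level := config.GetDefault("log_level", "INFO")
        fmt.Println(level)

        // ListKeys reports every key currently known, sorted.
        fmt.Println(config.ListKeys())
    }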
||||
@@ -1,15 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "iniconf",
|
||||
srcs = ["iniconf.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/config/iniconf",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "iniconf_test",
|
||||
srcs = ["iniconf_test.go"],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":iniconf"],
|
||||
)
|
||||
@@ -1,5 +1,5 @@
|
||||
//go:build ignore
|
||||
// +build ignore
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
package config
|
||||
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "dbg",
|
||||
srcs = ["dbg.go"],
|
||||
importpath = "git.wntrmute.dev/kyle/goutils/dbg",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "dbg_test",
|
||||
srcs = ["dbg_test.go"],
|
||||
embed = [":dbg"],
|
||||
deps = [
|
||||
"@com_github_stretchr_testify//require",
|
||||
"@ht_sr_git_kisom_goutils//testio:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -55,21 +55,21 @@ func To(w io.WriteCloser) *DebugPrinter {
|
||||
}
|
||||
|
||||
// Print calls fmt.Print if Enabled is true.
|
||||
func (dbg DebugPrinter) Print(v ...interface{}) {
|
||||
func (dbg *DebugPrinter) Print(v ...interface{}) {
|
||||
if dbg.Enabled {
|
||||
fmt.Fprint(dbg.out, v...)
|
||||
}
|
||||
}
|
||||
|
||||
// Println calls fmt.Println if Enabled is true.
|
||||
func (dbg DebugPrinter) Println(v ...interface{}) {
|
||||
func (dbg *DebugPrinter) Println(v ...interface{}) {
|
||||
if dbg.Enabled {
|
||||
fmt.Fprintln(dbg.out, v...)
|
||||
}
|
||||
}
|
||||
|
||||
// Printf calls fmt.Printf if Enabled is true.
|
||||
func (dbg DebugPrinter) Printf(format string, v ...interface{}) {
|
||||
func (dbg *DebugPrinter) Printf(format string, v ...interface{}) {
|
||||
if dbg.Enabled {
|
||||
fmt.Fprintf(dbg.out, format, v...)
|
||||
}
|
||||
|
||||
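For context on the DebugPrinter changes above: the printer is typically constructed once and its Enabled field is toggled by a command-line flag, as the diskimg and showimp changes in this diff do. A minimal sketch of that pattern:

    package main

    import (
        "flag"

        "git.wntrmute.dev/kyle/goutils/dbg"
    )

    var debug = dbg.New()

    func main() {
        flag.BoolVar(&debug.Enabled, "v", false, "enable debug output")
        flag.Parse()

        debug.Println("only printed when -v is set")
    }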
Some files were not shown because too many files have changed in this diff.