Compare commits

79 commits:

beccb551e2, c761d98b82, e68d22337b, 4cb6f5b6f0, 6d5708800f, fa3eb821e6,
dd5ed403b9, b4fde22c31, 9715293773, f6d227946b, 6f7a8fa4d4, 622f6a2638,
e3162b6164, 9d1e3ab2f0, dd98356479, 9307f44601, b9f69e4aa1, 7a4e7977c3,
72fdc255e7, 63957ff22a, 83d42dc489, 984baa6bb4, 34982c122f, fb11c0c27c,
27cc67d2cf, 0fe13439b6, fb9caec663, e3e83630b5, c1f06604e3, 9091cc7682,
74ce7bc58a, 8b2d10209e, 770100b688, 29630c55a9, 3c2ec896f8, 7828726ba4,
458f3ceaed, 2c45ae7b4e, c1b8b72cf1, bfc7fedbf9, 965312f48e, 237aa46ddd,
f8c64d3be5, d66cfe1145, ad03c5f991, 0dd4e1c6ca, 078230217d, 90318f861b,
3bb1362c0e, 30ffbbdbc5, b893e99864, c7c51568d8, 7793021260, 692562818c,
9e19346fc0, cb827169dc, 027d0173bc, 6f19b69bbd, 7e118bfdb0, e0868841bf,
c558405d11, a1eb035af7, 5eedcff042, 6ac8eb04b4, 4a4e4cd3fd, 1207093a56,
2b6ae03d1a, ef0f14a512, 6ae393ebf2, 76d88c220d, 40e015373f, 50c226b726,
070ffb9dff, 5ac05bd298, cf1edf2d31, 03e8958dd7, 6cef585071, 06678499d4,
fad17065fe

@@ -0,0 +1,42 @@
# Use the latest 2.1 version of CircleCI pipeline process engine.
# See: https://circleci.com/docs/2.0/configuration-reference
version: 2.1

# Define a job to be invoked later in a workflow.
# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
jobs:
  testbuild:
    working_directory: ~/repo
    # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
    # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
    docker:
      - image: cimg/go:1.22.2
    # Add steps to the job
    # See: https://circleci.com/docs/2.0/configuration-reference/#steps
    steps:
      - checkout
      - restore_cache:
          keys:
            - go-mod-v4-{{ checksum "go.sum" }}
      - run:
          name: Install Dependencies
          command: go mod download
      - save_cache:
          key: go-mod-v4-{{ checksum "go.sum" }}
          paths:
            - "/go/pkg/mod"
      - run:
          name: Run tests
          command: go test ./...
      - run:
          name: Run build
          command: go build ./...
      - store_test_results:
          path: /tmp/test-reports

# Invoke jobs via workflows
# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
workflows:
  testbuild:
    jobs:
      - testbuild

@@ -0,0 +1,4 @@
bazel-bin
bazel-goutils
bazel-out
bazel-testlogs

@@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="WEB_MODULE" version="4">
  <component name="Go" enabled="true" />
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/goutils.iml" filepath="$PROJECT_DIR$/.idea/goutils.iml" />
    </modules>
  </component>
</project>

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$" vcs="Git" />
  </component>
</project>

.travis.yml (11 changed lines)

@@ -1,10 +1,19 @@
arch:
  - amd64
  - ppc64le
sudo: false
language: go
go:
  - tip
  - 1.9
jobs:
  exclude:
    - go: 1.9
      arch: amd64
    - go: 1.9
      arch: ppc64le
script:
  - go get github.com/golang/lint/golint
  - go get golang.org/x/lint/golint
  - go get golang.org/x/tools/cmd/cover
  - go get github.com/kisom/goutils/...
  - go test -cover github.com/kisom/goutils/...

@@ -1,3 +1,7 @@
Release 1.2.1 - 2018-09-15

+ Add missing format argument to Errorf call in kgz.

Release 1.2.0 - 2018-09-15

+ Adds the kgz command line utility.

LICENSE (33 changed lines)

@@ -1,4 +1,4 @@
Copyright (c) 2015 Kyle Isom <kyle@tyrfingr.is>
Copyright (c) 2015-2023 Kyle Isom <kyle@tyrfingr.is>

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above

@@ -11,3 +11,34 @@ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.


=======================================================================
The backoff package (written during my time at Cloudflare) is released
under the following license:

Copyright (c) 2016 CloudFlare Inc.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

README.md (31 changed lines)

@@ -2,12 +2,17 @@ GOUTILS
This is a collection of small utility code I've written in Go; the `cmd/`
directory has a number of command-line utilities. Rather than keep all
of these in superfluous repositories of their own, I'm putting them here.
of these in superfluous repositories of their own, or rewriting them
for each project, I'm putting them here.

The project can be built with the standard Go tooling, or it can be built
with Bazel.

Contents:

    ahash/          Provides hashes from string algorithm specifiers.
    assert/         Error handling, assertion-style.
    backoff/        Implementation of an intelligent backoff strategy.
    cmd/
        atping/         Automated TCP ping, meant for putting in cronjobs.
        certchain/      Display the certificate chain from a

@@ -23,33 +28,47 @@ Contents:
        cruntar/        Untar an archive with hard links, copying instead of
                        linking.
        csrpubdump/     Dump the public key from an X.509 certificate request.
        data_sync/      Sync the user's homedir to external storage.
        diskimg/        Write a disk image to a device.
        eig/            EEPROM image generator.
        fragment/       Print a fragment of a file.
        jlp/            JSON linter/prettifier.
        kgz/            Custom gzip compressor / decompressor that handles 99%
                        of my use cases.
        parts/          Simple parts database management for my collection of
                        electronic components.
        pem2bin/        Dump the binary body of a PEM-encoded block.
        pembody/        Print the body of a PEM certificate.
        pemit/          Dump data to a PEM file.
        showimp/        List the external (e.g. non-stdlib and outside the
                        current working directory) imports for a Go file.
        readchain/      Print the common name for the certificates
                        in a bundle.
        renfnv/         Rename a file to base32-encoded 64-bit FNV-1a hash.
        rhash/          Compute the digest of remote files.
        showimp         Display the external imports in a package.
        showimp/        List the external (e.g. non-stdlib and outside the
                        current working directory) imports for a Go file.
        ski             Display the SKI for PEM-encoded TLS material.
        sprox/          Simple TCP proxy.
        stealchain/     Dump the verified chain from a TLS
                        connection.
                        connection to a server.
        stealchain-     Dump the verified chain from a TLS
        server/         connection from a client.
        subjhash/       Print or match subject info from a certificate.
        tlskeypair/     Check whether a TLS certificate and key file match.
        utc/            Convert times to UTC.
        yamll/          A small YAML linter.
    config/         A simple global configuration system where configuration
                    data is pulled from a file or an environment variable
                    transparently.
    dbg/            A debug printer.
    die/            Death of a program.
    fileutil/       Common file functions.
    lib/            Commonly-useful functions for writing Go programs.
    logging/        A logging library.
    mwc/            MultiwriteCloser implementation.
    rand/           Utilities for working with math/rand.
    sbuf/           A byte buffer that can be wiped.
    seekbuf/        A read-seekable byte buffer.
    syslog/         Syslog-type logging.
    tee/            Emulate tee(1)'s functionality in io.Writers.
    testio/         Various I/O utilities useful during testing.
    testutil/       Various utility functions useful during testing.

@@ -58,4 +77,4 @@ Contents:
Each program should have a small README in the directory with more
information.

All code here is licensed under the MIT license.
All code here is licensed under the ISC license.

@@ -1,5 +1,6 @@
// Package ahash provides support for hashing data with a selectable
// hash function.
//
// hash function.
package ahash

import (

@@ -16,7 +17,7 @@ import (
    "io"
    "sort"

    "github.com/kisom/goutils/assert"
    "git.wntrmute.dev/kyle/goutils/assert"
    "golang.org/x/crypto/blake2b"
    "golang.org/x/crypto/blake2s"
    "golang.org/x/crypto/md4"

@@ -44,13 +45,6 @@ func sha512Slicer(bs []byte) []byte {
    return sum[:]
}

var sliceFunctions = map[string]func([]byte) []byte{
    "sha224": sha224Slicer,
    "sha256": sha256Slicer,
    "sha384": sha384Slicer,
    "sha512": sha512Slicer,
}

// Hash represents a generic hash function that may or may not be secure. It
// satisfies the hash.Hash interface.
type Hash struct {

@@ -213,6 +207,17 @@ func SumReader(algo string, r io.Reader) ([]byte, error) {
    return h.Sum(nil), nil
}

// SumLimitedReader reads n bytes of data from the io.reader and returns the
// digest (not the hex digest) from the specified algorithm.
func SumLimitedReader(algo string, r io.Reader, n int64) ([]byte, error) {
    limit := &io.LimitedReader{
        R: r,
        N: n,
    }

    return SumReader(algo, limit)
}

var insecureHashList, secureHashList, hashList []string

func init() {

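For context, a minimal usage sketch of the new `SumLimitedReader` helper (not part of the diff). The import path `git.wntrmute.dev/kyle/goutils/ahash` is an assumption based on the module paths used elsewhere in this diff:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"git.wntrmute.dev/kyle/goutils/ahash" // assumed import path
)

func main() {
	// Digest only the first 5 bytes ("hello") of the reader; the rest of
	// the stream is left unread by the underlying io.LimitedReader.
	r := strings.NewReader("hello, world")
	sum, err := ahash.SumLimitedReader("sha256", r, 5)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%x\n", sum)
}
```
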
@@ -5,7 +5,7 @@ import (
    "fmt"
    "testing"

    "github.com/kisom/goutils/assert"
    "git.wntrmute.dev/kyle/goutils/assert"
)

func TestSecureHash(t *testing.T) {

@@ -139,3 +139,19 @@ func TestListLengthSanity(t *testing.T) {

    assert.BoolT(t, len(all) == len(secure)+len(insecure))
}

func TestSumLimitedReader(t *testing.T) {
    data := bytes.NewBufferString("hello, world")
    dataLen := data.Len()
    extendedData := bytes.NewBufferString("hello, world! this is an extended message")
    expected := "09ca7e4eaa6e8ae9c7d261167129184883644d07dfba7cbfbc4c8a2e08360d5b"

    hash, err := SumReader("sha256", data)
    assert.NoErrorT(t, err)
    assert.BoolT(t, fmt.Sprintf("%x", hash) == expected, fmt.Sprintf("have hash %x, want %s", hash, expected))

    extendedHash, err := SumLimitedReader("sha256", extendedData, int64(dataLen))
    assert.NoErrorT(t, err)

    assert.BoolT(t, bytes.Equal(hash, extendedHash), fmt.Sprintf("have hash %x, want %x", extendedHash, hash))
}

@@ -94,7 +94,7 @@ func NoError(err error, s ...string) {
    }

    if nil != err {
        die(err.Error())
        die(err.Error(), s...)
    }
}

@@ -170,5 +170,5 @@ func ErrorEqT(t *testing.T, expected, actual error) {
        should = fmt.Sprintf("have '%s'", actual)
    }

    die(fmt.Sprintf("assert.Error2: expected '%s', but %s", expected, should))
    t.Fatalf("assert.Error2: expected '%s', but %s", expected, should)
}

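A minimal sketch of what the `NoError` change affects, assuming the `git.wntrmute.dev/kyle/goutils/assert` import path used elsewhere in this diff; the `openConfig` helper is hypothetical:

```go
package main

import (
	"errors"

	"git.wntrmute.dev/kyle/goutils/assert" // import path used elsewhere in this diff
)

// openConfig is a hypothetical operation that fails.
func openConfig() error { return errors.New("config not found") }

func main() {
	// With the change above, the extra context strings passed to NoError
	// are now forwarded to die() and appear alongside err.Error() in the
	// failure output, instead of being silently dropped.
	assert.NoError(openConfig(), "loading configuration")
}
```
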
@@ -0,0 +1,24 @@
Copyright (c) 2016 CloudFlare Inc.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -0,0 +1,83 @@
# backoff
## Go implementation of "Exponential Backoff And Jitter"

This package implements the backoff strategy described in the AWS
Architecture Blog article
["Exponential Backoff And Jitter"](http://www.awsarchitectureblog.com/2015/03/backoff.html). Essentially,
the backoff has an interval `time.Duration`; the *n<sup>th</sup>* call
to backoff will return an a `time.Duration` that is *2 <sup>n</sup> *
interval*. If jitter is enabled (which is the default behaviour), the
duration is a random value between 0 and *2 <sup>n</sup> * interval*.
The backoff is configured with a maximum duration that will not be
exceeded; e.g., by default, the longest duration returned is
`backoff.DefaultMaxDuration`.

## Usage

A `Backoff` is initialised with a call to `New`. Using zero values
causes it to use `DefaultMaxDuration` and `DefaultInterval` as the
maximum duration and interval.

```
package something

import "github.com/cloudflare/backoff"

func retryable() {
    b := backoff.New(0, 0)
    for {
        err := someOperation()
        if err == nil {
            break
        }

        log.Printf("error in someOperation: %v", err)
        <-time.After(b.Duration())
    }

    log.Printf("succeeded after %d tries", b.Tries()+1)
    b.Reset()
}
```

It can also be used to rate limit code that should retry infinitely, but which does not
use `Backoff` itself.

```
package something

import (
    "time"

    "github.com/cloudflare/backoff"
)

func retryable() {
    b := backoff.New(0, 0)
    b.SetDecay(30 * time.Second)

    for {
        // b will reset if someOperation returns later than
        // the last call to b.Duration() + 30s.
        err := someOperation()
        if err == nil {
            break
        }

        log.Printf("error in someOperation: %v", err)
        <-time.After(b.Duration())
    }
}
```

## Tunables

* `NewWithoutJitter` creates a Backoff that doesn't use jitter.

The default behaviour is controlled by two variables:

* `DefaultInterval` sets the base interval for backoffs created with
  the zero `time.Duration` value in the `Interval` field.
* `DefaultMaxDuration` sets the maximum duration for backoffs created
  with the zero `time.Duration` value in the `MaxDuration` field.

@@ -0,0 +1,197 @@
// Package backoff contains an implementation of an intelligent backoff
// strategy. It is based on the approach in the AWS architecture blog
// article titled "Exponential Backoff And Jitter", which is found at
// http://www.awsarchitectureblog.com/2015/03/backoff.html.
//
// Essentially, the backoff has an interval `time.Duration`; the nth
// call to backoff will return a `time.Duration` that is 2^n *
// interval. If jitter is enabled (which is the default behaviour),
// the duration is a random value between 0 and 2^n * interval. The
// backoff is configured with a maximum duration that will not be
// exceeded.
//
// The `New` function will attempt to use the system's cryptographic
// random number generator to seed a Go math/rand random number
// source. If this fails, the package will panic on startup.
package backoff

import (
    "crypto/rand"
    "encoding/binary"
    "io"
    "math"
    mrand "math/rand"
    "sync"
    "time"
)

var prngMu sync.Mutex
var prng *mrand.Rand

// DefaultInterval is used when a Backoff is initialised with a
// zero-value Interval.
var DefaultInterval = 5 * time.Minute

// DefaultMaxDuration is maximum amount of time that the backoff will
// delay for.
var DefaultMaxDuration = 6 * time.Hour

// A Backoff contains the information needed to intelligently backoff
// and retry operations using an exponential backoff algorithm. It should
// be initialised with a call to `New`.
//
// Only use a Backoff from a single goroutine, it is not safe for concurrent
// access.
type Backoff struct {
    // maxDuration is the largest possible duration that can be
    // returned from a call to Duration.
    maxDuration time.Duration

    // interval controls the time step for backing off.
    interval time.Duration

    // noJitter controls whether to use the "Full Jitter"
    // improvement to attempt to smooth out spikes in a high
    // contention scenario. If noJitter is set to true, no
    // jitter will be introduced.
    noJitter bool

    // decay controls the decay of n. If it is non-zero, n is
    // reset if more than the last backoff + decay has elapsed since
    // the last try.
    decay time.Duration

    n       uint64
    lastTry time.Time
}

// New creates a new backoff with the specified max duration and
// interval. Zero values may be used to use the default values.
//
// Panics if either max or interval is negative.
func New(max time.Duration, interval time.Duration) *Backoff {
    if max < 0 || interval < 0 {
        panic("backoff: max or interval is negative")
    }

    b := &Backoff{
        maxDuration: max,
        interval:    interval,
    }
    b.setup()
    return b
}

// NewWithoutJitter works similarly to New, except that the created
// Backoff will not use jitter.
func NewWithoutJitter(max time.Duration, interval time.Duration) *Backoff {
    b := New(max, interval)
    b.noJitter = true
    return b
}

func init() {
    var buf [8]byte
    var n int64

    _, err := io.ReadFull(rand.Reader, buf[:])
    if err != nil {
        panic(err.Error())
    }

    n = int64(binary.LittleEndian.Uint64(buf[:]))

    src := mrand.NewSource(n)
    prng = mrand.New(src)
}

func (b *Backoff) setup() {
    if b.interval == 0 {
        b.interval = DefaultInterval
    }

    if b.maxDuration == 0 {
        b.maxDuration = DefaultMaxDuration
    }
}

// Duration returns a time.Duration appropriate for the backoff,
// incrementing the attempt counter.
func (b *Backoff) Duration() time.Duration {
    b.setup()

    b.decayN()

    t := b.duration(b.n)

    if b.n < math.MaxUint64 {
        b.n++
    }

    if !b.noJitter {
        prngMu.Lock()
        t = time.Duration(prng.Int63n(int64(t)))
        prngMu.Unlock()
    }

    return t
}

// requires b to be locked.
func (b *Backoff) duration(n uint64) (t time.Duration) {
    // Saturate pow
    pow := time.Duration(math.MaxInt64)
    if n < 63 {
        pow = 1 << n
    }

    t = b.interval * pow
    if t/pow != b.interval || t > b.maxDuration {
        t = b.maxDuration
    }

    return
}

// Reset resets the attempt counter of a backoff.
//
// It should be called when the rate-limited action succeeds.
func (b *Backoff) Reset() {
    b.lastTry = time.Time{}
    b.n = 0
}

// SetDecay sets the duration after which the try counter will be reset.
// Panics if decay is smaller than 0.
//
// The decay only kicks in if at least the last backoff + decay has elapsed
// since the last try.
func (b *Backoff) SetDecay(decay time.Duration) {
    if decay < 0 {
        panic("backoff: decay < 0")
    }

    b.decay = decay
}

// requires b to be locked
func (b *Backoff) decayN() {
    if b.decay == 0 {
        return
    }

    if b.lastTry.IsZero() {
        b.lastTry = time.Now()
        return
    }

    lastDuration := b.duration(b.n - 1)
    decayed := time.Since(b.lastTry) > lastDuration+b.decay
    b.lastTry = time.Now()

    if !decayed {
        return
    }

    b.n = 0
}

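To make the growth rule concrete, here is a small sketch (not part of the diff) of the deterministic behaviour without jitter: each call returns interval * 2^n, capped at the configured maximum. The import path is the one shown in the package README above; within this repository the path would differ:

```go
package main

import (
	"fmt"
	"time"

	"github.com/cloudflare/backoff" // import path from the package README above
)

func main() {
	// Without jitter the durations are deterministic: 1s, 2s, 4s, 8s,
	// then capped at the 10s maximum for every later call.
	b := backoff.NewWithoutJitter(10*time.Second, time.Second)
	for i := 0; i < 6; i++ {
		fmt.Println(b.Duration())
	}
	b.Reset() // start the sequence over after a success
}
```
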
@@ -0,0 +1,175 @@
package backoff

import (
    "fmt"
    "math"
    "testing"
    "time"
)

// If given New with 0's and no jitter, ensure that certain invariants are met:
//
// - the default max duration and interval should be used
// - noJitter should be true
// - the RNG should not be initialised
// - the first duration should be equal to the default interval
func TestDefaults(t *testing.T) {
    b := NewWithoutJitter(0, 0)

    if b.maxDuration != DefaultMaxDuration {
        t.Fatalf("expected new backoff to use the default max duration (%s), but have %s", DefaultMaxDuration, b.maxDuration)
    }

    if b.interval != DefaultInterval {
        t.Fatalf("exepcted new backoff to use the default interval (%s), but have %s", DefaultInterval, b.interval)
    }

    if b.noJitter != true {
        t.Fatal("backoff should have been initialised without jitter")
    }

    dur := b.Duration()
    if dur != DefaultInterval {
        t.Fatalf("expected first duration to be %s, have %s", DefaultInterval, dur)
    }
}

// Given a zero-value initialised Backoff, it should be transparently
// setup.
func TestSetup(t *testing.T) {
    b := new(Backoff)
    dur := b.Duration()
    if dur < 0 || dur > (5*time.Minute) {
        t.Fatalf("want duration between 0 and 5 minutes, have %s", dur)
    }
}

// Ensure that tries incremenets as expected.
func TestTries(t *testing.T) {
    b := NewWithoutJitter(5, 1)

    for i := uint64(0); i < 3; i++ {
        if b.n != i {
            t.Fatalf("want tries=%d, have tries=%d", i, b.n)
        }

        pow := 1 << i
        expected := time.Duration(pow)
        dur := b.Duration()
        if dur != expected {
            t.Fatalf("want duration=%d, have duration=%d at i=%d", expected, dur, i)
        }
    }

    for i := uint(3); i < 5; i++ {
        dur := b.Duration()
        if dur != 5 {
            t.Fatalf("want duration=5, have %d at i=%d", dur, i)
        }
    }
}

// Ensure that a call to Reset will actually reset the Backoff.
func TestReset(t *testing.T) {
    const iter = 10
    b := New(1000, 1)
    for i := 0; i < iter; i++ {
        _ = b.Duration()
    }

    if b.n != iter {
        t.Fatalf("expected tries=%d, have tries=%d", iter, b.n)
    }

    b.Reset()
    if b.n != 0 {
        t.Fatalf("expected tries=0 after reset, have tries=%d", b.n)
    }
}

const decay = 5 * time.Millisecond
const max = 10 * time.Millisecond
const interval = time.Millisecond

func TestDecay(t *testing.T) {
    const iter = 10

    b := NewWithoutJitter(max, 1)
    b.SetDecay(decay)

    var backoff time.Duration
    for i := 0; i < iter; i++ {
        backoff = b.Duration()
    }

    if b.n != iter {
        t.Fatalf("expected tries=%d, have tries=%d", iter, b.n)
    }

    // Don't decay below backoff
    b.lastTry = time.Now().Add(-backoff + 1)
    backoff = b.Duration()
    if b.n != iter+1 {
        t.Fatalf("expected tries=%d, have tries=%d", iter+1, b.n)
    }

    // Reset after backoff + decay
    b.lastTry = time.Now().Add(-backoff - decay)
    b.Duration()
    if b.n != 1 {
        t.Fatalf("expected tries=%d, have tries=%d", 1, b.n)
    }
}

// Ensure that decay works even if the retry counter is saturated.
func TestDecaySaturation(t *testing.T) {
    b := NewWithoutJitter(1<<2, 1)
    b.SetDecay(decay)

    var duration time.Duration
    for i := 0; i <= 2; i++ {
        duration = b.Duration()
    }

    if duration != 1<<2 {
        t.Fatalf("expected duration=%v, have duration=%v", 1<<2, duration)
    }

    b.lastTry = time.Now().Add(-duration - decay)
    b.n = math.MaxUint64

    duration = b.Duration()
    if duration != 1 {
        t.Errorf("expected duration=%v, have duration=%v", 1, duration)
    }
}

func ExampleBackoff_SetDecay() {
    b := NewWithoutJitter(max, interval)
    b.SetDecay(decay)

    // try 0
    fmt.Println(b.Duration())

    // try 1
    fmt.Println(b.Duration())

    // try 2
    duration := b.Duration()
    fmt.Println(duration)

    // try 3, below decay
    time.Sleep(duration)
    duration = b.Duration()
    fmt.Println(duration)

    // try 4, resets
    time.Sleep(duration + decay)
    fmt.Println(b.Duration())

    // Output: 1ms
    // 2ms
    // 4ms
    // 8ms
    // 1ms
}

@@ -0,0 +1,79 @@
package certerr

import (
    "errors"
    "fmt"
    "strings"
)

// ErrEmptyCertificate indicates that a certificate could not be processed
// because there was no data to process.
var ErrEmptyCertificate = errors.New("certlib: empty certificate")

type ErrorSourceType uint8

func (t ErrorSourceType) String() string {
    switch t {
    case ErrorSourceCertificate:
        return "certificate"
    case ErrorSourcePrivateKey:
        return "private key"
    case ErrorSourceCSR:
        return "CSR"
    case ErrorSourceSCTList:
        return "SCT list"
    case ErrorSourceKeypair:
        return "TLS keypair"
    default:
        panic(fmt.Sprintf("unknown error source %d", t))
    }
}

const (
    ErrorSourceCertificate ErrorSourceType = 1
    ErrorSourcePrivateKey  ErrorSourceType = 2
    ErrorSourceCSR         ErrorSourceType = 3
    ErrorSourceSCTList     ErrorSourceType = 4
    ErrorSourceKeypair     ErrorSourceType = 5
)

// InvalidPEMType is used to indicate that we were expecting one type of PEM
// file, but saw another.
type InvalidPEMType struct {
    have string
    want []string
}

func (err *InvalidPEMType) Error() string {
    if len(err.want) == 1 {
        return fmt.Sprintf("invalid PEM type: have %s, expected %s", err.have, err.want[0])
    } else {
        return fmt.Sprintf("invalid PEM type: have %s, expected one of %s", err.have, strings.Join(err.want, ", "))
    }
}

// ErrInvalidPEMType returns a new InvalidPEMType error.
func ErrInvalidPEMType(have string, want ...string) error {
    return &InvalidPEMType{
        have: have,
        want: want,
    }
}

func LoadingError(t ErrorSourceType, err error) error {
    return fmt.Errorf("failed to load %s from disk: %w", t, err)
}

func ParsingError(t ErrorSourceType, err error) error {
    return fmt.Errorf("failed to parse %s: %w", t, err)
}

func DecodeError(t ErrorSourceType, err error) error {
    return fmt.Errorf("failed to decode %s: %w", t, err)
}

func VerifyError(t ErrorSourceType, err error) error {
    return fmt.Errorf("failed to verify %s: %w", t, err)
}

var ErrEncryptedPrivateKey = errors.New("private key is encrypted")

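Because the wrapper functions use the `%w` verb, callers can still match the underlying cause with `errors.Is`/`errors.As`. A minimal sketch (not part of the diff), using the `git.wntrmute.dev/kyle/goutils/certlib/certerr` import path that appears elsewhere in this diff:

```go
package main

import (
	"errors"
	"fmt"

	"git.wntrmute.dev/kyle/goutils/certlib/certerr" // import path used elsewhere in this diff
)

func main() {
	// LoadingError wraps the underlying error with %w, so the original
	// cause remains reachable through the error chain.
	underlying := errors.New("no such file or directory")
	err := certerr.LoadingError(certerr.ErrorSourceCertificate, underlying)

	fmt.Println(err)                        // failed to load certificate from disk: ...
	fmt.Println(errors.Is(err, underlying)) // true
}
```
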
@@ -0,0 +1,85 @@
package certlib

import (
    "crypto/x509"
    "encoding/pem"
    "errors"
    "io/ioutil"

    "git.wntrmute.dev/kyle/goutils/certlib/certerr"
)

// ReadCertificate reads a DER or PEM-encoded certificate from the
// byte slice.
func ReadCertificate(in []byte) (cert *x509.Certificate, rest []byte, err error) {
    if len(in) == 0 {
        err = certerr.ErrEmptyCertificate
        return
    }

    if in[0] == '-' {
        p, remaining := pem.Decode(in)
        if p == nil {
            err = errors.New("certlib: invalid PEM file")
            return
        }

        rest = remaining
        if p.Type != "CERTIFICATE" {
            err = certerr.ErrInvalidPEMType(p.Type, "CERTIFICATE")
            return
        }

        in = p.Bytes
    }

    cert, err = x509.ParseCertificate(in)
    return
}

// ReadCertificates tries to read all the certificates in a
// PEM-encoded collection.
func ReadCertificates(in []byte) (certs []*x509.Certificate, err error) {
    var cert *x509.Certificate
    for {
        cert, in, err = ReadCertificate(in)
        if err != nil {
            break
        }

        if cert == nil {
            break
        }

        certs = append(certs, cert)
        if len(in) == 0 {
            break
        }
    }

    return certs, err
}

// LoadCertificate tries to read a single certificate from disk. If
// the file contains multiple certificates (e.g. a chain), only the
// first certificate is returned.
func LoadCertificate(path string) (*x509.Certificate, error) {
    in, err := ioutil.ReadFile(path)
    if err != nil {
        return nil, err
    }

    cert, _, err := ReadCertificate(in)
    return cert, err
}

// LoadCertificates tries to read all the certificates in a file,
// returning them in the order that it found them in the file.
func LoadCertificates(path string) ([]*x509.Certificate, error) {
    in, err := ioutil.ReadFile(path)
    if err != nil {
        return nil, err
    }

    return ReadCertificates(in)
}

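A short usage sketch for the loaders above (not part of the diff). The `git.wntrmute.dev/kyle/goutils/certlib` import path and the `bundle.pem` file name are assumptions for illustration:

```go
package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib" // assumed import path
)

func main() {
	// bundle.pem is a placeholder for a file containing one or more
	// PEM-encoded certificates (e.g. a certificate chain).
	certs, err := certlib.LoadCertificates("bundle.pem")
	if err != nil {
		log.Fatal(err)
	}

	for _, cert := range certs {
		fmt.Println(cert.Subject.CommonName)
	}
}
```
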
@@ -1,10 +1,10 @@
package lib
package certlib

import (
    "fmt"
    "testing"

    "github.com/kisom/goutils/assert"
    "git.wntrmute.dev/kyle/goutils/assert"
)

// some CA certs I found on my computerbox.

@@ -0,0 +1,75 @@
package certlib

// Originally from CFSSL, mostly written by me originally, and licensed under:

/*
Copyright (c) 2014 CloudFlare Inc.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

// I've modified it for use in my own code e.g. by removing the CFSSL errors
// and replacing them with sane ones.

import (
    "crypto"
    "crypto/ecdsa"
    "crypto/ed25519"
    "crypto/rsa"
    "crypto/x509"
    "fmt"

    "git.wntrmute.dev/kyle/goutils/certlib/certerr"
)

// ParsePrivateKeyDER parses a PKCS #1, PKCS #8, ECDSA, or Ed25519 DER-encoded
// private key. The key must not be in PEM format. If an error is returned, it
// may contain information about the private key, so care should be taken when
// displaying it directly.
func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) {
    generalKey, err := x509.ParsePKCS8PrivateKey(keyDER)
    if err != nil {
        generalKey, err = x509.ParsePKCS1PrivateKey(keyDER)
        if err != nil {
            generalKey, err = x509.ParseECPrivateKey(keyDER)
            if err != nil {
                generalKey, err = ParseEd25519PrivateKey(keyDER)
                if err != nil {
                    return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, err)
                }
            }
        }
    }

    switch generalKey := generalKey.(type) {
    case *rsa.PrivateKey:
        return generalKey, nil
    case *ecdsa.PrivateKey:
        return generalKey, nil
    case ed25519.PrivateKey:
        return generalKey, nil
    default:
        return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %t", generalKey))
    }
}

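A small sketch of how `ParsePrivateKeyDER` might be exercised (not part of the diff): it falls through PKCS #8, PKCS #1, and SEC 1 EC parsing before trying Ed25519, so a DER-encoded EC key parses on the third attempt. The `git.wntrmute.dev/kyle/goutils/certlib` import path is an assumption based on the module paths in this diff:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib" // assumed import path
)

func main() {
	// Generate an ECDSA key and round-trip it through SEC 1 DER encoding.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	der, err := x509.MarshalECPrivateKey(priv)
	if err != nil {
		log.Fatal(err)
	}

	signer, err := certlib.ParsePrivateKeyDER(der)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("parsed key type: %T\n", signer) // *ecdsa.PrivateKey
}
```
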
@@ -0,0 +1,164 @@
package certlib

import (
    "crypto"
    "crypto/ed25519"
    "crypto/x509/pkix"
    "encoding/asn1"
    "errors"
)

// Originally from CFSSL, mostly written by me originally, and licensed under:

/*
Copyright (c) 2014 CloudFlare Inc.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

// I've modified it for use in my own code e.g. by removing the CFSSL errors
// and replacing them with sane ones.

var errEd25519WrongID = errors.New("incorrect object identifier")
var errEd25519WrongKeyType = errors.New("incorrect key type")

// ed25519OID is the OID for the Ed25519 signature scheme: see
// https://datatracker.ietf.org/doc/draft-ietf-curdle-pkix-04.
var ed25519OID = asn1.ObjectIdentifier{1, 3, 101, 112}

// subjectPublicKeyInfo reflects the ASN.1 object defined in the X.509 standard.
//
// This is defined in crypto/x509 as "publicKeyInfo".
type subjectPublicKeyInfo struct {
    Algorithm pkix.AlgorithmIdentifier
    PublicKey asn1.BitString
}

// MarshalEd25519PublicKey creates a DER-encoded SubjectPublicKeyInfo for an
// ed25519 public key, as defined in
// https://tools.ietf.org/html/draft-ietf-curdle-pkix-04. This is analogous to
// MarshalPKIXPublicKey in crypto/x509, which doesn't currently support Ed25519.
func MarshalEd25519PublicKey(pk crypto.PublicKey) ([]byte, error) {
    pub, ok := pk.(ed25519.PublicKey)
    if !ok {
        return nil, errEd25519WrongKeyType
    }

    spki := subjectPublicKeyInfo{
        Algorithm: pkix.AlgorithmIdentifier{
            Algorithm: ed25519OID,
        },
        PublicKey: asn1.BitString{
            BitLength: len(pub) * 8,
            Bytes:     pub,
        },
    }

    return asn1.Marshal(spki)
}

// ParseEd25519PublicKey returns the Ed25519 public key encoded by the input.
func ParseEd25519PublicKey(der []byte) (crypto.PublicKey, error) {
    var spki subjectPublicKeyInfo
    if rest, err := asn1.Unmarshal(der, &spki); err != nil {
        return nil, err
    } else if len(rest) > 0 {
        return nil, errors.New("SubjectPublicKeyInfo too long")
    }

    if !spki.Algorithm.Algorithm.Equal(ed25519OID) {
        return nil, errEd25519WrongID
    }

    if spki.PublicKey.BitLength != ed25519.PublicKeySize*8 {
        return nil, errors.New("SubjectPublicKeyInfo PublicKey length mismatch")
    }

    return ed25519.PublicKey(spki.PublicKey.Bytes), nil
}

// oneAsymmetricKey reflects the ASN.1 structure for storing private keys in
// https://tools.ietf.org/html/draft-ietf-curdle-pkix-04, excluding the optional
// fields, which we don't use here.
//
// This is identical to pkcs8 in crypto/x509.
type oneAsymmetricKey struct {
    Version    int
    Algorithm  pkix.AlgorithmIdentifier
    PrivateKey []byte
}

// curvePrivateKey is the innter type of the PrivateKey field of
// oneAsymmetricKey.
type curvePrivateKey []byte

// MarshalEd25519PrivateKey returns a DER encoding of the input private key as
// specified in https://tools.ietf.org/html/draft-ietf-curdle-pkix-04.
func MarshalEd25519PrivateKey(sk crypto.PrivateKey) ([]byte, error) {
    priv, ok := sk.(ed25519.PrivateKey)
    if !ok {
        return nil, errEd25519WrongKeyType
    }

    // Marshal the innter CurvePrivateKey.
    curvePrivateKey, err := asn1.Marshal(priv.Seed())
    if err != nil {
        return nil, err
    }

    // Marshal the OneAsymmetricKey.
    asym := oneAsymmetricKey{
        Version: 0,
        Algorithm: pkix.AlgorithmIdentifier{
            Algorithm: ed25519OID,
        },
        PrivateKey: curvePrivateKey,
    }
    return asn1.Marshal(asym)
}

// ParseEd25519PrivateKey returns the Ed25519 private key encoded by the input.
func ParseEd25519PrivateKey(der []byte) (crypto.PrivateKey, error) {
    asym := new(oneAsymmetricKey)
    if rest, err := asn1.Unmarshal(der, asym); err != nil {
        return nil, err
    } else if len(rest) > 0 {
        return nil, errors.New("OneAsymmetricKey too long")
    }

    // Check that the key type is correct.
    if !asym.Algorithm.Algorithm.Equal(ed25519OID) {
        return nil, errEd25519WrongID
    }

    // Unmarshal the inner CurvePrivateKey.
    seed := new(curvePrivateKey)
    if rest, err := asn1.Unmarshal(asym.PrivateKey, seed); err != nil {
        return nil, err
    } else if len(rest) > 0 {
        return nil, errors.New("CurvePrivateKey too long")
    }

    return ed25519.NewKeyFromSeed(*seed), nil
}

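A short round-trip sketch for the Ed25519 marshalling helpers above (not part of the diff); the `git.wntrmute.dev/kyle/goutils/certlib` import path is an assumption based on the module paths in this diff:

```go
package main

import (
	"bytes"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib" // assumed import path
)

func main() {
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// DER-encode the private key per draft-ietf-curdle-pkix, then parse
	// it back and confirm the round trip preserves the key.
	der, err := certlib.MarshalEd25519PrivateKey(priv)
	if err != nil {
		log.Fatal(err)
	}

	parsed, err := certlib.ParseEd25519PrivateKey(der)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(bytes.Equal(parsed.(ed25519.PrivateKey), priv)) // true
}
```
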
@ -0,0 +1,630 @@
|
|||
package certlib
|
||||
|
||||
// Originally from CFSSL, mostly written by me originally, and licensed under:
|
||||
|
||||
/*
|
||||
Copyright (c) 2014 CloudFlare Inc.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
// I've modified it for use in my own code e.g. by removing the CFSSL errors
|
||||
// and replacing them with sane ones.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/pkcs7"
|
||||
|
||||
ct "github.com/google/certificate-transparency-go"
|
||||
cttls "github.com/google/certificate-transparency-go/tls"
|
||||
ctx509 "github.com/google/certificate-transparency-go/x509"
|
||||
"golang.org/x/crypto/ocsp"
|
||||
"golang.org/x/crypto/pkcs12"
|
||||
)
|
||||
|
||||
// OneYear is a time.Duration representing a year's worth of seconds.
|
||||
const OneYear = 8760 * time.Hour
|
||||
|
||||
// OneDay is a time.Duration representing a day's worth of seconds.
|
||||
const OneDay = 24 * time.Hour
|
||||
|
||||
// DelegationUsage is the OID for the DelegationUseage extensions
|
||||
var DelegationUsage = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 44363, 44}
|
||||
|
||||
// DelegationExtension
|
||||
var DelegationExtension = pkix.Extension{
|
||||
Id: DelegationUsage,
|
||||
Critical: false,
|
||||
Value: []byte{0x05, 0x00}, // ASN.1 NULL
|
||||
}
|
||||
|
||||
// InclusiveDate returns the time.Time representation of a date - 1
|
||||
// nanosecond. This allows time.After to be used inclusively.
|
||||
func InclusiveDate(year int, month time.Month, day int) time.Time {
|
||||
return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond)
|
||||
}
|
||||
|
||||
// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop
|
||||
// issuing certificates valid for more than 5 years.
|
||||
var Jul2012 = InclusiveDate(2012, time.July, 01)
|
||||
|
||||
// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop
|
||||
// issuing certificates valid for more than 39 months.
|
||||
var Apr2015 = InclusiveDate(2015, time.April, 01)
|
||||
|
||||
// KeyLength returns the bit size of ECDSA or RSA PublicKey
|
||||
func KeyLength(key interface{}) int {
|
||||
if key == nil {
|
||||
return 0
|
||||
}
|
||||
if ecdsaKey, ok := key.(*ecdsa.PublicKey); ok {
|
||||
return ecdsaKey.Curve.Params().BitSize
|
||||
} else if rsaKey, ok := key.(*rsa.PublicKey); ok {
|
||||
return rsaKey.N.BitLen()
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// ExpiryTime returns the time when the certificate chain is expired.
|
||||
func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) {
|
||||
if len(chain) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
notAfter = chain[0].NotAfter
|
||||
for _, cert := range chain {
|
||||
if notAfter.After(cert.NotAfter) {
|
||||
notAfter = cert.NotAfter
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MonthsValid returns the number of months for which a certificate is valid.
|
||||
func MonthsValid(c *x509.Certificate) int {
|
||||
issued := c.NotBefore
|
||||
expiry := c.NotAfter
|
||||
years := (expiry.Year() - issued.Year())
|
||||
months := years*12 + int(expiry.Month()) - int(issued.Month())
|
||||
|
||||
// Round up if valid for less than a full month
|
||||
if expiry.Day() > issued.Day() {
|
||||
months++
|
||||
}
|
||||
return months
|
||||
}
|
||||
|
||||
// ValidExpiry determines if a certificate is valid for an acceptable
|
||||
// length of time per the CA/Browser Forum baseline requirements.
|
||||
// See https://cabforum.org/wp-content/uploads/CAB-Forum-BR-1.3.0.pdf
|
||||
func ValidExpiry(c *x509.Certificate) bool {
|
||||
issued := c.NotBefore
|
||||
|
||||
var maxMonths int
|
||||
switch {
|
||||
case issued.After(Apr2015):
|
||||
maxMonths = 39
|
||||
case issued.After(Jul2012):
|
||||
maxMonths = 60
|
||||
case issued.Before(Jul2012):
|
||||
maxMonths = 120
|
||||
}
|
||||
|
||||
if MonthsValid(c) > maxMonths {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// SignatureString returns the TLS signature string corresponding to
|
||||
// an X509 signature algorithm.
|
||||
func SignatureString(alg x509.SignatureAlgorithm) string {
|
||||
switch alg {
|
||||
case x509.MD2WithRSA:
|
||||
return "MD2WithRSA"
|
||||
case x509.MD5WithRSA:
|
||||
return "MD5WithRSA"
|
||||
case x509.SHA1WithRSA:
|
||||
return "SHA1WithRSA"
|
||||
case x509.SHA256WithRSA:
|
||||
return "SHA256WithRSA"
|
||||
case x509.SHA384WithRSA:
|
||||
return "SHA384WithRSA"
|
||||
case x509.SHA512WithRSA:
|
||||
return "SHA512WithRSA"
|
||||
case x509.DSAWithSHA1:
|
||||
return "DSAWithSHA1"
|
||||
case x509.DSAWithSHA256:
|
||||
return "DSAWithSHA256"
|
||||
case x509.ECDSAWithSHA1:
|
||||
return "ECDSAWithSHA1"
|
||||
case x509.ECDSAWithSHA256:
|
||||
return "ECDSAWithSHA256"
|
||||
case x509.ECDSAWithSHA384:
|
||||
return "ECDSAWithSHA384"
|
||||
case x509.ECDSAWithSHA512:
|
||||
return "ECDSAWithSHA512"
|
||||
default:
|
||||
return "Unknown Signature"
|
||||
}
|
||||
}
|
||||
|
||||
// HashAlgoString returns the hash algorithm name contains in the signature
|
||||
// method.
|
||||
func HashAlgoString(alg x509.SignatureAlgorithm) string {
|
||||
switch alg {
|
||||
case x509.MD2WithRSA:
|
||||
return "MD2"
|
||||
case x509.MD5WithRSA:
|
||||
return "MD5"
|
||||
case x509.SHA1WithRSA:
|
||||
return "SHA1"
|
||||
case x509.SHA256WithRSA:
|
||||
return "SHA256"
|
||||
case x509.SHA384WithRSA:
|
||||
return "SHA384"
|
||||
case x509.SHA512WithRSA:
|
||||
return "SHA512"
|
||||
case x509.DSAWithSHA1:
|
||||
return "SHA1"
|
||||
case x509.DSAWithSHA256:
|
||||
return "SHA256"
|
||||
case x509.ECDSAWithSHA1:
|
||||
return "SHA1"
|
||||
case x509.ECDSAWithSHA256:
|
||||
return "SHA256"
|
||||
case x509.ECDSAWithSHA384:
|
||||
return "SHA384"
|
||||
case x509.ECDSAWithSHA512:
|
||||
return "SHA512"
|
||||
default:
|
||||
return "Unknown Hash Algorithm"
|
||||
}
|
||||
}
|
||||
|
||||
// StringTLSVersion returns underlying enum values from human names for TLS
|
||||
// versions, defaults to current golang default of TLS 1.0
|
||||
func StringTLSVersion(version string) uint16 {
|
||||
switch version {
|
||||
case "1.2":
|
||||
return tls.VersionTLS12
|
||||
case "1.1":
|
||||
return tls.VersionTLS11
|
||||
default:
|
||||
return tls.VersionTLS10
|
||||
}
|
||||
}
|
||||
|
||||
// EncodeCertificatesPEM encodes a number of x509 certificates to PEM
|
||||
func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
|
||||
var buffer bytes.Buffer
|
||||
for _, cert := range certs {
|
||||
pem.Encode(&buffer, &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: cert.Raw,
|
||||
})
|
||||
}
|
||||
|
||||
return buffer.Bytes()
|
||||
}
|
||||
|
||||
// EncodeCertificatePEM encodes a single x509 certificates to PEM
|
||||
func EncodeCertificatePEM(cert *x509.Certificate) []byte {
|
||||
return EncodeCertificatesPEM([]*x509.Certificate{cert})
|
||||
}
|
||||
|
||||
// ParseCertificatesPEM parses a sequence of PEM-encoded certificate and returns them,
|
||||
// can handle PEM encoded PKCS #7 structures.
|
||||
func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
|
||||
var certs []*x509.Certificate
|
||||
var err error
|
||||
certsPEM = bytes.TrimSpace(certsPEM)
|
||||
for len(certsPEM) > 0 {
|
||||
var cert []*x509.Certificate
|
||||
cert, certsPEM, err = ParseOneCertificateFromPEM(certsPEM)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
} else if cert == nil {
|
||||
break
|
||||
}
|
||||
|
||||
certs = append(certs, cert...)
|
||||
}
|
||||
if len(certsPEM) > 0 {
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("trailing data at end of certificate"))
|
||||
}
|
||||
return certs, nil
|
||||
}
|
||||
|
||||
// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
|
||||
// either PKCS #7, PKCS #12, or raw x509.
|
||||
func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
|
||||
certsDER = bytes.TrimSpace(certsDER)
|
||||
pkcs7data, err := pkcs7.ParsePKCS7(certsDER)
|
||||
if err != nil {
|
||||
var pkcs12data interface{}
|
||||
certs = make([]*x509.Certificate, 1)
|
||||
pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password)
|
||||
if err != nil {
|
||||
certs, err = x509.ParseCertificates(certsDER)
|
||||
if err != nil {
|
||||
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
} else {
|
||||
key = pkcs12data.(crypto.Signer)
|
||||
}
|
||||
} else {
|
||||
if pkcs7data.ContentInfo != "SignedData" {
|
||||
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("can only extract certificates from signed data content info"))
|
||||
}
|
||||
certs = pkcs7data.Content.SignedData.Certificates
|
||||
}
|
||||
if certs == nil {
|
||||
return nil, key, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificates decoded"))
|
||||
}
|
||||
return certs, key, nil
|
||||
}
|
||||
|
||||
// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and check if it is self-signed.
|
||||
func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
||||
cert, err := ParseCertificatePEM(certPEM)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
|
||||
return nil, certerr.VerifyError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// ParseCertificatePEM parses and returns a PEM-encoded certificate,
|
||||
// can handle PEM encoded PKCS #7 structures.
|
||||
func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
||||
certPEM = bytes.TrimSpace(certPEM)
|
||||
cert, rest, err := ParseOneCertificateFromPEM(certPEM)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
} else if cert == nil {
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificate decoded"))
|
||||
} else if len(rest) > 0 {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("the PEM file should contain only one object"))
|
||||
} else if len(cert) > 1 {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("the PKCS7 object in the PEM file should contain only one certificate"))
|
||||
}
|
||||
return cert[0], nil
|
||||
}
|
||||
|
||||
// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object,
|
||||
// either a raw x509 certificate or a PKCS #7 structure possibly containing
|
||||
// multiple certificates, from the top of certsPEM, which itself may
|
||||
// contain multiple PEM encoded certificate objects.
|
||||
func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) {
|
||||
|
||||
block, rest := pem.Decode(certsPEM)
|
||||
if block == nil {
|
||||
return nil, rest, nil
|
||||
}
|
||||
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes)
|
||||
if err != nil {
|
||||
return nil, rest, err
|
||||
}
|
||||
if pkcs7data.ContentInfo != "SignedData" {
|
||||
return nil, rest, errors.New("only PKCS #7 Signed Data Content Info supported for certificate parsing")
|
||||
}
|
||||
certs := pkcs7data.Content.SignedData.Certificates
|
||||
if certs == nil {
|
||||
return nil, rest, errors.New("PKCS #7 structure contains no certificates")
|
||||
}
|
||||
return certs, rest, nil
|
||||
}
|
||||
var certs = []*x509.Certificate{cert}
|
||||
return certs, rest, nil
|
||||
}
|
||||
|
||||
// LoadPEMCertPool loads a pool of PEM certificates from file.
|
||||
func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
|
||||
if certsFile == "" {
|
||||
return nil, nil
|
||||
}
|
||||
pemCerts, err := os.ReadFile(certsFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return PEMToCertPool(pemCerts)
|
||||
}
|
||||
|
||||
// PEMToCertPool converts PEM certificates to a CertPool.
|
||||
func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
|
||||
if len(pemCerts) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
certPool := x509.NewCertPool()
|
||||
if !certPool.AppendCertsFromPEM(pemCerts) {
|
||||
return nil, errors.New("failed to load cert pool")
|
||||
}
|
||||
|
||||
return certPool, nil
|
||||
}
|
||||
|
||||
// ParsePrivateKeyPEM parses and returns a PEM-encoded private
|
||||
// key. The private key may be either an unencrypted PKCS#8, PKCS#1,
|
||||
// or elliptic private key.
|
||||
func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) {
|
||||
return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
|
||||
}
|
||||
|
||||
// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
|
||||
// key. The private key may be a potentially encrypted PKCS#8, PKCS#1,
|
||||
// or elliptic private key.
|
||||
func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) {
|
||||
keyDER, err := GetKeyDERFromPEM(keyPEM, password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ParsePrivateKeyDER(keyDER)
|
||||
}
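// Illustrative usage from an importing package (a sketch; the file name and
// passphrase are hypothetical):
//
//	keyPEM, err := os.ReadFile("testdata/server.key")
//	if err != nil {
//		return err
//	}
//	signer, err := certlib.ParsePrivateKeyPEMWithPassword(keyPEM, []byte("passphrase"))
//	if err != nil {
//		return err
//	}
//	// signer satisfies crypto.Signer and can be handed to tls or x509 helpers.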
|
||||
|
||||
// GetKeyDERFromPEM parses a PEM-encoded private key and returns DER-format key bytes.
|
||||
func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) {
|
||||
// Ignore any EC PARAMETERS blocks when looking for a key (openssl includes
|
||||
// them by default).
|
||||
var keyDER *pem.Block
|
||||
for {
|
||||
keyDER, in = pem.Decode(in)
|
||||
if keyDER == nil || keyDER.Type != "EC PARAMETERS" {
|
||||
break
|
||||
}
|
||||
}
|
||||
if keyDER != nil {
|
||||
if procType, ok := keyDER.Headers["Proc-Type"]; ok {
|
||||
if strings.Contains(procType, "ENCRYPTED") {
|
||||
if password != nil {
|
||||
return x509.DecryptPEMBlock(keyDER, password)
|
||||
}
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, certerr.ErrEncryptedPrivateKey)
|
||||
}
|
||||
}
|
||||
return keyDER.Bytes, nil
|
||||
}
|
||||
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, errors.New("failed to decode private key"))
|
||||
}
|
||||
|
||||
// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
|
||||
func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) {
|
||||
in = bytes.TrimSpace(in)
|
||||
p, rest := pem.Decode(in)
|
||||
if p != nil {
|
||||
if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
|
||||
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, certerr.ErrInvalidPEMType(p.Type, "NEW CERTIFICATE REQUEST", "CERTIFICATE REQUEST"))
|
||||
}
|
||||
|
||||
csr, err = x509.ParseCertificateRequest(p.Bytes)
|
||||
} else {
|
||||
csr, err = x509.ParseCertificateRequest(in)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, rest, err
|
||||
}
|
||||
|
||||
err = csr.CheckSignature()
|
||||
if err != nil {
|
||||
return nil, rest, err
|
||||
}
|
||||
|
||||
return csr, rest, nil
|
||||
}
|
||||
|
||||
// ParseCSRPEM parses a PEM-encoded certificate signing request.
|
||||
// It does not check the signature. This is useful for dumping data from a CSR
|
||||
// locally.
|
||||
func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
|
||||
block, _ := pem.Decode([]byte(csrPEM))
|
||||
if block == nil {
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourceCSR, errors.New("PEM block is empty"))
|
||||
}
|
||||
csrObject, err := x509.ParseCertificateRequest(block.Bytes)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return csrObject, nil
|
||||
}
|
||||
|
||||
// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer.
|
||||
func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
|
||||
switch pub := priv.Public().(type) {
|
||||
case *rsa.PublicKey:
|
||||
bitLength := pub.N.BitLen()
|
||||
switch {
|
||||
case bitLength >= 4096:
|
||||
return x509.SHA512WithRSA
|
||||
case bitLength >= 3072:
|
||||
return x509.SHA384WithRSA
|
||||
case bitLength >= 2048:
|
||||
return x509.SHA256WithRSA
|
||||
default:
|
||||
return x509.SHA1WithRSA
|
||||
}
|
||||
case *ecdsa.PublicKey:
|
||||
switch pub.Curve {
|
||||
case elliptic.P521():
|
||||
return x509.ECDSAWithSHA512
|
||||
case elliptic.P384():
|
||||
return x509.ECDSAWithSHA384
|
||||
case elliptic.P256():
|
||||
return x509.ECDSAWithSHA256
|
||||
default:
|
||||
return x509.ECDSAWithSHA1
|
||||
}
|
||||
default:
|
||||
return x509.UnknownSignatureAlgorithm
|
||||
}
|
||||
}
|
||||
|
||||
// LoadClientCertificate loads a key/certificate pair from PEM files.
|
||||
func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, error) {
|
||||
if certFile != "" && keyFile != "" {
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
return nil, certerr.LoadingError(certerr.ErrorSourceKeypair, err)
|
||||
}
|
||||
return &cert, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// CreateTLSConfig creates a tls.Config object from certs and roots
|
||||
func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Config {
|
||||
var certs []tls.Certificate
|
||||
if cert != nil {
|
||||
certs = []tls.Certificate{*cert}
|
||||
}
|
||||
return &tls.Config{
|
||||
Certificates: certs,
|
||||
RootCAs: remoteCAs,
|
||||
}
|
||||
}
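// A minimal sketch of wiring these helpers together for a mutual-TLS client
// (not part of the original source; the paths and address are hypothetical):
//
//	roots, err := certlib.LoadPEMCertPool("testdata/ca-bundle.pem")
//	if err != nil {
//		return err
//	}
//	clientCert, err := certlib.LoadClientCertificate("client.pem", "client.key")
//	if err != nil {
//		return err
//	}
//	cfg := certlib.CreateTLSConfig(roots, clientCert)
//	conn, err := tls.Dial("tcp", "internal.example.net:8443", cfg)
//	if err != nil {
//		return err
//	}
//	defer conn.Close()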
|
||||
|
||||
// SerializeSCTList serializes a list of SCTs.
|
||||
func SerializeSCTList(sctList []ct.SignedCertificateTimestamp) ([]byte, error) {
|
||||
list := ctx509.SignedCertificateTimestampList{}
|
||||
for _, sct := range sctList {
|
||||
sctBytes, err := cttls.Marshal(sct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
list.SCTList = append(list.SCTList, ctx509.SerializedSCT{Val: sctBytes})
|
||||
}
|
||||
return cttls.Marshal(list)
|
||||
}
|
||||
|
||||
// DeserializeSCTList deserializes a list of SCTs.
|
||||
func DeserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimestamp, error) {
|
||||
var sctList ctx509.SignedCertificateTimestampList
|
||||
rest, err := cttls.Unmarshal(serializedSCTList, &sctList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, errors.New("serialized SCT list contained trailing garbage"))
|
||||
}
|
||||
|
||||
list := make([]ct.SignedCertificateTimestamp, len(sctList.SCTList))
|
||||
for i, serializedSCT := range sctList.SCTList {
|
||||
var sct ct.SignedCertificateTimestamp
|
||||
rest, err := cttls.Unmarshal(serializedSCT.Val, &sct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, errors.New("serialized SCT list contained trailing garbage"))
|
||||
}
|
||||
list[i] = sct
|
||||
}
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// SCTListFromOCSPResponse extracts the SCTList from an ocsp.Response,
|
||||
// returning an empty list if the SCT extension was not found or could not be
|
||||
// unmarshalled.
|
||||
func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTimestamp, error) {
|
||||
// This loop finds the SCTListExtension in the OCSP response.
|
||||
var SCTListExtension, ext pkix.Extension
|
||||
for _, ext = range response.Extensions {
|
||||
// sctExtOid is the ObjectIdentifier of a Signed Certificate Timestamp.
|
||||
sctExtOid := asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 5}
|
||||
if ext.Id.Equal(sctExtOid) {
|
||||
SCTListExtension = ext
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// This code block extracts the sctList from the SCT extension.
|
||||
var sctList []ct.SignedCertificateTimestamp
|
||||
var err error
|
||||
if numBytes := len(SCTListExtension.Value); numBytes != 0 {
|
||||
var serializedSCTList []byte
|
||||
rest := make([]byte, numBytes)
|
||||
copy(rest, SCTListExtension.Value)
|
||||
for len(rest) != 0 {
|
||||
rest, err = asn1.Unmarshal(rest, &serializedSCTList)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, err)
|
||||
}
|
||||
}
|
||||
sctList, err = DeserializeSCTList(serializedSCTList)
|
||||
}
|
||||
return sctList, err
|
||||
}
|
||||
|
||||
// ReadBytes reads a []byte either from a file or an environment variable.
|
||||
// If valFile has a prefix of 'env:', the []byte is read from the environment
|
||||
// using the subsequent name. If the prefix is 'file:' the []byte is read from
|
||||
// the subsequent file. If no prefix is provided, valFile is assumed to be a
|
||||
// file path.
|
||||
func ReadBytes(valFile string) ([]byte, error) {
|
||||
switch splitVal := strings.SplitN(valFile, ":", 2); len(splitVal) {
|
||||
case 1:
|
||||
return os.ReadFile(valFile)
|
||||
case 2:
|
||||
switch splitVal[0] {
|
||||
case "env":
|
||||
return []byte(os.Getenv(splitVal[1])), nil
|
||||
case "file":
|
||||
return os.ReadFile(splitVal[1])
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown prefix: %s", splitVal[0])
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("multiple prefixes: %s",
|
||||
strings.Join(splitVal[:len(splitVal)-1], ", "))
|
||||
}
|
||||
}
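// Examples of the three accepted forms (a sketch; the names and paths are
// hypothetical):
//
//	token, err := ReadBytes("env:API_TOKEN")        // read from the API_TOKEN environment variable
//	caPEM, err := ReadBytes("file:/etc/ssl/ca.pem") // read from an explicit file: path
//	raw, err := ReadBytes("/etc/ssl/ca.pem")        // no prefix: treated as a file path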
|
|
@ -0,0 +1,220 @@
|
|||
package pkcs7
|
||||
|
||||
// Originally from CFSSL, and licensed under:
|
||||
|
||||
/*
|
||||
Copyright (c) 2014 CloudFlare Inc.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
// I've modified it for use in my own code e.g. by removing the CFSSL errors
|
||||
// and replacing them with sane ones.
|
||||
|
||||
// Package pkcs7 implements the subset of the CMS PKCS #7 datatype that is typically
|
||||
// used to package certificates and CRLs. Using openssl, every certificate converted
|
||||
// to PKCS #7 format from another encoding such as PEM conforms to this implementation.
|
||||
// reference: https://www.openssl.org/docs/man1.1.0/apps/crl2pkcs7.html
|
||||
//
|
||||
// PKCS #7 Data type, reference: https://tools.ietf.org/html/rfc2315
|
||||
//
|
||||
// The full pkcs#7 cryptographic message syntax allows for cryptographic enhancements,
|
||||
// for example data can be encrypted and signed and then packaged through pkcs#7 to be
|
||||
// sent over a network and then verified and decrypted. It is asn1, and the type of
|
||||
// PKCS #7 ContentInfo, which comprises the PKCS #7 structure, is:
|
||||
//
|
||||
// ContentInfo ::= SEQUENCE {
|
||||
// contentType ContentType,
|
||||
// content [0] EXPLICIT ANY DEFINED BY contentType OPTIONAL
|
||||
// }
|
||||
//
|
||||
// There are 6 possible ContentTypes, data, signedData, envelopedData,
|
||||
// signedAndEnvelopedData, digestedData, and encryptedData. Here signedData, Data, and
|
||||
// encryptedData are implemented, as the degenerate case of signedData without a signature is the typical
|
||||
// format for transferring certificates and CRLs, and Data and encryptedData are used in PKCS #12
|
||||
// formats.
|
||||
// The ContentType signedData has the form:
|
||||
//
|
||||
// signedData ::= SEQUENCE {
|
||||
// version Version,
|
||||
// digestAlgorithms DigestAlgorithmIdentifiers,
|
||||
// contentInfo ContentInfo,
|
||||
// certificates [0] IMPLICIT ExtendedCertificatesAndCertificates OPTIONAL
|
||||
// crls [1] IMPLICIT CertificateRevocationLists OPTIONAL,
|
||||
// signerInfos SignerInfos
|
||||
// }
|
||||
//
|
||||
// As of yet signerInfos and digestAlgorithms are not parsed, as they are not relevant to
|
||||
// this system's use of PKCS #7 data. Version is an integer type, note that PKCS #7 is
|
||||
// recursive; this second layer of ContentInfo is similarly ignored for our degenerate
|
||||
// usage. The ExtendedCertificatesAndCertificates type consists of a sequence of choices
|
||||
// between PKCS #6 extended certificates and x509 certificates. Any sequence consisting
|
||||
// of any number of extended certificates is not yet supported in this implementation.
|
||||
//
|
||||
// The ContentType Data is simply a raw octet string and is parsed directly into a Go []byte slice.
|
||||
//
|
||||
// The ContentType encryptedData is the most complicated and its form can be gathered by
|
||||
// the go type below. It essentially contains a raw octet string of encrypted data and an
|
||||
// algorithm identifier for use in decrypting this data.
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||
)
|
||||
|
||||
// Types used for asn1 Unmarshaling.
|
||||
|
||||
type signedData struct {
|
||||
Version int
|
||||
DigestAlgorithms asn1.RawValue
|
||||
ContentInfo asn1.RawValue
|
||||
Certificates asn1.RawValue `asn1:"optional" asn1:"tag:0"`
|
||||
Crls asn1.RawValue `asn1:"optional"`
|
||||
SignerInfos asn1.RawValue
|
||||
}
|
||||
|
||||
type initPKCS7 struct {
|
||||
Raw asn1.RawContent
|
||||
ContentType asn1.ObjectIdentifier
|
||||
Content asn1.RawValue `asn1:"tag:0,explicit,optional"`
|
||||
}
|
||||
|
||||
// Object identifier strings of the three implemented PKCS7 types.
|
||||
const (
|
||||
ObjIDData = "1.2.840.113549.1.7.1"
|
||||
ObjIDSignedData = "1.2.840.113549.1.7.2"
|
||||
ObjIDEncryptedData = "1.2.840.113549.1.7.6"
|
||||
)
|
||||
|
||||
// PKCS7 represents the ASN1 PKCS #7 Content type. It contains one of three
|
||||
// possible types of Content objects, as denoted by the object identifier in
|
||||
// the ContentInfo field, the other two being nil. SignedData
|
||||
// is the degenerate SignedData Content info without signature used
|
||||
// to hold certificates and crls. Data is raw bytes, and EncryptedData
|
||||
// is as defined in PKCS #7 standard.
|
||||
type PKCS7 struct {
|
||||
Raw asn1.RawContent
|
||||
ContentInfo string
|
||||
Content Content
|
||||
}
|
||||
|
||||
// Content implements three of the six possible PKCS7 data types. Only one is non-nil.
|
||||
type Content struct {
|
||||
Data []byte
|
||||
SignedData SignedData
|
||||
EncryptedData EncryptedData
|
||||
}
|
||||
|
||||
// SignedData defines the typical carrier of certificates and crls.
|
||||
type SignedData struct {
|
||||
Raw asn1.RawContent
|
||||
Version int
|
||||
Certificates []*x509.Certificate
|
||||
Crl *x509.RevocationList
|
||||
}
|
||||
|
||||
// Data contains raw bytes. Used as a subtype in PKCS12.
|
||||
type Data struct {
|
||||
Bytes []byte
|
||||
}
|
||||
|
||||
// EncryptedData contains encrypted data. Used as a subtype in PKCS12.
|
||||
type EncryptedData struct {
|
||||
Raw asn1.RawContent
|
||||
Version int
|
||||
EncryptedContentInfo EncryptedContentInfo
|
||||
}
|
||||
|
||||
// EncryptedContentInfo is a subtype of PKCS7EncryptedData.
|
||||
type EncryptedContentInfo struct {
|
||||
Raw asn1.RawContent
|
||||
ContentType asn1.ObjectIdentifier
|
||||
ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
|
||||
EncryptedContent []byte `asn1:"tag:0,optional"`
|
||||
}
|
||||
|
||||
// ParsePKCS7 attempts to parse the DER encoded bytes of a
|
||||
// PKCS7 structure.
|
||||
func ParsePKCS7(raw []byte) (msg *PKCS7, err error) {
|
||||
|
||||
var pkcs7 initPKCS7
|
||||
_, err = asn1.Unmarshal(raw, &pkcs7)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
|
||||
msg = new(PKCS7)
|
||||
msg.Raw = pkcs7.Raw
|
||||
msg.ContentInfo = pkcs7.ContentType.String()
|
||||
switch {
|
||||
case msg.ContentInfo == ObjIDData:
|
||||
msg.ContentInfo = "Data"
|
||||
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &msg.Content.Data)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
case msg.ContentInfo == ObjIDSignedData:
|
||||
msg.ContentInfo = "SignedData"
|
||||
var signedData signedData
|
||||
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &signedData)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
if len(signedData.Certificates.Bytes) != 0 {
|
||||
msg.Content.SignedData.Certificates, err = x509.ParseCertificates(signedData.Certificates.Bytes)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
}
|
||||
if len(signedData.Crls.Bytes) != 0 {
|
||||
msg.Content.SignedData.Crl, err = x509.ParseRevocationList(signedData.Crls.Bytes)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
}
|
||||
msg.Content.SignedData.Version = signedData.Version
|
||||
msg.Content.SignedData.Raw = pkcs7.Content.Bytes
|
||||
case msg.ContentInfo == ObjIDEncryptedData:
|
||||
msg.ContentInfo = "EncryptedData"
|
||||
var encryptedData EncryptedData
|
||||
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &encryptedData)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
if encryptedData.Version != 0 {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("only PKCS #7 encryptedData version 0 is supported"))
|
||||
}
|
||||
msg.Content.EncryptedData = encryptedData
|
||||
|
||||
default:
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("only PKCS #7 content of type data, signed data or encrypted data can be parsed"))
|
||||
}
|
||||
|
||||
return msg, nil
|
||||
|
||||
}
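// Illustrative usage from an importing package (a sketch; derBytes is a
// hypothetical variable and must hold DER, not PEM):
//
//	msg, err := pkcs7.ParsePKCS7(derBytes)
//	if err != nil {
//		return err
//	}
//	if msg.ContentInfo == "SignedData" {
//		for _, cert := range msg.Content.SignedData.Certificates {
//			fmt.Println(cert.Subject.CommonName)
//		}
//	}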
|
|
@ -0,0 +1,363 @@
|
|||
// Package revoke provides functionality for checking the validity of
|
||||
// a cert. Specifically, the temporal validity of the certificate is
|
||||
// checked first, then any CRL and OCSP URLs in the cert are checked.
|
||||
package revoke
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
neturl "net/url"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/log"
|
||||
"golang.org/x/crypto/ocsp"
|
||||
)
|
||||
|
||||
// Originally from CFSSL (and mostly written by me in the first place), licensed under:
|
||||
|
||||
/*
|
||||
Copyright (c) 2014 CloudFlare Inc.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
// I've modified it for use in my own code e.g. by removing the CFSSL errors
|
||||
// and replacing them with sane ones.
|
||||
|
||||
// HTTPClient is an instance of http.Client that will be used for all HTTP requests.
|
||||
var HTTPClient = http.DefaultClient
|
||||
|
||||
// HardFail determines whether the failure to check the revocation
|
||||
// status of a certificate (e.g. due to network failure) causes
|
||||
// verification to fail (a hard failure).
|
||||
var HardFail = false
|
||||
|
||||
// CRLSet associates a PKIX certificate list with the URL the CRL is
|
||||
// fetched from.
|
||||
var CRLSet = map[string]*x509.RevocationList{}
|
||||
var crlLock = new(sync.Mutex)
|
||||
|
||||
// We can't handle LDAP certificates, so this checks to see if the
|
||||
// URL string points to an LDAP resource so that we can ignore it.
|
||||
func ldapURL(url string) bool {
|
||||
u, err := neturl.Parse(url)
|
||||
if err != nil {
|
||||
log.Warningf("error parsing url %s: %v", url, err)
|
||||
return false
|
||||
}
|
||||
if u.Scheme == "ldap" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// revCheck should check the certificate for any revocations. It
|
||||
// returns a pair of booleans: the first indicates whether the certificate
|
||||
// is revoked, the second indicates whether the revocations were
|
||||
// successfully checked. This leads to the following combinations:
|
||||
//
|
||||
// - false, false: an error was encountered while checking revocations.
|
||||
// - false, true: the certificate was checked successfully, and it is not revoked.
|
||||
// - true, true: the certificate was checked successfully, and it is revoked.
|
||||
// - true, false: failure to check revocation status causes verification to fail
|
||||
func revCheck(cert *x509.Certificate) (revoked, ok bool, err error) {
|
||||
for _, url := range cert.CRLDistributionPoints {
|
||||
if ldapURL(url) {
|
||||
log.Infof("skipping LDAP CRL: %s", url)
|
||||
continue
|
||||
}
|
||||
|
||||
if revoked, ok, err := certIsRevokedCRL(cert, url); !ok {
|
||||
log.Warning("error checking revocation via CRL")
|
||||
if HardFail {
|
||||
return true, false, err
|
||||
}
|
||||
return false, false, err
|
||||
} else if revoked {
|
||||
log.Info("certificate is revoked via CRL")
|
||||
return true, true, err
|
||||
}
|
||||
}
|
||||
|
||||
if revoked, ok, err := certIsRevokedOCSP(cert, HardFail); !ok {
|
||||
log.Warning("error checking revocation via OCSP")
|
||||
if HardFail {
|
||||
return true, false, err
|
||||
}
|
||||
return false, false, err
|
||||
} else if revoked {
|
||||
log.Info("certificate is revoked via OCSP")
|
||||
return true, true, err
|
||||
}
|
||||
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
// fetchCRL fetches and parses a CRL.
|
||||
func fetchCRL(url string) (*x509.RevocationList, error) {
|
||||
resp, err := HTTPClient.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode >= 300 {
|
||||
return nil, errors.New("failed to retrieve CRL")
|
||||
}
|
||||
|
||||
body, err := crlRead(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x509.ParseRevocationList(body)
|
||||
}
|
||||
|
||||
func getIssuer(cert *x509.Certificate) *x509.Certificate {
|
||||
var issuer *x509.Certificate
|
||||
var err error
|
||||
for _, issuingCert := range cert.IssuingCertificateURL {
|
||||
issuer, err = fetchRemote(issuingCert)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
return issuer
|
||||
|
||||
}
|
||||
|
||||
// check a cert against a specific CRL. Returns the same bool pair
|
||||
// as revCheck, plus an error if one occurred.
|
||||
func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err error) {
|
||||
crlLock.Lock()
|
||||
crl, ok := CRLSet[url]
|
||||
if ok && crl == nil {
|
||||
ok = false
|
||||
delete(CRLSet, url)
|
||||
}
|
||||
crlLock.Unlock()
|
||||
|
||||
var shouldFetchCRL = true
|
||||
if ok {
|
||||
if time.Now().After(crl.ThisUpdate) {
|
||||
shouldFetchCRL = false
|
||||
}
|
||||
}
|
||||
|
||||
issuer := getIssuer(cert)
|
||||
|
||||
if shouldFetchCRL {
|
||||
var err error
|
||||
crl, err = fetchCRL(url)
|
||||
if err != nil {
|
||||
log.Warningf("failed to fetch CRL: %v", err)
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
// check CRL signature
|
||||
if issuer != nil {
|
||||
err = crl.CheckSignatureFrom(issuer)
|
||||
if err != nil {
|
||||
log.Warningf("failed to verify CRL: %v", err)
|
||||
return false, false, err
|
||||
}
|
||||
}
|
||||
|
||||
crlLock.Lock()
|
||||
CRLSet[url] = crl
|
||||
crlLock.Unlock()
|
||||
}
|
||||
|
||||
for _, revoked := range crl.RevokedCertificates {
|
||||
if cert.SerialNumber.Cmp(revoked.SerialNumber) == 0 {
|
||||
log.Info("Serial number match: intermediate is revoked.")
|
||||
return true, true, err
|
||||
}
|
||||
}
|
||||
|
||||
return false, true, err
|
||||
}
|
||||
|
||||
// VerifyCertificate ensures that the certificate passed in hasn't
|
||||
// expired and checks any CRL and OCSP URLs in the certificate.
|
||||
func VerifyCertificate(cert *x509.Certificate) (revoked, ok bool) {
|
||||
revoked, ok, _ = VerifyCertificateError(cert)
|
||||
return revoked, ok
|
||||
}
|
||||
|
||||
// VerifyCertificateError ensures that the certificate passed in hasn't
|
||||
// expired and checks any CRL and OCSP URLs in the certificate.
|
||||
func VerifyCertificateError(cert *x509.Certificate) (revoked, ok bool, err error) {
|
||||
if !time.Now().Before(cert.NotAfter) {
|
||||
msg := fmt.Sprintf("Certificate expired %s\n", cert.NotAfter)
|
||||
log.Info(msg)
|
||||
return true, true, fmt.Errorf(msg)
|
||||
} else if !time.Now().After(cert.NotBefore) {
|
||||
msg := fmt.Sprintf("Certificate isn't valid until %s\n", cert.NotBefore)
|
||||
log.Info(msg)
|
||||
return true, true, fmt.Errorf(msg)
|
||||
}
|
||||
return revCheck(cert)
|
||||
}
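// How a caller typically interprets the three return values (a sketch, not
// part of the original source):
//
//	revoked, ok, err := revoke.VerifyCertificateError(cert)
//	switch {
//	case !ok:
//		// Revocation status could not be determined; err says why. Under
//		// HardFail this should be treated as a verification failure.
//	case revoked:
//		// The certificate is expired or has been revoked.
//	default:
//		// The certificate was checked successfully and is not revoked.
//	}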
|
||||
|
||||
func fetchRemote(url string) (*x509.Certificate, error) {
|
||||
resp, err := HTTPClient.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
in, err := remoteRead(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p, _ := pem.Decode(in)
|
||||
if p != nil {
|
||||
return certlib.ParseCertificatePEM(in)
|
||||
}
|
||||
|
||||
return x509.ParseCertificate(in)
|
||||
}
|
||||
|
||||
var ocspOpts = ocsp.RequestOptions{
|
||||
Hash: crypto.SHA1,
|
||||
}
|
||||
|
||||
func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e error) {
|
||||
var err error
|
||||
|
||||
ocspURLs := leaf.OCSPServer
|
||||
if len(ocspURLs) == 0 {
|
||||
// OCSP not enabled for this certificate.
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
issuer := getIssuer(leaf)
|
||||
|
||||
if issuer == nil {
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
ocspRequest, err := ocsp.CreateRequest(leaf, issuer, &ocspOpts)
|
||||
if err != nil {
|
||||
return revoked, ok, err
|
||||
}
|
||||
|
||||
for _, server := range ocspURLs {
|
||||
resp, err := sendOCSPRequest(server, ocspRequest, leaf, issuer)
|
||||
if err != nil {
|
||||
if strict {
|
||||
return revoked, ok, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// There wasn't an error fetching the OCSP status.
|
||||
ok = true
|
||||
|
||||
if resp.Status != ocsp.Good {
|
||||
// The certificate was revoked.
|
||||
revoked = true
|
||||
}
|
||||
|
||||
return revoked, ok, err
|
||||
}
|
||||
return revoked, ok, err
|
||||
}
|
||||
|
||||
// sendOCSPRequest attempts to request an OCSP response from the
|
||||
// server. The error only indicates a failure to *fetch* the
|
||||
// certificate, and *does not* mean the certificate is valid.
|
||||
func sendOCSPRequest(server string, req []byte, leaf, issuer *x509.Certificate) (*ocsp.Response, error) {
|
||||
var resp *http.Response
|
||||
var err error
|
||||
if len(req) > 256 {
|
||||
buf := bytes.NewBuffer(req)
|
||||
resp, err = HTTPClient.Post(server, "application/ocsp-request", buf)
|
||||
} else {
|
||||
reqURL := server + "/" + neturl.QueryEscape(base64.StdEncoding.EncodeToString(req))
|
||||
resp, err = HTTPClient.Get(reqURL)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, errors.New("failed to retrieve OSCP")
|
||||
}
|
||||
|
||||
body, err := ocspRead(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case bytes.Equal(body, ocsp.UnauthorizedErrorResponse):
|
||||
return nil, errors.New("OSCP unauthorized")
|
||||
case bytes.Equal(body, ocsp.MalformedRequestErrorResponse):
|
||||
return nil, errors.New("OSCP malformed")
|
||||
case bytes.Equal(body, ocsp.InternalErrorErrorResponse):
|
||||
return nil, errors.New("OSCP internal error")
|
||||
case bytes.Equal(body, ocsp.TryLaterErrorResponse):
|
||||
return nil, errors.New("OSCP try later")
|
||||
case bytes.Equal(body, ocsp.SigRequredErrorResponse):
|
||||
return nil, errors.New("OSCP signature required")
|
||||
}
|
||||
|
||||
return ocsp.ParseResponseForCert(body, leaf, issuer)
|
||||
}
|
||||
|
||||
var crlRead = io.ReadAll
|
||||
|
||||
// SetCRLFetcher sets the function used to read from the HTTP response body.
|
||||
func SetCRLFetcher(fn func(io.Reader) ([]byte, error)) {
|
||||
crlRead = fn
|
||||
}
|
||||
|
||||
var remoteRead = io.ReadAll
|
||||
|
||||
// SetRemoteFetcher sets the function used to read from the HTTP response body.
|
||||
func SetRemoteFetcher(fn func(io.Reader) ([]byte, error)) {
|
||||
remoteRead = fn
|
||||
}
|
||||
|
||||
var ocspRead = io.ReadAll
|
||||
|
||||
// SetOCSPFetcher sets the function used to read from the HTTP response body.
|
||||
func SetOCSPFetcher(fn func(io.Reader) ([]byte, error)) {
|
||||
ocspRead = fn
|
||||
}
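// A sketch of overriding one of the default fetchers, for example to cap how
// much of a response body is read (the 1 MiB limit is an arbitrary choice):
//
//	revoke.SetCRLFetcher(func(r io.Reader) ([]byte, error) {
//		return io.ReadAll(io.LimitReader(r, 1<<20))
//	})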
|
|
@ -0,0 +1,262 @@
|
|||
package revoke
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Originally from CFSSL (and mostly written by me in the first place), licensed under:
|
||||
|
||||
/*
|
||||
Copyright (c) 2014 CloudFlare Inc.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
// I've modified it for use in my own code e.g. by removing the CFSSL errors
|
||||
// and replacing them with sane ones.
|
||||
|
||||
// The first three test cases represent known revoked, expired, and good
|
||||
// certificates that were checked on the date listed in the log. The
|
||||
// good certificate will eventually need to be replaced in year 2029.
|
||||
|
||||
// If there is a soft-fail, the test will pass to mimic the default
|
||||
// behaviour used in this software. However, it will print a warning
|
||||
// to indicate that this is the case.
|
||||
|
||||
// 2014/05/22 14:18:17 Certificate expired 2014-04-04 14:14:20 +0000 UTC
|
||||
// 2014/05/22 14:18:17 Revoked certificate: misc/intermediate_ca/ActalisServerAuthenticationCA.crt
|
||||
var expiredCert = mustParse(`-----BEGIN CERTIFICATE-----
|
||||
MIIEXTCCA8agAwIBAgIEBycURTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQGEwJV
|
||||
UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU
|
||||
cnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds
|
||||
b2JhbCBSb290MB4XDTA3MDQwNDE0MTUxNFoXDTE0MDQwNDE0MTQyMFowejELMAkG
|
||||
A1UEBhMCSVQxFzAVBgNVBAoTDkFjdGFsaXMgUy5wLkEuMScwJQYDVQQLEx5DZXJ0
|
||||
aWZpY2F0aW9uIFNlcnZpY2UgUHJvdmlkZXIxKTAnBgNVBAMTIEFjdGFsaXMgU2Vy
|
||||
dmVyIEF1dGhlbnRpY2F0aW9uIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAv6P0bhXbUQkVW8ox0HJ+sP5+j6pTwS7yg/wGEUektB/G1duQiT1v21fo
|
||||
LANr6F353jILQDCpHIfal3MhbSsHEMKU7XaqsyLWV93bcIKbIloS/eXDfkog6KB3
|
||||
u0JHgrtNz584Jg/OLm9feffNbCJ38TiLo0/UWkAQ6PQWaOwZEgyKjVI5F3swoTB3
|
||||
g0LZAzegvkU00Kfp13cSg+cJeU4SajwtfQ+g6s6dlaekaHy/0ef46PfiHHRuhEhE
|
||||
JWIpDtUN2ywTT33MSSUe5glDIiXYfcamJQrebzGsHEwyqI195Yaxb+FLNND4n3HM
|
||||
e7EI2OrLyT+r/WMvQbl+xNihwtv+HwIDAQABo4IBbzCCAWswEgYDVR0TAQH/BAgw
|
||||
BgEB/wIBADBTBgNVHSAETDBKMEgGCSsGAQQBsT4BADA7MDkGCCsGAQUFBwIBFi1o
|
||||
dHRwOi8vd3d3LnB1YmxpYy10cnVzdC5jb20vQ1BTL09tbmlSb290Lmh0bWwwDgYD
|
||||
VR0PAQH/BAQDAgEGMIGJBgNVHSMEgYEwf6F5pHcwdTELMAkGA1UEBhMCVVMxGDAW
|
||||
BgNVBAoTD0dURSBDb3Jwb3JhdGlvbjEnMCUGA1UECxMeR1RFIEN5YmVyVHJ1c3Qg
|
||||
U29sdXRpb25zLCBJbmMuMSMwIQYDVQQDExpHVEUgQ3liZXJUcnVzdCBHbG9iYWwg
|
||||
Um9vdIICAaUwRQYDVR0fBD4wPDA6oDigNoY0aHR0cDovL3d3dy5wdWJsaWMtdHJ1
|
||||
c3QuY29tL2NnaS1iaW4vQ1JMLzIwMTgvY2RwLmNybDAdBgNVHQ4EFgQUpi6OuXYt
|
||||
oxHC3cTezVLuraWpAFEwDQYJKoZIhvcNAQEFBQADgYEAAtjJBwjsvw7DBs+v7BQz
|
||||
gSGeg6nbYUuPL7+1driT5XsUKJ7WZjiwW2zW/WHZ+zGo1Ev8Dc574RpSrg/EIlfH
|
||||
TpBiBuFgiKtJksKdoxPZGSI8FitwcgeW+y8wotmm0CtDzWN27g2kfSqHb5eHfZY5
|
||||
sESPRwHkcMUNdAp37FLweUw=
|
||||
-----END CERTIFICATE-----`)
|
||||
|
||||
// 2014/05/22 14:18:31 Serial number match: intermediate is revoked.
|
||||
// 2014/05/22 14:18:31 certificate is revoked via CRL
|
||||
// 2014/05/22 14:18:31 Revoked certificate: misc/intermediate_ca/MobileArmorEnterpriseCA.crt
|
||||
var revokedCert = mustParse(`-----BEGIN CERTIFICATE-----
|
||||
MIIEEzCCAvugAwIBAgILBAAAAAABGMGjftYwDQYJKoZIhvcNAQEFBQAwcTEoMCYG
|
||||
A1UEAxMfR2xvYmFsU2lnbiBSb290U2lnbiBQYXJ0bmVycyBDQTEdMBsGA1UECxMU
|
||||
Um9vdFNpZ24gUGFydG5lcnMgQ0ExGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2Ex
|
||||
CzAJBgNVBAYTAkJFMB4XDTA4MDMxODEyMDAwMFoXDTE4MDMxODEyMDAwMFowJTEj
|
||||
MCEGA1UEAxMaTW9iaWxlIEFybW9yIEVudGVycHJpc2UgQ0EwggEiMA0GCSqGSIb3
|
||||
DQEBAQUAA4IBDwAwggEKAoIBAQCaEjeDR73jSZVlacRn5bc5VIPdyouHvGIBUxyS
|
||||
C6483HgoDlWrWlkEndUYFjRPiQqJFthdJxfglykXD+btHixMIYbz/6eb7hRTdT9w
|
||||
HKsfH+wTBIdb5AZiNjkg3QcCET5HfanJhpREjZWP513jM/GSrG3VwD6X5yttCIH1
|
||||
NFTDAr7aqpW/UPw4gcPfkwS92HPdIkb2DYnsqRrnKyNValVItkxJiotQ1HOO3YfX
|
||||
ivGrHIbJdWYg0rZnkPOgYF0d+aIA4ZfwvdW48+r/cxvLevieuKj5CTBZZ8XrFt8r
|
||||
JTZhZljbZvnvq/t6ZIzlwOj082f+lTssr1fJ3JsIPnG2lmgTAgMBAAGjgfcwgfQw
|
||||
DgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFIZw
|
||||
ns4uzXdLX6xDRXUzFgZxWM7oME0GA1UdIARGMEQwQgYJKwYBBAGgMgE8MDUwMwYI
|
||||
KwYBBQUHAgIwJxolaHR0cDovL3d3dy5nbG9iYWxzaWduLmNvbS9yZXBvc2l0b3J5
|
||||
LzA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmdsb2JhbHNpZ24ubmV0L1Jv
|
||||
b3RTaWduUGFydG5lcnMuY3JsMB8GA1UdIwQYMBaAFFaE7LVxpedj2NtRBNb65vBI
|
||||
UknOMA0GCSqGSIb3DQEBBQUAA4IBAQBZvf+2xUJE0ekxuNk30kPDj+5u9oI3jZyM
|
||||
wvhKcs7AuRAbcxPtSOnVGNYl8By7DPvPun+U3Yci8540y143RgD+kz3jxIBaoW/o
|
||||
c4+X61v6DBUtcBPEt+KkV6HIsZ61SZmc/Y1I2eoeEt6JYoLjEZMDLLvc1cK/+wpg
|
||||
dUZSK4O9kjvIXqvsqIOlkmh/6puSugTNao2A7EIQr8ut0ZmzKzMyZ0BuQhJDnAPd
|
||||
Kz5vh+5tmytUPKA8hUgmLWe94lMb7Uqq2wgZKsqun5DAWleKu81w7wEcOrjiiB+x
|
||||
jeBHq7OnpWm+ccTOPCE6H4ZN4wWVS7biEBUdop/8HgXBPQHWAdjL
|
||||
-----END CERTIFICATE-----`)
|
||||
|
||||
// A Comodo intermediate CA certificate with issuer url, CRL url and OCSP url
|
||||
var goodComodoCA = (`-----BEGIN CERTIFICATE-----
|
||||
MIIGCDCCA/CgAwIBAgIQKy5u6tl1NmwUim7bo3yMBzANBgkqhkiG9w0BAQwFADCB
|
||||
hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
|
||||
A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV
|
||||
BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTQwMjEy
|
||||
MDAwMDAwWhcNMjkwMjExMjM1OTU5WjCBkDELMAkGA1UEBhMCR0IxGzAZBgNVBAgT
|
||||
EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
|
||||
Q09NT0RPIENBIExpbWl0ZWQxNjA0BgNVBAMTLUNPTU9ETyBSU0EgRG9tYWluIFZh
|
||||
bGlkYXRpb24gU2VjdXJlIFNlcnZlciBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP
|
||||
ADCCAQoCggEBAI7CAhnhoFmk6zg1jSz9AdDTScBkxwtiBUUWOqigwAwCfx3M28Sh
|
||||
bXcDow+G+eMGnD4LgYqbSRutA776S9uMIO3Vzl5ljj4Nr0zCsLdFXlIvNN5IJGS0
|
||||
Qa4Al/e+Z96e0HqnU4A7fK31llVvl0cKfIWLIpeNs4TgllfQcBhglo/uLQeTnaG6
|
||||
ytHNe+nEKpooIZFNb5JPJaXyejXdJtxGpdCsWTWM/06RQ1A/WZMebFEh7lgUq/51
|
||||
UHg+TLAchhP6a5i84DuUHoVS3AOTJBhuyydRReZw3iVDpA3hSqXttn7IzW3uLh0n
|
||||
c13cRTCAquOyQQuvvUSH2rnlG51/ruWFgqUCAwEAAaOCAWUwggFhMB8GA1UdIwQY
|
||||
MBaAFLuvfgI9+qbxPISOre44mOzZMjLUMB0GA1UdDgQWBBSQr2o6lFoL2JDqElZz
|
||||
30O0Oija5zAOBgNVHQ8BAf8EBAMCAYYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNV
|
||||
HSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwGwYDVR0gBBQwEjAGBgRVHSAAMAgG
|
||||
BmeBDAECATBMBgNVHR8ERTBDMEGgP6A9hjtodHRwOi8vY3JsLmNvbW9kb2NhLmNv
|
||||
bS9DT01PRE9SU0FDZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDBxBggrBgEFBQcB
|
||||
AQRlMGMwOwYIKwYBBQUHMAKGL2h0dHA6Ly9jcnQuY29tb2RvY2EuY29tL0NPTU9E
|
||||
T1JTQUFkZFRydXN0Q0EuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21v
|
||||
ZG9jYS5jb20wDQYJKoZIhvcNAQEMBQADggIBAE4rdk+SHGI2ibp3wScF9BzWRJ2p
|
||||
mj6q1WZmAT7qSeaiNbz69t2Vjpk1mA42GHWx3d1Qcnyu3HeIzg/3kCDKo2cuH1Z/
|
||||
e+FE6kKVxF0NAVBGFfKBiVlsit2M8RKhjTpCipj4SzR7JzsItG8kO3KdY3RYPBps
|
||||
P0/HEZrIqPW1N+8QRcZs2eBelSaz662jue5/DJpmNXMyYE7l3YphLG5SEXdoltMY
|
||||
dVEVABt0iN3hxzgEQyjpFv3ZBdRdRydg1vs4O2xyopT4Qhrf7W8GjEXCBgCq5Ojc
|
||||
2bXhc3js9iPc0d1sjhqPpepUfJa3w/5Vjo1JXvxku88+vZbrac2/4EjxYoIQ5QxG
|
||||
V/Iz2tDIY+3GH5QFlkoakdH368+PUq4NCNk+qKBR6cGHdNXJ93SrLlP7u3r7l+L4
|
||||
HyaPs9Kg4DdbKDsx5Q5XLVq4rXmsXiBmGqW5prU5wfWYQ//u+aen/e7KJD2AFsQX
|
||||
j4rBYKEMrltDR5FL1ZoXX/nUh8HCjLfn4g8wGTeGrODcQgPmlKidrv0PJFGUzpII
|
||||
0fxQ8ANAe4hZ7Q7drNJ3gjTcBpUC2JD5Leo31Rpg0Gcg19hCC0Wvgmje3WYkN5Ap
|
||||
lBlGGSW4gNfL1IYoakRwJiNiqZ+Gb7+6kHDSVneFeO/qJakXzlByjAA6quPbYzSf
|
||||
+AZxAeKCINT+b72x
|
||||
-----END CERTIFICATE-----`)
|
||||
|
||||
var goodCert = mustParse(goodComodoCA)
|
||||
|
||||
func mustParse(pemData string) *x509.Certificate {
|
||||
block, _ := pem.Decode([]byte(pemData))
|
||||
if block == nil {
|
||||
panic("Invalid PEM data.")
|
||||
} else if block.Type != "CERTIFICATE" {
|
||||
panic("Invalid PEM type.")
|
||||
}
|
||||
|
||||
cert, err := x509.ParseCertificate([]byte(block.Bytes))
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
return cert
|
||||
}
|
||||
|
||||
func TestRevoked(t *testing.T) {
|
||||
if revoked, ok := VerifyCertificate(revokedCert); !ok {
|
||||
fmt.Fprintf(os.Stderr, "Warning: soft fail checking revocation")
|
||||
} else if !revoked {
|
||||
t.Fatalf("revoked certificate should have been marked as revoked")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExpired(t *testing.T) {
|
||||
if revoked, ok := VerifyCertificate(expiredCert); !ok {
|
||||
fmt.Fprintf(os.Stderr, "Warning: soft fail checking revocation")
|
||||
} else if !revoked {
|
||||
t.Fatalf("expired certificate should have been marked as revoked")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGood(t *testing.T) {
|
||||
if revoked, ok := VerifyCertificate(goodCert); !ok {
|
||||
fmt.Fprintf(os.Stderr, "Warning: soft fail checking revocation")
|
||||
} else if revoked {
|
||||
t.Fatalf("good certificate should not have been marked as revoked")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestLdap(t *testing.T) {
|
||||
ldapCert := mustParse(goodComodoCA)
|
||||
ldapCert.CRLDistributionPoints = append(ldapCert.CRLDistributionPoints, "ldap://myldap.example.com")
|
||||
if revoked, ok := VerifyCertificate(ldapCert); revoked || !ok {
|
||||
t.Fatalf("ldap certificate should have been recognized")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLdapURLErr(t *testing.T) {
|
||||
if ldapURL(":") {
|
||||
t.Fatalf("bad url does not cause error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCertNotYetValid(t *testing.T) {
|
||||
notReadyCert := expiredCert
|
||||
notReadyCert.NotBefore = time.Date(3000, time.January, 1, 1, 1, 1, 1, time.Local)
|
||||
notReadyCert.NotAfter = time.Date(3005, time.January, 1, 1, 1, 1, 1, time.Local)
|
||||
if revoked, _ := VerifyCertificate(notReadyCert); !revoked {
|
||||
t.Fatalf("not yet verified certificate should have been marked as revoked")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCRLFetchError(t *testing.T) {
|
||||
ldapCert := mustParse(goodComodoCA)
|
||||
ldapCert.CRLDistributionPoints[0] = ""
|
||||
if revoked, ok := VerifyCertificate(ldapCert); ok || revoked {
|
||||
t.Fatalf("Fetching error not encountered")
|
||||
}
|
||||
HardFail = true
|
||||
if revoked, ok := VerifyCertificate(ldapCert); ok || !revoked {
|
||||
t.Fatalf("Fetching error not encountered, hardfail not registered")
|
||||
}
|
||||
HardFail = false
|
||||
}
|
||||
|
||||
func TestBadCRLSet(t *testing.T) {
|
||||
ldapCert := mustParse(goodComodoCA)
|
||||
ldapCert.CRLDistributionPoints[0] = ""
|
||||
CRLSet[""] = nil
|
||||
certIsRevokedCRL(ldapCert, "")
|
||||
if _, ok := CRLSet[""]; ok {
|
||||
t.Fatalf("key emptystring should be deleted from CRLSet")
|
||||
}
|
||||
delete(CRLSet, "")
|
||||
|
||||
}
|
||||
|
||||
func TestCachedCRLSet(t *testing.T) {
|
||||
VerifyCertificate(goodCert)
|
||||
if revoked, ok := VerifyCertificate(goodCert); !ok || revoked {
|
||||
t.Fatalf("Previously fetched CRL's should be read smoothly and unrevoked")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoteFetchError(t *testing.T) {
|
||||
|
||||
badurl := ":"
|
||||
|
||||
if _, err := fetchRemote(badurl); err == nil {
|
||||
t.Fatalf("fetching bad url should result in non-nil error")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestNoOCSPServers(t *testing.T) {
|
||||
badIssuer := goodCert
|
||||
badIssuer.IssuingCertificateURL = []string{" "}
|
||||
certIsRevokedOCSP(badIssuer, true)
|
||||
noOCSPCert := goodCert
|
||||
noOCSPCert.OCSPServer = make([]string, 0)
|
||||
if revoked, ok, _ := certIsRevokedOCSP(noOCSPCert, true); revoked || !ok {
|
||||
t.Fatalf("OCSP falsely registered as enabled for this certificate")
|
||||
}
|
||||
}
|
|
@ -7,7 +7,7 @@ import (
|
|||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/kisom/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
var hasPort = regexp.MustCompile(`:\d+$`)
|
||||
|
|
|
@ -12,12 +12,13 @@ import (
|
|||
"crypto/x509/pkix"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/cloudflare/cfssl/helpers"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
func certPublic(cert *x509.Certificate) string {
|
||||
|
@ -109,6 +110,14 @@ func showBasicConstraints(cert *x509.Certificate) {
|
|||
|
||||
if cert.IsCA {
|
||||
fmt.Printf(", is a CA certificate")
|
||||
if !cert.BasicConstraintsValid {
|
||||
fmt.Printf(" (basic constraint failure)")
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("is not a CA certificate")
|
||||
if cert.KeyUsage&x509.KeyUsageKeyEncipherment != 0 {
|
||||
fmt.Printf(" (key encipherment usage enabled!)")
|
||||
}
|
||||
}
|
||||
|
||||
if (cert.MaxPathLen == 0 && cert.MaxPathLenZero) || (cert.MaxPathLen > 0) {
|
||||
|
@ -208,17 +217,17 @@ func displayCert(cert *x509.Certificate) {
|
|||
}
|
||||
|
||||
func displayAllCerts(in []byte, leafOnly bool) {
|
||||
certs, err := helpers.ParseCertificatesPEM(in)
|
||||
certs, err := certlib.ParseCertificatesPEM(in)
|
||||
if err != nil {
|
||||
certs, _, err = helpers.ParseCertificatesDER(in, "")
|
||||
certs, _, err = certlib.ParseCertificatesDER(in, "")
|
||||
if err != nil {
|
||||
Warn(TranslateCFSSLError(err), "failed to parse certificates")
|
||||
lib.Warn(err, "failed to parse certificates")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(certs) == 0 {
|
||||
Warnx("no certificates found")
|
||||
lib.Warnx("no certificates found")
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -236,7 +245,7 @@ func displayAllCertsWeb(uri string, leafOnly bool) {
|
|||
ci := getConnInfo(uri)
|
||||
conn, err := tls.Dial("tcp", ci.Addr, permissiveConfig())
|
||||
if err != nil {
|
||||
Warn(err, "couldn't connect to %s", ci.Addr)
|
||||
lib.Warn(err, "couldn't connect to %s", ci.Addr)
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
|
@ -252,11 +261,11 @@ func displayAllCertsWeb(uri string, leafOnly bool) {
|
|||
}
|
||||
conn.Close()
|
||||
} else {
|
||||
Warn(err, "TLS verification error with server name %s", ci.Host)
|
||||
lib.Warn(err, "TLS verification error with server name %s", ci.Host)
|
||||
}
|
||||
|
||||
if len(state.PeerCertificates) == 0 {
|
||||
Warnx("no certificates found")
|
||||
lib.Warnx("no certificates found")
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -266,7 +275,7 @@ func displayAllCertsWeb(uri string, leafOnly bool) {
|
|||
}
|
||||
|
||||
if len(state.VerifiedChains) == 0 {
|
||||
Warnx("no verified chains found; using peer chain")
|
||||
lib.Warnx("no verified chains found; using peer chain")
|
||||
for i := range state.PeerCertificates {
|
||||
displayCert(state.PeerCertificates[i])
|
||||
}
|
||||
|
@ -289,9 +298,9 @@ func main() {
|
|||
flag.Parse()
|
||||
|
||||
if flag.NArg() == 0 || (flag.NArg() == 1 && flag.Arg(0) == "-") {
|
||||
certs, err := ioutil.ReadAll(os.Stdin)
|
||||
certs, err := io.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
Warn(err, "couldn't read certificates from standard input")
|
||||
lib.Warn(err, "couldn't read certificates from standard input")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
|
@ -306,9 +315,9 @@ func main() {
|
|||
if strings.HasPrefix(filename, "https://") {
|
||||
displayAllCertsWeb(filename, leafOnly)
|
||||
} else {
|
||||
in, err := ioutil.ReadFile(filename)
|
||||
in, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
Warn(err, "couldn't read certificate")
|
||||
lib.Warn(err, "couldn't read certificate")
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
|
@ -3,13 +3,10 @@ package main
|
|||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
cferr "github.com/cloudflare/cfssl/errors"
|
||||
"github.com/kr/text"
|
||||
)
|
||||
|
||||
|
@ -89,34 +86,6 @@ func sigAlgoHash(a x509.SignatureAlgorithm) string {
|
|||
}
|
||||
}
|
||||
|
||||
// TranslateCFSSLError turns a CFSSL error into a more readable string.
|
||||
func TranslateCFSSLError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// printing errors as json is terrible
|
||||
if cfsslError, ok := err.(*cferr.Error); ok {
|
||||
err = errors.New(cfsslError.Message)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Warnx displays a formatted error message to standard error, à la
|
||||
// warnx(3).
|
||||
func Warnx(format string, a ...interface{}) (int, error) {
|
||||
format += "\n"
|
||||
return fmt.Fprintf(os.Stderr, format, a...)
|
||||
}
|
||||
|
||||
// Warn displays a formatted error message to standard output,
|
||||
// appending the error string, à la warn(3).
|
||||
func Warn(err error, format string, a ...interface{}) (int, error) {
|
||||
format += ": %v\n"
|
||||
a = append(a, err)
|
||||
return fmt.Fprintf(os.Stderr, format, a...)
|
||||
}
|
||||
|
||||
const maxLine = 78
|
||||
|
||||
func makeIndent(n int) string {
|
||||
|
|
|
@ -10,9 +10,9 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudflare/cfssl/helpers"
|
||||
"github.com/kisom/goutils/die"
|
||||
"github.com/kisom/goutils/lib"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
var warnOnly bool
|
||||
|
@ -87,7 +87,7 @@ func main() {
|
|||
continue
|
||||
}
|
||||
|
||||
certs, err := helpers.ParseCertificatesPEM(in)
|
||||
certs, err := certlib.ParseCertificatesPEM(in)
|
||||
if err != nil {
|
||||
lib.Warn(err, "while parsing certificates")
|
||||
continue
|
||||
|
|
|
@ -8,14 +8,14 @@ import (
|
|||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/cloudflare/cfssl/helpers"
|
||||
"github.com/cloudflare/cfssl/revoke"
|
||||
"github.com/kisom/goutils/die"
|
||||
"github.com/kisom/goutils/lib"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/revoke"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
func printRevocation(cert *x509.Certificate) {
|
||||
remaining := cert.NotAfter.Sub(time.Now())
|
||||
remaining := time.Until(cert.NotAfter)
|
||||
fmt.Printf("certificate expires in %s.\n", lib.Duration(remaining))
|
||||
|
||||
revoked, ok := revoke.VerifyCertificate(cert)
|
||||
|
@ -47,7 +47,7 @@ func main() {
|
|||
if verbose {
|
||||
fmt.Println("[+] loading root certificates from", caFile)
|
||||
}
|
||||
roots, err = helpers.LoadPEMCertPool(caFile)
|
||||
roots, err = certlib.LoadPEMCertPool(caFile)
|
||||
die.If(err)
|
||||
}
|
||||
|
||||
|
@ -57,7 +57,7 @@ func main() {
|
|||
if verbose {
|
||||
fmt.Println("[+] loading intermediate certificates from", intFile)
|
||||
}
|
||||
ints, err = helpers.LoadPEMCertPool(caFile)
|
||||
ints, err = certlib.LoadPEMCertPool(caFile)
|
||||
die.If(err)
|
||||
} else {
|
||||
ints = x509.NewCertPool()
|
||||
|
@ -71,7 +71,7 @@ func main() {
|
|||
fileData, err := ioutil.ReadFile(flag.Arg(0))
|
||||
die.If(err)
|
||||
|
||||
chain, err := helpers.ParseCertificatesPEM(fileData)
|
||||
chain, err := certlib.ParseCertificatesPEM(fileData)
|
||||
die.If(err)
|
||||
if verbose {
|
||||
fmt.Printf("[+] %s has %d certificates\n", flag.Arg(0), len(chain))
|
||||
|
|
|
@ -11,7 +11,7 @@ import (
|
|||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/kisom/goutils/lib"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
"github.com/pkg/sftp"
|
||||
"golang.org/x/crypto/ssh"
|
||||
"golang.org/x/crypto/ssh/agent"
|
||||
|
|
|
@ -11,7 +11,8 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/kisom/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/fileutil"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -56,7 +57,7 @@ func processFile(tfr *tar.Reader, hdr *tar.Header, top string) error {
|
|||
}
|
||||
filePath := filepath.Clean(filepath.Join(top, hdr.Name))
|
||||
switch hdr.Typeflag {
|
||||
case tar.TypeReg, tar.TypeRegA:
|
||||
case tar.TypeReg:
|
||||
file, err := os.Create(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -92,6 +93,17 @@ func processFile(tfr *tar.Reader, hdr *tar.Header, top string) error {
|
|||
return err
|
||||
}
|
||||
case tar.TypeSymlink:
|
||||
if !fileutil.ValidateSymlink(hdr.Linkname, top) {
|
||||
return fmt.Errorf("symlink %s is outside the top-level %s",
|
||||
hdr.Linkname, top)
|
||||
}
|
||||
path := linkTarget(hdr.Linkname, top)
|
||||
if ok, err := filepath.Match(top+"/*", filepath.Clean(path)); !ok {
|
||||
return fmt.Errorf("symlink %s isn't in %s", hdr.Linkname, top)
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := os.Symlink(linkTarget(hdr.Linkname, top), filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
"io/ioutil"
|
||||
"log"
|
||||
|
||||
"github.com/kisom/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
|
|
@ -0,0 +1,32 @@
|
|||
data_sync
|
||||
|
||||
This is a tool I wrote primarily to sync my home directory to a backup
|
||||
drive plugged into my laptop. This system is provisioned by Ansible,
|
||||
and the goal is to be able to just copy my home directory back in the
|
||||
event of a failure without having lost a great deal of work or to wait
|
||||
for ansible to finish installing the right backup software. Specifically,
|
||||
I use a Framework laptop with the 1TB storage module, encrypted with
|
||||
LUKS, and run this twice daily (timed to correspond with my commute,
|
||||
though that's not really necessary). It started off as a shell script,
|
||||
then I decided to just write it as a program.
|
||||
|
||||
Usage: data_sync [-d path] [-l level] [-m path] [-nqsv]
|
||||
[-t path]
|
||||
-d path path to sync source directory
|
||||
(default "~")
|
||||
-l level log level to output (default "INFO"). Valid log
|
||||
levels are DEBUG, INFO, NOTICE, WARNING, ERR,
|
||||
CRIT, ALERT, EMERG. The default is INFO.
|
||||
-m path path to sync mount directory
|
||||
(default "/media/$USER/$(hostname -s)_data")
|
||||
-n dry-run mode: only check paths and print files to
|
||||
exclude
|
||||
-q suppress console output
|
||||
-s suppress syslog output
|
||||
-t path path to sync target directory
|
||||
(default "/media/$USER/$(hostname -s)_data/$USER")
|
||||
-v verbose rsync output
|
||||
|
||||
data_sync rsyncs the tree at the sync source directory (-d) to the sync target
|
||||
directory (-t); it checks the mount directory (-m) exists; the sync target
|
||||
target directory must exist on the mount directory.
|
|
@ -0,0 +1,230 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/config"
|
||||
"git.wntrmute.dev/kyle/goutils/fileutil"
|
||||
"git.wntrmute.dev/kyle/goutils/log"
|
||||
)
|
||||
|
||||
func mustHostname() string {
|
||||
hostname, err := os.Hostname()
|
||||
log.FatalError(err, "couldn't retrieve hostname")
|
||||
|
||||
if hostname == "" {
|
||||
log.Fatal("no hostname returned")
|
||||
}
|
||||
return strings.Split(hostname, ".")[0]
|
||||
}
|
||||
|
||||
var (
|
||||
defaultDataDir = mustHostname() + "_data"
|
||||
defaultProgName = defaultDataDir + "_sync"
|
||||
defaultMountDir = filepath.Join("/media", os.Getenv("USER"), defaultDataDir)
|
||||
defaultSyncDir = os.Getenv("HOME")
|
||||
defaultTargetDir = filepath.Join(defaultMountDir, os.Getenv("USER"))
|
||||
)
|
||||
|
||||
func usage(w io.Writer) {
|
||||
prog := filepath.Base(os.Args[0])
|
||||
fmt.Fprintf(w, `Usage: %s [-d path] [-l level] [-m path] [-nqsv]
|
||||
[-t path]
|
||||
-d path path to sync source directory
|
||||
(default "%s")
|
||||
-l level log level to output (default "INFO"). Valid log
|
||||
levels are DEBUG, INFO, NOTICE, WARNING, ERR,
|
||||
CRIT, ALERT, EMERG. The default is INFO.
|
||||
-m path path to sync mount directory
|
||||
(default "%s")
|
||||
-n dry-run mode: only check paths and print files to
|
||||
exclude
|
||||
-q suppress console output
|
||||
-s suppress syslog output
|
||||
-t path path to sync target directory
|
||||
(default "%s")
|
||||
-v verbose rsync output
|
||||
|
||||
%s rsyncs the tree at the sync source directory (-d) to the sync target
|
||||
directory (-t); it checks that the mount directory (-m) exists; the sync target
|
||||
directory must exist on the mount directory.
|
||||
|
||||
`, prog, defaultSyncDir, defaultMountDir, defaultTargetDir, prog)
|
||||
}
|
||||
|
||||
func checkPaths(mount, target string, dryRun bool) error {
|
||||
if !fileutil.DirectoryDoesExist(mount) {
|
||||
return fmt.Errorf("sync dir %s isn't mounted", mount)
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(target, mount) {
|
||||
return fmt.Errorf("target dir %s must exist in %s", target, mount)
|
||||
}
|
||||
|
||||
if !fileutil.DirectoryDoesExist(target) {
|
||||
if dryRun {
|
||||
log.Infof("would mkdir %s", target)
|
||||
} else {
|
||||
log.Infof("mkdir %s", target)
|
||||
if err := os.Mkdir(target, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildExcludes(syncDir string) ([]string, error) {
|
||||
var excluded []string
|
||||
|
||||
walker := func(path string, info fs.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
excluded = append(excluded, strings.TrimPrefix(path, syncDir))
|
||||
if info != nil && info.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if info.Mode().IsRegular() {
|
||||
if err = fileutil.Access(path, fileutil.AccessRead); err != nil {
|
||||
excluded = append(excluded, strings.TrimPrefix(path, syncDir))
|
||||
}
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
if err = fileutil.Access(path, fileutil.AccessExec); err != nil {
|
||||
excluded = append(excluded, strings.TrimPrefix(path, syncDir))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
err := filepath.Walk(syncDir, walker)
|
||||
return excluded, err
|
||||
}
|
||||
|
||||
func writeExcludes(excluded []string) (string, error) {
|
||||
if len(excluded) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
excludeFile, err := os.CreateTemp("", defaultProgName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, name := range excluded {
|
||||
fmt.Fprintln(excludeFile, name)
|
||||
}
|
||||
|
||||
defer excludeFile.Close()
|
||||
return excludeFile.Name(), nil
|
||||
}
|
||||
|
||||
func rsync(syncDir, target, excludeFile string, verboseRsync bool) error {
|
||||
var args []string
|
||||
|
||||
if excludeFile != "" {
|
||||
args = append(args, "--exclude-from")
|
||||
args = append(args, excludeFile)
|
||||
}
|
||||
|
||||
if verboseRsync {
|
||||
args = append(args, "--progress")
|
||||
args = append(args, "-v")
|
||||
}
|
||||
|
||||
args = append(args, []string{"-au", syncDir + "/", target + "/"}...)
|
||||
|
||||
path, err := exec.LookPath("rsync")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cmd := exec.Command(path, args...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
func init() {
|
||||
flag.Usage = func() { usage(os.Stderr) }
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
||||
var logLevel, mountDir, syncDir, target string
|
||||
var dryRun, quietMode, noSyslog, verboseRsync bool
|
||||
|
||||
flag.StringVar(&syncDir, "d", config.GetDefault("sync_dir", defaultSyncDir),
|
||||
"`path to sync source directory`")
|
||||
flag.StringVar(&logLevel, "l", config.GetDefault("log_level", "INFO"),
|
||||
"log level to output")
|
||||
flag.StringVar(&mountDir, "m", config.GetDefault("mount_dir", defaultMountDir),
|
||||
"`path` to sync mount directory")
|
||||
flag.BoolVar(&dryRun, "n", false, "dry-run mode: only check paths and print files to exclude")
|
||||
flag.BoolVar(&quietMode, "q", quietMode, "suppress console output")
|
||||
flag.BoolVar(&noSyslog, "s", noSyslog, "suppress syslog output")
|
||||
flag.StringVar(&target, "t", config.GetDefault("sync_target", defaultTargetDir),
|
||||
"`path` to sync target directory")
|
||||
flag.BoolVar(&verboseRsync, "v", false, "verbose rsync output")
|
||||
flag.Parse()
|
||||
|
||||
if quietMode && noSyslog {
|
||||
fmt.Fprintln(os.Stderr, "both console and syslog output are suppressed")
|
||||
fmt.Fprintln(os.Stderr, "errors will NOT be reported")
|
||||
}
|
||||
|
||||
logOpts := &log.Options{
|
||||
Level: logLevel,
|
||||
Tag: defaultProgName,
|
||||
Facility: "user",
|
||||
WriteSyslog: !noSyslog,
|
||||
WriteConsole: !quietMode,
|
||||
}
|
||||
err := log.Setup(logOpts)
|
||||
log.FatalError(err, "failed to set up logging")
|
||||
|
||||
log.Infof("checking paths: mount=%s, target=%s", mountDir, target)
|
||||
err = checkPaths(mountDir, target, dryRun)
|
||||
log.FatalError(err, "target dir isn't ready")
|
||||
|
||||
log.Infof("checking for files to exclude from %s", syncDir)
|
||||
excluded, err := buildExcludes(syncDir)
|
||||
log.FatalError(err, "couldn't build excludes")
|
||||
|
||||
if dryRun {
|
||||
fmt.Println("excluded files:")
|
||||
for _, path := range excluded {
|
||||
fmt.Printf("\t%s\n", path)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
excludeFile, err := writeExcludes(excluded)
|
||||
log.FatalError(err, "couldn't write exclude file")
|
||||
log.Infof("excluding %d files via %s", len(excluded), excludeFile)
|
||||
|
||||
if excludeFile != "" {
|
||||
defer func() {
|
||||
log.Infof("removing exclude file %s", excludeFile)
|
||||
if err := os.Remove(excludeFile); err != nil {
|
||||
log.Warningf("failed to remove temp file %s", excludeFile)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
err = rsync(syncDir, target, excludeFile, verboseRsync)
|
||||
log.FatalError(err, "couldn't sync data")
|
||||
}
|
|
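A note on those flag defaults: they are resolved through config.GetDefault with the keys sync_dir, log_level, mount_dir and sync_target. Since data_sync never loads a configuration file itself, those lookups (per the config package added later in this changeset) fall through to the environment, so, for example, setting a sync_dir environment variable changes the -d default without passing the flag at all.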
@ -0,0 +1,34 @@
|
|||
diskimg: write disk images

Usage:
diskimg [-a algo] [-v] image device

Flags:
-a algo Select the hashing algorithm to use. The default
is 'sha256'. Specifying an algorithm of 'list'
will print the supported algorithms to standard
output and exit with error code 2.
-v Enable verbose (debug) output.

Examples:

Copying images/server.img to /dev/sda:

$ sudo diskimg images/server.img /dev/sda

Write a bladerunner node image to /dev/sda:

$ sudo diskimg -v ~/code/bladerunner/packer/build/cm4-cnode-ubuntu-22.04.2.img /dev/sda
opening image /home/kyle/code/bladerunner/packer/build/cm4-cnode-ubuntu-22.04.2.img for read
/home/kyle/code/bladerunner/packer/build/cm4-cnode-ubuntu-22.04.2.img 416d4c8f890904167419e3d488d097e9c847273376b650546fdb1f6f9809c184
opening device /dev/sda for rw
writing /home/kyle/code/bladerunner/packer/build/cm4-cnode-ubuntu-22.04.2.img -> /dev/sda
wrote 4151312384 bytes to /dev/sda
syncing /dev/sda
verifying the image was written successfully
OK

Motivation:

I wanted to write something like balena's Etcher, but commandline only.
|
||||
|
|
@ -0,0 +1,116 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/ahash"
|
||||
"git.wntrmute.dev/kyle/goutils/dbg"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
const defaultHashAlgorithm = "sha256"
|
||||
|
||||
var (
|
||||
hAlgo string
|
||||
debug = dbg.New()
|
||||
)
|
||||
|
||||
|
||||
func openImage(imageFile string) (image *os.File, hash []byte, err error) {
|
||||
image, err = os.Open(imageFile)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
hash, err = ahash.SumReader(hAlgo, image)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = image.Seek(0, 0)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
debug.Printf("%s %x\n", imageFile, hash)
|
||||
return
|
||||
}
|
||||
|
||||
func openDevice(devicePath string) (device *os.File, err error) {
|
||||
fi, err := os.Stat(devicePath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
device, err = os.OpenFile(devicePath, os.O_RDWR|os.O_SYNC, fi.Mode())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.StringVar(&hAlgo, "a", defaultHashAlgorithm, "default hash algorithm")
|
||||
flag.BoolVar(&debug.Enabled, "v", false, "enable debug logging")
|
||||
flag.Parse()
|
||||
|
||||
if hAlgo == "list" {
|
||||
fmt.Println("Supported hashing algorithms:")
|
||||
for _, algo := range ahash.SecureHashList() {
|
||||
fmt.Printf("\t- %s\n", algo)
|
||||
}
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
if flag.NArg() != 2 {
|
||||
die.With("usage: diskimg image device")
|
||||
}
|
||||
|
||||
imageFile := flag.Arg(0)
|
||||
devicePath := flag.Arg(1)
|
||||
|
||||
debug.Printf("opening image %s for read\n", imageFile)
|
||||
image, hash, err := openImage(imageFile)
|
||||
if image != nil {
|
||||
defer image.Close()
|
||||
}
|
||||
die.If(err)
|
||||
|
||||
debug.Printf("opening device %s for rw\n", devicePath)
|
||||
device, err := openDevice(devicePath)
|
||||
if device != nil {
|
||||
defer device.Close()
|
||||
}
|
||||
die.If(err)
|
||||
|
||||
debug.Printf("writing %s -> %s\n", imageFile, devicePath)
|
||||
n, err := io.Copy(device, image)
|
||||
die.If(err)
|
||||
debug.Printf("wrote %d bytes to %s\n", n, devicePath)
|
||||
|
||||
debug.Printf("syncing %s\n", devicePath)
|
||||
err = device.Sync()
|
||||
die.If(err)
|
||||
|
||||
debug.Println("verifying the image was written successfully")
|
||||
_, err = device.Seek(0, 0)
|
||||
die.If(err)
|
||||
|
||||
deviceHash, err := ahash.SumLimitedReader(hAlgo, device, n)
|
||||
die.If(err)
|
||||
|
||||
if !bytes.Equal(deviceHash, hash) {
|
||||
fmt.Fprintln(os.Stderr, "Hash mismatch:")
|
||||
fmt.Fprintf(os.Stderr, "\t%s: %x\n", imageFile, hash)
|
||||
fmt.Fprintf(os.Stderr, "\t%s: %x\n", devicePath, deviceHash)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
debug.Println("OK")
|
||||
os.Exit(0)
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
func usage(w io.Writer, exc int) {
|
||||
fmt.Fprintln(w, `usage: dumpbytes <file>`)
|
||||
os.Exit(exc)
|
||||
}
|
||||
|
||||
func printBytes(buf []byte) {
|
||||
fmt.Printf("\t")
|
||||
for i := 0; i < len(buf); i++ {
|
||||
fmt.Printf("0x%02x, ", buf[i])
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
func dumpFile(path string, indentLevel int) error {
|
||||
indent := ""
|
||||
for i := 0; i < indentLevel; i++ {
|
||||
indent += "\t"
|
||||
}
|
||||
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer file.Close()
|
||||
|
||||
fmt.Printf("%svar buffer = []byte{\n", indent)
|
||||
for {
|
||||
buf := make([]byte, 8)
|
||||
n, err := file.Read(buf)
|
||||
if err == io.EOF {
|
||||
if n > 0 {
|
||||
fmt.Printf("%s", indent)
|
||||
printBytes(buf[:n])
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("%s", indent)
|
||||
printBytes(buf[:n])
|
||||
}
|
||||
|
||||
fmt.Printf("%s}\n", indent)
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
indent := 0
|
||||
flag.Usage = func() { usage(os.Stderr, 0) }
|
||||
flag.IntVar(&indent, "n", 0, "indent level")
|
||||
flag.Parse()
|
||||
|
||||
for _, file := range flag.Args() {
|
||||
err := dumpFile(file, indent)
|
||||
die.If(err)
|
||||
}
|
||||
}
|
|
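For example, run against a hypothetical six-byte file containing "hello" plus a trailing newline, dumpbytes emits a Go byte-slice literal:

var buffer = []byte{
	0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x0a,
}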
@ -0,0 +1,52 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
// size of a kilobit in bytes
|
||||
const kilobit = 128
|
||||
const pageSize = 4096
|
||||
|
||||
func main() {
|
||||
size := flag.Int("s", 256*kilobit, "size of EEPROM image in kilobits")
|
||||
fill := flag.Uint("f", 0, "byte to fill image with")
|
||||
flag.Parse()
|
||||
|
||||
if *fill > 255 {
|
||||
die.With("`fill` argument must be a byte value")
|
||||
}
|
||||
|
||||
path := "eeprom.img"
|
||||
|
||||
if flag.NArg() > 0 {
|
||||
path = flag.Arg(0)
|
||||
}
|
||||
|
||||
fillByte := uint8(*fill)
|
||||
|
||||
buf := make([]byte, pageSize)
|
||||
for i := 0; i < pageSize; i++ {
|
||||
buf[i] = fillByte
|
||||
}
|
||||
|
||||
pages := *size / pageSize
|
||||
last := *size % pageSize
|
||||
|
||||
file, err := os.Create(path)
|
||||
die.If(err)
|
||||
defer file.Close()
|
||||
|
||||
for i := 0; i < pages; i++ {
|
||||
_, err = file.Write(buf)
|
||||
die.If(err)
|
||||
}
|
||||
|
||||
if last != 0 {
|
||||
_, err = file.Write(buf[:last])
|
||||
die.If(err)
|
||||
}
|
||||
}
|
|
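To make the sizing concrete: with the default -s 256 (256 kilobits at 128 bytes per kilobit), the image is 32768 bytes, which the loop writes as eight full 4096-byte pages with no partial final write.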
@ -9,7 +9,7 @@ import (
|
|||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/kisom/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
host

This is a utility to display CNAME records and IPs for a hostname. It
was born of my frustration in trying to figure out how to get the host(1)
tool installed on Fedora.
|
|
@ -0,0 +1,41 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
)
|
||||
|
||||
func lookupHost(host string) error {
|
||||
cname, err := net.LookupCNAME(host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cname != host {
|
||||
fmt.Printf("%s is a CNAME for %s\n", host, cname)
|
||||
host = cname
|
||||
}
|
||||
|
||||
addrs, err := net.LookupHost(host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, addr := range addrs {
|
||||
fmt.Printf("\t%s\n", addr)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
for _, arg := range flag.Args() {
|
||||
if err := lookupHost(arg); err != nil {
|
||||
log.Printf("%s: %s", arg, err)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -8,7 +8,7 @@ import (
|
|||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/kisom/goutils/lib"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
func prettify(file string, validateOnly bool) error {
|
||||
|
|
|
@ -113,7 +113,7 @@ func pathForUncompressing(source, dest string) (string, error) {
|
|||
|
||||
source = filepath.Base(source)
|
||||
if !strings.HasSuffix(source, gzipExt) {
|
||||
return "", errors.Errorf("%s is a not gzip-compressed file")
|
||||
return "", errors.Errorf("%s is a not gzip-compressed file", source)
|
||||
}
|
||||
outFile := source[:len(source)-len(gzipExt)]
|
||||
outFile = filepath.Join(dest, outFile)
|
||||
|
@ -127,7 +127,7 @@ func pathForCompressing(source, dest string) (string, error) {
|
|||
|
||||
source = filepath.Base(source)
|
||||
if strings.HasSuffix(source, gzipExt) {
|
||||
return "", errors.Errorf("%s is a gzip-compressed file")
|
||||
return "", errors.Errorf("%s is a gzip-compressed file", source)
|
||||
}
|
||||
|
||||
dest = filepath.Join(dest, source+gzipExt)
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
minmax

A quick tool to calculate minmax codes if needed for uLisp.
|
|
@ -0,0 +1,53 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var kinds = map[string]int{
|
||||
"sym": 0,
|
||||
"tf": 1,
|
||||
"fn": 2,
|
||||
"sp": 3,
|
||||
}
|
||||
|
||||
func dieIf(err error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "[!] %s\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "usage: minmax type min max\n")
|
||||
fmt.Fprintf(os.Stderr, " type is one of fn, sp, sym, tf\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if flag.NArg() != 3 {
|
||||
usage()
|
||||
}
|
||||
|
||||
kind, ok := kinds[flag.Arg(0)]
|
||||
if !ok {
|
||||
usage()
|
||||
}
|
||||
|
||||
min, err := strconv.Atoi(flag.Arg(1))
|
||||
dieIf(err)
|
||||
|
||||
max, err := strconv.Atoi(flag.Arg(2))
|
||||
dieIf(err)
|
||||
|
||||
code := kind << 6
|
||||
code += (min << 3)
|
||||
code += max
|
||||
fmt.Printf("%0o\n", code)
|
||||
}
|
|
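To make the packing concrete: for the arguments "fn 1 3", the kind code is 2, so code = (2<<6) + (1<<3) + 3 = 128 + 8 + 3 = 139, which the final Printf renders as the octal value 213.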
@ -0,0 +1,9 @@
|
|||
parts: simple parts database for electronic components

Usage: parts [id] -- query the database for a part
parts [-c class] [id] [description] -- store a part in the database

Options:
-f path Path to parts database (default is
/home/kyle/.parts.json).
|
||||
|
|
@ -0,0 +1,142 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
const dbVersion = "1"
|
||||
|
||||
var dbFile = filepath.Join(os.Getenv("HOME"), ".parts.json")
|
||||
var partsDB = &database{Version: dbVersion}
|
||||
|
||||
type part struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
Class string `json:"class,omitempty"`
|
||||
}
|
||||
|
||||
func (p part) String() string {
|
||||
return fmt.Sprintf("%s: %s", p.Name, p.Description)
|
||||
}
|
||||
|
||||
type database struct {
|
||||
Version string `json:"version"`
|
||||
LastUpdate int64 `json:"last_update"`
|
||||
Parts map[string]part `json:"parts"`
|
||||
}
|
||||
|
||||
func help(w io.Writer) {
|
||||
fmt.Fprintf(w, `Usage: parts [id] -- query the database for a part
|
||||
parts [-c class] [id] [description] -- store a part in the database
|
||||
|
||||
Options:
|
||||
-f path Path to parts database (default is
|
||||
%s).
|
||||
|
||||
`, dbFile)
|
||||
}
|
||||
|
||||
func loadDatabase() {
|
||||
data, err := ioutil.ReadFile(dbFile)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
partsDB = &database{
|
||||
Version: dbVersion,
|
||||
Parts: map[string]part{},
|
||||
}
|
||||
return
|
||||
}
|
||||
die.If(err)
|
||||
|
||||
err = json.Unmarshal(data, partsDB)
|
||||
die.If(err)
|
||||
}
|
||||
|
||||
func findPart(partName string) {
|
||||
partName = strings.ToLower(partName)
|
||||
for name, part := range partsDB.Parts {
|
||||
if strings.Contains(strings.ToLower(name), partName) {
|
||||
fmt.Println(part.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func writeDB() {
|
||||
data, err := json.Marshal(partsDB)
|
||||
die.If(err)
|
||||
|
||||
err = ioutil.WriteFile(dbFile, data, 0644)
|
||||
die.If(err)
|
||||
}
|
||||
|
||||
func storePart(name, class, description string) {
|
||||
p, exists := partsDB.Parts[name]
|
||||
if exists {
|
||||
fmt.Printf("warning: replacing part %s\n", name)
|
||||
fmt.Printf("\t%s\n", p.String())
|
||||
}
|
||||
|
||||
partsDB.Parts[name] = part{
|
||||
Name: name,
|
||||
Class: class,
|
||||
Description: description,
|
||||
}
|
||||
|
||||
writeDB()
|
||||
}
|
||||
|
||||
func listParts() {
|
||||
parts := make([]string, 0, len(partsDB.Parts))
|
||||
for partName := range partsDB.Parts {
|
||||
parts = append(parts, partName)
|
||||
}
|
||||
|
||||
sort.Strings(parts)
|
||||
for _, partName := range parts {
|
||||
fmt.Println(partsDB.Parts[partName].String())
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
var class string
|
||||
var helpFlag bool
|
||||
|
||||
flag.StringVar(&class, "c", "", "device class")
|
||||
flag.StringVar(&dbFile, "f", dbFile, "`path` to database")
|
||||
flag.BoolVar(&helpFlag, "h", false, "Print a help message.")
|
||||
flag.Parse()
|
||||
|
||||
if helpFlag {
|
||||
help(os.Stdout)
|
||||
return
|
||||
}
|
||||
|
||||
loadDatabase()
|
||||
|
||||
switch flag.NArg() {
|
||||
case 0:
|
||||
help(os.Stdout)
|
||||
return
|
||||
case 1:
|
||||
partName := flag.Arg(0)
|
||||
if partName == "list" {
|
||||
listParts()
|
||||
} else {
|
||||
findPart(flag.Arg(0))
|
||||
}
|
||||
return
|
||||
default:
|
||||
description := strings.Join(flag.Args()[1:], " ")
|
||||
storePart(flag.Arg(0), class, description)
|
||||
return
|
||||
}
|
||||
}
|
|
@ -7,7 +7,7 @@ import (
|
|||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/kisom/goutils/lib"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
|
|
@ -8,9 +8,9 @@ import (
|
|||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/kisom/goutils/assert"
|
||||
"github.com/kisom/goutils/die"
|
||||
"github.com/kisom/goutils/lib"
|
||||
"git.wntrmute.dev/kyle/goutils/assert"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
func usage(w io.Writer) {
|
||||
|
|
|
@ -11,8 +11,8 @@ import (
|
|||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/kisom/goutils/fileutil"
|
||||
"github.com/kisom/goutils/lib"
|
||||
"git.wntrmute.dev/kyle/goutils/fileutil"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
func hashName(path, encodedHash string) string {
|
||||
|
|
|
@ -9,9 +9,9 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/kisom/goutils/ahash"
|
||||
"github.com/kisom/goutils/die"
|
||||
"github.com/kisom/goutils/lib"
|
||||
"git.wntrmute.dev/kyle/goutils/ahash"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
func usage(w io.Writer) {
|
||||
|
|
|
@ -0,0 +1,48 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
var dieRollFormat = regexp.MustCompile(`^(\d+)[dD](\d+)$`)
|
||||
|
||||
func rollDie(count, sides int) []int {
|
||||
sum := 0
|
||||
var rolls []int
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
roll := rand.Intn(sides) + 1
|
||||
sum += roll
|
||||
rolls = append(rolls, roll)
|
||||
}
|
||||
|
||||
rolls = append(rolls, sum)
|
||||
return rolls
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
for _, arg := range flag.Args() {
|
||||
if !dieRollFormat.MatchString(arg) {
|
||||
fmt.Fprintf(os.Stderr, "invalid die format %s: should be XdY\n", arg)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
dieRoll := dieRollFormat.FindAllStringSubmatch(arg, -1)
|
||||
count, err := strconv.Atoi(dieRoll[0][1])
|
||||
die.If(err)
|
||||
|
||||
sides, err := strconv.Atoi(dieRoll[0][2])
|
||||
die.If(err)
|
||||
|
||||
fmt.Println(rollDie(count, sides))
|
||||
}
|
||||
}
|
|
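Each argument such as 3d6 is rolled independently; the printed slice is the individual rolls followed by their sum, e.g. [4 2 5 11] (the values here are illustrative, since the rolls are random).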
@ -12,36 +12,23 @@ import (
|
|||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/kisom/goutils/die"
|
||||
"github.com/kisom/goutils/logging"
|
||||
"git.wntrmute.dev/kyle/goutils/dbg"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
var (
|
||||
gopath string
|
||||
project string
|
||||
debug bool
|
||||
)
|
||||
|
||||
var (
|
||||
stdLibRegexp = regexp.MustCompile(`^\w+(/\w+)*$`)
|
||||
sourceRegexp = regexp.MustCompile(`^[^.].*\.go$`)
|
||||
log = logging.NewConsole()
|
||||
imports = map[string]bool{}
|
||||
debug = dbg.New()
|
||||
fset = &token.FileSet{}
|
||||
imports = map[string]bool{}
|
||||
sourceRegexp = regexp.MustCompile(`^[^.].*\.go$`)
|
||||
stdLibRegexp = regexp.MustCompile(`^\w+(/\w+)*$`)
|
||||
)
|
||||
|
||||
func debugf(format string, args ...interface{}) {
|
||||
if debug {
|
||||
fmt.Printf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func debugln(args ...interface{}) {
|
||||
if debug {
|
||||
fmt.Println(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
gopath = os.Getenv("GOPATH")
|
||||
if gopath == "" {
|
||||
|
@ -75,7 +62,7 @@ func walkFile(path string, info os.FileInfo, err error) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
debugln(path)
|
||||
debug.Println(path)
|
||||
|
||||
f, err := parser.ParseFile(fset, path, nil, parser.ImportsOnly)
|
||||
if err != nil {
|
||||
|
@ -85,16 +72,16 @@ func walkFile(path string, info os.FileInfo, err error) error {
|
|||
for _, importSpec := range f.Imports {
|
||||
importPath := strings.Trim(importSpec.Path.Value, `"`)
|
||||
if stdLibRegexp.MatchString(importPath) {
|
||||
debugln("standard lib:", importPath)
|
||||
debug.Println("standard lib:", importPath)
|
||||
continue
|
||||
} else if strings.HasPrefix(importPath, project) {
|
||||
debugln("internal import:", importPath)
|
||||
debug.Println("internal import:", importPath)
|
||||
continue
|
||||
} else if strings.HasPrefix(importPath, "golang.org/") {
|
||||
debugln("extended lib:", importPath)
|
||||
debug.Println("extended lib:", importPath)
|
||||
continue
|
||||
}
|
||||
debugln("import:", importPath)
|
||||
debug.Println("import:", importPath)
|
||||
imports[importPath] = true
|
||||
}
|
||||
|
||||
|
@ -108,7 +95,7 @@ func main() {
|
|||
var noVendor bool
|
||||
flag.StringVar(&ignoreLine, "i", "", "comma-separated list of directories to ignore")
|
||||
flag.BoolVar(&noVendor, "nv", false, "ignore the vendor directory")
|
||||
flag.BoolVar(&debug, "v", false, "log debugging information")
|
||||
flag.BoolVar(&debug.Enabled, "v", false, "log debugging information")
|
||||
flag.Parse()
|
||||
|
||||
if noVendor {
|
||||
|
|
|
@ -17,8 +17,8 @@ import (
|
|||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/kisom/goutils/die"
|
||||
"github.com/kisom/goutils/lib"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
func usage(w io.Writer) {
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
func proxy(conn net.Conn, inside string) error {
|
||||
proxyConn, err := net.Dial("tcp", inside)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer proxyConn.Close()
|
||||
defer conn.Close()
|
||||
|
||||
go func() {
|
||||
io.Copy(conn, proxyConn)
|
||||
}()
|
||||
_, err = io.Copy(proxyConn, conn)
|
||||
return err
|
||||
}
|
||||
|
||||
func main() {
|
||||
var outside, inside string
|
||||
flag.StringVar(&outside, "f", "8080", "outside port")
|
||||
flag.StringVar(&inside, "p", "4000", "inside port")
|
||||
flag.Parse()
|
||||
|
||||
l, err := net.Listen("tcp", "0.0.0.0:"+outside)
|
||||
die.If(err)
|
||||
|
||||
for {
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
continue
|
||||
}
|
||||
|
||||
go proxy(conn, "127.0.0.1:"+inside)
|
||||
}
|
||||
}
|
|
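With the defaults above, the proxy listens on 0.0.0.0:8080 and forwards each accepted connection to 127.0.0.1:4000, copying bytes in both directions until the connection is torn down.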
@ -12,7 +12,7 @@ import (
|
|||
"net"
|
||||
"os"
|
||||
|
||||
"github.com/kisom/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
"net"
|
||||
"os"
|
||||
|
||||
"github.com/kisom/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
|
|
@ -9,8 +9,9 @@ import (
|
|||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/kisom/goutils/die"
|
||||
"github.com/kisom/goutils/lib"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -57,7 +58,7 @@ func getSubjectInfoHash(cert *x509.Certificate, issuer bool) []byte {
|
|||
|
||||
func printDigests(paths []string, issuer bool) {
|
||||
for _, path := range paths {
|
||||
cert, err := lib.LoadCertificate(path)
|
||||
cert, err := certlib.LoadCertificate(path)
|
||||
if err != nil {
|
||||
lib.Warn(err, "failed to load certificate from %s", path)
|
||||
continue
|
||||
|
@ -82,9 +83,9 @@ func matchDigests(paths []string, issuer bool) {
|
|||
snd := paths[1]
|
||||
paths = paths[2:]
|
||||
|
||||
fstCert, err := lib.LoadCertificate(fst)
|
||||
fstCert, err := certlib.LoadCertificate(fst)
|
||||
die.If(err)
|
||||
sndCert, err := lib.LoadCertificate(snd)
|
||||
sndCert, err := certlib.LoadCertificate(snd)
|
||||
die.If(err)
|
||||
if !bytes.Equal(getSubjectInfoHash(fstCert, issuer), getSubjectInfoHash(sndCert, issuer)) {
|
||||
lib.Warnx("certificates don't match: %s and %s", fst, snd)
|
||||
|
|
|
@ -15,7 +15,7 @@ import (
|
|||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/kisom/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
var validPEMs = map[string]bool{
|
||||
|
|
|
@ -0,0 +1,136 @@
|
|||
// zsearch is a utility for searching zlib-compressed files for a
|
||||
// search string. It was really designed for use with the Git object
|
||||
// store, i.e. to aid in the recovery of files after Git does what Git
|
||||
// do.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/zlib"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
const defaultDirectory = ".git/objects"
|
||||
|
||||
func errorf(format string, a ...interface{}) {
|
||||
fmt.Fprintf(os.Stderr, format, a...)
|
||||
if format[len(format)-1] != '\n' {
|
||||
fmt.Fprintf(os.Stderr, "\n")
|
||||
}
|
||||
}
|
||||
|
||||
func isDir(path string) bool {
|
||||
fi, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return fi.IsDir()
|
||||
}
|
||||
|
||||
func loadFile(path string) ([]byte, error) {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
zread, err := zlib.NewReader(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer zread.Close()
|
||||
|
||||
_, err = io.Copy(buf, zread)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func showFile(path string) {
|
||||
fileData, err := loadFile(path)
|
||||
if err != nil {
|
||||
errorf("%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("%s\n", fileData)
|
||||
}
|
||||
|
||||
func searchFile(path string, search *regexp.Regexp) error {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
errorf("%v", err)
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
zread, err := zlib.NewReader(file)
|
||||
if err != nil {
|
||||
errorf("%v", err)
|
||||
return err
|
||||
}
|
||||
defer zread.Close()
|
||||
|
||||
zbuf := bufio.NewReader(zread)
|
||||
if search.MatchReader(zbuf) {
|
||||
fileData, err := loadFile(path)
|
||||
if err != nil {
|
||||
errorf("%v", err)
|
||||
return err
|
||||
}
|
||||
fmt.Printf("%s:\n%s\n", path, fileData)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildWalker(searchExpr *regexp.Regexp) filepath.WalkFunc {
|
||||
return func(path string, info os.FileInfo, err error) error {
|
||||
if info.Mode().IsRegular() {
|
||||
return searchFile(path, searchExpr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
flSearch := flag.String("s", "", "search string (should be an RE2 regular expression)")
|
||||
flag.Parse()
|
||||
|
||||
if *flSearch == "" {
|
||||
for _, path := range flag.Args() {
|
||||
showFile(path)
|
||||
}
|
||||
} else {
|
||||
search, err := regexp.Compile(*flSearch)
|
||||
if err != nil {
|
||||
errorf("Bad regexp: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
pathList := flag.Args()
|
||||
if len(pathList) == 0 {
|
||||
pathList = []string{defaultDirectory}
|
||||
}
|
||||
|
||||
for _, path := range pathList {
|
||||
if isDir(path) {
|
||||
err := filepath.Walk(path, buildWalker(search))
|
||||
if err != nil {
|
||||
errorf("%v", err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
searchFile(path, search)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
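Invoked as, say, zsearch -s 'package main' (a hypothetical pattern), it walks .git/objects by default, inflates each regular file with zlib, and prints the full decompressed contents of any object whose text matches the RE2 expression; with no -s it simply decompresses and prints the named files.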
@ -0,0 +1,153 @@
|
|||
// Package config implements a simple global configuration system that
|
||||
// supports a file with key=value pairs and environment variables. Note
|
||||
// that the config system is global.
|
||||
//
|
||||
// This package is intended to be used for small daemons: some configuration
|
||||
// file is optionally populated at program start, then this is used to
|
||||
// transparently look up configuration values from either that file or the
|
||||
// environment.
|
||||
package config
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/config/iniconf"
|
||||
)
|
||||
|
||||
// NB: Rather than define a singleton type, everything is defined at
|
||||
// the top-level
|
||||
|
||||
var (
|
||||
vars = map[string]string{}
|
||||
prefix = ""
|
||||
)
|
||||
|
||||
// SetEnvPrefix sets the prefix for all environment variables; it's
|
||||
// assumed to not be needed for files.
|
||||
func SetEnvPrefix(pfx string) {
|
||||
prefix = pfx
|
||||
}
|
||||
|
||||
func addLine(line string) {
|
||||
if strings.HasPrefix(line, "#") || line == "" {
|
||||
return
|
||||
}
|
||||
|
||||
lineParts := strings.SplitN(line, "=", 2)
|
||||
if len(lineParts) != 2 {
|
||||
log.Print("skipping line: ", line)
|
||||
return // skip lines without a key=value pair
|
||||
}
|
||||
|
||||
lineParts[0] = strings.TrimSpace(lineParts[0])
|
||||
lineParts[1] = strings.TrimSpace(lineParts[1])
|
||||
vars[lineParts[0]] = lineParts[1]
|
||||
}
|
||||
|
||||
// LoadFile scans the file at path for key=value pairs and adds them
|
||||
// to the configuration.
|
||||
func LoadFile(path string) error {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
addLine(line)
|
||||
}
|
||||
|
||||
if err = scanner.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadFileFor scans the ini file at path, loading the default section
|
||||
// and overriding any keys found under section. If strict is true, the
|
||||
// named section must exist (i.e. to catch typos in the section name).
|
||||
func LoadFileFor(path, section string, strict bool) error {
|
||||
cmap, err := iniconf.ParseFile(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for key, value := range cmap[iniconf.DefaultSection] {
|
||||
vars[key] = value
|
||||
}
|
||||
|
||||
smap, ok := cmap[section]
|
||||
if !ok {
|
||||
if strict {
|
||||
return fmt.Errorf("config: section '%s' wasn't found in the config file", section)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
for key, value := range smap {
|
||||
vars[key] = value
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get retrieves a value from either a configuration file or the
|
||||
// environment. Note that values from a file will override environment
|
||||
// variables.
|
||||
func Get(key string) string {
|
||||
if v, ok := vars[key]; ok {
|
||||
return v
|
||||
}
|
||||
return os.Getenv(prefix + key)
|
||||
}
|
||||
|
||||
// GetDefault retrieves a value from either a configuration file or
|
||||
// the environment. Note that values from a file will override
|
||||
// environment variables. If a value isn't found (e.g. Get returns an
|
||||
// empty string), the default value will be used.
|
||||
func GetDefault(key, def string) string {
|
||||
if v := Get(key); v != "" {
|
||||
return v
|
||||
}
|
||||
return def
|
||||
}
|
||||
|
||||
// Require retrieves a value from either a configuration file or the
|
||||
// environment. If the key isn't present, it will call log.Fatal, printing
|
||||
// the missing key.
|
||||
func Require(key string) string {
|
||||
if v, ok := vars[key]; ok {
|
||||
return v
|
||||
}
|
||||
|
||||
v, ok := os.LookupEnv(prefix + key)
|
||||
if !ok {
|
||||
var envMessage string
|
||||
if prefix != "" {
|
||||
envMessage = " (note: looked for the key " + prefix + key
|
||||
envMessage += " in the local env)"
|
||||
}
|
||||
log.Fatalf("missing required configuration value %s%s", key, envMessage)
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// ListKeys returns a slice of the currently known keys.
|
||||
func ListKeys() []string {
|
||||
keyList := []string{}
|
||||
for k := range vars {
|
||||
keyList = append(keyList, k)
|
||||
}
|
||||
|
||||
sort.Strings(keyList)
|
||||
return keyList
|
||||
}
|
|
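To make the intended use concrete, here is a minimal sketch of a small daemon using this package; the file path, prefix, and key names are hypothetical, but the calls (SetEnvPrefix, LoadFile, GetDefault, Require) are the ones defined above.

package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/config"
)

func main() {
	// Keys not found in a loaded file fall back to the environment,
	// with this prefix prepended (e.g. Get("port") reads $DEMO_port).
	config.SetEnvPrefix("DEMO_")

	// Optionally seed the store from a key=value file; file values
	// take precedence over environment variables.
	if err := config.LoadFile("/etc/demo/demo.conf"); err != nil {
		log.Println("no config file loaded:", err)
	}

	addr := config.GetDefault("listen_addr", "127.0.0.1:8080")
	dbURL := config.Require("database_url") // calls log.Fatal if unset

	fmt.Println(addr, dbURL)
}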
@ -0,0 +1,66 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
testFilePath = "testdata/test.env"
|
||||
|
||||
// Keys
|
||||
kOrder = "ORDER"
|
||||
kSpecies = "SPECIES"
|
||||
kName = "COMMON_NAME"
|
||||
|
||||
// Env
|
||||
eOrder = "corvus"
|
||||
eSpecies = "corvus corax"
|
||||
eName = "northern raven"
|
||||
|
||||
// File
|
||||
fOrder = "stringiformes"
|
||||
fSpecies = "strix aluco"
|
||||
// Name isn't set in the file to test fall through.
|
||||
)
|
||||
|
||||
func init() {
|
||||
os.Setenv(kOrder, eOrder)
|
||||
os.Setenv(kSpecies, eSpecies)
|
||||
os.Setenv(kName, eName)
|
||||
}
|
||||
|
||||
func TestLoadEnvOnly(t *testing.T) {
|
||||
order := Get(kOrder)
|
||||
species := Get(kSpecies)
|
||||
if order != eOrder {
|
||||
t.Errorf("want %s, have %s", eOrder, order)
|
||||
}
|
||||
|
||||
if species != eSpecies {
|
||||
t.Errorf("want %s, have %s", eSpecies, species)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadFile(t *testing.T) {
|
||||
err := LoadFile(testFilePath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
order := Get(kOrder)
|
||||
species := Get(kSpecies)
|
||||
name := Get(kName)
|
||||
|
||||
if order != fOrder {
|
||||
t.Errorf("want %s, have %s", fOrder, order)
|
||||
}
|
||||
|
||||
if species != fSpecies {
|
||||
t.Errorf("want %s, have %s", fSpecies, species)
|
||||
}
|
||||
|
||||
if name != eName {
|
||||
t.Errorf("want %s, have %s", eName, name)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,223 @@
|
|||
package iniconf
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// ConfigMap is shorthand for the type used as a config struct.
|
||||
type ConfigMap map[string]map[string]string
|
||||
|
||||
var (
|
||||
configSection = regexp.MustCompile(`^\s*\[\s*(\w+)\s*\]\s*$`)
|
||||
quotedConfigLine = regexp.MustCompile(`^\s*(\w+)\s*=\s*["'](.*)["']\s*$`)
|
||||
configLine = regexp.MustCompile(`^\s*(\w+)\s*=\s*(.*)\s*$`)
|
||||
commentLine = regexp.MustCompile(`^#.*$`)
|
||||
blankLine = regexp.MustCompile(`^\s*$`)
|
||||
)
|
||||
|
||||
// DefaultSection is the label for the default ini file section.
|
||||
var DefaultSection = "default"
|
||||
|
||||
// ParseFile attempts to load the named config file.
|
||||
func ParseFile(fileName string) (cfg ConfigMap, err error) {
|
||||
var file *os.File
|
||||
file, err = os.Open(fileName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
return ParseReader(file)
|
||||
}
|
||||
|
||||
// ParseReader reads a configuration from an io.Reader.
|
||||
func ParseReader(r io.Reader) (cfg ConfigMap, err error) {
|
||||
cfg = ConfigMap{}
|
||||
buf := bufio.NewReader(r)
|
||||
|
||||
var (
|
||||
line string
|
||||
longLine bool
|
||||
currentSection string
|
||||
lineBytes []byte
|
||||
isPrefix bool
|
||||
)
|
||||
|
||||
for {
|
||||
err = nil
|
||||
lineBytes, isPrefix, err = buf.ReadLine()
|
||||
if io.EOF == err {
|
||||
err = nil
|
||||
break
|
||||
} else if err != nil {
|
||||
break
|
||||
} else if isPrefix {
|
||||
line += string(lineBytes)
|
||||
|
||||
longLine = true
|
||||
continue
|
||||
} else if longLine {
|
||||
line += string(lineBytes)
|
||||
longLine = false
|
||||
} else {
|
||||
line = string(lineBytes)
|
||||
}
|
||||
|
||||
if commentLine.MatchString(line) {
|
||||
continue
|
||||
} else if blankLine.MatchString(line) {
|
||||
continue
|
||||
} else if configSection.MatchString(line) {
|
||||
section := configSection.ReplaceAllString(line,
|
||||
"$1")
|
||||
if section == "" {
|
||||
err = fmt.Errorf("invalid structure in file")
|
||||
break
|
||||
} else if !cfg.SectionInConfig(section) {
|
||||
cfg[section] = make(map[string]string, 0)
|
||||
}
|
||||
currentSection = section
|
||||
} else if configLine.MatchString(line) {
|
||||
regex := configLine
|
||||
if quotedConfigLine.MatchString(line) {
|
||||
regex = quotedConfigLine
|
||||
}
|
||||
if currentSection == "" {
|
||||
currentSection = DefaultSection
|
||||
if !cfg.SectionInConfig(currentSection) {
|
||||
cfg[currentSection] = map[string]string{}
|
||||
}
|
||||
}
|
||||
key := regex.ReplaceAllString(line, "$1")
|
||||
val := regex.ReplaceAllString(line, "$2")
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
cfg[currentSection][key] = val
|
||||
} else {
|
||||
err = fmt.Errorf("invalid config file")
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SectionInConfig determines whether a section is in the configuration.
|
||||
func (c ConfigMap) SectionInConfig(section string) bool {
|
||||
_, ok := c[section]
|
||||
return ok
|
||||
}
|
||||
|
||||
// ListSections returns the list of sections in the config map.
|
||||
func (c ConfigMap) ListSections() (sections []string) {
|
||||
for section := range c {
|
||||
sections = append(sections, section)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// WriteFile writes out the configuration to a file.
|
||||
func (c ConfigMap) WriteFile(filename string) (err error) {
|
||||
file, err := os.Create(filename)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
for _, section := range c.ListSections() {
|
||||
sName := fmt.Sprintf("[ %s ]\n", section)
|
||||
_, err = file.Write([]byte(sName))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for k, v := range c[section] {
|
||||
line := fmt.Sprintf("%s = %s\n", k, v)
|
||||
_, err = file.Write([]byte(line))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
_, err = file.Write([]byte{0x0a})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AddSection creates a new section in the config map.
|
||||
func (c ConfigMap) AddSection(section string) {
|
||||
if nil == c[section] {
|
||||
c[section] = map[string]string{}
|
||||
}
|
||||
}
|
||||
|
||||
// AddKeyVal adds a key value pair to a config map.
|
||||
func (c ConfigMap) AddKeyVal(section, key, val string) {
|
||||
if section == "" {
|
||||
section = DefaultSection
|
||||
}
|
||||
|
||||
if nil == c[section] {
|
||||
c.AddSection(section)
|
||||
}
|
||||
|
||||
c[section][key] = val
|
||||
}
|
||||
|
||||
// GetValue retrieves the value from a key map.
|
||||
func (c ConfigMap) GetValue(section, key string) (val string, present bool) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if section == "" {
|
||||
section = DefaultSection
|
||||
}
|
||||
|
||||
_, ok := c[section]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
val, present = c[section][key]
|
||||
return
|
||||
}
|
||||
|
||||
// GetValueDefault retrieves the value from a key map if present,
|
||||
// otherwise the default value.
|
||||
func (c ConfigMap) GetValueDefault(section, key, value string) (val string) {
|
||||
kval, ok := c.GetValue(section, key)
|
||||
if !ok {
|
||||
return value
|
||||
}
|
||||
return kval
|
||||
}
|
||||
|
||||
// SectionKeys returns the keys for the given section of the config map.
|
||||
func (c ConfigMap) SectionKeys(section string) (keys []string, present bool) {
|
||||
if c == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
if section == "" {
|
||||
section = DefaultSection
|
||||
}
|
||||
|
||||
cm := c
|
||||
s, ok := cm[section]
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
keys = make([]string, 0, len(s))
|
||||
for key := range s {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
return keys, true
|
||||
}
|
|
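A minimal sketch of driving this parser, assuming the testdata/test.conf layout shown further down; the program itself is hypothetical.

package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/config/iniconf"
)

func main() {
	cfg, err := iniconf.ParseFile("testdata/test.conf")
	if err != nil {
		log.Fatal(err)
	}

	// Look up a key in a named section; an empty section name
	// selects iniconf.DefaultSection.
	if val, ok := cfg.GetValue("sectionName", "key1"); ok {
		fmt.Println("key1 =", val)
	}

	// Fall back to a default when a key is missing.
	fmt.Println(cfg.GetValueDefault("sectionName", "missing", "fallback"))

	fmt.Println("sections:", cfg.ListSections())
}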
@ -0,0 +1,142 @@
|
|||
package iniconf
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// FailWithError is a utility for dumping errors and failing the test.
|
||||
func FailWithError(t *testing.T, err error) {
|
||||
fmt.Println("failed")
|
||||
if err != nil {
|
||||
fmt.Println("[!] ", err.Error())
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// UnlinkIfExists removes a file if it exists.
|
||||
func UnlinkIfExists(file string) {
|
||||
_, err := os.Stat(file)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
panic("failed to remove " + file)
|
||||
}
|
||||
os.Remove(file)
|
||||
}
|
||||
|
||||
// stringSlicesEqual compares two string lists, checking that they
|
||||
// contain the same elements.
|
||||
func stringSlicesEqual(slice1, slice2 []string) bool {
|
||||
if len(slice1) != len(slice2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := range slice1 {
|
||||
if slice1[i] != slice2[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
for i := range slice2 {
|
||||
if slice1[i] != slice2[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func TestGoodConfig(t *testing.T) {
|
||||
testFile := "testdata/test.conf"
|
||||
fmt.Printf("[+] validating known-good config... ")
|
||||
cmap, err := ParseFile(testFile)
|
||||
if err != nil {
|
||||
FailWithError(t, err)
|
||||
} else if len(cmap) != 2 {
|
||||
FailWithError(t, err)
|
||||
}
|
||||
fmt.Println("ok")
|
||||
}
|
||||
|
||||
func TestGoodConfig2(t *testing.T) {
|
||||
testFile := "testdata/test2.conf"
|
||||
fmt.Printf("[+] validating second known-good config... ")
|
||||
cmap, err := ParseFile(testFile)
|
||||
if err != nil {
|
||||
FailWithError(t, err)
|
||||
} else if len(cmap) != 1 {
|
||||
FailWithError(t, err)
|
||||
} else if len(cmap["default"]) != 3 {
|
||||
FailWithError(t, err)
|
||||
}
|
||||
fmt.Println("ok")
|
||||
}
|
||||
|
||||
func TestBadConfig(t *testing.T) {
|
||||
testFile := "testdata/bad.conf"
|
||||
fmt.Printf("[+] ensure invalid config file fails... ")
|
||||
_, err := ParseFile(testFile)
|
||||
if err == nil {
|
||||
err = fmt.Errorf("invalid config file should fail")
|
||||
FailWithError(t, err)
|
||||
}
|
||||
fmt.Println("ok")
|
||||
}
|
||||
|
||||
func TestWriteConfigFile(t *testing.T) {
|
||||
fmt.Printf("[+] ensure config file is written properly... ")
|
||||
const testFile = "testdata/test.conf"
|
||||
const testOut = "testdata/test.out"
|
||||
|
||||
cmap, err := ParseFile(testFile)
|
||||
if err != nil {
|
||||
FailWithError(t, err)
|
||||
}
|
||||
|
||||
defer UnlinkIfExists(testOut)
|
||||
err = cmap.WriteFile(testOut)
|
||||
if err != nil {
|
||||
FailWithError(t, err)
|
||||
}
|
||||
|
||||
cmap2, err := ParseFile(testOut)
|
||||
if err != nil {
|
||||
FailWithError(t, err)
|
||||
}
|
||||
|
||||
sectionList1 := cmap.ListSections()
|
||||
sectionList2 := cmap2.ListSections()
|
||||
sort.Strings(sectionList1)
|
||||
sort.Strings(sectionList2)
|
||||
if !stringSlicesEqual(sectionList1, sectionList2) {
|
||||
err = fmt.Errorf("section lists don't match")
|
||||
FailWithError(t, err)
|
||||
}
|
||||
|
||||
for _, section := range sectionList1 {
|
||||
for _, k := range cmap[section] {
|
||||
if cmap[section][k] != cmap2[section][k] {
|
||||
err = fmt.Errorf("config key doesn't match")
|
||||
FailWithError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt.Println("ok")
|
||||
}
|
||||
|
||||
func TestQuotedValue(t *testing.T) {
|
||||
testFile := "testdata/test.conf"
|
||||
fmt.Printf("[+] validating quoted value... ")
|
||||
cmap, _ := ParseFile(testFile)
|
||||
val := cmap["sectionName"]["key4"]
|
||||
if val != " space at beginning and end " {
|
||||
FailWithError(t, errors.New("Wrong value in double quotes ["+val+"]"))
|
||||
}
|
||||
|
||||
val = cmap["sectionName"]["key5"]
|
||||
if val != " is quoted with single quotes " {
|
||||
FailWithError(t, errors.New("Wrong value in single quotes ["+val+"]"))
|
||||
}
|
||||
fmt.Println("ok")
|
||||
}
|
|
@ -0,0 +1,5 @@
|
|||
[]
|
||||
|
||||
key
|
||||
another key
|
||||
key = val
|
|
@ -0,0 +1,13 @@
|
|||
[ sectionName ]
|
||||
key1=some value
|
||||
key2 = some other value
|
||||
# we want to explain the importance and great forethought
|
||||
# in this next value.
|
||||
key3 = unintuitive value
|
||||
key4 = " space at beginning and end "
|
||||
key5 = ' is quoted with single quotes '
|
||||
|
||||
[ anotherSection ]
|
||||
key1 = a value
|
||||
key2 = yet another value
|
||||
key1 = overwrites previous value of a value
|
|
@ -0,0 +1,3 @@
|
|||
key1 = some value
|
||||
key2 = some other value
|
||||
key3 = unintuitive value
|
|
@ -0,0 +1,19 @@
|
|||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// DefaultConfigPath returns a sensible default configuration file path.
|
||||
func DefaultConfigPath(dir, base string) string {
|
||||
user, err := user.Current()
|
||||
if err != nil || user.HomeDir == "" {
|
||||
return filepath.Join(dir, base)
|
||||
}
|
||||
|
||||
return filepath.Join(user.HomeDir, dir, base)
|
||||
}
|
|
@ -0,0 +1,43 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// canUseXDGConfigDir checks whether the XDG config directory exists
|
||||
// and is accessible by the current user. If it is present, it will
|
||||
// be returned. Note that if the directory does not exist, it is
|
||||
// presumed unusable.
|
||||
func canUseXDGConfigDir() (string, bool) {
|
||||
xdgDir := os.Getenv("XDG_CONFIG_DIR")
|
||||
if xdgDir == "" {
|
||||
userDir := os.Getenv("HOME")
|
||||
if userDir == "" {
|
||||
return "", false
|
||||
}
|
||||
|
||||
xdgDir = filepath.Join(userDir, ".config")
|
||||
}
|
||||
|
||||
fi, err := os.Stat(xdgDir)
|
||||
if err != nil {
|
||||
return "", false
|
||||
}
|
||||
|
||||
if !fi.IsDir() {
|
||||
return "", false
|
||||
}
|
||||
|
||||
return xdgDir, true
|
||||
}
|
||||
|
||||
// DefaultConfigPath returns a sensible default configuration file path.
|
||||
func DefaultConfigPath(dir, base string) string {
|
||||
dirPath, ok := canUseXDGConfigDir()
|
||||
if !ok {
|
||||
dirPath = "/etc"
|
||||
}
|
||||
|
||||
return filepath.Join(dirPath, dir, base)
|
||||
}
|
|
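So, for example, DefaultConfigPath("demoapp", "app.conf") (the same hypothetical arguments the test below uses) yields $HOME/.config/demoapp/app.conf when the XDG config directory is usable, and /etc/demoapp/app.conf otherwise.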
@ -0,0 +1,7 @@
|
|||
package config
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestDefaultPath(t *testing.T) {
|
||||
t.Log(DefaultConfigPath("demoapp", "app.conf"))
|
||||
}
|
|
@ -0,0 +1,2 @@
|
|||
ORDER=stringiformes
|
||||
SPECIES=strix aluco
|
|
@ -0,0 +1,76 @@
|
|||
// Package dbg implements a debug printer.
|
||||
package dbg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// A DebugPrinter is a drop-in replacement for fmt.Print*, and also acts as
|
||||
// an io.WriteCloser when enabled.
|
||||
type DebugPrinter struct {
|
||||
// If Enabled is false, the print statements won't do anything.
|
||||
Enabled bool
|
||||
out io.WriteCloser
|
||||
}
|
||||
|
||||
// Close satisfies the Closer interface.
|
||||
func (dbg *DebugPrinter) Close() error {
|
||||
return dbg.out.Close()
|
||||
}
|
||||
|
||||
// Write satisfies the Writer interface.
|
||||
func (dbg *DebugPrinter) Write(p []byte) (int, error) {
|
||||
if dbg.Enabled {
|
||||
return dbg.out.Write(p)
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// New returns a new DebugPrinter on os.Stdout.
|
||||
func New() *DebugPrinter {
|
||||
return &DebugPrinter{
|
||||
out: os.Stdout,
|
||||
}
|
||||
}
|
||||
|
||||
// ToFile sets up a new DebugPrinter to a file, truncating it if it exists.
|
||||
func ToFile(path string) (*DebugPrinter, error) {
|
||||
file, err := os.Create(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &DebugPrinter{
|
||||
out: file,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// To sets up a new DebugPrinter on an io.WriteCloser.
|
||||
func To(w io.WriteCloser) *DebugPrinter {
|
||||
return &DebugPrinter{
|
||||
out: w,
|
||||
}
|
||||
}
|
||||
|
||||
// Print calls fmt.Print if Enabled is true.
|
||||
func (dbg *DebugPrinter) Print(v ...interface{}) {
|
||||
if dbg.Enabled {
|
||||
fmt.Fprint(dbg.out, v...)
|
||||
}
|
||||
}
|
||||
|
||||
// Println calls fmt.Println if Enabled is true.
|
||||
func (dbg *DebugPrinter) Println(v ...interface{}) {
|
||||
if dbg.Enabled {
|
||||
fmt.Fprintln(dbg.out, v...)
|
||||
}
|
||||
}
|
||||
|
||||
// Printf calls fmt.Printf if Enabled is true.
|
||||
func (dbg *DebugPrinter) Printf(format string, v ...interface{}) {
|
||||
if dbg.Enabled {
|
||||
fmt.Fprintf(dbg.out, format, v...)
|
||||
}
|
||||
}
|
|
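A short, hypothetical usage sketch, wiring Enabled to a -v flag the same way diskimg does above.

package main

import (
	"flag"

	"git.wntrmute.dev/kyle/goutils/dbg"
)

// debug writes to os.Stdout but prints nothing until Enabled is set.
var debug = dbg.New()

func main() {
	flag.BoolVar(&debug.Enabled, "v", false, "enable debug output")
	flag.Parse()

	debug.Println("this line only appears when -v is passed")
	debug.Printf("%d arguments remain after flag parsing\n", flag.NArg())
}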
@ -0,0 +1,120 @@
|
|||
package dbg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/assert"
|
||||
"git.wntrmute.dev/kyle/goutils/testio"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
buf := testio.NewBufCloser(nil)
|
||||
dbg := New()
|
||||
dbg.out = buf
|
||||
|
||||
dbg.Print("hello")
|
||||
dbg.Println("hello")
|
||||
dbg.Printf("hello %s", "world")
|
||||
assert.BoolT(t, buf.Len() == 0)
|
||||
|
||||
dbg.Enabled = true
|
||||
dbg.Print("hello") // +5
|
||||
dbg.Println("hello") // +6
|
||||
dbg.Printf("hello %s", "world") // +11
|
||||
assert.BoolT(t, buf.Len() == 22, fmt.Sprintf("buffer should be length 22 but is length %d", buf.Len()))
|
||||
|
||||
err := dbg.Close()
|
||||
assert.NoErrorT(t, err)
|
||||
}
|
||||
|
||||
func TestTo(t *testing.T) {
|
||||
buf := testio.NewBufCloser(nil)
|
||||
dbg := To(buf)
|
||||
|
||||
dbg.Print("hello")
|
||||
dbg.Println("hello")
|
||||
dbg.Printf("hello %s", "world")
|
||||
assert.BoolT(t, buf.Len() == 0, "debug output should be suppressed")
|
||||
|
||||
dbg.Enabled = true
|
||||
dbg.Print("hello") // +5
|
||||
dbg.Println("hello") // +6
|
||||
dbg.Printf("hello %s", "world") // +11
|
||||
assert.BoolT(t, buf.Len() == 22, "didn't get the expected debug output")
|
||||
|
||||
err := dbg.Close()
|
||||
assert.NoErrorT(t, err)
|
||||
}
|
||||
|
||||
func TestToFile(t *testing.T) {
|
||||
testFile, err := ioutil.TempFile("", "dbg")
|
||||
assert.NoErrorT(t, err)
|
||||
err = testFile.Close()
|
||||
assert.NoErrorT(t, err)
|
||||
|
||||
testFileName := testFile.Name()
|
||||
defer os.Remove(testFileName)
|
||||
|
||||
dbg, err := ToFile(testFileName)
|
||||
assert.NoErrorT(t, err)
|
||||
|
||||
dbg.Print("hello")
|
||||
dbg.Println("hello")
|
||||
dbg.Printf("hello %s", "world")
|
||||
|
||||
stat, err := os.Stat(testFileName)
|
||||
assert.NoErrorT(t, err)
|
||||
|
||||
assert.BoolT(t, stat.Size() == 0, "no debug output should have been sent to the log file")
|
||||
|
||||
dbg.Enabled = true
|
||||
dbg.Print("hello") // +5
|
||||
dbg.Println("hello") // +6
|
||||
dbg.Printf("hello %s", "world") // +11
|
||||
|
||||
stat, err = os.Stat(testFileName)
|
||||
assert.NoErrorT(t, err)
|
||||
|
||||
assert.BoolT(t, stat.Size() == 22, fmt.Sprintf("have %d bytes in the log file, expected 22", stat.Size()))
|
||||
|
||||
err = dbg.Close()
|
||||
assert.NoErrorT(t, err)
|
||||
}
|
||||
|
||||
func TestWriting(t *testing.T) {
|
||||
data := []byte("hello, world")
|
||||
buf := testio.NewBufCloser(nil)
|
||||
dbg := To(buf)
|
||||
|
||||
n, err := dbg.Write(data)
|
||||
assert.NoErrorT(t, err)
|
||||
assert.BoolT(t, n == 0, "expected nothing to be written to the buffer")
|
||||
|
||||
dbg.Enabled = true
|
||||
n, err = dbg.Write(data)
|
||||
assert.NoErrorT(t, err)
|
||||
assert.BoolT(t, n == 12, fmt.Sprintf("wrote %d bytes in the buffer, expected to write 12", n))
|
||||
|
||||
err = dbg.Close()
|
||||
assert.NoErrorT(t, err)
|
||||
}
|
||||
|
||||
func TestToFileError(t *testing.T) {
|
||||
testFile, err := ioutil.TempFile("", "dbg")
|
||||
assert.NoErrorT(t, err)
|
||||
err = testFile.Chmod(0400)
|
||||
assert.NoErrorT(t, err)
|
||||
err = testFile.Close()
|
||||
assert.NoErrorT(t, err)
|
||||
|
||||
testFileName := testFile.Name()
|
||||
|
||||
_, err = ToFile(testFileName)
|
||||
assert.ErrorT(t, err)
|
||||
|
||||
err = os.Remove(testFileName)
|
||||
assert.NoErrorT(t, err)
|
||||
}
|
|
@ -1,3 +1,6 @@
|
|||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
// Package fileutil contains common file functions.
|
||||
package fileutil
|
||||
|
||||
|
|
|
@ -0,0 +1,49 @@
|
|||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
// Package fileutil contains common file functions.
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
)
|
||||
|
||||
// FileDoesExist returns true if the file exists.
|
||||
func FileDoesExist(path string) bool {
|
||||
_, err := os.Stat(path)
|
||||
return !os.IsNotExist(err)
|
||||
}
|
||||
|
||||
// DirectoryDoesExist returns true if the directory exists.
|
||||
func DirectoryDoesExist(path string) bool {
|
||||
fi, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return fi.Mode().IsDir()
|
||||
}
|
||||
|
||||
const (
|
||||
// AccessExists checks whether the file exists. This is invalid outside of
|
||||
// Unix systems.
|
||||
AccessExists = 0
|
||||
|
||||
// AccessRead checks whether the user has read permissions on
|
||||
// the file. This is invalid outside of Unix systems.
|
||||
AccessRead = 0
|
||||
|
||||
// AccessWrite checks whether the user has write permissions
|
||||
// on the file. This is invalid outside of Unix systems.
|
||||
AccessWrite = 0
|
||||
|
||||
// AccessExec checks whether the user has executable
|
||||
// permissions on the file. This is invalid outside of Unix systems.
|
||||
AccessExec = 0
|
||||
)
|
||||
|
||||
// Access is a Unix-only call, and has no meaning on Windows.
|
||||
func Access(path string, mode int) error {
|
||||
return errors.New("fileutil: Access is meaningless on Windows")
|
||||
}
|
|
@ -0,0 +1,16 @@
|
|||
package fileutil
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ValidateSymlink checks to make sure a symlink exists in some top-level
|
||||
// directory.
|
||||
func ValidateSymlink(symlink, topLevel string) bool {
|
||||
target, err := filepath.EvalSymlinks(symlink)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return strings.HasPrefix(target, topLevel)
|
||||
}
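
A hedged usage sketch (assumed import path). Because the check is a plain string-prefix comparison on the resolved target, passing topLevel with a trailing separator avoids matching sibling directories such as /srv/data-old:

package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/fileutil" // assumed import path
)

func main() {
	// Accept the link only if it resolves somewhere under /srv/data/.
	if fileutil.ValidateSymlink("/srv/data/current", "/srv/data/") {
		fmt.Println("symlink target stays inside the top-level directory")
	} else {
		fmt.Println("symlink escapes the top-level directory")
	}
}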
|
|
@ -0,0 +1,24 @@
|
|||
module git.wntrmute.dev/kyle/goutils
|
||||
|
||||
go 1.20
|
||||
|
||||
require (
|
||||
github.com/hashicorp/go-syslog v1.0.0
|
||||
github.com/kr/text v0.2.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/pkg/sftp v1.12.0
|
||||
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/google/certificate-transparency-go v1.0.21
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/kr/fs v0.1.0 // indirect
|
||||
github.com/kr/pretty v0.1.0 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
|
||||
)
|
|
@ -0,0 +1,43 @@
|
|||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/google/certificate-transparency-go v1.0.21 h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE=
|
||||
github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
|
||||
github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/sftp v1.12.0 h1:/f3b24xrDhkhddlaobPe2JgBqfdt+gC/NYl0QY9IOuI=
|
||||
github.com/pkg/sftp v1.12.0/go.mod h1:fUqqXB5vEgVCZ131L+9say31RAri6aF6KDViawhxKK8=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b h1:Qwe1rC8PSniVfAFPFJeyUkB+zcysC3RgJBAGk7eqBEU=
|
||||
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
@ -1,4 +1,4 @@
|
|||
// +build freebsd darwin netbsd
|
||||
// +build freebsd darwin,386 netbsd
|
||||
|
||||
package lib
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// +build unix linux openbsd
|
||||
// +build unix linux openbsd darwin,amd64
|
||||
|
||||
package lib
|
||||
|
||||
|
|
83
lib/lib.go
|
@ -2,11 +2,7 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
@ -89,7 +85,7 @@ func Duration(d time.Duration) string {
|
|||
if d >= yearDuration {
|
||||
years := d / yearDuration
|
||||
s += fmt.Sprintf("%dy", years)
|
||||
d -= (years * yearDuration)
|
||||
d -= years * yearDuration
|
||||
}
|
||||
|
||||
if d >= dayDuration {
|
||||
|
@ -103,82 +99,7 @@ func Duration(d time.Duration) string {
|
|||
|
||||
d %= 1 * time.Second
|
||||
hours := d / time.Hour
|
||||
d -= (hours * time.Hour)
|
||||
d -= hours * time.Hour
|
||||
s += fmt.Sprintf("%dh%s", hours, d)
|
||||
return s
|
||||
}
|
||||
|
||||
// ReadCertificate reads a DER or PEM-encoded certificate from the
|
||||
// byte slice.
|
||||
func ReadCertificate(in []byte) (cert *x509.Certificate, rest []byte, err error) {
|
||||
if len(in) == 0 {
|
||||
err = errors.New("lib: empty certificate")
|
||||
return
|
||||
}
|
||||
|
||||
if in[0] == '-' {
|
||||
p, remaining := pem.Decode(in)
|
||||
if p == nil {
|
||||
err = errors.New("lib: invalid PEM file")
|
||||
return
|
||||
}
|
||||
|
||||
rest = remaining
|
||||
if p.Type != "CERTIFICATE" {
|
||||
err = fmt.Errorf("lib: expected a CERTIFICATE PEM file, but have %s", p.Type)
|
||||
return
|
||||
}
|
||||
|
||||
in = p.Bytes
|
||||
}
|
||||
|
||||
cert, err = x509.ParseCertificate(in)
|
||||
return
|
||||
}
|
||||
|
||||
// ReadCertificates tries to read all the certificates in a
|
||||
// PEM-encoded collection.
|
||||
func ReadCertificates(in []byte) (certs []*x509.Certificate, err error) {
|
||||
var cert *x509.Certificate
|
||||
for {
|
||||
cert, in, err = ReadCertificate(in)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if cert == nil {
|
||||
break
|
||||
}
|
||||
|
||||
certs = append(certs, cert)
|
||||
if len(in) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return certs, err
|
||||
}
|
||||
|
||||
// LoadCertificate tries to read a single certificate from disk. If
|
||||
// the file contains multiple certificates (e.g. a chain), only the
|
||||
// first certificate is returned.
|
||||
func LoadCertificate(path string) (*x509.Certificate, error) {
|
||||
in, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cert, _, err := ReadCertificate(in)
|
||||
return cert, err
|
||||
}
|
||||
|
||||
// LoadCertificates tries to read all the certificates in a file,
|
||||
// returning them in the order that it found them in the file.
|
||||
func LoadCertificates(path string) ([]*x509.Certificate, error) {
|
||||
in, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ReadCertificates(in)
|
||||
}
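
A hedged sketch of loading a certificate chain with these helpers; the import path is assumed from the module name:

package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/lib" // assumed import path
)

func main() {
	certs, err := lib.LoadCertificates("chain.pem")
	if err != nil {
		log.Fatal(err)
	}

	for i, cert := range certs {
		fmt.Printf("%d: %s (expires %s)\n", i, cert.Subject.CommonName,
			cert.NotAfter.Format("2006-01-02"))
	}
}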
|
||||
|
|
|
@ -0,0 +1,288 @@
|
|||
// Package log is a syslog-type facility for logging.
|
||||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
gsyslog "github.com/hashicorp/go-syslog"
|
||||
)
|
||||
|
||||
type logger struct {
|
||||
l gsyslog.Syslogger
|
||||
p gsyslog.Priority
|
||||
writeConsole bool
|
||||
}
|
||||
|
||||
func (log *logger) printf(p gsyslog.Priority, format string, args ...interface{}) {
|
||||
if !strings.HasSuffix(format, "\n") {
|
||||
format += "\n"
|
||||
}
|
||||
|
||||
if p <= log.p && log.writeConsole {
|
||||
fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
|
||||
fmt.Printf(format, args...)
|
||||
}
|
||||
|
||||
if log.l != nil {
|
||||
log.l.WriteLevel(p, []byte(fmt.Sprintf(format, args...)))
|
||||
}
|
||||
}
|
||||
|
||||
func (log *logger) print(p gsyslog.Priority, args ...interface{}) {
|
||||
if p <= log.p && log.writeConsole {
|
||||
fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
|
||||
fmt.Print(args...)
|
||||
}
|
||||
|
||||
if log.l != nil {
|
||||
log.l.WriteLevel(p, []byte(fmt.Sprint(args...)))
|
||||
}
|
||||
}
|
||||
|
||||
func (log *logger) println(p gsyslog.Priority, args ...interface{}) {
|
||||
if p <= log.p && log.writeConsole {
|
||||
fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
|
||||
fmt.Println(args...)
|
||||
}
|
||||
|
||||
if log.l != nil {
|
||||
log.l.WriteLevel(p, []byte(fmt.Sprintln(args...)))
|
||||
}
|
||||
}
|
||||
|
||||
func (log *logger) spew(args ...interface{}) {
|
||||
if log.p == gsyslog.LOG_DEBUG {
|
||||
spew.Dump(args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (log *logger) adjustPriority(level string) error {
|
||||
priority, ok := priorities[level]
|
||||
if !ok {
|
||||
return fmt.Errorf("log: unknown priority %s", level)
|
||||
}
|
||||
|
||||
log.p = priority
|
||||
return nil
|
||||
}
|
||||
|
||||
var log = &logger{p: gsyslog.LOG_WARNING}
|
||||
|
||||
var priorities = map[string]gsyslog.Priority{
|
||||
"EMERG": gsyslog.LOG_EMERG,
|
||||
"ALERT": gsyslog.LOG_ALERT,
|
||||
"CRIT": gsyslog.LOG_CRIT,
|
||||
"ERR": gsyslog.LOG_ERR,
|
||||
"WARNING": gsyslog.LOG_WARNING,
|
||||
"NOTICE": gsyslog.LOG_NOTICE,
|
||||
"INFO": gsyslog.LOG_INFO,
|
||||
"DEBUG": gsyslog.LOG_DEBUG,
|
||||
}
|
||||
|
||||
var prioritiev = map[gsyslog.Priority]string{
|
||||
gsyslog.LOG_EMERG: "EMERG",
|
||||
gsyslog.LOG_ALERT: "ALERT",
|
||||
gsyslog.LOG_CRIT: "CRIT",
|
||||
gsyslog.LOG_ERR: "ERR",
|
||||
gsyslog.LOG_WARNING: "WARNING",
|
||||
gsyslog.LOG_NOTICE: "NOTICE",
|
||||
gsyslog.LOG_INFO: "INFO",
|
||||
gsyslog.LOG_DEBUG: "DEBUG",
|
||||
}
|
||||
|
||||
func timestamp() string {
|
||||
return time.Now().Format("2006-01-02 15:04:05 MST")
|
||||
}
|
||||
|
||||
type Options struct {
|
||||
Level string
|
||||
Tag string
|
||||
Facility string
|
||||
WriteSyslog bool
|
||||
WriteConsole bool
|
||||
}
|
||||
|
||||
// DefaultOptions returns a sane set of defaults for syslog, using the program
|
||||
// name as the tag name. withSyslog controls whether logs should be sent to
|
||||
// syslog, too.
|
||||
func DefaultOptions(tag string, withSyslog bool) *Options {
|
||||
if tag == "" {
|
||||
tag = os.Args[0]
|
||||
}
|
||||
|
||||
return &Options{
|
||||
Level: "WARNING",
|
||||
Tag: tag,
|
||||
Facility: "daemon",
|
||||
WriteSyslog: withSyslog,
|
||||
WriteConsole: true,
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultDebugOptions returns a sane set of debug defaults for syslog,
|
||||
// using the program name as the tag name. withSyslog controls whether logs
|
||||
// should be sent to syslog, too.
|
||||
func DefaultDebugOptions(tag string, withSyslog bool) *Options {
|
||||
if tag == "" {
|
||||
tag = os.Args[0]
|
||||
}
|
||||
|
||||
return &Options{
|
||||
Level: "DEBUG",
|
||||
Tag: tag,
|
||||
Facility: "daemon",
|
||||
WriteSyslog: withSyslog,
|
||||
WriteConsole: true,
|
||||
}
|
||||
}
|
||||
|
||||
func Setup(opts *Options) error {
|
||||
priority, ok := priorities[opts.Level]
|
||||
if !ok {
|
||||
return fmt.Errorf("log: unknown priority %s", opts.Level)
|
||||
}
|
||||
|
||||
log.p = priority
|
||||
log.writeConsole = opts.WriteConsole
|
||||
|
||||
if opts.WriteSyslog {
|
||||
var err error
|
||||
log.l, err = gsyslog.NewLogger(priority, opts.Facility, opts.Tag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func Debug(args ...interface{}) {
|
||||
log.print(gsyslog.LOG_DEBUG, args...)
|
||||
}
|
||||
|
||||
func Info(args ...interface{}) {
|
||||
log.print(gsyslog.LOG_INFO, args...)
|
||||
}
|
||||
|
||||
func Notice(args ...interface{}) {
|
||||
log.print(gsyslog.LOG_NOTICE, args...)
|
||||
}
|
||||
|
||||
func Warning(args ...interface{}) {
|
||||
log.print(gsyslog.LOG_WARNING, args...)
|
||||
}
|
||||
|
||||
func Err(args ...interface{}) {
|
||||
log.print(gsyslog.LOG_ERR, args...)
|
||||
}
|
||||
|
||||
func Crit(args ...interface{}) {
|
||||
log.print(gsyslog.LOG_CRIT, args...)
|
||||
}
|
||||
|
||||
func Alert(args ...interface{}) {
|
||||
log.print(gsyslog.LOG_ALERT, args...)
|
||||
}
|
||||
|
||||
func Emerg(args ...interface{}) {
|
||||
log.print(gsyslog.LOG_EMERG, args...)
|
||||
}
|
||||
|
||||
func Debugln(args ...interface{}) {
|
||||
log.println(gsyslog.LOG_DEBUG, args...)
|
||||
}
|
||||
|
||||
func Infoln(args ...interface{}) {
|
||||
log.println(gsyslog.LOG_INFO, args...)
|
||||
}
|
||||
|
||||
func Noticeln(args ...interface{}) {
|
||||
log.println(gsyslog.LOG_NOTICE, args...)
|
||||
}
|
||||
|
||||
func Warningln(args ...interface{}) {
|
||||
log.println(gsyslog.LOG_WARNING, args...)
|
||||
}
|
||||
|
||||
func Errln(args ...interface{}) {
|
||||
log.println(gsyslog.LOG_ERR, args...)
|
||||
}
|
||||
|
||||
func Critln(args ...interface{}) {
|
||||
log.println(gsyslog.LOG_CRIT, args...)
|
||||
}
|
||||
|
||||
func Alertln(args ...interface{}) {
|
||||
log.println(gsyslog.LOG_ALERT, args...)
|
||||
}
|
||||
|
||||
func Emergln(args ...interface{}) {
|
||||
log.println(gsyslog.LOG_EMERG, args...)
|
||||
}
|
||||
|
||||
func Debugf(format string, args ...interface{}) {
|
||||
log.printf(gsyslog.LOG_DEBUG, format, args...)
|
||||
}
|
||||
|
||||
func Infof(format string, args ...interface{}) {
|
||||
log.printf(gsyslog.LOG_INFO, format, args...)
|
||||
}
|
||||
|
||||
func Noticef(format string, args ...interface{}) {
|
||||
log.printf(gsyslog.LOG_NOTICE, format, args...)
|
||||
}
|
||||
|
||||
func Warningf(format string, args ...interface{}) {
|
||||
log.printf(gsyslog.LOG_WARNING, format, args...)
|
||||
}
|
||||
|
||||
func Errf(format string, args ...interface{}) {
|
||||
log.printf(gsyslog.LOG_ERR, format, args...)
|
||||
}
|
||||
|
||||
func Critf(format string, args ...interface{}) {
|
||||
log.printf(gsyslog.LOG_CRIT, format, args...)
|
||||
}
|
||||
|
||||
func Alertf(format string, args ...interface{}) {
|
||||
log.printf(gsyslog.LOG_ALERT, format, args...)
|
||||
}
|
||||
|
||||
func Emergf(format string, args ...interface{}) {
|
||||
log.printf(gsyslog.LOG_EMERG, format, args...)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func Fatal(args ...interface{}) {
|
||||
log.println(gsyslog.LOG_ERR, args...)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
log.printf(gsyslog.LOG_ERR, format, args...)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// FatalError only takes effect if err != nil. If it does,
|
||||
// it prints the message with the error appended and exits
|
||||
// the program.
|
||||
func FatalError(err error, message string) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
Fatal(fmt.Sprintf("%s: %s", message, err))
|
||||
}
|
||||
|
||||
// Spew will pretty print the args if the logger is set to DEBUG priority.
|
||||
func Spew(args ...interface{}) {
|
||||
log.spew(args...)
|
||||
}
|
||||
|
||||
func ChangePriority(level string) error {
|
||||
return log.adjustPriority(level)
|
||||
}
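
A minimal usage sketch of this syslog-style package, assuming the import path git.wntrmute.dev/kyle/goutils/log and using only the functions defined in this file (Setup, DefaultOptions, ChangePriority and the level helpers):

package main

import (
	"git.wntrmute.dev/kyle/goutils/log" // assumed import path
)

func main() {
	// Console-only logging at WARNING; pass true to also write to syslog.
	if err := log.Setup(log.DefaultOptions("example", false)); err != nil {
		panic(err)
	}

	log.Info("this is filtered out at the default WARNING level")
	log.Warningf("disk usage at %d%%", 91)

	// Raise verbosity at runtime.
	if err := log.ChangePriority("DEBUG"); err != nil {
		panic(err)
	}
	log.Debug("now visible")
}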
|
|
@ -4,7 +4,7 @@ import (
|
|||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/kisom/goutils/logging"
|
||||
"git.wntrmute.dev/kyle/goutils/logging"
|
||||
)
|
||||
|
||||
var log = logging.NewConsole()
|
||||
|
|
|
@ -3,7 +3,7 @@ package logging_test
|
|||
import (
|
||||
"time"
|
||||
|
||||
"github.com/kisom/goutils/logging"
|
||||
"git.wntrmute.dev/kyle/goutils/logging"
|
||||
)
|
||||
|
||||
var log = logging.NewConsole()
|
||||
|
|
|
@ -59,7 +59,7 @@ func NewSplitFile(outpath, errpath string, overwrite bool) (*File, error) {
|
|||
if overwrite {
|
||||
fl.fo, err = os.Create(outpath)
|
||||
} else {
|
||||
fl.fo, err = os.OpenFile(outpath, os.O_WRONLY|os.O_APPEND, 0644)
|
||||
fl.fo, err = os.OpenFile(outpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
@ -69,7 +69,7 @@ func NewSplitFile(outpath, errpath string, overwrite bool) (*File, error) {
|
|||
if overwrite {
|
||||
fl.fe, err = os.Create(errpath)
|
||||
} else {
|
||||
fl.fe, err = os.OpenFile(errpath, os.O_WRONLY|os.O_APPEND, 0644)
|
||||
fl.fe, err = os.OpenFile(errpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
|
211
logging/log.go
|
@ -8,6 +8,67 @@ import (
|
|||
)
|
||||
|
||||
// Logger provides a standardised logging interface.
|
||||
//
|
||||
// Log messages consist of four components:
|
||||
//
|
||||
// 1. The **level** attaches a notion of priority to the log message.
|
||||
// Several log levels are available:
|
||||
//
|
||||
// + FATAL (32): the system is in an unusable state, and cannot
|
||||
// continue to run. Most of the logging for this will cause the
|
||||
// program to exit with an error code.
|
||||
// + CRITICAL (16): critical conditions. The error, if uncorrected, is
|
||||
// likely to cause a fatal condition shortly. An example is running
|
||||
// out of disk space. This is something that the ops team should get
|
||||
// paged for.
|
||||
// + ERROR (8): error conditions. A single error doesn't require an
|
||||
// ops team to be paged, but repeated errors should often trigger a
|
||||
// page based on threshold triggers. An example is a network
|
||||
// failure: it might be a transient failure (these do happen), but
|
||||
// most of the time it's self-correcting.
|
||||
// + WARNING (4): warning conditions. An example of this is a bad
|
||||
// request sent to a server. This isn't an error on the part of the
|
||||
// program, but it may be indicative of other things. Like errors,
|
||||
// the ops team shouldn't be paged for individual warnings, but a page might be
|
||||
// triggered if a certain threshold of warnings is reached (which is
|
||||
// typically much higher than errors). For example, repeated
|
||||
// warnings might be a sign that the system is under attack.
|
||||
// + INFO (2): informational message. This is a normal log message
|
||||
// that is used to deliver information, such as recording
|
||||
// requests. Ops teams are never paged for informational
|
||||
// messages. This is the default log level.
|
||||
// + DEBUG (1): debug-level message. These are only used during
|
||||
// development or if a deployed system repeatedly sees abnormal
|
||||
// errors.
|
||||
//
|
||||
// The numeric values indicate the priority of a given level.
|
||||
//
|
||||
// 2. The **actor** is used to specify which component is generating
|
||||
// the log message. This could be the program name, or it could be
|
||||
// a specific component inside the system.
|
||||
//
|
||||
// 3. The **event** is a short message indicating what happened. This is
|
||||
// most like the traditional log message.
|
||||
//
|
||||
// 4. The **attributes** are an optional set of key-value string pairs that
|
||||
// provide additional information.
|
||||
//
|
||||
// Additionally, each log message has an associated timestamp. For the
|
||||
// text-based logs, this is "%FT%T%z"; for the binary logs, this is a
|
||||
// 64-bit Unix timestamp. An example text-based timestamp might look like ::
|
||||
//
|
||||
// [2016-03-27T20:59:27-0700] [INFO] [actor:server event:request received] client=192.168.2.5 request-size=839
|
||||
//
|
||||
// Note that this is organised in a manner that facilitates parsing::
|
||||
//
|
||||
// /\[(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{4})\] \[(\w+)\] \[actor:(.+?) event:(.+?)\]/
|
||||
//
|
||||
// will cover the header:
|
||||
//
|
||||
// + ``$1`` contains the timestamp
|
||||
// + ``$2`` contains the level
|
||||
// + ``$3`` contains the actor
|
||||
// + ``$4`` contains the event
|
||||
type Logger interface {
|
||||
// SetLevel sets the minimum log level.
|
||||
SetLevel(Level)
|
||||
|
@ -23,66 +84,6 @@ type Logger interface {
|
|||
// Close gives the Logger the opportunity to perform any cleanup.
|
||||
Close() error
|
||||
|
||||
// Log messages consist of four components:
|
||||
//
|
||||
// 1. The **level** attaches a notion of priority to the log message.
|
||||
// Several log levels are available:
|
||||
//
|
||||
// + FATAL (32): the system is in an unsuable state, and cannot
|
||||
// continue to run. Most of the logging for this will cause the
|
||||
// program to exit with an error code.
|
||||
// + CRITICAL (16): critical conditions. The error, if uncorrected, is
|
||||
// likely to cause a fatal condition shortly. An example is running
|
||||
// out of disk space. This is something that the ops team should get
|
||||
// paged for.
|
||||
// + ERROR (8): error conditions. A single error doesn't require an
|
||||
// ops team to be paged, but repeated errors should often trigger a
|
||||
// page based on threshold triggers. An example is a network
|
||||
// failure: it might be a transient failure (these do happen), but
|
||||
// most of the time it's self-correcting.
|
||||
// + WARNING (4): warning conditions. An example of this is a bad
|
||||
// request sent to a server. This isn't an error on the part of the
|
||||
// program, but it may be indicative of other things. Like errors,
|
||||
// the ops team shouldn't be paged for errors, but a page might be
|
||||
// triggered if a certain threshold of warnings is reached (which is
|
||||
// typically much higher than errors). For example, repeated
|
||||
// warnings might be a sign that the system is under attack.
|
||||
// + INFO (2): informational message. This is a normal log message
|
||||
// that is used to deliver information, such as recording
|
||||
// requests. Ops teams are never paged for informational
|
||||
// messages. This is the default log level.
|
||||
// + DEBUG (1): debug-level message. These are only used during
|
||||
// development or if a deployed system repeatedly sees abnormal
|
||||
// errors.
|
||||
//
|
||||
// The numeric values indicate the priority of a given level.
|
||||
//
|
||||
// 2. The **actor** is used to specify which component is generating
|
||||
// the log message. This could be the program name, or it could be
|
||||
// a specific component inside the system.
|
||||
//
|
||||
// 3. The **event** is a short message indicating what happened. This is
|
||||
// most like the traditional log message.
|
||||
//
|
||||
// 4. The **attributes** are an optional set of key-value string pairs that
|
||||
// provide additional information.
|
||||
//
|
||||
// Additionally, each log message has an associated timestamp. For the
|
||||
// text-based logs, this is "%FT%T%z"; for the binary logs, this is a
|
||||
// 64-bit Unix timestamp. An example text-based timestamp might look like ::
|
||||
//
|
||||
// [2016-03-27T20:59:27-0700] [INFO] [actor:server event:request received] client=192.168.2.5 request-size=839
|
||||
//
|
||||
// Note that this is organised in a manner that facilitates parsing::
|
||||
//
|
||||
// /\[(\d{4}-\d{3}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{4})\] \[(\w+\)]\) \[actor:(.+?) event:(.+?)\]/
|
||||
//
|
||||
// will cover the header:
|
||||
//
|
||||
// + ``$1`` contains the timestamp
|
||||
// + ``$2`` contains the level
|
||||
// + ``$3`` contains the actor
|
||||
// + ``$4`` contains the event
|
||||
Debug(actor, event string, attrs map[string]string)
|
||||
Info(actor, event string, attrs map[string]string)
|
||||
Warn(actor, event string, attrs map[string]string)
|
||||
|
@ -277,3 +278,93 @@ func (lw *LogWriter) SetLevel(l Level) {
|
|||
|
||||
// Close is a no-op that satisfies the Logger interface.
|
||||
func (lw *LogWriter) Close() error { return nil }
|
||||
|
||||
// Multi allows combining of loggers.
|
||||
type Multi struct {
|
||||
loggers []Logger
|
||||
}
|
||||
|
||||
func NewMulti(loggers ...Logger) *Multi {
|
||||
return &Multi{loggers: loggers}
|
||||
}
|
||||
|
||||
func (m *Multi) SetLevel(level Level) {
|
||||
for _, l := range m.loggers {
|
||||
l.SetLevel(level)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Multi) Good() bool {
|
||||
good := true
|
||||
for _, l := range m.loggers {
|
||||
good = good && l.Good()
|
||||
}
|
||||
|
||||
return good
|
||||
}
|
||||
|
||||
func (m *Multi) Status() error {
|
||||
for _, l := range m.loggers {
|
||||
if err := l.Status(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Multi) Close() error {
|
||||
for _, l := range m.loggers {
|
||||
l.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Multi) Debug(actor, event string, attrs map[string]string) {
|
||||
for _, l := range m.loggers {
|
||||
l.Debug(actor, event, attrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Multi) Info(actor, event string, attrs map[string]string) {
|
||||
for _, l := range m.loggers {
|
||||
l.Info(actor, event, attrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Multi) Warn(actor, event string, attrs map[string]string) {
|
||||
for _, l := range m.loggers {
|
||||
l.Warn(actor, event, attrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Multi) Error(actor, event string, attrs map[string]string) {
|
||||
for _, l := range m.loggers {
|
||||
l.Error(actor, event, attrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Multi) Critical(actor, event string, attrs map[string]string) {
|
||||
for _, l := range m.loggers {
|
||||
l.Critical(actor, event, attrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Multi) Fatal(actor, event string, attrs map[string]string) {
|
||||
for _, l := range m.loggers {
|
||||
l.Fatal(actor, event, attrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Multi) FatalCode(exitcode int, actor, event string, attrs map[string]string) {
|
||||
for _, l := range m.loggers {
|
||||
l.FatalCode(exitcode, actor, event, attrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Multi) FatalNoDie(actor, event string, attrs map[string]string) {
|
||||
for _, l := range m.loggers {
|
||||
l.FatalNoDie(actor, event, attrs)
|
||||
}
|
||||
}
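
Multi fans each call out to every wrapped logger. A hedged usage sketch combining the console and split-file loggers from this package; the import path is assumed from the module name, and the NewSplitFile signature is taken from the hunk above:

package main

import (
	"git.wntrmute.dev/kyle/goutils/logging" // assumed import path
)

func main() {
	console := logging.NewConsole()

	// NewSplitFile appends to (or now creates) separate out/err logs.
	file, err := logging.NewSplitFile("app.log", "app.err", false)
	if err != nil {
		panic(err)
	}

	log := logging.NewMulti(console, file)
	defer log.Close()

	log.Info("server", "request received", map[string]string{
		"client": "192.168.2.5",
	})
	if !log.Good() {
		log.Warn("server", "logger degraded", nil)
	}
}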
|
||||
|
|
|
@ -53,3 +53,12 @@ func TestDestroyLogFiles(t *testing.T) {
|
|||
os.Remove("fw2.log")
|
||||
os.Remove("fw2.err")
|
||||
}
|
||||
|
||||
func TestMulti(t *testing.T) {
|
||||
c1 := NewConsole()
|
||||
c2 := NewConsole()
|
||||
m := NewMulti(c1, c2)
|
||||
if !m.Good() {
|
||||
t.Fatal("failed to set up multi logger")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,8 +4,8 @@ import (
|
|||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/kisom/goutils/assert"
|
||||
"github.com/kisom/goutils/testio"
|
||||
"git.wntrmute.dev/kyle/goutils/assert"
|
||||
"git.wntrmute.dev/kyle/goutils/testio"
|
||||
)
|
||||
|
||||
func TestMWC(t *testing.T) {
|
||||
|
|
|
@ -0,0 +1,49 @@
|
|||
// Package rand contains utilities for interacting with math/rand, including
|
||||
// seeding it from a random seed.
|
||||
package rand
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
mrand "math/rand"
|
||||
)
|
||||
|
||||
// CryptoUint64 generates a cryptographically-secure 64-bit integer.
|
||||
func CryptoUint64() (uint64, error) {
|
||||
bs := make([]byte, 8)
|
||||
_, err := rand.Read(bs)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return binary.BigEndian.Uint64(bs), nil
|
||||
}
|
||||
|
||||
// Seed initialises the non-cryptographic PRNG with a random,
|
||||
// cryptographically secure value, so that the math/rand sequence starts
|
||||
// from an unpredictable point. The returned 64-bit value is the seed.
|
||||
func Seed() (uint64, error) {
|
||||
seed, err := CryptoUint64()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// NB: this is permitted.
|
||||
mrand.Seed(int64(seed))
|
||||
return seed, nil
|
||||
}
|
||||
|
||||
// Int is a wrapper for math/rand.Int so only one package needs to be imported.
|
||||
func Int() int {
|
||||
return mrand.Int()
|
||||
}
|
||||
|
||||
// Intn is a wrapper for math/rand.Intn so only one package needs to be imported.
|
||||
func Intn(max int) int {
|
||||
return mrand.Intn(max)
|
||||
}
|
||||
|
||||
// Intn2 returns a random value in the half-open interval [min, max).
|
||||
func Intn2(min, max int) int {
|
||||
return Intn(max-min) + min
|
||||
}
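
A hedged usage sketch of this package (assumed import path); note that Intn2 is exclusive of max, as documented above:

package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/rand" // assumed import path
)

func main() {
	seed, err := rand.Seed() // seed math/rand from crypto/rand
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("seeded with %d\n", seed)

	// Draw a value in [1000, 5000).
	fmt.Println(rand.Intn2(1000, 5000))
}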
|
|
@ -0,0 +1,74 @@
|
|||
package rand
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
mrand "math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCryptoUint64(t *testing.T) {
|
||||
n1, err := CryptoUint64()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
n2, err := CryptoUint64()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// This has such a low chance of occurring that it's likely to be
|
||||
// indicative of a bad CSPRNG.
|
||||
if n1 == n2 {
|
||||
t.Fatalf("repeated random uint64s: %d", n1)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntn(t *testing.T) {
|
||||
expected := []int{3081, 4887, 4847, 1059, 3081}
|
||||
mrand.Seed(1)
|
||||
for i := 0; i < 5; i++ {
|
||||
n := Intn2(1000, 5000)
|
||||
|
||||
if n != expected[i] {
|
||||
fmt.Printf("invalid sequence at %d: expected %d, have %d", i, expected[i], n)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeed(t *testing.T) {
|
||||
seed1, err := Seed()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var seed2 uint64
|
||||
n1 := Int()
|
||||
tries := 0
|
||||
|
||||
for {
|
||||
seed2, err = Seed()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if seed1 != seed2 {
|
||||
break
|
||||
}
|
||||
|
||||
tries++
|
||||
|
||||
if tries > 3 {
|
||||
t.Fatal("can't generate two unique seeds")
|
||||
}
|
||||
}
|
||||
|
||||
n2 := Int()
|
||||
|
||||
// Again, this is not impossible, merely statistically improbable and a
|
||||
// potential canary for RNG issues.
|
||||
if n1 == n2 {
|
||||
t.Fatalf("repeated integers fresh from two unique seeds: %d/%d -> %d",
|
||||
seed1, seed2, n1)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,62 @@
|
|||
// Package seekbuf implements a read-seekable buffer.
|
||||
package seekbuf
|
||||
|
||||
import "io"
|
||||
|
||||
// Buffer is a ReadWriteCloser that supports seeking. It's intended to
|
||||
// replicate the functionality of bytes.Buffer that I use in my projects.
|
||||
//
|
||||
// Note that the seeking is limited to the read marker; all writes are
|
||||
// append-only.
|
||||
type Buffer struct {
|
||||
data []byte
|
||||
pos int
|
||||
}
|
||||
|
||||
func New(data []byte) *Buffer {
|
||||
return &Buffer{
|
||||
data: data,
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Buffer) Read(p []byte) (int, error) {
|
||||
if b.pos >= len(b.data) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
n := copy(p, b.data[b.pos:])
|
||||
b.pos += n
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (b *Buffer) Write(p []byte) (int, error) {
|
||||
b.data = append(b.data, p...)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// Seek sets the read pointer to pos.
|
||||
func (b *Buffer) Seek(pos int) {
|
||||
b.pos = pos
|
||||
}
|
||||
|
||||
// Rewind resets the read pointer to 0.
|
||||
func (b *Buffer) Rewind() {
|
||||
b.Seek(0)
|
||||
}
|
||||
|
||||
// Close clears all the data out of the buffer and sets the read position to 0.
|
||||
func (b *Buffer) Close() error {
|
||||
b.data = nil
|
||||
b.pos = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// Len returns the length of data remaining to be read.
|
||||
func (b *Buffer) Len() int {
|
||||
return len(b.data[b.pos:])
|
||||
}
|
||||
|
||||
// Bytes returns the underlying bytes from the current position.
|
||||
func (b *Buffer) Bytes() []byte {
|
||||
return b.data[b.pos:]
|
||||
}
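
A hedged usage sketch of the buffer (assumed import path): writes append, reads advance the marker, and Rewind makes the data readable again:

package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/seekbuf" // assumed import path
)

func main() {
	buf := seekbuf.New([]byte("hello, "))
	buf.Write([]byte("world!")) // writes always append

	p := make([]byte, 32)
	n, _ := buf.Read(p)
	fmt.Printf("read %d bytes: %s\n", n, p[:n])

	buf.Rewind() // the read marker can be repositioned
	fmt.Println(buf.Len(), "bytes available again")
}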
|
|
@ -0,0 +1,39 @@
|
|||
package seekbuf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/assert"
|
||||
)
|
||||
|
||||
func TestSeeking(t *testing.T) {
|
||||
partA := []byte("hello, ")
|
||||
partB := []byte("world!")
|
||||
|
||||
buf := New(partA)
|
||||
assert.BoolT(t, buf.Len() == len(partA), fmt.Sprintf("on init: have length %d, want length %d", buf.Len(), len(partA)))
|
||||
|
||||
b := make([]byte, 32)
|
||||
|
||||
n, err := buf.Read(b)
|
||||
assert.NoErrorT(t, err)
|
||||
assert.BoolT(t, buf.Len() == 0, fmt.Sprintf("after reading 1: have length %d, want length 0", buf.Len()))
|
||||
assert.BoolT(t, n == len(partA), fmt.Sprintf("after reading 2: have length %d, want length %d", n, len(partA)))
|
||||
|
||||
n, err = buf.Write(partB)
|
||||
assert.NoErrorT(t, err)
|
||||
assert.BoolT(t, n == len(partB), fmt.Sprintf("after writing: have length %d, want length %d", n, len(partB)))
|
||||
|
||||
n, err = buf.Read(b)
|
||||
assert.NoErrorT(t, err)
|
||||
assert.BoolT(t, buf.Len() == 0, fmt.Sprintf("after rereading 1: have length %d, want length 0", buf.Len()))
|
||||
assert.BoolT(t, n == len(partB), fmt.Sprintf("after rereading 2: have length %d, want length %d", n, len(partB)))
|
||||
|
||||
partsLen := len(partA) + len(partB)
|
||||
buf.Rewind()
|
||||
assert.BoolT(t, buf.Len() == partsLen, fmt.Sprintf("after rewinding: have length %d, want length %d", buf.Len(), partsLen))
|
||||
|
||||
buf.Close()
|
||||
assert.BoolT(t, buf.Len() == 0, fmt.Sprintf("after closing, have length %d, want length 0", buf.Len()))
|
||||
}
|
|
@ -5,9 +5,15 @@ import (
|
|||
"os"
|
||||
)
|
||||
|
||||
type WriteStringCloser interface {
|
||||
Write([]byte) (int, error)
|
||||
WriteString(string) (int, error)
|
||||
Close() error
|
||||
}
|
||||
|
||||
// Tee emulates the Unix tee(1) command.
|
||||
type Tee struct {
|
||||
f *os.File
|
||||
f WriteStringCloser
|
||||
Verbose bool
|
||||
}
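
The point of this change is that Tee no longer needs a concrete *os.File. A hedged sketch showing that both *os.File and a small in-memory type satisfy the new interface; the local interface copy below mirrors the one added here, and memlog is a hypothetical name used only for illustration:

package main

import (
	"bytes"
	"os"
)

// memlog is a hypothetical in-memory sink; bytes.Buffer already provides
// Write and WriteString, so only Close needs to be added.
type memlog struct {
	bytes.Buffer
}

func (m *memlog) Close() error { return nil }

// Local copy of the interface introduced above; *os.File and *memlog both
// satisfy it, which is what lets Tee drop its concrete *os.File field.
type writeStringCloser interface {
	Write([]byte) (int, error)
	WriteString(string) (int, error)
	Close() error
}

var (
	_ writeStringCloser = (*os.File)(nil)
	_ writeStringCloser = (*memlog)(nil)
)

func main() {}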
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@ This is a collection of various utility io types:
|
|||
* LoggingBuffer
|
||||
|
||||
You can check out the
|
||||
[godoc](https://godoc.org/github.com/kisom/goutils/testio) for dtails.
|
||||
[godoc](https://godoc.io/git.wntrmute.dev/kyle/goutils/testio) for details.
|
||||
|
||||
It was imported from [kisom/testio](https://github.com/kisom/testio/). The
|
||||
original Git directory is preserved in git-hist.tar.xz.
|
||||
|
|
Binary file not shown.
|
@ -194,6 +194,11 @@ func (buf *BufCloser) Bytes() []byte {
|
|||
return buf.buf.Bytes()
|
||||
}
|
||||
|
||||
// Len returns the length of the buffer.
|
||||
func (buf *BufCloser) Len() int {
|
||||
return buf.buf.Len()
|
||||
}
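
A hedged sketch of the new Len method on BufCloser; the import path is assumed, and NewBufCloser's []byte argument is inferred from its doc comment and the nil passed in the dbg tests:

package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/testio" // assumed import path
)

func main() {
	buf := testio.NewBufCloser([]byte("ping"))
	buf.Write([]byte("-pong"))

	// Len reports how many bytes the buffer currently holds.
	fmt.Println(buf.Len()) // 9
	fmt.Println(string(buf.Bytes()))
}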
|
||||
|
||||
// NewBufCloser creates and initializes a new BufCloser using buf as
|
||||
// its initial contents. It is intended to prepare a BufCloser to read
|
||||
// existing data. It can also be used to size the internal buffer for
|
||||
|
|