Compare commits

..

115 Commits

Author SHA1 Message Date
Kyle Isom beccb551e2 add minmax 2025-04-10 01:17:11 -07:00
Kyle Isom c761d98b82 additional debugging for basic constraints 2024-08-22 18:06:09 -07:00
Kyle Isom e68d22337b cmd: add die roller 2024-06-14 20:27:00 -07:00
Kyle Isom 4cb6f5b6f0 fix minor issues in Print calls. 2024-05-19 20:51:35 -07:00
Kyle Isom 6d5708800f circleci: Some days I really hate computers. 2024-05-19 20:48:59 -07:00
Kyle Isom fa3eb821e6 circleci: Update go container. 2024-05-19 20:48:25 -07:00
Kyle Isom dd5ed403b9 circleci: Update go version. 2024-05-19 20:46:28 -07:00
Kyle Isom b4fde22c31 circleci: don't use bazel 2024-05-19 20:45:32 -07:00
Kyle Isom 9715293773 Add zsearch and dumpbytes. 2024-05-19 20:41:50 -07:00
Kyle Isom f6d227946b Get rid of bazel.
Good riddance. More of a headache than it's worth.
2024-05-19 20:24:38 -07:00
Kyle Isom 6f7a8fa4d4 Add host tool. 2024-04-26 14:15:08 -07:00
Kyle Isom 622f6a2638 seekbuf: add Bytes method. 2023-08-27 20:56:47 -07:00
Kyle Isom e3162b6164 log: remove extraneous print 2023-05-26 22:37:56 +00:00
Kyle Isom 9d1e3ab2f0 backoff: new package. 2023-05-26 17:28:49 +00:00
Kyle Isom dd98356479 cmd/data_sync: update README 2023-05-11 19:42:31 -07:00
Kyle Isom 9307f44601 README: add data_sync 2023-05-11 19:41:33 -07:00
Kyle Isom b9f69e4aa1 data_sync: sync homedir to external storage. 2023-05-11 19:18:29 -07:00
Kyle Isom 7a4e7977c3 log: fixups, add FatalError
- Support suppressing console output.
- DefaultDebugOptions sets the correct tag now.
- FatalError(error, string) calls log.Fatal(message, err) if err != nil.
2023-05-11 19:03:18 -07:00
Kyle Isom 72fdc255e7 db: clean out dependency on stretchr 2023-05-11 17:14:19 -07:00
Kyle Isom 63957ff22a remove unused dependencies 2023-05-07 11:57:38 -07:00
Kyle Isom 83d42dc489 bazel: running gazelle to pick up dependency changes 2023-05-06 13:37:58 -07:00
Kyle Isom 984baa6bb4 working on removing dependency on cfssl. 2023-05-06 13:25:30 -07:00
Kyle Isom 34982c122f config: add ListKeys 2023-05-06 01:13:41 -07:00
Kyle Isom fb11c0c27c syslog: support not using syslog 2023-05-06 00:57:42 -07:00
Kyle Isom 27cc67d2cf this is why i hate computers 2023-05-04 23:58:51 -07:00
Kyle Isom 0fe13439b6 moving circleci file to the right place. 2023-05-04 23:57:40 -07:00
Kyle Isom fb9caec663 Update CircleCI configuration. 2023-05-04 23:55:26 -07:00
Kyle Isom e3e83630b5 Add CircleCI configuration. 2023-05-04 23:54:47 -07:00
Kyle Isom c1f06604e3 fix dependency issues. 2023-05-04 23:45:08 -07:00
Kyle Isom 9091cc7682 syslog: add Spew option.
This is a debug print that spews any arguments passed in.
2023-05-04 23:40:20 -07:00
Kyle Isom 74ce7bc58a Cleaning up the codebase. 2023-05-04 23:22:35 -07:00
Kyle Isom 8b2d10209e Clean up the README. 2023-05-04 22:58:21 -07:00
Kyle Isom 770100b688 fileutil: start working on symlink checking 2023-05-04 22:52:16 -07:00
Kyle Isom 29630c55a9 packaging is a dead end thus far
I think I'm going to have to write my own rules to do this.
2023-05-04 21:03:08 -07:00
Kyle Isom 3c2ec896f8 cmd/cruntar: avoid writing files outside archive 2023-05-04 17:20:22 -07:00
Kyle Isom 7828726ba4 Updating x/sys. 2023-05-04 17:13:06 -07:00
Kyle Isom 458f3ceaed update dependencies to fix dependabot warnings. 2023-05-04 17:10:41 -07:00
Kyle Isom 2c45ae7b4e syslog: add new logging package. 2023-05-04 16:20:39 -07:00
Kyle Isom c1b8b72cf1 bazel-test: add sizes to all tests.
This gets rid of the warning about mismatched test sizes.
2023-05-04 16:08:18 -07:00
Kyle Isom bfc7fedbf9 seekbuf: add test file 2023-05-04 16:08:11 -07:00
Kyle Isom 965312f48e update README/LICENSE
LICENSE: update years.

README: add missing pieces.
2023-05-04 16:07:34 -07:00
Kyle Isom 237aa46ddd cmd/diskimg: new disk imaging tool 2023-05-04 16:06:56 -07:00
Kyle Isom f8c64d3be5 bazel: updating build files 2023-05-04 15:11:15 -07:00
Kyle Isom d66cfe1145 Cut over to Bazel. 2023-05-04 14:00:30 -07:00
Kyle Isom ad03c5f991 Mass rewrite imports -> git.wntrmute.dev repo. 2023-05-04 13:58:43 -07:00
Kyle Isom 0dd4e1c6ca Update README.md
Depart for other waters.
2023-05-04 13:46:40 -07:00
Kyle Isom 078230217d ahash: add SumLimitedReader 2023-05-04 13:41:33 -07:00
Kyle Isom 90318f861b Add EEPROM image generator. 2022-06-24 14:36:51 -07:00
Kyle Isom 3bb1362c0e tee: allow writing strings. 2022-02-20 17:43:11 -08:00
Kyle Isom 30ffbbdbc5 config: add test data for iniconf. 2022-02-20 17:43:11 -08:00
Kyle Isom b893e99864 config: add default path, customized configs.
A customised config is an ini file with a [default] section and some
other name sections; a config file is loaded from the default section
with any keys in the named section being added in, overriding keys in
the host. This allows for, e.g. setting different paths based on the
host name or operating system.
2022-02-20 17:43:11 -08:00
Kyle Isom c7c51568d8 seekbuf lost the Clear function. 2022-02-05 15:40:13 -08:00
Kyle Isom 7793021260 New package: seekbuf (a seekable buffer). 2022-02-05 15:00:39 -08:00
Kyle Isom 692562818c clean README 2020-11-26 20:32:46 -08:00
Kyle Isom 9e19346fc0 add sum file 2020-11-26 20:09:46 -08:00
Kyle Isom cb827169dc switching hosting providers 2020-11-26 20:09:37 -08:00
Kyle Isom 027d0173bc logging: finish multi implementation 2020-11-11 21:15:02 -08:00
Kyle Isom 6f19b69bbd logging: add CREATE flag to file-based loggers. 2020-11-11 10:06:13 -08:00
Kyle Isom 7e118bfdb0 logging: add Multi 2020-11-11 09:52:07 -08:00
Kyle Isom e0868841bf
Merge pull request #7 from santosh653/master 2020-11-04 07:24:04 -08:00
santosh653 c558405d11
Update .travis.yml
Excluding go version 1.9 as only go version1.13 onwards are supported.
2020-11-04 05:22:10 -05:00
santosh653 a1eb035af7
Update .travis.yml
Adding Power support
2020-10-20 04:56:21 -04:00
Kyle Isom 5eedcff042 add rand package, utilities for math/rand. 2020-06-02 17:26:17 -07:00
Kyle Isom 6ac8eb04b4 Updated by OWASP Threat Dragon 2020-03-05 15:29:57 -08:00
Kyle Isom 4a4e4cd3fd Updated by OWASP Threat Dragon 2020-03-05 12:20:49 -08:00
Kyle Isom 1207093a56 Created by OWASP Threat Dragon 2020-03-05 12:19:57 -08:00
Kyle Isom 2b6ae03d1a config: add a Require message.
Also update explanation of the intended use-case for this package.
2020-03-02 17:36:29 -08:00
Kyle Isom ef0f14a512 Add global config package.
This is a simple config system that I find myself wanting a lot.
2020-03-02 17:11:55 -08:00
Kyle Isom 6ae393ebf2 Have to explicitly allow darwin/amd64 in the build tag. 2020-01-16 06:19:21 -08:00
Kyle Isom 76d88c220d Add go mod, update ftime.
+ Stat_t on Darwin amd64 now uses the same struct fields
  for file times as Lunix, not the BSD ones. I need to
  follow up on this.
2020-01-16 06:13:31 -08:00
Kyle Isom 40e015373f Point to goutils-pkg. 2020-01-16 06:13:18 -08:00
Kyle Isom 50c226b726 Add parts. 2019-01-08 12:46:53 -08:00
Kyle Isom 070ffb9dff Cleanup testio. 2018-12-17 18:16:52 -08:00
Kyle Isom 5ac05bd298 Add debug printer. 2018-12-17 18:12:51 -08:00
Kyle Isom cf1edf2d31 Add simple proxy. 2018-12-12 14:57:08 -08:00
Kyle Isom 03e8958dd7
Merge pull request #6 from golint-fixer/master
Fix golint import path
2018-10-31 18:31:23 -07:00
golint fixer 6cef585071 Fix golint import path 2018-10-25 09:37:10 -05:00
Kyle Isom 06678499d4 Add missing format arg. 2018-09-15 16:49:46 -07:00
Kyle Isom fad17065fe Update README. 2018-09-15 15:26:42 -07:00
Kyle Isom 63e0cbeacb Update CHANGELOG for release. 2018-09-15 15:10:44 -07:00
Kyle Isom 231b98dd68 Add kgz, a gzip tool. 2018-09-15 15:08:37 -07:00
Kyle Isom 160a42ec26 travis: drop go 1.8 2018-04-24 10:47:35 -07:00
Kyle Isom b6b33e00c8 cmd/showimp: support ignoring directories. 2018-04-24 09:44:45 -07:00
Kyle Isom 9e1aed257b rhash: use io.Copy to avoid reading the full file. 2018-04-24 09:44:45 -07:00
Kyle Isom 411907c0ad
Merge pull request #5 from cbroglie/master
Make key and extended usages output stable
2017-11-22 16:24:57 -08:00
Christopher Broglie 06c7f8f42f Make key and extended usages output stable 2017-11-21 15:14:51 -08:00
Kyle Isom 8b638065d1 Fix ahash test Sprintf's. 2017-11-21 13:00:20 -08:00
Kyle Isom 9ac378eaa5 go lint → golint 2017-11-21 12:55:51 -08:00
Kyle Isom eaaaabe439 go vet → golint 2017-11-21 12:52:26 -08:00
Kyle Isom 4122f01644 Fix travis errors. 2017-11-21 12:42:53 -08:00
Kyle Isom 263a5d3973 Add travis config. 2017-11-21 12:30:06 -08:00
Kyle Isom afef3eea62 Fix Logger interface. 2017-11-21 12:19:38 -08:00
Kyle Isom d6c5360a06 Start a change log. 2017-11-16 08:34:46 -08:00
Kyle Isom 0ab21e12f3 Reformat cmd/utc/main.go. 2017-11-16 08:32:42 -08:00
Kyle Isom 832475db56 gofmt cleanups. 2017-11-16 08:32:33 -08:00
Kyle Isom cb16cfa183 golint cleanups. 2017-11-16 08:32:05 -08:00
Kyle Isom d083a39a7d Fix README formatting. 2017-11-16 08:24:35 -08:00
Kyle Isom fc77225740 Add rhash for hashing remote files.
This was originally rsha256, but it's been updated to support multiple
algorithms.
2017-11-16 08:24:13 -08:00
Kyle Isom 41df73d7a8 Add ahash package. 2017-11-16 07:52:36 -08:00
Kyle Isom 0dc478746a Add rsha256 - remote sha256 utility. 2017-11-15 17:40:38 -08:00
Kyle Isom f44bbc9eca Add cruntar - untar for ChromeOS. 2017-11-15 17:37:54 -08:00
Kyle Isom 1df0350fc7 Cleanups and docs. 2017-10-20 12:38:41 -07:00
Kyle Isom d42c1fa1c5 Add subjhash tool. Minor cleanups. 2017-10-20 12:18:56 -07:00
Kyle Isom 4fa6e4ab0e Update README. 2017-09-26 16:01:36 -07:00
Kyle Isom a3ead16faf Add ski utility. 2017-09-26 15:59:39 -07:00
Kyle Isom c8f839de73 Cleanups and improvements. 2017-09-12 03:38:47 -07:00
Kyle Isom 0c56a477bc Add tee. 2017-09-12 03:25:12 -07:00
Kyle Isom 763dbec310 Merge pull request #3 from joshlf/stealchain-server
cmd/stealchain-server: Present server cert, request client cert
2017-08-29 12:47:16 -07:00
Joshua Liebow-Feeser 0e6b60a2c4 cmd/stealchain-server: Present server cert, request client cert 2017-08-29 12:42:19 -07:00
Kyle Isom be34ad263d Merge pull request #2 from joshlf/stealchain-server
cmd/stealchain-server: Explicitly perform TLS handshake.
2017-08-29 10:55:49 -07:00
Joshua Liebow-Feeser 48b03c908d cmd/stealchain-server: Explicitly perform TLS handshake. 2017-08-29 09:53:51 -07:00
Kyle Isom 70d7ff505b Merge pull request #1 from joshlf/stealchain-server
Add stealchain-server utility
2017-08-28 17:39:30 -07:00
Joshua Liebow-Feeser 68e5822176 cmd/stealchain-server: Initial commit. 2017-08-28 17:05:34 -07:00
Kyle Isom 54dd461733 Add option to print the SHA256 hash of a certificate.
This hash isn't the SKI --- it's a SHA256 hash of the raw certificate
contents.
2017-05-03 11:01:33 -07:00
Kyle Isom eba03a2f4a Update README.
Adds atpoing, renfnv, and yamll to list of programs.
2017-05-03 11:01:01 -07:00
107 changed files with 7170 additions and 205 deletions

.circleci/config.yml (new file)
@@ -0,0 +1,42 @@
# Use the latest 2.1 version of CircleCI pipeline process engine.
# See: https://circleci.com/docs/2.0/configuration-reference
version: 2.1
# Define a job to be invoked later in a workflow.
# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
jobs:
testbuild:
working_directory: ~/repo
# Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
# See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
docker:
- image: cimg/go:1.22.2
# Add steps to the job
# See: https://circleci.com/docs/2.0/configuration-reference/#steps
steps:
- checkout
- restore_cache:
keys:
- go-mod-v4-{{ checksum "go.sum" }}
- run:
name: Install Dependencies
command: go mod download
- save_cache:
key: go-mod-v4-{{ checksum "go.sum" }}
paths:
- "/go/pkg/mod"
- run:
name: Run tests
command: go test ./...
- run:
name: Run build
command: go build ./...
- store_test_results:
path: /tmp/test-reports
# Invoke jobs via workflows
# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
workflows:
testbuild:
jobs:
- testbuild

.gitignore (new file)
@@ -0,0 +1,4 @@
bazel-bin
bazel-goutils
bazel-out
bazel-testlogs

.idea/.gitignore (new file)
@@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

.idea/goutils.iml (new file)
@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="WEB_MODULE" version="4">
<component name="Go" enabled="true" />
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

.idea/modules.xml (new file)
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/goutils.iml" filepath="$PROJECT_DIR$/.idea/goutils.iml" />
</modules>
</component>
</project>

.idea/vcs.xml (new file)
@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>

.travis.yml (new file)
@@ -0,0 +1,26 @@
arch:
- amd64
- ppc64le
sudo: false
language: go
go:
- tip
- 1.9
jobs:
exclude:
- go: 1.9
arch: amd64
- go: 1.9
arch: ppc64le
script:
- go get golang.org/x/lint/golint
- go get golang.org/x/tools/cmd/cover
- go get github.com/kisom/goutils/...
- go test -cover github.com/kisom/goutils/...
- golint github.com/kisom/goutils/...
notifications:
email:
recipients:
- coder@kyleisom.net
on_success: change
on_failure: change

CHANGELOG (new file)
@@ -0,0 +1,27 @@
Release 1.2.1 - 2018-09-15
+ Add missing format argument to Errorf call in kgz.
Release 1.2.0 - 2018-09-15
+ Adds the kgz command line utility.
Release 1.1.0 - 2017-11-16
+ A number of new command line utilities were added
+ atping
+ cruntar
+ renfnv
+
+ ski
+ subjhash
+ yamll
+ new package: ahash
+ package for loading hashes from an algorithm string
+ new certificate loading functions in the lib package
+ new package: tee
+ emulates tee(1)

LICENSE
@@ -1,4 +1,4 @@
Copyright (c) 2015 Kyle Isom <kyle@tyrfingr.is>
Copyright (c) 2015-2023 Kyle Isom <kyle@tyrfingr.is>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
@@ -11,3 +11,34 @@ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
=======================================================================
The backoff package (written during my time at Cloudflare) is released
under the following license:
Copyright (c) 2016 CloudFlare Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

README.md
@@ -2,12 +2,19 @@ GOUTILS
This is a collection of small utility code I've written in Go; the `cmd/`
directory has a number of command-line utilities. Rather than keep all
of these in superfluous repositories of their own, I'm putting them here.
of these in superfluous repositories of their own, or rewriting them
for each project, I'm putting them here.
The project can be built with the standard Go tooling, or it can be built
with Bazel.
Contents:
ahash/ Provides hashes from string algorithm specifiers.
assert/ Error handling, assertion-style.
backoff/ Implementation of an intelligent backoff strategy.
cmd/
atping/ Automated TCP ping, meant for putting in cronjobs.
certchain/ Display the certificate chain from a
TLS connection.
certdump/ Dump certificate information.
@@ -18,32 +25,56 @@ Contents:
the time to expiry and checking for revocations.
clustersh/ Run commands or transfer files across multiple
servers via SSH.
cruntar/ Untar an archive with hard links, copying instead of
linking.
csrpubdump/ Dump the public key from an X.509 certificate request.
data_sync/ Sync the user's homedir to external storage.
diskimg/ Write a disk image to a device.
eig/ EEPROM image generator.
fragment/ Print a fragment of a file.
jlp/ JSON linter/prettifier.
kgz/ Custom gzip compressor / decompressor that handles 99%
of my use cases.
parts/ Simple parts database management for my collection of
electronic components.
pem2bin/ Dump the binary body of a PEM-encoded block.
pembody/ Print the body of a PEM certificate.
pemit/ Dump data to a PEM file.
showimp/ List the external (e.g. non-stdlib and outside the
current working directory) imports for a Go file.
readchain/ Print the common name for the certificates
in a bundle.
showimp Display the external imports in a package.
renfnv/ Rename a file to base32-encoded 64-bit FNV-1a hash.
rhash/ Compute the digest of remote files.
showimp/ List the external (e.g. non-stdlib and outside the
current working directory) imports for a Go file.
ski Display the SKI for PEM-encoded TLS material.
sprox/ Simple TCP proxy.
stealchain/ Dump the verified chain from a TLS
connection.
connection to a server.
stealchain- Dump the verified chain from a TLS
server/ connection from a client.
subjhash/ Print or match subject info from a certificate.
tlskeypair/ Check whether a TLS certificate and key file match.
utc/ Convert times to UTC.
yamll/ A small YAML linter.
config/ A simple global configuration system where configuration
data is pulled from a file or an environment variable
transparently.
dbg/ A debug printer.
die/ Death of a program.
fileutil/ Common file functions.
fileutil/ Common file functions.
lib/ Commonly-useful functions for writing Go programs.
logging/ A logging library.
mwc/ MultiwriteCloser implementation.
rand/ Utilities for working with math/rand.
sbuf/ A byte buffer that can be wiped.
testio/ Various I/O utilities useful during testing.
testutil/ Various utility functions useful during testing.
seekbuf/ A read-seekable byte buffer.
syslog/ Syslog-type logging.
tee/ Emulate tee(1)'s functionality in io.Writers.
testio/ Various I/O utilities useful during testing.
testutil/ Various utility functions useful during testing.
Each program should have a small README in the directory with more
information.
All code here is licensed under the MIT license.
All code here is licensed under the ISC license.

ahash/ahash.go (new file)
@@ -0,0 +1,263 @@
// Package ahash provides support for hashing data with a selectable
// hash function.
package ahash
import (
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"errors"
"hash"
"hash/adler32"
"hash/crc32"
"hash/crc64"
"hash/fnv"
"io"
"sort"
"git.wntrmute.dev/kyle/goutils/assert"
"golang.org/x/crypto/blake2b"
"golang.org/x/crypto/blake2s"
"golang.org/x/crypto/md4"
"golang.org/x/crypto/ripemd160"
"golang.org/x/crypto/sha3"
)
func sha224Slicer(bs []byte) []byte {
sum := sha256.Sum224(bs)
return sum[:]
}
func sha256Slicer(bs []byte) []byte {
sum := sha256.Sum256(bs)
return sum[:]
}
func sha384Slicer(bs []byte) []byte {
sum := sha512.Sum384(bs)
return sum[:]
}
func sha512Slicer(bs []byte) []byte {
sum := sha512.Sum512(bs)
return sum[:]
}
// Hash represents a generic hash function that may or may not be secure. It
// satisfies the hash.Hash interface.
type Hash struct {
hash.Hash
secure bool
algo string
}
// HashAlgo returns the name of the underlying hash algorithm.
func (h *Hash) HashAlgo() string {
return h.algo
}
// IsSecure returns true if the Hash is a cryptographic hash.
func (h *Hash) IsSecure() bool {
return h.secure
}
// Sum32 returns true if the underlying hash is a 32-bit hash; if it is, the
// uint32 parameter will contain the hash.
func (h *Hash) Sum32() (uint32, bool) {
h32, ok := h.Hash.(hash.Hash32)
if !ok {
return 0, false
}
return h32.Sum32(), true
}
// IsHash32 returns true if the underlying hash is a 32-bit hash function.
func (h *Hash) IsHash32() bool {
_, ok := h.Hash.(hash.Hash32)
return ok
}
// Sum64 returns true if the underlying hash is a 64-bit hash; if it is, the
// uint64 parameter will contain the hash.
func (h *Hash) Sum64() (uint64, bool) {
h64, ok := h.Hash.(hash.Hash64)
if !ok {
return 0, false
}
return h64.Sum64(), true
}
// IsHash64 returns true if the underlying hash is a 64-bit hash function.
func (h *Hash) IsHash64() bool {
_, ok := h.Hash.(hash.Hash64)
return ok
}
func blakeFunc(bf func(key []byte) (hash.Hash, error)) func() hash.Hash {
return func() hash.Hash {
h, err := bf(nil)
assert.NoError(err, "while constructing a BLAKE2 hash function")
return h
}
}
var secureHashes = map[string]func() hash.Hash{
"ripemd160": ripemd160.New,
"sha224": sha256.New224,
"sha256": sha256.New,
"sha384": sha512.New384,
"sha512": sha512.New,
"sha3-224": sha3.New224,
"sha3-256": sha3.New256,
"sha3-384": sha3.New384,
"sha3-512": sha3.New512,
"blake2s-256": blakeFunc(blake2s.New256),
"blake2b-256": blakeFunc(blake2b.New256),
"blake2b-384": blakeFunc(blake2b.New384),
"blake2b-512": blakeFunc(blake2b.New512),
}
func newHash32(f func() hash.Hash32) func() hash.Hash {
return func() hash.Hash {
return f()
}
}
func newHash64(f func() hash.Hash64) func() hash.Hash {
return func() hash.Hash {
return f()
}
}
func newCRC64(tab uint64) func() hash.Hash {
return newHash64(
func() hash.Hash64 {
return crc64.New(crc64.MakeTable(tab))
})
}
var insecureHashes = map[string]func() hash.Hash{
"md4": md4.New,
"md5": md5.New,
"sha1": sha1.New,
"adler32": newHash32(adler32.New),
"crc32-ieee": newHash32(crc32.NewIEEE),
"crc64": newCRC64(crc64.ISO),
"crc64-ecma": newCRC64(crc64.ECMA),
"fnv1-32a": newHash32(fnv.New32a),
"fnv1-32": newHash32(fnv.New32),
"fnv1-64a": newHash64(fnv.New64a),
"fnv1-64": newHash64(fnv.New64),
}
// New returns a new Hash for the specified algorithm.
func New(algo string) (*Hash, error) {
h := &Hash{algo: algo}
hf, ok := secureHashes[algo]
if ok {
h.Hash = hf()
h.secure = true
return h, nil
}
hf, ok = insecureHashes[algo]
if ok {
h.Hash = hf()
h.secure = false
return h, nil
}
return nil, errors.New("ahash: unsupported hash algorithm " + algo)
}
// Sum returns the digest (not the hex digest) of the data using the given
// algorithm.
func Sum(algo string, data []byte) ([]byte, error) {
h, err := New(algo)
if err != nil {
return nil, err
}
_, err = h.Write(data)
if err != nil {
return nil, err
}
return h.Sum(nil), nil
}
// SumReader reads all the data from the given io.Reader and returns the
// digest (not the hex digest) from the specified algorithm.
func SumReader(algo string, r io.Reader) ([]byte, error) {
h, err := New(algo)
if err != nil {
return nil, err
}
_, err = io.Copy(h, r)
if err != nil {
return nil, err
}
return h.Sum(nil), nil
}
// SumLimitedReader reads up to n bytes of data from the io.Reader and returns the
// digest (not the hex digest) from the specified algorithm.
func SumLimitedReader(algo string, r io.Reader, n int64) ([]byte, error) {
limit := &io.LimitedReader{
R: r,
N: n,
}
return SumReader(algo, limit)
}
var insecureHashList, secureHashList, hashList []string
func init() {
shl := len(secureHashes) // secure hash list length
ihl := len(insecureHashes) // insecure hash list length
ahl := shl + ihl // all hash list length
insecureHashList = make([]string, 0, ihl)
secureHashList = make([]string, 0, shl)
hashList = make([]string, 0, ahl)
for algo := range insecureHashes {
insecureHashList = append(insecureHashList, algo)
}
sort.Strings(insecureHashList)
for algo := range secureHashes {
secureHashList = append(secureHashList, algo)
}
sort.Strings(secureHashList)
hashList = append(hashList, insecureHashList...)
hashList = append(hashList, secureHashList...)
sort.Strings(hashList)
}
// HashList returns a sorted list of all the hash algorithms supported by the
// package.
func HashList() []string {
return hashList[:]
}
// SecureHashList returns a sorted list of all the secure (cryptographic) hash
// algorithms supported by the package.
func SecureHashList() []string {
return secureHashList[:]
}
// InsecureHashList returns a sorted list of all the insecure hash algorithms
// supported by the package.
func InsecureHashList() []string {
return insecureHashList[:]
}
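
The file above defines the whole ahash API (New, Sum, SumReader, and the Sum32/Sum64/IsSecure helpers). A minimal usage sketch follows; the import path is assumed from the git.wntrmute.dev/kyle/goutils module path used elsewhere in this diff.

```
package main

import (
	"fmt"
	"log"

	// Import path assumed from the module path used elsewhere in this diff.
	"git.wntrmute.dev/kyle/goutils/ahash"
)

func main() {
	// One-shot digest of a byte slice, selecting the algorithm by name.
	sum, err := ahash.Sum("sha256", []byte("hello, world"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("sha256: %x\n", sum)

	// Hash wraps hash.Hash and reports whether the algorithm is
	// cryptographic and whether it is a 32- or 64-bit checksum.
	h, err := ahash.New("crc32-ieee")
	if err != nil {
		log.Fatal(err)
	}
	h.Write([]byte("hello, world"))
	if sum32, ok := h.Sum32(); ok {
		fmt.Printf("crc32-ieee: %08x (secure: %v)\n", sum32, h.IsSecure())
	}
}
```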

ahash/ahash_test.go (new file)
@@ -0,0 +1,157 @@
package ahash
import (
"bytes"
"fmt"
"testing"
"git.wntrmute.dev/kyle/goutils/assert"
)
func TestSecureHash(t *testing.T) {
algo := "sha256"
h, err := New(algo)
assert.NoErrorT(t, err)
assert.BoolT(t, h.IsSecure(), algo+" should be a secure hash")
assert.BoolT(t, h.HashAlgo() == algo, "hash returned the wrong HashAlgo")
assert.BoolT(t, !h.IsHash32(), algo+" isn't actually a 32-bit hash")
assert.BoolT(t, !h.IsHash64(), algo+" isn't actually a 64-bit hash")
var data []byte
var expected = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
sum, err := Sum(algo, data)
assert.NoErrorT(t, err)
assert.BoolT(t, fmt.Sprintf("%x", sum) == expected, fmt.Sprintf("expected hash %s but have %x", expected, sum))
data = []byte("hello, world")
buf := bytes.NewBuffer(data)
expected = "09ca7e4eaa6e8ae9c7d261167129184883644d07dfba7cbfbc4c8a2e08360d5b"
sum, err = SumReader(algo, buf)
assert.NoErrorT(t, err)
assert.BoolT(t, fmt.Sprintf("%x", sum) == expected, fmt.Sprintf("expected hash %s but have %x", expected, sum))
data = []byte("hello world")
_, err = h.Write(data)
assert.NoErrorT(t, err)
unExpected := "09ca7e4eaa6e8ae9c7d261167129184883644d07dfba7cbfbc4c8a2e08360d5b"
sum = h.Sum(nil)
assert.BoolT(t, fmt.Sprintf("%x", sum) != unExpected, fmt.Sprintf("hash shouldn't have returned %x", unExpected))
}
func TestInsecureHash(t *testing.T) {
algo := "md5"
h, err := New(algo)
assert.NoErrorT(t, err)
assert.BoolT(t, !h.IsSecure(), algo+" shouldn't be a secure hash")
assert.BoolT(t, h.HashAlgo() == algo, "hash returned the wrong HashAlgo")
assert.BoolT(t, !h.IsHash32(), algo+" isn't actually a 32-bit hash")
assert.BoolT(t, !h.IsHash64(), algo+" isn't actually a 64-bit hash")
var data []byte
var expected = "d41d8cd98f00b204e9800998ecf8427e"
sum, err := Sum(algo, data)
assert.NoErrorT(t, err)
assert.BoolT(t, fmt.Sprintf("%x", sum) == expected, fmt.Sprintf("expected hash %s but have %x", expected, sum))
data = []byte("hello, world")
buf := bytes.NewBuffer(data)
expected = "e4d7f1b4ed2e42d15898f4b27b019da4"
sum, err = SumReader(algo, buf)
assert.NoErrorT(t, err)
assert.BoolT(t, fmt.Sprintf("%x", sum) == expected, fmt.Sprintf("expected hash %s but have %x", expected, sum))
data = []byte("hello world")
_, err = h.Write(data)
assert.NoErrorT(t, err)
unExpected := "e4d7f1b4ed2e42d15898f4b27b019da4"
sum = h.Sum(nil)
assert.BoolT(t, fmt.Sprintf("%x", sum) != unExpected, fmt.Sprintf("hash shouldn't have returned %x", unExpected))
}
func TestHash32(t *testing.T) {
algo := "crc32-ieee"
h, err := New(algo)
assert.NoErrorT(t, err)
assert.BoolT(t, !h.IsSecure(), algo+" shouldn't be a secure hash")
assert.BoolT(t, h.HashAlgo() == algo, "hash returned the wrong HashAlgo")
assert.BoolT(t, h.IsHash32(), algo+" is actually a 32-bit hash")
assert.BoolT(t, !h.IsHash64(), algo+" isn't actually a 64-bit hash")
var data []byte
var expected uint32
h.Write(data)
sum, ok := h.Sum32()
assert.BoolT(t, ok, algo+" should be able to return a Sum32")
assert.BoolT(t, expected == sum, fmt.Sprintf("%s returned the %d but expected %d", algo, sum, expected))
data = []byte("hello, world")
expected = 0xffab723a
h.Write(data)
sum, ok = h.Sum32()
assert.BoolT(t, ok, algo+" should be able to return a Sum32")
assert.BoolT(t, expected == sum, fmt.Sprintf("%s returned the %d but expected %d", algo, sum, expected))
h.Reset()
data = []byte("hello world")
h.Write(data)
sum, ok = h.Sum32()
assert.BoolT(t, ok, algo+" should be able to return a Sum32")
assert.BoolT(t, expected != sum, fmt.Sprintf("%s returned %d but shouldn't have", algo, sum))
}
func TestHash64(t *testing.T) {
algo := "crc64"
h, err := New(algo)
assert.NoErrorT(t, err)
assert.BoolT(t, !h.IsSecure(), algo+" shouldn't be a secure hash")
assert.BoolT(t, h.HashAlgo() == algo, "hash returned the wrong HashAlgo")
assert.BoolT(t, h.IsHash64(), algo+" is actually a 64-bit hash")
assert.BoolT(t, !h.IsHash32(), algo+" isn't actually a 32-bit hash")
var data []byte
var expected uint64
h.Write(data)
sum, ok := h.Sum64()
assert.BoolT(t, ok, algo+" should be able to return a Sum64")
assert.BoolT(t, expected == sum, fmt.Sprintf("%s returned the %d but expected %d", algo, sum, expected))
data = []byte("hello, world")
expected = 0x16c45c0eb1d9c2ec
h.Write(data)
sum, ok = h.Sum64()
assert.BoolT(t, ok, algo+" should be able to return a Sum64")
assert.BoolT(t, expected == sum, fmt.Sprintf("%s returned the %d but expected %d", algo, sum, expected))
h.Reset()
data = []byte("hello world")
h.Write(data)
sum, ok = h.Sum64()
assert.BoolT(t, ok, algo+" should be able to return a Sum64")
assert.BoolT(t, expected != sum, fmt.Sprintf("%s returned %d but shouldn't have", algo, sum))
}
func TestListLengthSanity(t *testing.T) {
all := HashList()
secure := SecureHashList()
insecure := InsecureHashList()
assert.BoolT(t, len(all) == len(secure)+len(insecure))
}
func TestSumLimitedReader(t *testing.T) {
data := bytes.NewBufferString("hello, world")
dataLen := data.Len()
extendedData := bytes.NewBufferString("hello, world! this is an extended message")
expected := "09ca7e4eaa6e8ae9c7d261167129184883644d07dfba7cbfbc4c8a2e08360d5b"
hash, err := SumReader("sha256", data)
assert.NoErrorT(t, err)
assert.BoolT(t, fmt.Sprintf("%x", hash) == expected, fmt.Sprintf("have hash %x, want %s", hash, expected))
extendedHash, err := SumLimitedReader("sha256", extendedData, int64(dataLen))
assert.NoErrorT(t, err)
assert.BoolT(t, bytes.Equal(hash, extendedHash), fmt.Sprintf("have hash %x, want %x", extendedHash, hash))
}

assert/assert.go
@@ -16,7 +16,7 @@ import (
"testing"
)
// NoDebug, if set to true, will cause all asserts to be ignored.
// NoDebug can be set to true to cause all asserts to be ignored.
var NoDebug bool
func die(what string, a ...string) {
@@ -94,7 +94,7 @@ func NoError(err error, s ...string) {
}
if nil != err {
die(err.Error())
die(err.Error(), s...)
}
}
@@ -170,5 +170,5 @@ func ErrorEqT(t *testing.T, expected, actual error) {
should = fmt.Sprintf("have '%s'", actual)
}
die(fmt.Sprintf("assert.Error2: expected '%s', but %s", expected, should))
t.Fatalf("assert.Error2: expected '%s', but %s", expected, should)
}
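
The hunks above show only fragments of the assert package, but together with the test files elsewhere in this diff they imply a small API: NoError(err, context...) kills the program when err is non-nil, and NoDebug turns asserts into no-ops. A minimal sketch under those assumptions, with the import path taken from the test files:

```
package main

import (
	"os"

	// Import path taken from the test files elsewhere in this diff.
	"git.wntrmute.dev/kyle/goutils/assert"
)

func main() {
	// Setting NoDebug to true would turn every assert into a no-op.
	assert.NoDebug = false

	// NoError exits the program when err != nil, printing the extra
	// context strings along with the error.
	f, err := os.Open("/etc/hosts")
	assert.NoError(err, "opening /etc/hosts")
	defer f.Close()
}
```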

backoff/LICENSE (new file)
@@ -0,0 +1,24 @@
Copyright (c) 2016 CloudFlare Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

backoff/README.md (new file)
@@ -0,0 +1,83 @@
# backoff
## Go implementation of "Exponential Backoff And Jitter"
This package implements the backoff strategy described in the AWS
Architecture Blog article
["Exponential Backoff And Jitter"](http://www.awsarchitectureblog.com/2015/03/backoff.html). Essentially,
the backoff has an interval `time.Duration`; the *n<sup>th</sup>* call
to backoff will return a `time.Duration` that is *2 <sup>n</sup> *
interval*. If jitter is enabled (which is the default behaviour), the
duration is a random value between 0 and *2 <sup>n</sup> * interval*.
The backoff is configured with a maximum duration that will not be
exceeded; e.g., by default, the longest duration returned is
`backoff.DefaultMaxDuration`.
## Usage
A `Backoff` is initialised with a call to `New`. Using zero values
causes it to use `DefaultMaxDuration` and `DefaultInterval` as the
maximum duration and interval.
```
package something
import "github.com/cloudflare/backoff"
func retryable() {
b := backoff.New(0, 0)
for {
err := someOperation()
if err == nil {
break
}
log.Printf("error in someOperation: %v", err)
<-time.After(b.Duration())
}
log.Printf("succeeded after %d tries", b.Tries()+1)
b.Reset()
}
```
It can also be used to rate limit code that should retry infinitely, but which does not
use `Backoff` itself.
```
package something
import (
"time"
"github.com/cloudflare/backoff"
)
func retryable() {
b := backoff.New(0, 0)
b.SetDecay(30 * time.Second)
for {
// b will reset if someOperation returns later than
// the last call to b.Duration() + 30s.
err := someOperation()
if err == nil {
break
}
log.Printf("error in someOperation: %v", err)
<-time.After(b.Duration())
}
}
```
## Tunables
* `NewWithoutJitter` creates a Backoff that doesn't use jitter.
The default behaviour is controlled by two variables:
* `DefaultInterval` sets the base interval for backoffs created with
the zero `time.Duration` value in the `Interval` field.
* `DefaultMaxDuration` sets the maximum duration for backoffs created
with the zero `time.Duration` value in the `MaxDuration` field.
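
A minimal sketch of the tunables described above: NewWithoutJitter with an explicit cap and interval gives a deterministic 1s, 2s, 4s, ... progression capped at 30s. someOperation is a placeholder, as in the earlier examples, and the github.com/cloudflare/backoff import path is the one this README uses (in this repository the package lives under the goutils module path).

```
package something

import (
	"errors"
	"log"
	"time"

	"github.com/cloudflare/backoff"
)

// someOperation stands in for whatever fallible call is being retried.
func someOperation() error { return errors.New("not ready yet") }

func retryableDeterministic() {
	// Without jitter the waits are exactly 1s, 2s, 4s, ... capped at 30s.
	b := backoff.NewWithoutJitter(30*time.Second, time.Second)
	for {
		err := someOperation()
		if err == nil {
			break
		}
		log.Printf("error in someOperation: %v", err)
		<-time.After(b.Duration())
	}
	b.Reset()
}
```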

backoff/backoff.go (new file)
@@ -0,0 +1,197 @@
// Package backoff contains an implementation of an intelligent backoff
// strategy. It is based on the approach in the AWS architecture blog
// article titled "Exponential Backoff And Jitter", which is found at
// http://www.awsarchitectureblog.com/2015/03/backoff.html.
//
// Essentially, the backoff has an interval `time.Duration`; the nth
// call to backoff will return a `time.Duration` that is 2^n *
// interval. If jitter is enabled (which is the default behaviour),
// the duration is a random value between 0 and 2^n * interval. The
// backoff is configured with a maximum duration that will not be
// exceeded.
//
// The `New` function will attempt to use the system's cryptographic
// random number generator to seed a Go math/rand random number
// source. If this fails, the package will panic on startup.
package backoff
import (
"crypto/rand"
"encoding/binary"
"io"
"math"
mrand "math/rand"
"sync"
"time"
)
var prngMu sync.Mutex
var prng *mrand.Rand
// DefaultInterval is used when a Backoff is initialised with a
// zero-value Interval.
var DefaultInterval = 5 * time.Minute
// DefaultMaxDuration is the maximum amount of time that the backoff will
// delay for.
var DefaultMaxDuration = 6 * time.Hour
// A Backoff contains the information needed to intelligently backoff
// and retry operations using an exponential backoff algorithm. It should
// be initialised with a call to `New`.
//
// Only use a Backoff from a single goroutine; it is not safe for concurrent
// access.
type Backoff struct {
// maxDuration is the largest possible duration that can be
// returned from a call to Duration.
maxDuration time.Duration
// interval controls the time step for backing off.
interval time.Duration
// noJitter controls whether to use the "Full Jitter"
// improvement to attempt to smooth out spikes in a high
// contention scenario. If noJitter is set to true, no
// jitter will be introduced.
noJitter bool
// decay controls the decay of n. If it is non-zero, n is
// reset if more than the last backoff + decay has elapsed since
// the last try.
decay time.Duration
n uint64
lastTry time.Time
}
// New creates a new backoff with the specified max duration and
// interval. Zero values may be used to use the default values.
//
// Panics if either max or interval is negative.
func New(max time.Duration, interval time.Duration) *Backoff {
if max < 0 || interval < 0 {
panic("backoff: max or interval is negative")
}
b := &Backoff{
maxDuration: max,
interval: interval,
}
b.setup()
return b
}
// NewWithoutJitter works similarly to New, except that the created
// Backoff will not use jitter.
func NewWithoutJitter(max time.Duration, interval time.Duration) *Backoff {
b := New(max, interval)
b.noJitter = true
return b
}
func init() {
var buf [8]byte
var n int64
_, err := io.ReadFull(rand.Reader, buf[:])
if err != nil {
panic(err.Error())
}
n = int64(binary.LittleEndian.Uint64(buf[:]))
src := mrand.NewSource(n)
prng = mrand.New(src)
}
func (b *Backoff) setup() {
if b.interval == 0 {
b.interval = DefaultInterval
}
if b.maxDuration == 0 {
b.maxDuration = DefaultMaxDuration
}
}
// Duration returns a time.Duration appropriate for the backoff,
// incrementing the attempt counter.
func (b *Backoff) Duration() time.Duration {
b.setup()
b.decayN()
t := b.duration(b.n)
if b.n < math.MaxUint64 {
b.n++
}
if !b.noJitter {
prngMu.Lock()
t = time.Duration(prng.Int63n(int64(t)))
prngMu.Unlock()
}
return t
}
// requires b to be locked.
func (b *Backoff) duration(n uint64) (t time.Duration) {
// Saturate pow
pow := time.Duration(math.MaxInt64)
if n < 63 {
pow = 1 << n
}
t = b.interval * pow
if t/pow != b.interval || t > b.maxDuration {
t = b.maxDuration
}
return
}
// Reset resets the attempt counter of a backoff.
//
// It should be called when the rate-limited action succeeds.
func (b *Backoff) Reset() {
b.lastTry = time.Time{}
b.n = 0
}
// SetDecay sets the duration after which the try counter will be reset.
// Panics if decay is smaller than 0.
//
// The decay only kicks in if at least the last backoff + decay has elapsed
// since the last try.
func (b *Backoff) SetDecay(decay time.Duration) {
if decay < 0 {
panic("backoff: decay < 0")
}
b.decay = decay
}
// requires b to be locked
func (b *Backoff) decayN() {
if b.decay == 0 {
return
}
if b.lastTry.IsZero() {
b.lastTry = time.Now()
return
}
lastDuration := b.duration(b.n - 1)
decayed := time.Since(b.lastTry) > lastDuration+b.decay
b.lastTry = time.Now()
if !decayed {
return
}
b.n = 0
}

backoff/backoff_test.go (new file)
@@ -0,0 +1,175 @@
package backoff
import (
"fmt"
"math"
"testing"
"time"
)
// If given New with 0's and no jitter, ensure that certain invariants are met:
//
// - the default max duration and interval should be used
// - noJitter should be true
// - the RNG should not be initialised
// - the first duration should be equal to the default interval
func TestDefaults(t *testing.T) {
b := NewWithoutJitter(0, 0)
if b.maxDuration != DefaultMaxDuration {
t.Fatalf("expected new backoff to use the default max duration (%s), but have %s", DefaultMaxDuration, b.maxDuration)
}
if b.interval != DefaultInterval {
t.Fatalf("expected new backoff to use the default interval (%s), but have %s", DefaultInterval, b.interval)
}
if b.noJitter != true {
t.Fatal("backoff should have been initialised without jitter")
}
dur := b.Duration()
if dur != DefaultInterval {
t.Fatalf("expected first duration to be %s, have %s", DefaultInterval, dur)
}
}
// Given a zero-value initialised Backoff, it should be transparently
// setup.
func TestSetup(t *testing.T) {
b := new(Backoff)
dur := b.Duration()
if dur < 0 || dur > (5*time.Minute) {
t.Fatalf("want duration between 0 and 5 minutes, have %s", dur)
}
}
// Ensure that tries increments as expected.
func TestTries(t *testing.T) {
b := NewWithoutJitter(5, 1)
for i := uint64(0); i < 3; i++ {
if b.n != i {
t.Fatalf("want tries=%d, have tries=%d", i, b.n)
}
pow := 1 << i
expected := time.Duration(pow)
dur := b.Duration()
if dur != expected {
t.Fatalf("want duration=%d, have duration=%d at i=%d", expected, dur, i)
}
}
for i := uint(3); i < 5; i++ {
dur := b.Duration()
if dur != 5 {
t.Fatalf("want duration=5, have %d at i=%d", dur, i)
}
}
}
// Ensure that a call to Reset will actually reset the Backoff.
func TestReset(t *testing.T) {
const iter = 10
b := New(1000, 1)
for i := 0; i < iter; i++ {
_ = b.Duration()
}
if b.n != iter {
t.Fatalf("expected tries=%d, have tries=%d", iter, b.n)
}
b.Reset()
if b.n != 0 {
t.Fatalf("expected tries=0 after reset, have tries=%d", b.n)
}
}
const decay = 5 * time.Millisecond
const max = 10 * time.Millisecond
const interval = time.Millisecond
func TestDecay(t *testing.T) {
const iter = 10
b := NewWithoutJitter(max, 1)
b.SetDecay(decay)
var backoff time.Duration
for i := 0; i < iter; i++ {
backoff = b.Duration()
}
if b.n != iter {
t.Fatalf("expected tries=%d, have tries=%d", iter, b.n)
}
// Don't decay below backoff
b.lastTry = time.Now().Add(-backoff + 1)
backoff = b.Duration()
if b.n != iter+1 {
t.Fatalf("expected tries=%d, have tries=%d", iter+1, b.n)
}
// Reset after backoff + decay
b.lastTry = time.Now().Add(-backoff - decay)
b.Duration()
if b.n != 1 {
t.Fatalf("expected tries=%d, have tries=%d", 1, b.n)
}
}
// Ensure that decay works even if the retry counter is saturated.
func TestDecaySaturation(t *testing.T) {
b := NewWithoutJitter(1<<2, 1)
b.SetDecay(decay)
var duration time.Duration
for i := 0; i <= 2; i++ {
duration = b.Duration()
}
if duration != 1<<2 {
t.Fatalf("expected duration=%v, have duration=%v", 1<<2, duration)
}
b.lastTry = time.Now().Add(-duration - decay)
b.n = math.MaxUint64
duration = b.Duration()
if duration != 1 {
t.Errorf("expected duration=%v, have duration=%v", 1, duration)
}
}
func ExampleBackoff_SetDecay() {
b := NewWithoutJitter(max, interval)
b.SetDecay(decay)
// try 0
fmt.Println(b.Duration())
// try 1
fmt.Println(b.Duration())
// try 2
duration := b.Duration()
fmt.Println(duration)
// try 3, below decay
time.Sleep(duration)
duration = b.Duration()
fmt.Println(duration)
// try 4, resets
time.Sleep(duration + decay)
fmt.Println(b.Duration())
// Output: 1ms
// 2ms
// 4ms
// 8ms
// 1ms
}

certlib/certerr/errors.go (new file)
@@ -0,0 +1,79 @@
package certerr
import (
"errors"
"fmt"
"strings"
)
// ErrEmptyCertificate indicates that a certificate could not be processed
// because there was no data to process.
var ErrEmptyCertificate = errors.New("certlib: empty certificate")
type ErrorSourceType uint8
func (t ErrorSourceType) String() string {
switch t {
case ErrorSourceCertificate:
return "certificate"
case ErrorSourcePrivateKey:
return "private key"
case ErrorSourceCSR:
return "CSR"
case ErrorSourceSCTList:
return "SCT list"
case ErrorSourceKeypair:
return "TLS keypair"
default:
panic(fmt.Sprintf("unknown error source %d", t))
}
}
const (
ErrorSourceCertificate ErrorSourceType = 1
ErrorSourcePrivateKey ErrorSourceType = 2
ErrorSourceCSR ErrorSourceType = 3
ErrorSourceSCTList ErrorSourceType = 4
ErrorSourceKeypair ErrorSourceType = 5
)
// InvalidPEMType is used to indicate that we were expecting one type of PEM
// file, but saw another.
type InvalidPEMType struct {
have string
want []string
}
func (err *InvalidPEMType) Error() string {
if len(err.want) == 1 {
return fmt.Sprintf("invalid PEM type: have %s, expected %s", err.have, err.want[0])
} else {
return fmt.Sprintf("invalid PEM type: have %s, expected one of %s", err.have, strings.Join(err.want, ", "))
}
}
// ErrInvalidPEMType returns a new InvalidPEMType error.
func ErrInvalidPEMType(have string, want ...string) error {
return &InvalidPEMType{
have: have,
want: want,
}
}
func LoadingError(t ErrorSourceType, err error) error {
return fmt.Errorf("failed to load %s from disk: %w", t, err)
}
func ParsingError(t ErrorSourceType, err error) error {
return fmt.Errorf("failed to parse %s: %w", t, err)
}
func DecodeError(t ErrorSourceType, err error) error {
return fmt.Errorf("failed to decode %s: %w", t, err)
}
func VerifyError(t ErrorSourceType, err error) error {
return fmt.Errorf("failed to verify %s: %w", t, err)
}
var ErrEncryptedPrivateKey = errors.New("private key is encrypted")
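
Because LoadingError, ParsingError, DecodeError, and VerifyError wrap the underlying error with %w, callers can branch on the cause with errors.Is and errors.As. A minimal sketch; the import path is assumed from the module path used elsewhere in this diff.

```
package main

import (
	"errors"
	"fmt"

	// Import path assumed from the module path used elsewhere in this diff.
	"git.wntrmute.dev/kyle/goutils/certlib/certerr"
)

func describe(err error) {
	// The helpers wrap with %w, so errors.Is and errors.As can see the cause.
	var pemErr *certerr.InvalidPEMType
	switch {
	case errors.Is(err, certerr.ErrEmptyCertificate):
		fmt.Println("no certificate data was provided")
	case errors.As(err, &pemErr):
		fmt.Println("wrong PEM block type:", pemErr)
	default:
		fmt.Println("other error:", err)
	}
}

func main() {
	describe(certerr.ParsingError(certerr.ErrorSourceCertificate, certerr.ErrEmptyCertificate))
	describe(certerr.DecodeError(certerr.ErrorSourceCertificate,
		certerr.ErrInvalidPEMType("EC PRIVATE KEY", "CERTIFICATE")))
}
```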

certlib/certlib.go (new file)
@@ -0,0 +1,85 @@
package certlib
import (
"crypto/x509"
"encoding/pem"
"errors"
"io/ioutil"
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
)
// ReadCertificate reads a DER or PEM-encoded certificate from the
// byte slice.
func ReadCertificate(in []byte) (cert *x509.Certificate, rest []byte, err error) {
if len(in) == 0 {
err = certerr.ErrEmptyCertificate
return
}
if in[0] == '-' {
p, remaining := pem.Decode(in)
if p == nil {
err = errors.New("certlib: invalid PEM file")
return
}
rest = remaining
if p.Type != "CERTIFICATE" {
err = certerr.ErrInvalidPEMType(p.Type, "CERTIFICATE")
return
}
in = p.Bytes
}
cert, err = x509.ParseCertificate(in)
return
}
// ReadCertificates tries to read all the certificates in a
// PEM-encoded collection.
func ReadCertificates(in []byte) (certs []*x509.Certificate, err error) {
var cert *x509.Certificate
for {
cert, in, err = ReadCertificate(in)
if err != nil {
break
}
if cert == nil {
break
}
certs = append(certs, cert)
if len(in) == 0 {
break
}
}
return certs, err
}
// LoadCertificate tries to read a single certificate from disk. If
// the file contains multiple certificates (e.g. a chain), only the
// first certificate is returned.
func LoadCertificate(path string) (*x509.Certificate, error) {
in, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
cert, _, err := ReadCertificate(in)
return cert, err
}
// LoadCertificates tries to read all the certificates in a file,
// returning them in the order that it found them in the file.
func LoadCertificates(path string) ([]*x509.Certificate, error) {
in, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
return ReadCertificates(in)
}
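
A minimal sketch of reading a PEM bundle with the loaders above; the bundle path is hypothetical and the import path is assumed from the module path used elsewhere in this diff.

```
package main

import (
	"fmt"
	"log"

	// Import path assumed from the module path used elsewhere in this diff.
	"git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
	// Hypothetical path: any PEM file containing one or more CERTIFICATE blocks.
	certs, err := certlib.LoadCertificates("testdata/bundle.pem")
	if err != nil {
		log.Fatal(err)
	}
	for _, cert := range certs {
		fmt.Printf("%s (expires %s)\n", cert.Subject.CommonName, cert.NotAfter)
	}
}
```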

certlib/certlib_test.go (new file)
@@ -0,0 +1,139 @@
package certlib
import (
"fmt"
"testing"
"git.wntrmute.dev/kyle/goutils/assert"
)
// some CA certs I found on my computerbox.
var testCerts = `-----BEGIN CERTIFICATE-----
MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE
AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw
CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ
BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND
VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb
qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY
HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo
G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA
lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr
IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/
0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH
k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47
4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO
m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa
cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl
uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI
KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls
ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG
AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2
VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT
VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG
CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA
cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA
QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA
7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA
cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA
QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA
czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu
aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt
aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud
DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF
BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp
D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU
JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m
AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD
vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms
tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH
7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h
I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA
h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF
d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H
pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UE
AwwNQUNFRElDT00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00x
CzAJBgNVBAYTAkVTMB4XDTA4MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEW
MBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZF
RElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
AgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHkWLn7
09gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7
XBZXehuDYAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5P
Grjm6gSSrj0RuVFCPYewMYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAK
t0SdE3QrwqXrIhWYENiLxQSfHY9g5QYbm8+5eaA9oiM/Qj9r+hwDezCNzmzAv+Yb
X79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbkHQl/Sog4P75n/TSW9R28
MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTTxKJxqvQU
fecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI
2Sf23EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyH
K9caUPgn6C9D4zq92Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEae
ZAwUswdbxcJzbPEHXEUkFDWug/FqTYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAP
BgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz4SsrSbbXc6GqlPUB53NlTKxQ
MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU9QHnc2VMrFAw
RAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv
bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWIm
fQwng4/F9tqgaHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3
gvoFNTPhNahXwOf9jU8/kzJPeGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKe
I6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1PwkzQSulgUV1qzOMPPKC8W64iLgpq0i
5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1ThCojz2GuHURwCRi
ipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oIKiMn
MCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZ
o5NjEFIqnxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6
zqylfDJKZ0DcMDQj3dcEI2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacN
GHk0vFQYXlPKNFHtRQrmjseCNj6nOGOpMCwXEGCSn1WHElkQwg9naRHMTh5+Spqt
r0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3otkYNbn5XOmeUwssfnHdK
Z05phkOTOPu220+DkdRgfks+KzgHVZhepA==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE
BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w
MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290
IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC
SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1
ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv
UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX
4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9
KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/
gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb
rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ
51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F
be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe
KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F
v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn
fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7
jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz
ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt
ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL
e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70
jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz
WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V
SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j
pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX
X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok
fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R
K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU
ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU
LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT
LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg==
-----END CERTIFICATE-----
`
func TestReadCertificate(t *testing.T) {
cert, remaining, err := ReadCertificate([]byte(testCerts))
assert.NoErrorT(t, err)
assert.BoolT(t, len(remaining) > 0, "lib: expected extra data from ReadCertificate")
assert.BoolT(t, cert != nil, "lib: expected an actual certificate to have been returned")
}
func TestReadCertificates(t *testing.T) {
certs, err := ReadCertificates([]byte(testCerts))
assert.NoErrorT(t, err)
assert.BoolT(t, len(certs) == 3, fmt.Sprintf("lib: expected three certificates, have %d", len(certs)))
for _, cert := range certs {
assert.BoolT(t, cert != nil, "lib: expected an actual certificate to have been returned")
}
}

certlib/der_helpers.go (new file)
@@ -0,0 +1,75 @@
package certlib
// Originally from CFSSL, mostly written by me, and licensed under:
/*
Copyright (c) 2014 CloudFlare Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// I've modified it for use in my own code e.g. by removing the CFSSL errors
// and replacing them with sane ones.
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rsa"
"crypto/x509"
"fmt"
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
)
// ParsePrivateKeyDER parses a PKCS #1, PKCS #8, ECDSA, or Ed25519 DER-encoded
// private key. The key must not be in PEM format. If an error is returned, it
// may contain information about the private key, so care should be taken when
// displaying it directly.
func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) {
generalKey, err := x509.ParsePKCS8PrivateKey(keyDER)
if err != nil {
generalKey, err = x509.ParsePKCS1PrivateKey(keyDER)
if err != nil {
generalKey, err = x509.ParseECPrivateKey(keyDER)
if err != nil {
generalKey, err = ParseEd25519PrivateKey(keyDER)
if err != nil {
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, err)
}
}
}
}
switch generalKey := generalKey.(type) {
case *rsa.PrivateKey:
return generalKey, nil
case *ecdsa.PrivateKey:
return generalKey, nil
case ed25519.PrivateKey:
return generalKey, nil
default:
		return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %T", generalKey))
}
}
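// describePrivateKeyDER is a minimal usage sketch for ParsePrivateKeyDER (not
// part of the original CFSSL-derived helpers): it parses a DER-encoded private
// key and reports what kind of key came back, using only the packages already
// imported above.
func describePrivateKeyDER(keyDER []byte) (string, error) {
	priv, err := ParsePrivateKeyDER(keyDER)
	if err != nil {
		return "", err
	}
	switch key := priv.(type) {
	case *rsa.PrivateKey:
		return fmt.Sprintf("RSA-%d private key", key.N.BitLen()), nil
	case *ecdsa.PrivateKey:
		return fmt.Sprintf("ECDSA private key on curve %s", key.Curve.Params().Name), nil
	case ed25519.PrivateKey:
		return "Ed25519 private key", nil
	default:
		return "", fmt.Errorf("unexpected key type %T", priv)
	}
}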

certlib/ed25519.go (new file)

@ -0,0 +1,164 @@
package certlib
import (
"crypto"
"crypto/ed25519"
"crypto/x509/pkix"
"encoding/asn1"
"errors"
)
// Originally from CFSSL, where it was mostly written by me, and licensed under:
/*
Copyright (c) 2014 CloudFlare Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// I've modified it for use in my own code e.g. by removing the CFSSL errors
// and replacing them with sane ones.
var errEd25519WrongID = errors.New("incorrect object identifier")
var errEd25519WrongKeyType = errors.New("incorrect key type")
// ed25519OID is the OID for the Ed25519 signature scheme: see
// https://datatracker.ietf.org/doc/draft-ietf-curdle-pkix-04.
var ed25519OID = asn1.ObjectIdentifier{1, 3, 101, 112}
// subjectPublicKeyInfo reflects the ASN.1 object defined in the X.509 standard.
//
// This is defined in crypto/x509 as "publicKeyInfo".
type subjectPublicKeyInfo struct {
Algorithm pkix.AlgorithmIdentifier
PublicKey asn1.BitString
}
// MarshalEd25519PublicKey creates a DER-encoded SubjectPublicKeyInfo for an
// ed25519 public key, as defined in
// https://tools.ietf.org/html/draft-ietf-curdle-pkix-04. This is analogous to
// MarshalPKIXPublicKey in crypto/x509, which doesn't currently support Ed25519.
func MarshalEd25519PublicKey(pk crypto.PublicKey) ([]byte, error) {
pub, ok := pk.(ed25519.PublicKey)
if !ok {
return nil, errEd25519WrongKeyType
}
spki := subjectPublicKeyInfo{
Algorithm: pkix.AlgorithmIdentifier{
Algorithm: ed25519OID,
},
PublicKey: asn1.BitString{
BitLength: len(pub) * 8,
Bytes: pub,
},
}
return asn1.Marshal(spki)
}
// ParseEd25519PublicKey returns the Ed25519 public key encoded by the input.
func ParseEd25519PublicKey(der []byte) (crypto.PublicKey, error) {
var spki subjectPublicKeyInfo
if rest, err := asn1.Unmarshal(der, &spki); err != nil {
return nil, err
} else if len(rest) > 0 {
return nil, errors.New("SubjectPublicKeyInfo too long")
}
if !spki.Algorithm.Algorithm.Equal(ed25519OID) {
return nil, errEd25519WrongID
}
if spki.PublicKey.BitLength != ed25519.PublicKeySize*8 {
return nil, errors.New("SubjectPublicKeyInfo PublicKey length mismatch")
}
return ed25519.PublicKey(spki.PublicKey.Bytes), nil
}
// oneAsymmetricKey reflects the ASN.1 structure for storing private keys in
// https://tools.ietf.org/html/draft-ietf-curdle-pkix-04, excluding the optional
// fields, which we don't use here.
//
// This is identical to pkcs8 in crypto/x509.
type oneAsymmetricKey struct {
Version int
Algorithm pkix.AlgorithmIdentifier
PrivateKey []byte
}
// curvePrivateKey is the inner type of the PrivateKey field of
// oneAsymmetricKey.
type curvePrivateKey []byte
// MarshalEd25519PrivateKey returns a DER encoding of the input private key as
// specified in https://tools.ietf.org/html/draft-ietf-curdle-pkix-04.
func MarshalEd25519PrivateKey(sk crypto.PrivateKey) ([]byte, error) {
priv, ok := sk.(ed25519.PrivateKey)
if !ok {
return nil, errEd25519WrongKeyType
}
// Marshal the inner CurvePrivateKey.
curvePrivateKey, err := asn1.Marshal(priv.Seed())
if err != nil {
return nil, err
}
// Marshal the OneAsymmetricKey.
asym := oneAsymmetricKey{
Version: 0,
Algorithm: pkix.AlgorithmIdentifier{
Algorithm: ed25519OID,
},
PrivateKey: curvePrivateKey,
}
return asn1.Marshal(asym)
}
// ParseEd25519PrivateKey returns the Ed25519 private key encoded by the input.
func ParseEd25519PrivateKey(der []byte) (crypto.PrivateKey, error) {
asym := new(oneAsymmetricKey)
if rest, err := asn1.Unmarshal(der, asym); err != nil {
return nil, err
} else if len(rest) > 0 {
return nil, errors.New("OneAsymmetricKey too long")
}
// Check that the key type is correct.
if !asym.Algorithm.Algorithm.Equal(ed25519OID) {
return nil, errEd25519WrongID
}
// Unmarshal the inner CurvePrivateKey.
seed := new(curvePrivateKey)
if rest, err := asn1.Unmarshal(asym.PrivateKey, seed); err != nil {
return nil, err
} else if len(rest) > 0 {
return nil, errors.New("CurvePrivateKey too long")
}
return ed25519.NewKeyFromSeed(*seed), nil
}
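// roundTripEd25519 is a small illustrative sketch (not part of the original
// CFSSL-derived code): it generates a fresh Ed25519 key, marshals the private
// key to DER with MarshalEd25519PrivateKey, parses it back, and confirms the
// round trip preserved the key. ed25519.GenerateKey(nil) draws randomness from
// crypto/rand internally.
func roundTripEd25519() error {
	_, priv, err := ed25519.GenerateKey(nil)
	if err != nil {
		return err
	}
	der, err := MarshalEd25519PrivateKey(priv)
	if err != nil {
		return err
	}
	parsed, err := ParseEd25519PrivateKey(der)
	if err != nil {
		return err
	}
	if !priv.Equal(parsed) {
		return errors.New("round-tripped Ed25519 private key does not match the original")
	}
	return nil
}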

certlib/helpers.go (new file)

@ -0,0 +1,630 @@
package certlib
// Originally from CFSSL, where it was mostly written by me, and licensed under:
/*
Copyright (c) 2014 CloudFlare Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// I've modified it for use in my own code e.g. by removing the CFSSL errors
// and replacing them with sane ones.
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/pem"
"errors"
"fmt"
"os"
"strings"
"time"
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
"git.wntrmute.dev/kyle/goutils/certlib/pkcs7"
ct "github.com/google/certificate-transparency-go"
cttls "github.com/google/certificate-transparency-go/tls"
ctx509 "github.com/google/certificate-transparency-go/x509"
"golang.org/x/crypto/ocsp"
"golang.org/x/crypto/pkcs12"
)
// OneYear is a time.Duration representing a year's worth of seconds.
const OneYear = 8760 * time.Hour
// OneDay is a time.Duration representing a day's worth of seconds.
const OneDay = 24 * time.Hour
// DelegationUsage is the OID for the delegation usage extension.
var DelegationUsage = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 44363, 44}
// DelegationExtension is a pkix.Extension carrying the delegation usage OID
// with an ASN.1 NULL value.
var DelegationExtension = pkix.Extension{
Id: DelegationUsage,
Critical: false,
Value: []byte{0x05, 0x00}, // ASN.1 NULL
}
// InclusiveDate returns the time.Time representation of a date - 1
// nanosecond. This allows time.After to be used inclusively.
func InclusiveDate(year int, month time.Month, day int) time.Time {
return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond)
}
// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop
// issuing certificates valid for more than 5 years.
var Jul2012 = InclusiveDate(2012, time.July, 01)
// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop
// issuing certificates valid for more than 39 months.
var Apr2015 = InclusiveDate(2015, time.April, 01)
// KeyLength returns the bit size of an ECDSA or RSA public key; it returns 0
// for other key types.
func KeyLength(key interface{}) int {
if key == nil {
return 0
}
if ecdsaKey, ok := key.(*ecdsa.PublicKey); ok {
return ecdsaKey.Curve.Params().BitSize
} else if rsaKey, ok := key.(*rsa.PublicKey); ok {
return rsaKey.N.BitLen()
}
return 0
}
// ExpiryTime returns the time at which the certificate chain expires (the
// earliest NotAfter in the chain).
func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) {
if len(chain) == 0 {
return
}
notAfter = chain[0].NotAfter
for _, cert := range chain {
if notAfter.After(cert.NotAfter) {
notAfter = cert.NotAfter
}
}
return
}
// MonthsValid returns the number of months for which a certificate is valid.
func MonthsValid(c *x509.Certificate) int {
issued := c.NotBefore
expiry := c.NotAfter
years := (expiry.Year() - issued.Year())
months := years*12 + int(expiry.Month()) - int(issued.Month())
// Round up if valid for less than a full month
if expiry.Day() > issued.Day() {
months++
}
return months
}
// ValidExpiry determines if a certificate is valid for an acceptable
// length of time per the CA/Browser Forum baseline requirements.
// See https://cabforum.org/wp-content/uploads/CAB-Forum-BR-1.3.0.pdf
func ValidExpiry(c *x509.Certificate) bool {
issued := c.NotBefore
var maxMonths int
switch {
case issued.After(Apr2015):
maxMonths = 39
case issued.After(Jul2012):
maxMonths = 60
case issued.Before(Jul2012):
maxMonths = 120
}
if MonthsValid(c) > maxMonths {
return false
}
return true
}
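// exampleCertLifetime is a brief illustrative sketch (not part of the original
// helpers): it shows what MonthsValid and ValidExpiry report for a certificate
// whose validity window is set directly. A two-year certificate issued in 2016
// falls under the 39-month CA/Browser Forum limit, so ValidExpiry accepts it.
func exampleCertLifetime() {
	cert := &x509.Certificate{
		NotBefore: time.Date(2016, time.January, 15, 0, 0, 0, 0, time.UTC),
		NotAfter:  time.Date(2018, time.January, 15, 0, 0, 0, 0, time.UTC),
	}
	// Prints: valid for 24 months; acceptable: true
	fmt.Printf("valid for %d months; acceptable: %v\n", MonthsValid(cert), ValidExpiry(cert))
}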
// SignatureString returns the TLS signature string corresponding to
// an X509 signature algorithm.
func SignatureString(alg x509.SignatureAlgorithm) string {
switch alg {
case x509.MD2WithRSA:
return "MD2WithRSA"
case x509.MD5WithRSA:
return "MD5WithRSA"
case x509.SHA1WithRSA:
return "SHA1WithRSA"
case x509.SHA256WithRSA:
return "SHA256WithRSA"
case x509.SHA384WithRSA:
return "SHA384WithRSA"
case x509.SHA512WithRSA:
return "SHA512WithRSA"
case x509.DSAWithSHA1:
return "DSAWithSHA1"
case x509.DSAWithSHA256:
return "DSAWithSHA256"
case x509.ECDSAWithSHA1:
return "ECDSAWithSHA1"
case x509.ECDSAWithSHA256:
return "ECDSAWithSHA256"
case x509.ECDSAWithSHA384:
return "ECDSAWithSHA384"
case x509.ECDSAWithSHA512:
return "ECDSAWithSHA512"
default:
return "Unknown Signature"
}
}
// HashAlgoString returns the hash algorithm name contained in the signature
// method.
func HashAlgoString(alg x509.SignatureAlgorithm) string {
switch alg {
case x509.MD2WithRSA:
return "MD2"
case x509.MD5WithRSA:
return "MD5"
case x509.SHA1WithRSA:
return "SHA1"
case x509.SHA256WithRSA:
return "SHA256"
case x509.SHA384WithRSA:
return "SHA384"
case x509.SHA512WithRSA:
return "SHA512"
case x509.DSAWithSHA1:
return "SHA1"
case x509.DSAWithSHA256:
return "SHA256"
case x509.ECDSAWithSHA1:
return "SHA1"
case x509.ECDSAWithSHA256:
return "SHA256"
case x509.ECDSAWithSHA384:
return "SHA384"
case x509.ECDSAWithSHA512:
return "SHA512"
default:
return "Unknown Hash Algorithm"
}
}
// StringTLSVersion returns the TLS version constant corresponding to a
// human-readable version string, defaulting to TLS 1.0.
func StringTLSVersion(version string) uint16 {
switch version {
case "1.2":
return tls.VersionTLS12
case "1.1":
return tls.VersionTLS11
default:
return tls.VersionTLS10
}
}
// EncodeCertificatesPEM encodes a number of x509 certificates to PEM
func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
var buffer bytes.Buffer
for _, cert := range certs {
pem.Encode(&buffer, &pem.Block{
Type: "CERTIFICATE",
Bytes: cert.Raw,
})
}
return buffer.Bytes()
}
// EncodeCertificatePEM encodes a single x509 certificate to PEM
func EncodeCertificatePEM(cert *x509.Certificate) []byte {
return EncodeCertificatesPEM([]*x509.Certificate{cert})
}
// ParseCertificatesPEM parses a sequence of PEM-encoded certificates and returns them;
// it can handle PEM-encoded PKCS #7 structures.
func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
var certs []*x509.Certificate
var err error
certsPEM = bytes.TrimSpace(certsPEM)
for len(certsPEM) > 0 {
var cert []*x509.Certificate
cert, certsPEM, err = ParseOneCertificateFromPEM(certsPEM)
if err != nil {
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
} else if cert == nil {
break
}
certs = append(certs, cert...)
}
if len(certsPEM) > 0 {
return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("trailing data at end of certificate"))
}
return certs, nil
}
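// loadChainFromFile is a minimal usage sketch (not part of the original
// helpers): it reads a PEM bundle from disk and hands it to
// ParseCertificatesPEM, which copes with both plain certificates and
// PEM-encoded PKCS #7 blocks. The path is supplied by the caller.
func loadChainFromFile(path string) ([]*x509.Certificate, error) {
	pemBytes, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return ParseCertificatesPEM(pemBytes)
}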
// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
// either PKCS #7, PKCS #12, or raw x509.
func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
certsDER = bytes.TrimSpace(certsDER)
pkcs7data, err := pkcs7.ParsePKCS7(certsDER)
if err != nil {
var pkcs12data interface{}
certs = make([]*x509.Certificate, 1)
pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password)
if err != nil {
certs, err = x509.ParseCertificates(certsDER)
if err != nil {
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, err)
}
} else {
key = pkcs12data.(crypto.Signer)
}
} else {
if pkcs7data.ContentInfo != "SignedData" {
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("can only extract certificates from signed data content info"))
}
certs = pkcs7data.Content.SignedData.Certificates
}
if certs == nil {
return nil, key, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificates decoded"))
}
return certs, key, nil
}
// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and checks whether it is self-signed.
func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
cert, err := ParseCertificatePEM(certPEM)
if err != nil {
return nil, err
}
if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
return nil, certerr.VerifyError(certerr.ErrorSourceCertificate, err)
}
return cert, nil
}
// ParseCertificatePEM parses and returns a PEM-encoded certificate;
// it can handle PEM-encoded PKCS #7 structures.
func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
certPEM = bytes.TrimSpace(certPEM)
cert, rest, err := ParseOneCertificateFromPEM(certPEM)
if err != nil {
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
} else if cert == nil {
return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificate decoded"))
} else if len(rest) > 0 {
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("the PEM file should contain only one object"))
} else if len(cert) > 1 {
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("the PKCS7 object in the PEM file should contain only one certificate"))
}
return cert[0], nil
}
// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object,
// either a raw x509 certificate or a PKCS #7 structure possibly containing
// multiple certificates, from the top of certsPEM, which itself may
// contain multiple PEM encoded certificate objects.
func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) {
block, rest := pem.Decode(certsPEM)
if block == nil {
return nil, rest, nil
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes)
if err != nil {
return nil, rest, err
}
if pkcs7data.ContentInfo != "SignedData" {
return nil, rest, errors.New("only PKCS #7 Signed Data Content Info supported for certificate parsing")
}
certs := pkcs7data.Content.SignedData.Certificates
if certs == nil {
return nil, rest, errors.New("PKCS #7 structure contains no certificates")
}
return certs, rest, nil
}
var certs = []*x509.Certificate{cert}
return certs, rest, nil
}
// LoadPEMCertPool loads a pool of PEM certificates from file.
func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
if certsFile == "" {
return nil, nil
}
pemCerts, err := os.ReadFile(certsFile)
if err != nil {
return nil, err
}
return PEMToCertPool(pemCerts)
}
// PEMToCertPool converts PEM certificates to a CertPool.
func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
if len(pemCerts) == 0 {
return nil, nil
}
certPool := x509.NewCertPool()
if !certPool.AppendCertsFromPEM(pemCerts) {
return nil, errors.New("failed to load cert pool")
}
return certPool, nil
}
// ParsePrivateKeyPEM parses and returns a PEM-encoded private
// key. The private key may be either an unencrypted PKCS#8, PKCS#1,
// or elliptic private key.
func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) {
return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
}
// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
// key. The private key may be a potentially encrypted PKCS#8, PKCS#1,
// or elliptic private key.
func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) {
keyDER, err := GetKeyDERFromPEM(keyPEM, password)
if err != nil {
return nil, err
}
return ParsePrivateKeyDER(keyDER)
}
// GetKeyDERFromPEM parses a PEM-encoded private key and returns DER-format key bytes.
func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) {
// Ignore any EC PARAMETERS blocks when looking for a key (openssl includes
// them by default).
var keyDER *pem.Block
for {
keyDER, in = pem.Decode(in)
if keyDER == nil || keyDER.Type != "EC PARAMETERS" {
break
}
}
if keyDER != nil {
if procType, ok := keyDER.Headers["Proc-Type"]; ok {
if strings.Contains(procType, "ENCRYPTED") {
if password != nil {
return x509.DecryptPEMBlock(keyDER, password)
}
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, certerr.ErrEncryptedPrivateKey)
}
}
return keyDER.Bytes, nil
}
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, errors.New("failed to decode private key"))
}
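// loadPrivateKeyFile is an illustrative sketch (not part of the original
// helpers): it loads a PEM private key from disk, falling back to
// ParsePrivateKeyPEMWithPassword when the plain parse fails and a password is
// available (for example, when the PEM block is marked ENCRYPTED).
func loadPrivateKeyFile(path string, password []byte) (crypto.Signer, error) {
	keyPEM, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	key, err := ParsePrivateKeyPEM(keyPEM)
	if err == nil {
		return key, nil
	}
	if len(password) == 0 {
		return nil, err
	}
	return ParsePrivateKeyPEMWithPassword(keyPEM, password)
}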
// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) {
in = bytes.TrimSpace(in)
p, rest := pem.Decode(in)
if p != nil {
if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, certerr.ErrInvalidPEMType(p.Type, "NEW CERTIFICATE REQUEST", "CERTIFICATE REQUEST"))
}
csr, err = x509.ParseCertificateRequest(p.Bytes)
} else {
csr, err = x509.ParseCertificateRequest(in)
}
if err != nil {
return nil, rest, err
}
err = csr.CheckSignature()
if err != nil {
return nil, rest, err
}
return csr, rest, nil
}
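// checkCSRFile is a short usage sketch (not part of the original helpers): it
// reads a PEM- or DER-encoded CSR from disk, lets ParseCSR verify its
// signature, and reports the requested common name.
func checkCSRFile(path string) error {
	in, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	csr, _, err := ParseCSR(in)
	if err != nil {
		return err
	}
	fmt.Printf("CSR for %q has a valid signature\n", csr.Subject.CommonName)
	return nil
}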
// ParseCSRPEM parses a PEM-encoded certificate signing request.
// It does not check the signature. This is useful for dumping data from a CSR
// locally.
func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
block, _ := pem.Decode([]byte(csrPEM))
if block == nil {
return nil, certerr.DecodeError(certerr.ErrorSourceCSR, errors.New("PEM block is empty"))
}
csrObject, err := x509.ParseCertificateRequest(block.Bytes)
if err != nil {
return nil, err
}
return csrObject, nil
}
// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer.
func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
switch pub := priv.Public().(type) {
case *rsa.PublicKey:
bitLength := pub.N.BitLen()
switch {
case bitLength >= 4096:
return x509.SHA512WithRSA
case bitLength >= 3072:
return x509.SHA384WithRSA
case bitLength >= 2048:
return x509.SHA256WithRSA
default:
return x509.SHA1WithRSA
}
case *ecdsa.PublicKey:
switch pub.Curve {
case elliptic.P521():
return x509.ECDSAWithSHA512
case elliptic.P384():
return x509.ECDSAWithSHA384
case elliptic.P256():
return x509.ECDSAWithSHA256
default:
return x509.ECDSAWithSHA1
}
default:
return x509.UnknownSignatureAlgorithm
}
}
// LoadClientCertificate loads a key/certificate pair from PEM files.
func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, error) {
if certFile != "" && keyFile != "" {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return nil, certerr.LoadingError(certerr.ErrorSourceKeypair, err)
}
return &cert, nil
}
return nil, nil
}
// CreateTLSConfig creates a tls.Config object from certs and roots
func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Config {
var certs []tls.Certificate
if cert != nil {
certs = []tls.Certificate{*cert}
}
return &tls.Config{
Certificates: certs,
RootCAs: remoteCAs,
}
}
// SerializeSCTList serializes a list of SCTs.
func SerializeSCTList(sctList []ct.SignedCertificateTimestamp) ([]byte, error) {
list := ctx509.SignedCertificateTimestampList{}
for _, sct := range sctList {
sctBytes, err := cttls.Marshal(sct)
if err != nil {
return nil, err
}
list.SCTList = append(list.SCTList, ctx509.SerializedSCT{Val: sctBytes})
}
return cttls.Marshal(list)
}
// DeserializeSCTList deserializes a list of SCTs.
func DeserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimestamp, error) {
var sctList ctx509.SignedCertificateTimestampList
rest, err := cttls.Unmarshal(serializedSCTList, &sctList)
if err != nil {
return nil, err
}
if len(rest) != 0 {
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, errors.New("serialized SCT list contained trailing garbage"))
}
list := make([]ct.SignedCertificateTimestamp, len(sctList.SCTList))
for i, serializedSCT := range sctList.SCTList {
var sct ct.SignedCertificateTimestamp
rest, err := cttls.Unmarshal(serializedSCT.Val, &sct)
if err != nil {
return nil, err
}
if len(rest) != 0 {
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, errors.New("serialized SCT list contained trailing garbage"))
}
list[i] = sct
}
return list, nil
}
// SCTListFromOCSPResponse extracts the SCTList from an ocsp.Response,
// returning an empty list if the SCT extension was not found or could not be
// unmarshalled.
func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTimestamp, error) {
// This loop finds the SCTListExtension in the OCSP response.
var SCTListExtension, ext pkix.Extension
for _, ext = range response.Extensions {
// sctExtOid is the ObjectIdentifier of a Signed Certificate Timestamp.
sctExtOid := asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 5}
if ext.Id.Equal(sctExtOid) {
SCTListExtension = ext
break
}
}
// This code block extracts the sctList from the SCT extension.
var sctList []ct.SignedCertificateTimestamp
var err error
if numBytes := len(SCTListExtension.Value); numBytes != 0 {
var serializedSCTList []byte
rest := make([]byte, numBytes)
copy(rest, SCTListExtension.Value)
for len(rest) != 0 {
rest, err = asn1.Unmarshal(rest, &serializedSCTList)
if err != nil {
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, err)
}
}
sctList, err = DeserializeSCTList(serializedSCTList)
}
return sctList, err
}
// ReadBytes reads a []byte either from a file or an environment variable.
// If valFile has a prefix of 'env:', the []byte is read from the environment
// using the subsequent name. If the prefix is 'file:' the []byte is read from
// the subsequent file. If no prefix is provided, valFile is assumed to be a
// file path.
func ReadBytes(valFile string) ([]byte, error) {
switch splitVal := strings.SplitN(valFile, ":", 2); len(splitVal) {
case 1:
return os.ReadFile(valFile)
case 2:
switch splitVal[0] {
case "env":
return []byte(os.Getenv(splitVal[1])), nil
case "file":
return os.ReadFile(splitVal[1])
default:
return nil, fmt.Errorf("unknown prefix: %s", splitVal[0])
}
default:
return nil, fmt.Errorf("multiple prefixes: %s",
strings.Join(splitVal[:len(splitVal)-1], ", "))
}
}
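// exampleReadBytes is an illustrative sketch (not part of the original
// helpers) of the three source forms ReadBytes accepts; the file names and
// environment variable are placeholders.
func exampleReadBytes() error {
	sources := []string{
		"ca.pem",        // bare path: read the file directly
		"file:ca.pem",   // explicit file: prefix
		"env:CA_BUNDLE", // read the value of the CA_BUNDLE environment variable
	}
	for _, src := range sources {
		val, err := ReadBytes(src)
		if err != nil {
			return err
		}
		fmt.Printf("%s: %d bytes\n", src, len(val))
	}
	return nil
}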

certlib/pkcs7/pkcs7.go (new file)

@ -0,0 +1,220 @@
package pkcs7
// Originally from CFSSL, and licensed under:
/*
Copyright (c) 2014 CloudFlare Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// I've modified it for use in my own code e.g. by removing the CFSSL errors
// and replacing them with sane ones.
// Package pkcs7 implements the subset of the CMS PKCS #7 datatype that is typically
// used to package certificates and CRLs. Using openssl, every certificate converted
// to PKCS #7 format from another encoding such as PEM conforms to this implementation.
// reference: https://www.openssl.org/docs/man1.1.0/apps/crl2pkcs7.html
//
// PKCS #7 Data type, reference: https://tools.ietf.org/html/rfc2315
//
// The full pkcs#7 cryptographic message syntax allows for cryptographic enhancements,
// for example data can be encrypted and signed and then packaged through pkcs#7 to be
// sent over a network and then verified and decrypted. It is asn1, and the type of
// PKCS #7 ContentInfo, which comprises the PKCS #7 structure, is:
//
// ContentInfo ::= SEQUENCE {
// contentType ContentType,
// content [0] EXPLICIT ANY DEFINED BY contentType OPTIONAL
// }
//
// There are 6 possible ContentTypes, data, signedData, envelopedData,
// signedAndEnvelopedData, digestedData, and encryptedData. Here signedData, Data, and encrypted
// Data are implemented, as the degenerate case of signedData without a signature is the typical
// format for transferring certificates and CRLs, and Data and encryptedData are used in PKCS #12
// formats.
// The ContentType signedData has the form:
//
// signedData ::= SEQUENCE {
// version Version,
// digestAlgorithms DigestAlgorithmIdentifiers,
// contentInfo ContentInfo,
// certificates [0] IMPLICIT ExtendedCertificatesAndCertificates OPTIONAL
// crls [1] IMPLICIT CertificateRevocationLists OPTIONAL,
// signerInfos SignerInfos
// }
//
// As of yet signerInfos and digestAlgorithms are not parsed, as they are not relevant to
// this system's use of PKCS #7 data. Version is an integer type; note that PKCS #7 is
// recursive, so this second layer of ContentInfo is similarly ignored for our degenerate
// usage. The ExtendedCertificatesAndCertificates type consists of a sequence of choices
// between PKCS #6 extended certificates and x509 certificates. Any sequence consisting
// of any number of extended certificates is not yet supported in this implementation.
//
// The ContentType Data is simply a raw octet string and is parsed directly into a Go []byte slice.
//
// The ContentType encryptedData is the most complicated and its form can be gathered by
// the go type below. It essentially contains a raw octet string of encrypted data and an
// algorithm identifier for use in decrypting this data.
import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"errors"
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
)
// Types used for asn1 Unmarshaling.
type signedData struct {
Version int
DigestAlgorithms asn1.RawValue
ContentInfo asn1.RawValue
Certificates asn1.RawValue `asn1:"optional" asn1:"tag:0"`
Crls asn1.RawValue `asn1:"optional"`
SignerInfos asn1.RawValue
}
type initPKCS7 struct {
Raw asn1.RawContent
ContentType asn1.ObjectIdentifier
Content asn1.RawValue `asn1:"tag:0,explicit,optional"`
}
// Object identifier strings of the three implemented PKCS7 types.
const (
ObjIDData = "1.2.840.113549.1.7.1"
ObjIDSignedData = "1.2.840.113549.1.7.2"
ObjIDEncryptedData = "1.2.840.113549.1.7.6"
)
// PKCS7 represents the ASN1 PKCS #7 Content type. It contains one of three
// possible types of Content objects, as denoted by the object identifier in
// the ContentInfo field, the other two being nil. SignedData
// is the degenerate SignedData Content info without signature used
// to hold certificates and crls. Data is raw bytes, and EncryptedData
// is as defined in PKCS #7 standard.
type PKCS7 struct {
Raw asn1.RawContent
ContentInfo string
Content Content
}
// Content implements three of the six possible PKCS7 data types. Only one is non-nil.
type Content struct {
Data []byte
SignedData SignedData
EncryptedData EncryptedData
}
// SignedData defines the typical carrier of certificates and CRLs.
type SignedData struct {
Raw asn1.RawContent
Version int
Certificates []*x509.Certificate
Crl *x509.RevocationList
}
// Data contains raw bytes. Used as a subtype in PKCS12.
type Data struct {
Bytes []byte
}
// EncryptedData contains encrypted data. Used as a subtype in PKCS12.
type EncryptedData struct {
Raw asn1.RawContent
Version int
EncryptedContentInfo EncryptedContentInfo
}
// EncryptedContentInfo is a subtype of PKCS7EncryptedData.
type EncryptedContentInfo struct {
Raw asn1.RawContent
ContentType asn1.ObjectIdentifier
ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
EncryptedContent []byte `asn1:"tag:0,optional"`
}
// ParsePKCS7 attempts to parse the DER encoded bytes of a
// PKCS7 structure.
func ParsePKCS7(raw []byte) (msg *PKCS7, err error) {
var pkcs7 initPKCS7
_, err = asn1.Unmarshal(raw, &pkcs7)
if err != nil {
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
}
msg = new(PKCS7)
msg.Raw = pkcs7.Raw
msg.ContentInfo = pkcs7.ContentType.String()
switch {
case msg.ContentInfo == ObjIDData:
msg.ContentInfo = "Data"
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &msg.Content.Data)
if err != nil {
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
}
case msg.ContentInfo == ObjIDSignedData:
msg.ContentInfo = "SignedData"
var signedData signedData
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &signedData)
if err != nil {
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
}
if len(signedData.Certificates.Bytes) != 0 {
msg.Content.SignedData.Certificates, err = x509.ParseCertificates(signedData.Certificates.Bytes)
if err != nil {
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
}
}
if len(signedData.Crls.Bytes) != 0 {
msg.Content.SignedData.Crl, err = x509.ParseRevocationList(signedData.Crls.Bytes)
if err != nil {
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
}
}
msg.Content.SignedData.Version = signedData.Version
msg.Content.SignedData.Raw = pkcs7.Content.Bytes
case msg.ContentInfo == ObjIDEncryptedData:
msg.ContentInfo = "EncryptedData"
var encryptedData EncryptedData
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &encryptedData)
if err != nil {
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
}
if encryptedData.Version != 0 {
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("only PKCS #7 encryptedData version 0 is supported"))
}
msg.Content.EncryptedData = encryptedData
default:
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("only PKCS# 7 content of type data, signed data or encrypted data can be parsed"))
}
return msg, nil
}
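// certificatesFromPKCS7 is a minimal usage sketch (not part of the original
// CFSSL-derived package): it parses a DER-encoded PKCS #7 blob and returns the
// certificates carried in a degenerate SignedData structure, which is how the
// rest of certlib consumes this package.
func certificatesFromPKCS7(der []byte) ([]*x509.Certificate, error) {
	msg, err := ParsePKCS7(der)
	if err != nil {
		return nil, err
	}
	if msg.ContentInfo != "SignedData" {
		return nil, errors.New("PKCS #7 blob does not contain SignedData")
	}
	if len(msg.Content.SignedData.Certificates) == 0 {
		return nil, errors.New("PKCS #7 SignedData carries no certificates")
	}
	return msg.Content.SignedData.Certificates, nil
}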

certlib/revoke/revoke.go (new file)

@ -0,0 +1,363 @@
// Package revoke provides functionality for checking the validity of
// a cert. Specifically, the temporal validity of the certificate is
// checked first, then any CRL and OCSP url in the cert is checked.
package revoke
import (
"bytes"
"crypto"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"errors"
"fmt"
"io"
"net/http"
neturl "net/url"
"sync"
"time"
"git.wntrmute.dev/kyle/goutils/certlib"
"git.wntrmute.dev/kyle/goutils/log"
"golang.org/x/crypto/ocsp"
)
// Originally from CFSSL, where it was mostly written by me, and licensed under:
/*
Copyright (c) 2014 CloudFlare Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// I've modified it for use in my own code e.g. by removing the CFSSL errors
// and replacing them with sane ones.
// HTTPClient is an instance of http.Client that will be used for all HTTP requests.
var HTTPClient = http.DefaultClient
// HardFail determines whether the failure to check the revocation
// status of a certificate (e.g. due to a network failure) causes
// verification to fail (a hard failure).
var HardFail = false
// CRLSet associates a PKIX certificate list with the URL the CRL is
// fetched from.
var CRLSet = map[string]*x509.RevocationList{}
var crlLock = new(sync.Mutex)
// We can't handle LDAP certificates, so this checks to see if the
// URL string points to an LDAP resource so that we can ignore it.
func ldapURL(url string) bool {
u, err := neturl.Parse(url)
if err != nil {
log.Warningf("error parsing url %s: %v", url, err)
return false
}
if u.Scheme == "ldap" {
return true
}
return false
}
// revCheck should check the certificate for any revocations. It
// returns a pair of booleans: the first indicates whether the certificate
// is revoked, the second indicates whether the revocations were
// successfully checked. This leads to the following combinations:
//
// - false, false: an error was encountered while checking revocations.
// - false, true: the certificate was checked successfully, and it is not revoked.
// - true, true: the certificate was checked successfully, and it is revoked.
// - true, false: failure to check revocation status causes verification to fail
func revCheck(cert *x509.Certificate) (revoked, ok bool, err error) {
for _, url := range cert.CRLDistributionPoints {
if ldapURL(url) {
log.Infof("skipping LDAP CRL: %s", url)
continue
}
if revoked, ok, err := certIsRevokedCRL(cert, url); !ok {
log.Warning("error checking revocation via CRL")
if HardFail {
return true, false, err
}
return false, false, err
} else if revoked {
log.Info("certificate is revoked via CRL")
return true, true, err
}
}
if revoked, ok, err := certIsRevokedOCSP(cert, HardFail); !ok {
log.Warning("error checking revocation via OCSP")
if HardFail {
return true, false, err
}
return false, false, err
} else if revoked {
log.Info("certificate is revoked via OCSP")
return true, true, err
}
return false, true, nil
}
// fetchCRL fetches and parses a CRL.
func fetchCRL(url string) (*x509.RevocationList, error) {
resp, err := HTTPClient.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode >= 300 {
return nil, errors.New("failed to retrieve CRL")
}
body, err := crlRead(resp.Body)
if err != nil {
return nil, err
}
return x509.ParseRevocationList(body)
}
func getIssuer(cert *x509.Certificate) *x509.Certificate {
var issuer *x509.Certificate
var err error
for _, issuingCert := range cert.IssuingCertificateURL {
issuer, err = fetchRemote(issuingCert)
if err != nil {
continue
}
break
}
return issuer
}
// check a cert against a specific CRL. Returns the same bool pair
// as revCheck, plus an error if one occurred.
func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err error) {
crlLock.Lock()
crl, ok := CRLSet[url]
if ok && crl == nil {
ok = false
delete(CRLSet, url)
}
crlLock.Unlock()
var shouldFetchCRL = true
if ok {
		// Reuse the cached CRL only while it is still current; refetch it
		// once its NextUpdate time has passed.
		if time.Now().Before(crl.NextUpdate) {
shouldFetchCRL = false
}
}
issuer := getIssuer(cert)
if shouldFetchCRL {
var err error
crl, err = fetchCRL(url)
if err != nil {
log.Warningf("failed to fetch CRL: %v", err)
return false, false, err
}
// check CRL signature
if issuer != nil {
err = crl.CheckSignatureFrom(issuer)
if err != nil {
log.Warningf("failed to verify CRL: %v", err)
return false, false, err
}
}
crlLock.Lock()
CRLSet[url] = crl
crlLock.Unlock()
}
for _, revoked := range crl.RevokedCertificates {
if cert.SerialNumber.Cmp(revoked.SerialNumber) == 0 {
log.Info("Serial number match: intermediate is revoked.")
return true, true, err
}
}
return false, true, err
}
// VerifyCertificate ensures that the certificate passed in hasn't
// expired and checks the CRL for the server.
func VerifyCertificate(cert *x509.Certificate) (revoked, ok bool) {
revoked, ok, _ = VerifyCertificateError(cert)
return revoked, ok
}
// VerifyCertificateError ensures that the certificate passed in hasn't
// expired and checks the CRL for the server.
func VerifyCertificateError(cert *x509.Certificate) (revoked, ok bool, err error) {
if !time.Now().Before(cert.NotAfter) {
msg := fmt.Sprintf("Certificate expired %s\n", cert.NotAfter)
log.Info(msg)
return true, true, fmt.Errorf(msg)
} else if !time.Now().After(cert.NotBefore) {
msg := fmt.Sprintf("Certificate isn't valid until %s\n", cert.NotBefore)
log.Info(msg)
return true, true, fmt.Errorf(msg)
}
return revCheck(cert)
}
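// checkChainRevocation is an illustrative sketch (not part of the original
// CFSSL-derived package): it runs VerifyCertificateError over each certificate
// in a chain, treating a soft failure (ok == false) as inconclusive rather
// than fatal, which mirrors the package's default soft-fail behaviour.
func checkChainRevocation(chain []*x509.Certificate) error {
	for _, cert := range chain {
		revoked, ok, err := VerifyCertificateError(cert)
		switch {
		case revoked:
			return fmt.Errorf("certificate %q failed revocation checks: %v",
				cert.Subject.CommonName, err)
		case !ok:
			log.Warningf("revocation status of %q could not be determined: %v",
				cert.Subject.CommonName, err)
		}
	}
	return nil
}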
func fetchRemote(url string) (*x509.Certificate, error) {
resp, err := HTTPClient.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
in, err := remoteRead(resp.Body)
if err != nil {
return nil, err
}
p, _ := pem.Decode(in)
if p != nil {
return certlib.ParseCertificatePEM(in)
}
return x509.ParseCertificate(in)
}
var ocspOpts = ocsp.RequestOptions{
Hash: crypto.SHA1,
}
func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e error) {
var err error
ocspURLs := leaf.OCSPServer
if len(ocspURLs) == 0 {
// OCSP not enabled for this certificate.
return false, true, nil
}
issuer := getIssuer(leaf)
if issuer == nil {
return false, false, nil
}
ocspRequest, err := ocsp.CreateRequest(leaf, issuer, &ocspOpts)
if err != nil {
return revoked, ok, err
}
for _, server := range ocspURLs {
resp, err := sendOCSPRequest(server, ocspRequest, leaf, issuer)
if err != nil {
if strict {
return revoked, ok, err
}
continue
}
// There wasn't an error fetching the OCSP status.
ok = true
if resp.Status != ocsp.Good {
// The certificate was revoked.
revoked = true
}
return revoked, ok, err
}
return revoked, ok, err
}
// sendOCSPRequest attempts to request an OCSP response from the
// server. The error only indicates a failure to *fetch* the
// OCSP response, and *does not* mean the certificate is valid.
func sendOCSPRequest(server string, req []byte, leaf, issuer *x509.Certificate) (*ocsp.Response, error) {
var resp *http.Response
var err error
if len(req) > 256 {
buf := bytes.NewBuffer(req)
resp, err = HTTPClient.Post(server, "application/ocsp-request", buf)
} else {
reqURL := server + "/" + neturl.QueryEscape(base64.StdEncoding.EncodeToString(req))
resp, err = HTTPClient.Get(reqURL)
}
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, errors.New("failed to retrieve OSCP")
}
body, err := ocspRead(resp.Body)
if err != nil {
return nil, err
}
switch {
	case bytes.Equal(body, ocsp.UnauthorizedErrorResponse):
		return nil, errors.New("OCSP unauthorized")
	case bytes.Equal(body, ocsp.MalformedRequestErrorResponse):
		return nil, errors.New("OCSP malformed")
	case bytes.Equal(body, ocsp.InternalErrorErrorResponse):
		return nil, errors.New("OCSP internal error")
	case bytes.Equal(body, ocsp.TryLaterErrorResponse):
		return nil, errors.New("OCSP try later")
	case bytes.Equal(body, ocsp.SigRequredErrorResponse):
		return nil, errors.New("OCSP signature required")
}
return ocsp.ParseResponseForCert(body, leaf, issuer)
}
var crlRead = io.ReadAll
// SetCRLFetcher sets the function to use to read from the http response body
func SetCRLFetcher(fn func(io.Reader) ([]byte, error)) {
crlRead = fn
}
var remoteRead = io.ReadAll
// SetRemoteFetcher sets the function to use to read from the http response body
func SetRemoteFetcher(fn func(io.Reader) ([]byte, error)) {
remoteRead = fn
}
var ocspRead = io.ReadAll
// SetOCSPFetcher sets the function to use to read from the http response body
func SetOCSPFetcher(fn func(io.Reader) ([]byte, error)) {
ocspRead = fn
}
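// limitFetchers is an illustrative sketch (not part of the original package):
// it uses the Set*Fetcher hooks to cap how much data is read from CRL,
// remote-certificate, and OCSP response bodies. The 1 MiB ceiling is an
// arbitrary example value.
func limitFetchers() {
	const maxBody = 1 << 20 // 1 MiB
	limited := func(r io.Reader) ([]byte, error) {
		return io.ReadAll(io.LimitReader(r, maxBody))
	}
	SetCRLFetcher(limited)
	SetRemoteFetcher(limited)
	SetOCSPFetcher(limited)
}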


@ -0,0 +1,262 @@
package revoke
import (
"crypto/x509"
"encoding/pem"
"fmt"
"os"
"testing"
"time"
)
// Originally from CFSSL, where it was mostly written by me, and licensed under:
/*
Copyright (c) 2014 CloudFlare Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// I've modified it for use in my own code e.g. by removing the CFSSL errors
// and replacing them with sane ones.
// The first three test cases represent known revoked, expired, and good
// certificates that were checked on the date listed in the log. The
// good certificate will eventually need to be replaced in year 2029.
// If there is a soft-fail, the test will pass to mimic the default
// behaviour used in this software. However, it will print a warning
// to indicate that this is the case.
// 2014/05/22 14:18:17 Certificate expired 2014-04-04 14:14:20 +0000 UTC
// 2014/05/22 14:18:17 Revoked certificate: misc/intermediate_ca/ActalisServerAuthenticationCA.crt
var expiredCert = mustParse(`-----BEGIN CERTIFICATE-----
MIIEXTCCA8agAwIBAgIEBycURTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQGEwJV
UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU
cnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds
b2JhbCBSb290MB4XDTA3MDQwNDE0MTUxNFoXDTE0MDQwNDE0MTQyMFowejELMAkG
A1UEBhMCSVQxFzAVBgNVBAoTDkFjdGFsaXMgUy5wLkEuMScwJQYDVQQLEx5DZXJ0
aWZpY2F0aW9uIFNlcnZpY2UgUHJvdmlkZXIxKTAnBgNVBAMTIEFjdGFsaXMgU2Vy
dmVyIEF1dGhlbnRpY2F0aW9uIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEAv6P0bhXbUQkVW8ox0HJ+sP5+j6pTwS7yg/wGEUektB/G1duQiT1v21fo
LANr6F353jILQDCpHIfal3MhbSsHEMKU7XaqsyLWV93bcIKbIloS/eXDfkog6KB3
u0JHgrtNz584Jg/OLm9feffNbCJ38TiLo0/UWkAQ6PQWaOwZEgyKjVI5F3swoTB3
g0LZAzegvkU00Kfp13cSg+cJeU4SajwtfQ+g6s6dlaekaHy/0ef46PfiHHRuhEhE
JWIpDtUN2ywTT33MSSUe5glDIiXYfcamJQrebzGsHEwyqI195Yaxb+FLNND4n3HM
e7EI2OrLyT+r/WMvQbl+xNihwtv+HwIDAQABo4IBbzCCAWswEgYDVR0TAQH/BAgw
BgEB/wIBADBTBgNVHSAETDBKMEgGCSsGAQQBsT4BADA7MDkGCCsGAQUFBwIBFi1o
dHRwOi8vd3d3LnB1YmxpYy10cnVzdC5jb20vQ1BTL09tbmlSb290Lmh0bWwwDgYD
VR0PAQH/BAQDAgEGMIGJBgNVHSMEgYEwf6F5pHcwdTELMAkGA1UEBhMCVVMxGDAW
BgNVBAoTD0dURSBDb3Jwb3JhdGlvbjEnMCUGA1UECxMeR1RFIEN5YmVyVHJ1c3Qg
U29sdXRpb25zLCBJbmMuMSMwIQYDVQQDExpHVEUgQ3liZXJUcnVzdCBHbG9iYWwg
Um9vdIICAaUwRQYDVR0fBD4wPDA6oDigNoY0aHR0cDovL3d3dy5wdWJsaWMtdHJ1
c3QuY29tL2NnaS1iaW4vQ1JMLzIwMTgvY2RwLmNybDAdBgNVHQ4EFgQUpi6OuXYt
oxHC3cTezVLuraWpAFEwDQYJKoZIhvcNAQEFBQADgYEAAtjJBwjsvw7DBs+v7BQz
gSGeg6nbYUuPL7+1driT5XsUKJ7WZjiwW2zW/WHZ+zGo1Ev8Dc574RpSrg/EIlfH
TpBiBuFgiKtJksKdoxPZGSI8FitwcgeW+y8wotmm0CtDzWN27g2kfSqHb5eHfZY5
sESPRwHkcMUNdAp37FLweUw=
-----END CERTIFICATE-----`)
// 2014/05/22 14:18:31 Serial number match: intermediate is revoked.
// 2014/05/22 14:18:31 certificate is revoked via CRL
// 2014/05/22 14:18:31 Revoked certificate: misc/intermediate_ca/MobileArmorEnterpriseCA.crt
var revokedCert = mustParse(`-----BEGIN CERTIFICATE-----
MIIEEzCCAvugAwIBAgILBAAAAAABGMGjftYwDQYJKoZIhvcNAQEFBQAwcTEoMCYG
A1UEAxMfR2xvYmFsU2lnbiBSb290U2lnbiBQYXJ0bmVycyBDQTEdMBsGA1UECxMU
Um9vdFNpZ24gUGFydG5lcnMgQ0ExGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2Ex
CzAJBgNVBAYTAkJFMB4XDTA4MDMxODEyMDAwMFoXDTE4MDMxODEyMDAwMFowJTEj
MCEGA1UEAxMaTW9iaWxlIEFybW9yIEVudGVycHJpc2UgQ0EwggEiMA0GCSqGSIb3
DQEBAQUAA4IBDwAwggEKAoIBAQCaEjeDR73jSZVlacRn5bc5VIPdyouHvGIBUxyS
C6483HgoDlWrWlkEndUYFjRPiQqJFthdJxfglykXD+btHixMIYbz/6eb7hRTdT9w
HKsfH+wTBIdb5AZiNjkg3QcCET5HfanJhpREjZWP513jM/GSrG3VwD6X5yttCIH1
NFTDAr7aqpW/UPw4gcPfkwS92HPdIkb2DYnsqRrnKyNValVItkxJiotQ1HOO3YfX
ivGrHIbJdWYg0rZnkPOgYF0d+aIA4ZfwvdW48+r/cxvLevieuKj5CTBZZ8XrFt8r
JTZhZljbZvnvq/t6ZIzlwOj082f+lTssr1fJ3JsIPnG2lmgTAgMBAAGjgfcwgfQw
DgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFIZw
ns4uzXdLX6xDRXUzFgZxWM7oME0GA1UdIARGMEQwQgYJKwYBBAGgMgE8MDUwMwYI
KwYBBQUHAgIwJxolaHR0cDovL3d3dy5nbG9iYWxzaWduLmNvbS9yZXBvc2l0b3J5
LzA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmdsb2JhbHNpZ24ubmV0L1Jv
b3RTaWduUGFydG5lcnMuY3JsMB8GA1UdIwQYMBaAFFaE7LVxpedj2NtRBNb65vBI
UknOMA0GCSqGSIb3DQEBBQUAA4IBAQBZvf+2xUJE0ekxuNk30kPDj+5u9oI3jZyM
wvhKcs7AuRAbcxPtSOnVGNYl8By7DPvPun+U3Yci8540y143RgD+kz3jxIBaoW/o
c4+X61v6DBUtcBPEt+KkV6HIsZ61SZmc/Y1I2eoeEt6JYoLjEZMDLLvc1cK/+wpg
dUZSK4O9kjvIXqvsqIOlkmh/6puSugTNao2A7EIQr8ut0ZmzKzMyZ0BuQhJDnAPd
Kz5vh+5tmytUPKA8hUgmLWe94lMb7Uqq2wgZKsqun5DAWleKu81w7wEcOrjiiB+x
jeBHq7OnpWm+ccTOPCE6H4ZN4wWVS7biEBUdop/8HgXBPQHWAdjL
-----END CERTIFICATE-----`)
// A Comodo intermediate CA certificate with issuer url, CRL url and OCSP url
var goodComodoCA = (`-----BEGIN CERTIFICATE-----
MIIGCDCCA/CgAwIBAgIQKy5u6tl1NmwUim7bo3yMBzANBgkqhkiG9w0BAQwFADCB
hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV
BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTQwMjEy
MDAwMDAwWhcNMjkwMjExMjM1OTU5WjCBkDELMAkGA1UEBhMCR0IxGzAZBgNVBAgT
EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
Q09NT0RPIENBIExpbWl0ZWQxNjA0BgNVBAMTLUNPTU9ETyBSU0EgRG9tYWluIFZh
bGlkYXRpb24gU2VjdXJlIFNlcnZlciBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBAI7CAhnhoFmk6zg1jSz9AdDTScBkxwtiBUUWOqigwAwCfx3M28Sh
bXcDow+G+eMGnD4LgYqbSRutA776S9uMIO3Vzl5ljj4Nr0zCsLdFXlIvNN5IJGS0
Qa4Al/e+Z96e0HqnU4A7fK31llVvl0cKfIWLIpeNs4TgllfQcBhglo/uLQeTnaG6
ytHNe+nEKpooIZFNb5JPJaXyejXdJtxGpdCsWTWM/06RQ1A/WZMebFEh7lgUq/51
UHg+TLAchhP6a5i84DuUHoVS3AOTJBhuyydRReZw3iVDpA3hSqXttn7IzW3uLh0n
c13cRTCAquOyQQuvvUSH2rnlG51/ruWFgqUCAwEAAaOCAWUwggFhMB8GA1UdIwQY
MBaAFLuvfgI9+qbxPISOre44mOzZMjLUMB0GA1UdDgQWBBSQr2o6lFoL2JDqElZz
30O0Oija5zAOBgNVHQ8BAf8EBAMCAYYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNV
HSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwGwYDVR0gBBQwEjAGBgRVHSAAMAgG
BmeBDAECATBMBgNVHR8ERTBDMEGgP6A9hjtodHRwOi8vY3JsLmNvbW9kb2NhLmNv
bS9DT01PRE9SU0FDZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDBxBggrBgEFBQcB
AQRlMGMwOwYIKwYBBQUHMAKGL2h0dHA6Ly9jcnQuY29tb2RvY2EuY29tL0NPTU9E
T1JTQUFkZFRydXN0Q0EuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21v
ZG9jYS5jb20wDQYJKoZIhvcNAQEMBQADggIBAE4rdk+SHGI2ibp3wScF9BzWRJ2p
mj6q1WZmAT7qSeaiNbz69t2Vjpk1mA42GHWx3d1Qcnyu3HeIzg/3kCDKo2cuH1Z/
e+FE6kKVxF0NAVBGFfKBiVlsit2M8RKhjTpCipj4SzR7JzsItG8kO3KdY3RYPBps
P0/HEZrIqPW1N+8QRcZs2eBelSaz662jue5/DJpmNXMyYE7l3YphLG5SEXdoltMY
dVEVABt0iN3hxzgEQyjpFv3ZBdRdRydg1vs4O2xyopT4Qhrf7W8GjEXCBgCq5Ojc
2bXhc3js9iPc0d1sjhqPpepUfJa3w/5Vjo1JXvxku88+vZbrac2/4EjxYoIQ5QxG
V/Iz2tDIY+3GH5QFlkoakdH368+PUq4NCNk+qKBR6cGHdNXJ93SrLlP7u3r7l+L4
HyaPs9Kg4DdbKDsx5Q5XLVq4rXmsXiBmGqW5prU5wfWYQ//u+aen/e7KJD2AFsQX
j4rBYKEMrltDR5FL1ZoXX/nUh8HCjLfn4g8wGTeGrODcQgPmlKidrv0PJFGUzpII
0fxQ8ANAe4hZ7Q7drNJ3gjTcBpUC2JD5Leo31Rpg0Gcg19hCC0Wvgmje3WYkN5Ap
lBlGGSW4gNfL1IYoakRwJiNiqZ+Gb7+6kHDSVneFeO/qJakXzlByjAA6quPbYzSf
+AZxAeKCINT+b72x
-----END CERTIFICATE-----`)
var goodCert = mustParse(goodComodoCA)
func mustParse(pemData string) *x509.Certificate {
block, _ := pem.Decode([]byte(pemData))
if block == nil {
panic("Invalid PEM data.")
} else if block.Type != "CERTIFICATE" {
panic("Invalid PEM type.")
}
cert, err := x509.ParseCertificate([]byte(block.Bytes))
if err != nil {
panic(err.Error())
}
return cert
}
func TestRevoked(t *testing.T) {
if revoked, ok := VerifyCertificate(revokedCert); !ok {
fmt.Fprintf(os.Stderr, "Warning: soft fail checking revocation")
} else if !revoked {
t.Fatalf("revoked certificate should have been marked as revoked")
}
}
func TestExpired(t *testing.T) {
if revoked, ok := VerifyCertificate(expiredCert); !ok {
fmt.Fprintf(os.Stderr, "Warning: soft fail checking revocation")
} else if !revoked {
t.Fatalf("expired certificate should have been marked as revoked")
}
}
func TestGood(t *testing.T) {
if revoked, ok := VerifyCertificate(goodCert); !ok {
fmt.Fprintf(os.Stderr, "Warning: soft fail checking revocation")
} else if revoked {
t.Fatalf("good certificate should not have been marked as revoked")
}
}
func TestLdap(t *testing.T) {
ldapCert := mustParse(goodComodoCA)
ldapCert.CRLDistributionPoints = append(ldapCert.CRLDistributionPoints, "ldap://myldap.example.com")
if revoked, ok := VerifyCertificate(ldapCert); revoked || !ok {
t.Fatalf("ldap certificate should have been recognized")
}
}
func TestLdapURLErr(t *testing.T) {
if ldapURL(":") {
t.Fatalf("bad url does not cause error")
}
}
func TestCertNotYetValid(t *testing.T) {
notReadyCert := expiredCert
notReadyCert.NotBefore = time.Date(3000, time.January, 1, 1, 1, 1, 1, time.Local)
notReadyCert.NotAfter = time.Date(3005, time.January, 1, 1, 1, 1, 1, time.Local)
if revoked, _ := VerifyCertificate(expiredCert); !revoked {
t.Fatalf("not yet verified certificate should have been marked as revoked")
}
}
func TestCRLFetchError(t *testing.T) {
ldapCert := mustParse(goodComodoCA)
ldapCert.CRLDistributionPoints[0] = ""
if revoked, ok := VerifyCertificate(ldapCert); ok || revoked {
t.Fatalf("Fetching error not encountered")
}
HardFail = true
if revoked, ok := VerifyCertificate(ldapCert); ok || !revoked {
t.Fatalf("Fetching error not encountered, hardfail not registered")
}
HardFail = false
}
func TestBadCRLSet(t *testing.T) {
ldapCert := mustParse(goodComodoCA)
ldapCert.CRLDistributionPoints[0] = ""
CRLSet[""] = nil
certIsRevokedCRL(ldapCert, "")
if _, ok := CRLSet[""]; ok {
t.Fatalf("key emptystring should be deleted from CRLSet")
}
delete(CRLSet, "")
}
func TestCachedCRLSet(t *testing.T) {
VerifyCertificate(goodCert)
if revoked, ok := VerifyCertificate(goodCert); !ok || revoked {
t.Fatalf("Previously fetched CRL's should be read smoothly and unrevoked")
}
}
func TestRemoteFetchError(t *testing.T) {
badurl := ":"
if _, err := fetchRemote(badurl); err == nil {
t.Fatalf("fetching bad url should result in non-nil error")
}
}
func TestNoOCSPServers(t *testing.T) {
badIssuer := goodCert
badIssuer.IssuingCertificateURL = []string{" "}
certIsRevokedOCSP(badIssuer, true)
noOCSPCert := goodCert
noOCSPCert.OCSPServer = make([]string, 0)
if revoked, ok, _ := certIsRevokedOCSP(noOCSPCert, true); revoked || !ok {
t.Fatalf("OCSP falsely registered as enabled for this certificate")
}
}


@ -7,7 +7,7 @@ import (
"fmt"
"regexp"
"github.com/kisom/goutils/die"
"git.wntrmute.dev/kyle/goutils/die"
)
var hasPort = regexp.MustCompile(`:\d+$`)


@ -6,16 +6,19 @@ import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"flag"
"fmt"
"io/ioutil"
"io"
"os"
"sort"
"strings"
"github.com/cloudflare/cfssl/helpers"
"git.wntrmute.dev/kyle/goutils/certlib"
"git.wntrmute.dev/kyle/goutils/lib"
)
func certPublic(cert *x509.Certificate) string {
@ -82,6 +85,7 @@ func keyUsages(ku x509.KeyUsage) string {
uses = append(uses, s)
}
}
sort.Strings(uses)
return strings.Join(uses, ", ")
}
@ -91,6 +95,7 @@ func extUsage(ext []x509.ExtKeyUsage) string {
for i := range ext {
ns = append(ns, extKeyUsages[ext[i]])
}
sort.Strings(ns)
return strings.Join(ns, ", ")
}
@ -105,6 +110,14 @@ func showBasicConstraints(cert *x509.Certificate) {
if cert.IsCA {
fmt.Printf(", is a CA certificate")
if !cert.BasicConstraintsValid {
fmt.Printf(" (basic constraint failure)")
}
} else {
fmt.Printf("is not a CA certificate")
if cert.KeyUsage&x509.KeyUsageKeyEncipherment != 0 {
fmt.Printf(" (key encipherment usage enabled!)")
}
}
if (cert.MaxPathLen == 0 && cert.MaxPathLenZero) || (cert.MaxPathLen > 0) {
@ -116,7 +129,10 @@ func showBasicConstraints(cert *x509.Certificate) {
const oneTrueDateFormat = "2006-01-02T15:04:05-0700"
var dateFormat string
var (
dateFormat string
showHash bool // if true, print a SHA256 hash of the certificate's Raw field
)
func wrapPrint(text string, indent int) {
tabs := ""
@ -129,6 +145,9 @@ func wrapPrint(text string, indent int) {
func displayCert(cert *x509.Certificate) {
fmt.Println("CERTIFICATE")
if showHash {
fmt.Println(wrap(fmt.Sprintf("SHA256: %x", sha256.Sum256(cert.Raw)), 0))
}
fmt.Println(wrap("Subject: "+displayName(cert.Subject), 0))
fmt.Println(wrap("Issuer: "+displayName(cert.Issuer), 0))
fmt.Printf("\tSignature algorithm: %s / %s\n", sigAlgoPK(cert.SignatureAlgorithm),
@ -198,17 +217,17 @@ func displayCert(cert *x509.Certificate) {
}
func displayAllCerts(in []byte, leafOnly bool) {
certs, err := helpers.ParseCertificatesPEM(in)
certs, err := certlib.ParseCertificatesPEM(in)
if err != nil {
certs, _, err = helpers.ParseCertificatesDER(in, "")
certs, _, err = certlib.ParseCertificatesDER(in, "")
if err != nil {
Warn(TranslateCFSSLError(err), "failed to parse certificates")
lib.Warn(err, "failed to parse certificates")
return
}
}
if len(certs) == 0 {
Warnx("no certificates found")
lib.Warnx("no certificates found")
return
}
@ -226,7 +245,7 @@ func displayAllCertsWeb(uri string, leafOnly bool) {
ci := getConnInfo(uri)
conn, err := tls.Dial("tcp", ci.Addr, permissiveConfig())
if err != nil {
Warn(err, "couldn't connect to %s", ci.Addr)
lib.Warn(err, "couldn't connect to %s", ci.Addr)
return
}
defer conn.Close()
@ -242,11 +261,11 @@ func displayAllCertsWeb(uri string, leafOnly bool) {
}
conn.Close()
} else {
Warn(err, "TLS verification error with server name %s", ci.Host)
lib.Warn(err, "TLS verification error with server name %s", ci.Host)
}
if len(state.PeerCertificates) == 0 {
Warnx("no certificates found")
lib.Warnx("no certificates found")
return
}
@ -256,7 +275,7 @@ func displayAllCertsWeb(uri string, leafOnly bool) {
}
if len(state.VerifiedChains) == 0 {
Warnx("no verified chains found; using peer chain")
lib.Warnx("no verified chains found; using peer chain")
for i := range state.PeerCertificates {
displayCert(state.PeerCertificates[i])
}
@ -273,14 +292,15 @@ func displayAllCertsWeb(uri string, leafOnly bool) {
func main() {
var leafOnly bool
flag.BoolVar(&showHash, "d", false, "show hashes of raw DER contents")
flag.StringVar(&dateFormat, "s", oneTrueDateFormat, "date `format` in Go time format")
flag.BoolVar(&leafOnly, "l", false, "only show the leaf certificate")
flag.Parse()
if flag.NArg() == 0 || (flag.NArg() == 1 && flag.Arg(0) == "-") {
certs, err := ioutil.ReadAll(os.Stdin)
certs, err := io.ReadAll(os.Stdin)
if err != nil {
Warn(err, "couldn't read certificates from standard input")
lib.Warn(err, "couldn't read certificates from standard input")
os.Exit(1)
}
@ -295,9 +315,9 @@ func main() {
if strings.HasPrefix(filename, "https://") {
displayAllCertsWeb(filename, leafOnly)
} else {
in, err := ioutil.ReadFile(filename)
in, err := os.ReadFile(filename)
if err != nil {
Warn(err, "couldn't read certificate")
lib.Warn(err, "couldn't read certificate")
continue
}

View File

@ -3,13 +3,10 @@ package main
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"os"
"strings"
cferr "github.com/cloudflare/cfssl/errors"
"github.com/kr/text"
)
@ -89,34 +86,6 @@ func sigAlgoHash(a x509.SignatureAlgorithm) string {
}
}
// TranslateCFSSLError turns a CFSSL error into a more readable string.
func TranslateCFSSLError(err error) error {
if err == nil {
return nil
}
// printing errors as json is terrible
if cfsslError, ok := err.(*cferr.Error); ok {
err = errors.New(cfsslError.Message)
}
return err
}
// Warnx displays a formatted error message to standard error, à la
// warnx(3).
func Warnx(format string, a ...interface{}) (int, error) {
format += "\n"
return fmt.Fprintf(os.Stderr, format, a...)
}
// Warn displays a formatted error message to standard output,
// appending the error string, à la warn(3).
func Warn(err error, format string, a ...interface{}) (int, error) {
format += ": %v\n"
a = append(a, err)
return fmt.Fprintf(os.Stderr, format, a...)
}
const maxLine = 78
func makeIndent(n int) string {

View File

@ -10,9 +10,9 @@ import (
"strings"
"time"
"github.com/cloudflare/cfssl/helpers"
"github.com/kisom/goutils/die"
"github.com/kisom/goutils/lib"
"git.wntrmute.dev/kyle/goutils/certlib"
"git.wntrmute.dev/kyle/goutils/die"
"git.wntrmute.dev/kyle/goutils/lib"
)
var warnOnly bool
@ -87,7 +87,7 @@ func main() {
continue
}
certs, err := helpers.ParseCertificatesPEM(in)
certs, err := certlib.ParseCertificatesPEM(in)
if err != nil {
lib.Warn(err, "while parsing certificates")
continue

View File

@ -8,14 +8,14 @@ import (
"os"
"time"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/revoke"
"github.com/kisom/goutils/die"
"github.com/kisom/goutils/lib"
"git.wntrmute.dev/kyle/goutils/certlib"
"git.wntrmute.dev/kyle/goutils/certlib/revoke"
"git.wntrmute.dev/kyle/goutils/die"
"git.wntrmute.dev/kyle/goutils/lib"
)
func printRevocation(cert *x509.Certificate) {
remaining := cert.NotAfter.Sub(time.Now())
remaining := time.Until(cert.NotAfter)
fmt.Printf("certificate expires in %s.\n", lib.Duration(remaining))
revoked, ok := revoke.VerifyCertificate(cert)
@ -47,7 +47,7 @@ func main() {
if verbose {
fmt.Println("[+] loading root certificates from", caFile)
}
roots, err = helpers.LoadPEMCertPool(caFile)
roots, err = certlib.LoadPEMCertPool(caFile)
die.If(err)
}
@ -57,7 +57,7 @@ func main() {
if verbose {
fmt.Println("[+] loading intermediate certificates from", intFile)
}
ints, err = helpers.LoadPEMCertPool(caFile)
ints, err = certlib.LoadPEMCertPool(caFile)
die.If(err)
} else {
ints = x509.NewCertPool()
@ -71,7 +71,7 @@ func main() {
fileData, err := ioutil.ReadFile(flag.Arg(0))
die.If(err)
chain, err := helpers.ParseCertificatesPEM(fileData)
chain, err := certlib.ParseCertificatesPEM(fileData)
die.If(err)
if verbose {
fmt.Printf("[+] %s has %d certificates\n", flag.Arg(0), len(chain))

View File

@ -11,7 +11,7 @@ import (
"strings"
"sync"
"github.com/kisom/goutils/lib"
"git.wntrmute.dev/kyle/goutils/lib"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"

20
cmd/cruntar/README Normal file
View File

@ -0,0 +1,20 @@
ChromeOS untar
This is a tool that is intended to support untarring on SquashFS file
systems. In particular, every time it encounters a hard link, it
will just create a copy of the file.
Usage: cruntar [-ajmpvz] archive [dest]
Flags:
-a Shortcut for -m -p: preserve owners and file mode.
-j The archive is compressed with bzip2.
-m Preserve file modes.
-p Preserve ownership.
-v Print the name of each file as it is being processed.
-z The archive is compressed with gzip.
I wrote this after running into problems with untarring the
gcc-arm-eabi-none toolchain. The shared storage in Termux under
ChromeOS doesn't support hard links, so I opted to just make a copy
rather than dealing with links and whatnot.
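For example, a hypothetical invocation that extracts a gzip-compressed
toolchain archive into /data/toolchain while preserving modes and
ownership (the archive name and destination are illustrative):
$ cruntar -a -z gcc-arm.tar.gz /data/toolchain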

277
cmd/cruntar/main.go Normal file
View File

@ -0,0 +1,277 @@
package main
import (
"archive/tar"
"compress/bzip2"
"compress/gzip"
"errors"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"git.wntrmute.dev/kyle/goutils/die"
"git.wntrmute.dev/kyle/goutils/fileutil"
)
var (
preserveOwners bool
preserveMode bool
verbose bool
)
func setupFile(hdr *tar.Header, file *os.File) error {
if preserveMode {
if verbose {
fmt.Printf("\tchmod %0#o\n", hdr.Mode)
}
err := file.Chmod(os.FileMode(hdr.Mode))
if err != nil {
return err
}
}
if preserveOwners {
fmt.Printf("\tchown %d:%d\n", hdr.Uid, hdr.Gid)
err := file.Chown(hdr.Uid, hdr.Gid)
if err != nil {
return err
}
}
return nil
}
func linkTarget(target, top string) string {
if filepath.IsAbs(target) {
return target
}
return filepath.Clean(filepath.Join(target, top))
}
func processFile(tfr *tar.Reader, hdr *tar.Header, top string) error {
if verbose {
fmt.Println(hdr.Name)
}
filePath := filepath.Clean(filepath.Join(top, hdr.Name))
switch hdr.Typeflag {
case tar.TypeReg:
file, err := os.Create(filePath)
if err != nil {
return err
}
_, err = io.Copy(file, tfr)
if err != nil {
return err
}
err = setupFile(hdr, file)
if err != nil {
return err
}
case tar.TypeLink:
file, err := os.Create(filePath)
if err != nil {
return err
}
source, err := os.Open(hdr.Linkname)
if err != nil {
return err
}
_, err = io.Copy(file, source)
if err != nil {
return err
}
err = setupFile(hdr, file)
if err != nil {
return err
}
case tar.TypeSymlink:
if !fileutil.ValidateSymlink(hdr.Linkname, top) {
return fmt.Errorf("symlink %s is outside the top-level %s",
hdr.Linkname, top)
}
path := linkTarget(hdr.Linkname, top)
if ok, err := filepath.Match(top+"/*", filepath.Clean(path)); !ok {
return fmt.Errorf("symlink %s isn't in %s", hdr.Linkname, top)
} else if err != nil {
return err
}
err := os.Symlink(linkTarget(hdr.Linkname, top), filePath)
if err != nil {
return err
}
case tar.TypeDir:
err := os.MkdirAll(filePath, os.FileMode(hdr.Mode))
if err != nil {
return err
}
}
return nil
}
var compression = map[string]bool{
"gzip": false,
"bzip2": false,
}
type bzipCloser struct {
r io.Reader
}
func (brc *bzipCloser) Read(p []byte) (int, error) {
return brc.r.Read(p)
}
func (brc *bzipCloser) Close() error {
return nil
}
func newBzipCloser(r io.ReadCloser) (io.ReadCloser, error) {
br := bzip2.NewReader(r)
return &bzipCloser{r: br}, nil
}
var compressFuncs = map[string]func(io.ReadCloser) (io.ReadCloser, error){
"gzip": func(r io.ReadCloser) (io.ReadCloser, error) { return gzip.NewReader(r) },
"bzip2": newBzipCloser,
}
func verifyCompression() bool {
var compressed bool
for _, v := range compression {
if compressed && v {
return false
}
compressed = compressed || v
}
return true
}
func getReader(r io.ReadCloser) (io.ReadCloser, error) {
for c, v := range compression {
if v {
return compressFuncs[c](r)
}
}
return r, nil
}
func openArchive(path string) (io.ReadCloser, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
r, err := getReader(file)
if err != nil {
return nil, err
}
return r, nil
}
var compressFlags struct {
z bool
j bool
}
func parseCompressFlags() error {
if compressFlags.z {
compression["gzip"] = true
}
if compressFlags.j {
compression["bzip2"] = true
}
if !verifyCompression() {
return errors.New("multiple compression formats specified")
}
return nil
}
func usage(w io.Writer) {
fmt.Fprintf(w, `ChromeOS untar
This is a tool that is intended to support untarring on SquashFS file
systems. In particular, every time it encounters a hard link, it
will just create a copy of the file.
Usage: cruntar [-ajmpvz] archive [dest]
Flags:
-a Shortcut for -m -p: preserve owners and file mode.
-j The archive is compressed with bzip2.
-m Preserve file modes.
-p Preserve ownership.
-v Print the name of each file as it is being processed.
-z The archive is compressed with gzip.
`)
}
func init() {
flag.Usage = func() { usage(os.Stderr) }
}
func main() {
var archive, help bool
flag.BoolVar(&archive, "a", false, "Shortcut for -m -p: preserve owners and file mode.")
flag.BoolVar(&help, "h", false, "print a help message")
flag.BoolVar(&compressFlags.j, "j", false, "bzip2 compression")
flag.BoolVar(&preserveMode, "m", false, "preserve file modes")
flag.BoolVar(&preserveOwners, "p", false, "preserve ownership")
flag.BoolVar(&verbose, "v", false, "verbose mode")
flag.BoolVar(&compressFlags.z, "z", false, "gzip compression")
flag.Parse()
if help {
usage(os.Stdout)
os.Exit(0)
}
if archive {
preserveMode = true
preserveOwners = true
}
err := parseCompressFlags()
die.If(err)
if flag.NArg() == 0 {
return
}
top := "./"
if flag.NArg() > 1 {
top = flag.Arg(1)
}
r, err := openArchive(flag.Arg(0))
die.If(err)
tfr := tar.NewReader(r)
for {
hdr, err := tfr.Next()
if err == io.EOF {
break
}
die.If(err)
err = processFile(tfr, hdr, top)
die.If(err)
}
r.Close()
}

View File

@ -10,7 +10,7 @@ import (
"io/ioutil"
"log"
"github.com/kisom/goutils/die"
"git.wntrmute.dev/kyle/goutils/die"
)
func main() {

32
cmd/data_sync/README Normal file
View File

@ -0,0 +1,32 @@
data_sync
This is a tool I wrote primarily to sync my home directory to a backup
drive plugged into my laptop. This system is provisioned by Ansible,
and the goal is to be able to just copy my home directory back in the
event of a failure without having lost a great deal of work or having
to wait for Ansible to finish installing the right backup software. Specifically,
I use a Framework laptop with the 1TB storage module, encrypted with
LUKS, and run this twice daily (timed to correspond with my commute,
though that's not really necessary). It started off as a shell script,
then I decided to just write it as a program.
Usage: data_sync [-d path] [-l level] [-m path] [-nqsv]
[-t path]
-d path path to sync source directory
(default "~")
-l level log level to output (default "INFO"). Valid log
levels are DEBUG, INFO, NOTICE, WARNING, ERR,
CRIT, ALERT, EMERG. The default is INFO.
-m path path to sync mount directory
(default "/media/$USER/$(hostname -s)_data")
-n dry-run mode: only check paths and print files to
exclude
-q suppress console output
-s suppress syslog output
-t path path to sync target directory
(default "/media/$USER/$(hostname -s)_data/$USER")
-v verbose rsync output
data_sync rsyncs the tree at the sync source directory (-d) to the sync target
directory (-t); it checks that the mount directory (-m) exists; the sync
target directory must exist on the mount directory.
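For example, a dry run against an illustrative external drive mounted at
/media/kyle/example_data, which only reports the paths that would be
excluded and the directories that would be created:
$ data_sync -n -m /media/kyle/example_data -t /media/kyle/example_data/kyle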

230
cmd/data_sync/main.go Normal file
View File

@ -0,0 +1,230 @@
package main
import (
"flag"
"fmt"
"io"
"io/fs"
"os"
"os/exec"
"path/filepath"
"strings"
"git.wntrmute.dev/kyle/goutils/config"
"git.wntrmute.dev/kyle/goutils/fileutil"
"git.wntrmute.dev/kyle/goutils/log"
)
func mustHostname() string {
hostname, err := os.Hostname()
log.FatalError(err, "couldn't retrieve hostname")
if hostname == "" {
log.Fatal("no hostname returned")
}
return strings.Split(hostname, ".")[0]
}
var (
defaultDataDir = mustHostname() + "_data"
defaultProgName = defaultDataDir + "_sync"
defaultMountDir = filepath.Join("/media", os.Getenv("USER"), defaultDataDir)
defaultSyncDir = os.Getenv("HOME")
defaultTargetDir = filepath.Join(defaultMountDir, os.Getenv("USER"))
)
func usage(w io.Writer) {
prog := filepath.Base(os.Args[0])
fmt.Fprintf(w, `Usage: %s [-d path] [-l level] [-m path] [-nqsv]
[-t path]
-d path path to sync source directory
(default "%s")
-l level log level to output (default "INFO"). Valid log
levels are DEBUG, INFO, NOTICE, WARNING, ERR,
CRIT, ALERT, EMERG. The default is INFO.
-m path path to sync mount directory
(default "%s")
-n dry-run mode: only check paths and print files to
exclude
-q suppress console output
-s suppress syslog output
-t path path to sync target directory
(default "%s")
-v verbose rsync output
%s rsyncs the tree at the sync source directory (-d) to the sync target
directory (-t); it checks that the mount directory (-m) exists; the sync
target directory must exist on the mount directory.
`, prog, defaultSyncDir, defaultMountDir, defaultTargetDir, prog)
}
func checkPaths(mount, target string, dryRun bool) error {
if !fileutil.DirectoryDoesExist(mount) {
return fmt.Errorf("sync dir %s isn't mounted", mount)
}
if !strings.HasPrefix(target, mount) {
return fmt.Errorf("target dir %s must exist in %s", target, mount)
}
if !fileutil.DirectoryDoesExist(target) {
if dryRun {
log.Infof("would mkdir %s", target)
} else {
log.Infof("mkdir %s", target)
if err := os.Mkdir(target, 0755); err != nil {
return err
}
}
}
return nil
}
func buildExcludes(syncDir string) ([]string, error) {
var excluded []string
walker := func(path string, info fs.FileInfo, err error) error {
if err != nil {
excluded = append(excluded, strings.TrimPrefix(path, syncDir))
if info != nil && info.IsDir() {
return filepath.SkipDir
}
return nil
}
if info.Mode().IsRegular() {
if err = fileutil.Access(path, fileutil.AccessRead); err != nil {
excluded = append(excluded, strings.TrimPrefix(path, syncDir))
}
}
if info.IsDir() {
if err = fileutil.Access(path, fileutil.AccessExec); err != nil {
excluded = append(excluded, strings.TrimPrefix(path, syncDir))
}
}
return nil
}
err := filepath.Walk(syncDir, walker)
return excluded, err
}
func writeExcludes(excluded []string) (string, error) {
if len(excluded) == 0 {
return "", nil
}
excludeFile, err := os.CreateTemp("", defaultProgName)
if err != nil {
return "", err
}
for _, name := range excluded {
fmt.Fprintln(excludeFile, name)
}
defer excludeFile.Close()
return excludeFile.Name(), nil
}
func rsync(syncDir, target, excludeFile string, verboseRsync bool) error {
var args []string
if excludeFile != "" {
args = append(args, "--exclude-from")
args = append(args, excludeFile)
}
if verboseRsync {
args = append(args, "--progress")
args = append(args, "-v")
}
args = append(args, []string{"-au", syncDir + "/", target + "/"}...)
path, err := exec.LookPath("rsync")
if err != nil {
return err
}
cmd := exec.Command(path, args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
func init() {
flag.Usage = func() { usage(os.Stderr) }
}
func main() {
var logLevel, mountDir, syncDir, target string
var dryRun, quietMode, noSyslog, verboseRsync bool
flag.StringVar(&syncDir, "d", config.GetDefault("sync_dir", defaultSyncDir),
"`path to sync source directory`")
flag.StringVar(&logLevel, "l", config.GetDefault("log_level", "INFO"),
"log level to output")
flag.StringVar(&mountDir, "m", config.GetDefault("mount_dir", defaultMountDir),
"`path` to sync mount directory")
flag.BoolVar(&dryRun, "n", false, "dry-run mode: only check paths and print files to exclude")
flag.BoolVar(&quietMode, "q", quietMode, "suppress console output")
flag.BoolVar(&noSyslog, "s", noSyslog, "suppress syslog output")
flag.StringVar(&target, "t", config.GetDefault("sync_target", defaultTargetDir),
"`path` to sync target directory")
flag.BoolVar(&verboseRsync, "v", false, "verbose rsync output")
flag.Parse()
if quietMode && noSyslog {
fmt.Fprintln(os.Stderr, "both console and syslog output are suppressed")
fmt.Fprintln(os.Stderr, "errors will NOT be reported")
}
logOpts := &log.Options{
Level: logLevel,
Tag: defaultProgName,
Facility: "user",
WriteSyslog: !noSyslog,
WriteConsole: !quietMode,
}
err := log.Setup(logOpts)
log.FatalError(err, "failed to set up logging")
log.Infof("checking paths: mount=%s, target=%s", mountDir, target)
err = checkPaths(mountDir, target, dryRun)
log.FatalError(err, "target dir isn't ready")
log.Infof("checking for files to exclude from %s", syncDir)
excluded, err := buildExcludes(syncDir)
log.FatalError(err, "couldn't build excludes")
if dryRun {
fmt.Println("excluded files:")
for _, path := range excluded {
fmt.Printf("\t%s\n", path)
}
return
}
excludeFile, err := writeExcludes(excluded)
log.FatalError(err, "couldn't write exclude file")
log.Infof("excluding %d files via %s", len(excluded), excludeFile)
if excludeFile != "" {
defer func() {
log.Infof("removing exclude file %s", excludeFile)
if err := os.Remove(excludeFile); err != nil {
log.Warningf("failed to remove temp file %s", excludeFile)
}
}()
}
err = rsync(syncDir, target, excludeFile, verboseRsync)
log.FatalError(err, "couldn't sync data")
}

34
cmd/diskimg/README Normal file
View File

@ -0,0 +1,34 @@
diskimg: write disk images
Usage:
diskimg [-a algo] [-v] image device
Flags:
-a algo Select the hashing algorithm to use. The default
is 'sha256'. Specifying an algorithm of 'list'
will print the supported algorithms to standard
output and exit with error code 2.
-v Enable verbose (debug) output.
Examples:
Copying images/server.img to /dev/sda:
$ sudo diskimg images/server.img /dev/sda
Write a bladerunner node image to /dev/sda:
$ sudo diskimg -v ~/code/bladerunner/packer/build/cm4-cnode-ubuntu-22.04.2.img /dev/sda
opening image /home/kyle/code/bladerunner/packer/build/cm4-cnode-ubuntu-22.04.2.img for read
/home/kyle/code/bladerunner/packer/build/cm4-cnode-ubuntu-22.04.2.img 416d4c8f890904167419e3d488d097e9c847273376b650546fdb1f6f9809c184
opening device /dev/sda for rw
writing /home/kyle/code/bladerunner/packer/build/cm4-cnode-ubuntu-22.04.2.img -> /dev/sda
wrote 4151312384 bytes to /dev/sda
syncing /dev/sda
verifying the image was written successfully
OK
Motivation:
I wanted to write something like balena's Etcher, but commandline only.

116
cmd/diskimg/main.go Normal file
View File

@ -0,0 +1,116 @@
package main
import (
"bytes"
"flag"
"fmt"
"io"
"os"
"git.wntrmute.dev/kyle/goutils/ahash"
"git.wntrmute.dev/kyle/goutils/dbg"
"git.wntrmute.dev/kyle/goutils/die"
)
const defaultHashAlgorithm = "sha256"
var (
hAlgo string
debug = dbg.New()
)
func openImage(imageFile string) (image *os.File, hash []byte, err error) {
image, err = os.Open(imageFile)
if err != nil {
return
}
hash, err = ahash.SumReader(hAlgo, image)
if err != nil {
return
}
_, err = image.Seek(0, 0)
if err != nil {
return
}
debug.Printf("%s %x\n", imageFile, hash)
return
}
func openDevice(devicePath string) (device *os.File, err error) {
fi, err := os.Stat(devicePath)
if err != nil {
return
}
device, err = os.OpenFile(devicePath, os.O_RDWR|os.O_SYNC, fi.Mode())
if err != nil {
return
}
return
}
func main() {
flag.StringVar(&hAlgo, "a", defaultHashAlgorithm, "default hash algorithm")
flag.BoolVar(&debug.Enabled, "v", false, "enable debug logging")
flag.Parse()
if hAlgo == "list" {
fmt.Println("Supported hashing algorithms:")
for _, algo := range ahash.SecureHashList() {
fmt.Printf("\t- %s\n", algo)
}
os.Exit(2)
}
if flag.NArg() != 2 {
die.With("usage: diskimg image device")
}
imageFile := flag.Arg(0)
devicePath := flag.Arg(1)
debug.Printf("opening image %s for read\n", imageFile)
image, hash, err := openImage(imageFile)
if image != nil {
defer image.Close()
}
die.If(err)
debug.Printf("opening device %s for rw\n", devicePath)
device, err := openDevice(devicePath)
if device != nil {
defer device.Close()
}
die.If(err)
debug.Printf("writing %s -> %s\n", imageFile, devicePath)
n, err := io.Copy(device, image)
die.If(err)
debug.Printf("wrote %d bytes to %s\n", n, devicePath)
debug.Printf("syncing %s\n", devicePath)
err = device.Sync()
die.If(err)
debug.Println("verifying the image was written successfully")
_, err = device.Seek(0, 0)
die.If(err)
deviceHash, err := ahash.SumLimitedReader(hAlgo, device, n)
die.If(err)
if !bytes.Equal(deviceHash, hash) {
fmt.Fprintln(os.Stderr, "Hash mismatch:")
fmt.Fprintf(os.Stderr, "\t%s: %x\n", imageFile, hash)
fmt.Fprintf(os.Stderr, "\t%s: %x\n", devicePath, deviceHash)
os.Exit(1)
}
debug.Println("OK")
os.Exit(0)
}

71
cmd/dumpbytes/main.go Normal file
View File

@ -0,0 +1,71 @@
package main
import (
"flag"
"fmt"
"git.wntrmute.dev/kyle/goutils/die"
"io"
"os"
)
func usage(w io.Writer, exc int) {
fmt.Fprintln(w, `usage: dumpbytes <file>`)
os.Exit(exc)
}
func printBytes(buf []byte) {
fmt.Printf("\t")
for i := 0; i < len(buf); i++ {
fmt.Printf("0x%02x, ", buf[i])
}
fmt.Println()
}
func dumpFile(path string, indentLevel int) error {
indent := ""
for i := 0; i < indentLevel; i++ {
indent += "\t"
}
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
fmt.Printf("%svar buffer = []byte{\n", indent)
for {
buf := make([]byte, 8)
n, err := file.Read(buf)
if err == io.EOF {
if n > 0 {
fmt.Printf("%s", indent)
printBytes(buf[:n])
}
break
}
if err != nil {
return err
}
fmt.Printf("%s", indent)
printBytes(buf[:n])
}
fmt.Printf("%s}\n", indent)
return nil
}
func main() {
indent := 0
flag.Usage = func() { usage(os.Stderr, 0) }
flag.IntVar(&indent, "n", 0, "indent level")
flag.Parse()
for _, file := range flag.Args() {
err := dumpFile(file, indent)
die.If(err)
}
}

52
cmd/eig/main.go Normal file
View File

@ -0,0 +1,52 @@
package main
import (
"flag"
"os"
"git.wntrmute.dev/kyle/goutils/die"
)
// size of a kilobit in bytes
const kilobit = 128
const pageSize = 4096
func main() {
size := flag.Int("s", 256*kilobit, "size of EEPROM image in kilobits")
fill := flag.Uint("f", 0, "byte to fill image with")
flag.Parse()
if *fill > 255 {
die.With("`fill` argument must be a byte value")
}
path := "eeprom.img"
if flag.NArg() > 0 {
path = flag.Arg(0)
}
fillByte := uint8(*fill)
buf := make([]byte, pageSize)
for i := 0; i < pageSize; i++ {
buf[i] = fillByte
}
pages := *size / pageSize
last := *size % pageSize
file, err := os.Create(path)
die.If(err)
defer file.Close()
for i := 0; i < pages; i++ {
_, err = file.Write(buf)
die.If(err)
}
if last != 0 {
_, err = file.Write(buf[:last])
die.If(err)
}
}

View File

@ -4,16 +4,21 @@ import (
"bufio"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"github.com/kisom/goutils/die"
"git.wntrmute.dev/kyle/goutils/die"
)
func usage() {
func init() {
flag.Usage = func() { usage(os.Stdout); os.Exit(1) }
}
func usage(w io.Writer) {
progname := filepath.Base(os.Args[0])
fmt.Printf(`Usage: %s [-nl] file start [end]
fmt.Fprintf(w, `Usage: %s [-nl] file start [end]
Print a fragment of a file starting at line 'start' and ending
at line 'end', or EOF if no end is specified.
@ -27,7 +32,7 @@ func main() {
flag.Parse()
if flag.NArg() < 2 || flag.NArg() > 3 {
usage()
usage(os.Stderr)
os.Exit(1)
}

5
cmd/host/README Normal file
View File

@ -0,0 +1,5 @@
host
This is a utility to display CNAME records and IPs for a hostname. It
was born of my frustration in trying to figure out how to get the host(1)
tool installed on Fedora.

41
cmd/host/main.go Normal file
View File

@ -0,0 +1,41 @@
package main
import (
"flag"
"fmt"
"log"
"net"
)
func lookupHost(host string) error {
cname, err := net.LookupCNAME(host)
if err != nil {
return err
}
if cname != host {
fmt.Printf("%s is a CNAME for %s\n", host, cname)
host = cname
}
addrs, err := net.LookupHost(host)
if err != nil {
return err
}
for _, addr := range addrs {
fmt.Printf("\t%s\n", addr)
}
return nil
}
func main() {
flag.Parse()
for _, arg := range flag.Args() {
if err := lookupHost(arg); err != nil {
log.Printf("%s: %s", arg, err)
}
}
}

View File

@ -8,7 +8,7 @@ import (
"io/ioutil"
"os"
"github.com/kisom/goutils/lib"
"git.wntrmute.dev/kyle/goutils/lib"
)
func prettify(file string, validateOnly bool) error {

23
cmd/kgz/README Normal file
View File

@ -0,0 +1,23 @@
kgz
kgz is like gzip, but supports compressing and decompressing to a different
directory than the source file is in.
Usage: kgz [-l] source [target]
If target is a directory, the basename of the source file will be used
as the target filename. Compression or decompression is selected
based on whether the source filename ends in ".gz".
Flags:
-l level Compression level (0-9). Only meaningful when
compressing a file.
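For example (file names are illustrative), compressing a log file into
/tmp and then restoring it into the current directory:
$ kgz -l 9 access.log /tmp
$ kgz /tmp/access.log.gz .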

182
cmd/kgz/main.go Normal file
View File

@ -0,0 +1,182 @@
package main
import (
"compress/flate"
"compress/gzip"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/pkg/errors"
)
const gzipExt = ".gz"
func compress(path, target string, level int) error {
sourceFile, err := os.Open(path)
if err != nil {
return errors.Wrap(err, "opening file for read")
}
defer sourceFile.Close()
destFile, err := os.Create(target)
if err != nil {
return errors.Wrap(err, "opening file for write")
}
defer destFile.Close()
gzipCompressor, err := gzip.NewWriterLevel(destFile, level)
if err != nil {
return errors.Wrap(err, "invalid compression level")
}
defer gzipCompressor.Close()
_, err = io.Copy(gzipCompressor, sourceFile)
if err != nil {
return errors.Wrap(err, "compressing file")
}
return nil
}
func uncompress(path, target string) error {
sourceFile, err := os.Open(path)
if err != nil {
return errors.Wrap(err, "opening file for read")
}
defer sourceFile.Close()
gzipUncompressor, err := gzip.NewReader(sourceFile)
if err != nil {
return errors.Wrap(err, "reading gzip headers")
}
defer gzipUncompressor.Close()
destFile, err := os.Create(target)
if err != nil {
return errors.Wrap(err, "opening file for write")
}
defer destFile.Close()
_, err = io.Copy(destFile, gzipUncompressor)
if err != nil {
return errors.Wrap(err, "uncompressing file")
}
return nil
}
func usage(w io.Writer) {
fmt.Fprintf(w, `Usage: %s [-l] source [target]
kgz is like gzip, but supports compressing and decompressing to a different
directory than the source file is in.
Flags:
-l level Compression level (0-9). Only meaningful when
compressing a file.
`, os.Args[0])
}
func init() {
flag.Usage = func() { usage(os.Stderr) }
}
func isDir(path string) bool {
file, err := os.Open(path)
if err == nil {
defer file.Close()
stat, err := file.Stat()
if err != nil {
return false
}
if stat.IsDir() {
return true
}
}
return false
}
func pathForUncompressing(source, dest string) (string, error) {
if !isDir(dest) {
return dest, nil
}
source = filepath.Base(source)
if !strings.HasSuffix(source, gzipExt) {
return "", errors.Errorf("%s is a not gzip-compressed file", source)
}
outFile := source[:len(source)-len(gzipExt)]
outFile = filepath.Join(dest, outFile)
return outFile, nil
}
func pathForCompressing(source, dest string) (string, error) {
if !isDir(dest) {
return dest, nil
}
source = filepath.Base(source)
if strings.HasSuffix(source, gzipExt) {
return "", errors.Errorf("%s is a gzip-compressed file", source)
}
dest = filepath.Join(dest, source+gzipExt)
return dest, nil
}
func main() {
var level int
var path string
var target = "."
flag.IntVar(&level, "l", flate.DefaultCompression, "compression level")
flag.Parse()
if flag.NArg() < 1 || flag.NArg() > 2 {
usage(os.Stderr)
os.Exit(1)
}
path = flag.Arg(0)
if flag.NArg() == 2 {
target = flag.Arg(1)
}
if strings.HasSuffix(path, gzipExt) {
target, err := pathForUncompressing(path, target)
if err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
os.Exit(1)
}
err = uncompress(path, target)
if err != nil {
os.Remove(target)
fmt.Fprintf(os.Stderr, "%s\n", err)
os.Exit(1)
}
} else {
target, err := pathForCompressing(path, target)
if err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
os.Exit(1)
}
err = compress(path, target, level)
if err != nil {
os.Remove(target)
fmt.Fprintf(os.Stderr, "%s\n", err)
os.Exit(1)
}
}
}

3
cmd/minmax/README Normal file
View File

@ -0,0 +1,3 @@
minmax
A quick tool to calculate minmax codes, as needed for uLisp.
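For example, the code for a hypothetical function ("fn") taking between
two and three arguments (per minmax.go below, the result is
kind<<6 + min<<3 + max, printed in octal):
$ minmax fn 2 3
223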

53
cmd/minmax/minmax.go Normal file
View File

@ -0,0 +1,53 @@
package main
import (
"flag"
"fmt"
"os"
"strconv"
)
var kinds = map[string]int{
"sym": 0,
"tf": 1,
"fn": 2,
"sp": 3,
}
func dieIf(err error) {
if err == nil {
return
}
fmt.Fprintf(os.Stderr, "[!] %s\n", err)
os.Exit(1)
}
func usage() {
fmt.Fprintf(os.Stderr, "usage: minmax type min max\n")
fmt.Fprintf(os.Stderr, " type is one of fn, sp, sym, tf\n")
os.Exit(1)
}
func main() {
flag.Parse()
if flag.NArg() != 3 {
usage()
}
kind, ok := kinds[flag.Arg(0)]
if !ok {
usage()
}
min, err := strconv.Atoi(flag.Arg(1))
dieIf(err)
max, err := strconv.Atoi(flag.Arg(2))
dieIf(err)
code := kind << 6
code += (min << 3)
code += max
fmt.Printf("%0o\n", code)
}

9
cmd/parts/README Normal file
View File

@ -0,0 +1,9 @@
parts: simple parts database for electronic components
Usage: parts [id] -- query the database for a part
parts [-c class] [id] [description] -- store a part in the database
Options:
-f path Path to parts database (default is
/home/kyle/.parts.json).
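For example (the part number and description are made up), storing a
part and then looking it up again:
$ parts -c resistor R0603-10K "10k 1% 0603 resistor"
$ parts R0603-10K
R0603-10K: 10k 1% 0603 resistor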

142
cmd/parts/main.go Normal file
View File

@ -0,0 +1,142 @@
package main
import (
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"git.wntrmute.dev/kyle/goutils/die"
)
const dbVersion = "1"
var dbFile = filepath.Join(os.Getenv("HOME"), ".parts.json")
var partsDB = &database{Version: dbVersion}
type part struct {
Name string `json:"name"`
Description string `json:"description"`
Class string `json:"class,omitempty"`
}
func (p part) String() string {
return fmt.Sprintf("%s: %s", p.Name, p.Description)
}
type database struct {
Version string `json:"version"`
LastUpdate int64 `json:"json"`
Parts map[string]part `json:"parts"`
}
func help(w io.Writer) {
fmt.Fprintf(w, `Usage: parts [id] -- query the database for a part
parts [-c class] [id] [description] -- store a part in the database
Options:
-f path Path to parts database (default is
%s).
`, dbFile)
}
func loadDatabase() {
data, err := ioutil.ReadFile(dbFile)
if err != nil && os.IsNotExist(err) {
partsDB = &database{
Version: dbVersion,
Parts: map[string]part{},
}
return
}
die.If(err)
err = json.Unmarshal(data, partsDB)
die.If(err)
}
func findPart(partName string) {
partName = strings.ToLower(partName)
for name, part := range partsDB.Parts {
if strings.Contains(strings.ToLower(name), partName) {
fmt.Println(part.String())
}
}
}
func writeDB() {
data, err := json.Marshal(partsDB)
die.If(err)
err = ioutil.WriteFile(dbFile, data, 0644)
die.If(err)
}
func storePart(name, class, description string) {
p, exists := partsDB.Parts[name]
if exists {
fmt.Printf("warning: replacing part %s\n", name)
fmt.Printf("\t%s\n", p.String())
}
partsDB.Parts[name] = part{
Name: name,
Class: class,
Description: description,
}
writeDB()
}
func listParts() {
parts := make([]string, 0, len(partsDB.Parts))
for partName := range partsDB.Parts {
parts = append(parts, partName)
}
sort.Strings(parts)
for _, partName := range parts {
fmt.Println(partsDB.Parts[partName].String())
}
}
func main() {
var class string
var helpFlag bool
flag.StringVar(&class, "c", "", "device class")
flag.StringVar(&dbFile, "f", dbFile, "`path` to database")
flag.BoolVar(&helpFlag, "h", false, "Print a help message.")
flag.Parse()
if helpFlag {
help(os.Stdout)
return
}
loadDatabase()
switch flag.NArg() {
case 0:
help(os.Stdout)
return
case 1:
partName := flag.Arg(0)
if partName == "list" {
listParts()
} else {
findPart(flag.Arg(0))
}
return
default:
description := strings.Join(flag.Args()[1:], " ")
storePart(flag.Arg(0), class, description)
return
}
}

View File

@ -7,7 +7,7 @@ import (
"io/ioutil"
"os"
"github.com/kisom/goutils/lib"
"git.wntrmute.dev/kyle/goutils/lib"
)
func main() {

View File

@ -8,9 +8,9 @@ import (
"io"
"os"
"github.com/kisom/goutils/assert"
"github.com/kisom/goutils/die"
"github.com/kisom/goutils/lib"
"git.wntrmute.dev/kyle/goutils/assert"
"git.wntrmute.dev/kyle/goutils/die"
"git.wntrmute.dev/kyle/goutils/lib"
)
func usage(w io.Writer) {

View File

@ -11,8 +11,8 @@ import (
"path/filepath"
"strings"
"github.com/kisom/goutils/fileutil"
"github.com/kisom/goutils/lib"
"git.wntrmute.dev/kyle/goutils/fileutil"
"git.wntrmute.dev/kyle/goutils/lib"
)
func hashName(path, encodedHash string) string {
@ -46,7 +46,6 @@ func newName(path string) (string, error) {
func move(dst, src string, force bool) (err error) {
if fileutil.FileDoesExist(dst) && !force {
return fmt.Errorf("%s exists (pass the -f flag to overwrite)", dst)
return nil
}
dstFile, err := os.Create(dst)
if err != nil {
@ -92,7 +91,7 @@ Options:
}
func init() {
flag.Usage = func () { usage(os.Stdout) }
flag.Usage = func() { usage(os.Stdout) }
}
func main() {

22
cmd/rhash/README Normal file
View File

@ -0,0 +1,22 @@
rhash: remote hashing tool
Usage: rhash [-a algo] [-h] [-l set] urls...
Compute the hash over each URL.
Flags:
-a algo Specify the hash algorithm to use; the default is sha256.
-h Print this help message.
-l set List the hash functions under set. Set can be one of all,
secure to list only cryptographic hash functions, or
insecure to list only non-cryptographic hash functions.
Examples:
Compute the SHA256 digest of the LICENSE in this repository:
$ rhash https://raw.githubusercontent.com/kisom/goutils/7391da8567952f69990194ead2842d21df217c89/LICENSE
LICENSE: sha256=620bfadeb698df6c6db73908689a29371a9d4cff32b08c48a5c4307946093980
Compute the SHA-1 digest of the LICENSE in this repository:
$ rhash -a sha1 https://raw.githubusercontent.com/kisom/goutils/7391da8567952f69990194ead2842d21df217c89/LICENSE
LICENSE: sha1=83c6e2e410715058ed6e7c1572176122c024e367

97
cmd/rhash/main.go Normal file
View File

@ -0,0 +1,97 @@
package main
import (
"flag"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"git.wntrmute.dev/kyle/goutils/ahash"
"git.wntrmute.dev/kyle/goutils/die"
"git.wntrmute.dev/kyle/goutils/lib"
)
func usage(w io.Writer) {
fmt.Fprintf(w, `Usage: %s [-a algo] [-h] [-l set] urls...
Compute the hash over each URL.
Flags:
-a algo Specify the hash algorithm to use; the default is sha256.
-h Print this help message.
-l set List the hash functions under set. Set can be one of all,
secure to list only cryptographic hash functions, or
insecure to list only non-cryptographic hash functions.
`, lib.ProgName())
}
func init() {
flag.Usage = func() { usage(os.Stderr) }
}
func main() {
var algo, list string
var help bool
flag.StringVar(&algo, "a", "sha256", "hash algorithm to use")
flag.BoolVar(&help, "h", false, "print a help message")
flag.StringVar(&list, "l", "", "list known hash algorithms (one of all, secure, insecure)")
flag.Parse()
if help {
usage(os.Stdout)
}
if list != "" {
var hashes []string
switch list {
case "all":
hashes = ahash.HashList()
case "secure":
hashes = ahash.SecureHashList()
case "insecure":
hashes = ahash.InsecureHashList()
default:
die.With("list option must be one of all, secure, or insecure.")
}
for _, algo := range hashes {
fmt.Printf("- %s\n", algo)
}
os.Exit(1)
}
for _, remote := range flag.Args() {
u, err := url.Parse(remote)
if err != nil {
lib.Warn(err, "parsing %s", remote)
continue
}
name := filepath.Base(u.Path)
if name == "" {
lib.Warnx("source URL doesn't appear to name a file")
continue
}
resp, err := http.Get(remote)
if err != nil {
lib.Warn(err, "fetching %s", remote)
continue
}
sum, err := ahash.SumReader(algo, resp.Body)
resp.Body.Close()
if err != nil {
lib.Err(lib.ExitFailure, err, "while hashing data")
}
fmt.Printf("%s: %s=%x\n", name, algo, sum)
}
}

48
cmd/rolldie/main.go Normal file
View File

@ -0,0 +1,48 @@
package main
import (
"flag"
"fmt"
"math/rand"
"os"
"regexp"
"strconv"
"git.wntrmute.dev/kyle/goutils/die"
)
var dieRollFormat = regexp.MustCompile(`^(\d+)[dD](\d+)$`)
func rollDie(count, sides int) []int {
sum := 0
var rolls []int
for i := 0; i < count; i++ {
roll := rand.Intn(sides) + 1
sum += roll
rolls = append(rolls, roll)
}
rolls = append(rolls, sum)
return rolls
}
func main() {
flag.Parse()
for _, arg := range flag.Args() {
if !dieRollFormat.MatchString(arg) {
fmt.Fprintf(os.Stderr, "invalid die format %s: should be XdY\n", arg)
os.Exit(1)
}
dieRoll := dieRollFormat.FindAllStringSubmatch(arg, -1)
count, err := strconv.Atoi(dieRoll[0][1])
die.If(err)
sides, err := strconv.Atoi(dieRoll[0][2])
die.If(err)
fmt.Println(rollDie(count, sides))
}
}

View File

@ -12,36 +12,23 @@ import (
"sort"
"strings"
"github.com/kisom/goutils/die"
"github.com/kisom/goutils/logging"
"git.wntrmute.dev/kyle/goutils/dbg"
"git.wntrmute.dev/kyle/goutils/die"
)
var (
gopath string
project string
debug bool
)
var (
stdLibRegexp = regexp.MustCompile(`^\w+(/\w+)*$`)
sourceRegexp = regexp.MustCompile(`^[^.].*\.go$`)
log = logging.NewConsole()
imports = map[string]bool{}
debug = dbg.New()
fset = &token.FileSet{}
imports = map[string]bool{}
sourceRegexp = regexp.MustCompile(`^[^.].*\.go$`)
stdLibRegexp = regexp.MustCompile(`^\w+(/\w+)*$`)
)
func debugf(format string, args ...interface{}) {
if debug {
fmt.Printf(format, args...)
}
}
func debugln(args ...interface{}) {
if debug {
fmt.Println(args...)
}
}
func init() {
gopath = os.Getenv("GOPATH")
if gopath == "" {
@ -67,11 +54,15 @@ func init() {
}
func walkFile(path string, info os.FileInfo, err error) error {
if ignores[path] {
return filepath.SkipDir
}
if !sourceRegexp.MatchString(path) {
return nil
}
debugln(path)
debug.Println(path)
f, err := parser.ParseFile(fset, path, nil, parser.ImportsOnly)
if err != nil {
@ -81,26 +72,40 @@ func walkFile(path string, info os.FileInfo, err error) error {
for _, importSpec := range f.Imports {
importPath := strings.Trim(importSpec.Path.Value, `"`)
if stdLibRegexp.MatchString(importPath) {
debugln("standard lib:", importPath)
debug.Println("standard lib:", importPath)
continue
} else if strings.HasPrefix(importPath, project) {
debugln("internal import:", importPath)
debug.Println("internal import:", importPath)
continue
} else if strings.HasPrefix(importPath, "golang.org/") {
debugln("extended lib:", importPath)
debug.Println("extended lib:", importPath)
continue
}
debugln("import:", importPath)
debug.Println("import:", importPath)
imports[importPath] = true
}
return nil
}
var ignores = map[string]bool{}
func main() {
flag.BoolVar(&debug, "v", false, "log debugging information")
var ignoreLine string
var noVendor bool
flag.StringVar(&ignoreLine, "i", "", "comma-separated list of directories to ignore")
flag.BoolVar(&noVendor, "nv", false, "ignore the vendor directory")
flag.BoolVar(&debug.Enabled, "v", false, "log debugging information")
flag.Parse()
if noVendor {
ignores["vendor"] = true
}
for _, word := range strings.Split(ignoreLine, ",") {
ignores[strings.TrimSpace(word)] = true
}
err := filepath.Walk(".", walkFile)
die.If(err)

30
cmd/ski/README Normal file
View File

@ -0,0 +1,30 @@
ski: print subject public key info
Usage:
ski [-hm] files...
Flags:
-h Print a help message and exit.
-m All SKIs should match.
Examples:
Printing the SKI of a private key and certificate:
$ ski *
server.key 3A:AB:D1:B2:E5:7A:F2:5A:D5:8E:8B:7B:25:D9:41:90:F8:6B:A3:5E (RSA private key)
[ski] trailing data in PEM file
server.pem 3A:AB:D1:B2:E5:7A:F2:5A:D5:8E:8B:7B:25:D9:41:90:F8:6B:A3:5E (RSA certificate)
Making sure the SKIs match:
$ ski -m *
tyrfingr.key 3A:AB:D1:B2:E5:7A:F2:5A:D5:8E:8B:7B:25:D9:41:90:F8:6B:A3:5E (RSA private key)
[ski] trailing data in PEM file
tyrfingr.pem 3A:AB:D1:B2:E5:7A:F2:5A:D5:8E:8B:7B:25:D9:41:90:F8:6B:A3:5E (RSA certificate)
Making sure the SKIs match with a bad certificate:
$ ski -m server.key bad.pem
server.key 3A:AB:D1:B2:E5:7A:F2:5A:D5:8E:8B:7B:25:D9:41:90:F8:6B:A3:5E (RSA private key)
[ski] bad.pem: SKI mismatch (3A:AB:D1:B2:E5:7A:F2:5A:D5:8E:8B:7B:25:D9:41:90:F8:6B:A3:5E != 90:AF:6A:3A:94:5A:0B:D8:90:EA:12:56:73:DF:43:B4:3A:28:DA:E7)
bad.pem 90:AF:6A:3A:94:5A:0B:D8:90:EA:12:56:73:DF:43:B4:3A:28:DA:E7 (RSA certificate)

191
cmd/ski/main.go Normal file
View File

@ -0,0 +1,191 @@
package main
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/sha1"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/pem"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"git.wntrmute.dev/kyle/goutils/die"
"git.wntrmute.dev/kyle/goutils/lib"
)
func usage(w io.Writer) {
fmt.Fprintf(w, `ski: print subject key info for PEM-encoded files
Usage:
ski [-hm] files...
Flags:
-h Print this help message.
-m All SKIs should match; as soon as an SKI mismatch is found,
it is reported.
`)
}
func init() {
flag.Usage = func() { usage(os.Stderr) }
}
func parse(path string) (public []byte, kt, ft string) {
data, err := ioutil.ReadFile(path)
die.If(err)
data = bytes.TrimSpace(data)
p, rest := pem.Decode(data)
if len(rest) > 0 {
lib.Warnx("trailing data in PEM file")
}
if p == nil {
die.With("no PEM data found")
}
data = p.Bytes
switch p.Type {
case "PRIVATE KEY", "RSA PRIVATE KEY", "EC PRIVATE KEY":
public, kt = parseKey(data)
ft = "private key"
case "CERTIFICATE":
public, kt = parseCertificate(data)
ft = "certificate"
case "CERTIFICATE REQUEST":
public, kt = parseCSR(data)
ft = "certificate request"
default:
die.With("unknown PEM type %s", p.Type)
}
return
}
func parseKey(data []byte) (public []byte, kt string) {
privInterface, err := x509.ParsePKCS8PrivateKey(data)
if err != nil {
privInterface, err = x509.ParsePKCS1PrivateKey(data)
if err != nil {
privInterface, err = x509.ParseECPrivateKey(data)
if err != nil {
die.With("couldn't parse private key.")
}
}
}
var priv crypto.Signer
switch privInterface.(type) {
case *rsa.PrivateKey:
priv = privInterface.(*rsa.PrivateKey)
kt = "RSA"
case *ecdsa.PrivateKey:
priv = privInterface.(*ecdsa.PrivateKey)
kt = "ECDSA"
default:
die.With("unknown private key type %T", privInterface)
}
public, err = x509.MarshalPKIXPublicKey(priv.Public())
die.If(err)
return
}
func parseCertificate(data []byte) (public []byte, kt string) {
cert, err := x509.ParseCertificate(data)
die.If(err)
pub := cert.PublicKey
switch pub.(type) {
case *rsa.PublicKey:
kt = "RSA"
case *ecdsa.PublicKey:
kt = "ECDSA"
default:
die.With("unknown public key type %T", pub)
}
public, err = x509.MarshalPKIXPublicKey(pub)
die.If(err)
return
}
func parseCSR(data []byte) (public []byte, kt string) {
csr, err := x509.ParseCertificateRequest(data)
die.If(err)
pub := csr.PublicKey
switch pub.(type) {
case *rsa.PublicKey:
kt = "RSA"
case *ecdsa.PublicKey:
kt = "ECDSA"
default:
die.With("unknown public key type %T", pub)
}
public, err = x509.MarshalPKIXPublicKey(pub)
die.If(err)
return
}
func dumpHex(in []byte) string {
var s string
for i := range in {
s += fmt.Sprintf("%02X:", in[i])
}
return strings.Trim(s, ":")
}
type subjectPublicKeyInfo struct {
Algorithm pkix.AlgorithmIdentifier
SubjectPublicKey asn1.BitString
}
func main() {
var help, shouldMatch bool
flag.BoolVar(&help, "h", false, "print a help message and exit")
flag.BoolVar(&shouldMatch, "m", false, "all SKIs should match")
flag.Parse()
if help {
usage(os.Stdout)
os.Exit(0)
}
var ski string
for _, path := range flag.Args() {
public, kt, ft := parse(path)
var subPKI subjectPublicKeyInfo
_, err := asn1.Unmarshal(public, &subPKI)
if err != nil {
lib.Warn(err, "failed to get subject PKI")
continue
}
pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes)
pubHashString := dumpHex(pubHash[:])
if ski == "" {
ski = pubHashString
}
if shouldMatch && ski != pubHashString {
lib.Warnx("%s: SKI mismatch (%s != %s)",
path, ski, pubHashString)
}
fmt.Printf("%s %s (%s %s)\n", path, pubHashString, kt, ft)
}
}

46
cmd/sprox/main.go Normal file
View File

@ -0,0 +1,46 @@
package main
import (
"flag"
"io"
"log"
"net"
"git.wntrmute.dev/kyle/goutils/die"
)
func proxy(conn net.Conn, inside string) error {
proxyConn, err := net.Dial("tcp", inside)
if err != nil {
return err
}
defer proxyConn.Close()
defer conn.Close()
go func() {
io.Copy(conn, proxyConn)
}()
_, err = io.Copy(proxyConn, conn)
return err
}
func main() {
var outside, inside string
flag.StringVar(&outside, "f", "8080", "outside port")
flag.StringVar(&inside, "p", "4000", "inside port")
flag.Parse()
l, err := net.Listen("tcp", "0.0.0.0:"+outside)
die.If(err)
for {
conn, err := l.Accept()
if err != nil {
log.Println(err)
continue
}
go proxy(conn, "127.0.0.1:"+inside)
}
}

View File

@ -0,0 +1,17 @@
stealchain-server
This is a utility to extract the verified X.509 chain from a TLS
connection initiated by another client. It listens on a port, and
for each connection, it will dump the certificates that the peer
actually sent (and not the verified chain that is built from this).
It was written to assist in debugging issues with certificate chains.
There are a few knobs:
-listen specifies the address to listen on.
-ca allows the trusted CA roots to be specified via a PEM bundle of
root certificates.
-verify requires that the client present a valid certificate chain.
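Per main.go below, the server also requires -cert and -key for its own
certificate and private key. An illustrative invocation listening on
port 8443 and requiring verifiable client chains:
$ stealchain-server -listen :8443 -cert server.pem -key server.key -verify
Each chain a client presents is written to a file named
<remote-addr>-<nonce>.pem in the current directory.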

View File

@ -0,0 +1,106 @@
package main
import (
"crypto/rand"
"crypto/tls"
"crypto/x509"
"encoding/hex"
"encoding/pem"
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"git.wntrmute.dev/kyle/goutils/die"
)
func main() {
cfg := &tls.Config{}
var sysRoot, listenAddr, certFile, keyFile string
var verify bool
flag.StringVar(&sysRoot, "ca", "", "provide an alternate CA bundle")
flag.StringVar(&listenAddr, "listen", ":443", "address to listen on")
flag.StringVar(&certFile, "cert", "", "server certificate to present to clients")
flag.StringVar(&keyFile, "key", "", "key for server certificate")
flag.BoolVar(&verify, "verify", false, "verify client certificates")
flag.Parse()
if verify {
cfg.ClientAuth = tls.RequireAndVerifyClientCert
} else {
cfg.ClientAuth = tls.RequestClientCert
}
if certFile == "" {
fmt.Println("[!] missing required flag -cert")
os.Exit(1)
}
if keyFile == "" {
fmt.Println("[!] missing required flag -key")
os.Exit(1)
}
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
fmt.Printf("[!] could not load server key pair: %v", err)
os.Exit(1)
}
cfg.Certificates = append(cfg.Certificates, cert)
if sysRoot != "" {
pemList, err := ioutil.ReadFile(sysRoot)
die.If(err)
roots := x509.NewCertPool()
if !roots.AppendCertsFromPEM(pemList) {
fmt.Printf("[!] no valid roots found")
roots = nil
}
cfg.RootCAs = roots
}
l, err := net.Listen("tcp", listenAddr)
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
for {
conn, err := l.Accept()
if err != nil {
fmt.Println(err.Error())
continue
}
raddr := conn.RemoteAddr()
tconn := tls.Server(conn, cfg)
err = tconn.Handshake()
if err != nil {
fmt.Printf("[+] %v: failed to complete handshake: %v\n", raddr, err)
continue
}
cs := tconn.ConnectionState()
if len(cs.PeerCertificates) == 0 {
fmt.Printf("[+] %v: no chain presented\n", raddr)
continue
}
var chain []byte
for _, cert := range cs.PeerCertificates {
p := &pem.Block{
Type: "CERTIFICATE",
Bytes: cert.Raw,
}
chain = append(chain, pem.EncodeToMemory(p)...)
}
var nonce [16]byte
_, err = rand.Read(nonce[:])
if err != nil {
panic(err)
}
fname := fmt.Sprintf("%v-%v.pem", raddr, hex.EncodeToString(nonce[:]))
err = ioutil.WriteFile(fname, chain, 0644)
die.If(err)
fmt.Printf("%v: [+] wrote %v.\n", raddr, fname)
}
}

View File

@ -10,7 +10,7 @@ import (
"net"
"os"
"github.com/kisom/goutils/die"
"git.wntrmute.dev/kyle/goutils/die"
)
func main() {

20
cmd/subjhash/README Normal file
View File

@ -0,0 +1,20 @@
subjhash
This tool prints the SHA-256 hash of an X.509 certificate's subject
or issuer field. It can also verify that the hashes of the
subject are the same between two certificates.
Usage: subjhash [-im] certs...
Flags:
-i Print hash of issuer field.
-m Matching mode. This expects arguments to be in the form of
pairs of certificates (e.g. previous, new) whose subjects
will be compared. For example,
subjhash -m ca1.pem ca1-renewed.pem \
ca2.pem ca2-renewed.pem
will exit with a non-zero status if the subject in the
ca1-renewed.pem certificate doesn't match the subject in the
ca1.pem certificate; similarly for ca2.

113
cmd/subjhash/main.go Normal file
View File

@ -0,0 +1,113 @@
package main
import (
"bytes"
"crypto/sha256"
"crypto/x509"
"flag"
"fmt"
"io"
"os"
"git.wntrmute.dev/kyle/goutils/certlib"
"git.wntrmute.dev/kyle/goutils/die"
"git.wntrmute.dev/kyle/goutils/lib"
)
func init() {
flag.Usage = func() { usage(os.Stdout); os.Exit(1) }
}
func usage(w io.Writer) {
fmt.Fprintf(w, `Print hash of subject or issuer fields in certificates.
Usage: subjhash [-im] certs...
Flags:
-i Print hash of issuer field.
-m Matching mode. This expects arguments to be in the form of
pairs of certificates (e.g. previous, new) whose subjects
will be compared. For example,
subjhash -m ca1.pem ca1-renewed.pem \
ca2.pem ca2-renewed.pem
will exit with a non-zero status if the subject in the
ca1-renewed.pem certificate doesn't match the subject in the
ca1.pem certificate; similarly for ca2.
`)
}
// NB: the Issuer field is *also* a subject field. Also, the returned
// hash is *not* hex encoded.
func getSubjectInfoHash(cert *x509.Certificate, issuer bool) []byte {
if cert == nil {
return nil
}
var subject []byte
if issuer {
subject = cert.RawIssuer
} else {
subject = cert.RawSubject
}
digest := sha256.Sum256(subject)
return digest[:]
}
func printDigests(paths []string, issuer bool) {
for _, path := range paths {
cert, err := certlib.LoadCertificate(path)
if err != nil {
lib.Warn(err, "failed to load certificate from %s", path)
continue
}
digest := getSubjectInfoHash(cert, issuer)
fmt.Printf("%x %s\n", digest, path)
}
}
func matchDigests(paths []string, issuer bool) {
if (len(paths) % 2) != 0 {
lib.Errx(lib.ExitFailure, "not all certificates are paired")
}
var invalid int
for {
if len(paths) == 0 {
break
}
fst := paths[0]
snd := paths[1]
paths = paths[2:]
fstCert, err := certlib.LoadCertificate(fst)
die.If(err)
sndCert, err := certlib.LoadCertificate(snd)
die.If(err)
if !bytes.Equal(getSubjectInfoHash(fstCert, issuer), getSubjectInfoHash(sndCert, issuer)) {
lib.Warnx("certificates don't match: %s and %s", fst, snd)
invalid++
}
}
if invalid > 0 {
os.Exit(1)
}
}
func main() {
var issuer, match bool
flag.BoolVar(&issuer, "i", false, "print the issuer")
flag.BoolVar(&match, "m", false, "match mode")
flag.Parse()
paths := flag.Args()
if match {
matchDigests(paths, issuer)
} else {
printDigests(paths, issuer)
}
}

View File

@ -15,7 +15,7 @@ import (
"log"
"os"
"github.com/kisom/goutils/die"
"git.wntrmute.dev/kyle/goutils/die"
)
var validPEMs = map[string]bool{

View File

@ -11,12 +11,12 @@ import (
)
var (
format = "2006-01-02 15:04" // Format that will be used for times.
outFormat = format + " MST" // Output format.
tz = "Local" // String descriptor for timezone.
fromLoc *time.Location = time.Local // Go time.Location for the named timezone.
fromUnix bool // Input times are Unix timestamps.
toLoc *time.Location = time.UTC // Go time.Location for output timezone.
format = "2006-01-02 15:04" // Format that will be used for times.
outFormat = format + " MST" // Output format.
tz = "Local" // String descriptor for timezone.
fromLoc = time.Local // Go time.Location for the named timezone.
fromUnix bool // Input times are Unix timestamps.
toLoc = time.UTC // Go time.Location for output timezone.
)
func usage(w io.Writer) {
@ -72,7 +72,7 @@ Flags:
func usageExamples() {
usage(os.Stdout)
fmt.Println(`
fmt.Printf(`
Examples (note that the examples are done in the America/Los_Angeles /
PST8PDT time zone):
@ -134,6 +134,7 @@ PST8PDT time zone):
(Converting from GMT (offset +0000) to UTC (offset +0000).)
==================================================================
2016-06-14 23:46 = 2016-06-14 23:46
`)
}

136
cmd/zsearch/main.go Normal file
View File

@ -0,0 +1,136 @@
// zsearch is a utility for searching zlib-compressed files for a
// search string. It was really designed for use with the Git object
// store, i.e. to aid in the recovery of files after Git does what Git
// do.
package main
import (
"bufio"
"bytes"
"compress/zlib"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
)
const defaultDirectory = ".git/objects"
func errorf(format string, a ...interface{}) {
fmt.Fprintf(os.Stderr, format, a...)
if format[len(format)-1] != '\n' {
fmt.Fprintf(os.Stderr, "\n")
}
}
func isDir(path string) bool {
fi, err := os.Stat(path)
if err != nil {
return false
}
return fi.IsDir()
}
func loadFile(path string) ([]byte, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
buf := new(bytes.Buffer)
zread, err := zlib.NewReader(file)
if err != nil {
return nil, err
}
defer zread.Close()
_, err = io.Copy(buf, zread)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func showFile(path string) {
fileData, err := loadFile(path)
if err != nil {
errorf("%v", err)
return
}
fmt.Printf("%s\n", fileData)
}
func searchFile(path string, search *regexp.Regexp) error {
file, err := os.Open(path)
if err != nil {
errorf("%v", err)
return err
}
defer file.Close()
zread, err := zlib.NewReader(file)
if err != nil {
errorf("%v", err)
return err
}
defer zread.Close()
zbuf := bufio.NewReader(zread)
if search.MatchReader(zbuf) {
fileData, err := loadFile(path)
if err != nil {
errorf("%v", err)
return err
}
fmt.Printf("%s:\n%s\n", path, fileData)
}
return nil
}
func buildWalker(searchExpr *regexp.Regexp) filepath.WalkFunc {
return func(path string, info os.FileInfo, err error) error {
if info.Mode().IsRegular() {
return searchFile(path, searchExpr)
}
return nil
}
}
func main() {
flSearch := flag.String("s", "", "search string (should be an RE2 regular expression)")
flag.Parse()
if *flSearch == "" {
for _, path := range flag.Args() {
showFile(path)
}
} else {
search, err := regexp.Compile(*flSearch)
if err != nil {
errorf("Bad regexp: %v", err)
return
}
pathList := flag.Args()
if len(pathList) == 0 {
pathList = []string{defaultDirectory}
}
for _, path := range pathList {
if isDir(path) {
err := filepath.Walk(path, buildWalker(search))
if err != nil {
errorf("%v", err)
return
}
} else {
searchFile(path, search)
}
}
}
}

153
config/config.go Normal file
View File

@ -0,0 +1,153 @@
// Package config implements a simple global configuration system that
// supports a file with key=value pairs and environment variables. Note
// that the config system is global.
//
// This package is intended to be used for small daemons: some configuration
// file is optionally populated at program start, then this is used to
// transparently look up configuration values from either that file or the
// environment.
package config
import (
"bufio"
"fmt"
"log"
"os"
"sort"
"strings"
"git.wntrmute.dev/kyle/goutils/config/iniconf"
)
// NB: Rather than define a singleton type, everything is defined at
// the top-level
var (
vars = map[string]string{}
prefix = ""
)
// SetEnvPrefix sets the prefix for all environment variables; it's
// assumed to not be needed for files.
func SetEnvPrefix(pfx string) {
prefix = pfx
}
func addLine(line string) {
if strings.HasPrefix(line, "#") || line == "" {
return
}
lineParts := strings.SplitN(line, "=", 2)
if len(lineParts) != 2 {
log.Print("skipping line: ", line)
return // ignore lines that aren't key=value pairs
}
lineParts[0] = strings.TrimSpace(lineParts[0])
lineParts[1] = strings.TrimSpace(lineParts[1])
vars[lineParts[0]] = lineParts[1]
}
// LoadFile scans the file at path for key=value pairs and adds them
// to the configuration.
func LoadFile(path string) error {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
addLine(line)
}
if err = scanner.Err(); err != nil {
return err
}
return nil
}
// LoadFileFor scans the ini file at path, loading the default section
// and overriding any keys found under section. If strict is true, the
// named section must exist (i.e. to catch typos in the section name).
func LoadFileFor(path, section string, strict bool) error {
cmap, err := iniconf.ParseFile(path)
if err != nil {
return err
}
for key, value := range cmap[iniconf.DefaultSection] {
vars[key] = value
}
smap, ok := cmap[section]
if !ok {
if strict {
return fmt.Errorf("config: section '%s' wasn't found in the config file", section)
}
return nil
}
for key, value := range smap {
vars[key] = value
}
return nil
}
// Get retrieves a value from either a configuration file or the
// environment. Note that values from a file will override environment
// variables.
func Get(key string) string {
if v, ok := vars[key]; ok {
return v
}
return os.Getenv(prefix + key)
}
// GetDefault retrieves a value from either a configuration file or
// the environment. Note that values from a file will override
// environment variables. If a value isn't found (e.g. Get returns an
// empty string), the default value will be used.
func GetDefault(key, def string) string {
if v := Get(key); v != "" {
return v
}
return def
}
// Require retrieves a value from either a configuration file or the
// environment. If the key isn't present, it will call log.Fatal, printing
// the missing key.
func Require(key string) string {
if v, ok := vars[key]; ok {
return v
}
v, ok := os.LookupEnv(prefix + key)
if !ok {
var envMessage string
if prefix != "" {
envMessage = " (note: looked for the key " + prefix + key
envMessage += " in the local env)"
}
log.Fatalf("missing required configuration value %s%s", key, envMessage)
}
return v
}
// ListKeys returns a slice of the currently known keys.
func ListKeys() []string {
keyList := []string{}
for k := range vars {
keyList = append(keyList, k)
}
sort.Strings(keyList)
return keyList
}
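The package comment above describes the lookup order: file values override
the environment. A minimal usage sketch, assuming a hypothetical app.env
file and an APP_ environment prefix (the file name, prefix, and keys are
illustrative, not part of the package):
package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/config"
)

func main() {
	// Environment lookups use the APP_ prefix, e.g. APP_PORT.
	config.SetEnvPrefix("APP_")

	// Loading a key=value file is optional; keys present in the file
	// override the environment.
	if err := config.LoadFile("app.env"); err != nil {
		log.Printf("no config file loaded: %v", err)
	}

	port := config.GetDefault("PORT", "8080") // fallback if unset everywhere
	dbURL := config.Require("DATABASE_URL")   // log.Fatal if missing

	fmt.Println("port:", port, "db:", dbURL)
	fmt.Println("known keys:", config.ListKeys())
}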

66
config/config_test.go Normal file
View File

@ -0,0 +1,66 @@
package config
import (
"os"
"testing"
)
const (
testFilePath = "testdata/test.env"
// Keys
kOrder = "ORDER"
kSpecies = "SPECIES"
kName = "COMMON_NAME"
// Env
eOrder = "corvus"
eSpecies = "corvus corax"
eName = "northern raven"
// File
fOrder = "stringiformes"
fSpecies = "strix aluco"
// Name isn't set in the file to test fall through.
)
func init() {
os.Setenv(kOrder, eOrder)
os.Setenv(kSpecies, eSpecies)
os.Setenv(kName, eName)
}
func TestLoadEnvOnly(t *testing.T) {
order := Get(kOrder)
species := Get(kSpecies)
if order != eOrder {
t.Errorf("want %s, have %s", eOrder, order)
}
if species != eSpecies {
t.Errorf("want %s, have %s", eSpecies, species)
}
}
func TestLoadFile(t *testing.T) {
err := LoadFile(testFilePath)
if err != nil {
t.Fatal(err)
}
order := Get(kOrder)
species := Get(kSpecies)
name := Get(kName)
if order != fOrder {
t.Errorf("want %s, have %s", fOrder, order)
}
if species != fSpecies {
t.Errorf("want %s, have %s", fSpecies, species)
}
if name != eName {
t.Errorf("want %s, have %s", eName, name)
}
}

223
config/iniconf/iniconf.go Normal file
View File

@ -0,0 +1,223 @@
package iniconf
import (
"bufio"
"fmt"
"io"
"os"
"regexp"
)
// ConfigMap is shorthand for the map type used to hold a parsed configuration.
type ConfigMap map[string]map[string]string
var (
configSection = regexp.MustCompile(`^\s*\[\s*(\w+)\s*\]\s*$`)
quotedConfigLine = regexp.MustCompile(`^\s*(\w+)\s*=\s*["'](.*)["']\s*$`)
configLine = regexp.MustCompile(`^\s*(\w+)\s*=\s*(.*)\s*$`)
commentLine = regexp.MustCompile(`^#.*$`)
blankLine = regexp.MustCompile(`^\s*$`)
)
// DefaultSection is the label for the default ini file section.
var DefaultSection = "default"
// ParseFile attempts to load the named config file.
func ParseFile(fileName string) (cfg ConfigMap, err error) {
var file *os.File
file, err = os.Open(fileName)
if err != nil {
return
}
defer file.Close()
return ParseReader(file)
}
// ParseReader reads a configuration from an io.Reader.
func ParseReader(r io.Reader) (cfg ConfigMap, err error) {
cfg = ConfigMap{}
buf := bufio.NewReader(r)
var (
line string
longLine bool
currentSection string
lineBytes []byte
isPrefix bool
)
for {
lineBytes, isPrefix, err = buf.ReadLine()
if io.EOF == err {
err = nil
break
} else if err != nil {
break
} else if isPrefix {
line += string(lineBytes)
longLine = true
continue
} else if longLine {
line += string(lineBytes)
longLine = false
} else {
line = string(lineBytes)
}
if commentLine.MatchString(line) {
continue
} else if blankLine.MatchString(line) {
continue
} else if configSection.MatchString(line) {
section := configSection.ReplaceAllString(line,
"$1")
if section == "" {
err = fmt.Errorf("invalid structure in file")
break
} else if !cfg.SectionInConfig(section) {
cfg[section] = make(map[string]string, 0)
}
currentSection = section
} else if configLine.MatchString(line) {
regex := configLine
if quotedConfigLine.MatchString(line) {
regex = quotedConfigLine
}
if currentSection == "" {
currentSection = DefaultSection
if !cfg.SectionInConfig(currentSection) {
cfg[currentSection] = map[string]string{}
}
}
key := regex.ReplaceAllString(line, "$1")
val := regex.ReplaceAllString(line, "$2")
if key == "" {
continue
}
cfg[currentSection][key] = val
} else {
err = fmt.Errorf("invalid config file")
break
}
}
return
}
// SectionInConfig determines whether a section is in the configuration.
func (c ConfigMap) SectionInConfig(section string) bool {
_, ok := c[section]
return ok
}
// ListSections returns the list of sections in the config map.
func (c ConfigMap) ListSections() (sections []string) {
for section := range c {
sections = append(sections, section)
}
return
}
// WriteFile writes out the configuration to a file.
func (c ConfigMap) WriteFile(filename string) (err error) {
file, err := os.Create(filename)
if err != nil {
return
}
defer file.Close()
for _, section := range c.ListSections() {
sName := fmt.Sprintf("[ %s ]\n", section)
_, err = file.Write([]byte(sName))
if err != nil {
return
}
for k, v := range c[section] {
line := fmt.Sprintf("%s = %s\n", k, v)
_, err = file.Write([]byte(line))
if err != nil {
return
}
}
_, err = file.Write([]byte{0x0a})
if err != nil {
return
}
}
return
}
// AddSection creates a new section in the config map.
func (c ConfigMap) AddSection(section string) {
if nil == c[section] {
c[section] = map[string]string{}
}
}
// AddKeyVal adds a key value pair to a config map.
func (c ConfigMap) AddKeyVal(section, key, val string) {
if section == "" {
section = DefaultSection
}
if nil == c[section] {
c.AddSection(section)
}
c[section][key] = val
}
// GetValue retrieves the value from a key map.
func (c ConfigMap) GetValue(section, key string) (val string, present bool) {
if c == nil {
return
}
if section == "" {
section = DefaultSection
}
_, ok := c[section]
if !ok {
return
}
val, present = c[section][key]
return
}
// GetValueDefault retrieves the value from a key map if present,
// otherwise the default value.
func (c ConfigMap) GetValueDefault(section, key, value string) (val string) {
kval, ok := c.GetValue(section, key)
if !ok {
return value
}
return kval
}
// SectionKeys returns the sections in the config map.
func (c ConfigMap) SectionKeys(section string) (keys []string, present bool) {
if c == nil {
return nil, false
}
if section == "" {
section = DefaultSection
}
cm := c
s, ok := cm[section]
if !ok {
return nil, false
}
keys = make([]string, 0, len(s))
for key := range s {
keys = append(keys, key)
}
return keys, true
}
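A short sketch of driving this parser directly from a string; the section and key names below are invented for illustration.

package main

import (
	"fmt"
	"strings"

	"git.wntrmute.dev/kyle/goutils/config/iniconf"
)

func main() {
	raw := `
key0 = belongs to the default section

[ server ]
addr = 127.0.0.1
port = 8080
`
	cfg, err := iniconf.ParseReader(strings.NewReader(raw))
	if err != nil {
		panic(err)
	}

	if addr, ok := cfg.GetValue("server", "addr"); ok {
		fmt.Println("addr:", addr)
	}
	fmt.Println("port:", cfg.GetValueDefault("server", "port", "80"))
	fmt.Println("sections:", cfg.ListSections())
}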

View File

@ -0,0 +1,142 @@
package iniconf
import (
"errors"
"fmt"
"os"
"sort"
"testing"
)
// FailWithError is a utility for dumping errors and failing the test.
func FailWithError(t *testing.T, err error) {
fmt.Println("failed")
if err != nil {
fmt.Println("[!] ", err.Error())
}
t.FailNow()
}
// UnlinkIfExists removes a file if it exists.
func UnlinkIfExists(file string) {
_, err := os.Stat(file)
if err != nil && !os.IsNotExist(err) {
panic("failed to stat " + file)
}
os.Remove(file)
}
// stringSlicesEqual compares two string lists, checking that they
// contain the same elements.
func stringSlicesEqual(slice1, slice2 []string) bool {
if len(slice1) != len(slice2) {
return false
}
for i := range slice1 {
if slice1[i] != slice2[i] {
return false
}
}
return true
}
func TestGoodConfig(t *testing.T) {
testFile := "testdata/test.conf"
fmt.Printf("[+] validating known-good config... ")
cmap, err := ParseFile(testFile)
if err != nil {
FailWithError(t, err)
} else if len(cmap) != 2 {
FailWithError(t, err)
}
fmt.Println("ok")
}
func TestGoodConfig2(t *testing.T) {
testFile := "testdata/test2.conf"
fmt.Printf("[+] validating second known-good config... ")
cmap, err := ParseFile(testFile)
if err != nil {
FailWithError(t, err)
} else if len(cmap) != 1 {
FailWithError(t, err)
} else if len(cmap["default"]) != 3 {
FailWithError(t, err)
}
fmt.Println("ok")
}
func TestBadConfig(t *testing.T) {
testFile := "testdata/bad.conf"
fmt.Printf("[+] ensure invalid config file fails... ")
_, err := ParseFile(testFile)
if err == nil {
err = fmt.Errorf("invalid config file should fail")
FailWithError(t, err)
}
fmt.Println("ok")
}
func TestWriteConfigFile(t *testing.T) {
fmt.Printf("[+] ensure config file is written properly... ")
const testFile = "testdata/test.conf"
const testOut = "testdata/test.out"
cmap, err := ParseFile(testFile)
if err != nil {
FailWithError(t, err)
}
defer UnlinkIfExists(testOut)
err = cmap.WriteFile(testOut)
if err != nil {
FailWithError(t, err)
}
cmap2, err := ParseFile(testOut)
if err != nil {
FailWithError(t, err)
}
sectionList1 := cmap.ListSections()
sectionList2 := cmap2.ListSections()
sort.Strings(sectionList1)
sort.Strings(sectionList2)
if !stringSlicesEqual(sectionList1, sectionList2) {
err = fmt.Errorf("section lists don't match")
FailWithError(t, err)
}
for _, section := range sectionList1 {
for k := range cmap[section] {
if cmap[section][k] != cmap2[section][k] {
err = fmt.Errorf("config key doesn't match")
FailWithError(t, err)
}
}
}
fmt.Println("ok")
}
func TestQuotedValue(t *testing.T) {
testFile := "testdata/test.conf"
fmt.Printf("[+] validating quoted value... ")
cmap, _ := ParseFile(testFile)
val := cmap["sectionName"]["key4"]
if val != " space at beginning and end " {
FailWithError(t, errors.New("Wrong value in double quotes ["+val+"]"))
}
val = cmap["sectionName"]["key5"]
if val != " is quoted with single quotes " {
FailWithError(t, errors.New("Wrong value in single quotes ["+val+"]"))
}
fmt.Println("ok")
}

5
config/iniconf/testdata/bad.conf vendored Normal file
View File

@ -0,0 +1,5 @@
[]
key
another key
key = val

13
config/iniconf/testdata/test.conf vendored Normal file
View File

@ -0,0 +1,13 @@
[ sectionName ]
key1=some value
key2 = some other value
# we want to explain the importance and great forethought
# in this next value.
key3 = unintuitive value
key4 = " space at beginning and end "
key5 = ' is quoted with single quotes '
[ anotherSection ]
key1 = a value
key2 = yet another value
key1 = overwrites previous value of a value

3
config/iniconf/testdata/test2.conf vendored Normal file
View File

@ -0,0 +1,3 @@
key1 = some value
key2 = some other value
key3 = unintuitive value

19
config/path.go Normal file
View File

@ -0,0 +1,19 @@
//go:build !linux
// +build !linux
package config
import (
"os/user"
"path/filepath"
)
// DefaultConfigPath returns a sensible default configuration file path.
func DefaultConfigPath(dir, base string) string {
user, err := user.Current()
if err != nil || user.HomeDir == "" {
return filepath.Join(dir, base)
}
return filepath.Join(user.HomeDir, dir, base)
}

43
config/path_linux.go Normal file
View File

@ -0,0 +1,43 @@
package config
import (
"os"
"path/filepath"
)
// canUseXDGConfigDir checks whether the XDG config directory exists
// and is accessible by the current user. If it is present, it will
// be returned. Note that if the directory does not exist, it is
// presumed unusable.
func canUseXDGConfigDir() (string, bool) {
xdgDir := os.Getenv("XDG_CONFIG_DIR")
if xdgDir == "" {
userDir := os.Getenv("HOME")
if userDir == "" {
return "", false
}
xdgDir = filepath.Join(userDir, ".config")
}
fi, err := os.Stat(xdgDir)
if err != nil {
return "", false
}
if !fi.IsDir() {
return "", false
}
return xdgDir, true
}
// DefaultConfigPath returns a sensible default configuration file path.
func DefaultConfigPath(dir, base string) string {
dirPath, ok := canUseXDGConfigDir()
if !ok {
dirPath = "/etc"
}
return filepath.Join(dirPath, dir, base)
}
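Both build variants expose the same entry point; a quick sketch follows (the directory and file names are placeholders, mirroring the test below).

package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/config"
)

func main() {
	// On Linux this prefers $XDG_CONFIG_DIR (falling back to ~/.config,
	// then /etc); elsewhere it resolves under the user's home directory.
	fmt.Println(config.DefaultConfigPath("demoapp", "app.conf"))
}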

7
config/path_test.go Normal file
View File

@ -0,0 +1,7 @@
package config
import "testing"
func TestDefaultPath(t *testing.T) {
t.Log(DefaultConfigPath("demoapp", "app.conf"))
}

2
config/testdata/test.env vendored Normal file
View File

@ -0,0 +1,2 @@
ORDER=stringiformes
SPECIES=strix aluco

76
dbg/dbg.go Normal file
View File

@ -0,0 +1,76 @@
// Package dbg implements a debug printer.
package dbg
import (
"fmt"
"io"
"os"
)
// A DebugPrinter is a drop-in replacement for fmt.Print*, and also acts as
// an io.WriteCloser when enabled.
type DebugPrinter struct {
// If Enabled is false, the print statements won't do anything.
Enabled bool
out io.WriteCloser
}
// Close satisfies the Closer interface.
func (dbg *DebugPrinter) Close() error {
return dbg.out.Close()
}
// Write satisfies the Writer interface.
func (dbg *DebugPrinter) Write(p []byte) (int, error) {
if dbg.Enabled {
return dbg.out.Write(p)
}
return 0, nil
}
// New returns a new DebugPrinter on os.Stdout.
func New() *DebugPrinter {
return &DebugPrinter{
out: os.Stdout,
}
}
// ToFile sets up a new DebugPrinter to a file, truncating it if it exists.
func ToFile(path string) (*DebugPrinter, error) {
file, err := os.Create(path)
if err != nil {
return nil, err
}
return &DebugPrinter{
out: file,
}, nil
}
// To sets up a new DebugPrinter that writes to an io.WriteCloser.
func To(w io.WriteCloser) *DebugPrinter {
return &DebugPrinter{
out: w,
}
}
// Print calls fmt.Print if Enabled is true.
func (dbg *DebugPrinter) Print(v ...interface{}) {
if dbg.Enabled {
fmt.Fprint(dbg.out, v...)
}
}
// Println calls fmt.Println if Enabled is true.
func (dbg *DebugPrinter) Println(v ...interface{}) {
if dbg.Enabled {
fmt.Fprintln(dbg.out, v...)
}
}
// Printf calls fmt.Printf if Enabled is true.
func (dbg *DebugPrinter) Printf(format string, v ...interface{}) {
if dbg.Enabled {
fmt.Fprintf(dbg.out, format, v...)
}
}
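A brief sketch of the intended use: output is silent until Enabled is flipped.

package main

import "git.wntrmute.dev/kyle/goutils/dbg"

func main() {
	debug := dbg.New() // writes to os.Stdout once enabled

	debug.Println("suppressed: Enabled defaults to false")

	debug.Enabled = true
	debug.Println("now visible")
	debug.Printf("answer: %d\n", 42)
}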

120
dbg/dbg_test.go Normal file
View File

@ -0,0 +1,120 @@
package dbg
import (
"fmt"
"io/ioutil"
"os"
"testing"
"git.wntrmute.dev/kyle/goutils/assert"
"git.wntrmute.dev/kyle/goutils/testio"
)
func TestNew(t *testing.T) {
buf := testio.NewBufCloser(nil)
dbg := New()
dbg.out = buf
dbg.Print("hello")
dbg.Println("hello")
dbg.Printf("hello %s", "world")
assert.BoolT(t, buf.Len() == 0)
dbg.Enabled = true
dbg.Print("hello") // +5
dbg.Println("hello") // +6
dbg.Printf("hello %s", "world") // +11
assert.BoolT(t, buf.Len() == 22, fmt.Sprintf("buffer should be length 22 but is length %d", buf.Len()))
err := dbg.Close()
assert.NoErrorT(t, err)
}
func TestTo(t *testing.T) {
buf := testio.NewBufCloser(nil)
dbg := To(buf)
dbg.Print("hello")
dbg.Println("hello")
dbg.Printf("hello %s", "world")
assert.BoolT(t, buf.Len() == 0, "debug output should be suppressed")
dbg.Enabled = true
dbg.Print("hello") // +5
dbg.Println("hello") // +6
dbg.Printf("hello %s", "world") // +11
assert.BoolT(t, buf.Len() == 22, "didn't get the expected debug output")
err := dbg.Close()
assert.NoErrorT(t, err)
}
func TestToFile(t *testing.T) {
testFile, err := ioutil.TempFile("", "dbg")
assert.NoErrorT(t, err)
err = testFile.Close()
assert.NoErrorT(t, err)
testFileName := testFile.Name()
defer os.Remove(testFileName)
dbg, err := ToFile(testFileName)
assert.NoErrorT(t, err)
dbg.Print("hello")
dbg.Println("hello")
dbg.Printf("hello %s", "world")
stat, err := os.Stat(testFileName)
assert.NoErrorT(t, err)
assert.BoolT(t, stat.Size() == 0, "no debug output should have been sent to the log file")
dbg.Enabled = true
dbg.Print("hello") // +5
dbg.Println("hello") // +6
dbg.Printf("hello %s", "world") // +11
stat, err = os.Stat(testFileName)
assert.NoErrorT(t, err)
assert.BoolT(t, stat.Size() == 22, fmt.Sprintf("have %d bytes in the log file, expected 22", stat.Size()))
err = dbg.Close()
assert.NoErrorT(t, err)
}
func TestWriting(t *testing.T) {
data := []byte("hello, world")
buf := testio.NewBufCloser(nil)
dbg := To(buf)
n, err := dbg.Write(data)
assert.NoErrorT(t, err)
assert.BoolT(t, n == 0, "expected nothing to be written to the buffer")
dbg.Enabled = true
n, err = dbg.Write(data)
assert.NoErrorT(t, err)
assert.BoolT(t, n == 12, fmt.Sprintf("wrote %d bytes in the buffer, expected to write 12", n))
err = dbg.Close()
assert.NoErrorT(t, err)
}
func TestToFileError(t *testing.T) {
testFile, err := ioutil.TempFile("", "dbg")
assert.NoErrorT(t, err)
err = testFile.Chmod(0400)
assert.NoErrorT(t, err)
err = testFile.Close()
assert.NoErrorT(t, err)
testFileName := testFile.Name()
_, err = ToFile(testFileName)
assert.ErrorT(t, err)
err = os.Remove(testFileName)
assert.NoErrorT(t, err)
}

View File

@ -1,3 +1,6 @@
//go:build !windows
// +build !windows
// Package fileutil contains common file functions.
package fileutil

View File

@ -0,0 +1,49 @@
//go:build windows
// +build windows
// Package fileutil contains common file functions.
package fileutil
import (
"errors"
"os"
)
// FileDoesExist returns true if the file exists.
func FileDoesExist(path string) bool {
_, err := os.Stat(path)
return !os.IsNotExist(err)
}
// DirectoryDoesExist returns true if the directory exists.
func DirectoryDoesExist(path string) bool {
fi, err := os.Stat(path)
if err != nil {
return false
}
return fi.Mode().IsDir()
}
const (
// AccessExists checks whether the file exists. This is invalid outside of
// Unix systems.
AccessExists = 0
// AccessRead checks whether the user has read permissions on
// the file. This is invalid outside of Unix systems.
AccessRead = 0
// AccessWrite checks whether the user has write permissions
// on the file. This is invalid outside of Unix systems.
AccessWrite = 0
// AccessExec checks whether the user has executable
// permissions on the file. This is invalid outside of Unix systems.
AccessExec = 0
)
// Access is a Unix-only call, and has no meaning on Windows.
func Access(path string, mode int) error {
return errors.New("fileutil: Access is meaningless on Windows")
}
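For reference, a small sketch using the existence helpers (the paths checked here are placeholders); the same calls are available on the Unix build.

package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/fileutil"
)

func main() {
	fmt.Println("file exists:", fileutil.FileDoesExist("go.mod"))
	fmt.Println("dir exists:", fileutil.DirectoryDoesExist("testdata"))
}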

16
fileutil/symlinks.go Normal file
View File

@ -0,0 +1,16 @@
package fileutil
import (
"path/filepath"
"strings"
)
// ValidateSymlink checks to make sure a symlink exists in some top-level
// directory.
func ValidateSymlink(symlink, topLevel string) bool {
target, err := filepath.EvalSymlinks(symlink)
if err != nil {
return false
}
return strings.HasPrefix(target, topLevel)
}
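A sketch of the intended check; the symlink and the top-level directory below are hypothetical paths.

package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/fileutil"
)

func main() {
	// True only if /srv/app/current resolves to a path under /srv/app/releases.
	ok := fileutil.ValidateSymlink("/srv/app/current", "/srv/app/releases")
	fmt.Println("symlink stays inside the release tree:", ok)
}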

24
go.mod Normal file
View File

@ -0,0 +1,24 @@
module git.wntrmute.dev/kyle/goutils
go 1.20
require (
github.com/hashicorp/go-syslog v1.0.0
github.com/kr/text v0.2.0
github.com/pkg/errors v0.9.1
github.com/pkg/sftp v1.12.0
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
gopkg.in/yaml.v2 v2.4.0
)
require (
github.com/davecgh/go-spew v1.1.1
github.com/google/certificate-transparency-go v1.0.21
)
require (
github.com/kr/fs v0.1.0 // indirect
github.com/kr/pretty v0.1.0 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
)

43
go.sum Normal file
View File

@ -0,0 +1,43 @@
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/certificate-transparency-go v1.0.21 h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE=
github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.12.0 h1:/f3b24xrDhkhddlaobPe2JgBqfdt+gC/NYl0QY9IOuI=
github.com/pkg/sftp v1.12.0/go.mod h1:fUqqXB5vEgVCZ131L+9say31RAri6aF6KDViawhxKK8=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b h1:Qwe1rC8PSniVfAFPFJeyUkB+zcysC3RgJBAGk7eqBEU=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -1,4 +1,4 @@
// +build freebsd darwin netbsd
// +build freebsd darwin,386 netbsd
package lib

View File

@ -1,4 +1,4 @@
// +build unix linux openbsd
// +build unix linux openbsd darwin,amd64
package lib

View File

@ -79,12 +79,13 @@ var (
yearDuration = (365 * dayDuration) + (6 * time.Hour)
)
// Duration returns a prettier string for time.Durations.
func Duration(d time.Duration) string {
var s string
if d >= yearDuration {
years := d / yearDuration
s += fmt.Sprintf("%dy", years)
d -= (years * yearDuration)
d -= years * yearDuration
}
if d >= dayDuration {
@ -98,7 +99,7 @@ func Duration(d time.Duration) string {
d %= 1 * time.Second
hours := d / time.Hour
d -= (hours * time.Hour)
d -= hours * time.Hour
s += fmt.Sprintf("%dh%s", hours, d)
return s
}
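A quick sketch of calling the formatter (assuming the package is imported as git.wntrmute.dev/kyle/goutils/lib); the exact output string depends on the constants elided from this hunk.

package main

import (
	"fmt"
	"time"

	"git.wntrmute.dev/kyle/goutils/lib"
)

func main() {
	d := 400*24*time.Hour + 3*time.Hour + 9*time.Minute
	// Prints a compact year/day/hour breakdown of the duration.
	fmt.Println(lib.Duration(d))
}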

288
log/logger.go Normal file
View File

@ -0,0 +1,288 @@
// Package log is a syslog-type facility for logging.
package log
import (
"fmt"
"os"
"strings"
"time"
"github.com/davecgh/go-spew/spew"
gsyslog "github.com/hashicorp/go-syslog"
)
type logger struct {
l gsyslog.Syslogger
p gsyslog.Priority
writeConsole bool
}
func (log *logger) printf(p gsyslog.Priority, format string, args ...interface{}) {
if !strings.HasSuffix(format, "\n") {
format += "\n"
}
if p <= log.p && log.writeConsole {
fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
fmt.Printf(format, args...)
}
if log.l != nil {
log.l.WriteLevel(p, []byte(fmt.Sprintf(format, args...)))
}
}
func (log *logger) print(p gsyslog.Priority, args ...interface{}) {
if p <= log.p && log.writeConsole {
fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
fmt.Print(args...)
}
if log.l != nil {
log.l.WriteLevel(p, []byte(fmt.Sprint(args...)))
}
}
func (log *logger) println(p gsyslog.Priority, args ...interface{}) {
if p <= log.p && log.writeConsole {
fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
fmt.Println(args...)
}
if log.l != nil {
log.l.WriteLevel(p, []byte(fmt.Sprintln(args...)))
}
}
func (log *logger) spew(args ...interface{}) {
if log.p == gsyslog.LOG_DEBUG {
spew.Dump(args...)
}
}
func (log *logger) adjustPriority(level string) error {
priority, ok := priorities[level]
if !ok {
return fmt.Errorf("log: unknown priority %s", level)
}
log.p = priority
return nil
}
var log = &logger{p: gsyslog.LOG_WARNING}
var priorities = map[string]gsyslog.Priority{
"EMERG": gsyslog.LOG_EMERG,
"ALERT": gsyslog.LOG_ALERT,
"CRIT": gsyslog.LOG_CRIT,
"ERR": gsyslog.LOG_ERR,
"WARNING": gsyslog.LOG_WARNING,
"NOTICE": gsyslog.LOG_NOTICE,
"INFO": gsyslog.LOG_INFO,
"DEBUG": gsyslog.LOG_DEBUG,
}
var prioritiev = map[gsyslog.Priority]string{
gsyslog.LOG_EMERG: "EMERG",
gsyslog.LOG_ALERT: "ALERT",
gsyslog.LOG_CRIT: "CRIT",
gsyslog.LOG_ERR: "ERR",
gsyslog.LOG_WARNING: "WARNING",
gsyslog.LOG_NOTICE: "NOTICE",
gsyslog.LOG_INFO: "INFO",
gsyslog.LOG_DEBUG: "DEBUG",
}
func timestamp() string {
return time.Now().Format("2006-01-02 15:04:05 MST")
}
type Options struct {
Level string
Tag string
Facility string
WriteSyslog bool
WriteConsole bool
}
// DefaultOptions returns a sane set of defaults for syslog, using the program
// name as the tag name. withSyslog controls whether logs should be sent to
// syslog, too.
func DefaultOptions(tag string, withSyslog bool) *Options {
if tag == "" {
tag = os.Args[0]
}
return &Options{
Level: "WARNING",
Tag: tag,
Facility: "daemon",
WriteSyslog: withSyslog,
WriteConsole: true,
}
}
// DefaultDebugOptions returns a sane set of debug defaults for syslog,
// using the program name as the tag name. withSyslog controls whether logs
// should be sent to syslog, too.
func DefaultDebugOptions(tag string, withSyslog bool) *Options {
if tag == "" {
tag = os.Args[0]
}
return &Options{
Level: "DEBUG",
Tag: tag,
Facility: "daemon",
WriteSyslog: withSyslog,
WriteConsole: true,
}
}
func Setup(opts *Options) error {
priority, ok := priorities[opts.Level]
if !ok {
return fmt.Errorf("log: unknown priority %s", opts.Level)
}
log.p = priority
log.writeConsole = opts.WriteConsole
if opts.WriteSyslog {
var err error
log.l, err = gsyslog.NewLogger(priority, opts.Facility, opts.Tag)
if err != nil {
return err
}
}
return nil
}
func Debug(args ...interface{}) {
log.print(gsyslog.LOG_DEBUG, args...)
}
func Info(args ...interface{}) {
log.print(gsyslog.LOG_INFO, args...)
}
func Notice(args ...interface{}) {
log.print(gsyslog.LOG_NOTICE, args...)
}
func Warning(args ...interface{}) {
log.print(gsyslog.LOG_WARNING, args...)
}
func Err(args ...interface{}) {
log.print(gsyslog.LOG_ERR, args...)
}
func Crit(args ...interface{}) {
log.print(gsyslog.LOG_CRIT, args...)
}
func Alert(args ...interface{}) {
log.print(gsyslog.LOG_ALERT, args...)
}
func Emerg(args ...interface{}) {
log.print(gsyslog.LOG_EMERG, args...)
}
func Debugln(args ...interface{}) {
log.println(gsyslog.LOG_DEBUG, args...)
}
func Infoln(args ...interface{}) {
log.println(gsyslog.LOG_INFO, args...)
}
func Noticeln(args ...interface{}) {
log.println(gsyslog.LOG_NOTICE, args...)
}
func Warningln(args ...interface{}) {
log.println(gsyslog.LOG_WARNING, args...)
}
func Errln(args ...interface{}) {
log.println(gsyslog.LOG_ERR, args...)
}
func Critln(args ...interface{}) {
log.println(gsyslog.LOG_CRIT, args...)
}
func Alertln(args ...interface{}) {
log.println(gsyslog.LOG_ALERT, args...)
}
func Emergln(args ...interface{}) {
log.println(gsyslog.LOG_EMERG, args...)
}
func Debugf(format string, args ...interface{}) {
log.printf(gsyslog.LOG_DEBUG, format, args...)
}
func Infof(format string, args ...interface{}) {
log.printf(gsyslog.LOG_INFO, format, args...)
}
func Noticef(format string, args ...interface{}) {
log.printf(gsyslog.LOG_NOTICE, format, args...)
}
func Warningf(format string, args ...interface{}) {
log.printf(gsyslog.LOG_WARNING, format, args...)
}
func Errf(format string, args ...interface{}) {
log.printf(gsyslog.LOG_ERR, format, args...)
}
func Critf(format string, args ...interface{}) {
log.printf(gsyslog.LOG_CRIT, format, args...)
}
func Alertf(format string, args ...interface{}) {
log.printf(gsyslog.LOG_ALERT, format, args...)
}
func Emergf(format string, args ...interface{}) {
log.printf(gsyslog.LOG_EMERG, format, args...)
os.Exit(1)
}
func Fatal(args ...interface{}) {
log.println(gsyslog.LOG_ERR, args...)
os.Exit(1)
}
func Fatalf(format string, args ...interface{}) {
log.printf(gsyslog.LOG_ERR, format, args...)
os.Exit(1)
}
// FatalError will only execute if err != nil. If it does, it will
// print the message (appending the error) and exit the program.
func FatalError(err error, message string) {
if err == nil {
return
}
Fatal(fmt.Sprintf("%s: %s", message, err))
}
// Spew will pretty print the args if the logger is set to DEBUG priority.
func Spew(args ...interface{}) {
log.spew(args...)
}
func ChangePriority(level string) error {
return log.adjustPriority(level)
}
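A console-only usage sketch of the package above; the tag "demoapp" is a placeholder, and passing true instead would also send messages to syslog.

package main

import "git.wntrmute.dev/kyle/goutils/log"

func main() {
	opts := log.DefaultDebugOptions("demoapp", false) // console only
	if err := log.Setup(opts); err != nil {
		panic(err)
	}

	log.Infof("starting %s", "demoapp")
	log.Debugln("visible because the level is DEBUG")
	log.Warning("something looks off")
}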

View File

@ -4,7 +4,7 @@ import (
"os"
"time"
"github.com/kisom/goutils/logging"
"git.wntrmute.dev/kyle/goutils/logging"
)
var log = logging.NewConsole()

View File

@ -3,7 +3,7 @@ package logging_test
import (
"time"
"github.com/kisom/goutils/logging"
"git.wntrmute.dev/kyle/goutils/logging"
)
var log = logging.NewConsole()

View File

@ -8,11 +8,20 @@ type File struct {
*LogWriter
}
func (fl *File) Close() {
fl.fo.Close()
if fl.fe != nil {
fl.fe.Close()
// Close calls close on the underlying log files.
func (fl *File) Close() error {
if fl.fo != nil {
if err := fl.fo.Close(); err != nil {
return err
}
fl.fo = nil
}
if fl.fe != nil {
return fl.fe.Close()
}
return nil
}
// NewFile creates a new Logger that writes all logs to the file
@ -50,7 +59,7 @@ func NewSplitFile(outpath, errpath string, overwrite bool) (*File, error) {
if overwrite {
fl.fo, err = os.Create(outpath)
} else {
fl.fo, err = os.OpenFile(outpath, os.O_WRONLY|os.O_APPEND, 0644)
fl.fo, err = os.OpenFile(outpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
}
if err != nil {
@ -60,7 +69,7 @@ func NewSplitFile(outpath, errpath string, overwrite bool) (*File, error) {
if overwrite {
fl.fe, err = os.Create(errpath)
} else {
fl.fe, err = os.OpenFile(errpath, os.O_WRONLY|os.O_APPEND, 0644)
fl.fe, err = os.OpenFile(errpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
}
if err != nil {

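A sketch of using the split-file constructor touched above; the log file names are placeholders, and with overwrite set to false the files are now created if absent and appended to otherwise.

package main

import "git.wntrmute.dev/kyle/goutils/logging"

func main() {
	fl, err := logging.NewSplitFile("app.log", "app.err", false)
	if err != nil {
		panic(err)
	}
	defer fl.Close()

	fl.Info("demoapp", "starting up", nil)
	fl.Error("demoapp", "something went wrong", map[string]string{"code": "42"})
}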
View File

@ -29,6 +29,7 @@ const (
LevelFatal
)
// DefaultLevel is the default logging level when none is provided.
const DefaultLevel = LevelInfo
// Cheap integer to fixed-width decimal ASCII. Give a negative width

View File

@ -8,6 +8,67 @@ import (
)
// Logger provides a standardised logging interface.
//
// Log messages consist of four components:
//
// 1. The **level** attaches a notion of priority to the log message.
// Several log levels are available:
//
// + FATAL (32): the system is in an unusable state, and cannot
// continue to run. Most of the logging for this will cause the
// program to exit with an error code.
// + CRITICAL (16): critical conditions. The error, if uncorrected, is
// likely to cause a fatal condition shortly. An example is running
// out of disk space. This is something that the ops team should get
// paged for.
// + ERROR (8): error conditions. A single error doesn't require an
// ops team to be paged, but repeated errors should often trigger a
// page based on threshold triggers. An example is a network
// failure: it might be a transient failure (these do happen), but
// most of the time it's self-correcting.
// + WARNING (4): warning conditions. An example of this is a bad
// request sent to a server. This isn't an error on the part of the
// program, but it may be indicative of other things. Like errors,
// the ops team shouldn't be paged for warnings, but a page might be
// triggered if a certain threshold of warnings is reached (which is
// typically much higher than errors). For example, repeated
// warnings might be a sign that the system is under attack.
// + INFO (2): informational message. This is a normal log message
// that is used to deliver information, such as recording
// requests. Ops teams are never paged for informational
// messages. This is the default log level.
// + DEBUG (1): debug-level message. These are only used during
// development or if a deployed system repeatedly sees abnormal
// errors.
//
// The numeric values indicate the priority of a given level.
//
// 2. The **actor** is used to specify which component is generating
// the log message. This could be the program name, or it could be
// a specific component inside the system.
//
// 3. The **event** is a short message indicating what happened. This is
// most like the traditional log message.
//
// 4. The **attributes** are an optional set of key-value string pairs that
// provide additional information.
//
// Additionally, each log message has an associated timestamp. For the
// text-based logs, this is "%FT%T%z"; for the binary logs, this is a
// 64-bit Unix timestamp. An example text-based timestamp might look like ::
//
// [2016-03-27T20:59:27-0700] [INFO] [actor:server event:request received] client=192.168.2.5 request-size=839
//
// Note that this is organised in a manner that facilitates parsing::
//
// /\[(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{4})\] \[(\w+)\] \[actor:(.+?) event:(.+?)\]/
//
// will cover the header:
//
// + ``$1`` contains the timestamp
// + ``$2`` contains the level
// + ``$3`` contains the actor
// + ``$4`` contains the event
type Logger interface {
// SetLevel sets the minimum log level.
SetLevel(Level)
@ -21,68 +82,8 @@ type Logger interface {
Status() error
// Close gives the Logger the opportunity to perform any cleanup.
Close()
Close() error
// Log messages consist of four components:
//
// 1. The **level** attaches a notion of priority to the log message.
// Several log levels are available:
//
// + FATAL (32): the system is in an unsuable state, and cannot
// continue to run. Most of the logging for this will cause the
// program to exit with an error code.
// + CRITICAL (16): critical conditions. The error, if uncorrected, is
// likely to cause a fatal condition shortly. An example is running
// out of disk space. This is something that the ops team should get
// paged for.
// + ERROR (8): error conditions. A single error doesn't require an
// ops team to be paged, but repeated errors should often trigger a
// page based on threshold triggers. An example is a network
// failure: it might be a transient failure (these do happen), but
// most of the time it's self-correcting.
// + WARNING (4): warning conditions. An example of this is a bad
// request sent to a server. This isn't an error on the part of the
// program, but it may be indicative of other things. Like errors,
// the ops team shouldn't be paged for errors, but a page might be
// triggered if a certain threshold of warnings is reached (which is
// typically much higher than errors). For example, repeated
// warnings might be a sign that the system is under attack.
// + INFO (2): informational message. This is a normal log message
// that is used to deliver information, such as recording
// requests. Ops teams are never paged for informational
// messages. This is the default log level.
// + DEBUG (1): debug-level message. These are only used during
// development or if a deployed system repeatedly sees abnormal
// errors.
//
// The numeric values indicate the priority of a given level.
//
// 2. The **actor** is used to specify which component is generating
// the log message. This could be the program name, or it could be
// a specific component inside the system.
//
// 3. The **event** is a short message indicating what happened. This is
// most like the traditional log message.
//
// 4. The **attributes** are an optional set of key-value string pairs that
// provide additional information.
//
// Additionally, each log message has an associated timestamp. For the
// text-based logs, this is "%FT%T%z"; for the binary logs, this is a
// 64-bit Unix timestamp. An example text-based timestamp might look like ::
//
// [2016-03-27T20:59:27-0700] [INFO] [actor:server event:request received] client=192.168.2.5 request-size=839
//
// Note that this is organised in a manner that facilitates parsing::
//
// /\[(\d{4}-\d{3}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{4})\] \[(\w+\)]\) \[actor:(.+?) event:(.+?)\]/
//
// will cover the header:
//
// + ``$1`` contains the timestamp
// + ``$2`` contains the level
// + ``$3`` contains the actor
// + ``$4`` contains the event
Debug(actor, event string, attrs map[string]string)
Info(actor, event string, attrs map[string]string)
Warn(actor, event string, attrs map[string]string)
@ -228,7 +229,7 @@ func (lw *LogWriter) Fatal(actor, event string, attrs map[string]string) {
os.Exit(1)
}
// Fatal emits a message indicating that the system is in an unsuable
// FatalCode emits a message indicating that the system is in an unusable
// state, and cannot continue to run. The program will exit with the
// exit code speicfied in the exitcode argument.
//
@ -244,7 +245,7 @@ func (lw *LogWriter) FatalCode(exitcode int, actor, event string, attrs map[stri
os.Exit(exitcode)
}
// Fatal emits a message indicating that the system is in an unsuable
// FatalNoDie emits a message indicating that the system is in an unusable
// state, and cannot continue to run. The program will not exit; it is
// assumed that the caller has some final clean up to perform.
//
@ -276,4 +277,94 @@ func (lw *LogWriter) SetLevel(l Level) {
}
// Close is a no-op that satisfies the Logger interface.
func (lw *LogWriter) Close() {}
func (lw *LogWriter) Close() error { return nil }
// Multi allows combining of loggers.
type Multi struct {
loggers []Logger
}
func NewMulti(loggers ...Logger) *Multi {
return &Multi{loggers: loggers}
}
func (m *Multi) SetLevel(level Level) {
for _, l := range m.loggers {
l.SetLevel(level)
}
}
func (m *Multi) Good() bool {
good := true
for _, l := range m.loggers {
good = good && l.Good()
}
return good
}
func (m *Multi) Status() error {
for _, l := range m.loggers {
if err := l.Status(); err != nil {
return err
}
}
return nil
}
func (m *Multi) Close() error {
for _, l := range m.loggers {
l.Close()
}
return nil
}
func (m *Multi) Debug(actor, event string, attrs map[string]string) {
for _, l := range m.loggers {
l.Debug(actor, event, attrs)
}
}
func (m *Multi) Info(actor, event string, attrs map[string]string) {
for _, l := range m.loggers {
l.Info(actor, event, attrs)
}
}
func (m *Multi) Warn(actor, event string, attrs map[string]string) {
for _, l := range m.loggers {
l.Warn(actor, event, attrs)
}
}
func (m *Multi) Error(actor, event string, attrs map[string]string) {
for _, l := range m.loggers {
l.Error(actor, event, attrs)
}
}
func (m *Multi) Critical(actor, event string, attrs map[string]string) {
for _, l := range m.loggers {
l.Critical(actor, event, attrs)
}
}
func (m *Multi) Fatal(actor, event string, attrs map[string]string) {
for _, l := range m.loggers {
l.Fatal(actor, event, attrs)
}
}
func (m *Multi) FatalCode(exitcode int, actor, event string, attrs map[string]string) {
for _, l := range m.loggers {
l.FatalCode(exitcode, actor, event, attrs)
}
}
func (m *Multi) FatalNoDie(actor, event string, attrs map[string]string) {
for _, l := range m.loggers {
l.FatalNoDie(actor, event, attrs)
}
}
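A sketch of fanning one message out through the new Multi type; the actor and event strings and the attribute values are illustrative only.

package main

import "git.wntrmute.dev/kyle/goutils/logging"

func main() {
	c1 := logging.NewConsole()
	c2 := logging.NewConsole()

	multi := logging.NewMulti(c1, c2)
	multi.SetLevel(logging.DefaultLevel)

	// Each message is delivered to every underlying logger.
	multi.Info("demoapp", "request received",
		map[string]string{"client": "192.168.2.5", "request-size": "839"})

	_ = multi.Close()
}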

View File

@ -53,3 +53,12 @@ func TestDestroyLogFiles(t *testing.T) {
os.Remove("fw2.log")
os.Remove("fw2.err")
}
func TestMulti(t *testing.T) {
c1 := NewConsole()
c2 := NewConsole()
m := NewMulti(c1, c2)
if !m.Good() {
t.Fatal("failed to set up multi logger")
}
}

View File

@ -4,8 +4,8 @@ import (
"bytes"
"testing"
"github.com/kisom/goutils/testio"
"github.com/kisom/goutils/assert"
"git.wntrmute.dev/kyle/goutils/assert"
"git.wntrmute.dev/kyle/goutils/testio"
)
func TestMWC(t *testing.T) {

49
rand/rand.go Normal file
View File

@ -0,0 +1,49 @@
// Package rand contains utilities for interacting with math/rand, including
// seeding it from a cryptographically secure random value.
package rand
import (
"crypto/rand"
"encoding/binary"
mrand "math/rand"
)
// CryptoUint64 generates a cryptographically-secure 64-bit integer.
func CryptoUint64() (uint64, error) {
bs := make([]byte, 8)
_, err := rand.Read(bs)
if err != nil {
return 0, err
}
return binary.BigEndian.Uint64(bs), nil
}
// Seed initialises the non-cryptographic PRNG with a random,
// cryptographically secure value, so that its output is not trivially
// predictable. The returned 64-bit value is the seed.
func Seed() (uint64, error) {
seed, err := CryptoUint64()
if err != nil {
return 0, err
}
// NB: this is permitted.
mrand.Seed(int64(seed))
return seed, nil
}
// Int is a wrapper for math/rand.Int so only one package needs to be imported.
func Int() int {
return mrand.Int()
}
// Intn is a wrapper for math/rand.Intn so only one package needs to be imported.
func Intn(max int) int {
return mrand.Intn(max)
}
// Intn2 returns a random value in the half-open interval [min, max).
func Intn2(min, max int) int {
return Intn(max-min) + min
}
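A short sketch of the package in use; note that Intn2's upper bound is exclusive, so a six-sided die roll passes 7.

package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/rand"
)

func main() {
	seed, err := rand.Seed()
	if err != nil {
		panic(err)
	}
	fmt.Printf("seeded math/rand with %d\n", seed)

	// Intn2(1, 7) yields values in [1, 6] -- a d6 roll.
	fmt.Println("d6:", rand.Intn2(1, 7))
}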

74
rand/rand_test.go Normal file
View File

@ -0,0 +1,74 @@
package rand
import (
mrand "math/rand"
"testing"
)
func TestCryptoUint64(t *testing.T) {
n1, err := CryptoUint64()
if err != nil {
t.Fatal(err)
}
n2, err := CryptoUint64()
if err != nil {
t.Fatal(err)
}
// This has such a low chance of occurring that it's likely to be
// indicative of a bad CSPRNG.
if n1 == n2 {
t.Fatalf("repeated random uint64s: %d", n1)
}
}
func TestIntn(t *testing.T) {
expected := []int{3081, 4887, 4847, 1059, 3081}
mrand.Seed(1)
for i := 0; i < 5; i++ {
n := Intn2(1000, 5000)
if n != expected[i] {
t.Errorf("invalid sequence at %d: expected %d, have %d", i, expected[i], n)
}
}
}
func TestSeed(t *testing.T) {
seed1, err := Seed()
if err != nil {
t.Fatal(err)
}
var seed2 uint64
n1 := Int()
tries := 0
for {
seed2, err = Seed()
if err != nil {
t.Fatal(err)
}
if seed1 != seed2 {
break
}
tries++
if tries > 3 {
t.Fatal("can't generate two unique seeds")
}
}
n2 := Int()
// Again, this is not impossible, merely statistically improbable, and
// a potential canary for RNG issues.
if n1 == n2 {
t.Fatalf("repeated integers fresh from two unique seeds: %d/%d -> %d",
seed1, seed2, n1)
}
}

Some files were not shown because too many files have changed in this diff.